Columns: repo_name (string, length 6-130), hexsha (list), file_path (list), code (list), apis (list), possible_versions (list)
aehsani/ergo
[ "1c03494fcbc89192212b9595bb00acc794bd621c" ]
[ "ergo/foretold.py" ]
[ "from dataclasses import dataclass\nfrom typing import List, Union\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport seaborn\nimport torch\n\nfrom ergo.ppl import uniform\n\n\nclass Foretold:\n \"\"\"Interface to Foretold\"\"\"\n\n def __init__(self, token=None):\n \"\"\"token (string): Specify an authorization token (supports Bot tokens from Foretold)\"\"\"\n self.token = token\n self.api_url = \"https://prediction-backend.herokuapp.com/graphql\"\n\n def get_question(self, id):\n \"\"\"Retrieve a single question by its id\"\"\"\n question = ForetoldQuestion(id, self)\n question.refresh_question()\n return question\n\n def get_questions(self, ids):\n \"\"\"Retrieve many questions by their ids\n ids (List[string]): List of foretold question ids (should be less than 500 per request)\n Returns: List of questions corresponding to the ids, or None for questions that weren't found.\"\"\"\n measurables = self._query_measurables(ids)\n return [\n ForetoldQuestion(measurable[\"id\"], self, measurable) if measurable else None\n for measurable in measurables\n ]\n\n def _post(self, json_data):\n \"\"\"Send a json post request to the foretold API, with proper authorization\"\"\"\n headers = {}\n if self.token is not None:\n headers[\"Authorization\"] = f\"Bearer {self.token}\"\n response = requests.post(self.api_url, json=json_data, headers=headers)\n response.raise_for_status()\n return response.json()\n\n def _query_measurable(self, id):\n \"\"\"Retrieve data from api about single question by its id\"\"\"\n response = self._post(\n {\n \"variables\": {\"measurableId\": id},\n \"query\": \"\"\"query ($measurableId: String!) {\n measurable(id:$measurableId) {\n id\n channelId\n previousAggregate {\n value {\n floatCdf {\n xs\n ys\n }\n }\n }\n }\n }\"\"\",\n }\n )\n return response[\"data\"][\"measurable\"]\n\n def _query_measurables(self, ids):\n \"\"\"Retrieve data from api about many question by a list of ids\"\"\"\n if len(ids) > 500:\n # If we want to implement this later, we can properly use the pageInfo in the request\n raise NotImplementedError(\n \"We haven't implemented support for more than 500 ids per request\"\n )\n response = self._post(\n {\n \"variables\": {\"measurableIds\": ids},\n \"query\": \"\"\"query ($measurableIds: [String!]) {\n measurables(measurableIds: $measurableIds, first: 500) {\n total\n pageInfo {\n hasPreviousPage\n hasNextPage\n startCursor\n endCursor\n __typename\n }\n edges {\n node {\n id\n channelId\n previousAggregate {\n value {\n floatCdf {\n xs\n ys\n }\n }\n }\n }\n }\n }\n }\"\"\",\n }\n )\n if \"errors\" in response:\n raise ValueError(\n \"Error retrieving foretold measurables. 
You may not have authorization \"\n \"to load one or more measurables, or one of the measureable ids may be incorrect\"\n )\n if response[\"data\"][\"measurables\"][\"pageInfo\"][\"hasNextPage\"]:\n raise NotImplementedError(\n \"We haven't implemented support for more than 500 ids per request\"\n )\n measurables_dict = {}\n for edge in response[\"data\"][\"measurables\"][\"edges\"]:\n measureable = edge[\"node\"]\n measurables_dict[measureable[\"id\"]] = measureable\n\n return [measurables_dict.get(id, None) for id in ids]\n\n def create_measurement(\n self, measureable_id: str, cdf: \"ForetoldCdf\"\n ) -> requests.Response:\n if self.token is None:\n raise Exception(\"A token is required to submit a prediction\")\n if len(cdf) > 1000:\n raise Exception(\"Maximum CDF length of 1000 exceeded\")\n headers = {\"Authorization\": f\"Bearer {self.token}\"}\n query = _measurement_query(measureable_id, cdf)\n response = requests.post(self.api_url, json={\"query\": query}, headers=headers)\n return response\n\n\nclass ForetoldQuestion:\n \"\"\"\"Information about foretold question, including aggregated distribution\"\"\"\n\n def __init__(self, id, foretold, data=None):\n \"\"\"\n Should not be called directly, instead use Foretold.get_question\n\n id: measurableId, the second id in the URL for a foretold question\n foretold: Foretold api\n data: Data retrieved from the foretold api\n \"\"\"\n self.id = id\n self.foretold = foretold\n self.floatCdf = None\n self.channelId = None\n if data is not None:\n self._update_from_data(data)\n\n def _update_from_data(self, data):\n \"\"\"Update based on a dictionary of data from Foretold\"\"\"\n try:\n self.channelId = data[\"channelId\"]\n except (KeyError, TypeError):\n raise ValueError(f\"Foretold data missing or invalid\")\n\n # If floatCdf is not available, we can just keep it as None\n try:\n self.floatCdf = data[\"previousAggregate\"][\"value\"][\"floatCdf\"]\n except (KeyError, TypeError):\n self.floatCdf = None\n\n def refresh_question(self):\n # previousAggregate is the most recent aggregated distribution\n try:\n measurable = self.foretold._query_measurable(self.id)\n self._update_from_data(measurable)\n except ValueError:\n raise ValueError(f\"Error loading distribution {self.id} from Foretold\")\n\n @property\n def url(self):\n return f\"https://www.foretold.io/c/{self.channelId}/m/{self.id}\"\n\n @property\n def community_prediction_available(self):\n return self.floatCdf is not None\n\n def get_float_cdf_or_error(self):\n if not self.community_prediction_available:\n raise ValueError(\"No community prediction available\")\n return self.floatCdf\n\n def quantile(self, q):\n \"\"\"Quantile of distribution\"\"\"\n floatCdf = self.get_float_cdf_or_error()\n return np.interp(q, floatCdf[\"ys\"], floatCdf[\"xs\"])\n\n def sample_community(self):\n \"\"\"Sample from CDF\"\"\"\n y = uniform()\n return torch.tensor(self.quantile(y))\n\n def plotCdf(self):\n \"\"\"Plot the CDF\"\"\"\n floatCdf = self.get_float_cdf_or_error()\n seaborn.lineplot(floatCdf[\"xs\"], floatCdf[\"ys\"])\n\n def submit_from_samples(\n self, samples: Union[np.ndarray, pd.Series], length: int = 20\n ) -> requests.Response:\n \"\"\"Submit a prediction to Foretold based on the given samples\n\n :param samples: Samples on which to base the submission\n :param length: The length of the CDF derived from the samples\n \"\"\"\n cdf = ForetoldCdf.from_samples(samples, length)\n return self.foretold.create_measurement(self.id, cdf)\n\n\n@dataclass\nclass ForetoldCdf:\n\n xs: List[float]\n 
ys: List[float]\n\n @staticmethod\n def from_samples(\n samples: Union[np.ndarray, pd.Series], length: int\n ) -> \"ForetoldCdf\":\n \"\"\"Build a Foretold CDF representation from an array of samples\n\n See the following for details:\n https://docs.foretold.io/cumulative-distribution-functions-format\n\n :param samples: Samples from which to build the CDF\n :param length: The length of returned CDF\n \"\"\"\n if length < 2:\n raise ValueError(\"`length` must be at least 2\")\n hist, bin_edges = np.histogram(samples, bins=length - 1, density=True) # type: ignore\n bin_width = bin_edges[1] - bin_edges[0]\n # Foretold expects `0 <= ys <= 1`, so we clip to that . This\n # is defensive -- at the time of implementation it isn't known\n # how the API handles violations of this.\n ys = np.clip(np.hstack([np.array([0.0]), np.cumsum(hist) * bin_width]), 0, 1) # type: ignore\n return ForetoldCdf(bin_edges.tolist(), ys.tolist()) # type: ignore\n\n def __len__(self):\n return len(self.xs)\n\n\ndef _measurement_query(measureable_id: str, cdf: ForetoldCdf) -> str:\n return f\"\"\"mutation {{\n measurementCreate(\n input: {{\n value: {{ floatCdf: {{ xs: {cdf.xs}, ys: {cdf.ys} }} }}\n competitorType: COMPETITIVE\n measurableId: \"{measureable_id}\"\n }}\n ) {{\n id\n }}\n }}\n \"\"\"\n" ]
[ [ "numpy.cumsum", "numpy.array", "numpy.histogram", "numpy.interp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
drnextgis/cogeo-mosaic
[ "034d0124a2da894c2bb432b1c0cebba7f716edbd" ]
[ "cogeo_mosaic/utils.py" ]
[ "\"\"\"cogeo_mosaic.utils: utility functions.\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom concurrent import futures\nfrom typing import Dict, List, Sequence, Tuple\n\nimport click\nimport mercantile\nimport numpy\nfrom pygeos import area, intersection\nfrom rio_tiler.io import COGReader\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef _filter_futures(tasks):\n \"\"\"\n Filter future task to remove Exceptions.\n\n Attributes\n ----------\n tasks : list\n List of 'concurrent.futures._base.Future'\n\n Yields\n ------\n Successful task's result\n\n \"\"\"\n for future in tasks:\n try:\n yield future.result()\n except Exception as err:\n logger.warning(str(err))\n pass\n\n\ndef get_dataset_info(src_path: str) -> Dict:\n \"\"\"Get rasterio dataset meta.\"\"\"\n with COGReader(src_path) as cog:\n bounds = cog.bounds\n return {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [bounds[0], bounds[3]],\n [bounds[0], bounds[1]],\n [bounds[2], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[0], bounds[3]],\n ]\n ],\n },\n \"properties\": {\n \"path\": src_path,\n \"bounds\": cog.bounds,\n \"minzoom\": cog.minzoom,\n \"maxzoom\": cog.maxzoom,\n \"datatype\": cog.dataset.meta[\"dtype\"],\n },\n \"type\": \"Feature\",\n }\n\n\ndef get_footprints(\n dataset_list: Sequence[str], max_threads: int = 20, quiet: bool = True\n) -> List:\n \"\"\"\n Create footprint GeoJSON.\n\n Attributes\n ----------\n dataset_listurl : tuple or list, required\n Dataset urls.\n max_threads : int\n Max threads to use (default: 20).\n\n Returns\n -------\n out : tuple\n tuple of footprint feature.\n\n \"\"\"\n fout = os.devnull if quiet else sys.stderr\n with futures.ThreadPoolExecutor(max_workers=max_threads) as executor:\n future_work = [executor.submit(get_dataset_info, item) for item in dataset_list]\n with click.progressbar( # type: ignore\n futures.as_completed(future_work),\n file=fout,\n length=len(future_work),\n label=\"Get footprints\",\n show_percent=True,\n ) as future:\n for _ in future:\n pass\n\n return list(_filter_futures(future_work))\n\n\ndef tiles_to_bounds(tiles: List[mercantile.Tile]) -> Tuple[float, float, float, float]:\n \"\"\"Get bounds from a set of mercator tiles.\"\"\"\n zoom = tiles[0].z\n xyz = numpy.array([[t.x, t.y, t.z] for t in tiles])\n extrema = {\n \"x\": {\"min\": xyz[:, 0].min(), \"max\": xyz[:, 0].max() + 1},\n \"y\": {\"min\": xyz[:, 1].min(), \"max\": xyz[:, 1].max() + 1},\n }\n ulx, uly = mercantile.ul(extrema[\"x\"][\"min\"], extrema[\"y\"][\"min\"], zoom)\n lrx, lry = mercantile.ul(extrema[\"x\"][\"max\"], extrema[\"y\"][\"max\"], zoom)\n return (ulx, lry, lrx, uly)\n\n\ndef _intersect_percent(tile, dataset_geoms):\n \"\"\"Return the overlap percent.\"\"\"\n inter_areas = area(intersection(tile, dataset_geoms))\n return [inter_area / area(tile) for inter_area in inter_areas]\n\n\ndef bbox_union(\n bbox_1: Tuple[float, float, float, float],\n bbox_2: Tuple[float, float, float, float],\n) -> Tuple[float, float, float, float]:\n \"\"\"Return the union of two bounding boxes.\"\"\"\n return (\n min(bbox_1[0], bbox_2[0]),\n min(bbox_1[1], bbox_2[1]),\n max(bbox_1[2], bbox_2[2]),\n max(bbox_1[3], bbox_2[3]),\n )\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pradeeptadas/uniswap-v3-project
[ "8f938dc5602fdb6e58b2cf42393a01994f48682d", "8f938dc5602fdb6e58b2cf42393a01994f48682d" ]
[ "uniswapv3_simulator/optimization/ddpg/ddpg.py", "uniswapv3_simulator/math.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport copy\nimport logging\n\nfrom .replay_buffer import ReplayBuffer\nfrom .exploration_noise import ConstantNoise\n\n\nlogger = logging.getLogger('optimization.ddpg')\n\n\nclass DDPG:\n def __init__(self,\n model=None,\n gamma=0.99,\n tau=1e-3,\n optimizer=optim.Adam,\n actor_optimizer_kwargs={},\n critic_optimizer_kwargs={},\n clip_gradients=None):\n\n self.online_network = model\n self.target_network = copy.deepcopy(model)\n self.online_network.eval()\n self.target_network.eval()\n\n self.actor_optimizer = optimizer(self.online_network.actor_params,\n **actor_optimizer_kwargs)\n self.critic_optimizer = optimizer(self.online_network.critic_params,\n **critic_optimizer_kwargs)\n\n self.gamma = gamma\n self.tau = tau\n self.clip_gradients = clip_gradients\n\n def action(self, obs):\n self.online_network.eval()\n obs = torch.as_tensor(obs, dtype=torch.float32)\n with torch.no_grad():\n action = self.online_network.action(obs).cpu().numpy()\n\n return action\n\n def update_target_networks(self):\n for target, online in zip(self.target_network.parameters(),\n self.online_network.parameters()):\n target.detach_()\n target.copy_(target * (1.0 - self.tau) + online * self.tau)\n\n # this is for things like batch norm and other PyTorch objects\n # that have buffers and/or instead of learnable parameters\n for target, online in zip(self.target_network.buffers(),\n self.online_network.buffers()):\n # detach is probably unnecessary since buffers are not learnable\n target.detach_()\n target.copy_(target * (1.0 - self.tau) + online * self.tau)\n\n def update_target(self, obs, action, reward, next_obs, terminal):\n with torch.no_grad():\n next_action = self.target_network.action(next_obs)\n q_sa_next = self.target_network.critic_value(next_obs, next_action)\n\n update_target = reward + self.gamma * q_sa_next * ~terminal\n\n return update_target\n\n def update(self, obs, action, reward, next_obs, terminal):\n obs = torch.as_tensor(obs, dtype=torch.float32)\n action = torch.as_tensor(action, dtype=torch.float32)\n reward = torch.as_tensor(reward, dtype=torch.float32)\n next_obs = torch.as_tensor(next_obs, dtype=torch.float32)\n terminal = torch.as_tensor(terminal, dtype=torch.bool)\n\n self.online_network.eval()\n update_target = self.update_target(obs, action, reward, next_obs, terminal)\n\n self.online_network.train()\n q_sa = self.online_network.critic_value(obs, action)\n td_error = q_sa - update_target\n critic_loss = td_error.pow(2).mul(0.5).squeeze(-1).mean()\n\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n if self.clip_gradients:\n nn.utils.clip_grad_norm_(self.online_network.critic_params,\n self.clip_gradients)\n self.critic_optimizer.step()\n\n action = self.online_network.action(obs)\n policy_loss = -self.online_network.critic_value(obs, action).mean()\n\n self.actor_optimizer.zero_grad()\n policy_loss.backward()\n if self.clip_gradients:\n nn.utils.clip_grad_norm_(self.online_network.actor_params,\n self.clip_gradients)\n self.actor_optimizer.step()\n\n self.update_target_networks()\n self.online_network.eval()\n\n\nclass DeepActorModel(nn.Module):\n def __init__(self, obs_size, action_size,\n actor_hidden_layers, critic_hidden_layers):\n super().__init__()\n\n critic_layers = []\n input_size = obs_size + action_size\n # critic_layers.append(nn.BatchNorm1d(input_size))\n for i, units in enumerate(critic_hidden_layers):\n output_size = units\n critic_layers.append(nn.Linear(input_size, 
output_size))\n critic_layers.append(nn.ReLU())\n input_size = output_size\n\n critic_layers.append(nn.Linear(input_size, 1))\n self.critic_layers = nn.Sequential(*critic_layers)\n self.critic_params = list(self.critic_layers.parameters())\n\n actor_layers = []\n input_size = obs_size\n # actor_layers.append(nn.BatchNorm1d(input_size))\n for i, units in enumerate(actor_hidden_layers):\n output_size = units\n actor_layers.append(nn.Linear(input_size, output_size))\n actor_layers.append(nn.ReLU())\n input_size = output_size\n\n actor_layers.append(nn.Linear(input_size, action_size))\n self.actor_layers = nn.Sequential(*actor_layers)\n self.actor_params = list(self.actor_layers.parameters())\n\n def forward(self, obs):\n action = self.actor_layers(obs)\n # using sigmoid instead of tanh since \"actions\" >= 0?\n action = torch.sigmoid(action).squeeze()\n\n return action\n\n def action(self, obs):\n return self.forward(obs)\n\n def critic_value(self, obs, action):\n x = torch.cat([obs, action], dim=-1)\n\n return self.critic_layers(x)\n\n\nclass DDPGTrainer:\n def __init__(self, agent, env, args):\n self.agent = agent\n self.env = env\n self.args = args\n\n self._replay_buffer = None\n self._exploration_noise = None\n\n def train(self):\n seed_seq = np.random.SeedSequence(self.args.seed)\n seeds = seed_seq.generate_state(3)\n\n self.env.seed(int(seeds[0]))\n self._replay_buffer = ReplayBuffer(self.args.memory_size, seed=seeds[1])\n self._exploration_noise = self.args.exploration_noise(\n **self.args.noise_kwargs,\n seed=seeds[2]\n )\n rewards = []\n\n obs = self.env.reset()\n total_reward = 0\n j = 0\n try:\n for i in range(self.args.train_steps):\n action = self.agent.action(obs.reshape(1, -1)).squeeze()\n logger.debug(f'Raw action: {action}')\n exploration_noise = self._exploration_noise.sample()\n logger.debug(f'Exploration noise: {exploration_noise}')\n action += exploration_noise\n if self.args.clip_actions:\n action = np.clip(action, self.args.clip_actions[0], self.args.clip_actions[1])\n logger.debug(f'Final action: {action}')\n\n next_obs, reward, terminal, _ = self.env.step(action)\n self._replay_buffer.add((obs, action, reward, next_obs, terminal))\n total_reward += reward\n\n if (i >= self.args.update_start) and (i % self.args.update_freq == 0):\n exp_batch = self._replay_buffer.sample(self.args.batch_size)\n exp_batch = list(zip(*exp_batch))\n self.agent.update(\n np.array(exp_batch[0]), # obs\n np.array(exp_batch[1]), # action\n np.array(exp_batch[2]).reshape(-1, 1), # reward\n np.array(exp_batch[3]), # next_obs\n np.array(exp_batch[4]).reshape(-1, 1), # terminal\n )\n\n obs = next_obs\n j += 1\n if terminal:\n rewards.append(total_reward)\n if len(rewards) % 50 == 0:\n ep_num = len(rewards)\n mean_score = np.mean(rewards[-50:])\n print(\n f'Episode: {ep_num:>5,} | Mean Score: {mean_score:>7,.6f}'\n )\n obs = self.env.reset()\n total_reward = 0\n j = 0\n\n except KeyboardInterrupt:\n logger.warning(f'Training stopped during step {i:,.0f}.')\n\n self.env.close()\n\n return rewards\n\n\nclass TrainArgs:\n def __init__(self, **kwargs):\n self.train_steps = 10000\n self.batch_size = 64\n self.update_freq = 1\n self.update_start = 1000\n self.memory_size = 1000000\n self.exploration_noise = ConstantNoise\n self.noise_kwargs = {}\n self.clip_actions = None\n self.seed = None\n\n self.update(**kwargs)\n\n def update(self, **kwargs):\n for key, value in kwargs.items():\n if not hasattr(self, key):\n logger.warning(f'{key} is not a valid training parameter.')\n continue\n setattr(self, key, 
value)", "import numpy as np\n\n\ndef sqrt_price_to_tick(sqrt_price):\n \"\"\"\n TODO: finish documentation\n See formula 6.8 in the white paper.\n\n We use the change of base formula to compute the log as numpy doesn't have\n a log function with an arbitrary base.\n\n :param sqrt_price:\n :return:\n \"\"\"\n base = np.sqrt(1.0001)\n return int(np.floor(np.log(sqrt_price) / np.log(base)))\n\n\ndef tick_to_sqrt_price(tick):\n \"\"\"\n TODO: finish documentation\n See formula 6.2 in the white paper.\n\n :param tick:\n :return:\n \"\"\"\n return 1.0001 ** (tick / 2)\n\n\ndef get_delta_token0(liquidity_delta,\n sqrt_price, sqrt_price_lower, sqrt_price_upper,\n tick, tick_lower, tick_upper):\n \"\"\"\n TODO: finish documentation\n Calculate \\Delta X, \\Delta Y, the amounts of token0 and token1, respectively\n that needs to be contributed to add \\Delta L liquidity to the pool. See\n formula 6.30 in the white paper.\n\n :param liquidity_delta:\n :param sqrt_price:\n :param tick:\n :param tick_lower:\n :param tick_upper:\n :param sqrt_price_lower:\n :param sqrt_price_upper:\n :return:\n \"\"\"\n if tick < tick_lower:\n delta_token0 = liquidity_delta * (1 / sqrt_price_lower - 1 / sqrt_price_upper)\n elif tick < tick_upper:\n delta_token0 = liquidity_delta * (1 / sqrt_price - 1 / sqrt_price_upper)\n else:\n delta_token0 = 0\n\n return delta_token0\n\n\ndef get_delta_token1(liquidity_delta,\n sqrt_price, sqrt_price_lower, sqrt_price_upper,\n tick, tick_lower, tick_upper):\n \"\"\"\n TODO: finish documentation\n Calculate \\Delta X, \\Delta Y, the amounts of token0 and token1, respectively\n that needs to be contributed to add \\Delta L liquidity to the pool. See\n formula 6.29 in the white paper.\n\n :param liquidity_delta:\n :param sqrt_price:\n :param tick:\n :param tick_lower:\n :param tick_upper:\n :param sqrt_price_lower:\n :param sqrt_price_upper:\n :return:\n \"\"\"\n if tick < tick_lower:\n delta_token1 = 0\n elif tick < tick_upper:\n delta_token1 = liquidity_delta * (sqrt_price - sqrt_price_lower)\n else:\n delta_token1 = liquidity_delta * (sqrt_price_upper - sqrt_price_lower)\n\n return delta_token1\n\n\ndef get_init_fee_growth_outside(init_tick, current_tick, fee_growth_global):\n \"\"\"\n TODO: update documentation\n\n :param fee_growth_global:\n :param init_tick:\n :param current_tick:\n :return:\n \"\"\"\n return fee_growth_global if current_tick >= init_tick else 0 # formula 6.21\n\n\ndef get_fee_growth_above(fee_growth_global, fee_growth_outside,\n current_tick, tick, at_max_tick=False):\n \"\"\"\n TODO: update documentation\n\n :param fee_growth_global:\n :param fee_growth_outside:\n :param current_tick:\n :param tick:\n :return:\n \"\"\"\n # If we are currently at the max tick, the fee growth above the tick\n # should be fee_growth_outside, not fee_growth_global - fee_growth_outside,\n # as the current tick is the lower bound of the price range whereas the\n # max tick is an upper bound on the position. 
In other words, position\n # ranges are given by:\n # [ tick_to_sqrt_price(lower_tick), tick_to_sqrt_price(upper_tick) ]\n # so the tick_upper doesn't have any area above it (as ticks are lower\n # bounds) whereas the lower tick does have area above it.\n # This is a deviation from the white paper to handle an edge case, so we\n # need to keep an eye on it.\n tick_condition = (current_tick > tick) if at_max_tick else (current_tick >= tick)\n # formula 6.17\n return (\n fee_growth_global - fee_growth_outside if tick_condition\n else fee_growth_outside\n )\n\n\ndef get_fee_growth_below(fee_growth_global, fee_growth_outside,\n current_tick, tick):\n \"\"\"\n TODO: update documentation\n\n :param fee_growth_global:\n :param fee_growth_outside:\n :param current_tick:\n :param tick:\n :return:\n \"\"\"\n # formula 6.18\n return (\n fee_growth_outside if current_tick >= tick\n else fee_growth_global - fee_growth_outside\n )\n\n\ndef get_fee_growth_inside(fee_growth_global,\n fee_growth_outside_lower, fee_growth_outside_upper,\n tick, tick_lower, tick_upper,\n at_max_tick=False):\n \"\"\"\n TODO: update documentation\n\n :param fee_growth_global:\n :param fee_growth_outside_lower:\n :param fee_growth_outside_upper:\n :param tick:\n :param tick_lower:\n :param tick_upper:\n :return:\n \"\"\"\n # formula 6.17\n fa_upper = get_fee_growth_above(fee_growth_global, fee_growth_outside_upper,\n tick, tick_upper, at_max_tick=at_max_tick)\n # formula 6.18\n fb_lower = get_fee_growth_below(fee_growth_global, fee_growth_outside_lower,\n tick, tick_lower)\n\n return fee_growth_global - fb_lower - fa_upper # formula 6.19\n\n\ndef get_uncollected_fees(liquidity, fee_growth_inside, fee_growth_inside_last):\n \"\"\"\n TODO: update documentation\n formula 6.28 (formulas are the same for each token)\n\n :param liquidity:\n :param fee_growth_inside:\n :param fee_growth_inside_last:\n :return:\n \"\"\"\n return liquidity * (fee_growth_inside - fee_growth_inside_last) # formula 6.28\n\n\ndef swap_within_tick(token, tokens_in, sqrt_price, liquidity, sqrt_price_limit):\n \"\"\"\n TODO: finish documentation\n See section 6.2.3 of the white paper\n\n :param token:\n :param tokens_in:\n :param sqrt_price:\n :param liquidity:\n :param sqrt_price_limit:\n :return:\n \"\"\"\n # Calculate the next_sqrt_price, limited by the next_tick_sqrt_price.\n # If next_sqrt_price is outside of next_tick_sqrt_price, then we only\n # execute part of the swap and cross the next tick to execute the\n # remaining amount.\n if token == 0:\n assert sqrt_price_limit <= sqrt_price, (\n 'Expected sqrt_price_limit <= sqrt_price'\n )\n # temporary delta_sqrt_price_inv to determine the actual next_sqrt_price\n delta_sqrt_price_inv = tokens_in / liquidity # formula 6.15\n next_sqrt_price_inv = (1 / sqrt_price) + delta_sqrt_price_inv\n next_sqrt_price = max(\n 1 / next_sqrt_price_inv,\n sqrt_price_limit\n )\n else:\n assert sqrt_price_limit >= sqrt_price, (\n 'Expected sqrt_price_limit => sqrt_price'\n )\n next_sqrt_price = min(\n sqrt_price + tokens_in / liquidity, # formula 6.13\n sqrt_price_limit\n )\n\n delta_sqrt_price = next_sqrt_price - sqrt_price\n delta_token1 = delta_sqrt_price * liquidity # formula 6.14\n\n delta_sqrt_price_inv = (1 / next_sqrt_price) - (1 / sqrt_price)\n delta_token0 = delta_sqrt_price_inv * liquidity # formula 6.16\n\n return delta_token0, delta_token1, next_sqrt_price\n" ]
[ [ "torch.nn.Sequential", "torch.sigmoid", "torch.cat", "numpy.clip", "torch.nn.utils.clip_grad_norm_", "torch.nn.Linear", "torch.no_grad", "numpy.random.SeedSequence", "numpy.mean", "torch.nn.ReLU", "numpy.array", "torch.as_tensor" ], [ "numpy.log", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Arif064001/multifocus_multiview_stereo_reconstruction
[ "0c1039a1a3dfa75c4904d5ca55fef5f59175b7d0", "0c1039a1a3dfa75c4904d5ca55fef5f59175b7d0" ]
[ "pyramid.py", "alignments.py" ]
[ "import numpy as np\nfrom scipy import ndimage\nimport cv2\nfrom timed import timed\n\n'''\nSource: https://github.com/sjawhar/focus-stacking.git\nReference: Chang and Wang (2011) A Multi-focus Image Fusion Method Based on Laplacian Pyramid\n'''\n\ndef generating_kernel(a):\n kernel = np.array([0.25 - a / 2.0, 0.25, a, 0.25, 0.25 - a / 2.0])\n return np.outer(kernel, kernel)\n\n\ndef reduce_layer(layer, kernel=generating_kernel(0.4)):\n if len(layer.shape) == 2:\n convolution = convolve(layer, kernel)\n return convolution[::2,::2]\n\n ch_layer = reduce_layer(layer[:,:,0])\n next_layer = np.zeros(list(ch_layer.shape) + [layer.shape[2]], dtype = ch_layer.dtype)\n next_layer[:, :, 0] = ch_layer\n\n for channel in range(1, layer.shape[2]):\n next_layer[:, :, channel] = reduce_layer(layer[:,:,channel])\n\n return next_layer\n\ndef expand_layer(layer, kernel=generating_kernel(0.4)):\n if len(layer.shape) == 2:\n expand = np.zeros((2 * layer.shape[0], 2 * layer.shape[1]), dtype=np.float64)\n expand[::2, ::2] = layer;\n convolution = convolve(expand, kernel)\n return 4.*convolution\n\n ch_layer = expand_layer(layer[:,:,0])\n next_layer = np.zeros(list(ch_layer.shape) + [layer.shape[2]], dtype = ch_layer.dtype)\n next_layer[:, :, 0] = ch_layer\n\n for channel in range(1, layer.shape[2]):\n next_layer[:, :, channel] = expand_layer(layer[:,:,channel])\n\n return next_layer\n\ndef convolve(image, kernel=generating_kernel(0.4)):\n return ndimage.convolve(image.astype(np.float64), kernel, mode='mirror')\n\ndef gaussian_pyramid(images, levels):\n pyramid = [images.astype(np.float64)]\n num_images = images.shape[0]\n\n while levels > 0:\n next_layer = reduce_layer(pyramid[-1][0])\n next_layer_size = [num_images] + list(next_layer.shape)\n pyramid.append(np.zeros(next_layer_size, dtype=next_layer.dtype))\n pyramid[-1][0] = next_layer\n for layer in range(1, images.shape[0]):\n pyramid[-1][layer] = reduce_layer(pyramid[-2][layer])\n levels = levels - 1\n\n return pyramid\n\ndef laplacian_pyramid(images, levels):\n gaussian = gaussian_pyramid(images, levels)\n\n pyramid = [gaussian[-1]]\n for level in range(len(gaussian) - 1, 0, -1):\n gauss = gaussian[level - 1]\n pyramid.append(np.zeros(gauss.shape, dtype=gauss.dtype))\n for layer in range(images.shape[0]):\n gauss_layer = gauss[layer]\n expanded = expand_layer(gaussian[level][layer])\n if expanded.shape != gauss_layer.shape:\n expanded = expanded[:gauss_layer.shape[0],:gauss_layer.shape[1]]\n pyramid[-1][layer] = gauss_layer - expanded\n\n return pyramid[::-1]\n\ndef collapse(pyramid):\n image = pyramid[-1]\n for layer in pyramid[-2::-1]:\n expanded = expand_layer(image)\n if expanded.shape != layer.shape:\n expanded = expanded[:layer.shape[0],:layer.shape[1]]\n image = expanded + layer\n\n return image\n\ndef get_probabilities(gray_image):\n levels, counts = np.unique(gray_image.astype(np.uint8), return_counts = True)\n probabilities = np.zeros((256,), dtype=np.float64)\n probabilities[levels] = counts.astype(np.float64) / counts.sum()\n return probabilities\n\ndef entropy(image, kernel_size):\n def _area_entropy(area, probabilities):\n levels = area.flatten()\n return -1. 
* (levels * np.log(probabilities[levels])).sum()\n\n probabilities = get_probabilities(image)\n pad_amount = int((kernel_size - 1) / 2)\n padded_image = cv2.copyMakeBorder(image,pad_amount,pad_amount,pad_amount,pad_amount,cv2.BORDER_REFLECT101)\n entropies = np.zeros(image.shape[:2], dtype=np.float64)\n offset = np.arange(-pad_amount, pad_amount + 1)\n for row in range(entropies.shape[0]):\n for column in range(entropies.shape[1]):\n area = padded_image[row + pad_amount + offset[:, np.newaxis], column + pad_amount + offset]\n entropies[row, column] = _area_entropy(area, probabilities)\n\n return entropies\n\n\ndef deviation(image, kernel_size):\n def _area_deviation(area):\n average = np.average(area).astype(np.float64)\n return np.square(area - average).sum() / area.size\n\n pad_amount = int((kernel_size - 1) / 2)\n padded_image = cv2.copyMakeBorder(image,pad_amount,pad_amount,pad_amount,pad_amount,cv2.BORDER_REFLECT101)\n deviations = np.zeros(image.shape[:2], dtype=np.float64)\n offset = np.arange(-pad_amount, pad_amount + 1)\n for row in range(deviations.shape[0]):\n for column in range(deviations.shape[1]):\n area = padded_image[row + pad_amount + offset[:, np.newaxis], column + pad_amount + offset]\n deviations[row, column] = _area_deviation(area)\n\n return deviations\n\ndef get_fused_base(images, kernel_size):\n layers = images.shape[0]\n entropies = np.zeros(images.shape[:3], dtype=np.float64)\n deviations = np.copy(entropies)\n for layer in range(layers):\n gray_image = cv2.cvtColor(images[layer].astype(np.float32), cv2.COLOR_BGR2GRAY).astype(np.uint8)\n# probabilities = get_probabilities(gray_image)\n entropies[layer] = entropy(gray_image, kernel_size)\n deviations[layer] = deviation(gray_image, kernel_size)\n\n best_e = np.argmax(entropies, axis = 0)\n best_d = np.argmax(deviations, axis = 0)\n fused = np.zeros(images.shape[1:], dtype=np.float64)\n\n for layer in range(layers):\n fused += np.where(best_e[:,:,np.newaxis] == layer, images[layer], 0)\n fused += np.where(best_d[:,:,np.newaxis] == layer, images[layer], 0)\n\n return (fused / 2).astype(images.dtype)\n\ndef fuse_pyramids(pyramids, kernel_size):\n fused = [get_fused_base(pyramids[-1], kernel_size)]\n for layer in range(len(pyramids) - 2, -1, -1):\n fused.append(get_fused_laplacian(pyramids[layer]))\n\n return fused[::-1]\n\ndef get_fused_laplacian(laplacians):\n layers = laplacians.shape[0]\n region_energies = np.zeros(laplacians.shape[:3], dtype=np.float64)\n\n for layer in range(layers):\n gray_lap = cv2.cvtColor(laplacians[layer].astype(np.float32), cv2.COLOR_BGR2GRAY)\n region_energies[layer] = region_energy(gray_lap)\n\n best_re = np.argmax(region_energies, axis = 0)\n fused = np.zeros(laplacians.shape[1:], dtype=laplacians.dtype)\n\n for layer in range(layers):\n fused += np.where(best_re[:,:,np.newaxis] == layer, laplacians[layer], 0)\n\n return fused\n\ndef region_energy(laplacian):\n return convolve(np.square(laplacian))\n\n@timed\ndef get_pyramid_fusion(images, min_size = 32):\n smallest_side = min(images[0].shape[:2])\n depth = int(np.log2(smallest_side / min_size))\n kernel_size = 5\n\n pyramids = laplacian_pyramid(images, depth)\n fusion = fuse_pyramids(pyramids, kernel_size)\n\n return collapse(fusion).clip(0, 255).astype(np.uint8)\n\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 28 18:44:12 2019\n\n@author: chuong nguyen <[email protected]>\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom timed import timed\n\n\n@timed\ndef align_images(image_BGRs, algorithm='ECC', 
homographies=[]):\n '''Compute homography if missing and align image by warping.\n\n INPUT:\n - BGRs: list of BGR images\n - algorithm='ECC': ('ECC', 'ORB', 'SIFT', 'SURF')\n - homographies=[]: list of known homographies if available to speed up\n - base='MIDDLE': ('MIDDLE', 'START', 'END') position of reference image\n\n OUTPUT:\n - aligned_images: aligned images\n - holographies: list of homographies\n '''\n \n \n grays = [cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in image_BGRs]\n# grays = [cv2.blur(gray, (5, 5)) for gray in grays] # blur to smooth noise\n if len(homographies) == 0:\n if algorithm.upper() == 'ECC':\n homographies = get_homography_ECC(grays)\n elif algorithm.upper() == 'ECC_PYRAMID':\n homographies = get_homography_ECC_pyramid(grays)\n elif algorithm.upper() in ['ORB', 'SIFT', 'SURF']:\n homographies = get_homography_feature(grays, algorithm)\n else: # HYBRID\n homographies = get_homography_hybrid(grays)\n \n warped_images = warp_image_homography(image_BGRs, homographies)\n return warped_images, homographies\n\n\n@timed\ndef get_homography_ECC(grays, iterations=1000, epsilon=1e-6):\n '''Computer a list of homography from list of images\n\n INPUT:\n - grays: list of gray images\n - iterations=5000: number of iterations for ECC algorithm\n - epsilon=1e-10: threshold for ECC algorithm\n\n OUTPUT:\n - homographies: list of homographies\n '''\n base_index = len(grays)//2\n base_gray = grays[base_index]\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, iterations,\n epsilon)\n homographies = [cv2.findTransformECC(gray, base_gray,\n np.eye(3, dtype=np.float32),\n cv2.MOTION_HOMOGRAPHY, criteria)[1]\n for gray in grays]\n return homographies\n\n\n@timed\ndef get_homography_ECC_pyramid(grays, iterations=1000, epsilon=1e-6):\n '''Computer a list of homography from list of images\n\n INPUT:\n - grays: list of gray images\n - iterations=5000: number of iterations for ECC algorithm\n - epsilon=1e-10: threshold for ECC algorithm\n\n OUTPUT:\n - homographies: list of homographies\n '''\n base_index = len(grays)//2\n no_levels = 4\n pyramid = []\n for l in range(no_levels):\n pyramid.append([cv2.resize(gray, (0, 0), fx=0.5**l, fy=0.5**l)\n for gray in grays])\n\n scale_matrix = np.array([[1, 1, 2], [1, 1, 2], [1, 1, 1]],\n dtype=np.float32)\n for l in range(no_levels - 1, -1, -1):\n grays_p = pyramid[l]\n print(l)\n if l == (no_levels - 1):\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,\n iterations, epsilon)\n homographies = [cv2.findTransformECC(gray_p, grays_p[base_index],\n np.eye(3, dtype=np.float32),\n cv2.MOTION_HOMOGRAPHY,\n criteria)[1]\n for gray_p in grays_p]\n else:\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,\n iterations, epsilon)\n homographies = [cv2.findTransformECC(gray_p, grays_p[base_index],\n H_p*scale_matrix,\n cv2.MOTION_HOMOGRAPHY,\n criteria)[1]\n for (gray_p, H_p) in zip(grays_p, homographies)]\n\n return homographies\n\n\n@timed\ndef get_homography_hybrid(grays, iterations=1000, epsilon=1e-6):\n '''Computer a list of homography from list of images\n\n INPUT:\n - grays: list of gray images\n - iterations=5000: number of iterations for ECC algorithm\n - epsilon=1e-10: threshold for ECC algorithm\n\n OUTPUT:\n - homographies: list of homographies\n '''\n homographies_init = get_homography_feature(grays, 'ORB')\n base_index = len(grays)//2\n base_gray = grays[base_index]\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, iterations,\n epsilon)\n homographies = [cv2.findTransformECC(gray, base_gray, 
H,\n cv2.MOTION_HOMOGRAPHY, criteria)[1]\n for gray, H in zip(grays, homographies_init)]\n return homographies\n\n\n@timed\ndef get_homography_feature(grays, feature, max_pnts=128):\n '''Compute homography using feature matching.\n Only support ORB and SIFT features for now.\n Not working properly.\n '''\n base_index = len(grays)//2\n base_gray = grays[base_index]\n if feature.upper() == 'ORB':\n detector = cv2.ORB_create(max_pnts) # tested with OpenCV 3.2.0\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n elif feature.upper() == 'SIFT':\n detector = cv2.SIFT_create(max_pnts) # tested with OpenCV 3.2.0\n matcher = cv2.BFMatcher()\n\n base_kpnt, base_desc = detector.detectAndCompute(base_gray, None)\n homographies = []\n for i, gray in enumerate(grays):\n if i == base_index:\n H = np.eye(3, dtype=np.float32)\n else:\n curr_kpnt, curr_desc = detector.detectAndCompute(base_gray, None)\n matches = matcher.match(curr_desc, base_desc)\n matches = sorted(matches, key=lambda x: x.distance)\n# matches = matches[:max_pnts]\n src_pts = np.zeros([len(matches), 1, 2], dtype=np.float32)\n dst_pts = np.zeros([len(matches), 1, 2], dtype=np.float32)\n for i in range(len(matches)):\n src_pts[i] = curr_kpnt[matches[i].queryIdx].pt\n dst_pts[i] = base_kpnt[matches[i].trainIdx].pt\n H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,\n ransacReprojThreshold=2.0)\n homographies.append(H.astype(np.float32))\n return homographies\n'''\ndef get_homography_feature(grays, feature, max_pnts=128):\n #Compute homography using feature matching.\n #Only support ORB feature for now.\n #Not working properly.\n #\n base_index = len(grays)//2\n base_gray = grays[base_index]\n if feature.upper() == 'ORB':\n detector = cv2.ORB_create(max_pnts) # tested with OpenCV 3.2.0\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n base_kpnt, base_desc = detector.detectAndCompute(base_gray, None)\n homographies = []\n for i, gray in enumerate(grays):\n if i == base_index:\n H = np.eye(3, dtype=np.float32)\n else:\n curr_kpnt, curr_desc = detector.detectAndCompute(base_gray, None)\n matches = matcher.match(curr_desc, base_desc)\n matches = sorted(matches, key=lambda x: x.distance)\n# matches = matches[:max_pnts]\n src_pts = np.zeros([len(matches), 1, 2], dtype=np.float32)\n dst_pts = np.zeros([len(matches), 1, 2], dtype=np.float32)\n for i in range(len(matches)):\n src_pts[i] = curr_kpnt[matches[i].queryIdx].pt\n dst_pts[i] = base_kpnt[matches[i].trainIdx].pt\n H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,\n ransacReprojThreshold=2.0)\n homographies.append(H.astype(np.float32))\n return homographies\n'''\n\n@timed\ndef warp_image_homography(image_BGRs, homographies):\n '''Warp images to align images from given homographies\n '''\n warped_BGRs = [cv2.warpPerspective(\n BGR, H, BGR.shape[1::-1],\n flags=cv2.INTER_LINEAR,borderValue=(0,0,0))\n for BGR, H in zip(image_BGRs, homographies)]\n return warped_BGRs\n" ]
[ [ "numpy.square", "numpy.log", "numpy.log2", "numpy.arange", "numpy.copy", "numpy.argmax", "numpy.average", "numpy.outer", "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.eye", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jkalloor3/bqskit
[ "ad34a6eae3c0e62d2bd960cd4cd841ba8e845811" ]
[ "tests/ir/opt/instantiaters/test_qfactor.py" ]
[ "\"\"\"This test module verifies the QFactor instantiater.\"\"\"\nfrom __future__ import annotations\n\nimport numpy as np\nfrom scipy.stats import unitary_group\n\nfrom bqskit.ir.circuit import Circuit\nfrom bqskit.ir.gates.parameterized import RXGate\nfrom bqskit.ir.gates.parameterized.unitary import VariableUnitaryGate\nfrom bqskit.ir.opt.instantiaters.qfactor import QFactor\n\n\nclass TestQFactorEndToEnd:\n\n def test_no_change(self) -> None:\n u1 = unitary_group.rvs(8)\n g1 = VariableUnitaryGate(3)\n circuit = Circuit(3)\n circuit.append_gate(g1, [0, 1, 2])\n utry_before = circuit.get_unitary()\n # The following call should not make any changes in circuit\n QFactor().instantiate(circuit, u1, circuit.params)\n utry_after = circuit.get_unitary()\n\n assert np.allclose(\n utry_before,\n utry_after,\n )\n\n def test_1_gate(self) -> None:\n u1 = unitary_group.rvs(8)\n g1 = VariableUnitaryGate(3)\n circuit = Circuit(3)\n circuit.append_gate(g1, [0, 1, 2])\n params = QFactor().instantiate(circuit, u1, circuit.params)\n circuit.set_params(params)\n\n g1_params = list(np.reshape(u1, (64,)))\n g1_params = list(np.real(g1_params)) + list(np.imag(g1_params))\n\n assert np.allclose(\n circuit.get_unitary(),\n g1.get_unitary(g1_params),\n )\n\n def test_2_gate(self) -> None:\n g1 = VariableUnitaryGate(2)\n g2 = VariableUnitaryGate(3)\n g3 = RXGate()\n circuit = Circuit(4)\n circuit.append_gate(g1, [0, 1])\n circuit.append_gate(g2, [1, 2, 3])\n circuit.append_gate(g3, [1])\n utry = circuit.get_unitary(np.random.random(circuit.num_params))\n params = QFactor().instantiate(circuit, utry, circuit.params)\n\n circuit.set_params(params)\n\n assert np.allclose(\n circuit.get_unitary(),\n utry,\n )\n" ]
[ [ "numpy.imag", "numpy.random.random", "numpy.allclose", "numpy.reshape", "numpy.real", "scipy.stats.unitary_group.rvs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ythlml/mindspore
[ "028ae212624164044cfaa84f347fc502cb7fcb0f", "028ae212624164044cfaa84f347fc502cb7fcb0f", "028ae212624164044cfaa84f347fc502cb7fcb0f", "028ae212624164044cfaa84f347fc502cb7fcb0f", "028ae212624164044cfaa84f347fc502cb7fcb0f", "028ae212624164044cfaa84f347fc502cb7fcb0f" ]
[ "tests/st/ops/gpu/test_batchnorm_fold_op.py", "tests/st/ops/ascend/test_tbe_ops/test_addn.py", "tests/ut/python/dataset/test_minddataset.py", "tests/st/ops/gpu/test_flatten_op.py", "tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py", "tests/ut/python/dataset/test_skip.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common.api import ms_function\nfrom mindspore.ops import operations as P\n\ncontext.set_context(device_target='GPU')\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.op = P.BatchNormFold(freeze_bn=10)\n\n @ms_function\n def construct(self, x, mean, variance, current_step):\n a, b, c, d = self.op(x, mean, variance, current_step)\n return a, b, c, d\n\n\ndef np_result(x, mean, var, momentum, epsilon):\n np_mean = x.mean(axis=(0, 2, 3))\n np_var = x.var(axis=(0, 2, 3))\n n = x.shape[0] * x.shape[2] * x.shape[3]\n mean_update = momentum * np_mean + (1 - momentum) * mean\n var_update = momentum * np_var * n / (n - 1) + (1 - momentum) * var\n np_var = np.sqrt(np_var + epsilon)\n delay_mean = mean.copy()\n delay_std = np.sqrt(var + epsilon)\n return np_mean, np_var, mean_update, var_update, delay_mean, delay_std\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_batchnorm_fold():\n net = Net()\n c = 64\n x = np.random.uniform(1, 10, size=[3, c, 32, 32]).astype('float32')\n mean = np.random.uniform(1, 10, size=[c]).astype('float32')\n variance = np.random.uniform(1, 10, size=[c]).astype('float32')\n current_step = np.array([0]).astype('int32')\n ms_mean = Tensor(mean)\n ms_var = Tensor(variance)\n batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var,\n Tensor(current_step))\n\n expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)\n assert np.allclose(batch_mean.asnumpy(), expect1, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(batch_var.asnumpy(), expect2, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(ms_mean.asnumpy(), expect3, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(ms_var.asnumpy(), expect4, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(delay_mean.asnumpy(), expect5, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(delay_std.asnumpy(), expect6, rtol=1.e-7, atol=1.e-5)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_batchnorm_fold2():\n net = Net()\n c = 64\n x = np.random.uniform(1, 10, size=[3, c, 512, 512]).astype('float32')\n mean = np.random.uniform(1, 10, size=[c]).astype('float32')\n variance = np.random.uniform(1, 10, size=[c]).astype('float32')\n current_step = np.array([0]).astype('int32')\n ms_mean = Tensor(mean)\n ms_var = Tensor(variance)\n batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var,\n Tensor(current_step))\n expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)\n assert np.allclose(batch_mean.asnumpy(), expect1, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(batch_var.asnumpy(), expect2, rtol=1.e-7, 
atol=1.e-5)\n assert np.allclose(ms_mean.asnumpy(), expect3, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(delay_mean.asnumpy(), expect5, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(delay_std.asnumpy(), expect6, rtol=1.e-7, atol=1.e-5)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_batchnorm_fold_freeze():\n net = Net()\n c = 64\n x = np.random.uniform(1, 10, size=[3, c, 32, 32]).astype('float32')\n mean = np.random.uniform(1, 10, size=[c]).astype('float32')\n variance = np.random.uniform(1, 10, size=[c]).astype('float32')\n current_step = np.array([10]).astype('int32')\n ms_mean = Tensor(mean)\n ms_var = Tensor(variance)\n batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var,\n Tensor(current_step))\n expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)\n assert np.allclose(batch_mean.asnumpy(), np.zeros_like(mean), rtol=1.e-7, atol=1.e-5)\n assert np.allclose(batch_var.asnumpy(), np.ones_like(mean), rtol=1.e-7, atol=1.e-5)\n assert np.allclose(ms_mean.asnumpy(), mean, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(ms_var.asnumpy(), variance, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(delay_mean.asnumpy(), expect5, rtol=1.e-7, atol=1.e-5)\n assert np.allclose(delay_std.asnumpy(), expect6, rtol=1.e-7, atol=1.e-5)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common.api import ms_function\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter\nfrom mindspore.ops import operations as P\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.add = P.AddN()\n\n def construct(self, x, y):\n return self.add((x, y))\n\n\ndef test_net():\n x = np.random.randn(1, 3, 3, 4).astype(np.float32)\n y = np.random.randn(1, 3, 3, 4).astype(np.float32)\n add = Net()\n output = add(Tensor(x), Tensor(y))\n print(x)\n print(y)\n print(output.asnumpy())\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nThis is the test module for 
mindrecord\n\"\"\"\nimport collections\nimport json\nimport numpy as np\nimport os\nimport pytest\nimport re\nimport string\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.vision.c_transforms as vision\nfrom mindspore import log as logger\nfrom mindspore.dataset.transforms.vision import Inter\nfrom mindspore.mindrecord import FileWriter\n\nFILES_NUM = 4\nCV_FILE_NAME = \"../data/mindrecord/imagenet.mindrecord\"\nCV1_FILE_NAME = \"../data/mindrecord/imagenet1.mindrecord\"\nCV2_FILE_NAME = \"../data/mindrecord/imagenet2.mindrecord\"\nCV_DIR_NAME = \"../data/mindrecord/testImageNetData\"\nNLP_FILE_NAME = \"../data/mindrecord/aclImdb.mindrecord\"\nOLD_NLP_FILE_NAME = \"../data/mindrecord/testOldVersion/aclImdb.mindrecord\"\nNLP_FILE_POS = \"../data/mindrecord/testAclImdbData/pos\"\nNLP_FILE_VOCAB = \"../data/mindrecord/testAclImdbData/vocab.txt\"\n\n\[email protected]\ndef add_and_remove_cv_file():\n \"\"\"add/remove cv file\"\"\"\n paths = [\"{}{}\".format(CV_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n os.remove(\"{}\".format(x)) if os.path.exists(\"{}\".format(x)) else None\n os.remove(\"{}.db\".format(x)) if os.path.exists(\n \"{}.db\".format(x)) else None\n writer = FileWriter(CV_FILE_NAME, FILES_NUM)\n data = get_data(CV_DIR_NAME)\n cv_schema_json = {\"id\": {\"type\": \"int32\"},\n \"file_name\": {\"type\": \"string\"},\n \"label\": {\"type\": \"int32\"},\n \"data\": {\"type\": \"bytes\"}}\n writer.add_schema(cv_schema_json, \"img_schema\")\n writer.add_index([\"file_name\", \"label\"])\n writer.write_raw_data(data)\n writer.commit()\n yield \"yield_cv_data\"\n for x in paths:\n os.remove(\"{}\".format(x))\n os.remove(\"{}.db\".format(x))\n\n\[email protected]\ndef add_and_remove_nlp_file():\n \"\"\"add/remove nlp file\"\"\"\n paths = [\"{}{}\".format(NLP_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n if os.path.exists(\"{}\".format(x)):\n os.remove(\"{}\".format(x))\n if os.path.exists(\"{}.db\".format(x)):\n os.remove(\"{}.db\".format(x))\n writer = FileWriter(NLP_FILE_NAME, FILES_NUM)\n data = [x for x in get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10)]\n nlp_schema_json = {\"id\": {\"type\": \"string\"}, \"label\": {\"type\": \"int32\"},\n \"rating\": {\"type\": \"float32\"},\n \"input_ids\": {\"type\": \"int64\",\n \"shape\": [-1]},\n \"input_mask\": {\"type\": \"int64\",\n \"shape\": [1, -1]},\n \"segment_ids\": {\"type\": \"int64\",\n \"shape\": [2, -1]}\n }\n writer.set_header_size(1 << 14)\n writer.set_page_size(1 << 15)\n writer.add_schema(nlp_schema_json, \"nlp_schema\")\n writer.add_index([\"id\", \"rating\"])\n writer.write_raw_data(data)\n writer.commit()\n yield \"yield_nlp_data\"\n for x in paths:\n os.remove(\"{}\".format(x))\n os.remove(\"{}.db\".format(x))\n\n\[email protected]\ndef add_and_remove_nlp_compress_file():\n \"\"\"add/remove nlp file\"\"\"\n paths = [\"{}{}\".format(NLP_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n if os.path.exists(\"{}\".format(x)):\n os.remove(\"{}\".format(x))\n if os.path.exists(\"{}.db\".format(x)):\n os.remove(\"{}.db\".format(x))\n writer = FileWriter(NLP_FILE_NAME, FILES_NUM)\n data = []\n for row_id in range(16):\n data.append({\n \"label\": row_id,\n \"array_a\": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129,\n 255, 256, -32768, 32767, -32769, 32768, -2147483648,\n 2147483647], dtype=np.int32), [-1]),\n \"array_b\": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129, 255,\n 256, -32768, 32767, 
-32769, 32768, -2147483648, 2147483647, -2147483649, 2147483649, -922337036854775808, 9223372036854775807]), [1, -1]),\n \"array_c\": str.encode(\"nlp data\"),\n \"array_d\": np.reshape(np.array([[-10, -127], [10, 127]]), [2, -1])\n })\n nlp_schema_json = {\"label\": {\"type\": \"int32\"},\n \"array_a\": {\"type\": \"int32\",\n \"shape\": [-1]},\n \"array_b\": {\"type\": \"int64\",\n \"shape\": [1, -1]},\n \"array_c\": {\"type\": \"bytes\"},\n \"array_d\": {\"type\": \"int64\",\n \"shape\": [2, -1]}\n }\n writer.set_header_size(1 << 14)\n writer.set_page_size(1 << 15)\n writer.add_schema(nlp_schema_json, \"nlp_schema\")\n writer.write_raw_data(data)\n writer.commit()\n yield \"yield_nlp_data\"\n for x in paths:\n os.remove(\"{}\".format(x))\n os.remove(\"{}.db\".format(x))\n\n\ndef test_nlp_compress_data(add_and_remove_nlp_compress_file):\n \"\"\"tutorial for nlp minderdataset.\"\"\"\n data = []\n for row_id in range(16):\n data.append({\n \"label\": row_id,\n \"array_a\": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129,\n 255, 256, -32768, 32767, -32769, 32768, -2147483648,\n 2147483647], dtype=np.int32), [-1]),\n \"array_b\": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129, 255,\n 256, -32768, 32767, -32769, 32768, -2147483648, 2147483647, -2147483649, 2147483649, -922337036854775808, 9223372036854775807]), [1, -1]),\n \"array_c\": str.encode(\"nlp data\"),\n \"array_d\": np.reshape(np.array([[-10, -127], [10, 127]]), [2, -1])\n })\n num_readers = 1\n data_set = ds.MindDataset(\n NLP_FILE_NAME + \"0\", None, num_readers, shuffle=False)\n assert data_set.get_dataset_size() == 16\n num_iter = 0\n for x, item in zip(data, data_set.create_dict_iterator()):\n assert (item[\"array_a\"] == x[\"array_a\"]).all()\n assert (item[\"array_b\"] == x[\"array_b\"]).all()\n assert item[\"array_c\"].tobytes() == x[\"array_c\"]\n assert (item[\"array_d\"] == x[\"array_d\"]).all()\n assert item[\"label\"] == x[\"label\"]\n num_iter += 1\n assert num_iter == 16\n\n\ndef test_nlp_compress_data_old_version(add_and_remove_nlp_compress_file):\n \"\"\"tutorial for nlp minderdataset.\"\"\"\n num_readers = 1\n data_set = ds.MindDataset(\n NLP_FILE_NAME + \"0\", None, num_readers, shuffle=False)\n old_data_set = ds.MindDataset(\n OLD_NLP_FILE_NAME + \"0\", None, num_readers, shuffle=False)\n assert old_data_set.get_dataset_size() == 16\n num_iter = 0\n for x, item in zip(old_data_set.create_dict_iterator(), data_set.create_dict_iterator()):\n assert (item[\"array_a\"] == x[\"array_a\"]).all()\n assert (item[\"array_b\"] == x[\"array_b\"]).all()\n assert (item[\"array_c\"] == x[\"array_c\"]).all()\n assert (item[\"array_d\"] == x[\"array_d\"]).all()\n assert item[\"label\"] == x[\"label\"]\n num_iter += 1\n assert num_iter == 16\n\n\ndef test_cv_minddataset_writer_tutorial():\n \"\"\"tutorial for cv dataset writer.\"\"\"\n paths = [\"{}{}\".format(CV_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n os.remove(\"{}\".format(x)) if os.path.exists(\"{}\".format(x)) else None\n os.remove(\"{}.db\".format(x)) if os.path.exists(\n \"{}.db\".format(x)) else None\n writer = FileWriter(CV_FILE_NAME, FILES_NUM)\n data = get_data(CV_DIR_NAME)\n cv_schema_json = {\"file_name\": {\"type\": \"string\"}, \"label\": {\"type\": \"int32\"},\n \"data\": {\"type\": \"bytes\"}}\n writer.add_schema(cv_schema_json, \"img_schema\")\n writer.add_index([\"file_name\", \"label\"])\n writer.write_raw_data(data)\n writer.commit()\n for x in paths:\n os.remove(\"{}\".format(x))\n 
os.remove(\"{}.db\".format(x))\n\n\ndef test_cv_minddataset_partition_tutorial(add_and_remove_cv_file):\n \"\"\"tutorial for cv minddataset.\"\"\"\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n\n def partitions(num_shards):\n for partition_id in range(num_shards):\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers,\n num_shards=num_shards, shard_id=partition_id)\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- partition : {} ------------------------\".format(partition_id))\n logger.info(\n \"-------------- item[label]: {} -----------------------\".format(item[\"label\"]))\n num_iter += 1\n return num_iter\n\n assert partitions(4) == 3\n assert partitions(5) == 2\n assert partitions(9) == 2\n\n\ndef test_cv_minddataset_dataset_size(add_and_remove_cv_file):\n \"\"\"tutorial for cv minddataset.\"\"\"\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers)\n assert data_set.get_dataset_size() == 10\n repeat_num = 2\n data_set = data_set.repeat(repeat_num)\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- get dataset size {} -----------------\".format(num_iter))\n logger.info(\n \"-------------- item[label]: {} ---------------------\".format(item[\"label\"]))\n logger.info(\n \"-------------- item[data]: {} ----------------------\".format(item[\"data\"]))\n num_iter += 1\n assert num_iter == 20\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers,\n num_shards=4, shard_id=3)\n assert data_set.get_dataset_size() == 3\n\n\ndef test_cv_minddataset_repeat_reshuffle(add_and_remove_cv_file):\n \"\"\"tutorial for cv minddataset.\"\"\"\n columns_list = [\"data\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers)\n decode_op = vision.Decode()\n data_set = data_set.map(\n input_columns=[\"data\"], operations=decode_op, num_parallel_workers=2)\n resize_op = vision.Resize((32, 32), interpolation=Inter.LINEAR)\n data_set = data_set.map(input_columns=\"data\",\n operations=resize_op, num_parallel_workers=2)\n data_set = data_set.batch(2)\n data_set = data_set.repeat(2)\n num_iter = 0\n labels = []\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- get dataset size {} -----------------\".format(num_iter))\n logger.info(\n \"-------------- item[label]: {} ---------------------\".format(item[\"label\"]))\n logger.info(\n \"-------------- item[data]: {} ----------------------\".format(item[\"data\"]))\n num_iter += 1\n labels.append(item[\"label\"])\n assert num_iter == 10\n logger.info(\"repeat shuffle: {}\".format(labels))\n assert len(labels) == 10\n assert labels[0:5] == labels[0:5]\n assert labels[0:5] != labels[5:5]\n\n\ndef test_cv_minddataset_batch_size_larger_than_records(add_and_remove_cv_file):\n \"\"\"tutorial for cv minddataset.\"\"\"\n columns_list = [\"data\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers)\n decode_op = vision.Decode()\n data_set = data_set.map(\n input_columns=[\"data\"], operations=decode_op, num_parallel_workers=2)\n resize_op = vision.Resize((32, 32), interpolation=Inter.LINEAR)\n data_set = data_set.map(input_columns=\"data\",\n operations=resize_op, num_parallel_workers=2)\n data_set = data_set.batch(32, drop_remainder=True)\n num_iter = 0\n for item in 
data_set.create_dict_iterator():\n logger.info(\n \"-------------- get dataset size {} -----------------\".format(num_iter))\n logger.info(\n \"-------------- item[label]: {} ---------------------\".format(item[\"label\"]))\n logger.info(\n \"-------------- item[data]: {} ----------------------\".format(item[\"data\"]))\n num_iter += 1\n assert num_iter == 0\n\n\ndef test_cv_minddataset_issue_888(add_and_remove_cv_file):\n \"\"\"issue 888 test.\"\"\"\n columns_list = [\"data\", \"label\"]\n num_readers = 2\n data = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list,\n num_readers, shuffle=False, num_shards=5, shard_id=1)\n data = data.shuffle(2)\n data = data.repeat(9)\n num_iter = 0\n for item in data.create_dict_iterator():\n num_iter += 1\n assert num_iter == 18\n\n\ndef test_cv_minddataset_blockreader_tutorial(add_and_remove_cv_file):\n \"\"\"tutorial for cv minddataset.\"\"\"\n columns_list = [\"data\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers,\n block_reader=True)\n assert data_set.get_dataset_size() == 10\n repeat_num = 2\n data_set = data_set.repeat(repeat_num)\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- block reader repeat tow {} -----------------\".format(num_iter))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n num_iter += 1\n assert num_iter == 20\n\n\ndef test_cv_minddataset_blockreader_some_field_not_in_index_tutorial(add_and_remove_cv_file):\n \"\"\"tutorial for cv minddataset.\"\"\"\n columns_list = [\"id\", \"data\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers, shuffle=False,\n block_reader=True)\n assert data_set.get_dataset_size() == 10\n repeat_num = 2\n data_set = data_set.repeat(repeat_num)\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- block reader repeat tow {} -----------------\".format(num_iter))\n logger.info(\n \"-------------- item[id]: {} ----------------------------\".format(item[\"id\"]))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n num_iter += 1\n assert num_iter == 20\n\n\ndef test_cv_minddataset_reader_file_list(add_and_remove_cv_file):\n \"\"\"tutorial for cv minderdataset.\"\"\"\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset([CV_FILE_NAME + str(x)\n for x in range(FILES_NUM)], columns_list, num_readers)\n assert data_set.get_dataset_size() == 10\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- cv reader basic: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- len(item[data]): {} ------------------------\".format(len(item[\"data\"])))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n logger.info(\n \"-------------- item[file_name]: {} ------------------------\".format(item[\"file_name\"]))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n num_iter += 1\n assert num_iter == 10\n\n\ndef test_cv_minddataset_reader_one_partition(add_and_remove_cv_file):\n 
\"\"\"tutorial for cv minderdataset.\"\"\"\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset([CV_FILE_NAME + \"0\"], columns_list, num_readers)\n assert data_set.get_dataset_size() < 10\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- cv reader basic: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- len(item[data]): {} ------------------------\".format(len(item[\"data\"])))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n logger.info(\n \"-------------- item[file_name]: {} ------------------------\".format(item[\"file_name\"]))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n num_iter += 1\n assert num_iter < 10\n\n\ndef test_cv_minddataset_reader_two_dataset(add_and_remove_cv_file):\n \"\"\"tutorial for cv minderdataset.\"\"\"\n if os.path.exists(CV1_FILE_NAME):\n os.remove(CV1_FILE_NAME)\n if os.path.exists(\"{}.db\".format(CV1_FILE_NAME)):\n os.remove(\"{}.db\".format(CV1_FILE_NAME))\n if os.path.exists(CV2_FILE_NAME):\n os.remove(CV2_FILE_NAME)\n if os.path.exists(\"{}.db\".format(CV2_FILE_NAME)):\n os.remove(\"{}.db\".format(CV2_FILE_NAME))\n writer = FileWriter(CV1_FILE_NAME, 1)\n data = get_data(CV_DIR_NAME)\n cv_schema_json = {\"id\": {\"type\": \"int32\"},\n \"file_name\": {\"type\": \"string\"},\n \"label\": {\"type\": \"int32\"},\n \"data\": {\"type\": \"bytes\"}}\n writer.add_schema(cv_schema_json, \"CV1_schema\")\n writer.add_index([\"file_name\", \"label\"])\n writer.write_raw_data(data)\n writer.commit()\n\n writer = FileWriter(CV2_FILE_NAME, 1)\n data = get_data(CV_DIR_NAME)\n cv_schema_json = {\"id\": {\"type\": \"int32\"},\n \"file_name\": {\"type\": \"string\"},\n \"label\": {\"type\": \"int32\"},\n \"data\": {\"type\": \"bytes\"}}\n writer.add_schema(cv_schema_json, \"CV2_schema\")\n writer.add_index([\"file_name\", \"label\"])\n writer.write_raw_data(data)\n writer.commit()\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(FILES_NUM)] + [CV1_FILE_NAME, CV2_FILE_NAME],\n columns_list, num_readers)\n assert data_set.get_dataset_size() == 30\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- cv reader basic: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- len(item[data]): {} ------------------------\".format(len(item[\"data\"])))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n logger.info(\n \"-------------- item[file_name]: {} ------------------------\".format(item[\"file_name\"]))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n num_iter += 1\n assert num_iter == 30\n if os.path.exists(CV1_FILE_NAME):\n os.remove(CV1_FILE_NAME)\n if os.path.exists(\"{}.db\".format(CV1_FILE_NAME)):\n os.remove(\"{}.db\".format(CV1_FILE_NAME))\n if os.path.exists(CV2_FILE_NAME):\n os.remove(CV2_FILE_NAME)\n if os.path.exists(\"{}.db\".format(CV2_FILE_NAME)):\n os.remove(\"{}.db\".format(CV2_FILE_NAME))\n\n\ndef test_cv_minddataset_reader_two_dataset_partition(add_and_remove_cv_file):\n paths = [\"{}{}\".format(CV1_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n os.remove(\"{}\".format(x)) if os.path.exists(\"{}\".format(x)) else 
None\n os.remove(\"{}.db\".format(x)) if os.path.exists(\n \"{}.db\".format(x)) else None\n writer = FileWriter(CV1_FILE_NAME, FILES_NUM)\n data = get_data(CV_DIR_NAME)\n cv_schema_json = {\"id\": {\"type\": \"int32\"},\n \"file_name\": {\"type\": \"string\"},\n \"label\": {\"type\": \"int32\"},\n \"data\": {\"type\": \"bytes\"}}\n writer.add_schema(cv_schema_json, \"CV1_schema\")\n writer.add_index([\"file_name\", \"label\"])\n writer.write_raw_data(data)\n writer.commit()\n\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(2)] + [CV1_FILE_NAME + str(x) for x in range(2, 4)],\n columns_list, num_readers)\n assert data_set.get_dataset_size() < 20\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- cv reader basic: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- len(item[data]): {} ------------------------\".format(len(item[\"data\"])))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n logger.info(\n \"-------------- item[file_name]: {} ------------------------\".format(item[\"file_name\"]))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n num_iter += 1\n assert num_iter < 20\n for x in paths:\n os.remove(\"{}\".format(x))\n os.remove(\"{}.db\".format(x))\n\n\ndef test_cv_minddataset_reader_basic_tutorial(add_and_remove_cv_file):\n \"\"\"tutorial for cv minderdataset.\"\"\"\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers)\n assert data_set.get_dataset_size() == 10\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- cv reader basic: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- len(item[data]): {} ------------------------\".format(len(item[\"data\"])))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n logger.info(\n \"-------------- item[file_name]: {} ------------------------\".format(item[\"file_name\"]))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n num_iter += 1\n assert num_iter == 10\n\n\ndef test_nlp_minddataset_reader_basic_tutorial(add_and_remove_nlp_file):\n \"\"\"tutorial for nlp minderdataset.\"\"\"\n num_readers = 4\n data_set = ds.MindDataset(NLP_FILE_NAME + \"0\", None, num_readers)\n assert data_set.get_dataset_size() == 10\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- cv reader basic: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- num_iter: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- item[id]: {} ------------------------\".format(item[\"id\"]))\n logger.info(\n \"-------------- item[rating]: {} --------------------\".format(item[\"rating\"]))\n logger.info(\"-------------- item[input_ids]: {}, shape: {} -----------------\".format(\n item[\"input_ids\"], item[\"input_ids\"].shape))\n logger.info(\"-------------- item[input_mask]: {}, shape: {} -----------------\".format(\n item[\"input_mask\"], item[\"input_mask\"].shape))\n logger.info(\"-------------- item[segment_ids]: {}, shape: {} -----------------\".format(\n item[\"segment_ids\"], item[\"segment_ids\"].shape))\n assert 
item[\"input_ids\"].shape == (50,)\n assert item[\"input_mask\"].shape == (1, 50)\n assert item[\"segment_ids\"].shape == (2, 25)\n num_iter += 1\n assert num_iter == 10\n\n\ndef test_cv_minddataset_reader_basic_tutorial_5_epoch(add_and_remove_cv_file):\n \"\"\"tutorial for cv minderdataset.\"\"\"\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers)\n assert data_set.get_dataset_size() == 10\n for epoch in range(5):\n num_iter = 0\n for data in data_set:\n logger.info(\"data is {}\".format(data))\n num_iter += 1\n assert num_iter == 10\n\n data_set.reset()\n\n\ndef test_cv_minddataset_reader_basic_tutorial_5_epoch_with_batch(add_and_remove_cv_file):\n \"\"\"tutorial for cv minderdataset.\"\"\"\n columns_list = [\"data\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers)\n\n resize_height = 32\n resize_width = 32\n\n # define map operations\n decode_op = vision.Decode()\n resize_op = vision.Resize(\n (resize_height, resize_width), ds.transforms.vision.Inter.LINEAR)\n\n data_set = data_set.map(\n input_columns=[\"data\"], operations=decode_op, num_parallel_workers=4)\n data_set = data_set.map(\n input_columns=[\"data\"], operations=resize_op, num_parallel_workers=4)\n\n data_set = data_set.batch(2)\n assert data_set.get_dataset_size() == 5\n for epoch in range(5):\n num_iter = 0\n for data in data_set:\n logger.info(\"data is {}\".format(data))\n num_iter += 1\n assert num_iter == 5\n\n data_set.reset()\n\n\ndef test_cv_minddataset_reader_no_columns(add_and_remove_cv_file):\n \"\"\"tutorial for cv minderdataset.\"\"\"\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\")\n assert data_set.get_dataset_size() == 10\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- cv reader basic: {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- len(item[data]): {} ------------------------\".format(len(item[\"data\"])))\n logger.info(\n \"-------------- item[data]: {} -----------------------------\".format(item[\"data\"]))\n logger.info(\n \"-------------- item[file_name]: {} ------------------------\".format(item[\"file_name\"]))\n logger.info(\n \"-------------- item[label]: {} ----------------------------\".format(item[\"label\"]))\n num_iter += 1\n assert num_iter == 10\n\n\ndef test_cv_minddataset_reader_repeat_tutorial(add_and_remove_cv_file):\n \"\"\"tutorial for cv minderdataset.\"\"\"\n columns_list = [\"data\", \"file_name\", \"label\"]\n num_readers = 4\n data_set = ds.MindDataset(CV_FILE_NAME + \"0\", columns_list, num_readers)\n repeat_num = 2\n data_set = data_set.repeat(repeat_num)\n num_iter = 0\n for item in data_set.create_dict_iterator():\n logger.info(\n \"-------------- repeat two test {} ------------------------\".format(num_iter))\n logger.info(\n \"-------------- len(item[data]): {} -----------------------\".format(len(item[\"data\"])))\n logger.info(\n \"-------------- item[data]: {} ----------------------------\".format(item[\"data\"]))\n logger.info(\n \"-------------- item[file_name]: {} -----------------------\".format(item[\"file_name\"]))\n logger.info(\n \"-------------- item[label]: {} ---------------------------\".format(item[\"label\"]))\n num_iter += 1\n assert num_iter == 20\n\n\ndef get_data(dir_name):\n \"\"\"\n usage: get data from imagenet dataset\n params:\n dir_name: directory containing folder images and annotation information\n\n \"\"\"\n if 
not os.path.isdir(dir_name):\n raise IOError(\"Directory {} not exists\".format(dir_name))\n img_dir = os.path.join(dir_name, \"images\")\n ann_file = os.path.join(dir_name, \"annotation.txt\")\n with open(ann_file, \"r\") as file_reader:\n lines = file_reader.readlines()\n\n data_list = []\n for i, line in enumerate(lines):\n try:\n filename, label = line.split(\",\")\n label = label.strip(\"\\n\")\n with open(os.path.join(img_dir, filename), \"rb\") as file_reader:\n img = file_reader.read()\n data_json = {\"id\": i,\n \"file_name\": filename,\n \"data\": img,\n \"label\": int(label)}\n data_list.append(data_json)\n except FileNotFoundError:\n continue\n return data_list\n\n\ndef get_multi_bytes_data(file_name, bytes_num=3):\n \"\"\"\n Return raw data of multi-bytes dataset.\n\n Args:\n file_name (str): String of multi-bytes dataset's path.\n bytes_num (int): Number of bytes fields.\n\n Returns:\n List\n \"\"\"\n if not os.path.exists(file_name):\n raise IOError(\"map file {} not exists\".format(file_name))\n dir_name = os.path.dirname(file_name)\n with open(file_name, \"r\") as file_reader:\n lines = file_reader.readlines()\n data_list = []\n row_num = 0\n for line in lines:\n try:\n img10_path = line.strip('\\n').split(\" \")\n img5 = []\n for path in img10_path[:bytes_num]:\n with open(os.path.join(dir_name, path), \"rb\") as file_reader:\n img5 += [file_reader.read()]\n data_json = {\"image_{}\".format(i): img5[i]\n for i in range(len(img5))}\n data_json.update({\"id\": row_num})\n row_num += 1\n data_list.append(data_json)\n except FileNotFoundError:\n continue\n return data_list\n\n\ndef get_mkv_data(dir_name):\n \"\"\"\n Return raw data of Vehicle_and_Person dataset.\n\n Args:\n dir_name (str): String of Vehicle_and_Person dataset's path.\n\n Returns:\n List\n \"\"\"\n if not os.path.isdir(dir_name):\n raise IOError(\"Directory {} not exists\".format(dir_name))\n img_dir = os.path.join(dir_name, \"Image\")\n label_dir = os.path.join(dir_name, \"prelabel\")\n\n data_list = []\n file_list = os.listdir(label_dir)\n\n index = 1\n for item in file_list:\n if os.path.splitext(item)[1] == '.json':\n file_path = os.path.join(label_dir, item)\n\n image_name = ''.join([os.path.splitext(item)[0], \".jpg\"])\n image_path = os.path.join(img_dir, image_name)\n\n with open(file_path, \"r\") as load_f:\n load_dict = json.load(load_f)\n\n if os.path.exists(image_path):\n with open(image_path, \"rb\") as file_reader:\n img = file_reader.read()\n data_json = {\"file_name\": image_name,\n \"prelabel\": str(load_dict),\n \"data\": img,\n \"id\": index}\n data_list.append(data_json)\n index += 1\n logger.info('{} images are missing'.format(\n len(file_list) - len(data_list)))\n return data_list\n\n\ndef get_nlp_data(dir_name, vocab_file, num):\n \"\"\"\n Return raw data of aclImdb dataset.\n\n Args:\n dir_name (str): String of aclImdb dataset's path.\n vocab_file (str): String of dictionary's path.\n num (int): Number of sample.\n\n Returns:\n List\n \"\"\"\n if not os.path.isdir(dir_name):\n raise IOError(\"Directory {} not exists\".format(dir_name))\n for root, dirs, files in os.walk(dir_name):\n for index, file_name_extension in enumerate(files):\n if index < num:\n file_path = os.path.join(root, file_name_extension)\n file_name, _ = file_name_extension.split('.', 1)\n id_, rating = file_name.split('_', 1)\n with open(file_path, 'r') as f:\n raw_content = f.read()\n\n dictionary = load_vocab(vocab_file)\n vectors = [dictionary.get('[CLS]')]\n vectors += [dictionary.get(i) if i in dictionary\n else 
dictionary.get('[UNK]')\n for i in re.findall(r\"[\\w']+|[{}]\"\n .format(string.punctuation),\n raw_content)]\n vectors += [dictionary.get('[SEP]')]\n input_, mask, segment = inputs(vectors)\n input_ids = np.reshape(np.array(input_), [-1])\n input_mask = np.reshape(np.array(mask), [1, -1])\n segment_ids = np.reshape(np.array(segment), [2, -1])\n data = {\n \"label\": 1,\n \"id\": id_,\n \"rating\": float(rating),\n \"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": segment_ids\n }\n yield data\n\n\ndef convert_to_uni(text):\n if isinstance(text, str):\n return text\n if isinstance(text, bytes):\n return text.decode('utf-8', 'ignore')\n raise Exception(\"The type %s does not convert!\" % type(text))\n\n\ndef load_vocab(vocab_file):\n \"\"\"load vocabulary to translate statement.\"\"\"\n vocab = collections.OrderedDict()\n vocab.setdefault('blank', 2)\n index = 0\n with open(vocab_file) as reader:\n while True:\n tmp = reader.readline()\n if not tmp:\n break\n token = convert_to_uni(tmp)\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\n\ndef inputs(vectors, maxlen=50):\n length = len(vectors)\n if length > maxlen:\n return vectors[0:maxlen], [1] * maxlen, [0] * maxlen\n input_ = vectors + [0] * (maxlen - length)\n mask = [1] * length + [0] * (maxlen - length)\n segment = [0] * maxlen\n return input_, mask, segment\n\n\ndef test_write_with_multi_bytes_and_array_and_read_by_MindDataset():\n mindrecord_file_name = \"test.mindrecord\"\n if os.path.exists(\"{}\".format(mindrecord_file_name)):\n os.remove(\"{}\".format(mindrecord_file_name))\n if os.path.exists(\"{}.db\".format(mindrecord_file_name)):\n os.remove(\"{}.db\".format(x))\n data = [{\"file_name\": \"001.jpg\", \"label\": 4,\n \"image1\": bytes(\"image1 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image1 bytes def\", encoding='UTF-8'),\n \"source_sos_ids\": np.array([1, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([6, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image3\": bytes(\"image1 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image1 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image1 bytes mno\", encoding='UTF-8'),\n \"target_sos_ids\": np.array([28, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([33, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([39, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([48, 49, 50, 51], dtype=np.int64)},\n {\"file_name\": \"002.jpg\", \"label\": 5,\n \"image1\": bytes(\"image2 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image2 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image2 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image2 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image2 bytes mno\", encoding='UTF-8'),\n \"source_sos_ids\": np.array([11, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([16, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"target_sos_ids\": np.array([128, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([133, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([139, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([148, 49, 50, 51], dtype=np.int64)},\n {\"file_name\": \"003.jpg\", \"label\": 6,\n \"source_sos_ids\": np.array([21, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([26, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"target_sos_ids\": np.array([228, 29, 30, 31, 32], 
dtype=np.int64),\n \"target_sos_mask\": np.array([233, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([239, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"image1\": bytes(\"image3 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image3 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image3 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image3 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image3 bytes mno\", encoding='UTF-8'),\n \"target_eos_mask\": np.array([248, 49, 50, 51], dtype=np.int64)},\n {\"file_name\": \"004.jpg\", \"label\": 7,\n \"source_sos_ids\": np.array([31, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([36, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"image1\": bytes(\"image4 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image4 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image4 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image4 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image4 bytes mno\", encoding='UTF-8'),\n \"target_sos_ids\": np.array([328, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([333, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([339, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([348, 49, 50, 51], dtype=np.int64)},\n {\"file_name\": \"005.jpg\", \"label\": 8,\n \"source_sos_ids\": np.array([41, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([46, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"target_sos_ids\": np.array([428, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([433, 34, 35, 36, 37, 38], dtype=np.int64),\n \"image1\": bytes(\"image5 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image5 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image5 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image5 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image5 bytes mno\", encoding='UTF-8'),\n \"target_eos_ids\": np.array([439, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([448, 49, 50, 51], dtype=np.int64)},\n {\"file_name\": \"006.jpg\", \"label\": 9,\n \"source_sos_ids\": np.array([51, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([56, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"target_sos_ids\": np.array([528, 29, 30, 31, 32], dtype=np.int64),\n \"image1\": bytes(\"image6 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image6 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image6 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image6 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image6 bytes mno\", encoding='UTF-8'),\n \"target_sos_mask\": np.array([533, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([539, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([548, 49, 50, 51], dtype=np.int64)}\n ]\n\n writer = FileWriter(mindrecord_file_name)\n schema = {\"file_name\": {\"type\": \"string\"},\n \"image1\": {\"type\": \"bytes\"},\n \"image2\": {\"type\": \"bytes\"},\n \"source_sos_ids\": {\"type\": \"int64\", \"shape\": [-1]},\n \"source_sos_mask\": {\"type\": \"int64\", \"shape\": [-1]},\n \"image3\": {\"type\": \"bytes\"},\n \"image4\": {\"type\": \"bytes\"},\n \"image5\": {\"type\": \"bytes\"},\n \"target_sos_ids\": {\"type\": \"int64\", \"shape\": [-1]},\n \"target_sos_mask\": {\"type\": \"int64\", \"shape\": [-1]},\n \"target_eos_ids\": {\"type\": \"int64\", \"shape\": [-1]},\n 
\"target_eos_mask\": {\"type\": \"int64\", \"shape\": [-1]},\n \"label\": {\"type\": \"int32\"}}\n writer.add_schema(schema, \"data is so cool\")\n writer.write_raw_data(data)\n writer.commit()\n\n # change data value to list\n data_value_to_list = []\n for item in data:\n new_data = {}\n new_data['file_name'] = np.asarray(item[\"file_name\"], dtype='S')\n new_data['label'] = np.asarray(list([item[\"label\"]]), dtype=np.int32)\n new_data['image1'] = np.asarray(list(item[\"image1\"]), dtype=np.uint8)\n new_data['image2'] = np.asarray(list(item[\"image2\"]), dtype=np.uint8)\n new_data['image3'] = np.asarray(list(item[\"image3\"]), dtype=np.uint8)\n new_data['image4'] = np.asarray(list(item[\"image4\"]), dtype=np.uint8)\n new_data['image5'] = np.asarray(list(item[\"image5\"]), dtype=np.uint8)\n new_data['source_sos_ids'] = item[\"source_sos_ids\"]\n new_data['source_sos_mask'] = item[\"source_sos_mask\"]\n new_data['target_sos_ids'] = item[\"target_sos_ids\"]\n new_data['target_sos_mask'] = item[\"target_sos_mask\"]\n new_data['target_eos_ids'] = item[\"target_eos_ids\"]\n new_data['target_eos_mask'] = item[\"target_eos_mask\"]\n data_value_to_list.append(new_data)\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 13\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"source_sos_ids\",\n \"source_sos_mask\", \"target_sos_ids\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 3\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] == data[num_iter][field]).all()\n else:\n assert item[field] == data[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 1\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\n \"image2\", \"source_sos_mask\", \"image3\", \"target_sos_ids\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 4\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 3\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"target_sos_ids\",\n \"image4\", \"source_sos_ids\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 3\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 3\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"target_sos_ids\", \"image5\",\n \"image4\", 
\"image3\", \"source_sos_ids\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 5\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 1\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"target_eos_mask\", \"image5\",\n \"image2\", \"source_sos_mask\", \"label\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 5\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"label\", \"target_eos_mask\", \"image1\", \"target_eos_ids\", \"source_sos_mask\",\n \"image2\", \"image4\", \"image3\", \"source_sos_ids\", \"image5\", \"file_name\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 11\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n os.remove(\"{}\".format(mindrecord_file_name))\n os.remove(\"{}.db\".format(mindrecord_file_name))\n\n\ndef test_write_with_multi_bytes_and_MindDataset():\n mindrecord_file_name = \"test.mindrecord\"\n data = [{\"file_name\": \"001.jpg\", \"label\": 43,\n \"image1\": bytes(\"image1 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image1 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image1 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image1 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image1 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"002.jpg\", \"label\": 91,\n \"image1\": bytes(\"image2 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image2 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image2 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image2 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image2 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"003.jpg\", \"label\": 61,\n \"image1\": bytes(\"image3 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image3 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image3 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image3 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image3 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"004.jpg\", \"label\": 29,\n \"image1\": bytes(\"image4 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image4 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image4 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image4 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image4 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"005.jpg\", \"label\": 78,\n \"image1\": bytes(\"image5 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image5 bytes def\", encoding='UTF-8'),\n \"image3\": 
bytes(\"image5 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image5 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image5 bytes mno\", encoding='UTF-8')},\n {\"file_name\": \"006.jpg\", \"label\": 37,\n \"image1\": bytes(\"image6 bytes abc\", encoding='UTF-8'),\n \"image2\": bytes(\"image6 bytes def\", encoding='UTF-8'),\n \"image3\": bytes(\"image6 bytes ghi\", encoding='UTF-8'),\n \"image4\": bytes(\"image6 bytes jkl\", encoding='UTF-8'),\n \"image5\": bytes(\"image6 bytes mno\", encoding='UTF-8')}\n ]\n writer = FileWriter(mindrecord_file_name)\n schema = {\"file_name\": {\"type\": \"string\"},\n \"image1\": {\"type\": \"bytes\"},\n \"image2\": {\"type\": \"bytes\"},\n \"image3\": {\"type\": \"bytes\"},\n \"label\": {\"type\": \"int32\"},\n \"image4\": {\"type\": \"bytes\"},\n \"image5\": {\"type\": \"bytes\"}}\n writer.add_schema(schema, \"data is so cool\")\n writer.write_raw_data(data)\n writer.commit()\n\n # change data value to list\n data_value_to_list = []\n for item in data:\n new_data = {}\n new_data['file_name'] = np.asarray(item[\"file_name\"], dtype='S')\n new_data['label'] = np.asarray(list([item[\"label\"]]), dtype=np.int32)\n new_data['image1'] = np.asarray(list(item[\"image1\"]), dtype=np.uint8)\n new_data['image2'] = np.asarray(list(item[\"image2\"]), dtype=np.uint8)\n new_data['image3'] = np.asarray(list(item[\"image3\"]), dtype=np.uint8)\n new_data['image4'] = np.asarray(list(item[\"image4\"]), dtype=np.uint8)\n new_data['image5'] = np.asarray(list(item[\"image5\"]), dtype=np.uint8)\n data_value_to_list.append(new_data)\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 7\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"image1\", \"image2\", \"image5\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 3\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"image2\", \"image4\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 2\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"image5\", \"image2\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 2\n for field in item:\n if isinstance(item[field], 
np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"image5\", \"image2\", \"label\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 3\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"image4\", \"image5\",\n \"image2\", \"image3\", \"file_name\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 5\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n os.remove(\"{}\".format(mindrecord_file_name))\n os.remove(\"{}.db\".format(mindrecord_file_name))\n\n\ndef test_write_with_multi_array_and_MindDataset():\n mindrecord_file_name = \"test.mindrecord\"\n data = [{\"source_sos_ids\": np.array([1, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([6, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"source_eos_ids\": np.array([13, 14, 15, 16, 17, 18], dtype=np.int64),\n \"source_eos_mask\": np.array([19, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64),\n \"target_sos_ids\": np.array([28, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([33, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([39, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([48, 49, 50, 51], dtype=np.int64)},\n {\"source_sos_ids\": np.array([11, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([16, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"source_eos_ids\": np.array([113, 14, 15, 16, 17, 18], dtype=np.int64),\n \"source_eos_mask\": np.array([119, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64),\n \"target_sos_ids\": np.array([128, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([133, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([139, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([148, 49, 50, 51], dtype=np.int64)},\n {\"source_sos_ids\": np.array([21, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([26, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"source_eos_ids\": np.array([213, 14, 15, 16, 17, 18], dtype=np.int64),\n \"source_eos_mask\": np.array([219, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64),\n \"target_sos_ids\": np.array([228, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([233, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([239, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([248, 49, 50, 51], dtype=np.int64)},\n {\"source_sos_ids\": np.array([31, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([36, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"source_eos_ids\": np.array([313, 14, 15, 16, 
17, 18], dtype=np.int64),\n \"source_eos_mask\": np.array([319, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64),\n \"target_sos_ids\": np.array([328, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([333, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([339, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([348, 49, 50, 51], dtype=np.int64)},\n {\"source_sos_ids\": np.array([41, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([46, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"source_eos_ids\": np.array([413, 14, 15, 16, 17, 18], dtype=np.int64),\n \"source_eos_mask\": np.array([419, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64),\n \"target_sos_ids\": np.array([428, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([433, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([439, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([448, 49, 50, 51], dtype=np.int64)},\n {\"source_sos_ids\": np.array([51, 2, 3, 4, 5], dtype=np.int64),\n \"source_sos_mask\": np.array([56, 7, 8, 9, 10, 11, 12], dtype=np.int64),\n \"source_eos_ids\": np.array([513, 14, 15, 16, 17, 18], dtype=np.int64),\n \"source_eos_mask\": np.array([519, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64),\n \"target_sos_ids\": np.array([528, 29, 30, 31, 32], dtype=np.int64),\n \"target_sos_mask\": np.array([533, 34, 35, 36, 37, 38], dtype=np.int64),\n \"target_eos_ids\": np.array([539, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),\n \"target_eos_mask\": np.array([548, 49, 50, 51], dtype=np.int64)}\n ]\n writer = FileWriter(mindrecord_file_name)\n schema = {\"source_sos_ids\": {\"type\": \"int64\", \"shape\": [-1]},\n \"source_sos_mask\": {\"type\": \"int64\", \"shape\": [-1]},\n \"source_eos_ids\": {\"type\": \"int64\", \"shape\": [-1]},\n \"source_eos_mask\": {\"type\": \"int64\", \"shape\": [-1]},\n \"target_sos_ids\": {\"type\": \"int64\", \"shape\": [-1]},\n \"target_sos_mask\": {\"type\": \"int64\", \"shape\": [-1]},\n \"target_eos_ids\": {\"type\": \"int64\", \"shape\": [-1]},\n \"target_eos_mask\": {\"type\": \"int64\", \"shape\": [-1]}}\n writer.add_schema(schema, \"data is so cool\")\n writer.write_raw_data(data)\n writer.commit()\n\n # change data value to list - do none\n data_value_to_list = []\n for item in data:\n new_data = {}\n new_data['source_sos_ids'] = item[\"source_sos_ids\"]\n new_data['source_sos_mask'] = item[\"source_sos_mask\"]\n new_data['source_eos_ids'] = item[\"source_eos_ids\"]\n new_data['source_eos_mask'] = item[\"source_eos_mask\"]\n new_data['target_sos_ids'] = item[\"target_sos_ids\"]\n new_data['target_sos_mask'] = item[\"target_sos_mask\"]\n new_data['target_eos_ids'] = item[\"target_eos_ids\"]\n new_data['target_eos_mask'] = item[\"target_eos_mask\"]\n data_value_to_list.append(new_data)\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 8\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"source_eos_ids\", \"source_eos_mask\",\n \"target_sos_ids\", 
\"target_sos_mask\",\n \"target_eos_ids\", \"target_eos_mask\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 6\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"source_sos_ids\",\n \"target_sos_ids\",\n \"target_eos_mask\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 3\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"target_eos_mask\",\n \"source_eos_mask\",\n \"source_sos_mask\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 3\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 2\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"target_eos_ids\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 1\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n num_readers = 1\n data_set = ds.MindDataset(dataset_file=mindrecord_file_name,\n columns_list=[\"target_eos_mask\", \"target_eos_ids\",\n \"target_sos_mask\", \"target_sos_ids\",\n \"source_eos_mask\", \"source_eos_ids\",\n \"source_sos_mask\", \"source_sos_ids\"],\n num_parallel_workers=num_readers,\n shuffle=False)\n assert data_set.get_dataset_size() == 6\n num_iter = 0\n for item in data_set.create_dict_iterator():\n assert len(item) == 8\n for field in item:\n if isinstance(item[field], np.ndarray):\n assert (item[field] ==\n data_value_to_list[num_iter][field]).all()\n else:\n assert item[field] == data_value_to_list[num_iter][field]\n num_iter += 1\n assert num_iter == 6\n\n os.remove(\"{}\".format(mindrecord_file_name))\n os.remove(\"{}.db\".format(mindrecord_file_name))\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\n\nclass NetFlatten(nn.Cell):\n def __init__(self):\n super(NetFlatten, self).__init__()\n self.flatten = P.Flatten()\n\n def construct(self, x):\n return self.flatten(x)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_flatten():\n x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32))\n expect = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32)\n \"\"\"\n expect output:\n [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]\n \"\"\"\n\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\n flatten = NetFlatten()\n output = flatten(x)\n assert (output.asnumpy() == expect).all()\n\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n flatten = NetFlatten()\n output = flatten(x)\n assert (output.asnumpy() == expect).all()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common.api import ms_function\nfrom mindspore.ops import operations as P\n\ncontext.set_context(device_target=\"Ascend\")\n\n\nclass Net(nn.Cell):\n def __init__(self, is_grad=False):\n super(Net, self).__init__()\n self.SoftmaxCrossEntropyWithLogits = P.SoftmaxCrossEntropyWithLogits()\n\n @ms_function\n def construct(self, features, labels):\n return self.SoftmaxCrossEntropyWithLogits(features, labels)\n\n\ndef test_net():\n features = np.random.randn(32, 1001).astype(np.float16)\n labels = np.random.randn(32, 1001).astype(np.float16)\n SoftmaxCrossEntropyWithLogits = Net()\n output = SoftmaxCrossEntropyWithLogits(Tensor(features), Tensor(labels))\n # print(output.asnumpy())\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport numpy as np\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.vision.c_transforms as vision\nfrom mindspore import log as logger\n\nDATA_DIR_TF2 = 
[\"../data/dataset/test_tf_file_3_images/train-0000-of-0001.data\"]\nSCHEMA_DIR_TF2 = \"../data/dataset/test_tf_file_3_images/datasetSchema.json\"\n\n\ndef test_tf_skip():\n \"\"\"\n a simple skip operation.\n \"\"\"\n data1 = ds.TFRecordDataset(DATA_DIR_TF2, SCHEMA_DIR_TF2, shuffle=False)\n\n resize_height, resize_width = 32, 32\n decode_op = vision.Decode()\n resize_op = vision.Resize((resize_height, resize_width), interpolation=ds.transforms.vision.Inter.LINEAR)\n data1 = data1.map(input_columns=[\"image\"], operations=decode_op)\n data1 = data1.map(input_columns=[\"image\"], operations=resize_op)\n data1 = data1.skip(2)\n\n num_iter = 0\n for item in data1.create_dict_iterator():\n num_iter += 1\n assert num_iter == 1\n\n\ndef generator_md():\n \"\"\"\n create a dataset with [0, 1, 2, 3, 4]\n \"\"\"\n for i in range(5):\n yield (np.array([i]),)\n\n\ndef test_generator_skip():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"], num_parallel_workers=4)\n\n # Here ds1 should be [3, 4]\n ds1 = ds1.skip(3)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 2\n assert buf == [3, 4]\n\n\ndef test_skip_1():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n # Here ds1 should be []\n ds1 = ds1.skip(7)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 0\n assert buf == []\n\n\ndef test_skip_2():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n # Here ds1 should be [0, 1, 2, 3, 4]\n ds1 = ds1.skip(0)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 5\n assert buf == [0, 1, 2, 3, 4]\n\n\ndef test_skip_repeat_1():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n # Here ds1 should be [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]\n ds1 = ds1.repeat(2)\n\n # Here ds1 should be [3, 4, 0, 1, 2, 3, 4]\n ds1 = ds1.skip(3)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 7\n assert buf == [3, 4, 0, 1, 2, 3, 4]\n\n\ndef test_skip_repeat_2():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n # Here ds1 should be [3, 4]\n ds1 = ds1.skip(3)\n\n # Here ds1 should be [3, 4, 3, 4]\n ds1 = ds1.repeat(2)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 4\n assert buf == [3, 4, 3, 4]\n\n\ndef test_skip_repeat_3():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n # Here ds1 should be [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]\n ds1 = ds1.repeat(2)\n\n # Here ds1 should be [3, 4]\n ds1 = ds1.skip(8)\n\n # Here ds1 should be [3, 4, 3, 4, 3, 4]\n ds1 = ds1.repeat(3)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 6\n assert buf == [3, 4, 3, 4, 3, 4]\n\n\ndef test_skip_take_1():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n # Here ds1 should be [0, 1, 2, 3]\n ds1 = ds1.take(4)\n\n # Here ds1 should be [2, 3]\n ds1 = ds1.skip(2)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 2\n assert buf == [2, 3]\n\n\ndef test_skip_take_2():\n ds1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n # Here ds1 should be [2, 3, 4]\n ds1 = ds1.skip(2)\n\n # Here ds1 should be [2, 3]\n ds1 = ds1.take(2)\n\n buf = []\n for data in ds1:\n buf.append(data[0][0])\n assert len(buf) == 2\n assert buf == [2, 3]\n\n\ndef generator_1d():\n for i in range(64):\n yield (np.array([i]),)\n\n\ndef test_skip_filter_1():\n dataset = ds.GeneratorDataset(generator_1d, ['data'])\n dataset = dataset.skip(5)\n dataset = dataset.filter(predicate=lambda data: data < 11, num_parallel_workers=4)\n\n buf = []\n for item in dataset:\n 
buf.append(item[0][0])\n assert buf == [5, 6, 7, 8, 9, 10]\n\n\ndef test_skip_filter_2():\n dataset = ds.GeneratorDataset(generator_1d, ['data'])\n dataset = dataset.filter(predicate=lambda data: data < 11, num_parallel_workers=4)\n dataset = dataset.skip(5)\n\n buf = []\n for item in dataset:\n buf.append(item[0][0])\n assert buf == [5, 6, 7, 8, 9, 10]\n\n\nif __name__ == \"__main__\":\n test_tf_skip()\n test_generator_skip()\n test_skip_1()\n test_skip_2()\n test_skip_repeat_1()\n test_skip_repeat_2()\n test_skip_repeat_3()\n test_skip_take_1()\n test_skip_take_2()\n test_skip_filter_1()\n test_skip_filter_2()\n" ]
[ [ "numpy.ones_like", "numpy.sqrt", "numpy.zeros_like", "numpy.random.uniform", "numpy.array" ], [ "numpy.random.randn" ], [ "numpy.asarray", "numpy.array" ], [ "numpy.array" ], [ "numpy.random.randn" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ushham/JournalTool
[ "f0ab9b6711b733f3c68a8a94bbb9773ffd3a95fe", "f0ab9b6711b733f3c68a8a94bbb9773ffd3a95fe" ]
[ "journal_venv/lib/python3.9/site-packages/cartopy/tests/mpl/test_plots.py", "journal_venv/lib/python3.9/site-packages/cartopy/tests/io/test_srtm.py" ]
[ "# (C) British Crown Copyright 2018 - 2019, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <https://www.gnu.org/licenses/>.\n\nfrom __future__ import (absolute_import, division, print_function)\n\nfrom io import BytesIO\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport cartopy.crs as ccrs\n\n\ndef test_empty_plot():\n \"\"\"Test making a plot with empty arrays.\"\"\"\n fig = plt.figure()\n ax = plt.axes(projection=ccrs.Mercator())\n ax.plot([], [], transform=ccrs.PlateCarree())\n fig.savefig(BytesIO())\n\n\ndef test_triplot_bbox_tight():\n \"\"\"Test triplot with a tight bbox (#1060).\"\"\"\n x = np.degrees([-0.101, -0.090, -0.069])\n y = np.degrees([0.872, 0.883, 0.888])\n triangles = np.asarray([[0, 1, 2]])\n\n fig = plt.figure()\n ax = plt.axes(projection=ccrs.OSGB(approx=False))\n ax.triplot(x, y, triangles, transform=ccrs.Geodetic())\n fig.savefig(BytesIO(), bbox_inches='tight')\n", "# (C) British Crown Copyright 2011 - 2019, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. 
If not, see <https://www.gnu.org/licenses/>.\n\nfrom __future__ import (absolute_import, division, print_function)\n\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nimport cartopy.crs as ccrs\nimport cartopy.io.srtm\n\nfrom .test_downloaders import download_to_temp # noqa: F401 (used as fixture)\n\n\npytestmark = [pytest.mark.network,\n pytest.mark.filterwarnings('ignore:SRTM requires an account'),\n pytest.mark.usefixtures('srtm_login_or_skip')]\n\n\[email protected]\ndef srtm_login_or_skip(monkeypatch):\n import os\n try:\n srtm_username = os.environ['SRTM_USERNAME']\n except KeyError:\n pytest.skip('SRTM_USERNAME environment variable is unset.')\n try:\n srtm_password = os.environ['SRTM_PASSWORD']\n except KeyError:\n pytest.skip('SRTM_PASSWORD environment variable is unset.')\n\n from six.moves.urllib.request import (HTTPBasicAuthHandler,\n HTTPCookieProcessor,\n HTTPPasswordMgrWithDefaultRealm,\n build_opener)\n from six.moves.http_cookiejar import CookieJar\n\n password_manager = HTTPPasswordMgrWithDefaultRealm()\n password_manager.add_password(\n None,\n \"https://urs.earthdata.nasa.gov\",\n srtm_username,\n srtm_password)\n cookie_jar = CookieJar()\n opener = build_opener(HTTPBasicAuthHandler(password_manager),\n HTTPCookieProcessor(cookie_jar))\n\n monkeypatch.setattr(cartopy.io, 'urlopen', opener.open)\n\n\nclass TestRetrieve(object):\n @pytest.mark.parametrize('Source, read_SRTM, max_, min_, pt', [\n (cartopy.io.srtm.SRTM3Source, cartopy.io.srtm.read_SRTM3,\n 602, -34, 78),\n (cartopy.io.srtm.SRTM1Source, cartopy.io.srtm.read_SRTM1,\n 602, -37, 50),\n ], ids=[\n 'srtm3',\n 'srtm1',\n ])\n def test_srtm_retrieve(self, Source, read_SRTM, max_, min_, pt,\n download_to_temp): # noqa: F811\n # test that the download mechanism for SRTM works\n with warnings.catch_warnings(record=True) as w:\n r = Source().srtm_fname(-4, 50)\n assert len(w) == 1\n assert issubclass(w[0].category, cartopy.io.DownloadWarning)\n\n assert r.startswith(str(download_to_temp)), \\\n 'File not downloaded to tmp dir'\n\n img, _, _ = read_SRTM(r)\n\n # check that the data is fairly sensible\n assert img.max() == max_\n assert img.min() == min_\n assert img[-10, 12] == pt\n\n @pytest.mark.parametrize('Source, shape', [\n (cartopy.io.srtm.SRTM3Source, (1201, 1201)),\n (cartopy.io.srtm.SRTM1Source, (3601, 3601)),\n ], ids=[\n 'srtm3',\n 'srtm1',\n ])\n def test_srtm_out_of_range(self, Source, shape):\n # Somewhere over the pacific the elevation should be 0.\n img, _, _ = Source().combined(120, 2, 2, 2)\n assert_array_equal(img, np.zeros(np.array(shape) * 2))\n\n\[email protected]('Source', [\n cartopy.io.srtm.SRTM3Source,\n cartopy.io.srtm.SRTM1Source,\n], ids=[\n 'srtm3',\n 'srtm1',\n])\nclass TestSRTMSource__single_tile(object):\n def test_out_of_range(self, Source):\n source = Source()\n match = r'No srtm tile found for those coordinates\\.'\n with pytest.raises(ValueError, match=match):\n source.single_tile(-25, 50)\n\n def test_in_range(self, Source):\n if Source == cartopy.io.srtm.SRTM3Source:\n shape = (1201, 1201)\n elif Source == cartopy.io.srtm.SRTM1Source:\n shape = (3601, 3601)\n else:\n raise ValueError('Source is of unexpected type.')\n source = Source()\n img, crs, extent = source.single_tile(-1, 50)\n assert isinstance(img, np.ndarray)\n assert img.shape == shape\n assert img.dtype == np.dtype('>i2')\n assert crs == ccrs.PlateCarree()\n assert extent == (-1, 0, 50, 51)\n\n def test_zeros(self, Source):\n source = Source()\n _, _, extent = 
source.single_tile(0, 50)\n assert extent == (0, 1, 50, 51)\n\n\[email protected]('Source', [\n cartopy.io.srtm.SRTM3Source,\n cartopy.io.srtm.SRTM1Source,\n], ids=[\n 'srtm3',\n 'srtm1',\n])\nclass TestSRTMSource__combined(object):\n def test_trivial(self, Source):\n source = Source()\n\n e_img, e_crs, e_extent = source.single_tile(-3, 50)\n r_img, r_crs, r_extent = source.combined(-3, 50, 1, 1)\n assert_array_equal(e_img, r_img)\n assert e_crs == r_crs\n assert e_extent == r_extent\n\n def test_2by2(self, Source):\n source = Source()\n\n e_img, _, e_extent = source.combined(-1, 50, 2, 1)\n assert e_extent == (-1, 1, 50, 51)\n imgs = [source.single_tile(-1, 50)[0],\n source.single_tile(0, 50)[0]]\n assert_array_equal(np.hstack(imgs), e_img)\n\n\[email protected]('Source', [\n cartopy.io.srtm.SRTM3Source,\n cartopy.io.srtm.SRTM1Source,\n], ids=[\n 'srtm3',\n 'srtm1',\n])\ndef test_fetch_raster_ascombined(Source):\n source = Source()\n\n e_img, e_crs, e_extent = source.combined(-1, 50, 2, 1)\n imgs = source.fetch_raster(ccrs.PlateCarree(),\n (-0.9, 0.1, 50.1, 50.999),\n None)\n assert len(imgs) == 1\n r_img, r_extent = imgs[0]\n assert e_extent == r_extent\n assert_array_equal(e_img[::-1, :], r_img)\n" ]
[ [ "numpy.asarray", "numpy.degrees", "matplotlib.pyplot.figure" ], [ "numpy.testing.assert_array_equal", "numpy.hstack", "numpy.array", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jakelishman/qutip
[ "fbb7fad5bc205910228db622d90601c82db45e4b", "fbb7fad5bc205910228db622d90601c82db45e4b", "fbb7fad5bc205910228db622d90601c82db45e4b", "fbb7fad5bc205910228db622d90601c82db45e4b", "fbb7fad5bc205910228db622d90601c82db45e4b" ]
[ "qutip/tests/solve/test_sesolve.py", "qutip/core/states.py", "qutip/core/data/expm.py", "qutip/solve/nonmarkov/heom.py", "qutip/_mkl/spmv.py" ]
[ "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport numpy as np\nfrom numpy.testing import assert_, run_module_suite\n\n# disable the progress bar\nimport os\n\nfrom qutip import (\n sigmax, sigmay, sigmaz, qeye, basis, expect, num, destroy, create,\n Cubic_Spline, sesolve,\n)\nfrom qutip.solve import SolverOptions\n\nos.environ['QUTIP_GRAPHICS'] = \"NO\"\n\n\nclass TestSESolve:\n \"\"\"\n A test class for the QuTiP Schrodinger Eq. 
solver\n \"\"\"\n\n def check_evolution(self, H, delta, psi0, tlist, analytic_func,\n U0=None, td_args={}, tol=5e-3):\n \"\"\"\n Compare integrated evolution with analytical result\n If U0 is not None then operator evo is checked\n Otherwise state evo\n \"\"\"\n\n if U0 is None:\n output = sesolve(H, psi0, tlist, [sigmax(), sigmay(), sigmaz()],\n args=td_args)\n sx, sy, sz = output.expect[0], output.expect[1], output.expect[2]\n else:\n output = sesolve(H, U0, tlist, args=td_args)\n sx = [expect(sigmax(), U*psi0) for U in output.states]\n sy = [expect(sigmay(), U*psi0) for U in output.states]\n sz = [expect(sigmaz(), U*psi0) for U in output.states]\n\n sx_analytic = np.zeros(np.shape(tlist))\n sy_analytic = np.array([-np.sin(delta*analytic_func(t, td_args))\n for t in tlist])\n sz_analytic = np.array([np.cos(delta*analytic_func(t, td_args))\n for t in tlist])\n\n np.testing.assert_allclose(sx, sx_analytic, atol=tol)\n np.testing.assert_allclose(sy, sy_analytic, atol=tol)\n np.testing.assert_allclose(sz, sz_analytic, atol=tol)\n\n def test_01_1_state_with_const_H(self):\n \"sesolve: state with const H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n analytic_func = lambda t, args: t\n\n self.check_evolution(H1, delta, psi0, tlist, analytic_func)\n\n def test_01_1_unitary_with_const_H(self):\n \"sesolve: unitary operator with const H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n U0 = qeye(2) # initital operator\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n analytic_func = lambda t, args: t\n\n self.check_evolution(H1, delta, psi0, tlist, analytic_func, U0)\n\n def test_02_1_state_with_func_H(self):\n \"sesolve: state with td func H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n h1_func = lambda t, args: H1*np.exp(-args['alpha']*t)\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(h1_func, delta, psi0, tlist, analytic_func,\n td_args=td_args)\n\n def test_02_2_unitary_with_func_H(self):\n \"sesolve: unitary operator with td func H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n U0 = qeye(2) # initital operator\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n h1_func = lambda t, args: H1*np.exp(-args['alpha']*t)\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(h1_func, delta, psi0, tlist, analytic_func, U0,\n td_args=td_args)\n\n def test_03_1_state_with_list_func_H(self):\n \"sesolve: state with td list func H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n h1_coeff = lambda t, args: np.exp(-args['alpha']*t)\n H = [[H1, h1_coeff]]\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(H, delta, psi0, tlist, analytic_func,\n td_args=td_args)\n\n def test_03_2_unitary_with_list_func_H(self):\n \"sesolve: unitary operator with td list func H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # 
initial state\n U0 = qeye(2) # initital operator\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n h1_coeff = lambda t, args: np.exp(-args['alpha']*t)\n H = [[H1, h1_coeff]]\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(H, delta, psi0, tlist, analytic_func, U0,\n td_args=td_args)\n\n def test_04_1_state_with_list_str_H(self):\n \"sesolve: state with td list str H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n H = [[H1, 'exp(-alpha*t)']]\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(H, delta, psi0, tlist, analytic_func,\n td_args=td_args)\n\n def test_04_2_unitary_with_list_func_H(self):\n \"sesolve: unitary operator with td list str H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n U0 = qeye(2) # initital operator\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n H = [[H1, 'exp(-alpha*t)']]\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(H, delta, psi0, tlist, analytic_func, U0,\n td_args=td_args)\n\n\n def test_05_1_state_with_interp_H(self):\n \"sesolve: state with td interp H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n tcub = np.linspace(0, 20, 50)\n S = Cubic_Spline(0, 20, np.exp(-alpha*tcub))\n H = [[H1, S]]\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(H, delta, psi0, tlist, analytic_func,\n td_args=td_args)\n\n def test_05_2_unitary_with_interp_H(self):\n \"sesolve: unitary operator with td interp H\"\n delta = 1.0 * 2*np.pi # atom frequency\n psi0 = basis(2, 0) # initial state\n U0 = qeye(2) # initital operator\n H1 = 0.5*delta*sigmax() # Hamiltonian operator\n tlist = np.linspace(0, 20, 200)\n\n alpha = 0.1\n td_args = {'alpha':alpha}\n tcub = np.linspace(0, 20, 50)\n S = Cubic_Spline(0, 20, np.exp(-alpha*tcub))\n H = [[H1, S]]\n analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))\n /args['alpha'])\n\n self.check_evolution(H, delta, psi0, tlist, analytic_func, U0,\n td_args=td_args)\n\n def compare_evolution(self, H, psi0, tlist,\n normalize=False, td_args={}, tol=5e-5):\n \"\"\"\n Compare integrated evolution of unitary operator with state evo\n \"\"\"\n U0 = qeye(2)\n options = SolverOptions(store_states=True, normalize_output=normalize)\n out_s = sesolve(H, psi0, tlist, [sigmax(), sigmay(), sigmaz()],\n options=options,args=td_args)\n xs, ys, zs = out_s.expect[0], out_s.expect[1], out_s.expect[2]\n\n out_u = sesolve(H, U0, tlist, options=options, args=td_args)\n xu = [expect(sigmax(), U*psi0) for U in out_u.states]\n yu = [expect(sigmay(), U*psi0) for U in out_u.states]\n zu = [expect(sigmaz(), U*psi0) for U in out_u.states]\n\n if normalize:\n msg_ext = \". (Normalized)\"\n else:\n msg_ext = \". 
(Not normalized)\"\n assert_(max(abs(xs - xu)) < tol,\n msg=\"expect X not matching\" + msg_ext)\n assert_(max(abs(ys - yu)) < tol,\n msg=\"expect Y not matching\" + msg_ext)\n assert_(max(abs(zs - zu)) < tol,\n msg=\"expect Z not matching\" + msg_ext)\n\n def test_06_1_compare_state_and_unitary_const(self):\n \"sesolve: compare state and unitary operator evo - const H\"\n eps = 0.2 * 2*np.pi\n delta = 1.0 * 2*np.pi # atom frequency\n w0 = 0.5*eps\n w1 = 0.5*delta\n H0 = w0*sigmaz()\n H1 = w1*sigmax()\n H = H0 + H1\n\n psi0 = basis(2, 0) # initial state\n tlist = np.linspace(0, 20, 200)\n\n self.compare_evolution(H, psi0, tlist,\n normalize=False, tol=5e-5)\n self.compare_evolution(H, psi0, tlist,\n normalize=True, tol=5e-5)\n\n def test_06_2_compare_state_and_unitary_func(self):\n \"sesolve: compare state and unitary operator evo - func td\"\n eps = 0.2 * 2*np.pi\n delta = 1.0 * 2*np.pi # atom frequency\n w0 = 0.5*eps\n w1 = 0.5*delta\n H0 = w0*sigmaz()\n H1 = w1*sigmax()\n a = 0.1\n alpha = 0.1\n td_args = {'a':a, 'alpha':alpha}\n H_func = lambda t, args: a*t*H0 + H1*np.exp(-alpha*t)\n H = H_func\n\n psi0 = basis(2, 0) # initial state\n tlist = np.linspace(0, 20, 200)\n\n self.compare_evolution(H, psi0, tlist,\n normalize=False, td_args=td_args, tol=5e-5)\n self.compare_evolution(H, psi0, tlist,\n normalize=True, td_args=td_args, tol=5e-5)\n\n def test_06_3_compare_state_and_unitary_list_func(self):\n \"sesolve: compare state and unitary operator evo - list func td\"\n eps = 0.2 * 2*np.pi\n delta = 1.0 * 2*np.pi # atom frequency\n w0 = 0.5*eps\n w1 = 0.5*delta\n H0 = w0*sigmaz()\n H1 = w1*sigmax()\n a = 0.1\n w_a = w0\n td_args = {'a':a, 'w_a':w_a}\n h0_func = lambda t, args: a*t\n h1_func = lambda t, args: np.cos(w_a*t)\n H = [[H0, h0_func], [H1, h1_func]]\n\n psi0 = basis(2, 0) # initial state\n tlist = np.linspace(0, 20, 200)\n\n self.compare_evolution(H, psi0, tlist,\n normalize=False, td_args=td_args, tol=5e-5)\n self.compare_evolution(H, psi0, tlist,\n normalize=True, td_args=td_args, tol=5e-5)\n\n def test_06_4_compare_state_and_unitary_list_str(self):\n \"sesolve: compare state and unitary operator evo - list str td\"\n eps = 0.2 * 2*np.pi\n delta = 1.0 * 2*np.pi # atom frequency\n w0 = 0.5*eps\n w1 = 0.5*delta\n H0 = w0*sigmaz()\n H1 = w1*sigmax()\n w_a = w0\n\n td_args = {'w_a':w_a}\n H = [H0, [H1, 'cos(w_a*t)']]\n\n psi0 = basis(2, 0) # initial state\n tlist = np.linspace(0, 20, 200)\n\n self.compare_evolution(H, psi0, tlist,\n normalize=False, td_args=td_args, tol=5e-5)\n self.compare_evolution(H, psi0, tlist,\n normalize=True, td_args=td_args, tol=5e-5)\n\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['basis', 'qutrit_basis', 'coherent', 'coherent_dm', 'fock_dm',\n 'fock', 'thermal_dm', 'maximally_mixed_dm', 'ket2dm', 'projection',\n 'qstate', 'ket', 'bra', 'state_number_enumerate',\n 'state_number_index', 'state_index_number', 'state_number_qobj',\n 'phase_basis', 'zero_ket', 'spin_state', 'spin_coherent',\n 'bell_state', 'singlet_state', 'triplet_states', 'w_state',\n 'ghz_state', 'enr_state_dictionaries', 'enr_fock',\n 'enr_thermal_dm']\n\nimport itertools\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom . import data as _data\nfrom .qobj import Qobj\nfrom .operators import jmat, displace, qdiags\nfrom .tensor import tensor\n\ndef _promote_to_zero_list(arg, length):\n \"\"\"\n Ensure `arg` is a list of length `length`. If `arg` is None it is promoted\n to `[0]*length`. All other inputs are checked that they match the correct\n form.\n\n Returns\n -------\n list_ : list\n A list of integers of length `length`.\n \"\"\"\n if arg is None:\n arg = [0]*length\n elif not isinstance(arg, list):\n arg = [arg]\n if not len(arg) == length:\n raise ValueError(\"All list inputs must be the same length.\")\n if all(isinstance(x, numbers.Integral) for x in arg):\n return arg\n raise TypeError(\"Dimensions must be an integer or list of integers.\")\n\n\ndef basis(dimensions, n=None, offset=None, *, dtype=_data.Dense):\n \"\"\"Generates the vector representation of a Fock state.\n\n Parameters\n ----------\n dimensions : int or list of ints\n Number of Fock states in Hilbert space. If a list, then the resultant\n object will be a tensor product over spaces with those dimensions.\n\n n : int or list of ints, optional (default 0 for all dimensions)\n Integer corresponding to desired number state, defaults to 0 for all\n dimensions if omitted. The shape must match ``dimensions``, e.g. if\n ``dimensions`` is a list, then ``n`` must either be omitted or a list\n of equal length.\n\n offset : int or list of ints, optional (default 0 for all dimensions)\n The lowest number state that is included in the finite number state\n representation of the state in the relevant dimension.\n\n dtype : type or str\n storage representation. 
Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n state : :class:`qutip.Qobj`\n Qobj representing the requested number state ``|n>``.\n\n Examples\n --------\n >>> basis(5,2) # doctest: +SKIP\n Quantum object: dims = [[5], [1]], shape = (5, 1), type = ket\n Qobj data =\n [[ 0.+0.j]\n [ 0.+0.j]\n [ 1.+0.j]\n [ 0.+0.j]\n [ 0.+0.j]]\n >>> basis([2,2,2], [0,1,0]) # doctest: +SKIP\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], shape = (8, 1), type = ket\n Qobj data =\n [[0.]\n [0.]\n [1.]\n [0.]\n [0.]\n [0.]\n [0.]\n [0.]]\n\n\n Notes\n -----\n A subtle incompatibility with the quantum optics toolbox: In QuTiP::\n\n basis(N, 0) = ground state\n\n but in the qotoolbox::\n\n basis(N, 1) = ground state\n\n \"\"\"\n # Promote all parameters to lists to simplify later logic.\n if not isinstance(dimensions, list):\n dimensions = [dimensions]\n n_dimensions = len(dimensions)\n ns = [m-off for m, off in zip(_promote_to_zero_list(n, n_dimensions),\n _promote_to_zero_list(offset, n_dimensions))]\n if any((not isinstance(x, numbers.Integral)) or x < 0 for x in dimensions):\n raise ValueError(\"All dimensions must be >= 0.\")\n if not all(0 <= n < dimension for n, dimension in zip(ns, dimensions)):\n raise ValueError(\"All basis indices must be \"\n \"`offset <= n < dimension+offset`.\")\n location, size = 0, 1\n for m, dimension in zip(reversed(ns), reversed(dimensions)):\n location += m*size\n size *= dimension\n\n data = _data.one_element[dtype]((size, 1), (location, 0), 1)\n return Qobj(data,\n dims=[dimensions, [1]*n_dimensions],\n type='ket',\n isherm=False,\n isunitary=False,\n copy=False)\n\n\ndef qutrit_basis(*, dtype=_data.Dense):\n \"\"\"Basis states for a three level system (qutrit)\n\n dtype : type or str\n storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n qstates : array\n Array of qutrit basis vectors\n\n \"\"\"\n out = np.empty((3,), dtype=object)\n out[:] = [\n basis(3, 0, dtype=dtype),\n basis(3, 1, dtype=dtype),\n basis(3, 2, dtype=dtype),\n ]\n return out\n\n_COHERENT_METHODS = ('operator', 'analytic')\n\n\ndef coherent(N, alpha, offset=0, method='operator', *, dtype=_data.Dense):\n \"\"\"Generates a coherent state with eigenvalue alpha.\n\n Constructed using displacement operator on vacuum state.\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n alpha : float/complex\n Eigenvalue of coherent state.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state. Using a non-zero offset will make the\n default method 'analytic'.\n\n method : string {'operator', 'analytic'}\n Method for generating coherent state.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for coherent state\n\n Examples\n --------\n >>> coherent(5,0.25j) # doctest: +SKIP\n Quantum object: dims = [[5], [1]], shape = [5, 1], type = ket\n Qobj data =\n [[ 9.69233235e-01+0.j ]\n [ 0.00000000e+00+0.24230831j]\n [ -4.28344935e-02+0.j ]\n [ 0.00000000e+00-0.00618204j]\n [ 7.80904967e-04+0.j ]]\n\n Notes\n -----\n Select method 'operator' (default) or 'analytic'. With the\n 'operator' method, the coherent state is generated by displacing\n the vacuum state using the displacement operator defined in the\n truncated Hilbert space of size 'N'. This method guarantees that the\n resulting state is normalized. 
With 'analytic' method the coherent state\n is generated using the analytical formula for the coherent state\n coefficients in the Fock basis. This method does not guarantee that the\n state is normalized if truncated to a small number of Fock states,\n but would in that case give more accurate coefficients.\n\n \"\"\"\n if method == \"operator\" and offset == 0:\n return (displace(N, alpha, dtype=dtype) * basis(N, 0)).to(dtype)\n elif method == \"analytic\" or offset > 0:\n sqrtn = np.sqrt(np.arange(offset, offset+N, dtype=complex))\n sqrtn[0] = 1 # Get rid of divide by zero warning\n data = alpha / sqrtn\n if offset == 0:\n data[0] = np.exp(-abs(alpha)**2 / 2.0)\n else:\n s = np.prod(np.sqrt(np.arange(1, offset + 1))) # sqrt factorial\n data[0] = np.exp(-abs(alpha)**2 * 0.5) * alpha**offset / s\n np.cumprod(data, out=sqrtn) # Reuse sqrtn array\n return Qobj(sqrtn,\n dims=[[N], [1]],\n type='ket',\n copy=False).to(dtype)\n raise TypeError(\n \"The method option can only take values in \" + repr(_COHERENT_METHODS)\n )\n\n\ndef coherent_dm(N, alpha, offset=0, method='operator', *, dtype=_data.Dense):\n \"\"\"Density matrix representation of a coherent state.\n\n Constructed via outer product of :func:`qutip.states.coherent`\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n alpha : float/complex\n Eigenvalue for coherent state.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state.\n\n method : string {'operator', 'analytic'}\n Method for generating coherent density matrix.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n dm : qobj\n Density matrix representation of coherent state.\n\n Examples\n --------\n >>> coherent_dm(3,0.25j) # doctest: +SKIP\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.93941695+0.j 0.00000000-0.23480733j -0.04216943+0.j ]\n [ 0.00000000+0.23480733j 0.05869011+0.j 0.00000000-0.01054025j]\n [-0.04216943+0.j 0.00000000+0.01054025j 0.00189294+0.j\\\n ]]\n\n Notes\n -----\n Select method 'operator' (default) or 'analytic'. With the\n 'operator' method, the coherent density matrix is generated by displacing\n the vacuum state using the displacement operator defined in the\n truncated Hilbert space of size 'N'. This method guarantees that the\n resulting density matrix is normalized. With 'analytic' method the coherent\n density matrix is generated using the analytical formula for the coherent\n state coefficients in the Fock basis. This method does not guarantee that\n the state is normalized if truncated to a small number of Fock states,\n but would in that case give more accurate coefficients.\n\n \"\"\"\n return coherent(N, alpha, offset=offset, method=method, dtype=dtype).proj()\n\n\ndef fock_dm(dimensions, n=None, offset=None, *, dtype=_data.CSR):\n \"\"\"Density matrix representation of a Fock state\n\n Constructed via outer product of :func:`qutip.states.fock`.\n\n Parameters\n ----------\n dimensions : int or list of ints\n Number of Fock states in Hilbert space. If a list, then the resultant\n object will be a tensor product over spaces with those dimensions.\n\n n : int or list of ints, optional (default 0 for all dimensions)\n Integer corresponding to desired number state, defaults to 0 for all\n dimensions if omitted. The shape must match ``dimensions``, e.g. 
if\n ``dimensions`` is a list, then ``n`` must either be omitted or a list\n of equal length.\n\n offset : int or list of ints, optional (default 0 for all dimensions)\n The lowest number state that is included in the finite number state\n representation of the state in the relevant dimension.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n dm : qobj\n Density matrix representation of Fock state.\n\n Examples\n --------\n >>> fock_dm(3,1) # doctest: +SKIP\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 1.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 0.+0.j]]\n\n \"\"\"\n return basis(dimensions, n, offset=offset, dtype=dtype).proj()\n\n\ndef fock(dimensions, n=None, offset=None, *, dtype=_data.Dense):\n \"\"\"Bosonic Fock (number) state.\n\n Same as :func:`qutip.states.basis`.\n\n Parameters\n ----------\n dimensions : int or list of ints\n Number of Fock states in Hilbert space. If a list, then the resultant\n object will be a tensor product over spaces with those dimensions.\n\n n : int or list of ints, optional (default 0 for all dimensions)\n Integer corresponding to desired number state, defaults to 0 for all\n dimensions if omitted. The shape must match ``dimensions``, e.g. if\n ``dimensions`` is a list, then ``n`` must either be omitted or a list\n of equal length.\n\n offset : int or list of ints, optional (default 0 for all dimensions)\n The lowest number state that is included in the finite number state\n representation of the state in the relevant dimension.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n Requested number state :math:`\\\\left|n\\\\right>`.\n\n Examples\n --------\n >>> fock(4,3) # doctest: +SKIP\n Quantum object: dims = [[4], [1]], shape = [4, 1], type = ket\n Qobj data =\n [[ 0.+0.j]\n [ 0.+0.j]\n [ 0.+0.j]\n [ 1.+0.j]]\n\n \"\"\"\n return basis(dimensions, n, offset=offset, dtype=dtype)\n\n\ndef thermal_dm(N, n, method='operator', *, dtype=_data.CSR):\n \"\"\"Density matrix for a thermal state of n particles\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n n : float\n Expectation value for number of particles in thermal state.\n\n method : string {'operator', 'analytic'}\n ``string`` that sets the method used to generate the\n thermal state probabilities\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n dm : qobj\n Thermal state density matrix.\n\n Examples\n --------\n >>> thermal_dm(5, 1) # doctest: +SKIP\n Quantum object: dims = [[5], [5]], \\\nshape = [5, 5], type = oper, isHerm = True\n Qobj data =\n [[ 0.51612903 0. 0. 0. 0. ]\n [ 0. 0.25806452 0. 0. 0. ]\n [ 0. 0. 0.12903226 0. 0. ]\n [ 0. 0. 0. 0.06451613 0. ]\n [ 0. 0. 0. 0. 0.03225806]]\n\n\n >>> thermal_dm(5, 1, 'analytic') # doctest: +SKIP\n Quantum object: dims = [[5], [5]], \\\nshape = [5, 5], type = oper, isHerm = True\n Qobj data =\n [[ 0.5 0. 0. 0. 0. ]\n [ 0. 0.25 0. 0. 0. ]\n [ 0. 0. 0.125 0. 0. ]\n [ 0. 0. 0. 0.0625 0. ]\n [ 0. 0. 0. 0. 0.03125]]\n\n Notes\n -----\n The 'operator' method (default) generates\n the thermal state using the truncated number operator ``num(N)``. This\n is the method that should be used in computations. The\n 'analytic' method uses the analytic coefficients derived in\n an infinite Hilbert space. 
The analytic form is not necessarily normalized,\n if truncated too aggressively.\n\n \"\"\"\n if n == 0:\n return fock_dm(N, 0, dtype=dtype)\n else:\n i = np.arange(N)\n if method == 'operator':\n beta = np.log(1.0 / n + 1.0)\n diags = np.exp(-beta * i)\n diags = diags / np.sum(diags)\n # populates diagonal terms using truncated operator expression\n\n elif method == 'analytic':\n # populates diagonal terms using analytic values\n diags = (1.0 + n) ** (-1.0) * (n / (1.0 + n)) ** (i)\n else:\n raise ValueError(\n \"'method' keyword argument must be 'operator' or 'analytic'\"\n )\n out = qdiags(diags, 0, dims=[[N], [N]], shape=(N, N), dtype=dtype)\n out._isherm = True\n return out\n\n\ndef maximally_mixed_dm(N, *, dtype=_data.CSR):\n \"\"\"\n Returns the maximally mixed density matrix for a Hilbert space of\n dimension N.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n dm : qobj\n Thermal state density matrix.\n \"\"\"\n if not isinstance(N, numbers.Integral) or N <= 0:\n raise ValueError(\"N must be integer N > 0\")\n return Qobj(_data.identity[dtype](N, scale=1/N), dims=[[N], [N]],\n type='oper', isherm=True, isunitary=(N == 1), copy=False)\n\n\ndef ket2dm(Q):\n \"\"\"\n Takes input ket or bra vector and returns density matrix formed by outer\n product. This is completely identical to calling `Q.proj()`.\n\n Parameters\n ----------\n Q : qobj\n Ket or bra type quantum object.\n\n Returns\n -------\n dm : qobj\n Density matrix formed by outer product of `Q`.\n\n Examples\n --------\n >>> x=basis(3,2)\n >>> ket2dm(x) # doctest: +SKIP\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 1.+0.j]]\n\n \"\"\"\n if Q.isket or Q.isbra:\n return Q.proj()\n raise TypeError(\"input is not a ket or bra vector.\")\n\n\ndef projection(N, n, m, offset=None, *, dtype=_data.CSR):\n \"\"\"The projection operator that projects state :math:`|m>` on state :math:`|n>`.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n n, m : float\n The number states in the projection.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the projector.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n oper : qobj\n Requested projection operator.\n\n \"\"\"\n return basis(N, n, offset=offset, dtype=dtype) @ \\\n basis(N, m, offset=offset, dtype=dtype).dag()\n\n\ndef qstate(string, *, dtype=_data.Dense):\n \"\"\"\n Creates a tensor product for a set of qubits in either the 'up' :math:`|0>`\n or 'down' :math:`|1>` state.\n\n Parameters\n ----------\n string : str\n String containing 'u' or 'd' for each qubit (ex. 'ududd')\n\n Returns\n -------\n qstate : qobj\n Qobj for tensor product corresponding to input string.\n\n dtype : type or str\n Storage representation. 
Any data-layer known to `qutip.data.to` is\n accepted.\n\n Notes\n -----\n Look at ket and bra for more general functions\n creating multiparticle states.\n\n Examples\n --------\n >>> qstate('udu') # doctest: +SKIP\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], shape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n \"\"\"\n n = len(string)\n if n != (string.count('u') + string.count('d')):\n raise TypeError('String input to QSTATE must consist ' +\n 'of \"u\" and \"d\" elements only')\n return basis([2]*n, [1 if x == 'u' else 0 for x in string], dtype=dtype)\n\n\n#\n# different qubit notation dictionary\n#\n_qubit_dict = {\n 'g': 0, # ground state\n 'e': 1, # excited state\n 'u': 0, # spin up\n 'd': 1, # spin down\n 'H': 0, # horizontal polarization\n 'V': 1, # vertical polarization\n}\n\n\ndef _character_to_qudit(x):\n \"\"\"\n Converts a character representing a one-particle state into int.\n \"\"\"\n return _qubit_dict[x] if x in _qubit_dict else int(x)\n\n\ndef ket(seq, dim=2, *, dtype=_data.Dense):\n \"\"\"\n Produces a multiparticle ket state for a list or string,\n where each element stands for state of the respective particle.\n\n Parameters\n ----------\n seq : str / list of ints or characters\n Each element defines state of the respective particle.\n (e.g. [1,1,0,1] or a string \"1101\").\n For qubits it is also possible to use the following conventions:\n - 'g'/'e' (ground and excited state)\n - 'u'/'d' (spin up and down)\n - 'H'/'V' (horizontal and vertical polarization)\n Note: for dimension > 9 you need to use a list.\n\n\n dim : int (default: 2) / list of ints\n Space dimension for each particle:\n int if there are the same, list if they are different.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n ket : qobj\n\n Examples\n --------\n >>> ket(\"10\") # doctest: +SKIP\n Quantum object: dims = [[2, 2], [1, 1]], shape = [4, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 1.]\n [ 0.]]\n\n >>> ket(\"Hue\") # doctest: +SKIP\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], shape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 1.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\n\n >>> ket(\"12\", 3) # doctest: +SKIP\n Quantum object: dims = [[3, 3], [1, 1]], shape = [9, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]\n [ 0.]]\n\n >>> ket(\"31\", [5, 2]) # doctest: +SKIP\n Quantum object: dims = [[5, 2], [1, 1]], shape = [10, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n \"\"\"\n ns = [_character_to_qudit(x) for x in seq]\n dim = [dim]*len(ns) if isinstance(dim, numbers.Integral) else dim\n return basis(dim, ns, dtype=dtype)\n\n\ndef bra(seq, dim=2, *, dtype=_data.Dense):\n \"\"\"\n Produces a multiparticle bra state for a list or string,\n where each element stands for state of the respective particle.\n\n Parameters\n ----------\n seq : str / list of ints or characters\n Each element defines state of the respective particle.\n (e.g. 
[1,1,0,1] or a string \"1101\").\n For qubits it is also possible to use the following conventions:\n - 'g'/'e' (ground and excited state)\n - 'u'/'d' (spin up and down)\n - 'H'/'V' (horizontal and vertical polarization)\n Note: for dimension > 9 you need to use a list.\n\n\n dim : int (default: 2) / list of ints\n Space dimension for each particle:\n int if there are the same, list if they are different.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n bra : qobj\n\n Examples\n --------\n >>> bra(\"10\") # doctest: +SKIP\n Quantum object: dims = [[1, 1], [2, 2]], shape = [1, 4], type = bra\n Qobj data =\n [[ 0. 0. 1. 0.]]\n\n >>> bra(\"Hue\") # doctest: +SKIP\n Quantum object: dims = [[1, 1, 1], [2, 2, 2]], shape = [1, 8], type = bra\n Qobj data =\n [[ 0. 1. 0. 0. 0. 0. 0. 0.]]\n\n >>> bra(\"12\", 3) # doctest: +SKIP\n Quantum object: dims = [[1, 1], [3, 3]], shape = [1, 9], type = bra\n Qobj data =\n [[ 0. 0. 0. 0. 0. 1. 0. 0. 0.]]\n\n\n >>> bra(\"31\", [5, 2]) # doctest: +SKIP\n Quantum object: dims = [[1, 1], [5, 2]], shape = [1, 10], type = bra\n Qobj data =\n [[ 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]]\n \"\"\"\n return ket(seq, dim=dim, dtype=dtype).dag()\n\n\ndef state_number_enumerate(dims, excitations=None):\n \"\"\"\n An iterator that enumerate all the state number arrays (quantum numbers on\n the form [n1, n2, n3, ...]) for a system with dimensions given by dims.\n\n Example:\n\n >>> for state in state_number_enumerate([2,2]): # doctest: +SKIP\n >>> print(state) # doctest: +SKIP\n [ 0 0 ]\n [ 0 1 ]\n [ 1 0 ]\n [ 1 1 ]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n excitations : integer (None)\n Restrict state space to states with excitation numbers below or\n equal to this value.\n\n Returns\n -------\n state_number : list\n Successive state number arrays that can be used in loops and other\n iterations, using standard state enumeration *by definition*.\n\n \"\"\"\n return (x\n for x in itertools.product(*[range(d) for d in dims])\n if excitations is None or sum(x) <= excitations)\n\n\ndef state_number_index(dims, state):\n \"\"\"\n Return the index of a quantum state corresponding to state,\n given a system with dimensions given by dims.\n\n Example:\n\n >>> state_number_index([2, 2, 2], [1, 1, 0])\n 6\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n State number array.\n\n Returns\n -------\n idx : int\n The index of the state given by `state` in standard enumeration\n ordering.\n\n \"\"\"\n return int(np.dot(state, np.cumprod([1] + dims[:0:-1])[::-1]))\n\n\ndef state_index_number(dims, index):\n \"\"\"\n Return a quantum number representation given a state index, for a system\n of composite structure defined by dims.\n\n Example:\n\n >>> state_index_number([2, 2, 2], 6)\n [1, 1, 0]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n index : integer\n The index of the state in standard enumeration ordering.\n\n Returns\n -------\n state : list\n The state number array corresponding to index `index` in standard\n enumeration ordering.\n\n \"\"\"\n state = np.empty_like(dims)\n D = np.concatenate([np.flipud(np.cumprod(np.flipud(dims[1:]))), [1]])\n for n in range(len(dims)):\n state[n] = index / D[n]\n index -= state[n] * D[n]\n return list(state)\n\n\ndef 
state_number_qobj(dims, state, *, dtype=_data.Dense):\n \"\"\"\n Return a Qobj representation of a quantum state specified by the state\n array `state`.\n\n Example:\n\n >>> state_number_qobj([2, 2, 2], [1, 0, 1]) # doctest: +SKIP\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], \\\nshape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n State number array.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n state : :class:`qutip.Qobj.qobj`\n The state as a :class:`qutip.Qobj.qobj` instance.\n\n\n \"\"\"\n warnings.warn(\"basis() is a drop-in replacement for this\",\n DeprecationWarning)\n return basis(dims, state, dtype=dtype)\n\n\n# Excitation-number restricted (enr) states\n\ndef enr_state_dictionaries(dims, excitations):\n \"\"\"\n Return the number of states, and lookup-dictionaries for translating\n a state tuple to a state index, and vice versa, for a system with a given\n number of components and maximum number of excitations.\n\n Parameters\n ----------\n dims: list\n A list with the number of states in each sub-system.\n\n excitations : integer\n The maximum numbers of dimension\n\n Returns\n -------\n nstates, state2idx, idx2state: integer, dict, dict\n The number of states `nstates`, a dictionary for looking up state\n indices from a state tuple, and a dictionary for looking up state\n state tuples from state indices.\n \"\"\"\n nstates = 0\n state2idx = {}\n idx2state = {}\n\n for state in state_number_enumerate(dims, excitations):\n state2idx[state] = nstates\n idx2state[nstates] = state\n nstates += 1\n\n return nstates, state2idx, idx2state\n\n\ndef enr_fock(dims, excitations, state, *, dtype=_data.Dense):\n \"\"\"\n Generate the Fock state representation in a excitation-number restricted\n state space. The `dims` argument is a list of integers that define the\n number of quantums states of each component of a composite quantum system,\n and the `excitations` specifies the maximum number of excitations for\n the basis states that are to be included in the state space. The `state`\n argument is a tuple of integers that specifies the state (in the number\n basis representation) for which to generate the Fock state representation.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n state : list of integers\n The state in the number basis representation.\n\n dtype : type or str\n Storage representation. 
Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n ket : Qobj\n A Qobj instance that represent a Fock state in the exication-number-\n restricted state space defined by `dims` and `exciations`.\n\n \"\"\"\n nstates, state2idx, _ = enr_state_dictionaries(dims, excitations)\n try:\n data =_data.one_element[dtype]((nstates, 1),\n (state2idx[tuple(state)], 0), 1)\n except KeyError:\n msg = (\n \"state tuple \" + str(tuple(state))\n + \" is not in the restricted state space.\"\n )\n raise ValueError(msg) from None\n return Qobj(data, dims=[dims, [1]*len(dims)], type='ket', copy=False)\n\n\ndef enr_thermal_dm(dims, excitations, n, *, dtype=_data.CSR):\n \"\"\"\n Generate the density operator for a thermal state in the excitation-number-\n restricted state space defined by the `dims` and `exciations` arguments.\n See the documentation for enr_fock for a more detailed description of\n these arguments. The temperature of each mode in dims is specified by\n the average number of excitatons `n`.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n n : integer\n The average number of exciations in the thermal state. `n` can be\n a float (which then applies to each mode), or a list/array of the same\n length as dims, in which each element corresponds specifies the\n temperature of the corresponding mode.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n dm : Qobj\n Thermal state density matrix.\n \"\"\"\n nstates, _, idx2state = enr_state_dictionaries(dims, excitations)\n if not isinstance(n, (list, np.ndarray)):\n n = np.ones(len(dims)) * n\n else:\n n = np.asarray(n)\n\n diags = [np.prod((n / (n + 1)) ** np.array(state))\n for idx, state in idx2state.items()]\n diags /= np.sum(diags)\n out = qdiags(diags, 0, dims=[dims, dims],\n shape=(nstates, nstates), dtype=dtype)\n out._isherm = True\n return out\n\n\ndef phase_basis(N, m, phi0=0, *, dtype=_data.Dense):\n \"\"\"\n Basis vector for the mth phase of the Pegg-Barnett phase operator.\n\n Parameters\n ----------\n N : int\n Number of basis vectors in Hilbert space.\n\n m : int\n Integer corresponding to the mth discrete phase\n phi_m = phi0 + 2 * pi * m / N\n\n phi0 : float (default=0)\n Reference phase angle.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n state : qobj\n Ket vector for mth Pegg-Barnett phase operator basis state.\n\n Notes\n -----\n The Pegg-Barnett basis states form a complete set over the truncated\n Hilbert space.\n\n \"\"\"\n phim = phi0 + (2.0 * np.pi * m) / N\n n = np.arange(N)[:, np.newaxis]\n data = np.exp(1.0j * n * phim) / np.sqrt(N)\n return Qobj(data, dims=[[N], [1]], type='ket', copy=False).to(dtype)\n\n\ndef zero_ket(N, dims=None, *, dtype=_data.Dense):\n \"\"\"\n Creates the zero ket vector with shape Nx1 and dimensions `dims`.\n\n Parameters\n ----------\n N : int\n Hilbert space dimensionality\n dims : list\n Optional dimensions if ket corresponds to\n a composite Hilbert space.\n\n dtype : type or str\n Storage representation. 
Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n zero_ket : qobj\n Zero ket on given Hilbert space.\n\n \"\"\"\n return Qobj(_data.zeros[dtype](N, 1), dims=dims, type='ket', copy=False)\n\n\ndef spin_state(j, m, type='ket', *, dtype=_data.Dense):\n \"\"\"Generates the spin state |j, m>, i.e. the eigenstate\n of the spin-j Sz operator with eigenvalue m.\n\n Parameters\n ----------\n j : float\n The spin of the state ().\n\n m : int\n Eigenvalue of the spin-j Sz operator.\n\n type : string {'ket', 'bra', 'dm'}\n Type of state to generate.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for spin state\n\n \"\"\"\n J = 2*j + 1\n\n if type == 'ket':\n return basis(int(J), int(j - m), dtype=dtype)\n elif type == 'bra':\n return basis(int(J), int(j - m), dtype=dtype).dag()\n elif type == 'dm':\n return fock_dm(int(J), int(j - m), dtype=dtype)\n else:\n raise ValueError(f\"invalid value keyword argument type='{type}'\")\n\n\ndef spin_coherent(j, theta, phi, type='ket', *, dtype=_data.Dense):\n \"\"\"Generate the coherent spin state |theta, phi>.\n\n Parameters\n ----------\n j : float\n The spin of the state.\n\n theta : float\n Angle from z axis.\n\n phi : float\n Angle from x axis.\n\n type : string {'ket', 'bra', 'dm'}\n Type of state to generate.\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for spin coherent state\n\n \"\"\"\n if type not in ['ket', 'bra', 'dm']:\n raise ValueError(\"invalid value keyword argument 'type'\")\n Sp = jmat(j, '+')\n Sm = jmat(j, '-')\n psi = (0.5 * theta * np.exp(1j * phi) * Sm -\n 0.5 * theta * np.exp(-1j * phi) * Sp).expm() * \\\n spin_state(j, j)\n\n if type == 'bra':\n psi = psi.dag()\n elif type == 'dm':\n psi = ket2dm(psi)\n return psi.to(dtype)\n\n\n_BELL_STATES = {\n '00': np.sqrt(0.5) * (basis([2, 2], [0, 0]) + basis([2, 2], [1, 1])),\n '01': np.sqrt(0.5) * (basis([2, 2], [0, 0]) - basis([2, 2], [1, 1])),\n '10': np.sqrt(0.5) * (basis([2, 2], [0, 1]) + basis([2, 2], [1, 0])),\n '11': np.sqrt(0.5) * (basis([2, 2], [0, 1]) - basis([2, 2], [1, 0])),\n}\n\n\ndef bell_state(state='00', *, dtype=_data.Dense):\n \"\"\"\n Returns the Bell state:\n\n |B00> = 1 / sqrt(2)*[|0>|0>+|1>|1>]\n |B01> = 1 / sqrt(2)*[|0>|0>-|1>|1>]\n |B10> = 1 / sqrt(2)*[|0>|1>+|1>|0>]\n |B11> = 1 / sqrt(2)*[|0>|1>-|1>|0>]\n\n\n Parameters\n ----------\n state : str ['00', '01', `10`, `11`]\n Which bell state to return\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n Bell_state : qobj\n Bell state\n\n \"\"\"\n return _BELL_STATES[state].copy().to(dtype)\n\n\ndef singlet_state(*, dtype=_data.Dense):\n \"\"\"\n Returns the two particle singlet-state:\n\n |S>=1/sqrt(2)*[|0>|1>-|1>|0>]\n\n that is identical to the fourth bell state.\n\n Parameters\n ----------\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n Bell_state : qobj\n |B11> Bell state\n\n \"\"\"\n return bell_state('11').to(dtype)\n\n\ndef triplet_states(*, dtype=_data.Dense):\n \"\"\"\n Returns the two particle triplet-states:\n |T> = |1>|1>\n = 1 / sqrt(2)*[|0>|1> + |1>|0>]\n = |0>|0>\n that is identical to the fourth bell state.\n\n Parameters\n ----------\n dtype : type or str\n Storage representation. 
Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n trip_states : list\n 2 particle triplet states\n \"\"\"\n return [\n basis([2, 2], [1, 1], dtype=dtype),\n np.sqrt(0.5) * (basis([2, 2], [0, 1], dtype=dtype) +\n basis([2, 2], [1, 0], dtype=dtype)),\n basis([2, 2], [0, 0], dtype=dtype),\n ]\n\n\ndef w_state(N=3, *, dtype=_data.Dense):\n \"\"\"\n Returns the N-qubit W-state.\n [ |100..0> + |010..0> + |001..0> + ... |000..1> ] / sqrt(n)\n\n Parameters\n ----------\n N : int (default=3)\n Number of qubits in state\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n W : qobj\n N-qubit W-state\n \"\"\"\n inds = np.zeros(N, dtype=int)\n inds[0] = 1\n state = basis([2]*N, list(inds), dtype=dtype)\n for kk in range(1, N):\n state += basis([2]*N, list(np.roll(inds, kk)), dtype=dtype)\n return np.sqrt(1 / N) * state\n\n\ndef ghz_state(N=3, *, dtype=_data.Dense):\n \"\"\"\n Returns the N-qubit GHZ-state\n [ |00...00> + |11...11> ] / sqrt(2)\n\n Parameters\n ----------\n N : int (default=3)\n Number of qubits in state\n\n dtype : type or str\n Storage representation. Any data-layer known to `qutip.data.to` is\n accepted.\n\n Returns\n -------\n G : qobj\n N-qubit GHZ-state\n \"\"\"\n return np.sqrt(0.5) * (basis([2]*N, [0]*N, dtype=dtype) +\n basis([2]*N, [1]*N, dtype=dtype))\n", "import numpy as np\nimport scipy.sparse.linalg\n\nfrom .dense import Dense\nfrom .csr import CSR\nfrom .properties import isdiag_csr\nfrom qutip.settings import settings\nfrom .base import idxint_dtype\n\n__all__ = [\n 'expm', 'expm_csr', 'expm_csr_dense',\n]\n\n\ndef expm_csr(matrix: CSR) -> CSR:\n if matrix.shape[0] != matrix.shape[1]:\n raise ValueError(\"can only exponentiate square matrix\")\n if isdiag_csr(matrix):\n matrix_sci = matrix.as_scipy()\n data = np.ones(matrix.shape[0], dtype=np.complex128)\n data[matrix_sci.indices] += np.expm1(matrix_sci.data)\n return CSR(\n (\n data,\n np.arange(matrix.shape[0], dtype=idxint_dtype),\n np.arange(matrix.shape[0] + 1, dtype=idxint_dtype),\n ),\n shape=matrix.shape,\n copy=False,\n )\n # The scipy solvers for the Pade approximant are more efficient with the\n # CSC format than the CSR one.\n csc = matrix.as_scipy().tocsc()\n return CSR(scipy.sparse.linalg.expm(csc).tocsr(),\n tidyup=settings.core['auto_tidyup'])\n\n\ndef expm_csr_dense(matrix: CSR) -> Dense:\n if matrix.shape[0] != matrix.shape[1]:\n raise ValueError(\"can only exponentiate square matrix\")\n return Dense(scipy.sparse.linalg.expm(matrix.to_array()))\n\n\nfrom .dispatch import Dispatcher as _Dispatcher\nimport inspect as _inspect\n\n\nexpm = _Dispatcher(\n _inspect.Signature([\n _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD),\n ]),\n name='expm',\n module=__name__,\n inputs=('matrix',),\n out=True,\n)\nexpm.__doc__ = \"\"\"Matrix exponential `e**A` for a matrix `A`.\"\"\"\nexpm.add_specialisations([\n (CSR, CSR, expm_csr),\n (CSR, Dense, expm_csr_dense),\n], _defer=True)\n\ndel _inspect, _Dispatcher\n", "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson,\n# Neill Lambert, Anubhav Vardhan, Alexander Pitchford.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. 
Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\"\"\"\nThis module provides exact solvers for a system-bath setup using the\nhierarchy equations of motion (HEOM).\n\"\"\"\n\n# Authors: Neill Lambert, Anubhav Vardhan, Alexander Pitchford\n# Contact: [email protected]\n\nimport timeit\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy.integrate\nfrom copy import copy\nfrom qutip import (\n Qobj, qeye, enr_state_dictionaries, liouvillian, spre, spost, sprepost,\n)\nfrom qutip.core import data as _data\nfrom ..solver import SolverOptions, Result, Stats\nfrom qutip.ui.progressbar import BaseProgressBar, TextProgressBar\nfrom ._heom import pad\n\n\ndef _ode_rhs(t, state, operator):\n state = _data.dense.fast_from_numpy(state)\n return _data.matmul(operator, state, dtype=_data.Dense).as_ndarray()[:, 0]\n\n\nclass HEOMSolver(object):\n \"\"\"\n This is superclass for all solvers that use the HEOM method for\n calculating the dynamics evolution. There are many references for this.\n A good introduction, and perhaps closest to the notation used here is:\n DOI:10.1103/PhysRevLett.104.250401\n A more canonical reference, with full derivation is:\n DOI: 10.1103/PhysRevA.41.6676\n The method can compute open system dynamics without using any Markovian\n or rotating wave approximation (RWA) for systems where the bath\n correlations can be approximated to a sum of complex eponentials.\n The method builds a matrix of linked differential equations, which are\n then solved used the same ODE solvers as other qutip solvers (e.g. mesolve)\n\n This class should be treated as abstract. Currently the only subclass\n implemented is that for the Drude-Lorentz spectral density. 
This covers\n the majority of the work that has been done using this model, and there\n are some performance advantages to assuming this model where it is\n appropriate.\n\n There are opportunities to develop a more general spectral density code.\n\n Attributes\n ----------\n H_sys : Qobj\n System Hamiltonian\n\n coup_op : Qobj\n Operator describing the coupling between system and bath.\n\n coup_strength : float\n Coupling strength.\n\n temperature : float\n Bath temperature, in units corresponding to planck\n\n N_cut : int\n Cutoff parameter for the bath\n\n N_exp : int\n Number of exponential terms used to approximate the bath correlation\n functions\n\n planck : float\n reduced Planck constant\n\n boltzmann : float\n Boltzmann's constant\n\n options : :class:`qutip.solver.SolverOptions`\n Generic solver options.\n If set to None the default options will be used\n\n progress_bar: BaseProgressBar\n Optional instance of BaseProgressBar, or a subclass thereof, for\n showing the progress of the simulation.\n\n stats : :class:`qutip.solver.Stats`\n optional container for holding performance statitics\n If None is set, then statistics are not collected\n There may be an overhead in collecting statistics\n\n exp_coeff : list of complex\n Coefficients for the exponential series terms\n\n exp_freq : list of complex\n Frequencies for the exponential series terms\n \"\"\"\n def __init__(self):\n raise NotImplementedError(\n \"This is a abstract class only. \"\n \"Use a subclass, for example HSolverDL\")\n\n def reset(self):\n \"\"\"\n Reset any attributes to default values\n \"\"\"\n self.planck = 1.0\n self.boltzmann = 1.0\n self.H_sys = None\n self.coup_op = None\n self.coup_strength = 0.0\n self.temperature = 1.0\n self.N_cut = 10\n self.N_exp = 2\n self.N_he = 0\n\n self.exp_coeff = None\n self.exp_freq = None\n\n self.options = None\n self.progress_bar = None\n self.stats = None\n\n self.ode = None\n self.configured = False\n\n def configure(self, H_sys, coup_op, coup_strength, temperature, N_cut,\n N_exp, planck=None, boltzmann=None, renorm=None,\n bnd_cut_approx=None, options=None, progress_bar=None,\n stats=None):\n \"\"\"\n Configure the solver using the passed parameters\n The parameters are described in the class attributes, unless there\n is some specific behaviour\n\n Parameters\n ----------\n options : :class:`qutip.solver.SolverOptions`\n Generic solver options.\n If set to None the default options will be used\n\n progress_bar: BaseProgressBar\n Optional instance of BaseProgressBar, or a subclass thereof, for\n showing the progress of the simulation.\n If set to None, then the default progress bar will be used\n Set to False for no progress bar\n\n stats: :class:`qutip.solver.Stats`\n Optional instance of solver.Stats, or a subclass thereof, for\n storing performance statistics for the solver\n If set to True, then the default Stats for this class will be used\n Set to False for no stats\n \"\"\"\n\n self.H_sys = H_sys\n self.coup_op = coup_op\n self.coup_strength = coup_strength\n self.temperature = temperature\n self.N_cut = N_cut\n self.N_exp = N_exp\n if planck:\n self.planck = planck\n if boltzmann:\n self.boltzmann = boltzmann\n if isinstance(options, SolverOptions):\n self.options = options\n if isinstance(progress_bar, BaseProgressBar):\n self.progress_bar = progress_bar\n else:\n self.progress_bar = TextProgressBar() if progress_bar else None\n if isinstance(stats, Stats):\n self.stats = stats\n else:\n self.stats = self.create_new_stats() if stats else None\n\n def 
create_new_stats(self):\n \"\"\"\n Creates a new stats object suitable for use with this solver\n Note: this solver expects the stats object to have sections\n config\n integrate\n \"\"\"\n stats = Stats(['config', 'run'])\n stats.header = \"Hierarchy Solver Stats\"\n return stats\n\n\nclass HSolverDL(HEOMSolver):\n \"\"\"\n HEOM solver based on the Drude-Lorentz model for spectral density.\n Drude-Lorentz bath the correlation functions can be exactly analytically\n expressed as an infinite sum of exponentials which depend on the\n temperature, these are called the Matsubara terms or Matsubara frequencies\n\n For practical computation purposes an approximation must be used based\n on a small number of Matsubara terms (typically < 4).\n\n Attributes\n ----------\n cut_freq : float\n Bath spectral density cutoff frequency.\n\n renorm : bool\n Apply renormalisation to coupling terms\n Can be useful if using SI units for planck and boltzmann\n\n bnd_cut_approx : bool\n Use boundary cut off approximation\n Can be\n \"\"\"\n\n def __init__(self, H_sys, coup_op, coup_strength, temperature, N_cut,\n N_exp, cut_freq, planck=1.0, boltzmann=1.0, renorm=True,\n bnd_cut_approx=True, options=None, progress_bar=None,\n stats=None):\n\n self.reset()\n\n self.options = SolverOptions() if options is None else options\n\n self.progress_bar = False\n if progress_bar is None:\n self.progress_bar = BaseProgressBar()\n elif progress_bar is True:\n self.progress_bar = TextProgressBar()\n\n # the other attributes will be set in the configure method\n self.configure(H_sys, coup_op, coup_strength, temperature, N_cut,\n N_exp, cut_freq, planck=planck, boltzmann=boltzmann,\n renorm=renorm, bnd_cut_approx=bnd_cut_approx,\n stats=stats)\n\n def reset(self):\n \"\"\"\n Reset any attributes to default values\n \"\"\"\n HEOMSolver.reset(self)\n self.cut_freq = 1.0\n self.renorm = False\n self.bnd_cut_approx = False\n\n def configure(self, H_sys, coup_op, coup_strength, temperature, N_cut,\n N_exp, cut_freq, planck=None, boltzmann=None, renorm=None,\n bnd_cut_approx=None, options=None, progress_bar=None,\n stats=None):\n \"\"\"\n Calls configure from :class:`HEOMSolver` and sets any attributes\n that are specific to this subclass\n \"\"\"\n start_config = timeit.default_timer()\n\n HEOMSolver.configure(\n self, H_sys, coup_op, coup_strength, temperature, N_cut, N_exp,\n planck=planck, boltzmann=boltzmann, options=options,\n progress_bar=progress_bar, stats=stats)\n self.cut_freq = cut_freq\n if renorm is not None:\n self.renorm = renorm\n if bnd_cut_approx is not None:\n self.bnd_cut_approx = bnd_cut_approx\n\n # Load local values for optional parameters\n # Constants and Hamiltonian.\n hbar = self.planck\n options = self.options\n progress_bar = self.progress_bar\n stats = self.stats\n\n if stats:\n ss_conf = stats.sections.get('config')\n if ss_conf is None:\n ss_conf = stats.add_section('config')\n\n c, nu = self._calc_matsubara_params()\n\n if renorm:\n norm_plus, norm_minus = self._calc_renorm_factors()\n if stats:\n stats.add_message('options', 'renormalisation', ss_conf)\n # Dimensions et by system\n N_temp = 1\n for i in H_sys.dims[0]:\n N_temp *= i\n sup_dim = N_temp**2\n unit_sys = qeye(N_temp)\n\n # Use shorthands (mainly as in referenced PRL)\n lam0 = self.coup_strength\n gam = self.cut_freq\n N_c = self.N_cut\n N_m = self.N_exp\n Q = coup_op # Q as shorthand for coupling operator\n beta = 1.0/(self.boltzmann*self.temperature)\n\n # Ntot is the total number of ancillary elements in the hierarchy\n # Ntot = 
factorial(N_c + N_m) / (factorial(N_c)*factorial(N_m))\n # Turns out to be the same as nstates from state_number_enumerate\n N_he, he2idx, idx2he = enr_state_dictionaries([N_c + 1]*N_m, N_c)\n\n unit_helems = _data.identity(N_he)\n if self.bnd_cut_approx:\n # the Tanimura boundary cut off operator\n if stats:\n stats.add_message('options', 'boundary cutoff approx', ss_conf)\n op = -2*sprepost(Q, Q.dag()) + spre(Q.dag()*Q) + spost(Q.dag()*Q)\n\n approx_factr = ((2*lam0 / (beta*gam*hbar)) - 1j*lam0) / hbar\n for k in range(N_m):\n approx_factr -= (c[k] / nu[k])\n L_bnd = (-approx_factr*op).data\n L_helems = _data.kron(unit_helems, L_bnd)\n else:\n L_helems = _data.zeros(N_he*sup_dim, N_he*sup_dim)\n\n # Build the hierarchy element interaction matrix\n if stats:\n start_helem_constr = timeit.default_timer()\n\n unit_sup = spre(unit_sys).data\n spreQ = spre(Q).data\n spostQ = spost(Q).data\n commQ = (spre(Q) - spost(Q)).data\n N_he_interact = 0\n\n for he_idx in range(N_he):\n he_state = list(idx2he[he_idx])\n n_excite = sum(he_state)\n\n # The diagonal elements for the hierarchy operator\n # coeff for diagonal elements\n sum_n_m_freq = 0.0\n for k in range(N_m):\n sum_n_m_freq += he_state[k]*nu[k]\n\n L_he = pad(_data.mul(unit_sup, -sum_n_m_freq),\n N_he, N_he, he_idx, he_idx)\n L_helems = _data.add(L_helems, L_he)\n\n # Add the neighour interations\n he_state_neigh = copy(he_state)\n for k in range(N_m):\n\n n_k = he_state[k]\n if n_k >= 1:\n # find the hierarchy element index of the neighbour before\n # this element, for this Matsubara term\n he_state_neigh[k] = n_k - 1\n he_idx_neigh = he2idx[tuple(he_state_neigh)]\n\n op = _data.sub(_data.mul(spreQ, c[k]), _data.mul(spostQ, np.conj(c[k])))\n if renorm:\n op = _data.mul(op, -1j*norm_minus[n_k, k])\n else:\n op = _data.mul(op, -1j*n_k)\n\n L_he = pad(op, N_he, N_he, he_idx, he_idx_neigh)\n L_helems = _data.add(L_helems, L_he)\n N_he_interact += 1\n\n he_state_neigh[k] = n_k\n\n if n_excite <= N_c - 1:\n # find the hierarchy element index of the neighbour after\n # this element, for this Matsubara term\n he_state_neigh[k] = n_k + 1\n he_idx_neigh = he2idx[tuple(he_state_neigh)]\n\n if renorm:\n op = _data.mul(commQ, -1j*norm_plus[n_k, k])\n else:\n op = _data.mul(commQ, -1j)\n\n L_he = pad(op, N_he, N_he, he_idx, he_idx_neigh)\n L_helems = _data.add(L_helems, L_he)\n N_he_interact += 1\n\n he_state_neigh[k] = n_k\n\n if stats:\n stats.add_timing('hierarchy contruct',\n timeit.default_timer() - start_helem_constr,\n ss_conf)\n stats.add_count('Num hierarchy elements', N_he, ss_conf)\n stats.add_count('Num he interactions', N_he_interact, ss_conf)\n\n # Setup Liouvillian\n if stats:\n start_louvillian = timeit.default_timer()\n\n H_he = _data.kron(unit_helems, liouvillian(H_sys).data)\n L_helems = _data.add(L_helems, H_he)\n\n if stats:\n stats.add_timing('Liouvillian contruct',\n timeit.default_timer() - start_louvillian,\n ss_conf)\n start_integ_conf = timeit.default_timer()\n\n r = scipy.integrate.ode(_ode_rhs)\n\n r.set_f_params(L_helems)\n r.set_integrator('zvode', method=options['method'],\n order=options['order'],\n atol=options['atol'], rtol=options['rtol'],\n nsteps=options['nsteps'],\n first_step=options['first_step'],\n min_step=options['min_step'],\n max_step=options['max_step'])\n\n if stats:\n time_now = timeit.default_timer()\n stats.add_timing('Liouvillian contruct',\n time_now - start_integ_conf,\n ss_conf)\n if ss_conf.total_time is None:\n ss_conf.total_time = time_now - start_config\n else:\n ss_conf.total_time += time_now 
- start_config\n\n self._ode = r\n self._N_he = N_he\n self._sup_dim = sup_dim\n self._configured = True\n\n def run(self, rho0, tlist):\n \"\"\"\n Function to solve for an open quantum system using the\n HEOM model.\n\n Parameters\n ----------\n rho0 : Qobj\n Initial state (density matrix) of the system.\n\n tlist : list\n Time over which system evolves.\n\n Returns\n -------\n results : :class:`qutip.solver.Result`\n Object storing all results from the simulation.\n \"\"\"\n\n start_run = timeit.default_timer()\n\n sup_dim = self._sup_dim\n stats = self.stats\n r = self._ode\n\n if not self._configured:\n raise RuntimeError(\"Solver must be configured before it is run\")\n if stats:\n ss_conf = stats.sections.get('config')\n if ss_conf is None:\n raise RuntimeError(\"No config section for solver stats\")\n ss_run = stats.sections.get('run')\n if ss_run is None:\n ss_run = stats.add_section('run')\n\n # Set up terms of the matsubara and tanimura boundaries\n output = Result()\n output.solver = \"hsolve\"\n output.times = tlist\n output.states = []\n\n if stats:\n start_init = timeit.default_timer()\n output.states.append(Qobj(rho0))\n rho0_flat = rho0.full().ravel('F') # Using 'F' effectively transposes\n rho0_he = np.zeros([sup_dim*self._N_he], dtype=complex)\n rho0_he[:sup_dim] = rho0_flat\n r.set_initial_value(rho0_he, tlist[0])\n\n if stats:\n stats.add_timing('initialize',\n timeit.default_timer() - start_init, ss_run)\n start_integ = timeit.default_timer()\n\n dt = np.diff(tlist)\n n_tsteps = len(tlist)\n for t_idx, t in enumerate(tlist):\n if t_idx < n_tsteps - 1:\n r.integrate(r.t + dt[t_idx])\n rho = Qobj(r.y[:sup_dim].reshape(rho0.shape), dims=rho0.dims)\n output.states.append(rho)\n\n if stats:\n time_now = timeit.default_timer()\n stats.add_timing('integrate',\n time_now - start_integ, ss_run)\n if ss_run.total_time is None:\n ss_run.total_time = time_now - start_run\n else:\n ss_run.total_time += time_now - start_run\n stats.total_time = ss_conf.total_time + ss_run.total_time\n\n return output\n\n def _calc_matsubara_params(self):\n \"\"\"\n Calculate the Matsubara coefficents and frequencies\n\n Returns\n -------\n c, nu: both list(float)\n\n \"\"\"\n c = []\n nu = []\n lam0 = self.coup_strength\n gam = self.cut_freq\n hbar = self.planck\n beta = 1.0/(self.boltzmann*self.temperature)\n N_m = self.N_exp\n\n g = 2*np.pi / (beta*hbar)\n for k in range(N_m):\n if k == 0:\n nu.append(gam)\n c.append(lam0*gam*\n (1.0/np.tan(gam*hbar*beta/2.0) - 1j) / hbar)\n else:\n nu.append(k*g)\n c.append(4*lam0*gam*nu[k] /\n ((nu[k]**2 - gam**2)*beta*hbar**2))\n\n self.exp_coeff = c\n self.exp_freq = nu\n return c, nu\n\n def _calc_renorm_factors(self):\n \"\"\"\n Calculate the renormalisation factors\n\n Returns\n -------\n norm_plus, norm_minus : array[N_c, N_m] of float\n \"\"\"\n c = self.exp_coeff\n N_m = self.N_exp\n N_c = self.N_cut\n\n norm_plus = np.empty((N_c+1, N_m))\n norm_minus = np.empty((N_c+1, N_m))\n for k in range(N_m):\n for n in range(N_c+1):\n norm_plus[n, k] = np.sqrt(abs(c[k])*(n + 1))\n norm_minus[n, k] = np.sqrt(float(n)/abs(c[k]))\n\n return norm_plus, norm_minus\n", "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. 
Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\nimport numpy as np\nfrom ctypes import POINTER, c_int, c_char, byref\nfrom numpy import ctypeslib\nfrom qutip.settings import settings as qset\nzcsrgemv = qset.install['mkl_lib'].mkl_cspblas_zcsrgemv\n\ndef mkl_spmv(A, x):\n \"\"\"\n sparse csr_spmv using MKL\n \"\"\"\n (m,n) = A.shape\n\n # Pointers to data of the matrix\n data = A.data.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n indptr = A.indptr.ctypes.data_as(POINTER(c_int))\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n\n # Allocate output, using same conventions as input\n if x.ndim == 1:\n y = np.empty(m,dtype=np.complex128,order='C')\n elif x.ndim==2 and x.shape[1]==1:\n y = np.empty((m,1),dtype=np.complex128,order='C')\n else:\n raise Exception('Input vector must be 1D row or 2D column vector')\n\n np_x = x.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n np_y = y.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n # now call MKL. This returns the answer in np_y, which points to y\n zcsrgemv(byref(c_char(bytes(b'N'))), byref(c_int(m)), data ,indptr, indices, np_x, np_y )\n return y\n" ]
[ [ "numpy.testing.run_module_suite", "numpy.linspace", "numpy.cos", "numpy.shape", "numpy.testing.assert_allclose", "numpy.exp" ], [ "numpy.log", "numpy.sqrt", "numpy.asarray", "numpy.empty_like", "numpy.arange", "numpy.flipud", "numpy.cumprod", "numpy.roll", "numpy.array", "numpy.exp", "numpy.zeros", "numpy.sum", "numpy.empty" ], [ "numpy.arange", "numpy.expm1", "numpy.ones" ], [ "numpy.conj", "numpy.tan", "numpy.diff", "numpy.zeros", "numpy.empty" ], [ "numpy.ctypeslib.ndpointer", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dandiez/AdventOfCode
[ "99ebe6991964290ede87b144c8692c8f6b31030d" ]
[ "2019/day_12/solution.py" ]
[ "import itertools\nfrom math import lcm\nfrom typing import List\nfrom unittest import TestCase\n\nimport numpy as np\nfrom parse import parse\n\nVector3D = np.ndarray\n\n\ndef read_input(filename=\"input\"):\n with open(filename) as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\n inp = [tuple(parse(\"<x={}, y={}, z={}>\", line)) for line in lines]\n inp = [tuple(int(n) for n in str_tuple) for str_tuple in inp]\n return inp\n\n\nclass Body:\n def __init__(\n self, initial_position: Vector3D, initial_velocity: Vector3D, name: str\n ):\n self.pos = initial_position\n self.vel = initial_velocity\n self.name = name\n\n def __repr__(self):\n return (\n f\"{self.name:>10}: pos=<x={self.pos[0]:>3}, y={self.pos[1]:>3}, z={self.pos[2]:>3}>, \"\n f\"vel=<x={self.vel[0]:>3}, y={self.vel[1]:>3}, z={self.vel[2]:>3}>\"\n )\n\n @property\n def potential_energy(self):\n return sum(abs(self.pos))\n\n @property\n def kinetic_energy(self):\n return sum(abs(self.vel))\n\n @property\n def total_energy(self):\n return self.potential_energy * self.kinetic_energy\n\n\nclass System:\n def __init__(self, bodies: List[Body]):\n self.bodies = bodies\n self.age = 0\n self.previous_states = set(self.get_system_state())\n\n def get_system_state(self):\n state = tuple((tuple(body.pos), tuple(body.vel)) for body in self.bodies)\n return state\n\n def __repr__(self):\n _repr = \"\"\n for body in self.bodies:\n _repr += f\"{repr(body)}\\n\"\n return _repr\n\n def simulate(self, number_of_steps):\n target_age = self.age + number_of_steps\n for self.age in range(self.age, target_age + 1):\n # print(f\"Energy after {self.age} steps: {self.get_total_energy()}\")\n # print(self)\n if self.age < target_age:\n self.update_system()\n state = self.get_system_state()\n if state in self.previous_states:\n print(f\"Repeated state after {self.age} steps\")\n return\n if self.age % 100000 == 0:\n print(f\"age is {self.age}\")\n self.previous_states.add(state)\n\n def get_total_energy(self):\n return sum(body.total_energy for body in self.bodies)\n\n def update_system(self):\n self.apply_gravity()\n self.apply_velocity()\n\n def apply_gravity(self):\n for body_a, body_b in itertools.combinations(self.bodies, 2):\n for dimension in range(3):\n if body_a.pos[dimension] > body_b.pos[dimension]:\n body_a.vel[dimension] -= 1\n body_b.vel[dimension] += 1\n elif body_a.pos[dimension] < body_b.pos[dimension]:\n body_a.vel[dimension] += 1\n body_b.vel[dimension] -= 1\n\n def apply_velocity(self):\n for body in self.bodies:\n body.pos += body.vel\n\n\ndef part_1(inp, number_of_steps=1000):\n return part_n(inp, number_of_steps=number_of_steps)\n\n\ndef part_n(inp, number_of_steps=1000, is_part_1=True, dimension=None):\n if dimension is None:\n factor = np.array((1, 1, 1))\n elif dimension == 0:\n factor = np.array((1, 0, 0))\n elif dimension == 1:\n factor = np.array((0, 1, 0))\n elif dimension == 2:\n factor = np.array((0, 0, 1))\n else:\n raise ValueError()\n\n moon_names = (\"Io\", \"Europa\", \"Ganymede\", \"Callisto\")\n the_system = System(\n [\n Body(np.array(loc) * factor, np.array((0, 0, 0)), name)\n for (loc, name) in zip(inp, moon_names)\n ]\n )\n the_system.simulate(number_of_steps)\n if is_part_1:\n return the_system.get_total_energy()\n return the_system.age\n\n\ndef part_2(inp):\n cycle_x = part_n(inp, number_of_steps=10000000000000, is_part_1=False, dimension=0)\n cycle_y = part_n(inp, number_of_steps=10000000000000, is_part_1=False, dimension=1)\n cycle_z = part_n(inp, number_of_steps=10000000000000, 
is_part_1=False, dimension=2)\n return lcm(cycle_x, cycle_y, cycle_z)\n\n\ndef test_sample_1(self):\n inp = read_input(\"sample_1\")\n self.assertEqual(179, part_1(inp, number_of_steps=10))\n self.assertEqual(2772, part_2(inp))\n\n\ndef test_sample_2(self):\n inp = read_input(\"sample_2\")\n self.assertEqual(1940, part_1(inp, number_of_steps=100))\n\n\ndef main(input_file):\n \"\"\"Solve puzzle and connect part 1 with part 2 if needed.\"\"\"\n # part 1\n inp = read_input(input_file)\n p1 = part_1(inp)\n print(f\"Solution to part 1: {p1}\")\n\n # part 2\n inp = read_input(input_file)\n p2 = part_2(inp)\n print(f\"Solution to part 2: {p2}\")\n return p1, p2\n\n\nif __name__ == \"__main__\":\n print(\"*** solving tests ***\")\n test_sample_1(TestCase())\n test_sample_2(TestCase())\n print(\"*** solving main ***\")\n main(\"input\")\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anhddo/ai-arena
[ "bc881e83073be4f9130b1a50fa56a51c11d21f9f" ]
[ "arena5/algos/hppo/hppo.py" ]
[ "# ©2020 Johns Hopkins University Applied Physics Laboratory LLC.\nimport random\nimport os\nimport numpy as np\nfrom arena5.algos.hppo.utils import ned_to_ripCoords_tf\nfrom arena5.algos.hppo.GAE import GAE\nimport tensorflow as tf\nfrom stable_baselines.common.policies import MlpPolicy, CnnPolicy\nfrom stable_baselines.common import tf_util, zipsame\nfrom stable_baselines.common.distributions import DiagGaussianProbabilityDistribution\nfrom tensorflow.keras.layers import Lambda, Input, LSTM, Dense, Reshape, Flatten, multiply, RepeatVector, Permute\nfrom tensorflow.keras.initializers import Orthogonal\nfrom stable_baselines.common import Dataset\n\nfrom tensorflow.keras import backend as K\nfrom stable_baselines.common.mpi_adam import MpiAdam\nfrom stable_baselines.common import tf_util, zipsame\nfrom tensorflow.keras.backend import set_session\nfrom stable_baselines import logger\n\nORTHO_01 = Orthogonal(0.01)\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nclass HPPOPolicy():\n\n def __init__(self, env, policy_comm, **kwargs):\n\n # Pull params out of kwargs\n self.params = kwargs['params']\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\n config.log_device_placement = True # to log device placement (on which device the operation ran)\n sess = tf.Session(config=config)\n set_session(sess) # set this TensorFlow session as the default session for Keras\n\n self.env = env\n self.comm = policy_comm\n\n # Setup tensorboard logger\n logger.configure(self.params.logDir, format_strs=['tensorboard'])\n\n state_size = env.observation_space.shape\n action_size_behavior = env.action_space.shape\n self.b_agent_attack = BehaviorModel(state_size, action_size_behavior, self.comm, label='attack')\n self.b_agent_evade = BehaviorModel(state_size, action_size_behavior, self.comm, label='evade')\n self.b_agent_transit = BehaviorModel(state_size, action_size_behavior, self.comm, label='transit')\n\n # Define meta agent\n self.m_agent = MetaAgent(state_size, [self.b_agent_attack, self.b_agent_evade, self.b_agent_transit], self.comm)\n\n # constants\n self.discount_factor = 0.99\n\n def run(self, num_steps, data_dir, policy_record=None):\n local_steps = int(num_steps / self.comm.Get_size())\n\n steps = 0\n\n # TODO: Add alpha annealing over num_steps\n\n while True:\n\n # sync weights\n self.b_agent_attack.sync_weights()\n self.b_agent_evade.sync_weights()\n self.b_agent_transit.sync_weights()\n self.m_agent.sync_weights()\n\n # create placeholders to store experience that we gather\n training_state = {\"meta\": [], \"attack\": [], \"evade\": [], \"transit\": []}\n training_action = {\"meta\": [], \"attack\": [], \"evade\": [], \"transit\": []}\n training_reward = {\"meta\": [], \"attack\": [], \"evade\": [], \"transit\": []}\n training_next_state = {\"meta\": [], \"attack\": [], \"evade\": [], \"transit\": []}\n training_done = {\"meta\": [], \"attack\": [], \"evade\": [], \"transit\": []}\n training_reward_sum_combined = 0 # keep track of combined reward over all episodes between training\n\n state = self.env.reset()\n reward_sum = {}\n done = False\n while not done:\n complete_action, distribution, beh_actions, label = self.m_agent.get_action(state)\n\n next_state, reward, done, info = self.env.step(complete_action, dst=distribution, label=label)\n\n # Aggregate reward throughout the episode\n if not reward_sum:\n reward_sum = reward\n else:\n reward_sum = {k: reward_sum[k] + reward[k] for (k, v) in reward.items()}\n 
training_reward_sum_combined += reward[\"combined\"]\n\n training_state[\"meta\"].append(state)\n training_action[\"meta\"].append(distribution)\n training_reward[\"meta\"].append(reward[\"combined\"])\n training_next_state[\"meta\"].append(next_state)\n training_done[\"meta\"].append(done)\n\n for idx, label in enumerate(['attack', 'evade', 'transit']):\n training_state[label].append(state)\n training_action[label].append(beh_actions[idx])\n training_reward[label].append(reward[label])\n training_next_state[label].append(next_state)\n training_done[label].append(done)\n\n state = next_state\n\n # #now we have batches of data: compute the values and advantages\n # training_value = {\"meta\": None, \"attack\": None, \"evade\": None, \"transit\": None}\n # training_advantages = {\"meta\": None, \"attack\": None, \"evade\": None, \"transit\": None}\n\n # log tensorboard\n {logger.logkv(k, v) for (k, v) in reward_sum.items()}\n logger.dumpkvs()\n\n # vcompute advantages and values\n models = [self.b_agent_attack, self.b_agent_evade, self.b_agent_transit, self.m_agent]\n for model in models:\n\n network = model.label\n\n states = training_state[network]\n actions = training_action[network]\n reward = training_reward[network]\n next_states = training_next_state[network]\n done = training_done[network]\n\n # Convert done bools to ints and invert\n done_int = np.invert(done).astype(np.int)\n\n # Generalized advantage estimation (gets advantages to train on and value estimates)\n target, advantages = GAE(states, actions, reward, next_states, done_int, model.sample_value, T=128, y=0.99, lam=0.95, use_Q=False)\n\n # train this model\n dataset = Dataset(dict(ob=np.asarray(states), ac=np.asarray(actions), atarg=np.asarray(advantages), vtarg=np.asarray(target)), shuffle=True)\n\n for k in range(4):\n for i, batch in enumerate(dataset.iterate_once(len(states))):\n model.train(batch[\"ob\"], batch[\"ac\"], batch[\"vtarg\"], batch[\"atarg\"], 1.0)\n\n print('FINISHED TRAINING EPISODE')\n\n\ndef general_actor_critic(input_shape_vec, act_output_shape, comm, learn_rate=[0.001, 0.001], trainable=True, label=\"\"):\n\n sess = K.get_session()\n np.random.seed(0)\n tf.set_random_seed(0)\n\n # network 1 (new policy)\n with tf.variable_scope(label+\"_pi_new\", reuse=False):\n inp = Input(shape=input_shape_vec) # [5,6,3]\n # rc_lyr = Lambda(lambda x: ned_to_ripCoords_tf(x, 4000))(inp)\n trunk_x = Reshape([input_shape_vec[0], input_shape_vec[1] * 3])(inp)\n trunk_x = LSTM(128)(trunk_x)\n dist, sample_action_op, action_ph, value_output = ppo_continuous(3, trunk_x)\n\n # network 2 (old policy)\n with tf.variable_scope(label+\"_pi_old\", reuse=False):\n inp_old = Input(shape=input_shape_vec) # [5,6,3]\n # rc_lyr = Lambda(lambda x: ned_to_ripCoords_tf(x, 4000))(inp_old)\n trunk_x = Reshape([input_shape_vec[0], input_shape_vec[1] * 3])(inp_old)\n trunk_x = LSTM(128)(trunk_x)\n dist_old, sample_action_op_old, action_ph_old, value_output_old = ppo_continuous(3, trunk_x)\n\n # additional placeholders\n adv_ph = tf.placeholder(tf.float32, [None], name=\"advantages_ph\")\n alpha_ph = tf.placeholder(tf.float32, shape=(), name=\"alpha_ph\")\n vtarg = tf.placeholder(tf.float32, [None]) # target value placeholder\n\n # loss\n loss = ppo_continuous_loss(dist, dist_old, value_output, action_ph, alpha_ph, adv_ph, vtarg)\n\n # gradient\n with tf.variable_scope(\"grad\", reuse=False):\n gradient = tf_util.flatgrad(loss, tf_util.get_trainable_vars(label+\"_pi_new\"))\n adam = MpiAdam(tf_util.get_trainable_vars(label+\"_pi_new\"), 
epsilon=0.00001, sess=sess, comm=comm)\n\n # method for sync'ing the two policies\n assign_old_eq_new = tf_util.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in\n zipsame(tf_util.get_globals_vars(label+\"_pi_old\"), tf_util.get_globals_vars(label+\"_pi_new\"))])\n\n # initialize all the things\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n\n # methods for interacting with this model\n\n def sync_weights():\n assign_old_eq_new(sess=sess)\n\n def sample_action(states, logstd_override=None):\n a = sess.run(sample_action_op, feed_dict={inp: states})\n return a\n\n def sample_value(states):\n v = sess.run(value_output, feed_dict={inp: states})\n return v\n\n def train(states, actions, vtarget, advs, alpha):\n alpha = max(alpha, 0.0)\n adam_lr = learn_rate[0]\n\n g = sess.run([gradient], feed_dict={\n inp: states,\n inp_old: states,\n action_ph: actions,\n adv_ph: advs,\n alpha_ph: alpha,\n vtarg: vtarget\n })\n\n adam.update(g[0], adam_lr * alpha)\n\n # initial sync\n adam.sync()\n sync_weights()\n\n return sync_weights, sample_action, sample_value, train\n\n\ndef ppo_continuous(num_actions, previous_layer):\n\n # act distribution\n action_ph = tf.placeholder(tf.float32, [None, num_actions], name=\"actions_ph\")\n means = Dense(num_actions, activation=\"linear\", kernel_initializer=ORTHO_01)(previous_layer)\n vlogstd = tf.get_variable(name='pi/vlogstd', shape=[1, num_actions], initializer=tf.zeros_initializer())\n means_and_logstd = tf.concat([means, means*0.0 + vlogstd], 1)\n distribution = DiagGaussianProbabilityDistribution(means_and_logstd)\n\n # sample op\n sample_action_op = distribution.sample()\n\n # value\n value_output = Dense(1)(previous_layer)\n\n return distribution, sample_action_op, action_ph, value_output\n\n\ndef ppo_continuous_loss(new_dist, old_dist, value_output, actions_ph, alpha_ph, adv_ph, val_ph, clipping_epsilon=0.2):\n\n ratio = tf.exp(new_dist.logp(actions_ph) - old_dist.logp(actions_ph))\n epsilon = clipping_epsilon * alpha_ph\n surr1 = ratio * adv_ph\n surr2 = tf.clip_by_value(ratio, 1.0 - epsilon, 1.0 + epsilon) * adv_ph\n ploss = - tf.reduce_mean(tf.minimum(surr1, surr2))\n vloss = tf.reduce_mean(tf.square(value_output - val_ph))\n loss = ploss + vloss\n\n return loss\n\n\nclass BehaviorModel:\n def __init__(self, input_shape, output_shape, comm, label='behavior'):\n self.label = label\n self.input_shape = input_shape\n self.output_shape = output_shape\n\n # These are all methods! 
Use them to interact with the ppo models.\n self.sync_weights, self.sample_action, self.sample_value, self.train = general_actor_critic(self.input_shape, self.output_shape, comm, label=self.label)\n\n def get_action(self, state):\n action = self.sample_action(np.asarray([state]))[0] # batch dimension 1\n return action\n\n\nclass MetaAgent:\n def __init__(self, input_shape, behavior_primitive_mdls, comm):\n self.label = \"meta\"\n self.behavior_primitive_mdls = behavior_primitive_mdls\n self.input_shape = input_shape\n self.output_shape = len(behavior_primitive_mdls)\n self.sync_weights, self.sample_action, self.sample_value, self.train = general_actor_critic(self.input_shape, self.output_shape, comm, label=self.label)\n\n def get_action(self, state):\n meta_action = self.sample_action(np.asarray([state]))[0]\n beh_actions = np.array([bm.get_action(state) for bm in self.behavior_primitive_mdls])\n\n meta_action_softmax = np.exp(meta_action)/sum(np.exp(meta_action))\n\n # Get the argmax of the softmax\n meta_action_argmax = np.argmax(meta_action_softmax, axis=-1)\n\n # Get label from argmax\n label = self.behavior_primitive_mdls[meta_action_argmax].label\n\n # Should be doing a vectorized dot product\n complete_action = np.tensordot(beh_actions, np.expand_dims(meta_action_softmax, axis=0), axes=[0, 1])\n # complete_action = np.tensordot(meta_action_softmax, beh_actions, axes=[0, 1])\n\n return complete_action, meta_action_softmax, beh_actions, label\n" ]
[ [ "numpy.expand_dims", "tensorflow.concat", "numpy.asarray", "tensorflow.minimum", "numpy.exp", "tensorflow.keras.backend.set_session", "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.Session", "tensorflow.square", "numpy.invert", "tensorflow.keras.layers.Dense", "tensorflow.zeros_initializer", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.set_random_seed", "tensorflow.keras.layers.Reshape", "tensorflow.clip_by_value", "numpy.random.seed", "tensorflow.keras.backend.get_session", "tensorflow.assign", "tensorflow.keras.initializers.Orthogonal", "tensorflow.keras.layers.LSTM", "tensorflow.variable_scope", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
r-matsuzaka/baby-steps-of-rl-ja
[ "fdfd1da6df2fc12168c7f0c1ed211f09b0da1135" ]
[ "IRL/bayesian.py" ]
[ "import numpy as np\nimport scipy.stats\nfrom planner import PolicyIterationPlanner\nfrom scipy.special import logsumexp\nfrom tqdm import tqdm\n\n\nclass BayesianIRL:\n def __init__(self, env, eta=0.8, prior_mean=0.0, prior_scale=0.5):\n self.env = env\n self.planner = PolicyIterationPlanner(env)\n self.eta = eta\n self._mean = prior_mean\n self._scale = prior_scale\n self.prior_dist = scipy.stats.norm(loc=prior_mean, scale=prior_scale)\n\n def estimate(\n self,\n trajectories,\n epoch=50,\n gamma=0.3,\n learning_rate=0.1,\n sigma=0.05,\n sample_size=20,\n ):\n num_states = len(self.env.states)\n reward = np.random.normal(size=num_states, loc=self._mean, scale=self._scale)\n\n def get_q(r, g):\n self.planner.reward_func = lambda s: r[s]\n V = self.planner.plan(g)\n Q = self.planner.policy_to_q(V, gamma)\n return Q\n\n for i in range(epoch):\n noises = np.random.randn(sample_size, num_states)\n scores = []\n for n in tqdm(noises):\n _reward = reward + sigma * n\n Q = get_q(_reward, gamma)\n\n # Calculate prior (sum of log prob).\n reward_prior = np.sum(self.prior_dist.logpdf(_r) for _r in _reward)\n\n # Calculate likelihood.\n likelihood = self.calculate_likelihood(trajectories, Q)\n # Calculate posterior.\n posterior = likelihood + reward_prior\n scores.append(posterior)\n\n rate = learning_rate / (sample_size * sigma)\n scores = np.array(scores)\n normalized_scores = (scores - scores.mean()) / scores.std()\n noise = np.mean(noises * normalized_scores.reshape((-1, 1)), axis=0)\n reward = reward + rate * noise\n print(\"At iteration {} posterior={}.\".format(i, scores.mean()))\n\n reward = reward.reshape(self.env.shape)\n return reward\n\n def calculate_likelihood(self, trajectories, Q):\n mean_log_prob = 0.0\n for t in trajectories:\n t_log_prob = 0.0\n for s, a in t:\n expert_value = self.eta * Q[s][a]\n total = [self.eta * Q[s][_a] for _a in self.env.actions]\n t_log_prob += expert_value - logsumexp(total)\n mean_log_prob += t_log_prob\n mean_log_prob /= len(trajectories)\n return mean_log_prob\n\n\nif __name__ == \"__main__\":\n\n def test_estimate():\n from environment import GridWorldEnv\n\n env = GridWorldEnv(\n grid=[\n [0, 0, 0, 1],\n [0, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, 0, 0],\n ]\n )\n # Train Teacher\n teacher = PolicyIterationPlanner(env)\n teacher.plan()\n trajectories = []\n print(\"Gather demonstrations of teacher.\")\n for i in range(20):\n s = env.reset()\n done = False\n steps = []\n while not done:\n a = teacher.act(s)\n steps.append((s, a))\n n_s, r, done, _ = env.step(a)\n s = n_s\n trajectories.append(steps)\n\n print(\"Estimate reward.\")\n irl = BayesianIRL(env)\n rewards = irl.estimate(trajectories)\n print(rewards)\n env.plot_on_grid(rewards)\n\n test_estimate()\n" ]
[ [ "numpy.random.normal", "numpy.array", "numpy.random.randn", "scipy.special.logsumexp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "0.19", "1.5", "1.2", "1.7", "1.0", "1.3", "1.8" ], "tensorflow": [] } ]
teomores/Oracle_HPC_contest
[ "6be9a097abc6d4b45f7c80e7095f536a38b13161" ]
[ "features/target.py" ]
[ "import pandas as pd\nimport numpy as np\nimport os\n\ndef target(df_exp_train, path=\"\"):\n path_validation = os.path.join(path, \"test.csv\")\n df_val = pd.read_csv(path_validation, escapechar=\"\\\\\")\n df_exp_train = df_exp_train.merge(df_val[['record_id', 'linked_id']], how='left', left_on='queried_record_id', right_on='record_id').drop('record_id', axis=1)\n\n def extract_target(predicted, linked):\n res = np.where(predicted == linked, 1, 0)\n return res\n\n df_exp_train['target'] = extract_target(df_exp_train.predicted_record_id.values, df_exp_train.linked_id.values)\n #return df_exp_train.drop(['linked_id'], axis=1)\n return df_exp_train['target']\n" ]
[ [ "pandas.read_csv", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
elvijs/GPflow
[ "056e59f2c5aa2b5021de9b7b91ce1cee2ea0bb92", "056e59f2c5aa2b5021de9b7b91ce1cee2ea0bb92", "056e59f2c5aa2b5021de9b7b91ce1cee2ea0bb92" ]
[ "gpflow/expectations/cross_kernels.py", "tests/test_mean_functions.py", "gpflow/mean_functions.py" ]
[ "import tensorflow as tf\n\nfrom . import dispatch\nfrom .. import kernels\nfrom ..inducing_variables import InducingPoints\nfrom ..probability_distributions import DiagonalGaussian, Gaussian\nfrom .expectations import expectation\n\n\[email protected]((Gaussian, DiagonalGaussian), kernels.SquaredExponential,\n InducingPoints, kernels.Linear, InducingPoints)\ndef _E(p, sqexp_kern, feat1, lin_kern, feat2, nghp=None):\n \"\"\"\n Compute the expectation:\n expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)\n - K_lin_{.,.} :: SqExp kernel\n - K_sqexp_{.,.} :: Linear kernel\n Different Z1 and Z2 are handled if p is diagonal and K_lin and K_sqexp have disjoint\n active_dims, in which case the joint expectations simplify into a product of expectations\n\n :return: NxM1xM2\n \"\"\"\n if sqexp_kern.on_separate_dims(lin_kern) and isinstance(\n p, DiagonalGaussian): # no joint expectations required\n eKxz1 = expectation(p, (sqexp_kern, feat1))\n eKxz2 = expectation(p, (lin_kern, feat2))\n return eKxz1[:, :, None] * eKxz2[:, None, :]\n\n if feat1 != feat2:\n raise NotImplementedError(\n \"inducing_variables have to be the same for both kernels.\")\n\n if sqexp_kern.active_dims != lin_kern.active_dims:\n raise NotImplementedError(\n \"active_dims have to be the same for both kernels.\")\n\n # use only active dimensions\n Xcov = sqexp_kern.slice_cov(\n tf.linalg.diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov)\n Z, Xmu = sqexp_kern.slice(feat1.Z, p.mu)\n\n N = tf.shape(Xmu)[0]\n D = tf.shape(Xmu)[1]\n\n def take_with_ard(value):\n if not sqexp_kern.ard:\n return tf.zeros((D, ), dtype=value.dtype) + value\n return value\n\n lin_kern_variances = take_with_ard(lin_kern.variance)\n sqexp_kern_lengthscale = take_with_ard(sqexp_kern.lengthscale)\n\n chol_L_plus_Xcov = tf.linalg.cholesky(\n tf.linalg.diag(sqexp_kern_lengthscale**2) + Xcov) # NxDxD\n\n Z_transpose = tf.transpose(Z)\n all_diffs = Z_transpose - tf.expand_dims(Xmu, 2) # NxDxM\n exponent_mahalanobis = tf.linalg.triangular_solve(chol_L_plus_Xcov,\n all_diffs,\n lower=True) # NxDxM\n exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis),\n 1) # NxM\n exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM\n\n sqrt_det_L = tf.reduce_prod(sqexp_kern_lengthscale)\n sqrt_det_L_plus_Xcov = tf.exp(\n tf.reduce_sum(tf.math.log(tf.linalg.diag_part(chol_L_plus_Xcov)),\n axis=1))\n determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N\n eKxz_sqexp = sqexp_kern.variance * (\n determinants[:, None] * exponent_mahalanobis\n ) ## NxM <- End RBF eKxz code\n\n tiled_Z = tf.tile(tf.expand_dims(Z_transpose, 0), (N, 1, 1)) # NxDxM\n z_L_inv_Xcov = tf.linalg.matmul(tiled_Z,\n Xcov / sqexp_kern_lengthscale[:, None]**2.,\n transpose_a=True) # NxMxD\n\n cross_eKzxKxz = tf.linalg.cholesky_solve(\n chol_L_plus_Xcov,\n (lin_kern_variances * sqexp_kern_lengthscale**2.)[..., None] *\n tiled_Z) # NxDxM\n\n cross_eKzxKxz = tf.linalg.matmul(\n (z_L_inv_Xcov + Xmu[:, None, :]) * eKxz_sqexp[..., None],\n cross_eKzxKxz) # NxMxM\n return cross_eKzxKxz\n\n\[email protected]((Gaussian, DiagonalGaussian), kernels.Linear,\n InducingPoints, kernels.SquaredExponential, InducingPoints)\ndef _E(p, lin_kern, feat1, sqexp_kern, feat2, nghp=None):\n \"\"\"\n Compute the expectation:\n expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)\n - K_lin_{.,.} :: Linear kernel\n - K_sqexp_{.,.} :: sqexp kernel\n Different Z1 and Z2 are handled if p is diagonal and K_lin and K_sqexp have disjoint\n active_dims, in which case the joint expectations simplify into a 
product of expectations\n\n :return: NxM1xM2\n \"\"\"\n return tf.linalg.adjoint(\n expectation(p, (sqexp_kern, feat2), (lin_kern, feat1)))\n", "# Copyright 2017 the GPflow authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nimport gpflow\nfrom gpflow.config import default_int\nfrom gpflow.inducing_variables import InducingPoints\nfrom gpflow.mean_functions import Additive, Constant, Linear, Product, SwitchedMeanFunction, Zero\n\nrng = np.random.RandomState(99021)\n\n\nclass Datum:\n input_dim, output_dim = 3, 2\n N, Ntest, M = 20, 30, 10\n\n\n_mean_functions = [\n Zero(),\n Linear(A=rng.randn(Datum.input_dim, Datum.output_dim), b=rng.randn(Datum.output_dim, 1).reshape(-1)),\n Constant(c=rng.randn(Datum.output_dim, 1).reshape(-1))\n]\n\n\[email protected]('mean_function_1', _mean_functions)\[email protected]('mean_function_2', _mean_functions)\[email protected]('operation', ['+', 'x'])\ndef test_mean_functions_output_shape(mean_function_1, mean_function_2, operation):\n \"\"\"\n Test the output shape for basic and compositional mean functions, also\n check that the combination of mean functions returns the correct class\n \"\"\"\n X = np.random.randn(Datum.N, Datum.input_dim)\n Y = mean_function_1(X)\n # basic output shape check\n assert Y.shape in [(Datum.N, Datum.output_dim), (Datum.N, 1)]\n\n # composed mean function output shape check\n if operation == '+':\n mean_composed = mean_function_1 + mean_function_2\n elif operation == 'x':\n mean_composed = mean_function_1 * mean_function_2\n else:\n raise (NotImplementedError)\n\n Y_composed = mean_composed(X)\n assert Y_composed.shape in [(Datum.N, Datum.output_dim), (Datum.N, 1)]\n\n\[email protected]('mean_function_1', _mean_functions)\[email protected]('mean_function_2', _mean_functions)\[email protected]('operation', ['+', 'x'])\ndef test_mean_functions_composite_type(mean_function_1, mean_function_2, operation):\n if operation == '+':\n mean_composed = mean_function_1 + mean_function_2\n assert isinstance(mean_composed, Additive)\n elif operation == 'x':\n mean_composed = mean_function_1 * mean_function_2\n assert isinstance(mean_composed, Product)\n else:\n raise (NotImplementedError)\n\n\n_linear_functions = [\n Linear(A=rng.randn(Datum.input_dim, Datum.output_dim), b=rng.randn(Datum.output_dim, 1).reshape(-1))\n for _ in range(3)\n]\n\n# Append inverse of first Linear mean function in _linear_functions\n_linear_functions.append(Linear(A=-1. * _linear_functions[0].A, b=-1. * _linear_functions[0].b))\n\n_constant_functions = [Constant(c=rng.randn(Datum.output_dim, 1).reshape(-1)) for _ in range(3)]\n# Append inverse of first Constant mean function in _constant_functions\n_constant_functions.append(Constant(c=-1. 
* _constant_functions[0].c))\n\n\ndef _create_GPR_model_with_bias(X, Y, mean_function):\n return gpflow.models.GPR((X, Y), mean_function=mean_function, kernel=gpflow.kernels.Bias(Datum.input_dim))\n\n\[email protected]('mean_functions', [_linear_functions, _constant_functions])\ndef test_mean_functions_distributive_property(mean_functions):\n \"\"\"\n Tests that distributive property of addition and multiplication holds for mean functions\n (both Constant and Linear): A * (B + C) = A * B + A * C\n \"\"\"\n X, Y = rng.randn(Datum.N, Datum.input_dim), rng.randn(Datum.N, Datum.output_dim)\n Xtest = rng.randn(30, Datum.input_dim)\n A, B, C = mean_functions[0], mean_functions[1], mean_functions[2]\n lhs = Product(A, Additive(B, C)) # A * (B + C)\n rhs = Additive(Product(A, B), Product(A, C)) # A * B + A * C\n\n model_lhs = _create_GPR_model_with_bias(X, Y, mean_function=lhs)\n model_rhs = _create_GPR_model_with_bias(X, Y, mean_function=rhs)\n\n mu_lhs, var_lhs = model_lhs.predict_f(Xtest)\n mu_rhs, var_rhs = model_rhs.predict_f(Xtest)\n\n assert_allclose(mu_lhs, mu_rhs)\n assert_allclose(var_lhs, var_rhs)\n\n\[email protected]('mean_functions', [_linear_functions, _constant_functions])\ndef test_mean_functions_A_minus_A_equals_zero(mean_functions):\n \"\"\"\n Tests that the addition the inverse of a mean function to itself is equivalent to having a\n Zero mean function: A + (-A) = 0\n \"\"\"\n X, Y = rng.randn(Datum.N, Datum.input_dim), rng.randn(Datum.N, Datum.output_dim)\n Xtest = rng.randn(30, Datum.input_dim)\n A, A_inverse = mean_functions[0], mean_functions[-1]\n lhs = Additive(A, A_inverse) # A + (-A)\n rhs = Zero() # 0\n\n model_lhs = _create_GPR_model_with_bias(X, Y, mean_function=lhs)\n model_rhs = _create_GPR_model_with_bias(X, Y, mean_function=rhs)\n\n mu_lhs, var_lhs = model_lhs.predict_f(Xtest)\n mu_rhs, var_rhs = model_rhs.predict_f(Xtest)\n\n assert_allclose(mu_lhs, mu_rhs)\n assert_allclose(var_lhs, var_rhs)\n\n\[email protected]('mean_functions', [_linear_functions])\ndef test_linear_mean_functions_associative_property(mean_functions):\n \"\"\"\n Tests that associative property of addition holds for linear mean functions:\n A + (B + (-A)) = B = (A + B) + (-A)\n \"\"\"\n X, Y = rng.randn(Datum.N, Datum.input_dim), rng.randn(Datum.N, Datum.output_dim)\n Xtest = rng.randn(30, Datum.input_dim)\n A, B, A_inverse = mean_functions[0], mean_functions[1], mean_functions[-1]\n\n lhs = Additive(A, Additive(B, A_inverse)) # A + (B + (-A))\n rhs = Additive(Additive(A, B), A_inverse) # (A + B) + (-A)\n\n model_lhs = _create_GPR_model_with_bias(X, Y, mean_function=lhs)\n model_b = _create_GPR_model_with_bias(X, Y, mean_function=B)\n model_rhs = _create_GPR_model_with_bias(X, Y, mean_function=rhs)\n\n mu_lhs, var_lhs = model_lhs.predict_f(Xtest)\n mu_b, var_b = model_b.predict_f(Xtest)\n mu_rhs, var_rhs = model_rhs.predict_f(Xtest)\n\n assert_allclose(mu_lhs, mu_b)\n assert_allclose(var_lhs, var_b)\n assert_allclose(mu_b, mu_rhs)\n assert_allclose(var_b, var_rhs)\n\n\[email protected]('N, D', [[10, 3]])\ndef test_switched_mean_function(N, D):\n \"\"\"\n Test for the SwitchedMeanFunction.\n \"\"\"\n X = np.hstack([rng.randn(N, D), 1.0 * rng.randint(0, 2, N).reshape(-1, 1)])\n zeros, ones = Constant(np.zeros(1)), Constant(np.ones(1))\n switched_mean = SwitchedMeanFunction([zeros, ones])\n\n np_list = np.array([0., 1.])\n result_ref = (np_list[X[:, D].astype(default_int())]).reshape(-1, 1)\n result = switched_mean(X)\n\n assert_allclose(result, result_ref)\n\n\ndef test_bug_277_regression():\n 
\"\"\"\n See github issue #277. This is a regression test.\n \"\"\"\n model1, model2 = Linear(), Linear()\n assert model1.b.numpy() == model2.b.numpy()\n model2.b.assign([1.])\n assert not model1.b.numpy() == model2.b.numpy()\n\n\n# TODO: (@sergio.pasc) finish tests below once GP models are ready for TF2.0\n_model_classes = [\n gpflow.models.GPR,\n gpflow.models.SGPR,\n gpflow.models.GPRFITC,\n gpflow.models.SVGP,\n gpflow.models.VGP,\n gpflow.models.GPMC,\n gpflow.models.SGPMC\n]\n\n\[email protected]('model_class', _model_classes)\ndef test_models_with_mean_functions_changes(model_class):\n \"\"\"\n Simply check that all models have a higher prediction with a constant mean\n function than with a zero mean function.\n\n For compositions of mean functions check that multiplication/ addition of\n a constant results in a higher prediction, whereas addition of zero/\n mutliplication with one does not.\n \"\"\"\n data = rng.randn(Datum.N, Datum.input_dim), rng.randn(Datum.N, 1)\n predict_at = rng.randn(Datum.Ntest, Datum.input_dim)\n inducing_variable = InducingPoints(Z=rng.randn(Datum.M, Datum.input_dim))\n kernel = gpflow.kernels.Matern32()\n likelihood = gpflow.likelihoods.Gaussian()\n zero_mean = Zero()\n non_zero_mean = Constant(c=np.ones(1) * 10)\n\n if model_class in [gpflow.models.GPR]:\n model_zero_mean = model_class(data, kernel=kernel, mean_function=zero_mean)\n model_non_zero_mean = model_class(data, kernel=kernel, mean_function=non_zero_mean)\n elif model_class in [gpflow.models.VGP]:\n model_zero_mean = model_class(data, likelihood=likelihood, kernel=kernel, mean_function=zero_mean)\n model_non_zero_mean = model_class(data, likelihood=likelihood, kernel=kernel, mean_function=non_zero_mean)\n elif model_class in [gpflow.models.SVGP]:\n model_zero_mean = model_class(kernel=kernel,\n likelihood=likelihood,\n inducing_variable=inducing_variable,\n mean_function=zero_mean)\n model_non_zero_mean = model_class(kernel=kernel,\n likelihood=likelihood,\n inducing_variable=inducing_variable,\n mean_function=non_zero_mean)\n elif model_class in [gpflow.models.SGPR, gpflow.models.GPRFITC]:\n model_zero_mean = model_class(data,\n kernel=kernel,\n inducing_variable=inducing_variable,\n mean_function=zero_mean)\n model_non_zero_mean = model_class(data,\n kernel=kernel,\n inducing_variable=inducing_variable,\n mean_function=non_zero_mean)\n elif model_class in [gpflow.models.SGPMC]:\n model_zero_mean = model_class(data,\n kernel=kernel,\n likelihood=likelihood,\n inducing_variable=inducing_variable,\n mean_function=zero_mean)\n model_non_zero_mean = model_class(data,\n kernel=kernel,\n likelihood=likelihood,\n inducing_variable=inducing_variable,\n mean_function=non_zero_mean)\n elif model_class in [gpflow.models.GPMC]:\n model_zero_mean = model_class(data,\n kernel=kernel,\n likelihood=likelihood,\n mean_function=zero_mean)\n model_non_zero_mean = model_class(data,\n kernel=kernel,\n likelihood=likelihood,\n mean_function=non_zero_mean)\n else:\n raise (NotImplementedError)\n\n mu_zero, var_zero = model_zero_mean.predict_f(predict_at)\n mu_non_zero, var_non_zero = model_non_zero_mean.predict_f(predict_at)\n # predictive variance remains unchanged after modifying mean function\n assert np.all(var_zero.numpy() == var_non_zero.numpy())\n # predictive mean changes after modifying mean function\n assert not np.all(mu_zero.numpy() == mu_non_zero.numpy())\n", "# Copyright 2016 James Hensman, alexggmatthews, PabloLeon, Valentine Svensson\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThroughout GPflow, by default, latent functions being modelled with Gaussian\nprocesses are assumed to have zero mean, f ~ GP(0, k(x,x')).\n\nIn some cases we may wish to model only the deviation from a fixed function\nwith a Gaussian process. For flexibility this fixed function could be both\ninput dependent and parameterised function, μ(x; θ),\nwith some unknown parameters θ, resulting in f ~ GP(μ(x;θ), k(x,x')).\n\nThe GPflow :class:`MeanFunction <gpflow.mean_functions.MeanFunction>` class\nallows this to be done whilst additionally learning parameters of the\nparametric function.\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .base import Module, Parameter\nfrom .config import default_float\n\n\nclass MeanFunction(Module):\n \"\"\"\n The base mean function class.\n To implement a mean function, write the __call__ method. This takes a\n tensor X and returns a tensor m(X). In accordance with the GPflow\n standard, each row of X represents one datum, and each row of Y is computed\n independently for each row of X.\n\n MeanFunction classes can have parameters, see the Linear class for an\n example.\n \"\"\"\n\n def __call__(self, X):\n raise NotImplementedError(\n \"Implement the __call__ method for this mean function\")\n\n def __add__(self, other):\n return Additive(self, other)\n\n def __mul__(self, other):\n return Product(self, other)\n\n\nclass Linear(MeanFunction):\n \"\"\"\n y_i = A x_i + b\n \"\"\"\n\n def __init__(self, A=None, b=None):\n \"\"\"\n A is a matrix which maps each element of X to Y, b is an additive\n constant.\n\n If X has N rows and D columns, and Y is intended to have Q columns,\n then A must be [D, Q], b must be a vector of length Q.\n \"\"\"\n MeanFunction.__init__(self)\n A = np.ones((1, 1), dtype=default_float()) if A is None else A\n b = np.zeros(1, dtype=default_float()) if b is None else b\n self.A = Parameter(np.atleast_2d(A))\n self.b = Parameter(b)\n\n def __call__(self, X):\n return tf.tensordot(X, self.A, [[-1], [0]]) + self.b\n\n\nclass Identity(Linear):\n \"\"\"\n y_i = x_i\n \"\"\"\n\n def __init__(self, input_dim=None):\n Linear.__init__(self)\n self.input_dim = input_dim\n\n def __call__(self, X):\n return X\n\n @property\n def A(self):\n if self.input_dim is None:\n raise ValueError(\n \"An input_dim needs to be specified when using the \"\n \"`Identity` mean function in combination with expectations.\")\n return tf.eye(self.input_dim, dtype=default_float())\n\n @property\n def b(self):\n if self.input_dim is None:\n raise ValueError(\n \"An input_dim needs to be specified when using the \"\n \"`Identity` mean function in combination with expectations.\")\n\n return tf.zeros(self.input_dim, dtype=default_float())\n\n @A.setter\n def A(self, A):\n pass\n\n @b.setter\n def b(self, b):\n pass\n\n\nclass Constant(MeanFunction):\n def __init__(self, c=None):\n super().__init__()\n c = np.zeros(1) if c is None else c\n self.c = Parameter(c)\n\n def __call__(self, X):\n shape = [tf.shape(X)[0], 1]\n return tf.tile(tf.reshape(self.c, (1, 
-1)), shape)\n\n\nclass Zero(Constant):\n def __init__(self, output_dim=1):\n Constant.__init__(self)\n self.output_dim = output_dim\n del self.c\n\n def __call__(self, X):\n return tf.zeros((tf.shape(X)[0], self.output_dim), dtype=X.dtype)\n\n\nclass SwitchedMeanFunction(MeanFunction):\n \"\"\"\n This class enables to use different (independent) mean_functions respective\n to the data 'label'.\n We assume the 'label' is stored in the extra column of X.\n \"\"\"\n\n def __init__(self, meanfunction_list):\n super().__init__()\n for m in meanfunction_list:\n assert isinstance(m, MeanFunction)\n self.meanfunctions = meanfunction_list\n\n def __call__(self, X):\n ind = tf.gather(tf.transpose(X), tf.shape(X)[1] - 1) # ind = X[:,-1]\n ind = tf.cast(ind, tf.int32)\n X = tf.transpose(\n tf.gather(tf.transpose(X),\n tf.range(0, tf.shape(X)[1] - 1))) # X = X[:,:-1]\n\n # split up X into chunks corresponding to the relevant likelihoods\n x_list = tf.dynamic_partition(X, ind, len(self.meanfunctions))\n # apply the likelihood-function to each section of the data\n results = [m(x) for x, m in zip(x_list, self.meanfunctions)]\n # stitch the results back together\n partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind,\n len(self.meanfunctions))\n return tf.dynamic_stitch(partitions, results)\n\n\nclass Additive(MeanFunction):\n def __init__(self, first_part, second_part):\n MeanFunction.__init__(self)\n self.add_1 = first_part\n self.add_2 = second_part\n\n def __call__(self, X):\n return tf.add(self.add_1(X), self.add_2(X))\n\n\nclass Product(MeanFunction):\n def __init__(self, first_part, second_part):\n MeanFunction.__init__(self)\n\n self.prod_1 = first_part\n self.prod_2 = second_part\n\n def __call__(self, X):\n return tf.multiply(self.prod_1(X), self.prod_2(X))\n" ]
[ [ "tensorflow.linalg.diag_part", "tensorflow.transpose", "tensorflow.linalg.cholesky_solve", "tensorflow.shape", "tensorflow.zeros", "tensorflow.linalg.triangular_solve", "tensorflow.linalg.diag", "tensorflow.linalg.matmul", "tensorflow.exp", "tensorflow.expand_dims", "tensorflow.reduce_prod", "tensorflow.square" ], [ "numpy.ones", "numpy.random.randn", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.random.RandomState" ], [ "tensorflow.transpose", "tensorflow.shape", "tensorflow.cast", "tensorflow.reshape", "tensorflow.dynamic_stitch", "numpy.atleast_2d", "tensorflow.tensordot", "numpy.zeros", "tensorflow.size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "1.2" ] } ]
GBillotey/Fractal-shades
[ "99c690cb1114ab7edcbfd9836af585fed2b133e8", "99c690cb1114ab7edcbfd9836af585fed2b133e8" ]
[ "tests/test_memmap.py", "src/fractalshades/numpy_utils/xrange.py" ]
[ "# -*- coding: utf-8 -*-\nimport os\nimport unittest\nimport concurrent.futures\nimport contextlib\nimport sys\nimport uuid\n\nimport numpy as np\nfrom numpy.lib.format import open_memmap\n\n\nimport fractalshades.utils as fsutils\nimport fractalshades.settings as fssettings\nfrom fractalshades.mprocessing import Multiprocess_filler #, globalize\nimport test_config\n\n\n# https://gist.github.com/EdwinChan/3c13d3a746bb3ec5082f\[email protected]\ndef globalized(func):\n namespace = sys.modules[func.__module__]\n name, qualname = func.__name__, func.__qualname__\n func.__name__ = func.__qualname__ = f'_{name}_{uuid.uuid4().hex}'\n setattr(namespace, func.__name__, func)\n try:\n yield\n finally:\n delattr(namespace, func.__name__)\n func.__name__, func.__qualname__ = name, qualname\n\n\nclass Test_mmap(unittest.TestCase):\n \n def setUp(self):\n _mmap_dir = os.path.join(test_config.temporary_data_dir, \"_mmap_dir\")\n fsutils.mkdir_p(_mmap_dir)\n self._mmap_dir = _mmap_dir\n self.process_file = \"test_mmap_process\"\n self.thread_file = \"test_mmap_thread\"\n self.nx = 100\n self.ny = 100\n\n def run_mmap(self, file):\n print(\"Executing our Task on Process: {}\".format(os.getpid()))\n arr = np.ones([self.nx, self.ny], dtype=np.float32)\n \n mmap = open_memmap(\n filename=os.path.join(self._mmap_dir, file), \n mode='w+',\n dtype=np.float32,\n shape=(self.nx, self.ny),\n fortran_order=False,\n version=None)\n mmap[:] = arr\n print(\"Finished our Task on Process: {}\".format(os.getpid()))\n \n# \n# def test_in_process(self):\n# func = self.run_mmap.__func__\n# with globalized(func\n# ), concurrent.futures.ProcessPoolExecutor(\n# max_workers=1) as subprocess:\n# subprocess.submit(func, (self, self.thread_file)).result()\n# \n# # check correct execution\n# mmap = open_memmap(\n# filename=os.path.join(self._mmap_dir, self.thread_file), \n# mode='w+',\n# dtype=np.int32,\n# shape=(self.nx, self.ny),\n# fortran_order=False,\n# version=None)\n# arr = mmap[:]\n# print(\"arr\", arr)\n# \n\n def test_in_thread(self):\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=1) as threadpool:\n threadpool.submit(self.run_mmap, self.process_file).result()\n print(\"in main loop\")\n \n # check correct execution\n mmap = open_memmap(\n filename=os.path.join(self._mmap_dir, self.process_file), \n mode='r+',\n dtype=np.float32,\n shape=(self.nx, self.ny),\n fortran_order=False,\n version=None)\n res = mmap[:]\n print(\"arr\", res)\n expected = np.ones([self.nx, self.ny], dtype=np.float32)\n np.testing.assert_array_equal(res, expected)\n\n \n\n\n\n\nif __name__ == \"__main__\":\n\n fssettings.multiprocessing_start_method = \"fork\"\n full_test = False\n runner = unittest.TextTestRunner(verbosity=2)\n if full_test:\n runner.run(test_config.suite([Test_mmap]))\n else:\n suite = unittest.TestSuite()\n suite.addTest(Test_mmap(\"test_in_thread\"))\n runner.run(suite)\n\n", "# -*- coding: utf-8 -*-\nimport numpy as np\nimport numbers\nimport re\nimport mpmath\n\n#import time\n\ndef mpc_to_Xrange(mpc, dtype=np.complex128):\n \"\"\" Convert a mpc complex to a Xrange array\"\"\"\n select = {np.dtype(np.complex64): np.float32,\n np.dtype(np.complex128): np.float64}\n float_type = select[np.dtype(dtype)]\n mpcx_m, mpcx_exp = mpmath.frexp(mpc.real)\n mpcy_m, mpcy_exp = mpmath.frexp(mpc.imag)\n mpcx_m = float_type(mpcx_m)\n mpcy_m = float_type(mpcy_m)\n\n if (mpcx_exp > mpcy_exp):\n m = mpcx_m + 1j * np.ldexp(mpcy_m, mpcy_exp - mpcx_exp)\n exp = mpcx_exp\n elif (mpcx_exp < mpcy_exp):\n m = 1j * mpcy_m + 
np.ldexp(mpcx_m, mpcx_exp - mpcy_exp)\n exp = mpcy_exp\n else:\n m = mpcx_m + 1j * mpcy_m\n exp = mpcx_exp\n \n return Xrange_array(m, np.int32(exp))\n #np.array(exp, np.int32))\n\n# b = Xrange_array._build_complex(\n# Xrange_array(\n# np.array(mpcx_m, float_type),\n# np.array(mpcx_exp, np.int32)),\n# Xrange_array(\n# np.array(mpcy_m, float_type),\n# np.array(mpcy_exp, np.int32)))\n# print(a)\n# print(b)\n# if str(time.time())[13] == 0:\n# raise ValueError()\n# return b\n\ndef mpf_to_Xrange(mpf, dtype=np.float64):\n \"\"\" Convert a mpc float to a Xrange array\"\"\"\n mpf_m, mpf_exp = mpmath.frexp(mpf)\n return Xrange_array(dtype(mpf_m), np.int32(mpf_exp))\n # np.array(mpf_m, dtype), np.array(mpf_exp, np.int32))\n\ndef Xrange_to_mpfc(arr):\n \"\"\" Convert a Xrange array of size-1 to a mpf or mpc\"\"\"\n if arr.is_complex:\n return Xrange_to_mpfc(arr.real) + 1.j * Xrange_to_mpfc(arr.imag)\n else:\n m = arr._mantissa\n exp = arr._exp\n return mpmath.ldexp(float(m), int(exp))\n\ndef get_xr_dtype(dtype):\n return np.dtype([('mantissa', dtype),\n ('exp', np.int32)], align=False)\n\nclass Xrange_array(np.ndarray):\n \"\"\"\nArrays class for \"extended range\" floats or complex numbers.\n This class allows to represent floating points numbers in simple or double\n precision in the range [1e-646456992, 1e+646456992].\n\nParameters:\n mantissa : Can be either\n - a nd.array of dtype: one of the supported dtype float32, float64,\n complex64, complex128,\n - a string array, each item reprenting a float in standard or\n e-notation e.g. [\"123.456e789\", \"-.3e-7\", \"1.e-1000\", \"1.0\"]\n Note that list inputs are accepted in both cases, and passed\n through np.asarray() method.\n exp : int32 array of shape sh, if None will default to 0\n ignored if mantissa is provided as string array.\n str_input_dtype : np.float32 of np.float64, only used if mantissa\n provided as a string, to allow specification of dataype.\n (if None or not provided, will default to float64 / complex128)\n\nReturn:\n Xrange_array of same shape as parameter 'mantissa' representing\n (real case): mantissa * 2**exp \n (complex case): (mantissa.real + 1.j * mantissa.imag) * 2**exp\n\nUsage:\n >>> Xa = Xrange_array([[\"123.456e-1789\", \"-.3e-7\"], [\"1.e700\", \"1.0\"]])\n >>> print(Xa**2)\n [[ 1.52413839e-3574 9.00000000e-16]\n [ 1.00000000e+1400 1.00000000e+00]]\n \n >>> b = np.array([1., 1., np.pi, np.pi], dtype=np.float32)\n >>> Xb = Xrange_array(b)\n >>> for exp10 in range(1001):\n Xb = Xb * [-10., 0.1, 10., -0.1]\n >>> Xb\n <class 'arrays.Xrange_array'>\n shape: (4,)\n internal dtype: [('mantissa', '<f8'), ('exp_re', '<i4')]\n base 10 representation:\n [-1.00000000e+1001 1.00000000e-1001 3.14159274e+1001 -3.14159274e-1001]\n >>> print(Xb)\n [-1.00000000e+1001 1.00000000e-1001 3.14159274e+1001 -3.14159274e-1001]\n\nImplementation details:\n Each scalar in the array is stored as a couple: 1 real or complex and\n 1 int32 integer for an extra base-2 exponent. 
\n The overall array is stored as a structured array of type :\n - (float32, int32)\n - (float64, int32)\n - (complex64, int32)\n - (complex128, int32)\n Hence, the mantissa can be one of 4 supported types :\n float32, float64, complex64, complex128\n\n Each class instance exposes the following properties (\"views\" of the base\n data array):\n real view of real part, as a real Xrange_array (read only)\n imag view of imaginary part, as a real Xrange_array (read only)\n is_complex Scalar boolean\n\n The binary operations implemented are:\n +, -, *, /, <, <=, >, >=\n and their matching 'assignment' operators:\n +=, -=, *=, /=\n\n The unary operations implemented are :\n as numpy unfunc : abs, sqrt, square, conj, log, angle (through arctan2)\n as instance method : abs2 (square of abs)\n\n Xrange_array may silently over/underflow, due to the implementation of its\n exponent as a np.int32 array. If needed, checks for overflow shall be\n implemented downstream in user code.\n >>> np.int32(2**31)\n -2147483648\n\nReference:\n https://numpy.org/devdocs/user/basics.subclassing.html\n \n \"\"\"\n _FLOAT_DTYPES = [np.float32, np.float64]\n _COMPLEX_DTYPES = [np.complex64, np.complex128]\n _DTYPES = _FLOAT_DTYPES + _COMPLEX_DTYPES\n _STRUCT_DTYPES = [get_xr_dtype(dt) for dt in _DTYPES]\n # types that can be 'viewed' as Xrange_array:\n _HANDLED_TYPES = (np.ndarray, numbers.Number, list)\n #__array_priority__ = 20\n def __new__(cls, mantissa, exp=None, str_input_dtype=None):\n \"\"\"\n Constructor\n \"\"\"\n mantissa = np.asarray(mantissa)\n if mantissa.dtype.type == np.str_:\n mantissa, exp = np.vectorize(cls._convert_from_string)(mantissa)\n if str_input_dtype is not None:\n mantissa = np.asarray(mantissa, dtype=str_input_dtype)\n\n data = cls._extended_data_array(mantissa, exp)\n return super().__new__(cls, data.shape, dtype=data.dtype, buffer=data)\n\n @staticmethod\n def _convert_from_string(input_str):\n \"\"\"\n Return mantissa and base 2 exponent from float input string\n \n Parameters\n input_str: string (see exp_pattern for accepted patterns)\n \n Return\n m : mantissa\n exp_re : base 2 exponent\n \"\"\"\n exp_pattern = (\"^([-+]?[0-9]+\\.[0-9]*|[-+]?[0-9]*\\.[0-9]+|[-+]?[0-9]+)\"\n \"([eE]?)([-+]?[0-9]*)$\")\n err_msg = (\"Unsupported Xrange_array string item: <{}>\\n\" +\n \"(Examples of supported input items: \" +\n \"<123.456e789>, <-.123e-127>, <+1e77>, <1.0>, ...)\")\n\n match = re.match(exp_pattern, input_str)\n if match:\n m = float(match.group(1))\n exp_10 = 0\n if match.group(2) in [\"e\", \"E\"]:\n try:\n exp_10 = int(match.group(3))\n if abs(exp_10) > 646456992:\n raise ValueError(\"Overflow int string input, cannot \"\n \"represent exponent with int32, maxint 2**31-1\")\n except ValueError:\n raise ValueError(err_msg.format(input_str))\n # We need 96 bits precision for accurate mantissa in this base-10 to base-2\n # conversion, will use native Python integers, as speed is not critical\n # here.\n # >>> import mpmath\n # >>> mpmath.mp.dps = 30\n # >>> mpmath.log(\"10.\") / mpmath.log(\"2.\") * mpmath.mpf(\"1.e25\")\n # mpmath.log(\"10.\") / mpmath.log(\"2.\") * mpmath.mpf(2**96)\n # mpf('263190258962436467100402834429.2138584375862')\n rr_hex = 263190258962436467100402834429\n exp_10, mod = divmod(exp_10 * rr_hex, 2**96)\n m *= 2.**(mod * 2.**-96)\n return m, exp_10\n else:\n raise ValueError(err_msg.format(input_str))\n\n @staticmethod\n def _extended_data_array(mantissa, exp):#, counter):#_re, exp_im):\n \"\"\"\n Builds the structured internal array.\n \"\"\"\n mantissa_dtype = 
mantissa.dtype\n if mantissa_dtype not in Xrange_array._DTYPES:\n casted = False\n for cast_dtype in Xrange_array._DTYPES:\n if np.can_cast(mantissa_dtype, cast_dtype, \"safe\"):\n mantissa = mantissa.astype(cast_dtype)\n mantissa_dtype = cast_dtype\n casted = True\n break\n if not casted:\n if (mantissa_dtype in Xrange_array._STRUCT_DTYPES\n ) and (exp is None):\n return mantissa # Pass-through\n raise ValueError(\"Unsupported type for Xrange_array {}\".format(\n mantissa_dtype))\n\n # Builds the field-array\n sh = mantissa.shape\n if exp is None:\n exp = np.zeros(sh, dtype=np.int32)\n\n extended_dtype = get_xr_dtype(mantissa_dtype)\n\n data = np.empty(sh, dtype=extended_dtype)\n data['mantissa'] = mantissa\n data['exp'] = exp\n return data\n\n @property\n def is_complex(self):\n \"\"\" boolean scalar, True if Xrange_array is complex\"\"\"\n _dtype = self.dtype\n if len(_dtype) > 1:\n _dtype = _dtype[0]\n return _dtype in Xrange_array._COMPLEX_DTYPES\n\n @property\n def real(self):\n \"\"\"\n Returns a view to the real part of self, as an Xrange_array.\n \"\"\"\n if self.is_complex:\n if self.dtype.names is None:\n return np.asarray(self).real.view(Xrange_array)\n real_bytes = 4\n if self._mantissa.real.dtype == np.float64:\n real_bytes = 8\n data_dtype = np.dtype({'names': ['mantissa', 'exp'],\n 'formats': [\"f\" + str(real_bytes), \"i4\"],\n 'offsets': [0, real_bytes*2],\n 'itemsize': real_bytes * 2 + 4})\n re = np.asarray(self).view(dtype=data_dtype).view(Xrange_array)\n # As exponent is shared between real and imaginary part, this \n # view is read-only\n re.flags.writeable = False\n return re\n else:\n return self\n\n @real.setter\n def real(self, val):\n \"\"\"\n Setting the real part\n note: impacts the imaginary through the exponent\n \"\"\"\n val = val.view(Xrange_array)\n Xrange_array._coexp_ufunc(val._mantissa, val._exp,\n self._mantissa.imag, self._exp)\n arr = np.asarray(self)\n (arr[\"mantissa\"].real, arr[\"mantissa\"].imag, arr[\"exp\"]\n )= Xrange_array._coexp_ufunc(val._mantissa, val._exp,\n self._mantissa.imag, self._exp)\n\n @property\n def imag(self):\n \"\"\"\n Returns a view to the imaginary part of self, as an Xrange_array.\n \"\"\"\n if self.is_complex:\n if self.dtype.names is None:\n return np.asarray(self).imag.view(Xrange_array)\n assert 'exp' in np.asarray(self).dtype.names\n real_bytes = 4\n if self._mantissa.real.dtype == np.float64:\n real_bytes = 8\n data_dtype = np.dtype({'names': ['mantissa', 'exp'],\n 'formats': [\"f\" + str(real_bytes), \"i4\"],\n 'offsets': [real_bytes, real_bytes*2],\n 'itemsize': real_bytes * 2 + 4})\n im = np.asarray(self).view(dtype=data_dtype).view(Xrange_array)\n # As exponent is shared between real and imaginary part, this \n # view is read-only\n im.flags.writeable = False\n return im\n else:\n return 0. 
* self\n\n @imag.setter\n def imag(self, val):\n \"\"\"\n Setting the imaginary part\n note: impacts the real through the exponent\n \"\"\"\n arr = np.asarray(self)\n (arr[\"mantissa\"].real, arr[\"mantissa\"].imag, arr[\"exp\"]\n )= Xrange_array._coexp_ufunc(self._mantissa.real, self._exp,\n val._mantissa, val._exp)\n\n\n @staticmethod\n def empty(shape, dtype, asarray=False):\n \"\"\" Return a new Xrange_array of given shape and type, without\n initializing entries.\n\n if asarray is True, return a view as an array, otherwise (default)\n return a Xrange_array\n \"\"\"\n extended_dtype = np.dtype([('mantissa', dtype),\n ('exp', np.int32)], align=False)\n if asarray:\n return np.empty(shape, dtype=extended_dtype)\n else:\n return np.empty(shape, dtype=extended_dtype).view(Xrange_array)\n\n @staticmethod\n def zeros(shape, dtype):\n \"\"\" Return a new Xrange_array of given shape and type, with all entries\n initialized with 0.\"\"\"\n ret = Xrange_array.empty(shape, dtype, asarray=True)\n ret[\"mantissa\"] = 0.\n ret[\"exp\"] = 0\n return ret.view(Xrange_array)\n\n @staticmethod\n def ones(shape, dtype):\n \"\"\" Return a new Xrange_array of given shape and type, with all entries\n initialized with 1.\"\"\"\n ret = Xrange_array.empty(shape, dtype, asarray=True)\n ret[\"mantissa\"] = 1.\n ret[\"exp\"] = 0\n return ret.view(Xrange_array)\n\n def fill(self, val):\n \"\"\" Fill the array with val.\n Parameter\n ---------\n val : numpy scalar of a Xrange_array of null shape\n \"\"\"\n fill_dict = {\"exp\": 0}\n if np.isscalar(val):\n fill_dict[\"mantissa\"] = val \n elif isinstance(val, Xrange_array) and (val.shape == ()):\n fill_dict[\"mantissa\"] = val._mantissa\n fill_dict[\"exp\"] = val._exp\n else:\n raise ValueError(\"Invalid input to Xrange_array.fill, \"\n \"expected a numpy scalar or a Xrange_array of dim 0.\")\n for key in [\"mantissa\", \"exp\"]:\n (np.asarray(self)[key]).fill(fill_dict[key])\n\n def to_standard(self):\n \"\"\" Returns the Xrange_array downcasted to standard np.ndarray ;\n obviously, may overflow. 
\"\"\"\n return self._mantissa * (2.**self._exp)\n\n @staticmethod\n def _build_complex(re, im):\n \"\"\" Build a complex Xrange_array from 2 similar shaped and typed\n Xrange_array (imag and real parts)\"\"\"\n m_re, m_im, exp = Xrange_array._coexp_ufunc(\n re._mantissa, re._exp, im._mantissa, im._exp)\n dtype = np.complex64\n if (m_re.dtype == np.float64) or (m_im.dtype == np.float64):\n dtype = np.complex128\n c = np.empty(m_re.shape, dtype=dtype)\n c.real = m_re\n c.imag = m_im\n return Xrange_array(c, exp)\n\n @property\n def _mantissa(self):\n \"\"\" Returns the mantissa of Xrange_array\"\"\"\n try:\n return np.asarray(self[\"mantissa\"])\n except IndexError: # Assume we are view casting a np.ndarray\n m = np.asarray(self)\n if m.dtype in Xrange_array._DTYPES:\n return m\n else:\n for cast_dtype in Xrange_array._DTYPES:\n if np.can_cast(m.dtype, cast_dtype, \"safe\"):\n return m.astype(cast_dtype)\n\n @property\n def _exp(self):\n \"\"\" Returns the exponent of Xrange_array\"\"\"\n try:\n return np.asarray(self[\"exp\"])\n except IndexError: # We are view casting a np.ndarray\n return np.int32(0)\n\n\n def normalize(self):\n \"\"\"\n Normalize in-place a Xrange_array\n \"\"\"\n arr = np.asarray(self)\n arr[\"mantissa\"], arr[\"exp\"] = self._normalize(\n arr[\"mantissa\"], arr[\"exp\"])\n\n @staticmethod\n def _normalize(m, exp):\n \"\"\"\n Parameters\n m : np.array of supported type\n exp : int32 np.array\n\n Return\n nm : float32 or float64 np.array\n nexp : int32 np.array\n f * 2**exp == nf * 2**nexp\n .5 <= abs(nf) < 1.\n \"\"\"\n if m.dtype in Xrange_array._FLOAT_DTYPES:\n nm, exp2 = np.frexp(m)\n nexp = np.where(m == 0., np.int32(0), exp + exp2)\n return nm, nexp\n elif m.dtype in Xrange_array._COMPLEX_DTYPES:\n nm = np.empty_like(m)\n nm_re, nexp_re = Xrange_array._normalize(m.real, exp)\n nm_im, nexp_im = Xrange_array._normalize(m.imag, exp)\n nm.real, nm.imag, nexp = Xrange_array._coexp_ufunc(\n nm_re, nexp_re, nm_im, nexp_im)\n return nm, nexp\n else:\n raise ValueError(m.dtype)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n \"\"\"\n - *ufunc* is the ufunc object that was called.\n - *method* is a string indicating how the Ufunc was called, either\n ``\"__call__\"`` to indicate it was called directly, or one of its\n :ref:`methods<ufuncs.methods>`: ``\"reduce\"``, ``\"accumulate\"``,\n ``\"reduceat\"``, ``\"outer\"``, or ``\"at\"``.\n - *inputs* is a tuple of the input arguments to the ``ufunc``\n - *kwargs* contains any optional or keyword arguments passed to the\n function. 
This includes any ``out`` arguments, which are always\n contained in a tuple.\n \n see also:\n https://github.com/numpy/numpy/blob/v1.19.0/numpy/lib/mixins.py#L59-L176\n \"\"\"\n out = kwargs.pop(\"out\", None)\n if out is not None:\n if ufunc.nout == 1: # Only supported case to date\n out = np.asarray(out[0])\n\n casted_inputs = ()\n for x in inputs:\n # Only support operations with instances of _HANDLED_TYPES.\n if isinstance(x, Xrange_array):\n casted_inputs += (x,)\n elif isinstance(x, np.ndarray):\n casted_inputs += (x.view(Xrange_array),)\n elif isinstance(x, numbers.Number):\n casted_inputs += (Xrange_array(x),)\n elif isinstance(x, list):\n casted_inputs += (Xrange_array(x),)\n else:\n # Operation not supported (type not handled), return the\n # sentinel value NotImplemented\n return NotImplemented\n\n if method == \"__call__\":\n if ufunc in [np.add, np.subtract]:\n out = self._add(ufunc, casted_inputs, out=out)\n elif ufunc is np.negative:\n out = self._negative(casted_inputs, out=out)\n elif ufunc in [np.multiply, np.true_divide]:\n out = self._mul(ufunc, casted_inputs)\n elif ufunc in [np.greater, np.greater_equal, np.less,\n np.less_equal, np.equal, np.not_equal]:\n # Not a Xrange array, returns bool array\n return self._compare(ufunc, casted_inputs, out=out)\n elif ufunc is np.maximum:\n out = self._maximum(casted_inputs, out=out)\n elif ufunc is np.absolute:\n out = self._abs(casted_inputs, out=out)\n elif ufunc is np.sqrt:\n out = self._sqrt(casted_inputs, out=out)\n elif ufunc is np.square:\n out = self._square(casted_inputs, out=out)\n elif ufunc is np.conj:\n out = self._conj(casted_inputs, out=out)\n elif ufunc is np.log:\n out = self._log(casted_inputs, out=out)\n elif ufunc is np.arctan2:\n # Not a Xrange array, returns a float array\n return self._arctan2(casted_inputs, out=out)\n else:\n out = None\n elif method in [\"reduce\", \"accumulate\"]:\n if ufunc is np.add:\n out = self._add_method(casted_inputs, method, out=out,\n **kwargs)\n elif ufunc is np.multiply:\n out = self._mul_method(casted_inputs, method, out=out,\n **kwargs)\n else:\n out = None\n\n if out is None:\n raise NotImplementedError(\"ufunc {} method {} not implemented for \"\n \"Xrange_array\".format(ufunc, method))\n return out.view(Xrange_array)\n\n\n @staticmethod\n def _arctan2(inputs, out=None):\n \"\"\"\n Return the arctan2 as a standard float array\n \"\"\"\n op0, op1 = inputs\n\n if op0.shape == () and op0 == 0.:\n # As of numpy 1.19.3 'np.angle' is not a ufunc but wraps arctan2 ;\n # this branch will handle calls by np.angle with zimag = 0. and\n # zreal a complex Xrange_array\n return np.angle(op1._mantissa)\n\n m0 = op0._mantissa\n m1 = op1._mantissa\n if out is None:\n out = np.empty(np.broadcast(m0, m1).shape,\n dtype=np.result_type(m0, m1))\n out, _ = Xrange_array._coexp_ufunc(\n m0, op0._exp, m1, op1._exp, ufunc=np.arctan2)\n return out\n\n def abs2(self, out=None):\n \"\"\"\n Return the square of np.abs(self) (for optimisation purpose).\n \"\"\"\n if out is None:\n out = Xrange_array.empty(self.shape,\n dtype=self._mantissa.real.dtype, asarray=True)\n if self.is_complex:\n out[\"mantissa\"] = self._mantissa.real**2 + self._mantissa.imag**2\n out[\"exp\"] = 2 * self._exp\n else:\n out[\"mantissa\"] = self._mantissa**2\n out[\"exp\"] = 2 * self._exp\n # ! 
not a unfunc so need to keep the view\n return out.view(Xrange_array)\n\n @staticmethod\n def _conj(inputs, out=None):\n \"\"\" x -> np.conj(x) \"\"\"\n op0, = inputs\n m0 = op0._mantissa\n if out is None:\n out = Xrange_array.empty(m0.shape, dtype=m0.dtype, asarray=True)\n out[\"mantissa\"] = np.conj(op0._mantissa)\n out[\"exp\"] = op0._exp\n return out\n\n @staticmethod\n def _square(inputs, out=None):\n \"\"\" x -> x**2 \"\"\"\n op0, = inputs\n m0 = op0._mantissa\n if out is None:\n out = Xrange_array.empty(m0.shape, dtype=m0.dtype, asarray=True) \n m = np.square(m0)\n if Xrange_array._need_renorm(m):\n out[\"mantissa\"], out[\"exp\"] = Xrange_array._normalize(\n m, 2 * op0._exp)\n else:\n out[\"mantissa\"] = m\n out[\"exp\"] = 2 * op0._exp\n return out\n\n @staticmethod\n def _log(inputs, out=None):\n \"\"\" x -> np.log(x) \"\"\"\n ln2 = 0.6931471805599453\n op0, = inputs\n m0 = op0._mantissa\n if out is None:\n out = Xrange_array.empty(m0.shape, dtype=m0.dtype, asarray=True)\n\n if op0.is_complex:\n m_re, exp_re = Xrange_array._normalize(m0.real, op0._exp)\n m_im, exp_im = Xrange_array._normalize(m0.imag, op0._exp)\n m_re *= 2.\n exp_re -= 1\n m_re, m_im, e = Xrange_array._coexp_ufunc(\n m_re, exp_re, m_im, exp_im)\n m = m_re + 1.j * m_im\n else:\n m, e = Xrange_array._normalize(m0, op0._exp)\n m *= 2.\n e -= 1\n m_re = m\n # Avoid loss of significant digits if e * ln2 close to log(m)\n # ie m close to 2.0\n e_is_m1 = (e == -1)\n if np.isscalar(m):\n if e_is_m1:\n m[e_is_m1] *= 0.5\n e[e_is_m1] += 1\n else:\n m[e_is_m1] *= 0.5\n e[e_is_m1] += 1\n\n out[\"mantissa\"] = np.log(m) + m_re.dtype.type(e * ln2)\n out[\"exp\"] = 0\n return out\n\n @staticmethod\n def _sqrt(inputs, out=None):\n \"\"\" x -> np.sqrt(x) \"\"\"\n sqrt0, = inputs\n m0 = sqrt0._mantissa\n if out is None:\n out = Xrange_array.empty(m0.shape, dtype=m0.dtype, asarray=True)\n \n if sqrt0.is_complex:\n m_re, m_im, exp = Xrange_array._coexp_ufunc(\n m0.real, sqrt0._exp,\n m0.imag, sqrt0._exp, None)\n m = m_re + 1.j * m_im\n even_exp = ((exp % 2) == 0).astype(bool)\n exp = np.where(even_exp, exp // 2, (exp - 1) // 2)\n out[\"mantissa\"] = np.sqrt(np.where(even_exp, m, m * 2.))\n out[\"exp\"] = exp\n else:\n even_exp = ((sqrt0._exp % 2) == 0).astype(bool)\n out[\"mantissa\"] = np.sqrt(np.where(even_exp, sqrt0._mantissa,\n sqrt0._mantissa * 2.))\n out[\"exp\"] = np.where(even_exp, sqrt0._exp // 2,\n (sqrt0._exp - 1) // 2)\n\n return out\n\n @staticmethod\n def _abs(inputs, out=None):\n \"\"\" x -> np.abs(x) \"\"\"\n op0, = inputs\n m0 = op0._mantissa\n exp0 = op0._exp\n\n if out is None:\n out = Xrange_array.empty(m0.shape, dtype=m0.real.dtype,\n asarray=True)\n if op0.is_complex:\n Xrange_array._sqrt((op0.real * op0.real + op0.imag * op0.imag,),\n out=out)\n else:\n out[\"mantissa\"] = np.abs(m0)\n out[\"exp\"] = exp0\n\n return out\n\n @staticmethod\n def _compare(ufunc, inputs, out=None):\n \"\"\" compare x and y \"\"\"\n op0, op1 = inputs\n m0 = op0._mantissa\n m1 = op1._mantissa\n if out is None:\n out = np.empty(np.broadcast(m0, m1).shape, dtype=bool)\n\n if (op0.is_complex or op1.is_complex):\n if ufunc in [np.equal, np.not_equal]:\n re_eq = Xrange_array._coexp_ufunc(\n m0.real, op0._exp, m1.real, op1._exp, ufunc)[0]\n im_eq = Xrange_array._coexp_ufunc(\n m0.imag, op0._exp, m1.imag, op1._exp, ufunc)[0]\n if ufunc is np.equal:\n out = re_eq & im_eq\n else:\n out = re_eq | im_eq\n else:\n raise NotImplementedError(\n \"{} Not supported for complex\".format(ufunc))\n else:\n out = Xrange_array._coexp_ufunc(m0, op0._exp, 
m1,\n op1._exp, ufunc)[0]\n return out\n\n @staticmethod\n def _maximum(inputs, out=None):\n op0, op1 = inputs\n m0 = op0._mantissa\n exp0 = op0._exp\n m1 = op1._mantissa\n exp1 = op1._exp\n if out is None:\n out = Xrange_array.empty(np.broadcast(m0, m1).shape,\n dtype=np.result_type(m0, m1), asarray=True)\n where1 = (op1 > op0)\n out[\"mantissa\"] = np.where(where1, m1, m0)\n out[\"exp\"] = np.where(where1, exp1, exp0)\n return out\n\n @staticmethod\n def _mul(ufunc, inputs, out=None):\n \"\"\" internal auxilliary function for * and / operators \"\"\"\n op0, op1 = inputs\n m0 = op0._mantissa\n exp0 = op0._exp\n m1 = op1._mantissa\n exp1 = op1._exp\n if ufunc is np.true_divide:\n m1 = np.reciprocal(m1)\n exp1 = -exp1\n\n if out is None:\n out = Xrange_array.empty(np.broadcast(m0, m1).shape,\n dtype=np.result_type(m0, m1), asarray=True)\n\n m = m0 * m1\n if Xrange_array._need_renorm(m):\n out[\"mantissa\"], out[\"exp\"] = Xrange_array._normalize(m, exp0 + exp1)\n else:\n out[\"mantissa\"] = m\n out[\"exp\"] = exp0 + exp1\n return out\n\n @staticmethod\n def _negative(inputs, out=None):\n \"\"\" x -> -x \"\"\"\n op0, = inputs\n m0 = op0._mantissa\n if out is None:\n out = Xrange_array.empty(op0.shape, dtype=m0.dtype, asarray=True)\n out[\"mantissa\"] = -m0\n out[\"exp\"] = op0._exp\n return out\n\n @staticmethod\n def _add(ufunc, inputs, out=None):\n \"\"\" internal auxilliary function for + and - operators \"\"\"\n op0, op1 = inputs\n m0 = op0._mantissa\n m1 = op1._mantissa\n if out is None:\n out = Xrange_array.empty(np.broadcast(m0, m1).shape,\n dtype=np.result_type(m0, m1), asarray=True)\n\n if (op0.is_complex or op1.is_complex):\n # Packing together\n out[\"mantissa\"], out[\"exp\"] = Xrange_array._cplx_coexp_ufunc(\n m0, op0._exp, m1, op1._exp, ufunc)\n else:\n out[\"mantissa\"], out[\"exp\"] = Xrange_array._coexp_ufunc(\n m0, op0._exp, m1, op1._exp, ufunc)\n\n return out \n\n @staticmethod\n def _coexp_ufunc(m0, exp0, m1, exp1, ufunc=None):\n \"\"\" \n If ufunc is None :\n m0, exp0, m1, exp1, -> co_m0, co_m1, co_exp so that :\n (*) m0 * 2**exp0 == co_m0 * 2**co_exp\n (*) m1 * 2**exp1 == co_m1 * 2**co_exp\n (*) co_exp is the \"leading exponent\" exp = np.maximum(exp0, exp1)\n except if one of m0, m1 is null.\n If ufunc is provided :\n m0, exp0, m1, exp1, -> ufunc(co_m0, co_m1), co_exp\n \n \"\"\"\n co_m0, co_m1 = np.copy(np.broadcast_arrays(m0, m1))\n \n exp0 = np.broadcast_to(exp0, co_m0.shape)\n exp1 = np.broadcast_to(exp1, co_m0.shape)\n\n m0_null = (m0 == 0.)\n m1_null = (m1 == 0.)\n d_exp = exp0 - exp1\n\n if (co_m0.shape == ()):\n if ((exp1 > exp0) & ~m1_null):\n co_m0 = Xrange_array._exp2_shift(co_m0, d_exp)\n if ((exp0 > exp1) & ~m0_null):\n co_m1 = Xrange_array._exp2_shift(co_m1, -d_exp)\n exp = np.maximum(exp0, exp1)\n if m0_null:\n exp = exp1\n if m1_null:\n exp = exp0\n else:\n bool0 = ((exp1 > exp0) & ~m1_null)\n co_m0[bool0] = Xrange_array._exp2_shift(\n co_m0[bool0], d_exp[bool0])\n bool1 = ((exp0 > exp1) & ~m0_null)\n co_m1[bool1] = Xrange_array._exp2_shift(\n co_m1[bool1], -d_exp[bool1])\n exp = np.maximum(exp0, exp1)\n exp[m0_null] = exp1[m0_null]\n exp[m1_null] = exp0[m1_null]\n\n if ufunc is not None: \n return (ufunc(co_m0, co_m1), exp)\n else:\n return (co_m0, co_m1, exp)\n \n \n @staticmethod\n def _cplx_coexp_ufunc(m0, exp0, m1, exp1, ufunc=None):\n \"\"\" \n Idem with complex m0, m1\n \"\"\"\n co_m0, co_m1 = np.copy(np.broadcast_arrays(m0, m1))\n exp0 = np.broadcast_to(exp0, co_m0.shape)\n exp1 = np.broadcast_to(exp1, co_m0.shape)\n\n m0_null = (m0 == 0.)\n 
m1_null = (m1 == 0.)\n d_exp = exp0 - exp1\n\n if (co_m0.shape == ()):\n if ((exp1 > exp0) & ~m1_null):\n co_m0 = (Xrange_array._exp2_shift(co_m0.real, d_exp)\n + 1.j * Xrange_array._exp2_shift(co_m0.imag, d_exp))\n if ((exp0 > exp1) & ~m0_null):\n co_m1 = (Xrange_array._exp2_shift(co_m1.real, d_exp)\n + 1.j * Xrange_array._exp2_shift(co_m1.imag, d_exp))\n exp = np.maximum(exp0, exp1)\n if m0_null:\n exp = exp1\n if m1_null:\n exp = exp0\n else:\n f_dtype = np.float32\n if (m0.dtype == np.complex128) or (m1.dtype == np.complex128):\n f_dtype = np.float64\n k0 = Xrange_array._exp2(-d_exp, dtype=f_dtype)\n k1 = Xrange_array._exp2(d_exp, dtype=f_dtype)\n \n bool0 = ((exp1 > exp0) & ~m1_null)\n bool1 = ((exp0 > exp1) & ~m0_null)\n\n co_m0[bool0] *= k0[bool0]\n co_m1[bool1] *= k1[bool1]\n\n exp = np.maximum(exp0, exp1)\n exp[m0_null] = exp1[m0_null]\n exp[m1_null] = exp0[m1_null]\n\n if ufunc is not None: \n return (ufunc(co_m0, co_m1), exp)\n else:\n return (co_m0, co_m1, exp)\n\n @staticmethod\n def _add_method(inputs, method, out=None, **kwargs):\n \"\"\"\n \"\"\"\n if method == \"accumulate\":\n raise NotImplementedError(\"ufunc {} method {} not implemented for \"\n \"Xrange_array\".format(np.add, method))\n if out is not None:\n raise NotImplementedError(\"`out` keyword not immplemented \"\n \"for ufunc {} method {} of Xrange_array\".format(\n np.add, \"reduce\"))\n\n op, = inputs\n\n axis = kwargs.get(\"axis\", 0)\n broadcast_co_exp_acc = np.maximum.reduce(op._exp, axis=axis, \n keepdims=True)\n\n if op.is_complex:\n re = Xrange_array._exp2_shift(op._mantissa.real, \n op._exp - broadcast_co_exp_acc)\n im = Xrange_array._exp2_shift(op._mantissa.imag, \n op._exp - broadcast_co_exp_acc)\n co_m = re + 1.j * im\n else:\n co_m = Xrange_array._exp2_shift(op._mantissa, \n op._exp - broadcast_co_exp_acc)\n\n res = Xrange_array(*Xrange_array._normalize(\n np.add.reduce(co_m, axis=axis),\n np.squeeze(broadcast_co_exp_acc, axis=axis)))\n return res\n\n @staticmethod\n def _mul_method(inputs, method, out=None, **kwargs):\n \"\"\"\n methods implemented are reduce or accumulate\n \"\"\"\n if out is not None:\n raise NotImplementedError(\"`out` keyword not immplemented \"\n \"for ufunc {} method {} of Xrange_array\".format(\n np.multiply, method))\n\n op, = inputs\n m0, exp0 = Xrange_array._normalize(op._mantissa, op._exp)\n # np.multiply.reduce(m0, axis=axis) shall remains bounded\n # Set m m between sqrt(0.5) and sqrt(2)\n # With float64 mantissa, in current implementation, mantissa is only\n # guaranteed to not overflow for arrays of less than 2000 elements\n # (because 1.41**2000 = 2.742996861934711e+298 < max float64)\n is_below = m0 < np.sqrt(0.5)\n m0[is_below] *= 2.\n exp0[is_below] -= 1 \n\n axis = kwargs.get(\"axis\", 0)\n res = Xrange_array(*Xrange_array._normalize(\n getattr(np.multiply, method)(m0, axis=axis),\n getattr(np.add, method)(exp0, axis=axis)))\n return res\n\n @staticmethod\n def _need_renorm(val):\n \"\"\"\n Returns True if val need renom\n \"\"\"\n val = np.asarray(val)\n if val.dtype == np.float32:\n bits = val.view(np.int32)\n return np.any((np.abs(((bits >> 23) & 0xff) - 127) > 31)\n & (val != 0.))\n elif val.dtype == np.float64:\n bits = val.view(np.int64)\n return np.any((np.abs(((bits >> 52) & 0x7ff) - 1023) > 255)\n & (val != 0.))\n elif val.dtype in [np.complex64, np.complex128]:\n return np.logical_or(Xrange_array._need_renorm(val.real),\n Xrange_array._need_renorm(val.imag))\n else:\n raise ValueError(\"Unsupported dtype {}\".format(val.dtype))\n\n @staticmethod\n def 
_xlog2(val):\n \"\"\"\n Returns a rough evaluation of the exponent base 2\n \"\"\"\n val = np.asarray(val)\n if val.dtype == np.float32:\n bits = val.view(np.int32)\n return np.where(val == 0., 0, np.abs(((bits >> 23) & 0xff) - 127)\n ).astype(np.int16)\n elif val.dtype == np.float64:\n bits = val.view(np.int64)\n return np.where(val == 0., 0, np.abs(((bits >> 52) & 0x7ff) - 1023)\n ).astype(np.int16)\n elif val.dtype in [np.complex64, np.complex128]:\n return np.maximum(Xrange_array._xlog2(val.real),\n Xrange_array._xlog2(val.imag))\n else:\n raise ValueError(\"Unsupported dtype {}\".format(val.dtype))\n\n @staticmethod\n def _exp2(exp, dtype):\n \"\"\"\n Returns 2**-exp, exp np.int32 > 0\n \"\"\"\n if dtype == np.float32:\n _exp = np.clip(127 - exp, 0, None)\n return (_exp << 23).view(np.float32)\n elif dtype == np.float64:\n _exp = np.clip(1023 - exp.astype(np.int64), 0, None)\n return (_exp << 52).view(np.float64)\n else:\n raise ValueError(\"Unsupported dtype {}\".format(dtype))\n\n @staticmethod\n def _exp2_shift(m, shift):\n \"\"\"\n Parameters\n m : float32 or float64 array, mantissa\n exp : int32 array, negative integers array\n\n Return\n res array of same type as m, shifted by 2**shift :\n res = m * 2**shift\n\n References:\n https://en.wikipedia.org/wiki/Single-precision_floating-point_format\n s(1)e(8)m(23)\n (bits >> 23) & 0xff : exponent with bias 127 (0x7f)\n (bits & 0x7fffff) : mantissa, implicit first bit of value 1\n\n https://en.wikipedia.org/wiki/Double-precision_floating-point_format\n s(1)e(11)m(52)\n (bits >> 52) & 0x7ff : exponent with bias 1023 (0x3ff)\n (bits & 0xfffffffffffff) : mantissa, implicit first bit of value 1\n \"\"\"\n dtype = m.dtype\n if dtype == np.float32:\n bits = m.view(np.int32)\n # Need to take special care as casting to int32 a 0d array is only\n # supported if the itemsize is unchanged. So we impose the res \n # dtype\n res_32 = np.empty_like(bits)\n exp = np.clip(((bits >> 23) & 0xff) + shift, 0, None)\n np.add((exp << 23), bits & 0x7fffff, out=res_32)\n return np.copysign(res_32.view(np.float32), m)\n\n elif dtype == np.float64:\n bits = m.view(np.int64)\n exp = np.clip(((bits >> 52) & 0x7ff) + shift, 0, None)\n return np.copysign(((exp << 52) + (bits & 0xfffffffffffff)\n ).view(np.float64) , m)\n else:\n raise ValueError(\"Unsupported dtype {}\".format(dtype))\n\n\n def __repr__(self):\n \"\"\" Detailed string representation of self \"\"\"\n s = (str(type(self)) + \"\\nshape: \" +str(self.shape) +\n \"\\ninternal dtype: \" + str(self.dtype) + \n \"\\nbase 10 representation:\\n\" +\n self.__str__())\n return s\n\n def __str__(self):\n \"\"\"\n String representation of self. Takes into account the value of\n np.get_printoptions(precision)\n\n Usage :\n with np.printoptions(precision=2) as opts:\n print(extended_range_array)\n \"\"\"\n # There is no easy way to impose the formatting of a structured array\n # Monkey patching np.core.arrayprint.StructuredVoidFormat\n orig = np.core.arrayprint.StructuredVoidFormat\n try:\n np.core.arrayprint.StructuredVoidFormat = _Xrange_array_format\n if self.shape == ():\n ret = np.array2string(self.reshape([1]))[1:-1]\n else:\n ret = np.array2string(self)\n finally:\n np.core.arrayprint.StructuredVoidFormat = orig\n return ret\n\n def _to_str_array(self, **options):\n \"\"\"\n String representation of self. 
Takes into account the value of\n np.get_printoptions(precision)\n\n Usage :\n with np.printoptions(precision=2) as opts:\n print(extended_range_array)\n \"\"\"\n if self.is_complex:\n s_re = Xrange_array._to_char(self.real, **options)\n s_im = Xrange_array._to_char(self.imag, im=True, **options)\n s = np.core.defchararray.add(s_re, s_im)\n s = np.core.defchararray.add(s, \"j\")\n else:\n s = Xrange_array._to_char(self, **options)\n return s\n\n @staticmethod\n def _to_char(arr, im=False, im_p_char = '\\u2795',\n im_m_char = '\\u2796', **options):\n \"\"\"\n Parameters:\n m2 base 2 real mantissa\n exp2 : base 2 exponent\n\n Return\n str_arr string array of representations in base 10.\n\n Note: precisions according to:\n np.get_printoptions(precision)\n \"\"\"\n def_opts = np.get_printoptions()\n precision = options.pop(\"precision\", def_opts[\"precision\"])\n nanstr = options.pop(\"nanstr\", def_opts[\"nanstr\"])\n infstr = options.pop(\"infstr\", def_opts[\"infstr\"])\n \n m2, exp2 = Xrange_array._normalize(arr._mantissa, arr._exp)\n m10, exp10 = Xrange_array._rebase_2to10(m2, exp2)\n\n if np.isscalar(m10): # scalar do not support item assignment\n if (np.abs(m10) < 1.0):\n m10 *= 10.\n exp10 -= 1\n exp10 = np.asarray(exp10, np.int32)\n _m10 = np.around(m10, decimals=precision)\n if (np.abs(_m10) >= 10.0):\n m10 *= 0.1\n exp10 += 1\n m10 = np.around(m10, decimals=precision)\n # Special case of 0.\n if (m2 == 0.):\n exp10 = 0\n # Special case of 0.\n if np.isnan(m2 == 0.):\n exp10 = 0\n else:\n m10_up = (np.abs(m10) < 1.0)\n m10[m10_up] *= 10.\n exp10[m10_up] -= 1\n exp10 = np.asarray(exp10, np.int32)\n _m10 = np.around(m10, decimals=precision)\n m10_down= (np.abs(_m10) >= 10.0)\n m10[m10_down] *= 0.1\n exp10[m10_down] += 1\n m10 = np.around(m10, decimals=precision)\n # Special case of 0.\n is_null = (m2 == 0.)\n exp10[is_null] = 0\n\n if im :\n p_char = im_p_char # '\\u2795' bold +\n m_char = im_m_char # '\\u2796' bold -\n else:\n p_char = \" \"\n m_char = \"-\"\n concat = np.core.defchararray.add\n exp_digits = int(np.log10(max([np.nanmax(np.abs(exp10)), 10.]))) + 1\n str_arr = np.where(m10 < 0., m_char, p_char)\n str_arr = concat(str_arr,\n np.char.ljust(np.abs(m10).astype(\"|U\" + \n str(precision + 2)),\n precision + 2, \"0\"))\n str_arr = concat(str_arr, \"e\")\n str_arr = concat(str_arr, np.where(exp10 < 0, \"-\", \"+\"))\n str_arr = concat(str_arr,\n np.char.rjust(np.abs(exp10).astype(\"|U10\"), exp_digits, \"0\"))\n\n # Handles nan and inf values\n np.putmask(str_arr, np.isnan(m2), nanstr)\n np.putmask(str_arr, np.isinf(m2), infstr)\n\n return str_arr\n\n @staticmethod\n def _rebase_2to10(m2, exp2):\n \"\"\"\n Parameters:\n m2 mantissa in base 2\n exp2 int32 exponent in base 2\n\n Returns:\n m10 mantissa in base 10\n exp10 int32 exponent in base 10\n\n Note : \n This is a high-precision version of:\n > r = math.log10(2)\n > exp10, mod = np.divmod(exp2 * r, 1.)\n > return m2 * 10.**mod, exp10\n\n In order to guarantee an accuracy > 15 digits (in reality, close to 16)\n for `mod` with the 9-digits highest int32 base 2 exponent (2**31 - 1)\n we use an overall precision of 96 bits for this divmod.\n \"\"\"\n # We will divide by hand in base 2**32 (chosen so that exp2 * ri does\n # not overflow an int64 with the largest exp2 == 2**31-1), ri < 2**32.\n # >>> import mpmath\n # >>> mpmath.mp.dps = 35\n # >>> mpmath.log(\"2.\") / mpmath.log(\"10.\") * mpmath.mpf(2**96)\n # mpf('23850053418134191015272426710.02243475524574')\n r_96 = 23850053418134191015272426710\n mm = [None] * 3\n for i 
in range(3):\n ri = (r_96 >> (32 * (2 - i))) & 0xffffffff\n mm[i] = exp2.astype(np.int64) * ri\n if i == 0: # extract the integer `mod` part\n di, mm[i] = np.divmod(mm[i], 0x100000000)\n d = di.astype(np.int64)\n m = (mm[0] + (mm[1] + mm[2] * 2.**-32) * 2.**-32) * 2**-32\n return m2 * 10.**m, d.astype(np.int32)\n\n\n def __setitem__(self, key, val):\n \"\"\" Can be given either a Xrange_array or a complex of float array-like\n (See 'supported types')\n \"\"\"\n if type(val) is Xrange_array:\n if val.is_complex and not(self.is_complex):\n raise ValueError(\"Cant cast complex values to real\")\n np.ndarray.__setitem__(self, key, val)\n else:\n val = np.asarray(val).view(Xrange_array)\n np.ndarray.__setitem__(self._mantissa, key, val._mantissa)\n np.ndarray.__setitem__(self._exp, key, val._exp)\n\n def __getitem__(self, key):\n \"\"\" For single item, return array of empty shape rather than a scalar,\n to allow pretty print and maintain assignment behaviour consistent.\n \"\"\"\n res = np.ndarray.__getitem__(self, key)\n if np.isscalar(res):\n res = np.asarray(res).view(Xrange_array)\n return res\n\n def __eq__(self, other):\n \"\"\" Ensure that `!=` is handled by Xrange_array instance. \"\"\"\n return np.equal(self, other)\n\n def __ne__(self, other):\n \"\"\" Ensure that `==` is handled by Xrange_array instance. \"\"\"\n return np.not_equal(self, other)\n\n\nclass _Xrange_array_format():\n \"\"\" Formatter class for Xrange_array printing. \"\"\"\n def __init__(self, **options):\n self.options = options\n @classmethod\n def from_data(cls, data, **options):\n return cls(**options)\n def __call__(self, x):\n return str(x._to_str_array(**self.options))\n\n\nclass Xrange_polynomial(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"\n One-dimensionnal polynomial class featuring extended-range coefficients\n which provides:\n - the standard Python numerical methods ‘+’, ‘-‘, ‘*' \n - derivative\n - evaluation\n - pretty-print\n\n Parameters\n ----------\n coeffs: array_like - can be viewed as a Xrange_array\n Polynomial coefficients in order of increasing degree, i.e.,\n (1, 2, 3) give 1 + 2*x + 3*x**2.\n\n cutdeg : int, maximum degree coefficient. 
At instanciation but also for\n the subsequent operations, monomes of degree above cutdeg will be \n disregarded.\n \"\"\" \n # Unicode character mappings for \"pretty print\" of the polynomial\n _superscript_mapping = str.maketrans({\n \"0\": \"⁰\",\n \"1\": \"¹\",\n \"2\": \"²\",\n \"3\": \"³\",\n \"4\": \"⁴\",\n \"5\": \"⁵\",\n \"6\": \"⁶\",\n \"7\": \"⁷\",\n \"8\": \"⁸\",\n \"9\": \"⁹\"\n })\n\n def __init__(self, coeffs, cutdeg):\n if isinstance(coeffs, Xrange_array):\n self.coeffs = coeffs[0:cutdeg+1]\n else:\n self.coeffs = Xrange_array(np.asarray(coeffs)[0:cutdeg+1])#.view(Xrange_array)\n if self.coeffs.ndim != 1:\n raise ValueError(\"Only 1-d inputs for Xrange_polynomial\")\n self.cutdeg = cutdeg\n\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n casted_inputs = ()\n casted_cutdegs = ()\n\n for x in inputs:\n # Only support operations with instances of \n # Xrange_array._HANDLED_TYPES.\n if isinstance(x, Xrange_polynomial):\n casted_inputs += (x.coeffs,)\n casted_cutdegs += (x.cutdeg,)\n elif isinstance(x, Xrange_array):\n casted_inputs += (x.flatten(),)\n elif isinstance(x, np.ndarray):\n casted_inputs += (x.flatten().view(Xrange_array),)\n elif isinstance(x, numbers.Number):\n casted_inputs += (Xrange_array([x]),)\n elif isinstance(x, list):\n casted_inputs += (Xrange_array(x),)\n else:\n # Operation not supported (type not handled), return the\n # sentinel value NotImplemented\n return NotImplemented\n\n cutdeg = min(casted_cutdegs)\n if not all(item == cutdeg for item in casted_cutdegs):\n raise ValueError(\"Operation not supported, incompatible cutdegs {}\"\n .format(casted_cutdegs))\n\n out = kwargs.pop(\"out\", None)\n\n if method == \"__call__\":\n if ufunc in [np.add, np.subtract]:\n return self._add(ufunc, casted_inputs, cutdeg=cutdeg, out=out)\n elif ufunc is np.negative:\n return self._negative(casted_inputs, cutdeg=cutdeg, out=out)\n elif ufunc is np.multiply:\n return self._mul(casted_inputs, cutdeg=cutdeg, out=out)\n # Other ufunc not supported\n return NotImplemented\n\n @staticmethod\n def _add(ufunc, inputs, cutdeg, out=None):\n \"\"\" Add or Subtract 2 Xrange_polynomial \"\"\"\n op0, op1 = inputs\n res_len = min(max(op0.size, op1.size), cutdeg + 1)\n op0_len = min(op0.size, res_len)\n op1_len = min(op1.size, res_len)\n\n dtype=np.result_type(op0._mantissa, op1._mantissa)\n res = Xrange_array(np.zeros([res_len], dtype=dtype))\n\n res[:op0_len] += op0[:op0_len]\n if ufunc is np.add:\n res[:op1_len] += op1[:op1_len]\n elif ufunc is np.subtract: \n res[:op1_len] -= op1[:op1_len]\n return Xrange_polynomial(res, cutdeg=cutdeg)\n\n @staticmethod\n def _negative(inputs, cutdeg, out=None):\n \"\"\" Change sign of a Xrange_polynomial \"\"\"\n op0, = inputs\n return Xrange_polynomial(-op0, cutdeg=cutdeg)\n\n @staticmethod\n def _mul(inputs, cutdeg, out=None):\n \"\"\" Product of 2 Xrange_polynomial \"\"\"\n op0, op1 = inputs\n # This is a convolution, fix the window with the shortest poly op0,\n # swapping poly if needed. 
(We do not use fft but direct\n # calculation as number of terms stay low)\n if op0.size > op1.size:\n op0, op1 = op1, op0\n l0 = op0.size\n l1 = op1.size\n cutoff_res = min(l0 + l1 - 2, cutdeg) # the degree...\n op1 = np.pad(op1, (l0 - 1, cutoff_res - l1 + 1),\n mode='constant').view(Xrange_array)\n shift = np.arange(0, cutoff_res + 1)\n take1 = shift[:, np.newaxis] + np.arange(l0 - 1 , -1, -1)\n return Xrange_polynomial(np.sum(op0 * np.take(op1, take1), axis=1),\n cutdeg=cutdeg) # /!\\ and not cutoff_res\n\n def __call__(self, arg):\n \"\"\" Call self as a function.\n \"\"\"\n if not isinstance(arg, Xrange_array):\n arg = Xrange_array(np.asarray(arg))\n\n res_dtype = np.result_type(arg._mantissa, self.coeffs._mantissa)\n res = Xrange_array.empty(arg.shape, dtype=res_dtype)\n res.fill(self.coeffs[-1])\n\n for i in range(2, self.coeffs.size + 1):\n res = self.coeffs[-i] + res * arg\n return res\n \n def deriv(self, k=1.):\n l = self.coeffs.size\n coeffs = self.coeffs[1:] * np.arange(1, l)\n if k != 1.:\n mul = 1.\n for i in range(l-1):\n coeffs[i] *= mul\n mul *= k\n return Xrange_polynomial(coeffs, cutdeg=self.cutdeg)\n\n def taylor_shift(self, x0):#, quad_prec=False):\n \"\"\"\n Parameters\n ----------\n x0 : Xrange_array of shape (1,)\n\n Returns\n -------\n Q : Xrange_polynomial so that\n Q(X) = P(X + x0) \n\n Implementation\n Q(X) = P(X + x0) transformation is accomplished by the three simpler\n transformation:\n g(X) = p(x0 * X)\n f(X) = g(X + 1)\n q(X) = f(1./x0 * X)\n\n References\n [1] Joachim von zur Gathen, Jürgen Gerhard Fast Algorithms for Taylor\n Shifts and Certain Difference Equations.\n [2] Mary Shaw, J.F. Traub On the number of multiplications for the\n evaluation of a polynomial and some of its derivatives.\n \"\"\"\n if x0 == 0.:\n return Xrange_polynomial(self.coeffs, cutdeg=self.cutdeg)\n return self.scale_shift(x0)._taylor_shift_one().scale_shift(1. 
/ x0)\n\n def _taylor_shift_one(self):\n \"\"\"\n private auxilliary function, shift by 1.0 : return Q so that\n Q(X) = P(X + 1.0) where P is self\n \"\"\"\n dtype = self.coeffs._mantissa.dtype\n pascalT = Xrange_array.zeros([self.coeffs.size], dtype)\n tmp = pascalT.copy()\n pascalT[0] = self.coeffs[-1]\n for i in range(2, self.coeffs.size + 1):\n # at each step P -> P + (ai + X P)\n tmp[1:] = pascalT[:-1]\n tmp[0] = self.coeffs[-i]\n pascalT += tmp\n return Xrange_polynomial(pascalT, cutdeg=self.cutdeg)\n\n def scale_shift(self, a):\n \"\"\"\n Parameters\n ----------\n a : Xrange_array of shape (1,)\n \n Returns\n -------\n Q : Xrange_polynomial so that :\n Q(X) = P(a * X) where P is 'self'\n \"\"\"\n dtype = self.coeffs._mantissa.dtype\n scaled = Xrange_array.ones([self.coeffs.size], dtype=dtype)\n scaled[1:] = a\n scaled = np.cumprod(scaled) * self.coeffs\n return Xrange_polynomial(scaled, cutdeg=self.cutdeg)\n\n def __repr__(self):\n return (\"Xrange_polynomial(cutdeg=\"+ str(self.cutdeg) +\",\\n\" +\n self.__str__() + \")\")\n\n def __str__(self):\n return self._to_str()\n\n def _to_str(self):\n \"\"\"\n Generate the full string representation of the polynomial, using\n `_monome_base_str` to generate each polynomial term.\n \"\"\"\n if self.coeffs.is_complex:\n str_coeffs = self.coeffs._to_str_array()\n else:\n str_coeffs = np.abs(self.coeffs)._to_str_array()\n linewidth = np.get_printoptions().get('linewidth', 75)\n if linewidth < 1:\n linewidth = 1\n if self.coeffs.real[0] >= 0.:\n out = f\"{str_coeffs[0][1:]}\"\n else:\n out = f\"-{str_coeffs[0][1:]}\"\n for i, coef in enumerate(str_coeffs[1:]):\n out += \" \"\n power = str(i + 1)\n # 1st Polynomial coefficient\n if (self.coeffs.is_complex) or self.coeffs.real[i + 1] >= 0.:\n next_term = f\"+ {coef}\"\n else:\n next_term = f\"- {coef}\"\n # Polynomial term\n next_term += self._monome_base_str(power, \"X\")\n # Length of the current line with next term added\n line_len = len(out.split('\\n')[-1]) + len(next_term)\n # If not the last term in the polynomial, it will be two \n # characters longer due to the +/- with the next term\n if i < len(self.coeffs[1:]) - 1:\n line_len += 2\n # Handle linebreaking\n if line_len >= linewidth:\n next_term = next_term.replace(\" \", \"\\n\", 1)\n next_term = next_term.replace(\" \", \" \")\n out += next_term\n return out\n\n @classmethod\n def _monome_base_str(cls, i, var_str):\n return f\"·{var_str}{i.translate(cls._superscript_mapping)}\"\n\n\nclass Xrange_SA(Xrange_polynomial):\n \"\"\"\n One-dimensionnal, extended-range serie approximation class based on\n Xrange_polynomial:\n - provides the same feature as Xrange_polynomial + control of a\n truncature error term\n - For the prupose of truncature error calculation, it is assumed that \n the domain of convergence is enclosed in the unit circle.\n\n Parameters\n ----------\n coeffs: see Xrange_polynomial\n cutdeg: see Xrange_polynomial (Monomes of degree above cutoff will be \n disregarded.)\n err : truncature error term, in X**(cutoff + 1). 
Default to 0.\n \"\"\" \n\n def __init__(self, coeffs, cutdeg, err=Xrange_array(0.)):\n self.err = err\n if not(isinstance(err, Xrange_array)):\n self.err = Xrange_array(err)\n super().__init__(coeffs, cutdeg)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n casted_inputs = ()\n casted_cutdegs = ()\n casted_errs = ()\n\n for x in inputs:\n # Only support operations with instances of \n # Xrange_array._HANDLED_TYPES.\n if isinstance(x, Xrange_SA):\n casted_cutdegs += (x.cutdeg,)\n casted_inputs += (x.coeffs,)\n casted_errs += (x.err,)\n else:\n casted_errs += (0.,)\n if isinstance(x, Xrange_polynomial):\n casted_inputs += (x.coeffs,)\n casted_cutdegs += (x.cutdeg,)\n elif isinstance(x, Xrange_array):\n casted_inputs += (x.flatten(),)\n elif isinstance(x, np.ndarray):\n casted_inputs += (x.flatten().view(Xrange_array),)\n elif isinstance(x, numbers.Number):\n casted_inputs += (Xrange_array([x]),)\n elif isinstance(x, list):\n casted_inputs += (Xrange_array(x),)\n else:\n # Operation not supported (type not handled), return the\n # sentinel value NotImplemented\n return NotImplemented\n\n cutdeg = min(casted_cutdegs)\n if not all(item == cutdeg for item in casted_cutdegs):\n raise ValueError(\"Operation not supported, incompatible cutdegs {}\"\n .format(casted_cutdegs))\n\n out = kwargs.pop(\"out\", None)\n\n if method == \"__call__\":\n if ufunc in [np.add, np.subtract]:\n return self._add(ufunc, casted_inputs, casted_errs,\n cutdeg=cutdeg, out=out)\n elif ufunc is np.negative:\n return self._negative(casted_inputs, casted_errs,\n cutdeg=cutdeg, out=out)\n elif ufunc is np.multiply:\n return self._mul(casted_inputs, casted_errs,\n cutdeg=cutdeg, out=out)\n # Other ufunc not supported\n return NotImplemented\n\n @staticmethod\n def _add(ufunc, inputs, errs, cutdeg, out=None):\n \"\"\" Add or Subtract 2 Xrange_SA \"\"\"\n op0, op1 = inputs\n res_len = min(max(op0.size, op1.size), cutdeg + 1)\n op0_len = min(op0.size, res_len)\n op1_len = min(op1.size, res_len)\n\n dtype=np.result_type(op0._mantissa, op1._mantissa)\n res = Xrange_array(np.zeros([res_len], dtype=dtype))\n\n res[:op0_len] += op0[:op0_len]\n if ufunc is np.add:\n res[:op1_len] += op1[:op1_len]\n elif ufunc is np.subtract: \n res[:op1_len] -= op1[:op1_len]\n\n return Xrange_SA(res, cutdeg=cutdeg, err=sum(errs))\n\n @staticmethod\n def _negative(inputs, errs, cutdeg, out=None):\n \"\"\" Change sign of a Xrange_SA \"\"\"\n op0, = inputs\n err0, = errs\n return Xrange_SA(-op0, cutdeg=cutdeg, err=err0)\n\n @staticmethod\n def _mul(inputs, errs, cutdeg, out=None):\n \"\"\" Multiply 2 Xrange_SA \"\"\"\n op0, op1 = inputs\n # Almost same as Xrange_polynomial but need to take care of the \n # truncature error term\n if op0.size > op1.size:\n op0, op1 = op1, op0\n l0 = op0.size\n l1 = op1.size\n cutoff_res = min(l0 + l1 - 2, cutdeg) # the degree...\n op1 = np.pad(op1, (l0 - 1, l0 - 1),\n mode='constant').view(Xrange_array)\n shift = np.arange(0, cutoff_res + 1)\n take1 = shift[:, np.newaxis] + np.arange(l0 - 1 , -1, -1)\n op_res = np.sum(op0 * np.take(op1, take1), axis=1)\n\n err0, err1 = errs\n # We will use L2 norm to control truncature error term.\n # Heuristic based on random walk / magnitude of the sum of iud random\n # variables\n # Exact term is :\n # op_err0 = err0 * np.sum(np.abs(op1))\n # op_err1 = err1 * np.sum(np.abs(op0))\n op_err0 = err0 * np.sqrt(np.sum(op1.abs2()))\n op_err1 = err1 * np.sqrt(np.sum(op0.abs2()))\n\n if cutdeg < (l0 + l1 - 2):\n # Truncature error term - L2 norm\n shift_errT = 
np.arange(cutoff_res + 1, l0 + l1 - 1)\n take1 = shift_errT[:, np.newaxis] + np.arange(l0 - 1 , -1, -1)\n op_errT = np.sum(op0 * np.take(op1, take1), axis=1)\n # We will use L2 norm to control truncature error term.\n # Exact term is :\n # op_errT = np.sum(np.abs(op_errT))\n op_errT = np.sqrt(np.sum(op_errT.abs2()))\n err = op_err0 + op_err1 + op_errT + err0 * err1\n else:\n err = op_err0 + op_err1 + err0 * err1\n\n return Xrange_SA(op_res, cutdeg=cutdeg, err=err)\n\n\n def __repr__(self):\n return (\"Xrange_SA(cutdeg=\"+ str(self.cutdeg) +\",\\n\" +\n self.__str__() + \")\")\n\n def __str__(self):\n return self._to_str()\n\n def _to_str(self):\n \"\"\"\n Generate the full string representation of the SA, using\n `_monome_base_str` to generate each polynomial term.\n \"\"\"\n out = super()._to_str()\n out += \" // Res <= {}\".format(self.err.__str__()\n ) + self._monome_base_str(str(self.cutdeg + 1), \"X\")\n return out\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.ones" ], [ "numpy.can_cast", "numpy.sqrt", "numpy.get_printoptions", "numpy.take", "numpy.asarray", "numpy.around", "numpy.squeeze", "numpy.dtype", "numpy.broadcast", "numpy.where", "numpy.square", "numpy.frexp", "numpy.pad", "numpy.clip", "numpy.arange", "numpy.empty_like", "numpy.add.reduce", "numpy.ndarray.__setitem__", "numpy.reciprocal", "numpy.array2string", "numpy.zeros", "numpy.log", "numpy.core.defchararray.add", "numpy.isnan", "numpy.ndarray.__getitem__", "numpy.cumprod", "numpy.equal", "numpy.broadcast_arrays", "numpy.divmod", "numpy.not_equal", "numpy.maximum.reduce", "numpy.maximum", "numpy.conj", "numpy.abs", "numpy.int32", "numpy.result_type", "numpy.ldexp", "numpy.vectorize", "numpy.broadcast_to", "numpy.isscalar", "numpy.add", "numpy.angle", "numpy.isinf", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pwambach/cate
[ "956eff12530e4a339f56d6d3739bc41328df4f75" ]
[ "cate/util/im/utils.py" ]
[ "# The MIT License (MIT)\n# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n__author__ = \"Norman Fomferra (Brockmann Consult GmbH)\"\n\nimport numpy as np\n\n\ndef aggregate_ndarray_first(a1, a2, a3, a4):\n return a1\n\n\ndef aggregate_ndarray_min(a1, a2, a3, a4):\n a = np.fmin(a1, a2)\n a = np.fmin(a, a3, out=a)\n a = np.fmin(a, a4, out=a)\n return a\n\n\ndef aggregate_ndarray_max(a1, a2, a3, a4):\n a = np.fmax(a1, a2)\n a = np.fmax(a, a3, out=a)\n a = np.fmax(a, a4, out=a)\n return a\n\n\ndef aggregate_ndarray_sum(a1, a2, a3, a4):\n return a1 + a2 + a3 + a4\n\n\ndef aggregate_ndarray_mean(a1, a2, a3, a4):\n return (a1 + a2 + a3 + a4) / 4.\n\n\ndef downsample_ndarray(a, aggregator=aggregate_ndarray_mean):\n if aggregator is aggregate_ndarray_first:\n # Optimization\n return a[..., 0::2, 0::2]\n else:\n a1 = a[..., 0::2, 0::2]\n a2 = a[..., 0::2, 1::2]\n a3 = a[..., 1::2, 0::2]\n a4 = a[..., 1::2, 1::2]\n return aggregator(a1, a2, a3, a4)\n\n\ndef get_chunk_size(array):\n chunk_size = None\n try:\n # xarray DataArray with dask, returns the size of each individual tile\n chunk_size = array.chunks\n if chunk_size:\n chunk_size = tuple([c[0] if isinstance(c, tuple) else c for c in chunk_size])\n except Exception:\n pass\n if not chunk_size:\n try:\n # netcdf 4\n chunk_size = array.encoding['chunksizes']\n except Exception:\n pass\n return chunk_size\n" ]
[ [ "numpy.fmax", "numpy.fmin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AdrienCorenflos/JSL
[ "8a3ba27179a2bd90207214fccb81df884b05c3d0" ]
[ "jsl/experimental/seql/experiments/poly_logreg_sgd_demo.py" ]
[ "import jax.numpy as jnp\nfrom jax import random, nn\nfrom sklearn.preprocessing import PolynomialFeatures\n\nfrom jsl.experimental.seql.agents.sgd_agent import sgd_agent\nfrom jsl.experimental.seql.environments.base import make_random_poly_classification_environment\nfrom jsl.experimental.seql.experiments.plotting import plot_classification_predictions\nfrom jsl.experimental.seql.utils import binary_cross_entropy, train\n\ndef callback_fn(agent, env, model_fn, obs_noise, degree, **kwargs):\n belief = kwargs[\"belief_state\"]\n mu, sigma = belief.params, None\n\n logprobs, _ = kwargs[\"preds\"]\n y_test = jnp.squeeze(kwargs[\"Y_test\"])\n predictions = jnp.where(logprobs > jnp.log(0.5), 1, 0)\n print(\"Accuracy: \", jnp.mean(jnp.squeeze(predictions)==y_test))\n\n poly = PolynomialFeatures(degree)\n grid = poly.fit_transform(jnp.mgrid[-3:3:100j, -3:3:100j].reshape((2, -1)).T)\n grid_preds = agent.predict(belief, grid)\n\n filename = \"poly_logreg_sgd_ppd\"\n timesteps = [5, 10, 15, 75]\n plot_classification_predictions(env,\n mu,\n sigma,\n obs_noise,\n timesteps,\n grid,\n grid_preds,\n filename,\n model_fn=model_fn,\n **kwargs)\n\ndef model_fn(params, x):\n return nn.log_sigmoid(x @ params)\n\ndef loss_fn(params, x, y, model_fn):\n logprobs = model_fn(params, x)\n return binary_cross_entropy(y, logprobs)\n\ndef main():\n key = random.PRNGKey(0)\n degree = 3\n ntrain = 200 # 80% of the data\n ntest = 50 # 20% of the data\n nfeatures, nclasses = 2, 2\n env = make_random_poly_classification_environment(key,\n degree,\n ntrain,\n ntest,\n nfeatures=nfeatures,\n nclasses=nclasses)\n \n\n \n obs_noise = 0.01\n buffer_size = 1\n input_dim = env.X_train.shape[-1]\n agent = sgd_agent(loss_fn,\n model_fn,\n obs_noise=obs_noise,\n buffer_size=buffer_size)\n\n params = jnp.zeros((input_dim, 1))\n belief = agent.init_state(params)\n\n nsteps = 100\n partial_callback = lambda **kwargs: callback_fn(agent,\n env,\n model_fn,\n obs_noise,\n degree,\n **kwargs)\n\n _, unused_rewards = train(belief,\n agent,\n env,\n nsteps=nsteps,\n callback=partial_callback)\n \n\n \n\nif __name__ == \"__main__\":\n main()" ]
[ [ "sklearn.preprocessing.PolynomialFeatures" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KazutakaYamanouchi/bachelor-study
[ "a5b8392459e7649cb8a35d09e65bd269d13b5297", "a5b8392459e7649cb8a35d09e65bd269d13b5297" ]
[ "utils/dwt.py", "utils/dct.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass LossyYCbCr(nn.Module):\n def forward(self, rgb: torch.Tensor):\n return torch.cat([\n # Y (B, 1, H, W)\n 0.299 * rgb[:, 0:1]\n + 0.587 * rgb[:, 1:2]\n + 0.114 * rgb[:, 2:3],\n # Cb (B, 1, H, W)\n -0.16875 * rgb[:, 0:1]\n - 0.33126 * rgb[:, 1:2]\n + 0.5 * rgb[:, 2:3],\n # Cr (B, 1, H, W)\n 0.5 * rgb[:, 0:1]\n - 0.41869 * rgb[:, 1:2]\n - 0.08131 * rgb[:, 2:3]\n ], dim=1)\n\n\nclass LossyRGB(nn.Module):\n def forward(self, ycbcr: torch.Tensor):\n return torch.cat([\n # R\n ycbcr[:, 0:1]\n + 1.402 * ycbcr[:, 2:3],\n # G\n ycbcr[:, 0:1]\n - 0.34413 * ycbcr[:, 1:2]\n - 0.71414 * ycbcr[:, 2:3],\n # B\n ycbcr[:, 0:1]\n + 1.772 * ycbcr[:, 1:2]\n ], dim=1)\n\n\n# RCT-lossless\nclass LosslessYCbCr(nn.Module):\n def forward(self, rgb: torch.Tensor):\n return torch.cat([\n # Y\n (rgb[:, 0:1] + 2 * rgb[:, 1:2] + rgb[:, 2:3]) / 4,\n # Cb\n rgb[:, 2:3] - rgb[:, 1:2],\n # Cr\n rgb[:, 0:1] - rgb[:, 1:2]\n ], dim=1)\n\n\nclass LosslessRGB(nn.Module):\n def forward(self, ycbcr: torch.Tensor):\n return torch.cat([\n # Y\n ycbcr[:, 2:3]\n + ycbcr[:, 0:1]\n - 0.25 * ycbcr[:, 1:2]\n - 0.25 * ycbcr[:, 2:3],\n # Cb\n ycbcr[:, 0:1]\n - 0.25 * ycbcr[:, 1:2]\n - 0.25 * ycbcr[:, 2:3],\n # Cr\n ycbcr[:, 1:2]\n + ycbcr[:, 0:1]\n - 0.25 * ycbcr[:, 1:2]\n - 0.25 * ycbcr[:, 2:3]\n ], dim=1)\n\n\nclass DWT(nn.Module):\n def __init__(\n self, lossy: bool = True\n ):\n super().__init__()\n if lossy:\n dec_lo = [\n 0.02674875741080976,\n -0.01686411844287495,\n -0.07822326652898785,\n 0.2668641184428723,\n 0.6029490182363579,\n 0.2668641184428723,\n -0.07822326652898785,\n -0.01686411844287495,\n 0.02674875741080976\n ]\n self.to_ycbcr = LossyYCbCr()\n self.to_rgb = LossyRGB()\n print('Lossyモードが選択されました。')\n else:\n dec_lo = [\n -0.125,\n 0.25,\n 0.75,\n 0.25,\n -0.125\n ]\n self.to_ycbcr = LosslessYCbCr()\n self.to_rgb = LosslessRGB()\n print('Losslessモードが選択されました。')\n\n self.dwt_vertical = nn.Conv2d(\n 3, 3, (len(dec_lo), 1), padding=(len(dec_lo) // 2, 0),\n bias=False, padding_mode='reflect')\n self.dwt_horizontal = nn.Conv2d(\n 3, 3, (1, len(dec_lo)), padding=(0, len(dec_lo) // 2),\n bias=False, padding_mode='reflect')\n self.dwt_vertical.weight.requires_grad = False\n self.dwt_horizontal.weight.requires_grad = False\n self.dwt_vertical.weight.fill_(0) # 0埋め\n self.dwt_horizontal.weight.fill_(0) # 0埋め\n\n for c in range(3):\n for i in range(len(dec_lo)):\n # .weight: (In, Out, Vertical K Size, Horizontal K Size)\n self.dwt_vertical.weight[c, c, i, 0] = dec_lo[i]\n self.dwt_horizontal.weight[c, c, 0, i] = dec_lo[i]\n\n def forward(self, image: torch.Tensor, k: int = 1) -> torch.Tensor:\n '''\n Args:\n image: 画素値0.0-1.0の画像バッチ\n '''\n # DCレベルシフト\n # shifted = (image + 1) / 2\n # YCbCr変換(DCレベルシフト済み)\n # ll = self.to_ycbcr(shifted)\n ll = self.to_ycbcr(image)\n # DWT k回\n for i in range(k):\n ll = self.dwt_vertical(self.dwt_horizontal(ll))\n # RGB変換(DCレベルシフト済み)\n rgb_shifted = self.to_rgb(ll)\n # 逆DCレベルシフト\n # rgb = rgb_shifted * 2 - 1\n return rgb_shifted\n", "import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass BWDCT(nn.Module):\n def __init__(self):\n super().__init__()\n self.bwdct = nn.Conv2d(1, 64, 8, 8, bias=False)\n self.bwdct.weight.requires_grad = False\n for m in range(8):\n for n in range(8):\n for p in range(8):\n for q in range(8):\n self.bwdct.weight[p * 8 + q, 0, m, n] \\\n = np.cos(np.pi * (2 * m + 1) * p / 16) \\\n * np.cos(np.pi * (2 * n + 1) * q / 16) \\\n * (np.sqrt(1 / 8) if p == 0 else (1 / 2)) \\\n * (np.sqrt(1 / 8) if q == 0 else (1 / 2))\n\n 
def forward(self, x):\n return self.bwdct(x)\n\n\nclass JPEGEncoder(nn.Module):\n def __init__(\n self, chroma_subsampling=True\n ):\n super().__init__()\n self.bwdct = BWDCT()\n self.chroma_subsampling = chroma_subsampling\n if self.chroma_subsampling:\n self.cs = nn.AvgPool2d(2)\n\n def forward(self, image):\n '''JPEGのDCT係数から正規化済みRGB画素へ変換する。\n Args:\n y_coefs: YのDCT係数\n cr_coefs: CrのDCT係数\n cb_coefs: CbのDCT係数\n Note:\n 入力は(B, 64, H, W)の形状で入力する。\n 各成分はジグザグスキャンせずそのまま平坦化した状態である。\n '''\n if image.size(1) == 1:\n return self.bwdct(image), None\n else:\n # Y\n y = 0.299 * image[:, 0:1] \\\n + 0.587 * image[:, 1:2] \\\n + 0.114 * image[:, 2:3]\n cbcr = torch.cat([\n # Cb\n -0.1687 * image[:, 0:1]\n - 0.3313 * image[:, 1:2]\n + 0.5 * image[:, 2:3]\n + 0.5,\n # Cr\n 0.5 * image[:, 0:1]\n - 0.4187 * image[:, 1:2]\n - 0.0813 * image[:, 2:3]\n + 0.5\n ], dim=1)\n\n if self.chroma_subsampling:\n cbcr = self.cs(cbcr)\n y_blocks = self.bwdct(y)\n cbcr_blocks = torch.cat([\n self.bwdct(cbcr[:, 0:1]),\n self.bwdct(cbcr[:, 1:2])\n ], dim=1)\n return y_blocks, cbcr_blocks\n\n\nclass IBWDCT(nn.Module):\n def __init__(self,):\n super().__init__()\n self.ibwdct = nn.ConvTranspose2d(64, 1, 8, 8, bias=False)\n self.ibwdct.weight.requires_grad = False\n for m in range(8):\n for n in range(8):\n for p in range(8):\n for q in range(8):\n self.ibwdct.weight[p * 8 + q, 0, m, n] \\\n = np.cos(np.pi * (2 * m + 1) * p / 16) \\\n * np.cos(np.pi * (2 * n + 1) * q / 16) \\\n * (np.sqrt(1 / 8) if p == 0 else (1 / 2)) \\\n * (np.sqrt(1 / 8) if q == 0 else (1 / 2))\n\n def forward(self, x):\n return self.ibwdct(x)\n\n\nclass JPEGDecoder(nn.Module):\n def __init__(self):\n super().__init__()\n self.ibwdct = IBWDCT()\n self.ics = nn.UpsamplingBilinear2d(scale_factor=2)\n\n def forward(self, y_cbcr):\n '''JPEGのDCT係数から正規化済みRGB画素へ変換する。\n\n Args:\n y_cbcr: DCT係数(Y, CbCr)のタプル ((B, 64, H, W), (B, 128, H, W))\n Note:\n 各成分はジグザグスキャンせずそのまま平坦化した状態である。\n '''\n y = self.ibwdct(y_cbcr[0])\n if y_cbcr[1] is not None:\n cbcr = torch.cat([\n self.ibwdct(y_cbcr[1][:, :64]),\n self.ibwdct(y_cbcr[1][:, 64:])\n ], dim=1)\n if cbcr.size()[2:] != y.size()[2:]:\n cbcr = self.ics(cbcr)\n return torch.cat([\n # R\n y\n + 1.402 * (cbcr[:, 1:2] - 0.5),\n # G\n y\n - 0.344136286 * (cbcr[:, 0:1] - 0.5)\n - 0.714136286 * (cbcr[:, 1:2] - 0.5),\n # B\n y\n + 1.772 * (cbcr[:, 0:1] - 0.5)\n ], dim=1)\n else:\n return y\n\n\nclass DCT(nn.Module):\n def __init__(self, k: int = 1):\n super().__init__()\n self.encoder = JPEGEncoder()\n self.decoder = JPEGDecoder()\n lpf = torch.zeros(8, 8, device='cuda')\n lpf_list = [8, 6, 5, 4, 3, 2]\n lpf_ratio = lpf_list[k]\n lpf[:lpf_ratio, :lpf_ratio] = 1\n lpf_shift = torch.flatten(lpf)\n self.lpf_shifted = lpf_shift.reshape(64, 1, 1)\n\n def forward(self, images: torch.Tensor, k: int = 1) -> torch.Tensor:\n # クロマサブサンプリングでYとCbCrの解像度が異なるので別で受け取る\n image_shifted = (images + 1) / 2\n coefs_y, coefs_cbcr = self.encoder(image_shifted)\n coefs_cb, coefs_cr = coefs_cbcr[:, :64], coefs_cbcr[:, 64:]\n adaptted_cb = coefs_cb * self.lpf_shifted\n adaptted_cr = coefs_cr * self.lpf_shifted\n adaptted_y = coefs_y * self.lpf_shifted\n\n cbcr = torch.cat([\n adaptted_cb,\n adaptted_cr\n ], dim=1)\n\n rgb_shifted = self.decoder((adaptted_y, cbcr))\n rgb = rgb_shifted * 2 - 1\n return rgb\n" ]
[ [ "torch.cat" ], [ "numpy.sqrt", "torch.nn.ConvTranspose2d", "torch.zeros", "torch.cat", "torch.nn.Conv2d", "numpy.cos", "torch.nn.UpsamplingBilinear2d", "torch.nn.AvgPool2d", "torch.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tjdcs/colour
[ "09413da71b5da57408eb812797c5db1300d4791a" ]
[ "colour/models/rgb/datasets/itur_bt_2020.py" ]
[ "\"\"\"\nITU-R BT.2020 Colourspace\n=========================\n\nDefines the *ITU-R BT.2020* colourspace:\n\n- :attr:`colour.models.RGB_COLOURSPACE_BT2020`.\n\nReferences\n----------\n- :cite:`InternationalTelecommunicationUnion2015h` : International\n Telecommunication Union. (2015). Recommendation ITU-R BT.2020 - Parameter\n values for ultra-high definition television systems for production and\n international programme exchange (pp. 1-8).\n https://www.itu.int/dms_pubrec/itu-r/rec/bt/\\\nR-REC-BT.2020-2-201510-I!!PDF-E.pdf\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\n\nfrom colour.colorimetry import CCS_ILLUMINANTS\nfrom colour.hints import NDArray\nfrom colour.models.rgb import (\n RGB_Colourspace,\n normalised_primary_matrix,\n oetf_BT2020,\n oetf_inverse_BT2020,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"PRIMARIES_BT2020\",\n \"WHITEPOINT_NAME_BT2020\",\n \"CCS_WHITEPOINT_BT2020\",\n \"MATRIX_BT2020_TO_XYZ\",\n \"MATRIX_XYZ_TO_BT2020\",\n \"RGB_COLOURSPACE_BT2020\",\n]\n\nPRIMARIES_BT2020: NDArray = np.array(\n [\n [0.7080, 0.2920],\n [0.1700, 0.7970],\n [0.1310, 0.0460],\n ]\n)\n\"\"\"*ITU-R BT.2020* colourspace primaries.\"\"\"\n\nWHITEPOINT_NAME_BT2020: str = \"D65\"\n\"\"\"*ITU-R BT.2020* colourspace whitepoint name.\"\"\"\n\nCCS_WHITEPOINT_BT2020: NDArray = CCS_ILLUMINANTS[\n \"CIE 1931 2 Degree Standard Observer\"\n][WHITEPOINT_NAME_BT2020]\n\"\"\"*ITU-R BT.2020* colourspace whitepoint chromaticity coordinates.\"\"\"\n\nMATRIX_BT2020_TO_XYZ: NDArray = normalised_primary_matrix(\n PRIMARIES_BT2020, CCS_WHITEPOINT_BT2020\n)\n\"\"\"*ITU-R BT.2020* colourspace to *CIE XYZ* tristimulus values matrix.\"\"\"\n\nMATRIX_XYZ_TO_BT2020: NDArray = np.linalg.inv(MATRIX_BT2020_TO_XYZ)\n\"\"\"*CIE XYZ* tristimulus values to *ITU-R BT.2020* colourspace matrix.\"\"\"\n\nRGB_COLOURSPACE_BT2020: RGB_Colourspace = RGB_Colourspace(\n \"ITU-R BT.2020\",\n PRIMARIES_BT2020,\n CCS_WHITEPOINT_BT2020,\n WHITEPOINT_NAME_BT2020,\n MATRIX_BT2020_TO_XYZ,\n MATRIX_XYZ_TO_BT2020,\n oetf_BT2020,\n oetf_inverse_BT2020,\n)\nRGB_COLOURSPACE_BT2020.__doc__ = \"\"\"\n*ITU-R BT.2020* colourspace.\n\nThe wavelength of the *ITU-R BT.2020* primary colours are:\n\n- 630nm for the red primary colour\n- 532nm for the green primary colour\n- 467nm for the blue primary colour.\n\nReferences\n----------\n:cite:`InternationalTelecommunicationUnion2015h`\n\"\"\"\n" ]
[ [ "numpy.linalg.inv", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Unmesh-Kumar/DMRM
[ "f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02" ]
[ "Decoders/decoder1_attention.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom classifier import SimpleClassifier\n\nclass _netG(nn.Module):\n def __init__(self, args):\n super(_netG, self).__init__()\n\n self.ninp = args.ninp\n self.nhid = args.nhid\n self.nlayers = args.nlayers\n self.dropout = args.dropout\n self.rnn = getattr(nn, 'LSTM')(self.ninp, self.nhid, self.nlayers, bidirectional=False, dropout=self.dropout, batch_first=True)\n self.rnn_type = 'LSTM'\n\n self.decoder =SimpleClassifier(self.nhid*2, self.nhid*4, args.vocab_size, self.dropout)\n self.d = args.dropout\n self.beta = 3\n self.vocab_size = args.vocab_size\n # self.init_weights()\n self.w_q = nn.Linear(self.nhid*2, self.nhid)\n self.ans_q = nn.Linear(self.nhid, self.nhid)\n self.Wa_q = nn.Linear(self.nhid, 1)\n\n self.w_h = nn.Linear(self.nhid*2, self.nhid)\n self.ans_h = nn.Linear(self.nhid, self.nhid)\n self.Wa_h = nn.Linear(self.nhid, 1)\n\n self.w_i = nn.Linear(self.nhid*2, self.nhid)\n self.ans_i = nn.Linear(self.nhid, self.nhid)\n self.Wa_i = nn.Linear(self.nhid, 1)\n\n self.concat = nn.Linear(self.nhid*3, self.nhid)\n # self.fusion = nn.Linear(self.nhid*2, self.nhid*2)\n\n def init_weights(self):\n self.decoder.weight = nn.init.xavier_uniform(self.decoder.weight)\n self.decoder.bias.data.fill_(0)\n\n def forward(self, emb, question, history, image, hidden):\n ques_length = question.size(1)\n his_length = history.size(1)\n img_length = image.size(1)\n batch_size, ans_length, _ = emb.size()\n question = question.contiguous()\n seqLogprobs = []\n for index in range(ans_length):\n input_ans = emb[:, index, :].unsqueeze(1)\n output, hidden = self.rnn(input_ans, hidden)\n input_ans = output.squeeze(1)\n ques_emb = self.w_q(question.view(-1, 2*self.nhid)).view(-1, ques_length, self.nhid)\n input_ans_q = self.ans_q(input_ans).view(-1, 1, self.nhid)\n atten_emb_q = F.tanh(ques_emb + input_ans_q.expand_as(ques_emb))\n ques_atten_weight = F.softmax(self.Wa_q(F.dropout(atten_emb_q, self.d, training=self.training).view(-1, self.nhid)).view(-1, ques_length), 1)\n ques_attn_feat = torch.bmm(ques_atten_weight.view(-1, 1, ques_length), ques_emb.view(-1,ques_length, self.nhid))\n \n input_ans_h = self.ans_h(input_ans).view(-1, 1, self.nhid)\n his_emb = self.w_h(history.view(-1, 2* self.nhid)).view(-1, his_length, self.nhid)\n atten_emb_h = F.tanh(his_emb + input_ans_h.expand_as(his_emb))\n his_atten_weight = F.softmax(self.Wa_h(F.dropout(atten_emb_h, self.d, training=self.training).view(-1, self.nhid)).view(-1, his_length), 1)\n his_attn_feat = torch.bmm(his_atten_weight.view(-1, 1, his_length), his_emb.view(-1, his_length, self.nhid))\n \n input_ans_i = self.ans_i(input_ans).view(-1, 1, self.nhid)\n img_emb = self.w_i(image.view(-1, 2* self.nhid)).view(-1, img_length, self.nhid)\n atten_emb_i = F.tanh(img_emb + input_ans_i.expand_as(img_emb))\n img_atten_weight = F.softmax(self.Wa_i(F.dropout(atten_emb_i, self.d, training=self.training).view(-1, self.nhid)).view(-1, img_length), 1)\n img_attn_feat = torch.bmm(img_atten_weight.view(-1, 1, img_length), img_emb.view(-1, img_length, self.nhid))\n \n concat_feat = torch.cat((ques_attn_feat.view(-1, self.nhid), his_attn_feat.view(-1, self.nhid), img_attn_feat.view(-1, self.nhid)),1)\n concat_feat = F.tanh(self.concat(F.dropout(concat_feat, self.d, training=self.training)))\n fusion_feat = torch.cat((output.squeeze(1), concat_feat),1)\n\n fusion_feat = F.dropout(fusion_feat, self.d, training=self.training)\n decoded = self.decoder(fusion_feat.view(-1, 
2*self.nhid))\n logprob = F.log_softmax(self.beta * decoded, 1)\n seqLogprobs.append(logprob)\n\n return torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1).contiguous(), hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n if self.rnn_type == 'LSTM':\n return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),\n Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))\n else:\n return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())\n\n def sample_beam(self, netW, input, hidden_state, opt={}):\n beam_size = opt.get('beam_size', 10)\n batch_size = input.size(1)\n\n # assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'\n seq_all = torch.LongTensor(self.seq_length, batch_size, beam_size).zero_()\n seq = torch.LongTensor(self.seq_length, batch_size).zero_()\n seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)\n # lets process every image independently for now, for simplicity\n\n self.done_beams = [[] for _ in range(batch_size)]\n for k in range(batch_size):\n # copy the hidden state for beam_size time.\n state = []\n for state_tmp in hidden_state:\n state.append(state_tmp[:, k, :].view(1, 1, -1).expand(1, beam_size, self.nhid).clone())\n\n state = tuple(state)\n\n beam_seq = torch.LongTensor(self.seq_length, beam_size).zero_()\n beam_seq_logprobs = torch.FloatTensor(self.seq_length, beam_size).zero_()\n beam_logprobs_sum = torch.zeros(beam_size) # running sum of logprobs for each beam\n for t in range(self.seq_length + 1):\n if t == 0: # input <bos>\n it = input.data.resize_(1, beam_size).fill_(self.vocab_size)\n xt = netW(Variable(it, requires_grad=False))\n else:\n \"\"\"perform a beam merge. 
that is,\n for every previous beam we now many new possibilities to branch out\n we need to resort our beams to maintain the loop invariant of keeping\n the top beam_size most likely sequences.\"\"\"\n logprobsf = logprobs.float() # lets go to CPU for more efficiency in indexing operations\n ys, ix = torch.sort(logprobsf, 1,\n True) # sorted array of logprobs along each previous beam (last true = descending)\n candidates = []\n cols = min(beam_size, ys.size(1))\n rows = beam_size\n if t == 1: # at first time step only the first beam is active\n rows = 1\n for cc in range(cols): # for each column (word, essentially)\n for qq in range(rows): # for each beam expansion\n # compute logprob of expanding beam q with word in (sorted) position c\n local_logprob = ys[qq, cc]\n if beam_seq[t - 2, qq] == self.vocab_size:\n local_logprob.data.fill_(-9999)\n\n candidate_logprob = beam_logprobs_sum[qq] + local_logprob\n candidates.append({'c': ix.data[qq, cc], 'q': qq, 'p': candidate_logprob.data[0],\n 'r': local_logprob.data[0]})\n\n candidates = sorted(candidates, key=lambda x: -x['p'])\n\n # construct new beams\n new_state = [_.clone() for _ in state]\n if t > 1:\n # well need these as reference when we fork beams around\n beam_seq_prev = beam_seq[:t - 1].clone()\n beam_seq_logprobs_prev = beam_seq_logprobs[:t - 1].clone()\n for vix in range(beam_size):\n v = candidates[vix]\n # fork beam index q into index vix\n if t > 1:\n beam_seq[:t - 1, vix] = beam_seq_prev[:, v['q']]\n beam_seq_logprobs[:t - 1, vix] = beam_seq_logprobs_prev[:, v['q']]\n\n # rearrange recurrent states\n for state_ix in range(len(new_state)):\n # copy over state in previous beam q to new beam at vix\n new_state[state_ix][0, vix] = state[state_ix][0, v['q']] # dimension one is time step\n\n # append new end terminal at the end of this beam\n beam_seq[t - 1, vix] = v['c'] # c'th word is the continuation\n beam_seq_logprobs[t - 1, vix] = v['r'] # the raw logprob here\n beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam\n\n if v['c'] == self.vocab_size or t == self.seq_length:\n # END token special case here, or we reached the end.\n # add the beam to a set of done beams\n self.done_beams[k].append({'seq': beam_seq[:, vix].clone(),\n 'logps': beam_seq_logprobs[:, vix].clone(),\n 'p': beam_logprobs_sum[vix]\n })\n\n # encode as vectors\n it = beam_seq[t - 1].view(1, -1)\n xt = netW(Variable(it.cuda()))\n\n if t >= 1:\n state = new_state\n\n output, state = self.rnn(xt, state)\n\n output = F.dropout(output, self.d, training=self.training)\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n logprobs = F.log_softmax(self.beta * decoded)\n\n self.done_beams[k] = sorted(self.done_beams[k], key=lambda x: -x['p'])\n seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score\n seqLogprobs[:, k] = self.done_beams[k][0]['logps']\n for ii in range(beam_size):\n seq_all[:, k, ii] = self.done_beams[k][ii]['seq']\n\n # return the samples and their log likelihoods\n return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)\n\n def sample(self, netW, input, state, opt={}):\n sample_max = opt.get('sample_max', 1)\n beam_size = opt.get('beam_size', 1)\n temperature = opt.get('temperature', 1.0)\n seq_length = opt.get('seq_length', 9)\n self.seq_length = seq_length\n\n if beam_size > 1:\n return self.sample_beam(netW, input, state, opt)\n\n batch_size = input.size(1)\n seq = []\n seqLogprobs = []\n for t in range(self.seq_length + 1):\n if t == 0: # input <bos>\n it = 
input.data\n elif sample_max:\n sampleLogprobs, it = torch.max(logprobs.data, 1)\n it = it.view(-1).long()\n else:\n if temperature == 1.0:\n prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)\n else:\n # scale logprobs by temperature\n prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()\n it = torch.multinomial(prob_prev, 1).cuda()\n sampleLogprobs = logprobs.gather(1, Variable(it,\n requires_grad=False)) # gather the logprobs at sampled positions\n it = it.view(-1).long() # and flatten indices for downstream processing\n\n xt = netW(Variable(it.view(-1, 1), requires_grad=False))\n\n if t >= 1:\n seq.append(it) # seq[t] the input of t+2 time step\n seqLogprobs.append(sampleLogprobs.view(-1))\n it = torch.unsqueeze(it, 0)\n\n output, state = self.rnn(xt, state)\n output = F.dropout(output, self.d, training=self.training)\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n logprobs = F.log_softmax(self.beta * decoded, 1)\n\n return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)\n\n\n\n\n\n\n\n" ]
[ [ "torch.div", "torch.LongTensor", "torch.max", "torch.nn.functional.log_softmax", "torch.nn.functional.dropout", "torch.zeros", "torch.unsqueeze", "torch.multinomial", "torch.exp", "torch.nn.Linear", "torch.FloatTensor", "torch.sort", "torch.nn.init.xavier_uniform", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jrobrien91/ACT
[ "604b93d75366d23029f89d88df9053d52825c214", "604b93d75366d23029f89d88df9053d52825c214", "604b93d75366d23029f89d88df9053d52825c214", "604b93d75366d23029f89d88df9053d52825c214" ]
[ "examples/plot_daytime_averages.py", "examples/plot_raw_minimpl.py", "examples/plot_multiple_dataset.py", "examples/plot_rh_timeseries.py" ]
[ "\"\"\"\nCalculate and plot daily daytime temperature averages\n-----------------------------------------------------\n\nExample of how to read in MET data and plot up daytime\ntemperature averages using the add_solar_variable function\n\nAuthor: Adam Theisen\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nimport act\n\n# Read in the sample MET data\nobj = act.io.armfiles.read_netcdf(act.tests.EXAMPLE_MET_WILDCARD)\n\n# Add the solar variable, including dawn/dusk to variable\nobj = act.utils.geo_utils.add_solar_variable(obj)\n\n# Using the sun variable, only analyze daytime data\nobj = obj.where(obj['sun_variable'] == 1)\n\n# Take daily mean using xarray features\nobj = obj.resample(time='1d', skipna=True, keep_attrs=True).mean()\n\n# Creat Plot Display\ndisplay = act.plotting.TimeSeriesDisplay(obj, figsize=(15, 10))\ndisplay.plot('temp_mean', linestyle='solid')\ndisplay.day_night_background()\nplt.show()\n\nobj.close()\n", "\"\"\"\nRead and plot a PPI from raw mini-MPL data\n------------------------------------------\n\nExample of how to read in raw data from the mini-MPL\nand plot out the PPI by converting it to PyART\n\nAuthor: Adam Theisen\n\"\"\"\n\n\nfrom matplotlib import pyplot as plt\n\nimport act\n\ntry:\n import pyart\n\n PYART_AVAILABLE = True\nexcept ImportError:\n PYART_AVAILABLE = False\n\n# Read in sample mini-MPL data\nfiles = act.tests.sample_files.EXAMPLE_SIGMA_MPLV5\nobj = act.io.mpl.read_sigma_mplv5(files)\n\n# Create a PyART Radar Object\nradar = act.utils.create_pyart_obj(\n obj, azimuth='azimuth_angle', elevation='elevation_angle', range_var='range'\n)\n\n# Creat Plot Display\nif PYART_AVAILABLE:\n display = pyart.graph.RadarDisplay(radar)\n display.plot('nrb_copol', sweep=0, title_flag=False, vmin=0, vmax=1.0, cmap='jet')\n plt.show()\n", "\"\"\"\nPlot multiple datasets\n----------------------\n\nThis is an example of how to download and\nplot multiple datasets at a time.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport os\n\nimport act\n\n# Place your username and token here\nusername = os.getenv('ARM_USERNAME')\ntoken = os.getenv('ARM_PASSWORD')\n\n# Get data from the web service if username and token are available\n# if not, use test data\nif username is None or token is None or len(username) == 0 or len(token) == 0:\n ceil_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_CEIL1)\n met_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_MET1)\nelse:\n # Download and read data\n act.discovery.download_data(username, token, 'sgpceilC1.b1', '2019-01-01', '2019-01-07')\n ceil_ds = act.io.armfiles.read_netcdf('sgpceilC1.b1/sgpceilC1.b1.201901*.nc')\n met_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_MET_WILDCARD)\n\n# Read in CEIL data and correct it\nceil_ds = act.corrections.ceil.correct_ceil(ceil_ds, -9999.0)\n\n\n# You can use tuples if the datasets in the tuple contain a\n# datastream attribute. 
This is required in all ARM datasets.\ndisplay = act.plotting.TimeSeriesDisplay((ceil_ds, met_ds), subplot_shape=(2,), figsize=(15, 10))\ndisplay.plot('backscatter', 'sgpceilC1.b1', subplot_index=(0,))\ndisplay.plot('temp_mean', 'sgpmetE13.b1', subplot_index=(1,))\ndisplay.day_night_background('sgpmetE13.b1', subplot_index=(1,))\nplt.show()\n\n# You can also use a dictionary so that you can customize\n# your datastream names to something that may be more useful.\ndisplay = act.plotting.TimeSeriesDisplay(\n {'ceiliometer': ceil_ds, 'met': met_ds}, subplot_shape=(2,), figsize=(15, 10)\n)\ndisplay.plot('backscatter', 'ceiliometer', subplot_index=(0,))\ndisplay.plot('temp_mean', 'met', subplot_index=(1,))\ndisplay.day_night_background('met', subplot_index=(1,))\nplt.show()\n\nceil_ds.close()\nmet_ds.close()\n", "\"\"\"\nPlot winds and relative humidity from sounding data\n---------------------------------------------------\n\nThis is an example of how to display wind rose and barb timeseries\nfrom multiple days worth of sounding data.\n\n\"\"\"\n\nfrom matplotlib import pyplot as plt\n\nimport act\n\nsonde_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)\n\nBarbDisplay = act.plotting.TimeSeriesDisplay({'sonde_darwin': sonde_ds}, figsize=(10, 5))\nBarbDisplay.plot_time_height_xsection_from_1d_data(\n 'rh', 'pres', cmap='YlGn', vmin=0, vmax=100, num_time_periods=25\n)\nBarbDisplay.plot_barbs_from_spd_dir('deg', 'wspd', 'pres', num_barbs_x=20)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vigneshyaadav27/Car-Rental
[ "9437f90fb1ed000df9c66ec3911b60c99d2cc7ee" ]
[ "gamblers_problem.py" ]
[ "#######################################################################\r\n# Copyright (C) #\r\n# 2016-2018 Shangtong Zhang([email protected]) #\r\n# 2016 Kenta Shimada([email protected]) #\r\n# Permission given to modify the code as long as you keep this #\r\n# declaration at the top #\r\n#######################################################################\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nmatplotlib.use('Agg')\r\n\r\n# goal\r\nGOAL = 100\r\n\r\n# all states, including state 0 and state 100\r\nSTATES = np.arange(GOAL + 1)\r\n\r\n# probability of head\r\nHEAD_PROB = 0.4\r\n\r\n\r\ndef figure_4_3():\r\n # state value\r\n state_value = np.zeros(GOAL + 1)\r\n state_value[GOAL] = 1.0\r\n\r\n sweeps_history = []\r\n\r\n # value iteration\r\n while True:\r\n old_state_value = state_value.copy()\r\n sweeps_history.append(old_state_value)\r\n\r\n for state in STATES[1:GOAL]:\r\n # get possilbe actions for current state\r\n actions = np.arange(min(state, GOAL - state) + 1)\r\n action_returns = []\r\n for action in actions:\r\n action_returns.append(\r\n HEAD_PROB * state_value[state + action] + (1 - HEAD_PROB) * state_value[state - action])\r\n new_value = np.max(action_returns)\r\n state_value[state] = new_value\r\n delta = abs(state_value - old_state_value).max()\r\n if delta < 1e-9:\r\n sweeps_history.append(state_value)\r\n break\r\n\r\n # compute the optimal policy\r\n policy = np.zeros(GOAL + 1)\r\n for state in STATES[1:GOAL]:\r\n actions = np.arange(min(state, GOAL - state) + 1)\r\n action_returns = []\r\n for action in actions:\r\n action_returns.append(\r\n HEAD_PROB * state_value[state + action] + (1 - HEAD_PROB) * state_value[state - action])\r\n\r\n # round to resemble the figure in the book, see\r\n # https://github.com/ShangtongZhang/reinforcement-learning-an-introduction/issues/83\r\n policy[state] = actions[np.argmax(np.round(action_returns[1:], 5)) + 1]\r\n\r\n plt.figure(figsize=(10, 20))\r\n\r\n plt.subplot(2, 1, 1)\r\n for sweep, state_value in enumerate(sweeps_history):\r\n plt.plot(state_value, label='sweep {}'.format(sweep))\r\n plt.xlabel('Capital')\r\n plt.ylabel('Value estimates')\r\n plt.legend(loc='best')\r\n\r\n plt.subplot(2, 1, 2)\r\n plt.scatter(STATES, policy)\r\n plt.xlabel('Capital')\r\n plt.ylabel('Final policy (stake)')\r\n\r\n plt.savefig('../images/figure_4_3.png')\r\n plt.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n figure_4_3()\r\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "matplotlib.use", "numpy.arange", "matplotlib.pyplot.savefig", "numpy.round", "numpy.max", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aaronspring/xclim
[ "18c5b358d1bed4014b0877df3dd7ff435f772157", "393797308a581428c229920a3b0933c8243cd03a" ]
[ "xclim/analog.py", "xclim/testing/tests/test_units.py" ]
[ "# -*- encoding: utf8 -*-\n# noqa: D205,D400\n\"\"\"\nSpatial analogs\n===============\n\nSpatial analogues are maps showing which areas have a present-day climate that is analogous\nto the future climate of a given place. This type of map can be useful for climate adaptation\nto see how well regions are coping today under specific climate conditions. For example,\nofficials from a city located in a temperate region that may be expecting more heatwaves in\nthe future can learn from the experience of another city where heatwaves are a common occurrence,\nleading to more proactive intervention plans to better deal with new climate conditions.\n\nSpatial analogues are estimated by comparing the distribution of climate indices computed at\nthe target location over the future period with the distribution of the same climate indices\ncomputed over a reference period for multiple candidate regions. A number of methodological\nchoices thus enter the computation:\n\n - Climate indices of interest,\n - Metrics measuring the difference between both distributions,\n - Reference data from which to compute the base indices,\n - A future climate scenario to compute the target indices.\n\nThe climate indices chosen to compute the spatial analogues are usually annual values of\nindices relevant to the intended audience of these maps. For example, in the case of the\nwine grape industry, the climate indices examined could include the length of the frost-free\nseason, growing degree-days, annual winter minimum temperature andand annual number of\nvery cold days [Roy2017]_.\n\n\nMethods to compute the (dis)similarity between samples\n------------------------------------------------------\n\nThis module implements five of the six methods described in [Grenier2013]_ to measure\nthe dissimilarity between two samples. Some of these algorithms can be used to\ntest whether or not two samples have been drawn from the same distribution.\nHere, they are used to find areas with analog climate conditions to a target\nclimate.\n\nMethods available\n~~~~~~~~~~~~~~~~~\n * Standardized Euclidean distance\n * Nearest Neighbour distance\n * Zech-Aslan energy statistic\n * Friedman-Rafsky runs statistic\n * Kolmogorov-Smirnov statistic\n * Kullback-Leibler divergence\n\nAll methods accept arrays, the first is the reference (n, D) and\nthe second is the candidate (m, D). Where the climate indicators\nvary along D and the distribution dimension along n or m. All methods output\na single float.\n\n\n.. rubric:: References\n\n.. [Roy2017] Roy, P., Grenier, P., Barriault, E. et al. Climatic Change (2017) 143: 43. `<doi:10.1007/s10584-017-1960-x>`_\n.. [Grenier2013] Grenier, P., A.-C. Parent, D. Huard, F. Anctil, and D. Chaumont, 2013: An assessment of six dissimilarity metrics for climate analogs. J. Appl. Meteor. Climatol., 52, 733–752, `<doi:10.1175/JAMC-D-12-0170.1>`_\n\"\"\"\n# Code adapted from flyingpigeon.dissimilarity, Nov 2020.\nfrom typing import Sequence, Tuple, Union\n\nimport numpy as np\nimport xarray as xr\nfrom boltons.funcutils import wraps\nfrom pkg_resources import parse_version\nfrom scipy import __version__ as __scipy_version__\nfrom scipy import spatial\nfrom scipy.spatial import cKDTree as KDTree\n\n# TODO: Szekely, G, Rizzo, M (2014) Energy statistics: A class of statistics\n# based on distances. 
J Stat Planning & Inference 143: 1249-1272\n\n# TODO: Hellinger distance\nmetrics = dict()\n\n\ndef spatial_analogs(\n target: xr.Dataset,\n candidates: xr.Dataset,\n dist_dim: Union[str, Sequence[str]] = \"time\",\n method: str = \"kldiv\",\n **kwargs,\n):\n \"\"\"Compute dissimilarity statistics between target points and candidate points.\n\n Spatial analogs based on the comparison of climate indices. The algorithm compares\n the distribution of the reference indices with the distribution of spatially\n distributed candidate indices and returns a value measuring the dissimilarity\n between both distributions over the candidate grid.\n\n Parameters\n ----------\n target : xr.Dataset\n Dataset of the target indices. Only indice variables should be included in the\n dataset's `data_vars`. They should have only the dimension(s) `dist_dim `in common with `candidates`.\n candidates : xr.Dataset\n Dataset of the candidate indices. Only indice variables should be included in\n the dataset's `data_vars`.\n dist_dim : str\n The dimension over which the *distributions* are constructed. This can be a multi-index dimension.\n method : {'seuclidean', 'nearest_neighbor', 'zech_aslan', 'kolmogorov_smirnov', 'friedman_rafsky', 'kldiv'}\n Which method to use when computing the dissimilarity statistic.\n **kwargs\n Any other parameter passed directly to the dissimilarity method.\n\n Returns\n -------\n xr.DataArray\n The dissimilarity statistic over the union of candidates' and target's dimensions.\n \"\"\"\n if parse_version(__scipy_version__) < parse_version(\"1.6.0\") and method in [\n \"kldiv\",\n \"nearest_neighbor\",\n ]:\n raise RuntimeError(f\"Spatial analog method ({method}) requires scipy>=1.6.0.\")\n\n # Create the target DataArray:\n target = xr.concat(\n target.data_vars.values(),\n xr.DataArray(list(target.data_vars.keys()), dims=(\"indices\",), name=\"indices\"),\n )\n\n # Create the target DataArray with different dist_dim\n c_dist_dim = \"candidate_dist_dim\"\n candidates = xr.concat(\n candidates.data_vars.values(),\n xr.DataArray(\n list(candidates.data_vars.keys()),\n dims=(\"indices\",),\n name=\"indices\",\n ),\n ).rename({dist_dim: c_dist_dim})\n\n try:\n metric = metrics[method]\n except KeyError:\n raise ValueError(\n f\"Method {method} is not implemented. 
Available methods are : {','.join(metrics.keys())}.\"\n )\n\n # Compute dissimilarity\n diss = xr.apply_ufunc(\n metric,\n target,\n candidates,\n input_core_dims=[(dist_dim, \"indices\"), (c_dist_dim, \"indices\")],\n output_core_dims=[()],\n vectorize=True,\n dask=\"parallelized\",\n output_dtypes=[float],\n **kwargs,\n )\n diss.name = \"dissimilarity\"\n diss.attrs.update(\n long_name=f\"Dissimilarity between target and candidates, using metric {method}.\",\n indices=\",\".join(target.indices.values),\n metric=method,\n )\n\n return diss\n\n\n# ---------------------------------------------------------------------------- #\n# -------------------------- Utility functions ------------------------------- #\n# ---------------------------------------------------------------------------- #\n\n\ndef standardize(x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Standardize x and y by the square root of the product of their standard deviation.\n\n Parameters\n ----------\n x: np.ndarray\n Array to be compared.\n y: np.ndarray\n Array to be compared.\n\n Returns\n -------\n (ndarray, ndarray)\n Standardized arrays.\n \"\"\"\n s = np.sqrt(x.std(0, ddof=1) * y.std(0, ddof=1))\n return x / s, y / s\n\n\ndef metric(func):\n \"\"\"Register a metric function in the `metrics` mapping and add some preparation/checking code.\n\n All metric functions accept 2D inputs. This reshape 1D inputs to (n, 1) and (m, 1).\n All metric functions are invalid when any non-finite values are present in the inputs.\n \"\"\"\n\n @wraps(func)\n def _metric_overhead(x, y, **kwargs):\n if np.any(np.isnan(x)) or np.any(np.isnan(y)):\n return np.NaN\n\n x = np.atleast_2d(x)\n y = np.atleast_2d(y)\n\n # If array is 1D, flip it.\n if x.shape[0] == 1:\n x = x.T\n if y.shape[0] == 1:\n y = y.T\n\n if x.shape[1] != y.shape[1]:\n raise AttributeError(\"Shape mismatch\")\n\n return func(x, y, **kwargs)\n\n metrics[func.__name__] = _metric_overhead\n return _metric_overhead\n\n\n# ---------------------------------------------------------------------------- #\n# ------------------------ Dissimilarity metrics ----------------------------- #\n# ---------------------------------------------------------------------------- #\n\n\n@metric\ndef seuclidean(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Compute the Euclidean distance between the mean of a multivariate candidate sample with respect to the mean of a reference sample.\n\n Parameters\n ----------\n x : np.ndarray (n,d)\n Reference sample.\n y : np.ndarray (m,d)\n Candidate sample.\n\n Returns\n -------\n float\n Standardized Euclidean Distance between the mean of the samples\n ranging from 0 to infinity.\n\n Notes\n -----\n This metric considers neither the information from individual points nor\n the standard deviation of the candidate distribution.\n\n References\n ----------\n Veloz et al. (2011) Identifying climatic analogs for Wisconsin under\n 21st-century climate-change scenarios. 
Climatic Change,\n DOI 10.1007/s10584-011-0261-z.\n \"\"\"\n mx = x.mean(axis=0)\n my = y.mean(axis=0)\n\n return spatial.distance.seuclidean(mx, my, x.var(axis=0, ddof=1))\n\n\n@metric\ndef nearest_neighbor(x: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute a dissimilarity metric based on the number of points in the pooled sample whose nearest neighbor belongs to the same distribution.\n\n Parameters\n ----------\n x : np.ndarray (n,d)\n Reference sample.\n y : np.ndarray (m,d)\n Candidate sample.\n\n Returns\n -------\n float\n Nearest-Neighbor dissimilarity metric ranging from 0 to 1.\n\n References\n ----------\n Henze N. (1988) A Multivariate two-sample test based on the number of\n nearest neighbor type coincidences. Ann. of Stat., Vol. 16, No.2, 772-783.\n \"\"\"\n x, y = standardize(x, y)\n\n nx, _ = x.shape\n\n # Pool the samples and find the nearest neighbours\n xy = np.vstack([x, y])\n tree = KDTree(xy)\n _, ind = tree.query(xy, k=2, eps=0, p=2, workers=2)\n\n # Identify points whose neighbors are from the same sample\n same = ~np.logical_xor(*(ind < nx).T)\n\n return same.mean()\n\n\n@metric\ndef zech_aslan(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Compute the Zech-Aslan energy distance dissimimilarity metric based on an analogy with the energy of a cloud of electrical charges.\n\n Parameters\n ----------\n x : np.ndarray (n,d)\n Reference sample.\n y : np.ndarray (m,d)\n Candidate sample.\n\n Returns\n -------\n float\n Zech-Aslan dissimilarity metric ranging from -infinity to infinity.\n\n References\n ----------\n Zech G. and Aslan B. (2003) A Multivariate two-sample test based on the\n concept of minimum energy. PHYStat2003, SLAC, Stanford, CA, Sep 8-11.\n Aslan B. and Zech G. (2008) A new class of binning-free, multivariate\n goodness-of-fit tests: the energy tests. arXiV:hep-ex/0203010v5.\n \"\"\"\n nx, d = x.shape\n ny, d = y.shape\n\n v = (x.std(axis=0, ddof=1) * y.std(axis=0, ddof=1)).astype(np.double)\n\n dx = spatial.distance.pdist(x, \"seuclidean\", V=v)\n dy = spatial.distance.pdist(y, \"seuclidean\", V=v)\n dxy = spatial.distance.cdist(x, y, \"seuclidean\", V=v)\n\n phix = -np.log(dx).sum() / nx / (nx - 1)\n phiy = -np.log(dy).sum() / ny / (ny - 1)\n phixy = np.log(dxy).sum() / nx / ny\n return phix + phiy + phixy\n\n\n@metric\ndef skezely_rizzo(x, y):\n \"\"\"\n Compute the Skezely-Rizzo energy distance dissimimilarity metric based on an analogy with the energy of a cloud of electrical charges.\n\n Parameters\n ----------\n x : ndarray (n,d)\n Reference sample.\n y : ndarray (m,d)\n Candidate sample.\n\n Returns\n -------\n float\n Skezely-Rizzo dissimilarity metric ranging from -infinity to infinity.\n\n References\n ----------\n TODO\n \"\"\"\n raise NotImplementedError\n # nx, d = x.shape\n # ny, d = y.shape\n #\n # v = x.std(0, ddof=1) * y.std(0, ddof=1)\n #\n # dx = spatial.distance.pdist(x, 'seuclidean', V=v)\n # dy = spatial.distance.pdist(y, 'seuclidean', V=v)\n # dxy = spatial.distance.cdist(x, y, 'seuclidean', V=v)\n #\n # phix = -np.log(dx).sum() / nx / (nx - 1)\n # phiy = -np.log(dy).sum() / ny / (ny - 1)\n # phixy = np.log(dxy).sum() / nx / ny\n\n # z = dxy.sum() * 2. 
/ (nx*ny) - (1./nx**2) *\n\n # z = (2 / (n * m)) * sum(dxy(:)) - (1 / (n ^ 2)) * sum(2 * dx) - (1 /\n # (m ^ 2)) * sum(2 * dy);\n # z = ((n * m) / (n + m)) * z;\n\n\n@metric\ndef friedman_rafsky(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Compute a dissimilarity metric based on the Friedman-Rafsky runs statistics.\n\n The algorithm builds a minimal spanning tree (the subset of edges\n connecting all points that minimizes the total edge length) then counts\n the edges linking points from the same distribution.\n\n Parameters\n ----------\n x : np.ndarray (n,d)\n Reference sample.\n y : np.ndarray (m,d)\n Candidate sample.\n\n Returns\n -------\n float\n Friedman-Rafsky dissimilarity metric ranging from 0 to (m+n-1)/(m+n).\n\n References\n ----------\n Friedman J.H. and Rafsky L.C. (1979) Multivariate generaliations of the\n Wald-Wolfowitz and Smirnov two-sample tests. Annals of Stat. Vol.7, No. 4, 697-717.\n \"\"\"\n from scipy.sparse.csgraph import minimum_spanning_tree\n from sklearn import neighbors\n\n nx, _ = x.shape\n ny, _ = y.shape\n n = nx + ny\n\n xy = np.vstack([x, y])\n # Compute the NNs and the minimum spanning tree\n g = neighbors.kneighbors_graph(xy, n_neighbors=n - 1, mode=\"distance\")\n mst = minimum_spanning_tree(g, overwrite=True)\n edges = np.array(mst.nonzero()).T\n\n # Number of points whose neighbor is from the other sample\n diff = np.logical_xor(*(edges < nx).T).sum()\n\n return 1.0 - (1.0 + diff) / n\n\n\n@metric\ndef kolmogorov_smirnov(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Compute the Kolmogorov-Smirnov statistic applied to two multivariate samples as described by Fasano and Franceschini.\n\n Parameters\n ----------\n x : np.ndarray (n,d)\n Reference sample.\n y : np.ndarray (m,d)\n Candidate sample.\n\n Returns\n -------\n float\n Kolmogorov-Smirnov dissimilarity metric ranging from 0 to 1.\n\n References\n ----------\n Fasano G. and Francheschini A. (1987) A multidimensional version\n of the Kolmogorov-Smirnov test. Monthly Notices of the Royal Astronomical Society, vol. 225, pp. 155-170.\n \"\"\"\n\n def pivot(x, y):\n nx, d = x.shape\n ny, d = y.shape\n\n # Multiplicative factor converting d-dim booleans to a unique integer.\n mf = (2 ** np.arange(d)).reshape(1, d, 1)\n minlength = 2 ** d\n\n # Assign a unique integer according on whether or not x[i] <= sample\n ix = ((x.T <= np.atleast_3d(x)) * mf).sum(1)\n iy = ((x.T <= np.atleast_3d(y)) * mf).sum(1)\n\n # Count the number of samples in each quadrant\n cx = 1.0 * np.apply_along_axis(np.bincount, 0, ix, minlength=minlength) / nx\n cy = 1.0 * np.apply_along_axis(np.bincount, 0, iy, minlength=minlength) / ny\n\n # This is from https://github.com/syrte/ndtest/blob/master/ndtest.py\n # D = cx - cy\n # D[0,:] -= 1. / nx # I don't understand this...\n # dmin, dmax = -D.min(), D.max() + .1 / nx\n\n return np.max(np.abs(cx - cy))\n\n return max(pivot(x, y), pivot(y, x))\n\n\n@metric\ndef kldiv(\n x: np.ndarray, y: np.ndarray, *, k: Union[int, Sequence[int]] = 1\n) -> Union[float, Sequence[float]]:\n r\"\"\"\n Compute the Kullback-Leibler divergence between two multivariate samples.\n\n .. 
math\n D(P||Q) = \"\\\"frac{d}{n} \"\\\"sum_i^n \"\\\"log{\"\\\"frac{r_k(x_i)}{s_k(x_i)}} + \"\\\"log{\"\\\"frac{m}{n-1}}\n\n where r_k(x_i) and s_k(x_i) are, respectively, the euclidean distance\n to the kth neighbour of x_i in the x array (excepting x_i) and\n in the y array.\n\n Parameters\n ----------\n x : np.ndarray (n,d)\n Samples from distribution P, which typically represents the true\n distribution (reference).\n y : np.ndarray (m,d)\n Samples from distribution Q, which typically represents the\n approximate distribution (candidate)\n k : int or sequence\n The kth neighbours to look for when estimating the density of the\n distributions. Defaults to 1, which can be noisy.\n\n Returns\n -------\n float or sequence\n The estimated Kullback-Leibler divergence D(P||Q) computed from\n the distances to the kth neighbour.\n\n Notes\n -----\n In information theory, the Kullback–Leibler divergence is a non-symmetric\n measure of the difference between two probability distributions P and Q,\n where P is the \"true\" distribution and Q an approximation. This nuance is\n important because D(P||Q) is not equal to D(Q||P).\n\n For probability distributions P and Q of a continuous random variable,\n the K–L divergence is defined as:\n\n D_{KL}(P||Q) = \"\\\"int p(x) \"\\\"log{p()/q(x)} dx\n\n This formula assumes we have a representation of the probability\n densities p(x) and q(x). In many cases, we only have samples from the\n distribution, and most methods first estimate the densities from the\n samples and then proceed to compute the K-L divergence. In Perez-Cruz,\n the authors propose an algorithm to estimate the K-L divergence directly\n from the sample using an empirical CDF. Even though the CDFs do not\n converge to their true values, the paper proves that the K-L divergence\n almost surely does converge to its true value.\n\n References\n ----------\n Kullback-Leibler Divergence Estimation of Continuous Distributions (2008).\n Fernando Pérez-Cruz.\n \"\"\"\n mk = np.iterable(k)\n ka = np.atleast_1d(k)\n\n nx, d = x.shape\n ny, d = y.shape\n\n # Limit the number of dimensions to 10, too slow otherwise.\n if d > 10:\n raise ValueError(\"Too many dimensions: {}.\".format(d))\n\n # Not enough data to draw conclusions.\n if nx < 5 or ny < 5:\n return np.nan if not mk else [np.nan] * len(k)\n\n # Build a KD tree representation of the samples.\n xtree = KDTree(x)\n ytree = KDTree(y)\n\n # Get the k'th nearest neighbour from each points in x for both x and y.\n # We get the values for K + 1 to make sure the output is a 2D array.\n kmax = max(ka) + 1\n r, _ = xtree.query(x, k=kmax, eps=0, p=2, workers=2)\n s, _ = ytree.query(x, k=kmax, eps=0, p=2, workers=2)\n\n # There is a mistake in the paper. In Eq. 
14, the right side misses a\n # negative sign on the first term of the right hand side.\n out = list()\n for ki in ka:\n # The 0th nearest neighbour of x[i] in x is x[i] itself.\n # Hence we take the k'th + 1, which in 0-based indexing is given by\n # index k.\n out.append(\n -np.log(r[:, ki] / s[:, ki - 1]).sum() * d / nx + np.log(ny / (nx - 1.0))\n )\n\n if mk:\n return out\n return out[0]\n", "import numpy as np\nimport pytest\nimport xarray as xr\nfrom dask import array as dsk\n\nfrom xclim import indices, set_options\nfrom xclim.core.units import (\n check_units,\n convert_units_to,\n pint2cfunits,\n pint_multiply,\n str2pint,\n units,\n units2pint,\n)\nfrom xclim.core.utils import ValidationError\n\n\nclass TestUnits:\n def test_temperature(self):\n assert 4 * units.d == 4 * units.day\n Q_ = units.Quantity\n assert Q_(1, units.C) == Q_(1, units.degC)\n\n def test_hydro(self):\n with units.context(\"hydro\"):\n q = 1 * units.kg / units.m ** 2 / units.s\n assert q.to(\"mm/day\") == q.to(\"mm/d\")\n\n def test_lat_lon(self):\n assert 100 * units.degreeN == 100 * units.degree\n\n def test_pcic(self):\n with units.context(\"hydro\"):\n fu = units.parse_units(\"kilogram / d / meter ** 2\")\n tu = units.parse_units(\"mm/day\")\n np.isclose(1 * fu, 1 * tu)\n\n def test_dimensionality(self):\n with units.context(\"hydro\"):\n fu = 1 * units.parse_units(\"kg / m**2 / s\")\n tu = 1 * units.parse_units(\"mm / d\")\n fu.to(\"mmday\")\n tu.to(\"mmday\")\n\n def test_fraction(self):\n q = 5 * units.percent\n assert q.to(\"dimensionless\") == 0.05\n\n q = 5 * units.parse_units(\"pct\")\n assert q.to(\"dimensionless\") == 0.05\n\n\nclass TestConvertUnitsTo:\n def test_deprecation(self, tas_series):\n with pytest.warns(FutureWarning):\n out = convert_units_to(0, units.K)\n assert out == 273.15\n\n with pytest.warns(FutureWarning):\n out = convert_units_to(10, units.mm / units.day, context=\"hydro\")\n assert out == 10\n\n with pytest.warns(FutureWarning):\n tas = tas_series(np.arange(365), start=\"1/1/2001\")\n out = indices.tx_days_above(tas, 30)\n\n out1 = indices.tx_days_above(tas, \"30 degC\")\n out2 = indices.tx_days_above(tas, \"303.15 K\")\n np.testing.assert_array_equal(out, out1)\n np.testing.assert_array_equal(out, out2)\n assert out1.name == tas.name\n\n def test_fraction(self):\n out = convert_units_to(xr.DataArray([10], attrs={\"units\": \"%\"}), \"\")\n assert out == 0.1\n\n def test_lazy(self, pr_series):\n pr = pr_series(np.arange(365), start=\"1/1/2001\").chunk({\"time\": 100})\n out = convert_units_to(pr, \"mm/day\")\n assert isinstance(out.data, dsk.Array)\n\n def test_offset_confusion(self):\n out = convert_units_to(\"10 degC days\", \"K days\")\n assert out == 10\n\n\nclass TestUnitConversion:\n def test_pint2cfunits(self):\n u = units(\"mm/d\")\n assert pint2cfunits(u.units) == \"mm d-1\"\n\n u = units(\"percent\")\n assert pint2cfunits(u.units) == \"%\"\n\n u = units(\"pct\")\n assert pint2cfunits(u.units) == \"%\"\n\n def test_units2pint(self, pr_series):\n u = units2pint(pr_series([1, 2]))\n assert (str(u)) == \"kilogram / meter ** 2 / second\"\n assert pint2cfunits(u) == \"kg m-2 s-1\"\n\n u = units2pint(\"m^3 s-1\")\n assert str(u) == \"meter ** 3 / second\"\n assert pint2cfunits(u) == \"m^3 s-1\"\n\n u = units2pint(\"kg m-2 s-1\")\n assert (str(u)) == \"kilogram / meter ** 2 / second\"\n\n u = units2pint(\"%\")\n assert str(u) == \"percent\"\n\n u = units2pint(\"1\")\n assert str(u) == \"dimensionless\"\n\n u = units2pint(\"mm s-1\")\n assert str(u) == \"millimeter / 
second\"\n\n def test_pint_multiply(self, pr_series):\n a = pr_series([1, 2, 3])\n out = pint_multiply(a, 1 * units.days)\n assert out[0] == 1 * 60 * 60 * 24\n assert out.units == \"kg m-2\"\n\n def test_str2pint(self):\n Q_ = units.Quantity\n assert str2pint(\"-0.78 m\") == Q_(-0.78, units=\"meter\")\n assert str2pint(\"m kg/s\") == Q_(1, units=\"meter kilogram/second\")\n assert str2pint(\"11.8 degC days\") == Q_(11.8, units=\"delta_degree_Celsius days\")\n assert str2pint(\"nan m^2 K^-3\").units == Q_(1, units=\"m²/K³\").units\n\n\nclass TestCheckUnits:\n def test_basic(self):\n check_units(\"%\", \"[]\")\n check_units(\"pct\", \"[]\")\n check_units(\"mm/day\", \"[precipitation]\")\n check_units(\"mm/s\", \"[precipitation]\")\n check_units(\"kg/m2/s\", \"[precipitation]\")\n check_units(\"kg/m2\", \"[length]\")\n check_units(\"cms\", \"[discharge]\")\n check_units(\"m3/s\", \"[discharge]\")\n check_units(\"m/s\", \"[speed]\")\n check_units(\"km/h\", \"[speed]\")\n\n with set_options(data_validation=\"raise\"):\n with pytest.raises(ValidationError):\n check_units(\"mm\", \"[precipitation]\")\n\n with pytest.raises(ValidationError):\n check_units(\"m3\", \"[discharge]\")\n\n def test_user_error(self):\n with pytest.raises(ValidationError):\n check_units(\"deg C\", \"[temperature]\")\n" ]
[ [ "numpy.logical_xor", "numpy.log", "numpy.abs", "numpy.isnan", "sklearn.neighbors.kneighbors_graph", "numpy.arange", "scipy.spatial.distance.cdist", "numpy.atleast_1d", "numpy.atleast_2d", "numpy.atleast_3d", "scipy.spatial.distance.pdist", "numpy.apply_along_axis", "scipy.sparse.csgraph.minimum_spanning_tree", "numpy.iterable", "scipy.spatial.cKDTree", "numpy.vstack" ], [ "numpy.testing.assert_array_equal", "numpy.arange", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Javilleiro/-SDC_ND_P2-Advanced-Lane-Finding
[ "080466fa34c4d51856c30b47894828e3cd399a63" ]
[ "line.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 7 11:38:55 2021\n\n@author: dahouse\n\"\"\"\nimport numpy as np\n\n# Define a class to receive the characteristics of each line detection\nclass Line():\n def __init__(self):\n # was the line detected in the last iteration?\n self.detected = False \n # x values of the last n fits of the line\n self.recent_xfitted = [] \n #average x values of the fitted line over the last n iterations\n self.bestx = None \n #polynomial coefficients averaged over the last n iterations\n self.best_fit = None \n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])] \n #radius of curvature of the line in some units\n self.radius_of_curvature = None \n #distance in meters of vehicle center from the line\n self.line_base_pos = None \n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float') \n #x values for detected line pixels\n self.allx = None \n #y values for detected line pixels\n self.ally = None \n " ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlainLich/COVID-Data
[ "43d7f950c86270bfe411af8bc899464f0599f48e" ]
[ "source/USA-covidtracking-com.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Analyze population data from https://covidtracking.com\n# \n# \n# **Note:** This is a Jupyter notebook which is also available as its executable export as a Python 3 script (therefore with automatically generated comments).\n\n# ### Sept 29,2021: Obsolete data\n# Our source https://covidtracking.com/data/api says:\n# - `As of March 7, 2021 we are no longer collecting new data. Learn about available federal data at https://covidtracking.com/analysis-updates/federal-covid-data-101-how-to-find-data.`\n# - https://covidtracking.com/analysis-updates/simple-covid-data\n# - https://covidtracking.com/about-data/data-summary\n# - https://covidtracking.com/about-data/federal-resources\n# \n# **The following loads and analyses data up to March 7, 2021.**\n\n# # Libraries\n\n# In[ ]:\n\n\nimport sys,os\naddPath= [os.path.abspath(\"../venv/lib/python3.9/site-packages/\"),\n os.path.abspath(\"../source\")]\naddPath.extend(sys.path)\nsys.path = addPath\n\n\n# In[ ]:\n\n\n# Sys import\nimport sys, os, re\n# Common imports\nimport math\nimport numpy as NP\nimport numpy.random as RAND\nimport scipy.stats as STATS\nfrom scipy import sparse\nfrom scipy import linalg\n\n# Better formatting functions\nfrom IPython.display import display, HTML\nfrom IPython import get_ipython\n\nimport matplotlib as MPL\nimport matplotlib.pyplot as PLT\nimport seaborn as SNS\nSNS.set(font_scale=1)\n\n# Python programming\nfrom itertools import cycle\nfrom time import time\nimport datetime\n\n# Using pandas\nimport pandas as PAN\nimport xlrd\n\n\n# In[ ]:\n\n\nsys.path.append('/home/alain/test/MachLearn/COVID/source')\n\n\n# In[ ]:\n\n\nimport libApp.appUSA as appUSA\n\n\n# In[ ]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\nprint(\"For now, reduce python warnings, I will look into this later\")\n\n\n# ### Import my own modules\n# The next cell attempts to give user some information if things improperly setup.\n# Intended to work both in Jupyter and when executing the Python file directly.\n\n# In[ ]:\n\n\nif not get_ipython() is None and os.path.abspath(\"../source/\") not in sys.path:\n sys.path.append(os.path.abspath(\"../source/\"))\ntry:\n from lib.utilities import *\n from lib.figureHelpers import *\n from lib.DataMgrJSON import *\n from lib.DataMgr import *\n from lib.pandaUtils import *\nexcept Exception as err:\n print(\"Could not find library 'lib' with contents 'DataGouvFr' \")\n if get_ipython() is None:\n print(\"Check the PYTHONPATH environment variable which should point to 'source' wich contains 'lib'\")\n else:\n print(\"You are supposed to be running in JupySessions, and '../source/lib' should exist\")\n raise err\n\n\n# In[ ]:\n\n\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:100% !important; }</style>\"))\n\n\n# ## Check environment\n# \n# It is expected that:\n# - your working directory is named `JupySessions`, \n# - that it has subdirectories \n# - `images/*` where generated images may be stored to avoid overcrowding. 
\n# - At the same level as your working dir there should be directories \n# - `../data` for storing input data and \n# - `../source` for python scripts.\n# \n# My package library is in `../source/lib`, and users running under Python (not in Jupyter) should\n# set their PYTHONPATH to include \"../source\" ( *or whatever appropriate* ).\n\n# In[ ]:\n\n\ncheckSetup(chap=\"Chap04\")\nImgMgr = ImageMgr(chapdir=\"Chap04\")\n\n\n# # Load Data\n\n# ## Functions\n\n# ## Load CSV and XLSX data from remote \n# The `dataFileVMgr` will manage a cache of data files in `../dataUSCovidTrack`.\n# \n# We check what is in the cache/data directory; for each file, we identify the latest version, \n# and list this below to make sure. Files of interest are documented in `.filespecs.json`\n# \n# Consulted: https://github.com/COVID19Tracking/covid-tracking-api\n# \n# Downloaded: see `.filespecs.json`\n\n# In[ ]:\n\n\ndataFileVMgr = manageAndCacheFilesJSONHandwritten(\"../dataUSCovidTrack\")\n\n\n# In[ ]:\n\n\ndataFileVMgr.getRemoteInfo()\ndataFileVMgr.updatePrepare()\ndataFileVMgr.cacheUpdate()\n\n\n# In[ ]:\n\n\nprint(\"Most recent versions of files in data directory:\")\nfor f in dataFileVMgr.listMostRecent() :\n print(f\"\\t{f}\")\n\n\n# In[ ]:\n\n\nlast = lambda x: dataFileVMgr.getRecentVersion(x,default=True)\n\n\n# This ensures we load the most recent version, so that it is not required to update the list \n# below. The timestamps shown in the following sequence will be update by the call to `getRecentVersion`.\n\n# In[ ]:\n\n\nUSStatesDailyCSV = last('CTStatesDaily.csv' ) \nUSStatesInfoCSV = last('CTStatesInfo.csv')\nUSDailyCSV = last('CTUSDaily.csv')\n\nUSAPopChangeCSV = last('USACensusPopchange.csv') \nUSAPopChangeRankCSV = last('USACensusPopchangeRanks.csv')\n\n\n# Now load the stuff\n\n# In[ ]:\n\n\nad = lambda x: \"../dataUSCovidTrack/\"+x\n\ndata_USStatesDaily = read_csvPandas(ad(USStatesDailyCSV) , error_bad_lines=False, sep=\",\" )\ndata_USStatesInfo = read_csvPandas(ad(USStatesInfoCSV), error_bad_lines=False, sep=\",\" )\ndata_USDaily = read_csvPandas(ad(USDailyCSV), error_bad_lines=False, sep=\",\" )\ndata_USAPopChange = read_csvPandas(ad(USAPopChangeCSV) , error_bad_lines=False, sep=\",\" )\ndata_USAPopChangeRank = read_csvPandas(ad(USAPopChangeRankCSV), error_bad_lines=False, sep=\",\" )\n\n\n# Show the shape of the loaded data:\n\n# In[ ]:\n\n\ndef showBasics(data,dataName):\n print(f\"{dataName:24}\\thas shape {data.shape}\")\n\ndataListDescr = ( (data_USStatesDaily, \"data_USStatesDaily\"),\n (data_USStatesInfo, \"data_USStatesInfo\"),\n (data_USDaily , \"data_USDaily\"),\n (data_USAPopChange, \"data_USAPopChange\"),\n (data_USAPopChangeRank, \"data_USAPopChangeRank\"),\n )\n \nfor (dat,name) in dataListDescr:\n showBasics(dat,name)\n\n\n# In[ ]:\n\n\nfor (dat,name) in dataListDescr:\n if name[0:5]==\"meta_\": continue\n print(f\"\\nDescription of data in '{name}'\\n\")\n display(dat.describe().transpose())\n\n\n# In[ ]:\n\n\nfor (dat,name) in dataListDescr:\n if name[0:5]==\"meta_\": continue\n print(f\"\\nInformation about '{name}'\\n\")\n dat.info()\n\n\n# ### Get demographics information\n# The metadata is in `../dataUSCovidTrack/*.pdf`. We need to preprocess the demographics information for ease of use below. 
Notice that column `STATE` features state's **FIPS codes**.\n\n# In[ ]:\n\n\ndemogrCols=(\"SUMLEV\",\"STATE\",\"NAME\",\"POPESTIMATE2019\" )\ndemogrX = data_USAPopChange.loc[:,demogrCols]\ndemogrX[\"SUMLEV\"]== 40\ndemogr = demogrX[demogrX[\"SUMLEV\"]== 40 ].copy() \n\n\n# In[ ]:\n\n\ndtCols = ('date','fips', 'state', \n 'positive', 'negative', \n 'hospitalizedCurrently', 'hospitalizedCumulative', \n 'inIcuCurrently', 'inIcuCumulative',\n 'onVentilatorCurrently', 'onVentilatorCumulative', \n 'recovered','death', 'hospitalized'\n )\n\n\n# In[ ]:\n\n\ndt = data_USStatesDaily.loc[ :, dtCols].copy()\ndt[\"dateNum\"] = PAN.to_datetime(dt.loc[:,\"date\"], format=\"%Y%m%d\")\ndateStart = dt[\"dateNum\"].min()\ndateEnd = dt[\"dateNum\"].max() \ndateSpan = dateEnd - dateStart \nprint(f\"Our statistics span {dateSpan.days+1} days, start: {dateStart} and end {dateEnd}\")\ndt[\"elapsedDays\"] = (dt[\"dateNum\"] - dateStart).dt.days\n\ndt = dt.set_index(\"state\")\ndtg = dt.groupby(\"state\")\n\n#dtx = dt[dt.index == \"Europe\"]\n#dtg = dtx.groupby(\"countriesAndTerritories\")\n\n\n# Now, the figure making process is generalized into this class, since we plan to emit multiple figures.\n\n# First attempt, just get the first!\n\n# In[ ]:\n\n\nplotCols=(\"recovered\",\"death\",\"hospitalized\")\n\npsFig = appUSA.perStateFigure(dateStart)\npsFig.getDemographics(data_USAPopChange)\npsFig.initPainter(subnodeSpec=15, maxCol=3)\npsFig.mkImage(dtg,plotCols)\nImgMgr.save_fig(\"FIG001\")\nprint(f\"Had issues with state encodings:{psFig.abbrevIssueList}\")\n\n\n# ## Now select States according to multiple criteria\n# ### Start with most populated states\n\n# In[ ]:\n\n\ntble = psFig.getPopStateTble(dtg)\n\n\n# In[ ]:\n\n\nmostPopulated = tble.sort_values(by=[\"pop\"], ascending=False,).iloc[:15,0].values\n\n\n# In[ ]:\n\n\npsFig2 = appUSA.perStateSelected(dateStart,mostPopulated)\npsFig2.getDemographics(data_USAPopChange)\npsFig2.initPainter(subnodeSpec=15, maxCol=3)\npsFig2.mkImage(dtg,plotCols)\nImgMgr.save_fig(\"FIG002\")\nprint(f\"Had issues with state encodings:{psFig2.abbrevIssueList}\")\n\n\n# In[ ]:\n\n\ndtgMax = dtg.max().loc[:,[\"fips\",\"death\",\"recovered\",\"hospitalized\"]]\n\ndtgMerged = PAN.merge(dtgMax.reset_index(), demogr, left_on=\"fips\", right_on=\"STATE\")\ndtgMerged[\"deathPM\"]= dtgMerged.loc[:,\"death\"]/dtgMerged.loc[:,\"POPESTIMATE2019\"]*1.0e6\n\nmostDeadly = dtgMerged.sort_values(by=[\"deathPM\"], ascending=False,).iloc[:15,0].values\n\n\n# In[ ]:\n\n\npsFig3 = appUSA.perStateSelected(dateStart,mostDeadly)\npsFig3.getDemographics(data_USAPopChange)\npsFig3.initPainter(subnodeSpec=15, maxCol=3)\npsFig3.mkImage(dtg,plotCols)\nImgMgr.save_fig(\"FIG003\")\nprint(f\"Had issues with state encodings:{psFig3.abbrevIssueList}\")\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
garyteofanus/pandas
[ "cc51219fad8add8f442b847ccdabd3f9e9077cb6", "cc51219fad8add8f442b847ccdabd3f9e9077cb6", "cc51219fad8add8f442b847ccdabd3f9e9077cb6", "cc51219fad8add8f442b847ccdabd3f9e9077cb6", "cc51219fad8add8f442b847ccdabd3f9e9077cb6", "cc51219fad8add8f442b847ccdabd3f9e9077cb6" ]
[ "asv_bench/benchmarks/rolling.py", "pandas/tests/test_nanops.py", "asv_bench/benchmarks/multiindex_object.py", "pandas/tests/groupby/test_bin_groupby.py", "pandas/tests/plotting/test_frame.py", "pandas/tests/frame/methods/test_diff.py" ]
[ "import numpy as np\n\nimport pandas as pd\n\n\nclass Methods:\n\n params = (\n [\"DataFrame\", \"Series\"],\n [10, 1000],\n [\"int\", \"float\"],\n [\"median\", \"mean\", \"max\", \"min\", \"std\", \"count\", \"skew\", \"kurt\", \"sum\"],\n )\n param_names = [\"contructor\", \"window\", \"dtype\", \"method\"]\n\n def setup(self, constructor, window, dtype, method):\n N = 10 ** 5\n arr = (100 * np.random.random(N)).astype(dtype)\n self.roll = getattr(pd, constructor)(arr).rolling(window)\n\n def time_rolling(self, constructor, window, dtype, method):\n getattr(self.roll, method)()\n\n def peakmem_rolling(self, constructor, window, dtype, method):\n getattr(self.roll, method)()\n\n\nclass Apply:\n params = (\n [\"DataFrame\", \"Series\"],\n [3, 300],\n [\"int\", \"float\"],\n [sum, np.sum, lambda x: np.sum(x) + 5],\n [True, False],\n )\n param_names = [\"constructor\", \"window\", \"dtype\", \"function\", \"raw\"]\n\n def setup(self, constructor, window, dtype, function, raw):\n N = 10 ** 3\n arr = (100 * np.random.random(N)).astype(dtype)\n self.roll = getattr(pd, constructor)(arr).rolling(window)\n\n def time_rolling(self, constructor, window, dtype, function, raw):\n self.roll.apply(function, raw=raw)\n\n\nclass Engine:\n params = (\n [\"DataFrame\", \"Series\"],\n [\"int\", \"float\"],\n [np.sum, lambda x: np.sum(x) + 5],\n [\"cython\", \"numba\"],\n )\n param_names = [\"constructor\", \"dtype\", \"function\", \"engine\"]\n\n def setup(self, constructor, dtype, function, engine):\n N = 10 ** 3\n arr = (100 * np.random.random(N)).astype(dtype)\n self.data = getattr(pd, constructor)(arr)\n\n def time_rolling_apply(self, constructor, dtype, function, engine):\n self.data.rolling(10).apply(function, raw=True, engine=engine)\n\n def time_expanding_apply(self, constructor, dtype, function, engine):\n self.data.expanding().apply(function, raw=True, engine=engine)\n\n\nclass ExpandingMethods:\n\n params = (\n [\"DataFrame\", \"Series\"],\n [\"int\", \"float\"],\n [\"median\", \"mean\", \"max\", \"min\", \"std\", \"count\", \"skew\", \"kurt\", \"sum\"],\n )\n param_names = [\"contructor\", \"window\", \"dtype\", \"method\"]\n\n def setup(self, constructor, dtype, method):\n N = 10 ** 5\n arr = (100 * np.random.random(N)).astype(dtype)\n self.expanding = getattr(pd, constructor)(arr).expanding()\n\n def time_expanding(self, constructor, dtype, method):\n getattr(self.expanding, method)()\n\n\nclass EWMMethods:\n\n params = ([\"DataFrame\", \"Series\"], [10, 1000], [\"int\", \"float\"], [\"mean\", \"std\"])\n param_names = [\"contructor\", \"window\", \"dtype\", \"method\"]\n\n def setup(self, constructor, window, dtype, method):\n N = 10 ** 5\n arr = (100 * np.random.random(N)).astype(dtype)\n self.ewm = getattr(pd, constructor)(arr).ewm(halflife=window)\n\n def time_ewm(self, constructor, window, dtype, method):\n getattr(self.ewm, method)()\n\n\nclass VariableWindowMethods(Methods):\n params = (\n [\"DataFrame\", \"Series\"],\n [\"50s\", \"1h\", \"1d\"],\n [\"int\", \"float\"],\n [\"median\", \"mean\", \"max\", \"min\", \"std\", \"count\", \"skew\", \"kurt\", \"sum\"],\n )\n param_names = [\"contructor\", \"window\", \"dtype\", \"method\"]\n\n def setup(self, constructor, window, dtype, method):\n N = 10 ** 5\n arr = (100 * np.random.random(N)).astype(dtype)\n index = pd.date_range(\"2017-01-01\", periods=N, freq=\"5s\")\n self.roll = getattr(pd, constructor)(arr, index=index).rolling(window)\n\n\nclass Pairwise:\n\n params = ([10, 1000, None], [\"corr\", \"cov\"], [True, False])\n 
param_names = [\"window\", \"method\", \"pairwise\"]\n\n def setup(self, window, method, pairwise):\n N = 10 ** 4\n arr = np.random.random(N)\n self.df = pd.DataFrame(arr)\n\n def time_pairwise(self, window, method, pairwise):\n if window is None:\n r = self.df.expanding()\n else:\n r = self.df.rolling(window=window)\n getattr(r, method)(self.df, pairwise=pairwise)\n\n\nclass Quantile:\n params = (\n [\"DataFrame\", \"Series\"],\n [10, 1000],\n [\"int\", \"float\"],\n [0, 0.5, 1],\n [\"linear\", \"nearest\", \"lower\", \"higher\", \"midpoint\"],\n )\n param_names = [\"constructor\", \"window\", \"dtype\", \"percentile\"]\n\n def setup(self, constructor, window, dtype, percentile, interpolation):\n N = 10 ** 5\n arr = np.random.random(N).astype(dtype)\n self.roll = getattr(pd, constructor)(arr).rolling(window)\n\n def time_quantile(self, constructor, window, dtype, percentile, interpolation):\n self.roll.quantile(percentile, interpolation=interpolation)\n\n\nclass PeakMemFixed:\n def setup(self):\n N = 10\n arr = 100 * np.random.random(N)\n self.roll = pd.Series(arr).rolling(10)\n\n def peakmem_fixed(self):\n # GH 25926\n # This is to detect memory leaks in rolling operations.\n # To save time this is only ran on one method.\n # 6000 iterations is enough for most types of leaks to be detected\n for x in range(6000):\n self.roll.max()\n\n\nfrom .pandas_vb_common import setup # noqa: F401 isort:skip\n", "from functools import partial\nimport operator\nimport warnings\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.common import is_integer_dtype\n\nimport pandas as pd\nfrom pandas import Series, isna\nimport pandas._testing as tm\nfrom pandas.core.arrays import DatetimeArray\nimport pandas.core.nanops as nanops\n\nuse_bn = nanops._USE_BOTTLENECK\nhas_c16 = hasattr(np, \"complex128\")\n\n\nclass TestnanopsDataFrame:\n def setup_method(self, method):\n np.random.seed(11235)\n nanops._USE_BOTTLENECK = False\n\n arr_shape = (11, 7)\n\n self.arr_float = np.random.randn(*arr_shape)\n self.arr_float1 = np.random.randn(*arr_shape)\n self.arr_complex = self.arr_float + self.arr_float1 * 1j\n self.arr_int = np.random.randint(-10, 10, arr_shape)\n self.arr_bool = np.random.randint(0, 2, arr_shape) == 0\n self.arr_str = np.abs(self.arr_float).astype(\"S\")\n self.arr_utf = np.abs(self.arr_float).astype(\"U\")\n self.arr_date = np.random.randint(0, 20000, arr_shape).astype(\"M8[ns]\")\n self.arr_tdelta = np.random.randint(0, 20000, arr_shape).astype(\"m8[ns]\")\n\n self.arr_nan = np.tile(np.nan, arr_shape)\n self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])\n self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])\n self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])\n self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])\n\n self.arr_inf = self.arr_float * np.inf\n self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])\n\n self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])\n self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf])\n self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf])\n self.arr_obj = np.vstack(\n [\n self.arr_float.astype(\"O\"),\n self.arr_int.astype(\"O\"),\n self.arr_bool.astype(\"O\"),\n self.arr_complex.astype(\"O\"),\n self.arr_str.astype(\"O\"),\n self.arr_utf.astype(\"O\"),\n self.arr_date.astype(\"O\"),\n self.arr_tdelta.astype(\"O\"),\n ]\n )\n\n with np.errstate(invalid=\"ignore\"):\n self.arr_nan_nanj = 
self.arr_nan + self.arr_nan * 1j\n self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj])\n\n self.arr_nan_infj = self.arr_inf * 1j\n self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj])\n\n self.arr_float_2d = self.arr_float\n self.arr_float1_2d = self.arr_float1\n\n self.arr_nan_2d = self.arr_nan\n self.arr_float_nan_2d = self.arr_float_nan\n self.arr_float1_nan_2d = self.arr_float1_nan\n self.arr_nan_float1_2d = self.arr_nan_float1\n\n self.arr_float_1d = self.arr_float[:, 0]\n self.arr_float1_1d = self.arr_float1[:, 0]\n\n self.arr_nan_1d = self.arr_nan[:, 0]\n self.arr_float_nan_1d = self.arr_float_nan[:, 0]\n self.arr_float1_nan_1d = self.arr_float1_nan[:, 0]\n self.arr_nan_float1_1d = self.arr_nan_float1[:, 0]\n\n def teardown_method(self, method):\n nanops._USE_BOTTLENECK = use_bn\n\n def check_results(self, targ, res, axis, check_dtype=True):\n res = getattr(res, \"asm8\", res)\n res = getattr(res, \"values\", res)\n\n # timedeltas are a beast here\n def _coerce_tds(targ, res):\n if hasattr(targ, \"dtype\") and targ.dtype == \"m8[ns]\":\n if len(targ) == 1:\n targ = targ[0].item()\n res = res.item()\n else:\n targ = targ.view(\"i8\")\n return targ, res\n\n try:\n if (\n axis != 0\n and hasattr(targ, \"shape\")\n and targ.ndim\n and targ.shape != res.shape\n ):\n res = np.split(res, [targ.shape[0]], axis=0)[0]\n except (ValueError, IndexError):\n targ, res = _coerce_tds(targ, res)\n\n try:\n tm.assert_almost_equal(targ, res, check_dtype=check_dtype)\n except AssertionError:\n\n # handle timedelta dtypes\n if hasattr(targ, \"dtype\") and targ.dtype == \"m8[ns]\":\n targ, res = _coerce_tds(targ, res)\n tm.assert_almost_equal(targ, res, check_dtype=check_dtype)\n return\n\n # There are sometimes rounding errors with\n # complex and object dtypes.\n # If it isn't one of those, re-raise the error.\n if not hasattr(res, \"dtype\") or res.dtype.kind not in [\"c\", \"O\"]:\n raise\n # convert object dtypes to something that can be split into\n # real and imaginary parts\n if res.dtype.kind == \"O\":\n if targ.dtype.kind != \"O\":\n res = res.astype(targ.dtype)\n else:\n cast_dtype = \"c16\" if has_c16 else \"f8\"\n res = res.astype(cast_dtype)\n targ = targ.astype(cast_dtype)\n # there should never be a case where numpy returns an object\n # but nanops doesn't, so make that an exception\n elif targ.dtype.kind == \"O\":\n raise\n tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype)\n tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype)\n\n def check_fun_data(\n self,\n testfunc,\n targfunc,\n testarval,\n targarval,\n check_dtype=True,\n empty_targfunc=None,\n **kwargs,\n ):\n for axis in list(range(targarval.ndim)) + [None]:\n for skipna in [False, True]:\n targartempval = targarval if skipna else testarval\n if skipna and empty_targfunc and isna(targartempval).all():\n targ = empty_targfunc(targartempval, axis=axis, **kwargs)\n else:\n targ = targfunc(targartempval, axis=axis, **kwargs)\n\n res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)\n self.check_results(targ, res, axis, check_dtype=check_dtype)\n if skipna:\n res = testfunc(testarval, axis=axis, **kwargs)\n self.check_results(targ, res, axis, check_dtype=check_dtype)\n if axis is None:\n res = testfunc(testarval, skipna=skipna, **kwargs)\n self.check_results(targ, res, axis, check_dtype=check_dtype)\n if skipna and axis is None:\n res = testfunc(testarval, **kwargs)\n self.check_results(targ, res, axis, check_dtype=check_dtype)\n\n 
if testarval.ndim <= 1:\n return\n\n # Recurse on lower-dimension\n testarval2 = np.take(testarval, 0, axis=-1)\n targarval2 = np.take(targarval, 0, axis=-1)\n self.check_fun_data(\n testfunc,\n targfunc,\n testarval2,\n targarval2,\n check_dtype=check_dtype,\n empty_targfunc=empty_targfunc,\n **kwargs,\n )\n\n def check_fun(self, testfunc, targfunc, testar, empty_targfunc=None, **kwargs):\n\n targar = testar\n if testar.endswith(\"_nan\") and hasattr(self, testar[:-4]):\n targar = testar[:-4]\n\n testarval = getattr(self, testar)\n targarval = getattr(self, targar)\n self.check_fun_data(\n testfunc,\n targfunc,\n testarval,\n targarval,\n empty_targfunc=empty_targfunc,\n **kwargs,\n )\n\n def check_funs(\n self,\n testfunc,\n targfunc,\n allow_complex=True,\n allow_all_nan=True,\n allow_date=True,\n allow_tdelta=True,\n allow_obj=True,\n **kwargs,\n ):\n self.check_fun(testfunc, targfunc, \"arr_float\", **kwargs)\n self.check_fun(testfunc, targfunc, \"arr_float_nan\", **kwargs)\n self.check_fun(testfunc, targfunc, \"arr_int\", **kwargs)\n self.check_fun(testfunc, targfunc, \"arr_bool\", **kwargs)\n objs = [\n self.arr_float.astype(\"O\"),\n self.arr_int.astype(\"O\"),\n self.arr_bool.astype(\"O\"),\n ]\n\n if allow_all_nan:\n self.check_fun(testfunc, targfunc, \"arr_nan\", **kwargs)\n\n if allow_complex:\n self.check_fun(testfunc, targfunc, \"arr_complex\", **kwargs)\n self.check_fun(testfunc, targfunc, \"arr_complex_nan\", **kwargs)\n if allow_all_nan:\n self.check_fun(testfunc, targfunc, \"arr_nan_nanj\", **kwargs)\n objs += [self.arr_complex.astype(\"O\")]\n\n if allow_date:\n targfunc(self.arr_date)\n self.check_fun(testfunc, targfunc, \"arr_date\", **kwargs)\n objs += [self.arr_date.astype(\"O\")]\n\n if allow_tdelta:\n try:\n targfunc(self.arr_tdelta)\n except TypeError:\n pass\n else:\n self.check_fun(testfunc, targfunc, \"arr_tdelta\", **kwargs)\n objs += [self.arr_tdelta.astype(\"O\")]\n\n if allow_obj:\n self.arr_obj = np.vstack(objs)\n # some nanops handle object dtypes better than their numpy\n # counterparts, so the numpy functions need to be given something\n # else\n if allow_obj == \"convert\":\n targfunc = partial(\n self._badobj_wrap, func=targfunc, allow_complex=allow_complex\n )\n self.check_fun(testfunc, targfunc, \"arr_obj\", **kwargs)\n\n def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):\n if value.dtype.kind == \"O\":\n if allow_complex:\n value = value.astype(\"c16\")\n else:\n value = value.astype(\"f8\")\n return func(value, **kwargs)\n\n @pytest.mark.parametrize(\n \"nan_op,np_op\", [(nanops.nanany, np.any), (nanops.nanall, np.all)]\n )\n def test_nan_funcs(self, nan_op, np_op):\n # TODO: allow tdelta, doesn't break tests\n self.check_funs(\n nan_op, np_op, allow_all_nan=False, allow_date=False, allow_tdelta=False\n )\n\n def test_nansum(self):\n self.check_funs(\n nanops.nansum,\n np.sum,\n allow_date=False,\n check_dtype=False,\n empty_targfunc=np.nansum,\n )\n\n def test_nanmean(self):\n self.check_funs(\n nanops.nanmean,\n np.mean,\n allow_complex=False, # TODO: allow this, doesn't break test\n allow_obj=False,\n allow_date=False,\n )\n\n def test_nanmean_overflow(self):\n # GH 10155\n # In the previous implementation mean can overflow for int dtypes, it\n # is now consistent with numpy\n\n for a in [2 ** 55, -(2 ** 55), 20150515061816532]:\n s = Series(a, index=range(500), dtype=np.int64)\n result = s.mean()\n np_result = s.values.mean()\n assert result == a\n assert result == np_result\n assert result.dtype == np.float64\n\n 
@pytest.mark.parametrize(\n \"dtype\",\n [\n np.int16,\n np.int32,\n np.int64,\n np.float32,\n np.float64,\n getattr(np, \"float128\", None),\n ],\n )\n def test_returned_dtype(self, dtype):\n if dtype is None:\n # no float128 available\n return\n\n s = Series(range(10), dtype=dtype)\n group_a = [\"mean\", \"std\", \"var\", \"skew\", \"kurt\"]\n group_b = [\"min\", \"max\"]\n for method in group_a + group_b:\n result = getattr(s, method)()\n if is_integer_dtype(dtype) and method in group_a:\n assert result.dtype == np.float64\n else:\n assert result.dtype == dtype\n\n def test_nanmedian(self):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n self.check_funs(\n nanops.nanmedian,\n np.median,\n allow_complex=False,\n allow_date=False,\n allow_obj=\"convert\",\n )\n\n @pytest.mark.parametrize(\"ddof\", range(3))\n def test_nanvar(self, ddof):\n self.check_funs(\n nanops.nanvar,\n np.var,\n allow_complex=False,\n allow_date=False,\n allow_obj=\"convert\",\n ddof=ddof,\n )\n\n @pytest.mark.parametrize(\"ddof\", range(3))\n def test_nanstd(self, ddof):\n self.check_funs(\n nanops.nanstd,\n np.std,\n allow_complex=False,\n allow_date=False,\n allow_obj=\"convert\",\n ddof=ddof,\n )\n\n @td.skip_if_no_scipy\n @pytest.mark.parametrize(\"ddof\", range(3))\n def test_nansem(self, ddof):\n from scipy.stats import sem\n\n with np.errstate(invalid=\"ignore\"):\n self.check_funs(\n nanops.nansem,\n sem,\n allow_complex=False,\n allow_date=False,\n allow_tdelta=False,\n allow_obj=\"convert\",\n ddof=ddof,\n )\n\n @pytest.mark.parametrize(\n \"nan_op,np_op\", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)]\n )\n def test_nanops_with_warnings(self, nan_op, np_op):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n self.check_funs(nan_op, np_op, allow_obj=False)\n\n def _argminmax_wrap(self, value, axis=None, func=None):\n res = func(value, axis)\n nans = np.min(value, axis)\n nullnan = isna(nans)\n if res.ndim:\n res[nullnan] = -1\n elif (\n hasattr(nullnan, \"all\")\n and nullnan.all()\n or not hasattr(nullnan, \"all\")\n and nullnan\n ):\n res = -1\n return res\n\n def test_nanargmax(self):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n func = partial(self._argminmax_wrap, func=np.argmax)\n self.check_funs(nanops.nanargmax, func, allow_obj=False)\n\n def test_nanargmin(self):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n func = partial(self._argminmax_wrap, func=np.argmin)\n self.check_funs(nanops.nanargmin, func, allow_obj=False)\n\n def _skew_kurt_wrap(self, values, axis=None, func=None):\n if not isinstance(values.dtype.type, np.floating):\n values = values.astype(\"f8\")\n result = func(values, axis=axis, bias=False)\n # fix for handling cases where all elements in an axis are the same\n if isinstance(result, np.ndarray):\n result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0\n return result\n elif np.max(values) == np.min(values):\n return 0.0\n return result\n\n @td.skip_if_no_scipy\n def test_nanskew(self):\n from scipy.stats import skew\n\n func = partial(self._skew_kurt_wrap, func=skew)\n with np.errstate(invalid=\"ignore\"):\n self.check_funs(\n nanops.nanskew,\n func,\n allow_complex=False,\n allow_date=False,\n allow_tdelta=False,\n )\n\n @td.skip_if_no_scipy\n def test_nankurt(self):\n from scipy.stats import kurtosis\n\n func1 = partial(kurtosis, fisher=True)\n func = 
partial(self._skew_kurt_wrap, func=func1)\n with np.errstate(invalid=\"ignore\"):\n self.check_funs(\n nanops.nankurt,\n func,\n allow_complex=False,\n allow_date=False,\n allow_tdelta=False,\n )\n\n def test_nanprod(self):\n self.check_funs(\n nanops.nanprod,\n np.prod,\n allow_date=False,\n allow_tdelta=False,\n empty_targfunc=np.nanprod,\n )\n\n def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):\n res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)\n res01 = checkfun(\n self.arr_float_2d,\n self.arr_float1_2d,\n min_periods=len(self.arr_float_2d) - 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ0, res00)\n tm.assert_almost_equal(targ0, res01)\n\n res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs)\n res11 = checkfun(\n self.arr_float_nan_2d,\n self.arr_float1_nan_2d,\n min_periods=len(self.arr_float_2d) - 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ1, res10)\n tm.assert_almost_equal(targ1, res11)\n\n targ2 = np.nan\n res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)\n res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)\n res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)\n res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs)\n res24 = checkfun(\n self.arr_float_nan_2d,\n self.arr_nan_float1_2d,\n min_periods=len(self.arr_float_2d) - 1,\n **kwargs,\n )\n res25 = checkfun(\n self.arr_float_2d,\n self.arr_float1_2d,\n min_periods=len(self.arr_float_2d) + 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ2, res20)\n tm.assert_almost_equal(targ2, res21)\n tm.assert_almost_equal(targ2, res22)\n tm.assert_almost_equal(targ2, res23)\n tm.assert_almost_equal(targ2, res24)\n tm.assert_almost_equal(targ2, res25)\n\n def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):\n res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)\n res01 = checkfun(\n self.arr_float_1d,\n self.arr_float1_1d,\n min_periods=len(self.arr_float_1d) - 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ0, res00)\n tm.assert_almost_equal(targ0, res01)\n\n res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs)\n res11 = checkfun(\n self.arr_float_nan_1d,\n self.arr_float1_nan_1d,\n min_periods=len(self.arr_float_1d) - 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ1, res10)\n tm.assert_almost_equal(targ1, res11)\n\n targ2 = np.nan\n res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)\n res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)\n res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)\n res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs)\n res24 = checkfun(\n self.arr_float_nan_1d,\n self.arr_nan_float1_1d,\n min_periods=len(self.arr_float_1d) - 1,\n **kwargs,\n )\n res25 = checkfun(\n self.arr_float_1d,\n self.arr_float1_1d,\n min_periods=len(self.arr_float_1d) + 1,\n **kwargs,\n )\n tm.assert_almost_equal(targ2, res20)\n tm.assert_almost_equal(targ2, res21)\n tm.assert_almost_equal(targ2, res22)\n tm.assert_almost_equal(targ2, res23)\n tm.assert_almost_equal(targ2, res24)\n tm.assert_almost_equal(targ2, res25)\n\n def test_nancorr(self):\n targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)\n targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]\n 
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method=\"pearson\")\n\n def test_nancorr_pearson(self):\n targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method=\"pearson\")\n targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method=\"pearson\")\n\n @td.skip_if_no_scipy\n def test_nancorr_kendall(self):\n from scipy.stats import kendalltau\n\n targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]\n targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method=\"kendall\")\n targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]\n targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method=\"kendall\")\n\n @td.skip_if_no_scipy\n def test_nancorr_spearman(self):\n from scipy.stats import spearmanr\n\n targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]\n targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]\n self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method=\"spearman\")\n targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]\n targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method=\"spearman\")\n\n @td.skip_if_no_scipy\n def test_invalid_method(self):\n targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n msg = \"Unkown method 'foo', expected one of 'kendall', 'spearman'\"\n with pytest.raises(ValueError, match=msg):\n self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method=\"foo\")\n\n def test_nancov(self):\n targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]\n targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]\n self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)\n targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]\n targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]\n self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)\n\n def check_nancomp(self, checkfun, targ0):\n arr_float = self.arr_float\n arr_float1 = self.arr_float1\n arr_nan = self.arr_nan\n arr_nan_nan = self.arr_nan_nan\n arr_float_nan = self.arr_float_nan\n arr_float1_nan = self.arr_float1_nan\n arr_nan_float1 = self.arr_nan_float1\n\n while targ0.ndim:\n res0 = checkfun(arr_float, arr_float1)\n tm.assert_almost_equal(targ0, res0)\n\n if targ0.ndim > 1:\n targ1 = np.vstack([targ0, arr_nan])\n else:\n targ1 = np.hstack([targ0, arr_nan])\n res1 = checkfun(arr_float_nan, arr_float1_nan)\n tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)\n\n targ2 = arr_nan_nan\n res2 = checkfun(arr_float_nan, arr_nan_float1)\n tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)\n\n # Lower dimension for next step in the loop\n arr_float = np.take(arr_float, 0, axis=-1)\n arr_float1 = np.take(arr_float1, 0, axis=-1)\n arr_nan = np.take(arr_nan, 0, axis=-1)\n arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)\n arr_float_nan = np.take(arr_float_nan, 0, axis=-1)\n arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)\n arr_nan_float1 = np.take(arr_nan_float1, 0, 
axis=-1)\n targ0 = np.take(targ0, 0, axis=-1)\n\n @pytest.mark.parametrize(\n \"op,nanop\",\n [\n (operator.eq, nanops.naneq),\n (operator.ne, nanops.nanne),\n (operator.gt, nanops.nangt),\n (operator.ge, nanops.nange),\n (operator.lt, nanops.nanlt),\n (operator.le, nanops.nanle),\n ],\n )\n def test_nan_comparison(self, op, nanop):\n targ0 = op(self.arr_float, self.arr_float1)\n self.check_nancomp(nanop, targ0)\n\n def check_bool(self, func, value, correct):\n while getattr(value, \"ndim\", True):\n res0 = func(value)\n if correct:\n assert res0\n else:\n assert not res0\n\n if not hasattr(value, \"ndim\"):\n break\n\n # Reduce dimension for next step in the loop\n value = np.take(value, 0, axis=-1)\n\n def test__has_infs(self):\n pairs = [\n (\"arr_complex\", False),\n (\"arr_int\", False),\n (\"arr_bool\", False),\n (\"arr_str\", False),\n (\"arr_utf\", False),\n (\"arr_complex\", False),\n (\"arr_complex_nan\", False),\n (\"arr_nan_nanj\", False),\n (\"arr_nan_infj\", True),\n (\"arr_complex_nan_infj\", True),\n ]\n pairs_float = [\n (\"arr_float\", False),\n (\"arr_nan\", False),\n (\"arr_float_nan\", False),\n (\"arr_nan_nan\", False),\n (\"arr_float_inf\", True),\n (\"arr_inf\", True),\n (\"arr_nan_inf\", True),\n (\"arr_float_nan_inf\", True),\n (\"arr_nan_nan_inf\", True),\n ]\n\n for arr, correct in pairs:\n val = getattr(self, arr)\n self.check_bool(nanops._has_infs, val, correct)\n\n for arr, correct in pairs_float:\n val = getattr(self, arr)\n self.check_bool(nanops._has_infs, val, correct)\n self.check_bool(nanops._has_infs, val.astype(\"f4\"), correct)\n self.check_bool(nanops._has_infs, val.astype(\"f2\"), correct)\n\n def test__bn_ok_dtype(self):\n assert nanops._bn_ok_dtype(self.arr_float.dtype, \"test\")\n assert nanops._bn_ok_dtype(self.arr_complex.dtype, \"test\")\n assert nanops._bn_ok_dtype(self.arr_int.dtype, \"test\")\n assert nanops._bn_ok_dtype(self.arr_bool.dtype, \"test\")\n assert nanops._bn_ok_dtype(self.arr_str.dtype, \"test\")\n assert nanops._bn_ok_dtype(self.arr_utf.dtype, \"test\")\n assert not nanops._bn_ok_dtype(self.arr_date.dtype, \"test\")\n assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, \"test\")\n assert not nanops._bn_ok_dtype(self.arr_obj.dtype, \"test\")\n\n\nclass TestEnsureNumeric:\n def test_numeric_values(self):\n # Test integer\n assert nanops._ensure_numeric(1) == 1\n\n # Test float\n assert nanops._ensure_numeric(1.1) == 1.1\n\n # Test complex\n assert nanops._ensure_numeric(1 + 2j) == 1 + 2j\n\n def test_ndarray(self):\n # Test numeric ndarray\n values = np.array([1, 2, 3])\n assert np.allclose(nanops._ensure_numeric(values), values)\n\n # Test object ndarray\n o_values = values.astype(object)\n assert np.allclose(nanops._ensure_numeric(o_values), values)\n\n # Test convertible string ndarray\n s_values = np.array([\"1\", \"2\", \"3\"], dtype=object)\n assert np.allclose(nanops._ensure_numeric(s_values), values)\n\n # Test non-convertible string ndarray\n s_values = np.array([\"foo\", \"bar\", \"baz\"], dtype=object)\n msg = r\"Could not convert .* to numeric\"\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric(s_values)\n\n def test_convertable_values(self):\n assert np.allclose(nanops._ensure_numeric(\"1\"), 1.0)\n assert np.allclose(nanops._ensure_numeric(\"1.1\"), 1.1)\n assert np.allclose(nanops._ensure_numeric(\"1+1j\"), 1 + 1j)\n\n def test_non_convertable_values(self):\n msg = \"Could not convert foo to numeric\"\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric(\"foo\")\n\n # with the 
wrong type, python raises TypeError for us\n msg = \"argument must be a string or a number\"\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric({})\n with pytest.raises(TypeError, match=msg):\n nanops._ensure_numeric([])\n\n\nclass TestNanvarFixedValues:\n\n # xref GH10242\n\n def setup_method(self, method):\n # Samples from a normal distribution.\n self.variance = variance = 3.0\n self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)\n\n def test_nanvar_all_finite(self):\n samples = self.samples\n actual_variance = nanops.nanvar(samples)\n tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2)\n\n def test_nanvar_nans(self):\n samples = np.nan * np.ones(2 * self.samples.shape[0])\n samples[::2] = self.samples\n\n actual_variance = nanops.nanvar(samples, skipna=True)\n tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2)\n\n actual_variance = nanops.nanvar(samples, skipna=False)\n tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)\n\n def test_nanstd_nans(self):\n samples = np.nan * np.ones(2 * self.samples.shape[0])\n samples[::2] = self.samples\n\n actual_std = nanops.nanstd(samples, skipna=True)\n tm.assert_almost_equal(actual_std, self.variance ** 0.5, check_less_precise=2)\n\n actual_std = nanops.nanvar(samples, skipna=False)\n tm.assert_almost_equal(actual_std, np.nan, check_less_precise=2)\n\n def test_nanvar_axis(self):\n # Generate some sample data.\n samples_norm = self.samples\n samples_unif = self.prng.uniform(size=samples_norm.shape[0])\n samples = np.vstack([samples_norm, samples_unif])\n\n actual_variance = nanops.nanvar(samples, axis=1)\n tm.assert_almost_equal(\n actual_variance, np.array([self.variance, 1.0 / 12]), check_less_precise=2\n )\n\n def test_nanvar_ddof(self):\n n = 5\n samples = self.prng.uniform(size=(10000, n + 1))\n samples[:, -1] = np.nan # Force use of our own algorithm.\n\n variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()\n variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()\n variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()\n\n # The unbiased estimate.\n var = 1.0 / 12\n tm.assert_almost_equal(variance_1, var, check_less_precise=2)\n\n # The underestimated variance.\n tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, check_less_precise=2)\n\n # The overestimated variance.\n tm.assert_almost_equal(\n variance_2, (n - 1.0) / (n - 2.0) * var, check_less_precise=2\n )\n\n def test_ground_truth(self):\n # Test against values that were precomputed with Numpy.\n samples = np.empty((4, 4))\n samples[:3, :3] = np.array(\n [\n [0.97303362, 0.21869576, 0.55560287],\n [0.72980153, 0.03109364, 0.99155171],\n [0.09317602, 0.60078248, 0.15871292],\n ]\n )\n samples[3] = samples[:, 3] = np.nan\n\n # Actual variances along axis=0, 1 for ddof=0, 1, 2\n variance = np.array(\n [\n [\n [0.13762259, 0.05619224, 0.11568816],\n [0.20643388, 0.08428837, 0.17353224],\n [0.41286776, 0.16857673, 0.34706449],\n ],\n [\n [0.09519783, 0.16435395, 0.05082054],\n [0.14279674, 0.24653093, 0.07623082],\n [0.28559348, 0.49306186, 0.15246163],\n ],\n ]\n )\n\n # Test nanvar.\n for axis in range(2):\n for ddof in range(3):\n var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)\n tm.assert_almost_equal(var[:3], variance[axis, ddof])\n assert np.isnan(var[3])\n\n # Test nanstd.\n for axis in range(2):\n for ddof in range(3):\n std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)\n 
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)\n assert np.isnan(std[3])\n\n def test_nanstd_roundoff(self):\n # Regression test for GH 10242 (test data taken from GH 10489). Ensure\n # that variance is stable.\n data = Series(766897346 * np.ones(10))\n for ddof in range(3):\n result = data.std(ddof=ddof)\n assert result == 0.0\n\n @property\n def prng(self):\n return np.random.RandomState(1234)\n\n\nclass TestNanskewFixedValues:\n\n # xref GH 11974\n\n def setup_method(self, method):\n # Test data + skewness value (computed with scipy.stats.skew)\n self.samples = np.sin(np.linspace(0, 1, 200))\n self.actual_skew = -0.1875895205961754\n\n def test_constant_series(self):\n # xref GH 11974\n for val in [3075.2, 3075.3, 3075.5]:\n data = val * np.ones(300)\n skew = nanops.nanskew(data)\n assert skew == 0.0\n\n def test_all_finite(self):\n alpha, beta = 0.3, 0.1\n left_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nanskew(left_tailed) < 0\n\n alpha, beta = 0.1, 0.3\n right_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nanskew(right_tailed) > 0\n\n def test_ground_truth(self):\n skew = nanops.nanskew(self.samples)\n tm.assert_almost_equal(skew, self.actual_skew)\n\n def test_axis(self):\n samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])\n skew = nanops.nanskew(samples, axis=1)\n tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))\n\n def test_nans(self):\n samples = np.hstack([self.samples, np.nan])\n skew = nanops.nanskew(samples, skipna=False)\n assert np.isnan(skew)\n\n def test_nans_skipna(self):\n samples = np.hstack([self.samples, np.nan])\n skew = nanops.nanskew(samples, skipna=True)\n tm.assert_almost_equal(skew, self.actual_skew)\n\n @property\n def prng(self):\n return np.random.RandomState(1234)\n\n\nclass TestNankurtFixedValues:\n\n # xref GH 11974\n\n def setup_method(self, method):\n # Test data + kurtosis value (computed with scipy.stats.kurtosis)\n self.samples = np.sin(np.linspace(0, 1, 200))\n self.actual_kurt = -1.2058303433799713\n\n def test_constant_series(self):\n # xref GH 11974\n for val in [3075.2, 3075.3, 3075.5]:\n data = val * np.ones(300)\n kurt = nanops.nankurt(data)\n assert kurt == 0.0\n\n def test_all_finite(self):\n alpha, beta = 0.3, 0.1\n left_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nankurt(left_tailed) < 0\n\n alpha, beta = 0.1, 0.3\n right_tailed = self.prng.beta(alpha, beta, size=100)\n assert nanops.nankurt(right_tailed) > 0\n\n def test_ground_truth(self):\n kurt = nanops.nankurt(self.samples)\n tm.assert_almost_equal(kurt, self.actual_kurt)\n\n def test_axis(self):\n samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])\n kurt = nanops.nankurt(samples, axis=1)\n tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))\n\n def test_nans(self):\n samples = np.hstack([self.samples, np.nan])\n kurt = nanops.nankurt(samples, skipna=False)\n assert np.isnan(kurt)\n\n def test_nans_skipna(self):\n samples = np.hstack([self.samples, np.nan])\n kurt = nanops.nankurt(samples, skipna=True)\n tm.assert_almost_equal(kurt, self.actual_kurt)\n\n @property\n def prng(self):\n return np.random.RandomState(1234)\n\n\nclass TestDatetime64NaNOps:\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n # Enabling mean changes the behavior of DataFrame.mean\n # See https://github.com/pandas-dev/pandas/issues/24752\n def test_nanmean(self, tz):\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=tz)\n expected = dti[1]\n\n for obj in [dti, 
DatetimeArray(dti), Series(dti)]:\n result = nanops.nanmean(obj)\n assert result == expected\n\n dti2 = dti.insert(1, pd.NaT)\n\n for obj in [dti2, DatetimeArray(dti2), Series(dti2)]:\n result = nanops.nanmean(obj)\n assert result == expected\n\n\ndef test_use_bottleneck():\n\n if nanops._BOTTLENECK_INSTALLED:\n\n pd.set_option(\"use_bottleneck\", True)\n assert pd.get_option(\"use_bottleneck\")\n\n pd.set_option(\"use_bottleneck\", False)\n assert not pd.get_option(\"use_bottleneck\")\n\n pd.set_option(\"use_bottleneck\", use_bn)\n\n\[email protected](\n \"numpy_op, expected\",\n [\n (np.sum, 10),\n (np.nansum, 10),\n (np.mean, 2.5),\n (np.nanmean, 2.5),\n (np.median, 2.5),\n (np.nanmedian, 2.5),\n (np.min, 1),\n (np.max, 4),\n (np.nanmin, 1),\n (np.nanmax, 4),\n ],\n)\ndef test_numpy_ops(numpy_op, expected):\n # GH8383\n result = numpy_op(pd.Series([1, 2, 3, 4]))\n assert result == expected\n\n\[email protected](\n \"operation\",\n [\n nanops.nanany,\n nanops.nanall,\n nanops.nansum,\n nanops.nanmean,\n nanops.nanmedian,\n nanops.nanstd,\n nanops.nanvar,\n nanops.nansem,\n nanops.nanargmax,\n nanops.nanargmin,\n nanops.nanmax,\n nanops.nanmin,\n nanops.nanskew,\n nanops.nankurt,\n nanops.nanprod,\n ],\n)\ndef test_nanops_independent_of_mask_param(operation):\n # GH22764\n s = pd.Series([1, 2, np.nan, 3, np.nan, 4])\n mask = s.isna()\n median_expected = operation(s)\n median_result = operation(s, mask=mask)\n assert median_expected == median_result\n", "import string\n\nimport numpy as np\n\nfrom pandas import DataFrame, MultiIndex, RangeIndex, date_range\n\nfrom .pandas_vb_common import tm\n\n\nclass GetLoc:\n def setup(self):\n self.mi_large = MultiIndex.from_product(\n [np.arange(1000), np.arange(20), list(string.ascii_letters)],\n names=[\"one\", \"two\", \"three\"],\n )\n self.mi_med = MultiIndex.from_product(\n [np.arange(1000), np.arange(10), list(\"A\")], names=[\"one\", \"two\", \"three\"]\n )\n self.mi_small = MultiIndex.from_product(\n [np.arange(100), list(\"A\"), list(\"A\")], names=[\"one\", \"two\", \"three\"]\n )\n\n def time_large_get_loc(self):\n self.mi_large.get_loc((999, 19, \"Z\"))\n\n def time_large_get_loc_warm(self):\n for _ in range(1000):\n self.mi_large.get_loc((999, 19, \"Z\"))\n\n def time_med_get_loc(self):\n self.mi_med.get_loc((999, 9, \"A\"))\n\n def time_med_get_loc_warm(self):\n for _ in range(1000):\n self.mi_med.get_loc((999, 9, \"A\"))\n\n def time_string_get_loc(self):\n self.mi_small.get_loc((99, \"A\", \"A\"))\n\n def time_small_get_loc_warm(self):\n for _ in range(1000):\n self.mi_small.get_loc((99, \"A\", \"A\"))\n\n\nclass Duplicates:\n def setup(self):\n size = 65536\n arrays = [np.random.randint(0, 8192, size), np.random.randint(0, 1024, size)]\n mask = np.random.rand(size) < 0.1\n self.mi_unused_levels = MultiIndex.from_arrays(arrays)\n self.mi_unused_levels = self.mi_unused_levels[mask]\n\n def time_remove_unused_levels(self):\n self.mi_unused_levels.remove_unused_levels()\n\n\nclass Integer:\n def setup(self):\n self.mi_int = MultiIndex.from_product(\n [np.arange(1000), np.arange(1000)], names=[\"one\", \"two\"]\n )\n self.obj_index = np.array(\n [\n (0, 10),\n (0, 11),\n (0, 12),\n (0, 13),\n (0, 14),\n (0, 15),\n (0, 16),\n (0, 17),\n (0, 18),\n (0, 19),\n ],\n dtype=object,\n )\n\n def time_get_indexer(self):\n self.mi_int.get_indexer(self.obj_index)\n\n def time_is_monotonic(self):\n self.mi_int.is_monotonic\n\n\nclass Duplicated:\n def setup(self):\n n, k = 200, 5000\n levels = [np.arange(n), tm.makeStringIndex(n).values, 1000 + 
np.arange(n)]\n codes = [np.random.choice(n, (k * n)) for lev in levels]\n self.mi = MultiIndex(levels=levels, codes=codes)\n\n def time_duplicated(self):\n self.mi.duplicated()\n\n\nclass Sortlevel:\n def setup(self):\n n = 1182720\n low, high = -4096, 4096\n arrs = [\n np.repeat(np.random.randint(low, high, (n // k)), k)\n for k in [11, 7, 5, 3, 1]\n ]\n self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)]\n\n a = np.repeat(np.arange(100), 1000)\n b = np.tile(np.arange(1000), 100)\n self.mi = MultiIndex.from_arrays([a, b])\n self.mi = self.mi.take(np.random.permutation(np.arange(100000)))\n\n def time_sortlevel_int64(self):\n self.mi_int.sortlevel()\n\n def time_sortlevel_zero(self):\n self.mi.sortlevel(0)\n\n def time_sortlevel_one(self):\n self.mi.sortlevel(1)\n\n\nclass Values:\n def setup_cache(self):\n\n level1 = range(1000)\n level2 = date_range(start=\"1/1/2012\", periods=100)\n mi = MultiIndex.from_product([level1, level2])\n return mi\n\n def time_datetime_level_values_copy(self, mi):\n mi.copy().values\n\n def time_datetime_level_values_sliced(self, mi):\n mi[:10].values\n\n\nclass CategoricalLevel:\n def setup(self):\n\n self.df = DataFrame(\n {\n \"a\": np.arange(1_000_000, dtype=np.int32),\n \"b\": np.arange(1_000_000, dtype=np.int64),\n \"c\": np.arange(1_000_000, dtype=float),\n }\n ).astype({\"a\": \"category\", \"b\": \"category\"})\n\n def time_categorical_level(self):\n self.df.set_index([\"a\", \"b\"])\n\n\nclass Equals:\n def setup(self):\n idx_large_fast = RangeIndex(100000)\n idx_small_slow = date_range(start=\"1/1/2012\", periods=1)\n self.mi_large_slow = MultiIndex.from_product([idx_large_fast, idx_small_slow])\n\n self.idx_non_object = RangeIndex(1)\n\n def time_equals_non_object_index(self):\n self.mi_large_slow.equals(self.idx_non_object)\n\n\nclass SetOperations:\n\n params = [\n (\"monotonic\", \"non_monotonic\"),\n (\"datetime\", \"int\", \"string\"),\n (\"intersection\", \"union\", \"symmetric_difference\"),\n ]\n param_names = [\"index_structure\", \"dtype\", \"method\"]\n\n def setup(self, index_structure, dtype, method):\n N = 10 ** 5\n level1 = range(1000)\n\n level2 = date_range(start=\"1/1/2000\", periods=N // 1000)\n dates_left = MultiIndex.from_product([level1, level2])\n\n level2 = range(N // 1000)\n int_left = MultiIndex.from_product([level1, level2])\n\n level2 = tm.makeStringIndex(N // 1000).values\n str_left = MultiIndex.from_product([level1, level2])\n\n data = {\n \"datetime\": dates_left,\n \"int\": int_left,\n \"string\": str_left,\n }\n\n if index_structure == \"non_monotonic\":\n data = {k: mi[::-1] for k, mi in data.items()}\n\n data = {k: {\"left\": mi, \"right\": mi[:-1]} for k, mi in data.items()}\n self.left = data[dtype][\"left\"]\n self.right = data[dtype][\"right\"]\n\n def time_operation(self, index_structure, dtype, method):\n getattr(self.left, method)(self.right)\n\n\nfrom .pandas_vb_common import setup # noqa: F401 isort:skip\n", "import numpy as np\nimport pytest\n\nfrom pandas._libs import groupby, lib, reduction as libreduction\n\nfrom pandas.core.dtypes.common import ensure_int64\n\nfrom pandas import Index, Series, isna\nimport pandas._testing as tm\n\n\ndef test_series_grouper():\n obj = Series(np.random.randn(10))\n dummy = obj.iloc[:0]\n\n labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)\n\n grouper = libreduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)\n result, counts = grouper.get_result()\n\n expected = np.array([obj[3:6].mean(), obj[6:].mean()])\n 
tm.assert_almost_equal(result, expected)\n\n exp_counts = np.array([3, 4], dtype=np.int64)\n tm.assert_almost_equal(counts, exp_counts)\n\n\ndef test_series_grouper_requires_nonempty_raises():\n # GH#29500\n obj = Series(np.random.randn(10))\n dummy = obj.iloc[:0]\n labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)\n\n with pytest.raises(ValueError, match=\"SeriesGrouper requires non-empty `series`\"):\n libreduction.SeriesGrouper(dummy, np.mean, labels, 2, dummy)\n\n\ndef test_series_bin_grouper():\n obj = Series(np.random.randn(10))\n dummy = obj[:0]\n\n bins = np.array([3, 6])\n\n grouper = libreduction.SeriesBinGrouper(obj, np.mean, bins, dummy)\n result, counts = grouper.get_result()\n\n expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])\n tm.assert_almost_equal(result, expected)\n\n exp_counts = np.array([3, 3, 4], dtype=np.int64)\n tm.assert_almost_equal(counts, exp_counts)\n\n\[email protected](\n \"binner,closed,expected\",\n [\n (\n np.array([0, 3, 6, 9], dtype=np.int64),\n \"left\",\n np.array([2, 5, 6], dtype=np.int64),\n ),\n (\n np.array([0, 3, 6, 9], dtype=np.int64),\n \"right\",\n np.array([3, 6, 6], dtype=np.int64),\n ),\n (np.array([0, 3, 6], dtype=np.int64), \"left\", np.array([2, 5], dtype=np.int64)),\n (\n np.array([0, 3, 6], dtype=np.int64),\n \"right\",\n np.array([3, 6], dtype=np.int64),\n ),\n ],\n)\ndef test_generate_bins(binner, closed, expected):\n values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)\n result = lib.generate_bins_dt64(values, binner, closed=closed)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_group_ohlc():\n def _check(dtype):\n obj = np.array(np.random.randn(20), dtype=dtype)\n\n bins = np.array([6, 12, 20])\n out = np.zeros((3, 4), dtype)\n counts = np.zeros(len(out), dtype=np.int64)\n labels = ensure_int64(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))\n\n func = getattr(groupby, f\"group_ohlc_{dtype}\")\n func(out, counts, obj[:, None], labels)\n\n def _ohlc(group):\n if isna(group).all():\n return np.repeat(np.nan, 4)\n return [group[0], group.max(), group.min(), group[-1]]\n\n expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])\n\n tm.assert_almost_equal(out, expected)\n tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))\n\n obj[:6] = np.nan\n func(out, counts, obj[:, None], labels)\n expected[0] = np.nan\n tm.assert_almost_equal(out, expected)\n\n _check(\"float32\")\n _check(\"float64\")\n\n\nclass TestMoments:\n pass\n\n\nclass TestReducer:\n def test_int_index(self):\n arr = np.random.randn(100, 4)\n\n msg = \"Must pass either dummy and labels, or neither\"\n # we must pass either both labels and dummy, or neither\n with pytest.raises(ValueError, match=msg):\n libreduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4)))\n\n with pytest.raises(ValueError, match=msg):\n libreduction.compute_reduction(\n arr, np.sum, axis=1, labels=Index(np.arange(100))\n )\n\n dummy = Series(0.0, index=np.arange(100))\n result = libreduction.compute_reduction(\n arr, np.sum, dummy=dummy, labels=Index(np.arange(4))\n )\n expected = arr.sum(0)\n tm.assert_almost_equal(result, expected)\n\n dummy = Series(0.0, index=np.arange(4))\n result = libreduction.compute_reduction(\n arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))\n )\n expected = arr.sum(1)\n tm.assert_almost_equal(result, expected)\n\n result = libreduction.compute_reduction(\n arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))\n )\n 
tm.assert_almost_equal(result, expected)\n", "# coding: utf-8\n\n\"\"\" Test cases for DataFrame.plot \"\"\"\n\nfrom datetime import date, datetime\nimport itertools\nimport string\nimport warnings\n\nimport numpy as np\nfrom numpy.random import rand, randn\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.api import is_list_like\n\nimport pandas as pd\nfrom pandas import DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range\nimport pandas._testing as tm\nfrom pandas.core.arrays import integer_array\nfrom pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n\nfrom pandas.io.formats.printing import pprint_thing\nimport pandas.plotting as plotting\n\n\[email protected]_if_no_mpl\nclass TestDataFramePlots(TestPlotBase):\n def setup_method(self, method):\n TestPlotBase.setup_method(self, method)\n import matplotlib as mpl\n\n mpl.rcdefaults()\n\n self.tdf = tm.makeTimeDataFrame()\n self.hexbin_df = DataFrame(\n {\n \"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange(20) + np.random.uniform(size=20),\n }\n )\n\n def _assert_ytickslabels_visibility(self, axes, expected):\n for ax, exp in zip(axes, expected):\n self._check_visible(ax.get_yticklabels(), visible=exp)\n\n def _assert_xtickslabels_visibility(self, axes, expected):\n for ax, exp in zip(axes, expected):\n self._check_visible(ax.get_xticklabels(), visible=exp)\n\n @pytest.mark.slow\n def test_plot(self):\n from pandas.plotting._matplotlib.compat import _mpl_ge_3_1_0\n\n df = self.tdf\n _check_plot_works(df.plot, grid=False)\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, subplots=True)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, subplots=True, layout=(-1, 2))\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, subplots=True, use_index=False)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n if _mpl_ge_3_1_0():\n msg = \"'Line2D' object has no property 'blarg'\"\n else:\n msg = \"Unknown property blarg\"\n with pytest.raises(AttributeError, match=msg):\n df.plot.line(blarg=True)\n\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n\n _check_plot_works(df.plot, use_index=True)\n _check_plot_works(df.plot, sort_columns=False)\n _check_plot_works(df.plot, yticks=[1, 5, 10])\n _check_plot_works(df.plot, xticks=[1, 5, 10])\n _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))\n\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.plot, subplots=True, title=\"blah\")\n\n # We have to redo it here because _check_plot_works does two plots,\n # once without an ax kwarg and once with an ax kwarg and the new sharex\n # behaviour does not remove the visibility of the latter axis (as ax is\n # present). 
see: https://github.com/pandas-dev/pandas/issues/9737\n\n axes = df.plot(subplots=True, title=\"blah\")\n self._check_axes_shape(axes, axes_num=3, layout=(3, 1))\n # axes[0].figure.savefig(\"test.png\")\n for ax in axes[:2]:\n self._check_visible(ax.xaxis) # xaxis must be visible for grid\n self._check_visible(ax.get_xticklabels(), visible=False)\n self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n self._check_visible([ax.xaxis.get_label()], visible=False)\n for ax in [axes[2]]:\n self._check_visible(ax.xaxis)\n self._check_visible(ax.get_xticklabels())\n self._check_visible([ax.xaxis.get_label()])\n self._check_ticks_props(ax, xrot=0)\n\n _check_plot_works(df.plot, title=\"blah\")\n\n tuples = zip(string.ascii_letters[:10], range(10))\n df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))\n _check_plot_works(df.plot, use_index=True)\n\n # unicode\n index = MultiIndex.from_tuples(\n [\n (\"\\u03b1\", 0),\n (\"\\u03b1\", 1),\n (\"\\u03b2\", 2),\n (\"\\u03b2\", 3),\n (\"\\u03b3\", 4),\n (\"\\u03b3\", 5),\n (\"\\u03b4\", 6),\n (\"\\u03b4\", 7),\n ],\n names=[\"i0\", \"i1\"],\n )\n columns = MultiIndex.from_tuples(\n [(\"bar\", \"\\u0394\"), (\"bar\", \"\\u0395\")], names=[\"c0\", \"c1\"]\n )\n df = DataFrame(np.random.randint(0, 10, (8, 2)), columns=columns, index=index)\n _check_plot_works(df.plot, title=\"\\u03A3\")\n\n # GH 6951\n # Test with single column\n df = DataFrame({\"x\": np.random.rand(10)})\n axes = _check_plot_works(df.plot.bar, subplots=True)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n # When ax is supplied and required number of axes is 1,\n # passed ax should be used:\n fig, ax = self.plt.subplots()\n axes = df.plot.bar(subplots=True, ax=ax)\n assert len(axes) == 1\n result = ax.axes\n assert result is axes[0]\n\n def test_integer_array_plot(self):\n # GH 25587\n arr = integer_array([1, 2, 3, 4], dtype=\"UInt32\")\n\n s = Series(arr)\n _check_plot_works(s.plot.line)\n _check_plot_works(s.plot.bar)\n _check_plot_works(s.plot.hist)\n _check_plot_works(s.plot.pie)\n\n df = DataFrame({\"x\": arr, \"y\": arr})\n _check_plot_works(df.plot.line)\n _check_plot_works(df.plot.bar)\n _check_plot_works(df.plot.hist)\n _check_plot_works(df.plot.pie, y=\"y\")\n _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\")\n _check_plot_works(df.plot.hexbin, x=\"x\", y=\"y\")\n\n def test_mpl2_color_cycle_str(self):\n # GH 15516\n colors = [\"C\" + str(x) for x in range(10)]\n df = DataFrame(randn(10, 3), columns=[\"a\", \"b\", \"c\"])\n for c in colors:\n _check_plot_works(df.plot, color=c)\n\n def test_color_single_series_list(self):\n # GH 3486\n df = DataFrame({\"A\": [1, 2, 3]})\n _check_plot_works(df.plot, color=[\"red\"])\n\n def test_rgb_tuple_color(self):\n # GH 16695\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n _check_plot_works(df.plot, x=\"x\", y=\"y\", color=(1, 0, 0))\n _check_plot_works(df.plot, x=\"x\", y=\"y\", color=(1, 0, 0, 0.5))\n\n def test_color_empty_string(self):\n df = DataFrame(randn(10, 2))\n with pytest.raises(ValueError):\n df.plot(color=\"\")\n\n def test_color_and_style_arguments(self):\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n # passing both 'color' and 'style' arguments should be allowed\n # if there is no color symbol in the style strings:\n ax = df.plot(color=[\"red\", \"black\"], style=[\"-\", \"--\"])\n # check that the linestyles are correctly set:\n linestyle 
= [line.get_linestyle() for line in ax.lines]\n assert linestyle == [\"-\", \"--\"]\n # check that the colors are correctly set:\n color = [line.get_color() for line in ax.lines]\n assert color == [\"red\", \"black\"]\n # passing both 'color' and 'style' arguments should not be allowed\n # if there is a color symbol in the style strings:\n with pytest.raises(ValueError):\n df.plot(color=[\"red\", \"black\"], style=[\"k-\", \"r--\"])\n\n def test_nonnumeric_exclude(self):\n df = DataFrame({\"A\": [\"x\", \"y\", \"z\"], \"B\": [1, 2, 3]})\n ax = df.plot()\n assert len(ax.get_lines()) == 1 # B was plotted\n\n @pytest.mark.slow\n def test_implicit_label(self):\n df = DataFrame(randn(10, 3), columns=[\"a\", \"b\", \"c\"])\n ax = df.plot(x=\"a\", y=\"b\")\n self._check_text_labels(ax.xaxis.get_label(), \"a\")\n\n @pytest.mark.slow\n def test_donot_overwrite_index_name(self):\n # GH 8494\n df = DataFrame(randn(2, 2), columns=[\"a\", \"b\"])\n df.index.name = \"NAME\"\n df.plot(y=\"b\", label=\"LABEL\")\n assert df.index.name == \"NAME\"\n\n @pytest.mark.slow\n def test_plot_xy(self):\n # columns.inferred_type == 'string'\n df = self.tdf\n self._check_data(df.plot(x=0, y=1), df.set_index(\"A\")[\"B\"].plot())\n self._check_data(df.plot(x=0), df.set_index(\"A\").plot())\n self._check_data(df.plot(y=0), df.B.plot())\n self._check_data(df.plot(x=\"A\", y=\"B\"), df.set_index(\"A\").B.plot())\n self._check_data(df.plot(x=\"A\"), df.set_index(\"A\").plot())\n self._check_data(df.plot(y=\"B\"), df.B.plot())\n\n # columns.inferred_type == 'integer'\n df.columns = np.arange(1, len(df.columns) + 1)\n self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())\n self._check_data(df.plot(x=1), df.set_index(1).plot())\n self._check_data(df.plot(y=1), df[1].plot())\n\n # figsize and title\n ax = df.plot(x=1, y=2, title=\"Test\", figsize=(16, 8))\n self._check_text_labels(ax.title, \"Test\")\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))\n\n # columns.inferred_type == 'mixed'\n # TODO add MultiIndex test\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n \"input_log, expected_log\", [(True, \"log\"), (\"sym\", \"symlog\")]\n )\n def test_logscales(self, input_log, expected_log):\n df = DataFrame({\"a\": np.arange(100)}, index=np.arange(100))\n\n ax = df.plot(logy=input_log)\n self._check_ax_scales(ax, yaxis=expected_log)\n assert ax.get_yscale() == expected_log\n\n ax = df.plot(logx=input_log)\n self._check_ax_scales(ax, xaxis=expected_log)\n assert ax.get_xscale() == expected_log\n\n ax = df.plot(loglog=input_log)\n self._check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)\n assert ax.get_xscale() == expected_log\n assert ax.get_yscale() == expected_log\n\n @pytest.mark.parametrize(\"input_param\", [\"logx\", \"logy\", \"loglog\"])\n def test_invalid_logscale(self, input_param):\n # GH: 24867\n df = DataFrame({\"a\": np.arange(100)}, index=np.arange(100))\n\n msg = \"Boolean, None and 'sym' are valid options, 'sm' is given.\"\n with pytest.raises(ValueError, match=msg):\n df.plot(**{input_param: \"sm\"})\n\n @pytest.mark.slow\n def test_xcompat(self):\n import pandas as pd\n\n df = self.tdf\n ax = df.plot(x_compat=True)\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n\n tm.close()\n pd.plotting.plot_params[\"xaxis.compat\"] = True\n ax = df.plot()\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n\n tm.close()\n pd.plotting.plot_params[\"x_compat\"] = False\n\n ax = df.plot()\n lines = 
ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)\n\n tm.close()\n # useful if you're plotting a bunch together\n with pd.plotting.plot_params.use(\"x_compat\", True):\n ax = df.plot()\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n\n tm.close()\n ax = df.plot()\n lines = ax.get_lines()\n assert not isinstance(lines[0].get_xdata(), PeriodIndex)\n assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)\n\n def test_period_compat(self):\n # GH 9012\n # period-array conversions\n df = DataFrame(\n np.random.rand(21, 2),\n index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),\n columns=[\"a\", \"b\"],\n )\n\n df.plot()\n self.plt.axhline(y=0)\n tm.close()\n\n def test_unsorted_index(self):\n df = DataFrame(\n {\"y\": np.arange(100)}, index=np.arange(99, -1, -1), dtype=np.int64\n )\n ax = df.plot()\n lines = ax.get_lines()[0]\n rs = lines.get_xydata()\n rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name=\"y\")\n tm.assert_series_equal(rs, df.y, check_index_type=False)\n tm.close()\n\n df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)\n ax = df.plot()\n lines = ax.get_lines()[0]\n rs = lines.get_xydata()\n rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name=\"y\")\n tm.assert_series_equal(rs, df.y)\n\n def test_unsorted_index_lims(self):\n df = DataFrame({\"y\": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0])\n ax = df.plot()\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= np.nanmin(lines[0].get_data()[0])\n assert xmax >= np.nanmax(lines[0].get_data()[0])\n\n df = DataFrame(\n {\"y\": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]},\n index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],\n )\n ax = df.plot()\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= np.nanmin(lines[0].get_data()[0])\n assert xmax >= np.nanmax(lines[0].get_data()[0])\n\n df = DataFrame({\"y\": [0.0, 1.0, 2.0, 3.0], \"z\": [91.0, 90.0, 93.0, 92.0]})\n ax = df.plot(x=\"z\", y=\"y\")\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= np.nanmin(lines[0].get_data()[0])\n assert xmax >= np.nanmax(lines[0].get_data()[0])\n\n @pytest.mark.slow\n def test_subplots(self):\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n\n for kind in [\"bar\", \"barh\", \"line\", \"area\"]:\n axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)\n self._check_axes_shape(axes, axes_num=3, layout=(3, 1))\n assert axes.shape == (3,)\n\n for ax, column in zip(axes, df.columns):\n self._check_legend_labels(ax, labels=[pprint_thing(column)])\n\n for ax in axes[:-2]:\n self._check_visible(ax.xaxis) # xaxis must be visible for grid\n self._check_visible(ax.get_xticklabels(), visible=False)\n if not (kind == \"bar\" and self.mpl_ge_3_1_0):\n # change https://github.com/pandas-dev/pandas/issues/26714\n self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n self._check_visible(ax.xaxis.get_label(), visible=False)\n self._check_visible(ax.get_yticklabels())\n\n self._check_visible(axes[-1].xaxis)\n self._check_visible(axes[-1].get_xticklabels())\n self._check_visible(axes[-1].get_xticklabels(minor=True))\n self._check_visible(axes[-1].xaxis.get_label())\n self._check_visible(axes[-1].get_yticklabels())\n\n axes = df.plot(kind=kind, subplots=True, sharex=False)\n for ax in axes:\n self._check_visible(ax.xaxis)\n self._check_visible(ax.get_xticklabels())\n 
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())

            axes = df.plot(kind=kind, subplots=True, legend=False)
            for ax in axes:
                assert ax.get_legend() is None

    def test_groupby_boxplot_sharey(self):
        # https://github.com/pandas-dev/pandas/issues/20968
        # sharey can now be switched; check whether the right
        # pair of axes is turned on or off

        df = DataFrame(
            {
                "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
                "b": [0.56, 0.84, 0.29, 0.56, 0.85],
                "c": [0, 1, 2, 3, 1],
            },
            index=[0, 1, 2, 3, 4],
        )

        # behavior without keyword
        axes = df.groupby("c").boxplot()
        expected = [True, False, True, False]
        self._assert_ytickslabels_visibility(axes, expected)

        # setting sharey=True should be identical to the default
        axes = df.groupby("c").boxplot(sharey=True)
        expected = [True, False, True, False]
        self._assert_ytickslabels_visibility(axes, expected)

        # sharey=False, all yticklabels should be visible
        axes = df.groupby("c").boxplot(sharey=False)
        expected = [True, True, True, True]
        self._assert_ytickslabels_visibility(axes, expected)

    def test_groupby_boxplot_sharex(self):
        # https://github.com/pandas-dev/pandas/issues/20968
        # sharex can now be switched; check whether the right
        # pair of axes is turned on or off

        df = DataFrame(
            {
                "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
                "b": [0.56, 0.84, 0.29, 0.56, 0.85],
                "c": [0, 1, 2, 3, 1],
            },
            index=[0, 1, 2, 3, 4],
        )

        # behavior without keyword
        axes = df.groupby("c").boxplot()
        expected = [True, True, True, True]
        self._assert_xtickslabels_visibility(axes, expected)

        # setting sharex=False should be identical to the default
        axes = df.groupby("c").boxplot(sharex=False)
        expected = [True, True, True, True]
        self._assert_xtickslabels_visibility(axes, expected)

        # sharex=True, xticklabels should be visible
        # only for the bottom plots
        axes = df.groupby("c").boxplot(sharex=True)
        expected = [False, False, True, True]
        self._assert_xtickslabels_visibility(axes, expected)

    @pytest.mark.slow
    def test_subplots_timeseries(self):
        idx = date_range(start="2014-07-01", freq="M", periods=10)
        df = DataFrame(np.random.rand(10, 3), index=idx)

        for kind in ["line", "area"]:
            axes = df.plot(kind=kind, subplots=True, sharex=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))

            for ax in axes[:-2]:
                # GH 7801
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())

            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())
            self._check_ticks_props(axes, xrot=0)

            axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())
                self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)

    def test_subplots_timeseries_y_axis(self):
        # GH16953
        data = {
            "numeric": np.array([1, 2, 5]),
            "timedelta": [
pd.Timedelta(-10, unit=\"s\"),\n pd.Timedelta(10, unit=\"m\"),\n pd.Timedelta(10, unit=\"h\"),\n ],\n \"datetime_no_tz\": [\n pd.to_datetime(\"2017-08-01 00:00:00\"),\n pd.to_datetime(\"2017-08-01 02:00:00\"),\n pd.to_datetime(\"2017-08-02 00:00:00\"),\n ],\n \"datetime_all_tz\": [\n pd.to_datetime(\"2017-08-01 00:00:00\", utc=True),\n pd.to_datetime(\"2017-08-01 02:00:00\", utc=True),\n pd.to_datetime(\"2017-08-02 00:00:00\", utc=True),\n ],\n \"text\": [\"This\", \"should\", \"fail\"],\n }\n testdata = DataFrame(data)\n\n ax_numeric = testdata.plot(y=\"numeric\")\n assert (\n ax_numeric.get_lines()[0].get_data()[1] == testdata[\"numeric\"].values\n ).all()\n ax_timedelta = testdata.plot(y=\"timedelta\")\n assert (\n ax_timedelta.get_lines()[0].get_data()[1] == testdata[\"timedelta\"].values\n ).all()\n ax_datetime_no_tz = testdata.plot(y=\"datetime_no_tz\")\n assert (\n ax_datetime_no_tz.get_lines()[0].get_data()[1]\n == testdata[\"datetime_no_tz\"].values\n ).all()\n ax_datetime_all_tz = testdata.plot(y=\"datetime_all_tz\")\n assert (\n ax_datetime_all_tz.get_lines()[0].get_data()[1]\n == testdata[\"datetime_all_tz\"].values\n ).all()\n\n msg = \"no numeric data to plot\"\n with pytest.raises(TypeError, match=msg):\n testdata.plot(y=\"text\")\n\n @pytest.mark.xfail(reason=\"not support for period, categorical, datetime_mixed_tz\")\n def test_subplots_timeseries_y_axis_not_supported(self):\n \"\"\"\n This test will fail for:\n period:\n since period isn't yet implemented in ``select_dtypes``\n and because it will need a custom value converter +\n tick formatter (as was done for x-axis plots)\n\n categorical:\n because it will need a custom value converter +\n tick formatter (also doesn't work for x-axis, as of now)\n\n datetime_mixed_tz:\n because of the way how pandas handles ``Series`` of\n ``datetime`` objects with different timezone,\n generally converting ``datetime`` objects in a tz-aware\n form could help with this problem\n \"\"\"\n data = {\n \"numeric\": np.array([1, 2, 5]),\n \"period\": [\n pd.Period(\"2017-08-01 00:00:00\", freq=\"H\"),\n pd.Period(\"2017-08-01 02:00\", freq=\"H\"),\n pd.Period(\"2017-08-02 00:00:00\", freq=\"H\"),\n ],\n \"categorical\": pd.Categorical(\n [\"c\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"], ordered=False\n ),\n \"datetime_mixed_tz\": [\n pd.to_datetime(\"2017-08-01 00:00:00\", utc=True),\n pd.to_datetime(\"2017-08-01 02:00:00\"),\n pd.to_datetime(\"2017-08-02 00:00:00\"),\n ],\n }\n testdata = pd.DataFrame(data)\n ax_period = testdata.plot(x=\"numeric\", y=\"period\")\n assert (\n ax_period.get_lines()[0].get_data()[1] == testdata[\"period\"].values\n ).all()\n ax_categorical = testdata.plot(x=\"numeric\", y=\"categorical\")\n assert (\n ax_categorical.get_lines()[0].get_data()[1]\n == testdata[\"categorical\"].values\n ).all()\n ax_datetime_mixed_tz = testdata.plot(x=\"numeric\", y=\"datetime_mixed_tz\")\n assert (\n ax_datetime_mixed_tz.get_lines()[0].get_data()[1]\n == testdata[\"datetime_mixed_tz\"].values\n ).all()\n\n @pytest.mark.slow\n def test_subplots_layout(self):\n # GH 6667\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n\n axes = df.plot(subplots=True, layout=(2, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n assert axes.shape == (2, 2)\n\n axes = df.plot(subplots=True, layout=(-1, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n assert axes.shape == (2, 2)\n\n axes = df.plot(subplots=True, layout=(2, -1))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 
2))
        assert axes.shape == (2, 2)

        axes = df.plot(subplots=True, layout=(1, 4))
        self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
        assert axes.shape == (1, 4)

        axes = df.plot(subplots=True, layout=(-1, 4))
        self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
        assert axes.shape == (1, 4)

        axes = df.plot(subplots=True, layout=(4, -1))
        self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
        assert axes.shape == (4, 1)

        with pytest.raises(ValueError):
            df.plot(subplots=True, layout=(1, 1))
        with pytest.raises(ValueError):
            df.plot(subplots=True, layout=(-1, -1))

        # single column
        df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
        axes = df.plot(subplots=True)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        assert axes.shape == (1,)

        axes = df.plot(subplots=True, layout=(3, 3))
        self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
        assert axes.shape == (3, 3)

    @pytest.mark.slow
    def test_subplots_warnings(self):
        # GH 9464
        with tm.assert_produces_warning(None):
            df = DataFrame(np.random.randn(100, 4))
            df.plot(subplots=True, layout=(3, 2))

            df = DataFrame(
                np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
            )
            df.plot(subplots=True, layout=(3, 2))

    @pytest.mark.slow
    def test_subplots_multiple_axes(self):
        # GH 5353, 6970, GH 7069
        fig, axes = self.plt.subplots(2, 3)
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))

        returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        # draw on second row
        returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
        tm.close()

        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass a different number of axes than required
            df.plot(subplots=True, ax=axes)

        # pass 2-dim axes and an invalid layout
        # an invalid layout should not affect the input and return value
        # (the warning is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
        fig, axes = self.plt.subplots(2, 2)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))

            returned = df.plot(
                subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

            returned = df.plot(
                subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

            returned = df.plot(
                subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

        # single column
        fig, axes = self.plt.subplots(1, 1)
        df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))

        axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        assert axes.shape == (1,)

    def test_subplots_ts_share_axes(self):
        # GH 3964
        fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)\n df = DataFrame(\n np.random.randn(10, 9),\n index=date_range(start=\"2014-07-01\", freq=\"M\", periods=10),\n )\n for i, ax in enumerate(axes.ravel()):\n df[i].plot(ax=ax, fontsize=5)\n\n # Rows other than bottom should not be visible\n for ax in axes[0:-1].ravel():\n self._check_visible(ax.get_xticklabels(), visible=False)\n\n # Bottom row should be visible\n for ax in axes[-1].ravel():\n self._check_visible(ax.get_xticklabels(), visible=True)\n\n # First column should be visible\n for ax in axes[[0, 1, 2], [0]].ravel():\n self._check_visible(ax.get_yticklabels(), visible=True)\n\n # Other columns should not be visible\n for ax in axes[[0, 1, 2], [1]].ravel():\n self._check_visible(ax.get_yticklabels(), visible=False)\n for ax in axes[[0, 1, 2], [2]].ravel():\n self._check_visible(ax.get_yticklabels(), visible=False)\n\n def test_subplots_sharex_axes_existing_axes(self):\n # GH 9158\n d = {\"A\": [1.0, 2.0, 3.0, 4.0], \"B\": [4.0, 3.0, 2.0, 1.0], \"C\": [5, 1, 3, 4]}\n df = DataFrame(d, index=date_range(\"2014 10 11\", \"2014 10 14\"))\n\n axes = df[[\"A\", \"B\"]].plot(subplots=True)\n df[\"C\"].plot(ax=axes[0], secondary_y=True)\n\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n for ax in axes.ravel():\n self._check_visible(ax.get_yticklabels(), visible=True)\n\n @pytest.mark.slow\n def test_subplots_dup_columns(self):\n # GH 10962\n df = DataFrame(np.random.rand(5, 5), columns=list(\"aaaaa\"))\n axes = df.plot(subplots=True)\n for ax in axes:\n self._check_legend_labels(ax, labels=[\"a\"])\n assert len(ax.lines) == 1\n tm.close()\n\n axes = df.plot(subplots=True, secondary_y=\"a\")\n for ax in axes:\n # (right) is only attached when subplots=False\n self._check_legend_labels(ax, labels=[\"a\"])\n assert len(ax.lines) == 1\n tm.close()\n\n ax = df.plot(secondary_y=\"a\")\n self._check_legend_labels(ax, labels=[\"a (right)\"] * 5)\n assert len(ax.lines) == 0\n assert len(ax.right_ax.lines) == 5\n\n def test_negative_log(self):\n df = -DataFrame(\n rand(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"x\", \"y\", \"z\", \"four\"],\n )\n\n with pytest.raises(ValueError):\n df.plot.area(logy=True)\n with pytest.raises(ValueError):\n df.plot.area(loglog=True)\n\n def _compare_stacked_y_cood(self, normal_lines, stacked_lines):\n base = np.zeros(len(normal_lines[0].get_data()[1]))\n for nl, sl in zip(normal_lines, stacked_lines):\n base += nl.get_data()[1] # get y coordinates\n sy = sl.get_data()[1]\n tm.assert_numpy_array_equal(base, sy)\n\n def test_line_area_stacked(self):\n with tm.RNGContext(42):\n df = DataFrame(rand(6, 4), columns=[\"w\", \"x\", \"y\", \"z\"])\n neg_df = -df\n # each column has either positive or negative value\n sep_df = DataFrame(\n {\"w\": rand(6), \"x\": rand(6), \"y\": -rand(6), \"z\": -rand(6)}\n )\n # each column has positive-negative mixed value\n mixed_df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"w\", \"x\", \"y\", \"z\"],\n )\n\n for kind in [\"line\", \"area\"]:\n ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)\n ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)\n self._compare_stacked_y_cood(ax1.lines, ax2.lines)\n\n ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)\n ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)\n self._compare_stacked_y_cood(ax1.lines, ax2.lines)\n\n ax1 = _check_plot_works(sep_df.plot, 
kind=kind, stacked=False)\n ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)\n self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])\n self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])\n\n _check_plot_works(mixed_df.plot, stacked=False)\n with pytest.raises(ValueError):\n mixed_df.plot(stacked=True)\n\n # Use an index with strictly positive values, preventing\n # matplotlib from warning about ignoring xlim\n df2 = df.set_index(df.index + 1)\n _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)\n\n def test_line_area_nan_df(self):\n values1 = [1, 2, np.nan, 3]\n values2 = [3, np.nan, 2, 1]\n df = DataFrame({\"a\": values1, \"b\": values2})\n tdf = DataFrame({\"a\": values1, \"b\": values2}, index=tm.makeDateIndex(k=4))\n\n for d in [df, tdf]:\n ax = _check_plot_works(d.plot)\n masked1 = ax.lines[0].get_ydata()\n masked2 = ax.lines[1].get_ydata()\n # remove nan for comparison purpose\n\n exp = np.array([1, 2, 3], dtype=np.float64)\n tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)\n\n exp = np.array([3, 2, 1], dtype=np.float64)\n tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)\n tm.assert_numpy_array_equal(\n masked1.mask, np.array([False, False, True, False])\n )\n tm.assert_numpy_array_equal(\n masked2.mask, np.array([False, True, False, False])\n )\n\n expected1 = np.array([1, 2, 0, 3], dtype=np.float64)\n expected2 = np.array([3, 0, 2, 1], dtype=np.float64)\n\n ax = _check_plot_works(d.plot, stacked=True)\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)\n tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)\n\n ax = _check_plot_works(d.plot.area)\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)\n tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)\n\n ax = _check_plot_works(d.plot.area, stacked=False)\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)\n tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)\n\n def test_line_lim(self):\n df = DataFrame(rand(6, 3), columns=[\"x\", \"y\", \"z\"])\n ax = df.plot()\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n\n ax = df.plot(secondary_y=True)\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n\n axes = df.plot(secondary_y=True, subplots=True)\n self._check_axes_shape(axes, axes_num=3, layout=(3, 1))\n for ax in axes:\n assert hasattr(ax, \"left_ax\")\n assert not hasattr(ax, \"right_ax\")\n xmin, xmax = ax.get_xlim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n\n def test_area_lim(self):\n df = DataFrame(rand(6, 4), columns=[\"x\", \"y\", \"z\", \"four\"])\n\n neg_df = -df\n for stacked in [True, False]:\n ax = _check_plot_works(df.plot.area, stacked=stacked)\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n lines = ax.get_lines()\n assert xmin <= lines[0].get_data()[0][0]\n assert xmax >= lines[0].get_data()[0][-1]\n assert ymin == 0\n\n ax = _check_plot_works(neg_df.plot.area, stacked=stacked)\n ymin, ymax = ax.get_ylim()\n assert ymax == 0\n\n @pytest.mark.slow\n def test_bar_colors(self):\n import matplotlib.pyplot as plt\n\n default_colors = self._unpack_cycler(plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n ax = df.plot.bar()\n self._check_colors(ax.patches[::5], facecolors=default_colors[:5])\n 
tm.close()\n\n custom_colors = \"rgcby\"\n ax = df.plot.bar(color=custom_colors)\n self._check_colors(ax.patches[::5], facecolors=custom_colors)\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n ax = df.plot.bar(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::5], facecolors=rgba_colors)\n tm.close()\n\n # Test colormap functionality\n ax = df.plot.bar(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::5], facecolors=rgba_colors)\n tm.close()\n\n ax = df.loc[:, [0]].plot.bar(color=\"DodgerBlue\")\n self._check_colors([ax.patches[0]], facecolors=[\"DodgerBlue\"])\n tm.close()\n\n ax = df.plot(kind=\"bar\", color=\"green\")\n self._check_colors(ax.patches[::5], facecolors=[\"green\"] * 5)\n tm.close()\n\n def test_bar_user_colors(self):\n df = pd.DataFrame(\n {\"A\": range(4), \"B\": range(1, 5), \"color\": [\"red\", \"blue\", \"blue\", \"red\"]}\n )\n # This should *only* work when `y` is specified, else\n # we use one color per column\n ax = df.plot.bar(y=\"A\", color=df[\"color\"])\n result = [p.get_facecolor() for p in ax.patches]\n expected = [\n (1.0, 0.0, 0.0, 1.0),\n (0.0, 0.0, 1.0, 1.0),\n (0.0, 0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0, 1.0),\n ]\n assert result == expected\n\n @pytest.mark.slow\n def test_bar_linewidth(self):\n df = DataFrame(randn(5, 5))\n\n # regular\n ax = df.plot.bar(linewidth=2)\n for r in ax.patches:\n assert r.get_linewidth() == 2\n\n # stacked\n ax = df.plot.bar(stacked=True, linewidth=2)\n for r in ax.patches:\n assert r.get_linewidth() == 2\n\n # subplots\n axes = df.plot.bar(linewidth=2, subplots=True)\n self._check_axes_shape(axes, axes_num=5, layout=(5, 1))\n for ax in axes:\n for r in ax.patches:\n assert r.get_linewidth() == 2\n\n @pytest.mark.slow\n def test_bar_barwidth(self):\n df = DataFrame(randn(5, 5))\n\n width = 0.9\n\n # regular\n ax = df.plot.bar(width=width)\n for r in ax.patches:\n assert r.get_width() == width / len(df.columns)\n\n # stacked\n ax = df.plot.bar(stacked=True, width=width)\n for r in ax.patches:\n assert r.get_width() == width\n\n # horizontal regular\n ax = df.plot.barh(width=width)\n for r in ax.patches:\n assert r.get_height() == width / len(df.columns)\n\n # horizontal stacked\n ax = df.plot.barh(stacked=True, width=width)\n for r in ax.patches:\n assert r.get_height() == width\n\n # subplots\n axes = df.plot.bar(width=width, subplots=True)\n for ax in axes:\n for r in ax.patches:\n assert r.get_width() == width\n\n # horizontal subplots\n axes = df.plot.barh(width=width, subplots=True)\n for ax in axes:\n for r in ax.patches:\n assert r.get_height() == width\n\n @pytest.mark.slow\n def test_bar_barwidth_position(self):\n df = DataFrame(randn(5, 5))\n self._check_bar_alignment(\n df, kind=\"bar\", stacked=False, width=0.9, position=0.2\n )\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=0.9, position=0.2)\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=False, width=0.9, position=0.2\n )\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=True, width=0.9, position=0.2\n )\n self._check_bar_alignment(\n df, kind=\"bar\", subplots=True, width=0.9, position=0.2\n )\n self._check_bar_alignment(\n df, kind=\"barh\", subplots=True, width=0.9, position=0.2\n )\n\n @pytest.mark.slow\n def test_bar_barwidth_position_int(self):\n # GH 12979\n df = DataFrame(randn(5, 5))\n\n for w in [1, 1.0]:\n ax = df.plot.bar(stacked=True, width=w)\n ticks = 
ax.xaxis.get_ticklocs()\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))\n assert ax.get_xlim() == (-0.75, 4.75)\n # check left-edge of bars\n assert ax.patches[0].get_x() == -0.5\n assert ax.patches[-1].get_x() == 3.5\n\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=1)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False, width=1)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True, width=1)\n self._check_bar_alignment(df, kind=\"bar\", subplots=True, width=1)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True, width=1)\n\n @pytest.mark.slow\n def test_bar_bottom_left(self):\n df = DataFrame(rand(5, 5))\n ax = df.plot.bar(stacked=False, bottom=1)\n result = [p.get_y() for p in ax.patches]\n assert result == [1] * 25\n\n ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])\n result = [p.get_y() for p in ax.patches[:5]]\n assert result == [-1, -2, -3, -4, -5]\n\n ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))\n result = [p.get_x() for p in ax.patches]\n assert result == [1] * 25\n\n ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])\n result = [p.get_x() for p in ax.patches[:5]]\n assert result == [1, 2, 3, 4, 5]\n\n axes = df.plot.bar(subplots=True, bottom=-1)\n for ax in axes:\n result = [p.get_y() for p in ax.patches]\n assert result == [-1] * 5\n\n axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))\n for ax in axes:\n result = [p.get_x() for p in ax.patches]\n assert result == [1] * 5\n\n @pytest.mark.slow\n def test_bar_nan(self):\n df = DataFrame({\"A\": [10, np.nan, 20], \"B\": [5, 10, 20], \"C\": [1, 2, 3]})\n ax = df.plot.bar()\n expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]\n result = [p.get_height() for p in ax.patches]\n assert result == expected\n\n ax = df.plot.bar(stacked=True)\n result = [p.get_height() for p in ax.patches]\n assert result == expected\n\n result = [p.get_y() for p in ax.patches]\n expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]\n assert result == expected\n\n @pytest.mark.slow\n def test_bar_categorical(self):\n # GH 13019\n df1 = pd.DataFrame(\n np.random.randn(6, 5),\n index=pd.Index(list(\"ABCDEF\")),\n columns=pd.Index(list(\"abcde\")),\n )\n # categorical index must behave the same\n df2 = pd.DataFrame(\n np.random.randn(6, 5),\n index=pd.CategoricalIndex(list(\"ABCDEF\")),\n columns=pd.CategoricalIndex(list(\"abcde\")),\n )\n\n for df in [df1, df2]:\n ax = df.plot.bar()\n ticks = ax.xaxis.get_ticklocs()\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))\n assert ax.get_xlim() == (-0.5, 5.5)\n # check left-edge of bars\n assert ax.patches[0].get_x() == -0.25\n assert ax.patches[-1].get_x() == 5.15\n\n ax = df.plot.bar(stacked=True)\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))\n assert ax.get_xlim() == (-0.5, 5.5)\n assert ax.patches[0].get_x() == -0.25\n assert ax.patches[-1].get_x() == 4.75\n\n @pytest.mark.slow\n def test_plot_scatter(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"x\", \"y\", \"z\", \"four\"],\n )\n\n _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\")\n _check_plot_works(df.plot.scatter, x=1, y=2)\n\n with pytest.raises(TypeError):\n df.plot.scatter(x=\"x\")\n with pytest.raises(TypeError):\n df.plot.scatter(y=\"y\")\n\n # GH 6951\n axes = df.plot(x=\"x\", y=\"y\", kind=\"scatter\", subplots=True)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n def test_raise_error_on_datetime_time_data(self):\n # GH 8113, datetime.time 
        # type is not supported by matplotlib in scatter
        df = pd.DataFrame(np.random.randn(10), columns=["a"])
        df["dtime"] = pd.date_range(start="2014-01-01", freq="h", periods=10).time
        msg = "must be a string or a number, not 'datetime.time'"

        with pytest.raises(TypeError, match=msg):
            df.plot(kind="scatter", x="dtime", y="a")

    def test_scatterplot_datetime_data(self):
        # GH 30391
        dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq="W")
        vals = np.random.normal(0, 1, len(dates))
        df = pd.DataFrame({"dates": dates, "vals": vals})

        _check_plot_works(df.plot.scatter, x="dates", y="vals")
        _check_plot_works(df.plot.scatter, x=0, y=1)

    def test_scatterplot_object_data(self):
        # GH 18755
        df = pd.DataFrame(dict(a=["A", "B", "C"], b=[2, 3, 4]))

        _check_plot_works(df.plot.scatter, x="a", y="b")
        _check_plot_works(df.plot.scatter, x=0, y=1)

        df = pd.DataFrame(dict(a=["A", "B", "C"], b=["a", "b", "c"]))

        _check_plot_works(df.plot.scatter, x="a", y="b")
        _check_plot_works(df.plot.scatter, x=0, y=1)

    @pytest.mark.slow
    def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
        # addressing issue #10611, to ensure the colorbar does not
        # interfere with the x-axis label and ticklabels with the
        # ipython inline backend.
        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])

        ax1 = df.plot.scatter(x="A label", y="B label")
        ax2 = df.plot.scatter(x="A label", y="B label", c="C label")

        vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
        vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
        assert vis1 == vis2

        vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
        vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
        assert vis1 == vis2

        assert (
            ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
        )

    @pytest.mark.slow
    def test_if_hexbin_xaxis_label_is_visible(self):
        # addressing issue #10678, to ensure the colorbar does not
        # interfere with the x-axis label and ticklabels with the
        # ipython inline backend.
        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])

        ax = df.plot.hexbin("A label", "B label", gridsize=12)
        assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
        assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
        assert ax.xaxis.get_label().get_visible()

    @pytest.mark.slow
    def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
        import matplotlib.pyplot as plt

        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])

        fig, axes = plt.subplots(1, 2)
        df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
        df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
        plt.tight_layout()

        points = np.array([ax.get_position().get_points() for ax in fig.axes])
        axes_x_coords = points[:, :, 0]
        parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
        colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
        assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()

    @pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
    @pytest.mark.slow
    def test_plot_scatter_with_categorical_data(self, x, y):
        # after fixing GH 18755,
should be able to plot categorical data\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4], \"y\": pd.Categorical([\"a\", \"b\", \"a\", \"c\"])}\n )\n\n _check_plot_works(df.plot.scatter, x=x, y=y)\n\n @pytest.mark.slow\n def test_plot_scatter_with_c(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"x\", \"y\", \"z\", \"four\"],\n )\n\n axes = [df.plot.scatter(x=\"x\", y=\"y\", c=\"z\"), df.plot.scatter(x=0, y=1, c=2)]\n for ax in axes:\n # default to Greys\n assert ax.collections[0].cmap.name == \"Greys\"\n\n # n.b. there appears to be no public method\n # to get the colorbar label\n assert ax.collections[0].colorbar._label == \"z\"\n\n cm = \"cubehelix\"\n ax = df.plot.scatter(x=\"x\", y=\"y\", c=\"z\", colormap=cm)\n assert ax.collections[0].cmap.name == cm\n\n # verify turning off colorbar works\n ax = df.plot.scatter(x=\"x\", y=\"y\", c=\"z\", colorbar=False)\n assert ax.collections[0].colorbar is None\n\n # verify that we can still plot a solid color\n ax = df.plot.scatter(x=0, y=1, c=\"red\")\n assert ax.collections[0].colorbar is None\n self._check_colors(ax.collections, facecolors=[\"r\"])\n\n # Ensure that we can pass an np.array straight through to matplotlib,\n # this functionality was accidentally removed previously.\n # See https://github.com/pandas-dev/pandas/issues/8852 for bug report\n #\n # Exercise colormap path and non-colormap path as they are independent\n #\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]})\n red_rgba = [1.0, 0.0, 0.0, 1.0]\n green_rgba = [0.0, 1.0, 0.0, 1.0]\n rgba_array = np.array([red_rgba, green_rgba])\n ax = df.plot.scatter(x=\"A\", y=\"B\", c=rgba_array)\n # expect the face colors of the points in the non-colormap path to be\n # identical to the values we supplied, normally we'd be on shaky ground\n # comparing floats for equality but here we expect them to be\n # identical.\n tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array)\n # we don't test the colors of the faces in this next plot because they\n # are dependent on the spring colormap, which may change its colors\n # later.\n float_array = np.array([0.0, 1.0])\n df.plot.scatter(x=\"A\", y=\"B\", c=float_array, cmap=\"spring\")\n\n def test_scatter_colors(self):\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [1, 2, 3], \"c\": [1, 2, 3]})\n with pytest.raises(TypeError):\n df.plot.scatter(x=\"a\", y=\"b\", c=\"c\", color=\"green\")\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", c=\"c\")\n tm.assert_numpy_array_equal(\n ax.collections[0].get_facecolor()[0],\n np.array(self.colorconverter.to_rgba(default_colors[0])),\n )\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", color=\"white\")\n tm.assert_numpy_array_equal(\n ax.collections[0].get_facecolor()[0],\n np.array([1, 1, 1, 1], dtype=np.float64),\n )\n\n @pytest.mark.slow\n def test_plot_bar(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"one\", \"two\", \"three\", \"four\"],\n )\n\n _check_plot_works(df.plot.bar)\n _check_plot_works(df.plot.bar, legend=False)\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.plot.bar, subplots=True)\n _check_plot_works(df.plot.bar, stacked=True)\n\n df = DataFrame(\n randn(10, 15), index=list(string.ascii_letters[:10]), columns=range(15)\n )\n _check_plot_works(df.plot.bar)\n\n df = DataFrame({\"a\": [0, 1], \"b\": [1, 0]})\n ax = _check_plot_works(df.plot.bar)\n self._check_ticks_props(ax, xrot=90)\n\n ax = df.plot.bar(rot=35, fontsize=10)\n self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)\n\n ax = _check_plot_works(df.plot.barh)\n self._check_ticks_props(ax, yrot=0)\n\n ax = df.plot.barh(rot=55, fontsize=11)\n self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)\n\n def _check_bar_alignment(\n self,\n df,\n kind=\"bar\",\n stacked=False,\n subplots=False,\n align=\"center\",\n width=0.5,\n position=0.5,\n ):\n\n axes = df.plot(\n kind=kind,\n stacked=stacked,\n subplots=subplots,\n align=align,\n width=width,\n position=position,\n grid=True,\n )\n\n axes = self._flatten_visible(axes)\n\n for ax in axes:\n if kind == \"bar\":\n axis = ax.xaxis\n ax_min, ax_max = ax.get_xlim()\n min_edge = min(p.get_x() for p in ax.patches)\n max_edge = max(p.get_x() + p.get_width() for p in ax.patches)\n elif kind == \"barh\":\n axis = ax.yaxis\n ax_min, ax_max = ax.get_ylim()\n min_edge = min(p.get_y() for p in ax.patches)\n max_edge = max(p.get_y() + p.get_height() for p in ax.patches)\n else:\n raise ValueError\n\n # GH 7498\n # compare margins between lim and bar edges\n tm.assert_almost_equal(ax_min, min_edge - 0.25)\n tm.assert_almost_equal(ax_max, max_edge + 0.25)\n\n p = ax.patches[0]\n if kind == \"bar\" and (stacked is True or subplots is True):\n edge = p.get_x()\n center = edge + p.get_width() * position\n elif kind == \"bar\" and stacked is False:\n center = p.get_x() + p.get_width() * len(df.columns) * position\n edge = p.get_x()\n elif kind == \"barh\" and (stacked is True or subplots is True):\n center = p.get_y() + p.get_height() * position\n edge = p.get_y()\n elif kind == \"barh\" and stacked is False:\n center = p.get_y() + p.get_height() * len(df.columns) * position\n edge = p.get_y()\n else:\n raise ValueError\n\n # Check the ticks locates on integer\n assert (axis.get_ticklocs() == np.arange(len(df))).all()\n\n if align == \"center\":\n # Check whether the bar locates on center\n tm.assert_almost_equal(axis.get_ticklocs()[0], center)\n elif align == \"edge\":\n # Check whether the bar's edge starts from the tick\n tm.assert_almost_equal(axis.get_ticklocs()[0], edge)\n else:\n raise ValueError\n\n return axes\n\n @pytest.mark.slow\n def test_bar_stacked_center(self):\n # GH2157\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n self._check_bar_alignment(df, kind=\"bar\", stacked=True)\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=0.9)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True, width=0.9)\n\n @pytest.mark.slow\n def test_bar_center(self):\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n self._check_bar_alignment(df, kind=\"bar\", stacked=False)\n self._check_bar_alignment(df, kind=\"bar\", stacked=False, width=0.9)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False, width=0.9)\n\n @pytest.mark.slow\n def test_bar_subplots_center(self):\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n 
self._check_bar_alignment(df, kind=\"bar\", subplots=True)\n self._check_bar_alignment(df, kind=\"bar\", subplots=True, width=0.9)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True, width=0.9)\n\n @pytest.mark.slow\n def test_bar_align_single_column(self):\n df = DataFrame(randn(5))\n self._check_bar_alignment(df, kind=\"bar\", stacked=False)\n self._check_bar_alignment(df, kind=\"bar\", stacked=True)\n self._check_bar_alignment(df, kind=\"barh\", stacked=False)\n self._check_bar_alignment(df, kind=\"barh\", stacked=True)\n self._check_bar_alignment(df, kind=\"bar\", subplots=True)\n self._check_bar_alignment(df, kind=\"barh\", subplots=True)\n\n @pytest.mark.slow\n def test_bar_edge(self):\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(5))}, index=range(5))\n\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, align=\"edge\")\n self._check_bar_alignment(df, kind=\"bar\", stacked=True, width=0.9, align=\"edge\")\n self._check_bar_alignment(df, kind=\"barh\", stacked=True, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=True, width=0.9, align=\"edge\"\n )\n\n self._check_bar_alignment(df, kind=\"bar\", stacked=False, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"bar\", stacked=False, width=0.9, align=\"edge\"\n )\n self._check_bar_alignment(df, kind=\"barh\", stacked=False, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"barh\", stacked=False, width=0.9, align=\"edge\"\n )\n\n self._check_bar_alignment(df, kind=\"bar\", subplots=True, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"bar\", subplots=True, width=0.9, align=\"edge\"\n )\n self._check_bar_alignment(df, kind=\"barh\", subplots=True, align=\"edge\")\n self._check_bar_alignment(\n df, kind=\"barh\", subplots=True, width=0.9, align=\"edge\"\n )\n\n @pytest.mark.slow\n def test_bar_log_no_subplots(self):\n # GH3254, GH3298 matplotlib/matplotlib#1882, #1892\n # regressions in 1.2.1\n expected = np.array([0.1, 1.0, 10.0, 100])\n\n # no subplots\n df = DataFrame({\"A\": [3] * 5, \"B\": list(range(1, 6))}, index=range(5))\n ax = df.plot.bar(grid=True, log=True)\n tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)\n\n @pytest.mark.slow\n def test_bar_log_subplots(self):\n expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])\n\n ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(\n log=True, subplots=True\n )\n\n tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)\n tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)\n\n @pytest.mark.slow\n def test_boxplot(self):\n df = self.hist_df\n series = df[\"height\"]\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n\n ax = _check_plot_works(df.plot.box)\n self._check_text_labels(ax.get_xticklabels(), labels)\n tm.assert_numpy_array_equal(\n ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)\n )\n assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)\n\n axes = series.plot.box(rot=40)\n self._check_ticks_props(axes, xrot=40, yrot=0)\n tm.close()\n\n ax = _check_plot_works(series.plot.box)\n\n positions = np.array([1, 6, 7])\n ax = df.plot.box(positions=positions)\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n self._check_text_labels(ax.get_xticklabels(), labels)\n tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)\n assert len(ax.lines) == self.bp_n_objects * 
len(numeric_cols)\n\n @pytest.mark.slow\n def test_boxplot_vertical(self):\n df = self.hist_df\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n\n # if horizontal, yticklabels are rotated\n ax = df.plot.box(rot=50, fontsize=8, vert=False)\n self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)\n self._check_text_labels(ax.get_yticklabels(), labels)\n assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)\n\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot.box, subplots=True, vert=False, logx=True)\n self._check_axes_shape(axes, axes_num=3, layout=(1, 3))\n self._check_ax_scales(axes, xaxis=\"log\")\n for ax, label in zip(axes, labels):\n self._check_text_labels(ax.get_yticklabels(), [label])\n assert len(ax.lines) == self.bp_n_objects\n\n positions = np.array([3, 2, 8])\n ax = df.plot.box(positions=positions, vert=False)\n self._check_text_labels(ax.get_yticklabels(), labels)\n tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)\n assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)\n\n @pytest.mark.slow\n def test_boxplot_return_type(self):\n df = DataFrame(\n randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=[\"one\", \"two\", \"three\", \"four\"],\n )\n with pytest.raises(ValueError):\n df.plot.box(return_type=\"NOTATYPE\")\n\n result = df.plot.box(return_type=\"dict\")\n self._check_box_return_type(result, \"dict\")\n\n result = df.plot.box(return_type=\"axes\")\n self._check_box_return_type(result, \"axes\")\n\n result = df.plot.box() # default axes\n self._check_box_return_type(result, \"axes\")\n\n result = df.plot.box(return_type=\"both\")\n self._check_box_return_type(result, \"both\")\n\n @pytest.mark.slow\n def test_boxplot_subplots_return_type(self):\n df = self.hist_df\n\n # normal style: return_type=None\n result = df.plot.box(subplots=True)\n assert isinstance(result, Series)\n self._check_box_return_type(\n result, None, expected_keys=[\"height\", \"weight\", \"category\"]\n )\n\n for t in [\"dict\", \"axes\", \"both\"]:\n returned = df.plot.box(return_type=t, subplots=True)\n self._check_box_return_type(\n returned,\n t,\n expected_keys=[\"height\", \"weight\", \"category\"],\n check_ax_title=False,\n )\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_df(self):\n df = DataFrame(randn(100, 4))\n ax = _check_plot_works(df.plot, kind=\"kde\")\n expected = [pprint_thing(c) for c in df.columns]\n self._check_legend_labels(ax, labels=expected)\n self._check_ticks_props(ax, xrot=0)\n\n ax = df.plot(kind=\"kde\", rot=20, fontsize=5)\n self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot, kind=\"kde\", subplots=True)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.plot(kind=\"kde\", logy=True, subplots=True)\n self._check_ax_scales(axes, yaxis=\"log\")\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_missing_vals(self):\n df = DataFrame(np.random.uniform(size=(100, 4)))\n df.loc[0, 0] = np.nan\n _check_plot_works(df.plot, kind=\"kde\")\n\n @pytest.mark.slow\n def test_hist_df(self):\n from matplotlib.patches import Rectangle\n\n df = DataFrame(randn(100, 4))\n series = df[0]\n\n ax = _check_plot_works(df.plot.hist)\n expected = [pprint_thing(c) for c in df.columns]\n self._check_legend_labels(ax, labels=expected)\n\n with 
tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot.hist, subplots=True, logy=True)\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n self._check_ax_scales(axes, yaxis=\"log\")\n\n axes = series.plot.hist(rot=40)\n self._check_ticks_props(axes, xrot=40, yrot=0)\n tm.close()\n\n ax = series.plot.hist(cumulative=True, bins=4, density=True)\n # height of last bin (index 5) must be 1.0\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n tm.assert_almost_equal(rects[-1].get_height(), 1.0)\n tm.close()\n\n ax = series.plot.hist(cumulative=True, bins=4)\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n\n tm.assert_almost_equal(rects[-2].get_height(), 100.0)\n tm.close()\n\n # if horizontal, yticklabels are rotated\n axes = df.plot.hist(rot=50, fontsize=8, orientation=\"horizontal\")\n self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)\n\n def _check_box_coord(\n self,\n patches,\n expected_y=None,\n expected_h=None,\n expected_x=None,\n expected_w=None,\n ):\n result_y = np.array([p.get_y() for p in patches])\n result_height = np.array([p.get_height() for p in patches])\n result_x = np.array([p.get_x() for p in patches])\n result_width = np.array([p.get_width() for p in patches])\n # dtype is depending on above values, no need to check\n\n if expected_y is not None:\n tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False)\n if expected_h is not None:\n tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False)\n if expected_x is not None:\n tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False)\n if expected_w is not None:\n tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False)\n\n @pytest.mark.slow\n def test_hist_df_coord(self):\n normal_df = DataFrame(\n {\n \"A\": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])),\n \"B\": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])),\n \"C\": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])),\n },\n columns=[\"A\", \"B\", \"C\"],\n )\n\n nan_df = DataFrame(\n {\n \"A\": np.repeat(\n np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6])\n ),\n \"B\": np.repeat(\n np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8])\n ),\n \"C\": np.repeat(\n np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10])\n ),\n },\n columns=[\"A\", \"B\", \"C\"],\n )\n\n for df in [normal_df, nan_df]:\n ax = df.plot.hist(bins=5)\n self._check_box_coord(\n ax.patches[:5],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([6, 7, 8, 9, 10]),\n )\n\n ax = df.plot.hist(bins=5, stacked=True)\n self._check_box_coord(\n ax.patches[:5],\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_y=np.array([10, 9, 8, 7, 6]),\n expected_h=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_y=np.array([18, 17, 16, 15, 14]),\n expected_h=np.array([6, 7, 8, 9, 10]),\n )\n\n axes = df.plot.hist(bins=5, stacked=True, subplots=True)\n self._check_box_coord(\n axes[0].patches,\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n 
axes[1].patches,\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n axes[2].patches,\n expected_y=np.array([0, 0, 0, 0, 0]),\n expected_h=np.array([6, 7, 8, 9, 10]),\n )\n\n # horizontal\n ax = df.plot.hist(bins=5, orientation=\"horizontal\")\n self._check_box_coord(\n ax.patches[:5],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([6, 7, 8, 9, 10]),\n )\n\n ax = df.plot.hist(bins=5, stacked=True, orientation=\"horizontal\")\n self._check_box_coord(\n ax.patches[:5],\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n ax.patches[5:10],\n expected_x=np.array([10, 9, 8, 7, 6]),\n expected_w=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n ax.patches[10:],\n expected_x=np.array([18, 17, 16, 15, 14]),\n expected_w=np.array([6, 7, 8, 9, 10]),\n )\n\n axes = df.plot.hist(\n bins=5, stacked=True, subplots=True, orientation=\"horizontal\"\n )\n self._check_box_coord(\n axes[0].patches,\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([10, 9, 8, 7, 6]),\n )\n self._check_box_coord(\n axes[1].patches,\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([8, 8, 8, 8, 8]),\n )\n self._check_box_coord(\n axes[2].patches,\n expected_x=np.array([0, 0, 0, 0, 0]),\n expected_w=np.array([6, 7, 8, 9, 10]),\n )\n\n @pytest.mark.slow\n def test_plot_int_columns(self):\n df = DataFrame(randn(100, 4)).cumsum()\n _check_plot_works(df.plot, legend=True)\n\n @pytest.mark.slow\n def test_df_legend_labels(self):\n kinds = [\"line\", \"bar\", \"barh\", \"kde\", \"area\", \"hist\"]\n df = DataFrame(rand(3, 3), columns=[\"a\", \"b\", \"c\"])\n df2 = DataFrame(rand(3, 3), columns=[\"d\", \"e\", \"f\"])\n df3 = DataFrame(rand(3, 3), columns=[\"g\", \"h\", \"i\"])\n df4 = DataFrame(rand(3, 3), columns=[\"j\", \"k\", \"l\"])\n\n for kind in kinds:\n\n ax = df.plot(kind=kind, legend=True)\n self._check_legend_labels(ax, labels=df.columns)\n\n ax = df2.plot(kind=kind, legend=False, ax=ax)\n self._check_legend_labels(ax, labels=df.columns)\n\n ax = df3.plot(kind=kind, legend=True, ax=ax)\n self._check_legend_labels(ax, labels=df.columns.union(df3.columns))\n\n ax = df4.plot(kind=kind, legend=\"reverse\", ax=ax)\n expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))\n self._check_legend_labels(ax, labels=expected)\n\n # Secondary Y\n ax = df.plot(legend=True, secondary_y=\"b\")\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\"])\n ax = df2.plot(legend=False, ax=ax)\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\"])\n ax = df3.plot(kind=\"bar\", legend=True, secondary_y=\"h\", ax=ax)\n self._check_legend_labels(\n ax, labels=[\"a\", \"b (right)\", \"c\", \"g\", \"h (right)\", \"i\"]\n )\n\n # Time Series\n ind = date_range(\"1/1/2014\", periods=3)\n df = DataFrame(randn(3, 3), columns=[\"a\", \"b\", \"c\"], index=ind)\n df2 = DataFrame(randn(3, 3), columns=[\"d\", \"e\", \"f\"], index=ind)\n df3 = DataFrame(randn(3, 3), columns=[\"g\", \"h\", \"i\"], index=ind)\n ax = df.plot(legend=True, secondary_y=\"b\")\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\"])\n ax = df2.plot(legend=False, ax=ax)\n self._check_legend_labels(ax, 
labels=[\"a\", \"b (right)\", \"c\"])\n ax = df3.plot(legend=True, ax=ax)\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\", \"c\", \"g\", \"h\", \"i\"])\n\n # scatter\n ax = df.plot.scatter(x=\"a\", y=\"b\", label=\"data1\")\n self._check_legend_labels(ax, labels=[\"data1\"])\n ax = df2.plot.scatter(x=\"d\", y=\"e\", legend=False, label=\"data2\", ax=ax)\n self._check_legend_labels(ax, labels=[\"data1\"])\n ax = df3.plot.scatter(x=\"g\", y=\"h\", label=\"data3\", ax=ax)\n self._check_legend_labels(ax, labels=[\"data1\", \"data3\"])\n\n # ensure label args pass through and\n # index name does not mutate\n # column names don't mutate\n df5 = df.set_index(\"a\")\n ax = df5.plot(y=\"b\")\n self._check_legend_labels(ax, labels=[\"b\"])\n ax = df5.plot(y=\"b\", label=\"LABEL_b\")\n self._check_legend_labels(ax, labels=[\"LABEL_b\"])\n self._check_text_labels(ax.xaxis.get_label(), \"a\")\n ax = df5.plot(y=\"c\", label=\"LABEL_c\", ax=ax)\n self._check_legend_labels(ax, labels=[\"LABEL_b\", \"LABEL_c\"])\n assert df5.columns.tolist() == [\"b\", \"c\"]\n\n def test_missing_marker_multi_plots_on_same_ax(self):\n # GH 18222\n df = pd.DataFrame(\n data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=[\"x\", \"r\", \"g\", \"b\"]\n )\n fig, ax = self.plt.subplots(nrows=1, ncols=3)\n # Left plot\n df.plot(x=\"x\", y=\"r\", linewidth=0, marker=\"o\", color=\"r\", ax=ax[0])\n df.plot(x=\"x\", y=\"g\", linewidth=1, marker=\"x\", color=\"g\", ax=ax[0])\n df.plot(x=\"x\", y=\"b\", linewidth=1, marker=\"o\", color=\"b\", ax=ax[0])\n self._check_legend_labels(ax[0], labels=[\"r\", \"g\", \"b\"])\n self._check_legend_marker(ax[0], expected_markers=[\"o\", \"x\", \"o\"])\n # Center plot\n df.plot(x=\"x\", y=\"b\", linewidth=1, marker=\"o\", color=\"b\", ax=ax[1])\n df.plot(x=\"x\", y=\"r\", linewidth=0, marker=\"o\", color=\"r\", ax=ax[1])\n df.plot(x=\"x\", y=\"g\", linewidth=1, marker=\"x\", color=\"g\", ax=ax[1])\n self._check_legend_labels(ax[1], labels=[\"b\", \"r\", \"g\"])\n self._check_legend_marker(ax[1], expected_markers=[\"o\", \"o\", \"x\"])\n # Right plot\n df.plot(x=\"x\", y=\"g\", linewidth=1, marker=\"x\", color=\"g\", ax=ax[2])\n df.plot(x=\"x\", y=\"b\", linewidth=1, marker=\"o\", color=\"b\", ax=ax[2])\n df.plot(x=\"x\", y=\"r\", linewidth=0, marker=\"o\", color=\"r\", ax=ax[2])\n self._check_legend_labels(ax[2], labels=[\"g\", \"b\", \"r\"])\n self._check_legend_marker(ax[2], expected_markers=[\"x\", \"o\", \"o\"])\n\n def test_legend_name(self):\n multi = DataFrame(\n randn(4, 4),\n columns=[np.array([\"a\", \"a\", \"b\", \"b\"]), np.array([\"x\", \"y\", \"x\", \"y\"])],\n )\n multi.columns.names = [\"group\", \"individual\"]\n\n ax = multi.plot()\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"group,individual\")\n\n df = DataFrame(randn(5, 5))\n ax = df.plot(legend=True, ax=ax)\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"group,individual\")\n\n df.columns.name = \"new\"\n ax = df.plot(legend=False, ax=ax)\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"group,individual\")\n\n ax = df.plot(legend=True, ax=ax)\n leg_title = ax.legend_.get_title()\n self._check_text_labels(leg_title, \"new\")\n\n @pytest.mark.slow\n def test_no_legend(self):\n kinds = [\"line\", \"bar\", \"barh\", \"kde\", \"area\", \"hist\"]\n df = DataFrame(rand(3, 3), columns=[\"a\", \"b\", \"c\"])\n\n for kind in kinds:\n\n ax = df.plot(kind=kind, legend=False)\n self._check_legend_labels(ax, visible=False)\n\n @pytest.mark.slow\n def 
test_style_by_column(self):\n import matplotlib.pyplot as plt\n\n fig = plt.gcf()\n\n df = DataFrame(randn(100, 3))\n for markers in [\n {0: \"^\", 1: \"+\", 2: \"o\"},\n {0: \"^\", 1: \"+\"},\n [\"^\", \"+\", \"o\"],\n [\"^\", \"+\"],\n ]:\n fig.clf()\n fig.add_subplot(111)\n ax = df.plot(style=markers)\n for i, l in enumerate(ax.get_lines()[: len(markers)]):\n assert l.get_marker() == markers[i]\n\n @pytest.mark.slow\n def test_line_label_none(self):\n s = Series([1, 2])\n ax = s.plot()\n assert ax.get_legend() is None\n\n ax = s.plot(legend=True)\n assert ax.get_legend().get_texts()[0].get_text() == \"None\"\n\n @pytest.mark.slow\n def test_line_colors(self):\n from matplotlib import cm\n\n custom_colors = \"rgcby\"\n df = DataFrame(randn(5, 5))\n\n ax = df.plot(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n\n tm.close()\n\n ax2 = df.plot(color=custom_colors)\n lines2 = ax2.get_lines()\n\n for l1, l2 in zip(ax.get_lines(), lines2):\n assert l1.get_color() == l2.get_color()\n\n tm.close()\n\n ax = df.plot(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n ax = df.plot(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n ax = df.loc[:, [0]].plot(color=\"DodgerBlue\")\n self._check_colors(ax.lines, linecolors=[\"DodgerBlue\"])\n\n ax = df.plot(color=\"red\")\n self._check_colors(ax.get_lines(), linecolors=[\"red\"] * 5)\n tm.close()\n\n # GH 10299\n custom_colors = [\"#FF0000\", \"#0000FF\", \"#FFFF00\", \"#000000\", \"#FFFFFF\"]\n ax = df.plot(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n tm.close()\n\n with pytest.raises(ValueError):\n # Color contains shorthand hex value results in ValueError\n custom_colors = [\"#F00\", \"#00F\", \"#FF0\", \"#000\", \"#FFF\"]\n # Forced show plot\n _check_plot_works(df.plot, color=custom_colors)\n\n @pytest.mark.slow\n def test_dont_modify_colors(self):\n colors = [\"r\", \"g\", \"b\"]\n pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)\n assert len(colors) == 3\n\n @pytest.mark.slow\n def test_line_colors_and_styles_subplots(self):\n # GH 9894\n from matplotlib import cm\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n\n axes = df.plot(subplots=True)\n for ax, c in zip(axes, list(default_colors)):\n c = [c]\n self._check_colors(ax.get_lines(), linecolors=c)\n tm.close()\n\n # single color char\n axes = df.plot(subplots=True, color=\"k\")\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"k\"])\n tm.close()\n\n # single color str\n axes = df.plot(subplots=True, color=\"green\")\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"green\"])\n tm.close()\n\n custom_colors = \"rgcby\"\n axes = df.plot(color=custom_colors, subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n axes = df.plot(color=list(custom_colors), subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # GH 10299\n custom_colors = [\"#FF0000\", \"#0000FF\", \"#FFFF00\", \"#000000\", \"#FFFFFF\"]\n axes = df.plot(color=custom_colors, subplots=True)\n for ax, c in 
zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n with pytest.raises(ValueError):\n # Color contains shorthand hex value results in ValueError\n custom_colors = [\"#F00\", \"#00F\", \"#FF0\", \"#000\", \"#FFF\"]\n # Forced show plot\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.plot, color=custom_colors, subplots=True)\n\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n for cmap in [\"jet\", cm.jet]:\n axes = df.plot(colormap=cmap, subplots=True)\n for ax, c in zip(axes, rgba_colors):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n axes = df.loc[:, [0]].plot(color=\"DodgerBlue\", subplots=True)\n self._check_colors(axes[0].lines, linecolors=[\"DodgerBlue\"])\n\n # single character style\n axes = df.plot(style=\"r\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"r\"])\n tm.close()\n\n # list of styles\n styles = list(\"rgcby\")\n axes = df.plot(style=styles, subplots=True)\n for ax, c in zip(axes, styles):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n @pytest.mark.slow\n def test_area_colors(self):\n from matplotlib import cm\n from matplotlib.collections import PolyCollection\n\n custom_colors = \"rgcby\"\n df = DataFrame(rand(5, 5))\n\n ax = df.plot.area(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n self._check_colors(poly, facecolors=custom_colors)\n\n handles, labels = ax.get_legend_handles_labels()\n self._check_colors(handles, facecolors=custom_colors)\n\n for h in handles:\n assert h.get_alpha() is None\n tm.close()\n\n ax = df.plot.area(colormap=\"jet\")\n jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=jet_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n self._check_colors(poly, facecolors=jet_colors)\n\n handles, labels = ax.get_legend_handles_labels()\n self._check_colors(handles, facecolors=jet_colors)\n for h in handles:\n assert h.get_alpha() is None\n tm.close()\n\n # When stacked=False, alpha is set to 0.5\n ax = df.plot.area(colormap=cm.jet, stacked=False)\n self._check_colors(ax.get_lines(), linecolors=jet_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]\n self._check_colors(poly, facecolors=jet_with_alpha)\n\n handles, labels = ax.get_legend_handles_labels()\n linecolors = jet_with_alpha\n self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)\n for h in handles:\n assert h.get_alpha() == 0.5\n\n @pytest.mark.slow\n def test_hist_colors(self):\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n ax = df.plot.hist()\n self._check_colors(ax.patches[::10], facecolors=default_colors[:5])\n tm.close()\n\n custom_colors = \"rgcby\"\n ax = df.plot.hist(color=custom_colors)\n self._check_colors(ax.patches[::10], facecolors=custom_colors)\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n ax = df.plot.hist(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::10], facecolors=rgba_colors)\n 
tm.close()\n\n # Test colormap functionality\n ax = df.plot.hist(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::10], facecolors=rgba_colors)\n tm.close()\n\n ax = df.loc[:, [0]].plot.hist(color=\"DodgerBlue\")\n self._check_colors([ax.patches[0]], facecolors=[\"DodgerBlue\"])\n\n ax = df.plot(kind=\"hist\", color=\"green\")\n self._check_colors(ax.patches[::10], facecolors=[\"green\"] * 5)\n tm.close()\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_colors(self):\n from matplotlib import cm\n\n custom_colors = \"rgcby\"\n df = DataFrame(rand(5, 5))\n\n ax = df.plot.kde(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n tm.close()\n\n ax = df.plot.kde(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n ax = df.plot.kde(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_kde_colors_and_styles_subplots(self):\n from matplotlib import cm\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n\n axes = df.plot(kind=\"kde\", subplots=True)\n for ax, c in zip(axes, list(default_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # single color char\n axes = df.plot(kind=\"kde\", color=\"k\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"k\"])\n tm.close()\n\n # single color str\n axes = df.plot(kind=\"kde\", color=\"red\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"red\"])\n tm.close()\n\n custom_colors = \"rgcby\"\n axes = df.plot(kind=\"kde\", color=custom_colors, subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n for cmap in [\"jet\", cm.jet]:\n axes = df.plot(kind=\"kde\", colormap=cmap, subplots=True)\n for ax, c in zip(axes, rgba_colors):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n axes = df.loc[:, [0]].plot(kind=\"kde\", color=\"DodgerBlue\", subplots=True)\n self._check_colors(axes[0].lines, linecolors=[\"DodgerBlue\"])\n\n # single character style\n axes = df.plot(kind=\"kde\", style=\"r\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"r\"])\n tm.close()\n\n # list of styles\n styles = list(\"rgcby\")\n axes = df.plot(kind=\"kde\", style=styles, subplots=True)\n for ax, c in zip(axes, styles):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n @pytest.mark.slow\n def test_boxplot_colors(self):\n def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c=\"k\", fliers_c=None):\n # TODO: outside this func?\n if fliers_c is None:\n fliers_c = \"k\"\n self._check_colors(bp[\"boxes\"], linecolors=[box_c] * len(bp[\"boxes\"]))\n self._check_colors(\n bp[\"whiskers\"], linecolors=[whiskers_c] * len(bp[\"whiskers\"])\n )\n self._check_colors(\n bp[\"medians\"], linecolors=[medians_c] * len(bp[\"medians\"])\n )\n self._check_colors(bp[\"fliers\"], linecolors=[fliers_c] * len(bp[\"fliers\"]))\n self._check_colors(bp[\"caps\"], linecolors=[caps_c] * len(bp[\"caps\"]))\n\n 
default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(randn(5, 5))\n bp = df.plot.box(return_type=\"dict\")\n _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])\n tm.close()\n\n dict_colors = dict(\n boxes=\"#572923\", whiskers=\"#982042\", medians=\"#804823\", caps=\"#123456\"\n )\n bp = df.plot.box(color=dict_colors, sym=\"r+\", return_type=\"dict\")\n _check_colors(\n bp,\n dict_colors[\"boxes\"],\n dict_colors[\"whiskers\"],\n dict_colors[\"medians\"],\n dict_colors[\"caps\"],\n \"r\",\n )\n tm.close()\n\n # partial colors\n dict_colors = dict(whiskers=\"c\", medians=\"m\")\n bp = df.plot.box(color=dict_colors, return_type=\"dict\")\n _check_colors(bp, default_colors[0], \"c\", \"m\")\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n bp = df.plot.box(colormap=\"jet\", return_type=\"dict\")\n jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]\n _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])\n tm.close()\n\n # Test colormap functionality\n bp = df.plot.box(colormap=cm.jet, return_type=\"dict\")\n _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])\n tm.close()\n\n # string color is applied to all artists except fliers\n bp = df.plot.box(color=\"DodgerBlue\", return_type=\"dict\")\n _check_colors(bp, \"DodgerBlue\", \"DodgerBlue\", \"DodgerBlue\", \"DodgerBlue\")\n\n # tuple is also applied to all artists except fliers\n bp = df.plot.box(color=(0, 1, 0), sym=\"#123456\", return_type=\"dict\")\n _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), \"#123456\")\n\n with pytest.raises(ValueError):\n # Color contains invalid key results in ValueError\n df.plot.box(color=dict(boxes=\"red\", xxxx=\"blue\"))\n\n @pytest.mark.parametrize(\n \"props, expected\",\n [\n (\"boxprops\", \"boxes\"),\n (\"whiskerprops\", \"whiskers\"),\n (\"capprops\", \"caps\"),\n (\"medianprops\", \"medians\"),\n ],\n )\n def test_specified_props_kwd_plot_box(self, props, expected):\n # GH 30346\n df = DataFrame({k: np.random.random(100) for k in \"ABC\"})\n kwd = {props: dict(color=\"C1\")}\n result = df.plot.box(return_type=\"dict\", **kwd)\n\n assert result[expected][0].get_color() == \"C1\"\n\n def test_default_color_cycle(self):\n import matplotlib.pyplot as plt\n import cycler\n\n colors = list(\"rgbk\")\n plt.rcParams[\"axes.prop_cycle\"] = cycler.cycler(\"color\", colors)\n\n df = DataFrame(randn(5, 3))\n ax = df.plot()\n\n expected = self._unpack_cycler(plt.rcParams)[:3]\n self._check_colors(ax.get_lines(), linecolors=expected)\n\n def test_unordered_ts(self):\n df = DataFrame(\n np.array([3.0, 2.0, 1.0]),\n index=[date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)],\n columns=[\"test\"],\n )\n ax = df.plot()\n xticks = ax.lines[0].get_xdata()\n assert xticks[0] < xticks[1]\n ydata = ax.lines[0].get_ydata()\n tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))\n\n @td.skip_if_no_scipy\n def test_kind_both_ways(self):\n df = DataFrame({\"x\": [1, 2, 3]})\n for kind in plotting.PlotAccessor._common_kinds:\n\n df.plot(kind=kind)\n getattr(df.plot, kind)()\n for kind in [\"scatter\", \"hexbin\"]:\n df.plot(\"x\", \"x\", kind=kind)\n getattr(df.plot, kind)(\"x\", \"x\")\n\n def test_all_invalid_plot_data(self):\n df = DataFrame(list(\"abcd\"))\n for kind in plotting.PlotAccessor._common_kinds:\n\n msg = \"no numeric data to plot\"\n with pytest.raises(TypeError, match=msg):\n df.plot(kind=kind)\n\n @pytest.mark.slow\n def test_partially_invalid_plot_data(self):\n with 
tm.RNGContext(42):\n df = DataFrame(randn(10, 2), dtype=object)\n df[np.random.rand(df.shape[0]) > 0.5] = \"a\"\n for kind in plotting.PlotAccessor._common_kinds:\n\n msg = \"no numeric data to plot\"\n with pytest.raises(TypeError, match=msg):\n df.plot(kind=kind)\n\n with tm.RNGContext(42):\n # area plot doesn't support positive/negative mixed data\n kinds = [\"area\"]\n df = DataFrame(rand(10, 2), dtype=object)\n df[np.random.rand(df.shape[0]) > 0.5] = \"a\"\n for kind in kinds:\n with pytest.raises(TypeError):\n df.plot(kind=kind)\n\n def test_invalid_kind(self):\n df = DataFrame(randn(10, 2))\n with pytest.raises(ValueError):\n df.plot(kind=\"aasdf\")\n\n @pytest.mark.parametrize(\n \"x,y,lbl\",\n [\n ([\"B\", \"C\"], \"A\", \"a\"),\n ([\"A\"], [\"B\", \"C\"], [\"b\", \"c\"]),\n (\"A\", [\"B\", \"C\"], \"badlabel\"),\n ],\n )\n def test_invalid_xy_args(self, x, y, lbl):\n # GH 18671, 19699 allows y to be list-like but not x\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n with pytest.raises(ValueError):\n df.plot(x=x, y=y, label=lbl)\n\n @pytest.mark.parametrize(\"x,y\", [(\"A\", \"B\"), ([\"A\"], \"B\")])\n def test_invalid_xy_args_dup_cols(self, x, y):\n # GH 18671, 19699 allows y to be list-like but not x\n df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list(\"AAB\"))\n with pytest.raises(ValueError):\n df.plot(x=x, y=y)\n\n @pytest.mark.parametrize(\n \"x,y,lbl,colors\",\n [\n (\"A\", [\"B\"], [\"b\"], [\"red\"]),\n (\"A\", [\"B\", \"C\"], [\"b\", \"c\"], [\"red\", \"blue\"]),\n (0, [1, 2], [\"bokeh\", \"cython\"], [\"green\", \"yellow\"]),\n ],\n )\n def test_y_listlike(self, x, y, lbl, colors):\n # GH 19699: tests list-like y and verifies lbls & colors\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n _check_plot_works(df.plot, x=\"A\", y=y, label=lbl)\n\n ax = df.plot(x=x, y=y, label=lbl, color=colors)\n assert len(ax.lines) == len(y)\n self._check_colors(ax.get_lines(), linecolors=colors)\n\n @pytest.mark.parametrize(\"x,y,colnames\", [(0, 1, [\"A\", \"B\"]), (1, 0, [0, 1])])\n def test_xy_args_integer(self, x, y, colnames):\n # GH 20056: tests integer args for xy and checks col names\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]})\n df.columns = colnames\n _check_plot_works(df.plot, x=x, y=y)\n\n @pytest.mark.slow\n def test_hexbin_basic(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", gridsize=10)\n # TODO: need better way to test. 
This just does existence.\n assert len(ax.collections) == 1\n\n # GH 6951\n axes = df.plot.hexbin(x=\"A\", y=\"B\", subplots=True)\n # hexbin should have 2 axes in the figure, 1 for plotting and another\n # is colorbar\n assert len(axes[0].figure.axes) == 2\n # return value is single axes\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n @pytest.mark.slow\n def test_hexbin_with_c(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", C=\"C\")\n assert len(ax.collections) == 1\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", C=\"C\", reduce_C_function=np.std)\n assert len(ax.collections) == 1\n\n @pytest.mark.slow\n def test_hexbin_cmap(self):\n df = self.hexbin_df\n\n # Default to BuGn\n ax = df.plot.hexbin(x=\"A\", y=\"B\")\n assert ax.collections[0].cmap.name == \"BuGn\"\n\n cm = \"cubehelix\"\n ax = df.plot.hexbin(x=\"A\", y=\"B\", colormap=cm)\n assert ax.collections[0].cmap.name == cm\n\n @pytest.mark.slow\n def test_no_color_bar(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", colorbar=None)\n assert ax.collections[0].colorbar is None\n\n @pytest.mark.slow\n def test_allow_cmap(self):\n df = self.hexbin_df\n\n ax = df.plot.hexbin(x=\"A\", y=\"B\", cmap=\"YlGn\")\n assert ax.collections[0].cmap.name == \"YlGn\"\n\n with pytest.raises(TypeError):\n df.plot.hexbin(x=\"A\", y=\"B\", cmap=\"YlGn\", colormap=\"BuGn\")\n\n @pytest.mark.slow\n def test_pie_df(self):\n df = DataFrame(\n np.random.rand(5, 3),\n columns=[\"X\", \"Y\", \"Z\"],\n index=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n with pytest.raises(ValueError):\n df.plot.pie()\n\n ax = _check_plot_works(df.plot.pie, y=\"Y\")\n self._check_text_labels(ax.texts, df.index)\n\n ax = _check_plot_works(df.plot.pie, y=2)\n self._check_text_labels(ax.texts, df.index)\n\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.plot.pie, subplots=True)\n assert len(axes) == len(df.columns)\n for ax in axes:\n self._check_text_labels(ax.texts, df.index)\n for ax, ylabel in zip(axes, df.columns):\n assert ax.get_ylabel() == ylabel\n\n labels = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n color_args = [\"r\", \"g\", \"b\", \"c\", \"m\"]\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.plot.pie, subplots=True, labels=labels, colors=color_args\n )\n assert len(axes) == len(df.columns)\n\n for ax in axes:\n self._check_text_labels(ax.texts, labels)\n self._check_colors(ax.patches, facecolors=color_args)\n\n def test_pie_df_nan(self):\n df = DataFrame(np.random.rand(4, 4))\n for i in range(4):\n df.iloc[i, i] = np.nan\n fig, axes = self.plt.subplots(ncols=4)\n df.plot.pie(subplots=True, ax=axes, legend=True)\n\n base_expected = [\"0\", \"1\", \"2\", \"3\"]\n for i, ax in enumerate(axes):\n expected = list(base_expected) # force copy\n expected[i] = \"\"\n result = [x.get_text() for x in ax.texts]\n assert result == expected\n # legend labels\n # NaN's not included in legend with subplots\n # see https://github.com/pandas-dev/pandas/issues/8390\n assert [x.get_text() for x in ax.get_legend().get_texts()] == base_expected[\n :i\n ] + base_expected[i + 1 :]\n\n @pytest.mark.slow\n def test_errorbar_plot(self):\n with warnings.catch_warnings():\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n df = DataFrame(d)\n d_err = {\"x\": np.ones(12) * 0.2, \"y\": np.ones(12) * 0.4}\n df_err = DataFrame(d_err)\n\n # check line plots\n ax = _check_plot_works(df.plot, yerr=df_err, logy=True)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n kinds = [\"line\", \"bar\", \"barh\"]\n for kind in kinds:\n ax = _check_plot_works(df.plot, yerr=df_err[\"x\"], kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n ax = _check_plot_works(\n df.plot, yerr=df_err[\"x\"], xerr=df_err[\"x\"], kind=kind\n )\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n axes = _check_plot_works(\n df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind\n )\n self._check_has_errorbars(axes, xerr=1, yerr=1)\n\n ax = _check_plot_works(\n (df + 1).plot, yerr=df_err, xerr=df_err, kind=\"bar\", log=True\n )\n self._check_has_errorbars(ax, xerr=2, yerr=2)\n\n # yerr is raw error values\n ax = _check_plot_works(df[\"y\"].plot, yerr=np.ones(12) * 0.4)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n # yerr is column name\n for yerr in [\"yerr\", \"誤差\"]:\n s_df = df.copy()\n s_df[yerr] = np.ones(12) * 0.2\n ax = _check_plot_works(s_df.plot, yerr=yerr)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(s_df.plot, y=\"y\", x=\"x\", yerr=yerr)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n\n with pytest.raises(ValueError):\n df.plot(yerr=np.random.randn(11))\n\n df_err = DataFrame({\"x\": [\"zzz\"] * 12, \"y\": [\"zzz\"] * 12})\n with pytest.raises((ValueError, TypeError)):\n df.plot(yerr=df_err)\n\n @pytest.mark.xfail(reason=\"Iterator is consumed\", raises=ValueError)\n @pytest.mark.slow\n def test_errorbar_plot_iterator(self):\n with warnings.catch_warnings():\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n df = DataFrame(d)\n\n # yerr is iterator\n ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n @pytest.mark.slow\n def test_errorbar_with_integer_column_names(self):\n # test with integer column names\n df = DataFrame(np.random.randn(10, 2))\n df_err = DataFrame(np.random.randn(10, 2))\n ax = _check_plot_works(df.plot, yerr=df_err)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(df.plot, y=0, yerr=1)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n\n @pytest.mark.slow\n def test_errorbar_with_partial_columns(self):\n df = DataFrame(np.random.randn(10, 3))\n df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])\n kinds = [\"line\", \"bar\"]\n for kind in kinds:\n ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n ix = date_range(\"1/1/2000\", periods=10, freq=\"M\")\n df.set_index(ix, inplace=True)\n df_err.set_index(ix, inplace=True)\n ax = _check_plot_works(df.plot, yerr=df_err, kind=\"line\")\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n df = DataFrame(d)\n d_err = {\"x\": np.ones(12) * 0.2, \"z\": np.ones(12) * 0.4}\n df_err = DataFrame(d_err)\n for err in [d_err, df_err]:\n ax = _check_plot_works(df.plot, yerr=err)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n\n @pytest.mark.slow\n def test_errorbar_timeseries(self):\n\n with warnings.catch_warnings():\n d = {\"x\": np.arange(12), \"y\": np.arange(12, 0, -1)}\n d_err = {\"x\": np.ones(12) * 0.2, \"y\": np.ones(12) * 0.4}\n\n # check time-series plots\n ix = date_range(\"1/1/2000\", \"1/1/2001\", freq=\"M\")\n tdf = DataFrame(d, index=ix)\n tdf_err = DataFrame(d_err, index=ix)\n\n kinds = [\"line\", \"bar\", \"barh\"]\n for kind in kinds:\n ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n ax = _check_plot_works(tdf.plot, y=\"y\", yerr=tdf_err[\"x\"], kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(tdf.plot, y=\"y\", 
yerr=\"x\", kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)\n self._check_has_errorbars(ax, xerr=0, yerr=2)\n\n # _check_plot_works adds an ax so catch warning. see GH #13188\n axes = _check_plot_works(\n tdf.plot, kind=kind, yerr=tdf_err, subplots=True\n )\n self._check_has_errorbars(axes, xerr=0, yerr=1)\n\n def test_errorbar_asymmetrical(self):\n\n np.random.seed(0)\n err = np.random.rand(3, 2, 5)\n\n # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...\n df = DataFrame(np.arange(15).reshape(3, 5)).T\n\n ax = df.plot(yerr=err, xerr=err / 2)\n\n yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]\n expected_0_0 = err[0, :, 0] * np.array([-1, 1])\n tm.assert_almost_equal(yerr_0_0, expected_0_0)\n\n with pytest.raises(ValueError):\n df.plot(yerr=err.T)\n\n tm.close()\n\n def test_table(self):\n df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))\n _check_plot_works(df.plot, table=True)\n _check_plot_works(df.plot, table=df)\n\n ax = df.plot()\n assert len(ax.tables) == 0\n plotting.table(ax, df.T)\n assert len(ax.tables) == 1\n\n def test_errorbar_scatter(self):\n df = DataFrame(np.random.randn(5, 2), index=range(5), columns=[\"x\", \"y\"])\n df_err = DataFrame(\n np.random.randn(5, 2) / 5, index=range(5), columns=[\"x\", \"y\"]\n )\n\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\")\n self._check_has_errorbars(ax, xerr=0, yerr=0)\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\", xerr=df_err)\n self._check_has_errorbars(ax, xerr=1, yerr=0)\n\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\", yerr=df_err)\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n ax = _check_plot_works(df.plot.scatter, x=\"x\", y=\"y\", xerr=df_err, yerr=df_err)\n self._check_has_errorbars(ax, xerr=1, yerr=1)\n\n def _check_errorbar_color(containers, expected, has_err=\"has_xerr\"):\n lines = []\n errs = [c.lines for c in ax.containers if getattr(c, has_err, False)][0]\n for el in errs:\n if is_list_like(el):\n lines.extend(el)\n else:\n lines.append(el)\n err_lines = [x for x in lines if x in ax.collections]\n self._check_colors(\n err_lines, linecolors=np.array([expected] * len(err_lines))\n )\n\n # GH 8081\n df = DataFrame(np.random.randn(10, 5), columns=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n ax = df.plot.scatter(x=\"a\", y=\"b\", xerr=\"d\", yerr=\"e\", c=\"red\")\n self._check_has_errorbars(ax, xerr=1, yerr=1)\n _check_errorbar_color(ax.containers, \"red\", has_err=\"has_xerr\")\n _check_errorbar_color(ax.containers, \"red\", has_err=\"has_yerr\")\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", yerr=\"e\", color=\"green\")\n self._check_has_errorbars(ax, xerr=0, yerr=1)\n _check_errorbar_color(ax.containers, \"green\", has_err=\"has_yerr\")\n\n @pytest.mark.slow\n def test_sharex_and_ax(self):\n # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,\n # the axis in fig.get_axis() are sorted differently than pandas\n # expected them, so make sure that only the right ones are removed\n import matplotlib.pyplot as plt\n\n plt.close(\"all\")\n gs, axes = _generate_4_axes_via_gridspec()\n\n df = DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [1, 2, 3, 4, 5, 6],\n \"c\": [1, 2, 3, 4, 5, 6],\n \"d\": [1, 2, 3, 4, 5, 6],\n }\n )\n\n def _check(axes):\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n for ax in [axes[0], axes[2]]:\n self._check_visible(ax.get_xticklabels(), visible=False)\n 
self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n for ax in [axes[1], axes[3]]:\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax, sharex=True)\n gs.tight_layout(plt.gcf())\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharex=True)\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n # without sharex, no labels should be touched!\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax)\n\n gs.tight_layout(plt.gcf())\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n @pytest.mark.slow\n def test_sharey_and_ax(self):\n # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,\n # the axis in fig.get_axis() are sorted differently than pandas\n # expected them, so make sure that only the right ones are removed\n import matplotlib.pyplot as plt\n\n gs, axes = _generate_4_axes_via_gridspec()\n\n df = DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [1, 2, 3, 4, 5, 6],\n \"c\": [1, 2, 3, 4, 5, 6],\n \"d\": [1, 2, 3, 4, 5, 6],\n }\n )\n\n def _check(axes):\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n for ax in [axes[0], axes[1]]:\n self._check_visible(ax.get_yticklabels(), visible=True)\n for ax in [axes[2], axes[3]]:\n self._check_visible(ax.get_yticklabels(), visible=False)\n\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax, sharey=True)\n gs.tight_layout(plt.gcf())\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharey=True)\n\n gs.tight_layout(plt.gcf())\n _check(axes)\n tm.close()\n\n gs, axes = _generate_4_axes_via_gridspec()\n # without sharex, no labels should be touched!\n for ax in axes:\n df.plot(x=\"a\", y=\"b\", title=\"title\", ax=ax)\n\n gs.tight_layout(plt.gcf())\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n\n @td.skip_if_no_scipy\n def test_memory_leak(self):\n \"\"\" Check that every plot type gets properly collected. 
\"\"\"\n import weakref\n import gc\n\n results = {}\n for kind in plotting.PlotAccessor._all_kinds:\n\n args = {}\n if kind in [\"hexbin\", \"scatter\", \"pie\"]:\n df = self.hexbin_df\n args = {\"x\": \"A\", \"y\": \"B\"}\n elif kind == \"area\":\n df = self.tdf.abs()\n else:\n df = self.tdf\n\n # Use a weakref so we can see if the object gets collected without\n # also preventing it from being collected\n results[kind] = weakref.proxy(df.plot(kind=kind, **args))\n\n # have matplotlib delete all the figures\n tm.close()\n # force a garbage collection\n gc.collect()\n for key in results:\n # check that every plot was collected\n with pytest.raises(ReferenceError):\n # need to actually access something to get an error\n results[key].lines\n\n @pytest.mark.slow\n def test_df_subplots_patterns_minorticks(self):\n # GH 10657\n import matplotlib.pyplot as plt\n\n df = DataFrame(\n np.random.randn(10, 2),\n index=date_range(\"1/1/2000\", periods=10),\n columns=list(\"AB\"),\n )\n\n # shared subplots\n fig, axes = plt.subplots(2, 1, sharex=True)\n axes = df.plot(subplots=True, ax=axes)\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n # xaxis of 1st ax must be hidden\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)\n tm.close()\n\n fig, axes = plt.subplots(2, 1)\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharex=True)\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n # xaxis of 1st ax must be hidden\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # not shared\n fig, axes = plt.subplots(2, 1)\n axes = df.plot(subplots=True, ax=axes)\n for ax in axes:\n assert len(ax.lines) == 1\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n @pytest.mark.slow\n def test_df_gridspec_patterns(self):\n # GH 10819\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n\n ts = Series(np.random.randn(10), index=date_range(\"1/1/2000\", periods=10))\n\n df = DataFrame(np.random.randn(10, 2), index=ts.index, columns=list(\"AB\"))\n\n def _get_vertical_grid():\n gs = gridspec.GridSpec(3, 1)\n fig = plt.figure()\n ax1 = fig.add_subplot(gs[:2, :])\n ax2 = fig.add_subplot(gs[2, :])\n return ax1, ax2\n\n def _get_horizontal_grid():\n gs = gridspec.GridSpec(1, 3)\n fig = plt.figure()\n ax1 = fig.add_subplot(gs[:, :2])\n ax2 = fig.add_subplot(gs[:, 2])\n return ax1, ax2\n\n for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:\n ax1 = ts.plot(ax=ax1)\n assert len(ax1.lines) == 1\n ax2 = df.plot(ax=ax2)\n assert len(ax2.lines) == 2\n for ax in [ax1, ax2]:\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # subplots=True\n for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:\n 
axes = df.plot(subplots=True, ax=[ax1, ax2])\n assert len(ax1.lines) == 1\n assert len(ax2.lines) == 1\n for ax in axes:\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # vertical / subplots / sharex=True / sharey=True\n ax1, ax2 = _get_vertical_grid()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)\n assert len(axes[0].lines) == 1\n assert len(axes[1].lines) == 1\n for ax in [ax1, ax2]:\n # yaxis are visible because there is only one column\n self._check_visible(ax.get_yticklabels(), visible=True)\n # xaxis of axes0 (top) are hidden\n self._check_visible(axes[0].get_xticklabels(), visible=False)\n self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)\n self._check_visible(axes[1].get_xticklabels(), visible=True)\n self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # horizontal / subplots / sharex=True / sharey=True\n ax1, ax2 = _get_horizontal_grid()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)\n assert len(axes[0].lines) == 1\n assert len(axes[1].lines) == 1\n self._check_visible(axes[0].get_yticklabels(), visible=True)\n # yaxis of axes1 (right) are hidden\n self._check_visible(axes[1].get_yticklabels(), visible=False)\n for ax in [ax1, ax2]:\n # xaxis are visible because there is only one column\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # boxed\n def _get_boxed_grid():\n gs = gridspec.GridSpec(3, 3)\n fig = plt.figure()\n ax1 = fig.add_subplot(gs[:2, :2])\n ax2 = fig.add_subplot(gs[:2, 2])\n ax3 = fig.add_subplot(gs[2, :2])\n ax4 = fig.add_subplot(gs[2, 2])\n return ax1, ax2, ax3, ax4\n\n axes = _get_boxed_grid()\n df = DataFrame(np.random.randn(10, 4), index=ts.index, columns=list(\"ABCD\"))\n axes = df.plot(subplots=True, ax=axes)\n for ax in axes:\n assert len(ax.lines) == 1\n # axis are visible because these are not shared\n self._check_visible(ax.get_yticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n # subplots / sharex=True / sharey=True\n axes = _get_boxed_grid()\n with tm.assert_produces_warning(UserWarning):\n axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)\n for ax in axes:\n assert len(ax.lines) == 1\n for ax in [axes[0], axes[2]]: # left column\n self._check_visible(ax.get_yticklabels(), visible=True)\n for ax in [axes[1], axes[3]]: # right column\n self._check_visible(ax.get_yticklabels(), visible=False)\n for ax in [axes[0], axes[1]]: # top row\n self._check_visible(ax.get_xticklabels(), visible=False)\n self._check_visible(ax.get_xticklabels(minor=True), visible=False)\n for ax in [axes[2], axes[3]]: # bottom row\n self._check_visible(ax.get_xticklabels(), visible=True)\n self._check_visible(ax.get_xticklabels(minor=True), visible=True)\n tm.close()\n\n @pytest.mark.slow\n def test_df_grid_settings(self):\n # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792\n self._check_grid_settings(\n DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]}),\n plotting.PlotAccessor._dataframe_kinds,\n kws={\"x\": \"a\", \"y\": \"b\"},\n )\n\n def test_invalid_colormap(self):\n df = 
DataFrame(randn(3, 2), columns=[\"A\", \"B\"])\n\n with pytest.raises(ValueError):\n df.plot(colormap=\"invalid_colormap\")\n\n def test_plain_axes(self):\n\n # supplied ax itself is a SubplotAxes, but figure contains also\n # a plain Axes object (GH11556)\n fig, ax = self.plt.subplots()\n fig.add_axes([0.2, 0.2, 0.2, 0.2])\n Series(rand(10)).plot(ax=ax)\n\n # supplied ax itself is a plain Axes, but because the cmap keyword\n # a new ax is created for the colorbar -> also multiples axes (GH11520)\n df = DataFrame({\"a\": randn(8), \"b\": randn(8)})\n fig = self.plt.figure()\n ax = fig.add_axes((0, 0, 1, 1))\n df.plot(kind=\"scatter\", ax=ax, x=\"a\", y=\"b\", c=\"a\", cmap=\"hsv\")\n\n # other examples\n fig, ax = self.plt.subplots()\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n Series(rand(10)).plot(ax=ax)\n Series(rand(10)).plot(ax=cax)\n\n fig, ax = self.plt.subplots()\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n iax = inset_axes(ax, width=\"30%\", height=1.0, loc=3)\n Series(rand(10)).plot(ax=ax)\n Series(rand(10)).plot(ax=iax)\n\n def test_passed_bar_colors(self):\n import matplotlib as mpl\n\n color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]\n colormap = mpl.colors.ListedColormap(color_tuples)\n barplot = pd.DataFrame([[1, 2, 3]]).plot(kind=\"bar\", cmap=colormap)\n assert color_tuples == [c.get_facecolor() for c in barplot.patches]\n\n def test_rcParams_bar_colors(self):\n import matplotlib as mpl\n\n color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]\n with mpl.rc_context(rc={\"axes.prop_cycle\": mpl.cycler(\"color\", color_tuples)}):\n barplot = pd.DataFrame([[1, 2, 3]]).plot(kind=\"bar\")\n assert color_tuples == [c.get_facecolor() for c in barplot.patches]\n\n @pytest.mark.parametrize(\"method\", [\"line\", \"barh\", \"bar\"])\n def test_secondary_axis_font_size(self, method):\n # GH: 12565\n df = (\n pd.DataFrame(np.random.randn(15, 2), columns=list(\"AB\"))\n .assign(C=lambda df: df.B.cumsum())\n .assign(D=lambda df: df.C * 1.1)\n )\n\n fontsize = 20\n sy = [\"C\", \"D\"]\n\n kwargs = dict(secondary_y=sy, fontsize=fontsize, mark_right=True)\n ax = getattr(df.plot, method)(**kwargs)\n self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)\n\n @pytest.mark.slow\n def test_x_string_values_ticks(self):\n # Test if string plot index have a fixed xtick position\n # GH: 7612, GH: 22334\n df = pd.DataFrame(\n {\n \"sales\": [3, 2, 3],\n \"visits\": [20, 42, 28],\n \"day\": [\"Monday\", \"Tuesday\", \"Wednesday\"],\n }\n )\n ax = df.plot.area(x=\"day\")\n ax.set_xlim(-1, 3)\n xticklabels = [t.get_text() for t in ax.get_xticklabels()]\n labels_position = dict(zip(xticklabels, ax.get_xticks()))\n # Testing if the label stayed at the right position\n assert labels_position[\"Monday\"] == 0.0\n assert labels_position[\"Tuesday\"] == 1.0\n assert labels_position[\"Wednesday\"] == 2.0\n\n @pytest.mark.slow\n def test_x_multiindex_values_ticks(self):\n # Test if multiindex plot index have a fixed xtick position\n # GH: 15912\n index = pd.MultiIndex.from_product([[2012, 2013], [1, 2]])\n df = pd.DataFrame(np.random.randn(4, 2), columns=[\"A\", \"B\"], index=index)\n ax = df.plot()\n ax.set_xlim(-1, 4)\n xticklabels = [t.get_text() for t in ax.get_xticklabels()]\n labels_position = dict(zip(xticklabels, ax.get_xticks()))\n # Testing if the label stayed at the right position\n assert labels_position[\"(2012, 1)\"] == 0.0\n 
assert labels_position[\"(2012, 2)\"] == 1.0\n assert labels_position[\"(2013, 1)\"] == 2.0\n assert labels_position[\"(2013, 2)\"] == 3.0\n\n @pytest.mark.parametrize(\"kind\", [\"line\", \"area\"])\n def test_xlim_plot_line(self, kind):\n # test if xlim is set correctly in plot.line and plot.area\n # GH 27686\n df = pd.DataFrame([2, 4], index=[1, 2])\n ax = df.plot(kind=kind)\n xlims = ax.get_xlim()\n assert xlims[0] < 1\n assert xlims[1] > 2\n\n def test_xlim_plot_line_correctly_in_mixed_plot_type(self):\n # test if xlim is set correctly when ax contains multiple different kinds\n # of plots, GH 27686\n fig, ax = self.plt.subplots()\n\n indexes = [\"k1\", \"k2\", \"k3\", \"k4\"]\n df = pd.DataFrame(\n {\n \"s1\": [1000, 2000, 1500, 2000],\n \"s2\": [900, 1400, 2000, 3000],\n \"s3\": [1500, 1500, 1600, 1200],\n \"secondary_y\": [1, 3, 4, 3],\n },\n index=indexes,\n )\n df[[\"s1\", \"s2\", \"s3\"]].plot.bar(ax=ax, stacked=False)\n df[[\"secondary_y\"]].plot(ax=ax, secondary_y=True)\n\n xlims = ax.get_xlim()\n assert xlims[0] < 0\n assert xlims[1] > 3\n\n # make sure axis labels are plotted correctly as well\n xticklabels = [t.get_text() for t in ax.get_xticklabels()]\n assert xticklabels == indexes\n\n def test_subplots_sharex_false(self):\n # test when sharex is set to False, two plots should have different\n # labels, GH 25160\n df = pd.DataFrame(np.random.rand(10, 2))\n df.iloc[5:, 1] = np.nan\n df.iloc[:5, 0] = np.nan\n\n figs, axs = self.plt.subplots(2, 1)\n df.plot.line(ax=axs, subplots=True, sharex=False)\n\n expected_ax1 = np.arange(4.5, 10, 0.5)\n expected_ax2 = np.arange(-0.5, 5, 0.5)\n\n tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)\n tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)\n\n def test_plot_no_rows(self):\n # GH 27758\n df = pd.DataFrame(columns=[\"foo\"], dtype=int)\n assert df.empty\n ax = df.plot()\n assert len(ax.get_lines()) == 1\n line = ax.get_lines()[0]\n assert len(line.get_xdata()) == 0\n assert len(line.get_ydata()) == 0\n\n def test_plot_no_numeric_data(self):\n df = pd.DataFrame([\"a\", \"b\", \"c\"])\n with pytest.raises(TypeError):\n df.plot()\n\n def test_missing_markers_legend(self):\n # 14958\n df = pd.DataFrame(np.random.randn(8, 3), columns=[\"A\", \"B\", \"C\"])\n ax = df.plot(y=[\"A\"], marker=\"x\", linestyle=\"solid\")\n df.plot(y=[\"B\"], marker=\"o\", linestyle=\"dotted\", ax=ax)\n df.plot(y=[\"C\"], marker=\"<\", linestyle=\"dotted\", ax=ax)\n\n self._check_legend_labels(ax, labels=[\"A\", \"B\", \"C\"])\n self._check_legend_marker(ax, expected_markers=[\"x\", \"o\", \"<\"])\n\n def test_missing_markers_legend_using_style(self):\n # 14563\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6],\n \"B\": [2, 4, 1, 3, 2, 4],\n \"C\": [3, 3, 2, 6, 4, 2],\n \"X\": [1, 2, 3, 4, 5, 6],\n }\n )\n\n fig, ax = self.plt.subplots()\n for kind in \"ABC\":\n df.plot(\"X\", kind, label=kind, ax=ax, style=\".\")\n\n self._check_legend_labels(ax, labels=[\"A\", \"B\", \"C\"])\n self._check_legend_marker(ax, expected_markers=[\".\", \".\", \".\"])\n\n\ndef _generate_4_axes_via_gridspec():\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import matplotlib.gridspec # noqa\n\n gs = mpl.gridspec.GridSpec(2, 2)\n ax_tl = plt.subplot(gs[0, 0])\n ax_ll = plt.subplot(gs[1, 0])\n ax_tr = plt.subplot(gs[0, 1])\n ax_lr = plt.subplot(gs[1, 1])\n\n return gs, [ax_tl, ax_ll, ax_tr, ax_lr]\n", "import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, Series, Timestamp, date_range\nimport 
pandas._testing as tm\n\n\nclass TestDataFrameDiff:\n def test_diff(self, datetime_frame):\n the_diff = datetime_frame.diff(1)\n\n tm.assert_series_equal(\n the_diff[\"A\"], datetime_frame[\"A\"] - datetime_frame[\"A\"].shift(1)\n )\n\n # int dtype\n a = 10_000_000_000_000_000\n b = a + 1\n s = Series([a, b])\n\n rs = DataFrame({\"s\": s}).diff()\n assert rs.s[1] == 1\n\n # mixed numeric\n tf = datetime_frame.astype(\"float32\")\n the_diff = tf.diff(1)\n tm.assert_series_equal(the_diff[\"A\"], tf[\"A\"] - tf[\"A\"].shift(1))\n\n # GH#10907\n df = pd.DataFrame({\"y\": pd.Series([2]), \"z\": pd.Series([3])})\n df.insert(0, \"x\", 1)\n result = df.diff(axis=1)\n expected = pd.DataFrame(\n {\"x\": np.nan, \"y\": pd.Series(1), \"z\": pd.Series(1)}\n ).astype(\"float64\")\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n def test_diff_datetime_axis0(self, tz):\n # GH#18578\n df = DataFrame(\n {\n 0: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n 1: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n }\n )\n\n result = df.diff(axis=0)\n expected = DataFrame(\n {\n 0: pd.TimedeltaIndex([\"NaT\", \"1 days\"]),\n 1: pd.TimedeltaIndex([\"NaT\", \"1 days\"]),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n def test_diff_datetime_axis1(self, tz):\n # GH#18578\n df = DataFrame(\n {\n 0: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n 1: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n }\n )\n if tz is None:\n result = df.diff(axis=1)\n expected = DataFrame(\n {\n 0: pd.TimedeltaIndex([\"NaT\", \"NaT\"]),\n 1: pd.TimedeltaIndex([\"0 days\", \"0 days\"]),\n }\n )\n tm.assert_frame_equal(result, expected)\n else:\n with pytest.raises(NotImplementedError):\n result = df.diff(axis=1)\n\n def test_diff_timedelta(self):\n # GH#4533\n df = DataFrame(\n dict(\n time=[Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")],\n value=[1.0, 2.0],\n )\n )\n\n res = df.diff()\n exp = DataFrame(\n [[pd.NaT, np.nan], [pd.Timedelta(\"00:01:00\"), 1]], columns=[\"time\", \"value\"]\n )\n tm.assert_frame_equal(res, exp)\n\n def test_diff_mixed_dtype(self):\n df = DataFrame(np.random.randn(5, 3))\n df[\"A\"] = np.array([1, 2, 3, 4, 5], dtype=object)\n\n result = df.diff()\n assert result[0].dtype == np.float64\n\n def test_diff_neg_n(self, datetime_frame):\n rs = datetime_frame.diff(-1)\n xp = datetime_frame - datetime_frame.shift(-1)\n tm.assert_frame_equal(rs, xp)\n\n def test_diff_float_n(self, datetime_frame):\n rs = datetime_frame.diff(1.0)\n xp = datetime_frame.diff(1)\n tm.assert_frame_equal(rs, xp)\n\n def test_diff_axis(self):\n # GH#9727\n df = DataFrame([[1.0, 2.0], [3.0, 4.0]])\n tm.assert_frame_equal(\n df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]])\n )\n tm.assert_frame_equal(\n df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]])\n )\n" ]
[ [ "numpy.random.random", "pandas.Series", "pandas.DataFrame", "pandas.date_range", "numpy.sum" ], [ "pandas._testing.assert_almost_equal", "numpy.split", "numpy.imag", "pandas.Series", "numpy.take", "numpy.linspace", "numpy.vstack", "numpy.max", "numpy.random.randn", "scipy.stats.spearmanr", "pandas.isna", "pandas.core.nanops._bn_ok_dtype", "numpy.random.randint", "numpy.hstack", "pandas.core.nanops.nanstd", "pandas._testing.assert_numpy_array_equal", "numpy.real", "pandas.set_option", "pandas.core.dtypes.common.is_integer_dtype", "numpy.min", "numpy.isnan", "pandas.core.arrays.DatetimeArray", "numpy.cov", "pandas.date_range", "numpy.errstate", "numpy.corrcoef", "numpy.random.RandomState", "numpy.array", "pandas.core.nanops.nankurt", "numpy.abs", "numpy.random.seed", "pandas.core.nanops._ensure_numeric", "pandas.core.nanops.nanskew", "numpy.tile", "numpy.ones", "pandas.core.nanops.nanmean", "scipy.stats.kendalltau", "pandas.get_option", "numpy.empty", "pandas.core.nanops.nanvar" ], [ "pandas.MultiIndex", "pandas.RangeIndex", "numpy.arange", "numpy.random.choice", "pandas.MultiIndex.from_arrays", "numpy.random.permutation", "pandas.MultiIndex.from_product", "numpy.random.rand", "pandas.date_range", "numpy.array", "numpy.random.randint" ], [ "pandas._testing.assert_almost_equal", "pandas._testing.assert_numpy_array_equal", "pandas._libs.reduction.SeriesGrouper", "pandas._libs.lib.generate_bins_dt64", "numpy.arange", "pandas._libs.reduction.SeriesBinGrouper", "pandas.isna", "numpy.random.randn", "numpy.diff", "numpy.repeat", "numpy.array", "numpy.zeros" ], [ "pandas._testing.assert_almost_equal", "pandas.to_datetime", "pandas.Series", "numpy.linspace", "pandas.plotting.table", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas.tests.plotting.common.TestPlotBase.setup_method", "matplotlib.rcdefaults", "numpy.random.randn", "pandas.core.dtypes.api.is_list_like", "pandas.plotting._matplotlib.compat._mpl_ge_3_1_0", "pandas._testing.RNGContext", "pandas._testing.makeDateIndex", "numpy.random.randint", "matplotlib.pyplot.tight_layout", "pandas._testing.assert_numpy_array_equal", "numpy.arange", "pandas.core.arrays.integer_array", "matplotlib.pyplot.gcf", "matplotlib.pyplot.subplot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.close", "matplotlib.cycler", "pandas._testing.assert_series_equal", "matplotlib.pyplot.figure", "numpy.isclose", "pandas._testing.assert_produces_warning", "pandas.plotting.plot_params.use", "pandas.Categorical", "pandas.Timedelta", "numpy.delete", "matplotlib.colors.ListedColormap", "pandas._testing.close", "numpy.random.rand", "pandas.date_range", "pandas.MultiIndex.from_product", "numpy.array", "pandas.tests.plotting.common._check_plot_works", "matplotlib.cm.jet", "numpy.random.random", "numpy.random.seed", "matplotlib.pyplot.subplots", "numpy.ones", "pandas.Period", "pandas._testing.makeTimeDataFrame", "numpy.random.uniform", "pandas.io.formats.printing.pprint_thing" ], [ "pandas.Series", "pandas.TimedeltaIndex", "pandas.Timestamp", "pandas.DataFrame", "pandas.Timedelta", "pandas._testing.assert_frame_equal", "numpy.random.randn", "pandas.date_range", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
OxfordHED/sunbear
[ "9c7f368c4086f69868e7e5d87ea0b40700610e19" ]
[ "sunbear/vis.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sunbear.forward import forward_pos\n\n__all__ = [\"vis\"]\n\ndef vis(phi, ngrid=10, line_kwargs={}):\n \"\"\"\n Plot the grid deformation based on the given deflection potential, `phi`.\n It only works for 2D signal at the moment.\n\n Parameters\n ----------\n * `phi` : np.ndarray\n The deflection potential.\n * `ngrid` : int or sequential, int\n Number of grid points to be visualized.\n * `line_kwargs` : dict\n Kwargs of the plt.plot to plot the grid lines.\n \"\"\"\n ndim = np.ndim(phi)\n if ndim != 2:\n raise ValueError(\"vis function can only take 2D deflection potential\")\n if not hasattr(ngrid, \"__iter__\"):\n ngrid = (ngrid, ngrid)\n if line_kwargs == {}:\n line_kwargs = {\"color\": \"C0\"}\n\n # obtain the mesh position\n x = [np.linspace(0, phi.shape[i]-1, ngrid[i]) for i in range(ndim)]\n meshpos = np.array(np.meshgrid(*x)) # (ndim, ngrid0, ngrid1, ...)\n pos = meshpos.reshape(ndim, -1).T # (N x D)\n\n # get the new position\n newpos = forward_pos(pos, phi) # (N x D)\n new_shape = list(meshpos.shape[1:]) + [ndim]\n mesh_newpos = newpos.reshape(new_shape) # (ngrid0, ngrid1, ..., ndim)\n\n if ndim == 2:\n for i in range(mesh_newpos.shape[0]):\n plt.plot(mesh_newpos[i,:,0], mesh_newpos[i,:,1], **line_kwargs)\n for i in range(mesh_newpos.shape[1]):\n plt.plot(mesh_newpos[:,i,0], mesh_newpos[:,i,1], **line_kwargs)\n plt.gca().set_aspect(phi.shape[1] * 1.0 / phi.shape[0])\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.linspace", "numpy.ndim", "matplotlib.pyplot.plot", "numpy.meshgrid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aurelienpierre/colour
[ "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47", "3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47" ]
[ "colour/contrast/tests/test_barten1999.py", "colour/recovery/tests/test_meng2015.py", "colour/examples/notation/examples_hexadecimal.py", "colour/characterisation/tests/test_aces_it.py", "colour/models/osa_ucs.py", "colour/appearance/tests/test_rlab.py", "colour/examples/models/examples_cmyk.py", "colour/quality/cri.py", "colour/blindness/datasets/machado2010.py", "colour/plotting/temperature.py" ]
[ "\"\"\"Defines the unit tests for the :mod:`colour.contrast.barten1999` module.\"\"\"\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.contrast import (\n optical_MTF_Barten1999,\n pupil_diameter_Barten1999,\n sigma_Barten1999,\n retinal_illuminance_Barten1999,\n maximum_angular_size_Barten1999,\n contrast_sensitivity_function_Barten1999,\n)\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"TestOpticalMTFBarten1999\",\n \"TestPupilDiameterBarten1999\",\n \"TestSigmaBarten1999\",\n \"TestRetinalIlluminanceBarten1999\",\n \"TestMaximumAngularSizeBarten1999\",\n \"TestContrastSensitivityFunctionBarten1999\",\n]\n\n\nclass TestOpticalMTFBarten1999(unittest.TestCase):\n \"\"\"\n Define :func:`colour.contrast.barten1999.optical_MTF_Barten1999`\n definition unit tests methods.\n \"\"\"\n\n def test_optical_MTF_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.optical_MTF_Barten1999`\n definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n optical_MTF_Barten1999(4, 0.01), 0.968910791191297, decimal=7\n )\n\n np.testing.assert_almost_equal(\n optical_MTF_Barten1999(8, 0.01), 0.881323136669471, decimal=7\n )\n\n np.testing.assert_almost_equal(\n optical_MTF_Barten1999(4, 0.05), 0.454040738727245, decimal=7\n )\n\n def test_n_dimensional_optical_MTF_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.optical_MTF_Barten1999`\n definition n-dimensional support.\n \"\"\"\n\n u = np.array([4, 8, 12])\n sigma = np.array([0.01, 0.05, 0.1])\n M_opt = optical_MTF_Barten1999(u, sigma)\n\n u = np.tile(u, (6, 1))\n sigma = np.tile(sigma, (6, 1))\n M_opt = np.tile(M_opt, (6, 1))\n np.testing.assert_almost_equal(\n optical_MTF_Barten1999(u, sigma), M_opt, decimal=7\n )\n\n u = np.reshape(u, (2, 3, 3))\n sigma = np.reshape(sigma, (2, 3, 3))\n M_opt = np.reshape(M_opt, (2, 3, 3))\n np.testing.assert_almost_equal(\n optical_MTF_Barten1999(u, sigma), M_opt, decimal=7\n )\n\n @ignore_numpy_errors\n def test_nan_optical_MTF_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.optical_MTF_Barten1999`\n definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n optical_MTF_Barten1999(np.array(case), np.array(case))\n\n\nclass TestPupilDiameterBarten1999(unittest.TestCase):\n \"\"\"\n Define :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`\n definition unit tests methods.\n \"\"\"\n\n def test_pupil_diameter_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`\n definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n pupil_diameter_Barten1999(20, 60), 2.272517118855717, decimal=7\n )\n\n np.testing.assert_almost_equal(\n pupil_diameter_Barten1999(0.2, 600), 2.272517118855717, decimal=7\n )\n\n np.testing.assert_almost_equal(\n pupil_diameter_Barten1999(20, 60, 30), 2.459028745178825, decimal=7\n )\n\n def test_n_dimensional_pupil_diameter_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`\n definition n-dimensional support.\n \"\"\"\n\n L = np.array([0.2, 20, 100])\n X_0 = np.array([60, 120, 240])\n Y_0 = np.array([60, 30, 15])\n d = 
pupil_diameter_Barten1999(L, X_0, Y_0)\n\n L = np.tile(L, (6, 1))\n X_0 = np.tile(X_0, (6, 1))\n d = np.tile(d, (6, 1))\n np.testing.assert_almost_equal(\n pupil_diameter_Barten1999(L, X_0, Y_0), d, decimal=7\n )\n\n L = np.reshape(L, (2, 3, 3))\n X_0 = np.reshape(X_0, (2, 3, 3))\n d = np.reshape(d, (2, 3, 3))\n np.testing.assert_almost_equal(\n pupil_diameter_Barten1999(L, X_0, Y_0), d, decimal=7\n )\n\n @ignore_numpy_errors\n def test_nan_pupil_diameter_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`\n definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n pupil_diameter_Barten1999(\n np.array(case), np.array(case), np.array(case)\n )\n\n\nclass TestSigmaBarten1999(unittest.TestCase):\n \"\"\"\n Define :func:`colour.contrast.barten1999.sigma_Barten1999` definition unit\n tests methods.\n \"\"\"\n\n def test_sigma_Barten1999(self):\n \"\"\"Test :func:`colour.contrast.barten1999.sigma_Barten1999` definition.\"\"\"\n\n np.testing.assert_almost_equal(\n sigma_Barten1999(0.5 / 60, 0.08 / 60, 2.1),\n 0.008791157173231,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n sigma_Barten1999(0.75 / 60, 0.08 / 60, 2.1),\n 0.012809761902549,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n sigma_Barten1999(0.5 / 60, 0.16 / 60, 2.1),\n 0.010040141654601,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n sigma_Barten1999(0.5 / 60, 0.08 / 60, 2.5),\n 0.008975274678558,\n decimal=7,\n )\n\n def test_n_dimensional_sigma_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.sigma_Barten1999` definition\n n-dimensional support.\n \"\"\"\n\n sigma_0 = np.array([0.25 / 60, 0.5 / 60, 0.75 / 60])\n C_ab = np.array([0.04 / 60, 0.08 / 60, 0.16 / 60])\n d = np.array([2.1, 2.5, 5.0])\n sigma = sigma_Barten1999(sigma_0, C_ab, d)\n\n sigma_0 = np.tile(sigma_0, (6, 1))\n C_ab = np.tile(C_ab, (6, 1))\n sigma = np.tile(sigma, (6, 1))\n np.testing.assert_almost_equal(\n sigma_Barten1999(sigma_0, C_ab, d), sigma, decimal=7\n )\n\n sigma_0 = np.reshape(sigma_0, (2, 3, 3))\n C_ab = np.reshape(C_ab, (2, 3, 3))\n sigma = np.reshape(sigma, (2, 3, 3))\n np.testing.assert_almost_equal(\n sigma_Barten1999(sigma_0, C_ab, d), sigma, decimal=7\n )\n\n @ignore_numpy_errors\n def test_nan_sigma_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.sigma_Barten1999`\n definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n sigma_Barten1999(np.array(case), np.array(case), np.array(case))\n\n\nclass TestRetinalIlluminanceBarten1999(unittest.TestCase):\n \"\"\"\n Define :func:`colour.contrast.barten1999.retinal_illuminance_Barten1999`\n definition unit tests methods.\n \"\"\"\n\n def test_retinal_illuminance_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.retinal_illuminance_Barten1999`\n definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n retinal_illuminance_Barten1999(20, 2.1, True),\n 66.082316060529919,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n retinal_illuminance_Barten1999(20, 2.5, True),\n 91.815644777503664,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n retinal_illuminance_Barten1999(20, 2.1, False),\n 69.272118011654939,\n decimal=7,\n )\n\n def test_n_dimensional_retinal_illuminance_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.retinal_illuminance_Barten1999`\n definition 
n-dimensional support.\n \"\"\"\n\n L = np.array([0.2, 20, 100])\n d = np.array([2.1, 2.5, 5.0])\n E = retinal_illuminance_Barten1999(L, d)\n\n L = np.tile(L, (6, 1))\n d = np.tile(d, (6, 1))\n E = np.tile(E, (6, 1))\n np.testing.assert_almost_equal(\n retinal_illuminance_Barten1999(L, d), E, decimal=7\n )\n\n L = np.reshape(L, (2, 3, 3))\n d = np.reshape(d, (2, 3, 3))\n E = np.reshape(E, (2, 3, 3))\n np.testing.assert_almost_equal(\n retinal_illuminance_Barten1999(L, d), E, decimal=7\n )\n\n @ignore_numpy_errors\n def test_nan_retinal_illuminance_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.retinal_illuminance_Barten1999`\n definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n retinal_illuminance_Barten1999(np.array(case), np.array(case))\n\n\nclass TestMaximumAngularSizeBarten1999(unittest.TestCase):\n \"\"\"\n Define :func:`colour.contrast.barten1999.maximum_angular_size_Barten1999`\n definition unit tests methods.\n \"\"\"\n\n def test_maximum_angular_size_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.\\\nmaximum_angular_size_Barten1999` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n maximum_angular_size_Barten1999(4, 60, 12, 15),\n 3.572948005052482,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n maximum_angular_size_Barten1999(8, 60, 12, 15),\n 1.851640199545103,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n maximum_angular_size_Barten1999(4, 120, 12, 15),\n 3.577708763999663,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n maximum_angular_size_Barten1999(4, 60, 24, 15),\n 3.698001308168194,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n maximum_angular_size_Barten1999(4, 60, 12, 30),\n 6.324555320336758,\n decimal=7,\n )\n\n def test_n_dimensional_maximum_angular_size_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.\\\nmaximum_angular_size_Barten1999` definition n-dimensional support.\n \"\"\"\n\n u = np.array([4, 8, 12])\n X_0 = np.array([60, 120, 240])\n X_max = np.array([12, 14, 16])\n N_max = np.array([15, 20, 25])\n X = maximum_angular_size_Barten1999(u, X_0, X_max, N_max)\n\n u = np.tile(u, (6, 1))\n X_0 = np.tile(X_0, (6, 1))\n X = np.tile(X, (6, 1))\n np.testing.assert_almost_equal(\n maximum_angular_size_Barten1999(u, X_0, X_max, N_max), X, decimal=7\n )\n\n u = np.reshape(u, (2, 3, 3))\n X_0 = np.reshape(X_0, (2, 3, 3))\n X = np.reshape(X, (2, 3, 3))\n np.testing.assert_almost_equal(\n maximum_angular_size_Barten1999(u, X_0, X_max, N_max), X, decimal=7\n )\n\n @ignore_numpy_errors\n def test_nan_maximum_angular_size_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.\\\nmaximum_angular_size_Barten1999` definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n maximum_angular_size_Barten1999(\n np.array(case), np.array(case), np.array(case), np.array(case)\n )\n\n\nclass TestContrastSensitivityFunctionBarten1999(unittest.TestCase):\n \"\"\"\n Define :func:`colour.contrast.barten1999.\\\ncontrast_sensitivity_function_Barten1999` definition unit tests methods.\n \"\"\"\n\n def test_contrast_sensitivity_function_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.\\\ncontrast_sensitivity_function_Barten1999` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.01,\n E=65,\n 
X_0=60,\n X_max=12,\n Y_0=60,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n 352.761342126727020,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=8,\n sigma=0.01,\n E=65,\n X_0=60,\n X_max=12,\n Y_0=60,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n 177.706338840717340,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.02,\n E=65,\n X_0=60,\n X_max=12,\n Y_0=60,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n 320.872401634215750,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.01,\n E=130,\n X_0=60,\n X_max=12,\n Y_0=60,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n 455.171315756946400,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.01,\n E=65,\n X_0=120,\n X_max=12,\n Y_0=60,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n 352.996281545740660,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.01,\n E=65,\n X_0=60,\n X_max=24,\n Y_0=60,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n 358.881580104493650,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.01,\n E=65,\n X_0=240,\n X_max=12,\n Y_0=60,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.01,\n E=65,\n X_0=60,\n X_max=12,\n Y_0=240,\n Y_max=12,\n p=1.2 * 10**6,\n ),\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=4,\n sigma=0.01,\n E=65,\n X_0=60,\n X_max=12,\n Y_0=60,\n Y_max=12,\n p=1.4 * 10**6,\n ),\n 374.791328640476140,\n decimal=7,\n )\n\n def test_n_dimensional_contrast_sensitivity_function_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.\\\ncontrast_sensitivity_function_Barten1999` definition n-dimensional support.\n \"\"\"\n\n u = np.array([4, 8, 12])\n sigma = np.array([0.01, 0.02, 0.04])\n E = np.array([0.65, 90, 1500])\n X_0 = np.array([60, 120, 240])\n S = contrast_sensitivity_function_Barten1999(\n u=u, sigma=sigma, E=E, X_0=X_0\n )\n\n u = np.tile(u, (6, 1))\n E = np.tile(E, (6, 1))\n S = np.tile(S, (6, 1))\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=u, sigma=sigma, E=E, X_0=X_0\n ),\n S,\n decimal=7,\n )\n\n u = np.reshape(u, (2, 3, 3))\n E = np.reshape(E, (2, 3, 3))\n S = np.reshape(S, (2, 3, 3))\n np.testing.assert_almost_equal(\n contrast_sensitivity_function_Barten1999(\n u=u, sigma=sigma, E=E, X_0=X_0\n ),\n S,\n decimal=7,\n )\n\n @ignore_numpy_errors\n def test_nan_contrast_sensitivity_function_Barten1999(self):\n \"\"\"\n Test :func:`colour.contrast.barten1999.\\\ncontrast_sensitivity_function_Barten1999` definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n contrast_sensitivity_function_Barten1999(\n u=np.array(case),\n sigma=np.array(case),\n E=np.array(case),\n X_0=np.array(case),\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"Defines the unit tests for the :mod:`colour.recovery.meng2015` module.\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.colorimetry import (\n MSDS_CMFS,\n SDS_ILLUMINANTS,\n SpectralShape,\n reshape_msds,\n reshape_sd,\n sd_to_XYZ_integration,\n)\nfrom colour.recovery import XYZ_to_sd_Meng2015\nfrom colour.utilities import domain_range_scale\n\n__author__ = \"Colour Developers\"\n__copyright__ = 
\"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"TestXYZ_to_sd_Meng2015\",\n]\n\n\nclass TestXYZ_to_sd_Meng2015(unittest.TestCase):\n \"\"\"\n Define :func:`colour.recovery.meng2015.XYZ_to_sd_Meng2015` definition unit\n tests methods.\n \"\"\"\n\n def setUp(self):\n \"\"\"Initialise the common tests attributes.\"\"\"\n\n # pylint: disable=E1102\n self._cmfs = reshape_msds(\n MSDS_CMFS[\"CIE 1931 2 Degree Standard Observer\"],\n SpectralShape(360, 780, 10),\n )\n self._sd_D65 = reshape_sd(SDS_ILLUMINANTS[\"D65\"], self._cmfs.shape)\n self._sd_E = reshape_sd(SDS_ILLUMINANTS[\"E\"], self._cmfs.shape)\n\n def test_XYZ_to_sd_Meng2015(self):\n \"\"\"Test :func:`colour.recovery.meng2015.XYZ_to_sd_Meng2015` definition.\"\"\"\n\n XYZ = np.array([0.20654008, 0.12197225, 0.05136952])\n np.testing.assert_almost_equal(\n sd_to_XYZ_integration(\n XYZ_to_sd_Meng2015(XYZ, self._cmfs, self._sd_D65),\n self._cmfs,\n self._sd_D65,\n )\n / 100,\n XYZ,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n sd_to_XYZ_integration(\n XYZ_to_sd_Meng2015(XYZ, self._cmfs, self._sd_E),\n self._cmfs,\n self._sd_E,\n )\n / 100,\n XYZ,\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n sd_to_XYZ_integration(\n XYZ_to_sd_Meng2015(\n XYZ,\n self._cmfs,\n self._sd_D65,\n optimisation_kwargs={\n \"options\": {\n \"ftol\": 1e-10,\n }\n },\n ),\n self._cmfs,\n self._sd_D65,\n )\n / 100,\n XYZ,\n decimal=7,\n )\n\n shape = SpectralShape(400, 700, 5)\n # pylint: disable=E1102\n cmfs = reshape_msds(self._cmfs, shape)\n np.testing.assert_almost_equal(\n sd_to_XYZ_integration(\n XYZ_to_sd_Meng2015(XYZ, cmfs, self._sd_D65), cmfs, self._sd_D65\n )\n / 100,\n XYZ,\n decimal=7,\n )\n\n def test_raise_exception_XYZ_to_sd_Meng2015(self):\n \"\"\"\n Test :func:`colour.recovery.meng2015.XYZ_to_sd_Meng2015`\n definition raised exception.\n \"\"\"\n\n self.assertRaises(\n RuntimeError,\n XYZ_to_sd_Meng2015,\n np.array([0.0, 0.0, 1.0]),\n optimisation_kwargs={\n \"options\": {\"maxiter\": 10},\n },\n )\n\n def test_domain_range_scale_XYZ_to_sd_Meng2015(self):\n \"\"\"\n Test :func:`colour.recovery.meng2015.XYZ_to_sd_Meng2015` definition\n domain and range scale support.\n \"\"\"\n\n XYZ_i = np.array([0.20654008, 0.12197225, 0.05136952])\n XYZ_o = sd_to_XYZ_integration(\n XYZ_to_sd_Meng2015(XYZ_i, self._cmfs, self._sd_D65),\n self._cmfs,\n self._sd_D65,\n )\n\n d_r = ((\"reference\", 1, 1), (\"1\", 1, 0.01), (\"100\", 100, 1))\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n sd_to_XYZ_integration(\n XYZ_to_sd_Meng2015(\n XYZ_i * factor_a, self._cmfs, self._sd_D65\n ),\n self._cmfs,\n self._sd_D65,\n ),\n XYZ_o * factor_b,\n decimal=7,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"Showcases hexadecimal computations.\"\"\"\n\nimport numpy as np\n\nimport colour.notation.hexadecimal\nfrom colour.utilities import message_box\n\nmessage_box(\"Hexadecimal Computations\")\n\nRGB = np.array([0.45620519, 0.03081071, 0.04091952])\nmessage_box(\n f'Converting to the \"hexadecimal\" representation from given \"RGB\"'\n f\"colourspace values:\\n\\n\\t{RGB}\"\n)\nprint(colour.notation.hexadecimal.RGB_to_HEX(RGB))\n\nprint(\"\\n\")\n\nhex_triplet = \"#74070a\"\nmessage_box(\n f'Converting to the \"RGB\" colourspace from given \"hexadecimal\" '\n 
f\"representation:\\n\\n\\t{hex_triplet}\"\n)\nprint(colour.notation.hexadecimal.HEX_to_RGB(hex_triplet))\n", "\"\"\"Defines the unit tests for the :mod:`colour.characterisation.aces_it` module.\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nimport os\nimport unittest\n\nfrom colour.characterisation import (\n MSDS_ACES_RICD,\n MSDS_CAMERA_SENSITIVITIES,\n SDS_COLOURCHECKERS,\n sd_to_aces_relative_exposure_values,\n read_training_data_rawtoaces_v1,\n generate_illuminants_rawtoaces_v1,\n white_balance_multipliers,\n best_illuminant,\n normalise_illuminant,\n training_data_sds_to_RGB,\n training_data_sds_to_XYZ,\n optimisation_factory_rawtoaces_v1,\n optimisation_factory_Jzazbz,\n matrix_idt,\n camera_RGB_to_ACES2065_1,\n)\nfrom colour.characterisation.aces_it import RESOURCES_DIRECTORY_RAWTOACES\nfrom colour.colorimetry import (\n MSDS_CMFS,\n MultiSpectralDistributions,\n SDS_ILLUMINANTS,\n SpectralDistribution,\n SpectralShape,\n reshape_msds,\n sds_and_msds_to_msds,\n sd_constant,\n sd_ones,\n)\nfrom colour.io import read_sds_from_csv_file\nfrom colour.utilities import domain_range_scale\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"MSDS_CANON_EOS_5DMARK_II\",\n \"SD_AMPAS_ISO7589_STUDIO_TUNGSTEN\",\n \"TestSpectralToAcesRelativeExposureValues\",\n \"TestReadTrainingDataRawtoacesV1\",\n \"TestGenerateIlluminantsRawtoacesV1\",\n \"TestWhiteBalanceMultipliers\",\n \"TestBestIlluminant\",\n \"TestNormaliseIlluminant\",\n \"TestTrainingDataSdsToRGB\",\n \"TestTrainingDataSdsToXYZ\",\n \"TestOptimizationFactoryRawtoacesV1\",\n \"TestOptimizationFactoryJzazbz\",\n \"TestMatrixIdt\",\n \"TestCamera_RGB_to_ACES2065_1\",\n]\n\nMSDS_CANON_EOS_5DMARK_II: MultiSpectralDistributions = sds_and_msds_to_msds(\n list(\n read_sds_from_csv_file(\n os.path.join(\n RESOURCES_DIRECTORY_RAWTOACES,\n \"CANON_EOS_5DMark_II_RGB_Sensitivities.csv\",\n )\n ).values()\n )\n)\n\nSD_AMPAS_ISO7589_STUDIO_TUNGSTEN: SpectralDistribution = (\n read_sds_from_csv_file(\n os.path.join(\n RESOURCES_DIRECTORY_RAWTOACES, \"AMPAS_ISO_7589_Tungsten.csv\"\n )\n )[\"iso7589\"]\n)\n\n\nclass TestSpectralToAcesRelativeExposureValues(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.\\\nsd_to_aces_relative_exposure_values` definition unit tests methods.\n \"\"\"\n\n def test_spectral_to_aces_relative_exposure_values(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.\n sd_to_aces_relative_exposure_values` definition.\n \"\"\"\n\n shape = MSDS_ACES_RICD.shape\n grey_reflector = sd_constant(0.18, shape)\n np.testing.assert_almost_equal(\n sd_to_aces_relative_exposure_values(grey_reflector),\n np.array([0.18, 0.18, 0.18]),\n decimal=7,\n )\n\n perfect_reflector = sd_ones(shape)\n np.testing.assert_almost_equal(\n sd_to_aces_relative_exposure_values(perfect_reflector),\n np.array([0.97783784, 0.97783784, 0.97783784]),\n decimal=7,\n )\n\n dark_skin = SDS_COLOURCHECKERS[\"ColorChecker N Ohta\"][\"dark skin\"]\n np.testing.assert_almost_equal(\n sd_to_aces_relative_exposure_values(dark_skin),\n np.array([0.11718149, 0.08663609, 0.05897268]),\n decimal=7,\n )\n\n dark_skin = SDS_COLOURCHECKERS[\"ColorChecker N Ohta\"][\"dark skin\"]\n np.testing.assert_almost_equal(\n sd_to_aces_relative_exposure_values(\n dark_skin, 
SDS_ILLUMINANTS[\"A\"]\n ),\n np.array([0.13583991, 0.09431845, 0.05928214]),\n decimal=7,\n )\n\n dark_skin = SDS_COLOURCHECKERS[\"ColorChecker N Ohta\"][\"dark skin\"]\n np.testing.assert_almost_equal(\n sd_to_aces_relative_exposure_values(\n dark_skin, apply_chromatic_adaptation=True\n ),\n np.array([0.11807796, 0.08690312, 0.05891252]),\n decimal=7,\n )\n\n dark_skin = SDS_COLOURCHECKERS[\"ColorChecker N Ohta\"][\"dark skin\"]\n np.testing.assert_almost_equal(\n sd_to_aces_relative_exposure_values(\n dark_skin,\n apply_chromatic_adaptation=True,\n chromatic_adaptation_transform=\"Bradford\",\n ),\n np.array([0.11805993, 0.08689013, 0.05900396]),\n decimal=7,\n )\n\n def test_domain_range_scale_spectral_to_aces_relative_exposure_values(\n self,\n ):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.\n sd_to_aces_relative_exposure_values` definition domain and range scale\n support.\n \"\"\"\n\n shape = MSDS_ACES_RICD.shape\n grey_reflector = sd_constant(0.18, shape)\n RGB = sd_to_aces_relative_exposure_values(grey_reflector)\n\n d_r = ((\"reference\", 1), (\"1\", 1), (\"100\", 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n sd_to_aces_relative_exposure_values(grey_reflector),\n RGB * factor,\n decimal=7,\n )\n\n\nclass TestReadTrainingDataRawtoacesV1(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.\\\nread_training_data_rawtoaces_v1` definition unit tests methods.\n \"\"\"\n\n def test_read_training_data_rawtoaces_v1(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.\n read_training_data_rawtoaces_v1` definition.\n \"\"\"\n\n self.assertEqual(len(read_training_data_rawtoaces_v1().labels), 190)\n\n\nclass TestGenerateIlluminantsRawtoacesV1(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.\\\ngenerate_illuminants_rawtoaces_v1` definition unit tests methods.\n \"\"\"\n\n def test_generate_illuminants_rawtoaces_v1(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.\n generate_illuminants_rawtoaces_v1` definition.\n \"\"\"\n\n self.assertListEqual(\n list(sorted(generate_illuminants_rawtoaces_v1().keys())),\n [\n \"1000K Blackbody\",\n \"1500K Blackbody\",\n \"2000K Blackbody\",\n \"2500K Blackbody\",\n \"3000K Blackbody\",\n \"3500K Blackbody\",\n \"D100\",\n \"D105\",\n \"D110\",\n \"D115\",\n \"D120\",\n \"D125\",\n \"D130\",\n \"D135\",\n \"D140\",\n \"D145\",\n \"D150\",\n \"D155\",\n \"D160\",\n \"D165\",\n \"D170\",\n \"D175\",\n \"D180\",\n \"D185\",\n \"D190\",\n \"D195\",\n \"D200\",\n \"D205\",\n \"D210\",\n \"D215\",\n \"D220\",\n \"D225\",\n \"D230\",\n \"D235\",\n \"D240\",\n \"D245\",\n \"D250\",\n \"D40\",\n \"D45\",\n \"D50\",\n \"D55\",\n \"D60\",\n \"D65\",\n \"D70\",\n \"D75\",\n \"D80\",\n \"D85\",\n \"D90\",\n \"D95\",\n \"iso7589\",\n ],\n )\n\n\nclass TestWhiteBalanceMultipliers(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.white_balance_multipliers`\n definition unit tests methods.\n \"\"\"\n\n def test_white_balance_multipliers(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.white_balance_multipliers`\n definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n white_balance_multipliers(\n MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS[\"D55\"]\n ),\n np.array([2.34141541, 1.00000000, 1.51633759]),\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n white_balance_multipliers(\n MSDS_CANON_EOS_5DMARK_II,\n SDS_ILLUMINANTS[\"ISO 7589 Studio Tungsten\"],\n ),\n np.array([1.57095278, 
1.00000000, 2.43560477]),\n decimal=7,\n )\n\n\nclass TestBestIlluminant(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.best_illuminant` definition\n unit tests methods.\n \"\"\"\n\n def test_best_illuminant(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.best_illuminant`\n definition.\n \"\"\"\n\n self.assertEqual(\n best_illuminant(\n white_balance_multipliers(\n MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS[\"FL2\"]\n ),\n MSDS_CANON_EOS_5DMARK_II,\n generate_illuminants_rawtoaces_v1(),\n ).name,\n \"D40\",\n )\n\n self.assertEqual(\n best_illuminant(\n white_balance_multipliers(\n MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS[\"A\"]\n ),\n MSDS_CANON_EOS_5DMARK_II,\n generate_illuminants_rawtoaces_v1(),\n ).name,\n \"3000K Blackbody\",\n )\n\n\nclass TestNormaliseIlluminant(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.normalise_illuminant`\n definition unit tests methods.\n \"\"\"\n\n def test_normalise_illuminant(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.normalise_illuminant`\n definition.\n \"\"\"\n\n self.assertAlmostEqual(\n np.sum(\n normalise_illuminant(\n SDS_ILLUMINANTS[\"D55\"], MSDS_CANON_EOS_5DMARK_II\n ).values\n ),\n 3.439037388220850,\n places=7,\n )\n\n\nclass TestTrainingDataSdsToRGB(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.training_data_sds_to_RGB`\n definition unit tests methods.\n \"\"\"\n\n def test_training_data_sds_to_RGB(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.training_data_sds_to_RGB`\n definition.\n \"\"\"\n\n RGB, RGB_w = training_data_sds_to_RGB(\n read_training_data_rawtoaces_v1(),\n MSDS_CANON_EOS_5DMARK_II,\n SDS_ILLUMINANTS[\"D55\"],\n )\n np.testing.assert_almost_equal(\n RGB,\n np.array(\n [\n [42.00296381, 39.83290349, 43.28842394],\n [181.25453293, 180.47486885, 180.30657630],\n [1580.35041765, 1578.67251435, 1571.05703787],\n [403.67553672, 403.67553672, 403.67553672],\n [1193.51958332, 1194.63985124, 1183.92806238],\n [862.07824054, 863.30644583, 858.29863779],\n [605.42274304, 602.94953701, 596.61414309],\n [395.70687930, 394.67167942, 392.97719777],\n [227.27502116, 228.33554705, 227.96959477],\n [130.97735082, 132.12395139, 131.97239271],\n [61.79308820, 61.85572037, 62.40560537],\n [592.29430914, 383.93309398, 282.70032306],\n [504.67305022, 294.69245978, 193.90976423],\n [640.93167741, 494.91914821, 421.68337308],\n [356.53952646, 239.77610719, 181.18147755],\n [179.58569818, 130.00540238, 109.23999883],\n [1061.07297514, 818.29727750, 730.13362169],\n [765.75936417, 522.06805938, 456.59355601],\n [104.70554060, 80.35106922, 65.75667232],\n [694.19925422, 161.06849749, 214.20170991],\n [1054.83161580, 709.41713619, 668.10329523],\n [697.35479081, 276.20032105, 275.86226833],\n [183.26315174, 65.93801513, 74.60775905],\n [359.74416854, 59.73576149, 89.81296522],\n [1043.53760601, 405.48081521, 376.37298474],\n [344.35374209, 111.26727966, 109.10587712],\n [215.18064862, 87.41152853, 85.18152727],\n [555.37005673, 134.76016985, 111.54658160],\n [931.71846961, 210.02605133, 150.65312210],\n [211.01186324, 50.73939233, 54.55750662],\n [654.45781665, 132.73694874, 107.20009737],\n [1193.89772859, 625.60766645, 521.51066476],\n [802.65730883, 228.94887565, 178.30864097],\n [149.82853589, 44.31839648, 55.29195048],\n [80.88083928, 33.78936351, 41.73438243],\n [579.50157840, 240.80755019, 188.50864121],\n [537.09280420, 80.41714202, 48.28907694],\n [777.62363031, 205.11587061, 122.43126732],\n [292.65436510, 
59.53457252, 44.27126512],\n [511.68625012, 134.76897130, 85.73242441],\n [903.64947615, 462.49015529, 350.74183199],\n [852.95457070, 291.64071698, 151.51871958],\n [1427.59841722, 907.54863477, 724.29520203],\n [527.68979414, 169.76114596, 89.48561902],\n [496.62188809, 317.11827387, 243.77642038],\n [554.39017413, 284.77453644, 181.92376325],\n [310.50669032, 96.25812545, 41.22765558],\n [1246.49891599, 522.05121993, 238.28646123],\n [240.19646249, 118.57745244, 82.68426681],\n [1005.98836135, 355.93514762, 118.60457241],\n [792.31376787, 369.56509398, 143.27388201],\n [459.04590557, 315.46594358, 215.53901098],\n [806.50918893, 352.20277469, 97.69239677],\n [1574.11778922, 1078.61331515, 697.02647383],\n [1015.45155837, 598.98507153, 301.94169280],\n [479.68722930, 242.23619637, 72.60351059],\n [1131.70538515, 628.32510627, 213.67910327],\n [185.86573238, 162.55033903, 137.59385867],\n [1131.77074807, 603.89218698, 153.83160203],\n [638.14148862, 527.18090248, 410.12394346],\n [884.58039320, 655.09236879, 329.23967927],\n [1172.73094356, 840.43080883, 380.90114088],\n [1490.24223350, 1111.18491878, 482.33357611],\n [1054.70234779, 513.29967197, 91.55980977],\n [1532.99674295, 1035.15868150, 253.21942988],\n [662.35328287, 528.52354760, 326.56458987],\n [1769.55456145, 1557.58571488, 1155.79098414],\n [1196.62083017, 1079.28012658, 888.47017893],\n [1578.73591185, 1089.40083172, 314.45691871],\n [252.98204345, 206.56788008, 153.62801631],\n [973.59975800, 714.51185344, 251.12884859],\n [1661.01720988, 1340.46809762, 619.61710815],\n [656.66179353, 566.61547800, 322.22788098],\n [676.69663303, 571.86743785, 249.62031449],\n [1229.28626315, 1020.14702709, 353.11090960],\n [390.76190378, 324.36051944, 119.31108035],\n [1524.10495708, 1366.72397704, 633.03830849],\n [1264.54750712, 1149.12002542, 335.25348483],\n [265.96753330, 260.89397210, 130.78590008],\n [90.15969432, 90.72350914, 55.12008388],\n [298.22463247, 300.48700028, 101.95760063],\n [813.34391710, 820.12623357, 313.17818415],\n [186.96402165, 190.38042094, 104.27515726],\n [230.34939258, 235.91900919, 120.77815429],\n [469.57926615, 472.51064145, 256.40912347],\n [117.81249486, 129.17019984, 69.78861213],\n [133.39581196, 151.50390168, 77.66255652],\n [164.19259747, 172.13159331, 80.92295294],\n [146.12230124, 149.32536508, 87.48300520],\n [201.93215173, 208.89885695, 111.84447436],\n [248.41427850, 282.34047722, 122.55482010],\n [304.35509339, 377.38986207, 118.66130122],\n [381.85533606, 530.40398972, 150.83506876],\n [967.19810669, 1161.33086750, 663.54746741],\n [613.98437237, 865.41677370, 362.92357557],\n [410.21304405, 611.89683658, 284.09389273],\n [279.50447144, 416.01646348, 213.18049093],\n [334.48807624, 487.46571814, 235.49134434],\n [664.04349337, 867.87454943, 549.71146455],\n [311.66934673, 431.38058636, 256.13307806],\n [110.04404638, 203.88196409, 104.63331585],\n [153.35857585, 312.67834716, 149.90942505],\n [273.46344514, 462.41992197, 292.50571823],\n [184.77058437, 267.46361125, 193.71894670],\n [75.79805899, 163.84071881, 95.67465270],\n [461.73803707, 668.68797906, 484.77687282],\n [523.01992144, 790.69326153, 598.73122243],\n [105.89414085, 124.92341127, 113.03925656],\n [279.33299507, 446.45128537, 344.73426977],\n [340.57250119, 381.28610429, 353.83182947],\n [141.00956904, 329.50139051, 228.90179483],\n [117.29728945, 156.88993944, 139.49878229],\n [565.12438106, 696.52297174, 615.88218349],\n [1046.73447319, 1446.22424473, 1277.47338963],\n [133.87404291, 253.25944193, 224.75872956],\n 
[586.52626500, 1015.43013448, 885.49907251],\n [927.08412116, 1197.93784752, 1140.76612264],\n [81.29463446, 202.46201173, 186.35209411],\n [350.90699453, 788.82959642, 669.10307704],\n [278.88231719, 581.42068355, 526.82554470],\n [642.66176703, 990.64038619, 907.64284280],\n [689.10344984, 942.49383066, 900.33073076],\n [190.62073977, 540.21088595, 523.62573562],\n [322.35685764, 676.02683754, 692.94583013],\n [896.29532467, 1289.90474463, 1311.34615018],\n [204.06785020, 321.83261403, 337.01923114],\n [237.10512554, 549.97044011, 646.06486244],\n [907.26703197, 1252.44260107, 1309.50173432],\n [504.74103065, 728.27088424, 782.27808125],\n [470.91049729, 912.49116456, 1059.41083523],\n [231.75497961, 539.14727494, 732.41647792],\n [624.91135978, 943.51709467, 1086.48492282],\n [104.84186738, 398.05825469, 663.96030581],\n [100.47632953, 226.41423139, 323.51675153],\n [998.19560093, 1168.81108673, 1283.07267859],\n [350.74519746, 457.74100518, 552.52270183],\n [223.19531677, 560.14850077, 855.05346039],\n [66.92044931, 128.18947830, 205.30719728],\n [280.63458798, 518.51069955, 784.38948897],\n [1071.24122457, 1267.16339790, 1467.81704311],\n [271.47257445, 553.57609491, 914.33723598],\n [211.86582477, 295.18643027, 418.51776463],\n [153.86457460, 342.06625645, 649.82579665],\n [179.59188635, 265.25370235, 413.68135787],\n [529.77485058, 737.79030218, 1046.29865466],\n [208.71936449, 421.30392624, 796.71281168],\n [685.50294808, 879.76243717, 1195.00892794],\n [85.02189613, 113.33360860, 171.03209018],\n [72.06980264, 139.42600347, 315.97906141],\n [349.57868286, 426.82308690, 556.49647978],\n [726.50329821, 882.48411184, 1163.20130103],\n [102.62158777, 177.73895468, 467.26740089],\n [208.63097281, 322.84137064, 639.30554347],\n [377.19498209, 456.13180268, 706.44272480],\n [149.91131672, 218.16462984, 455.15510078],\n [556.80606655, 673.96774240, 1020.98785748],\n [172.19546054, 181.38617476, 478.69666973],\n [494.98572332, 534.88874559, 773.75255591],\n [1166.31475206, 1207.81829513, 1411.04368728],\n [324.81131421, 298.91188334, 521.96994638],\n [731.58631467, 725.95113189, 1192.71141630],\n [376.70584074, 352.06184423, 572.37854429],\n [421.32413767, 465.07677606, 910.85999527],\n [155.65680826, 145.82096629, 282.56390371],\n [982.43736509, 991.65710582, 1312.39630323],\n [41.37244888, 33.41882583, 59.48460827],\n [282.61535563, 188.37255834, 441.62967707],\n [182.28936533, 136.29152918, 248.30801310],\n [398.28853814, 281.28601665, 641.78038278],\n [494.34030557, 393.91395210, 664.96627121],\n [579.86630787, 449.57878986, 836.64303806],\n [281.30892711, 142.60663373, 309.93723963],\n [439.97606151, 345.13329865, 425.68615785],\n [887.17712876, 583.53811414, 886.88440975],\n [841.97939219, 617.28846790, 810.67002861],\n [1280.60242984, 1139.62066080, 1255.46929276],\n [336.77846782, 246.82877629, 324.48823631],\n [1070.92080733, 527.41599474, 913.93600561],\n [676.57753460, 329.48235976, 509.56020035],\n [1353.12934453, 1048.28092139, 1227.42851889],\n [248.56120754, 78.30056642, 137.39216268],\n [675.76876164, 381.60749713, 545.08703142],\n [1008.57884369, 704.64042514, 836.94311729],\n [1207.19931876, 527.74482440, 737.30284625],\n [1157.60714894, 736.24734736, 846.01278626],\n [861.62204402, 714.70913295, 747.29294390],\n [255.83324360, 94.08214754, 147.60127564],\n [1522.93390177, 1017.14491217, 1073.23488749],\n [460.59077351, 93.73852735, 210.75844436],\n [909.87331348, 498.83253656, 750.09672276],\n ]\n ),\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n RGB_w, 
np.array([2.34141541, 1.00000000, 1.51633759]), decimal=7\n )\n\n training_data = sds_and_msds_to_msds(\n SDS_COLOURCHECKERS[\"BabelColor Average\"].values()\n )\n RGB, RGB_w = training_data_sds_to_RGB(\n training_data, MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS[\"D55\"]\n )\n np.testing.assert_almost_equal(\n RGB,\n np.array(\n [\n [263.80361607, 170.29412869, 132.71463416],\n [884.07936328, 628.44083126, 520.43504675],\n [324.17856150, 443.95092266, 606.43750890],\n [243.82059773, 253.22111395, 144.98600653],\n [481.54199203, 527.96925768, 764.50624747],\n [628.07015143, 979.73104655, 896.85237907],\n [927.63600544, 391.11468312, 150.73047156],\n [203.13259862, 317.65395368, 639.54581080],\n [686.28955864, 260.78688114, 254.89963998],\n [174.05857536, 132.16684952, 230.54054095],\n [806.50094411, 817.35481419, 312.91902292],\n [1111.20280010, 608.82554576, 194.31984092],\n [94.99792206, 185.04148229, 456.53592437],\n [340.60457483, 498.62910631, 254.08356415],\n [531.53679194, 136.11844274, 109.19876416],\n [1387.37113491, 952.84382040, 286.23152122],\n [681.97933172, 326.66634506, 526.23078660],\n [244.90739217, 554.88866566, 741.21522946],\n [1841.80020583, 1834.49277300, 1784.07500285],\n [1179.76201558, 1189.84138939, 1182.25520674],\n [720.27089899, 726.91855632, 724.84766858],\n [382.16849234, 387.41521539, 386.87510339],\n [178.43859184, 181.76108810, 182.71062184],\n [64.77754952, 64.80020759, 65.45515287],\n ]\n ),\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n RGB_w, np.array([2.34141541, 1.00000000, 1.51633759]), decimal=7\n )\n\n\nclass TestTrainingDataSdsToXYZ(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.training_data_sds_to_XYZ`\n definition unit tests methods.\n \"\"\"\n\n def test_training_data_sds_to_XYZ(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.training_data_sds_to_XYZ`\n definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n training_data_sds_to_XYZ(\n read_training_data_rawtoaces_v1(),\n MSDS_CMFS[\"CIE 1931 2 Degree Standard Observer\"],\n SDS_ILLUMINANTS[\"D55\"],\n ),\n np.array(\n [\n [0.01743541, 0.01795040, 0.01961110],\n [0.08556071, 0.08957352, 0.09017032],\n [0.74558770, 0.78175495, 0.78343383],\n [0.19005289, 0.19950000, 0.20126062],\n [0.56263167, 0.59145443, 0.58944868],\n [0.40708229, 0.42774653, 0.42813199],\n [0.28533739, 0.29945717, 0.29732644],\n [0.18670375, 0.19575576, 0.19612855],\n [0.10734487, 0.11290543, 0.11381239],\n [0.06188310, 0.06524694, 0.06594260],\n [0.02905436, 0.03045954, 0.03111642],\n [0.25031624, 0.22471846, 0.12599982],\n [0.20848487, 0.18072652, 0.08216289],\n [0.28173081, 0.26937432, 0.19943363],\n [0.15129458, 0.13765872, 0.08086671],\n [0.07854243, 0.07274480, 0.05123870],\n [0.46574583, 0.43948749, 0.34501135],\n [0.33111608, 0.29368033, 0.21379720],\n [0.04596029, 0.04443836, 0.03115443],\n [0.28422646, 0.15495892, 0.11586479],\n [0.47490187, 0.41497780, 0.33505853],\n [0.29452546, 0.20003225, 0.13705453],\n [0.06905269, 0.04421818, 0.03449201],\n [0.13040440, 0.06239791, 0.04175606],\n [0.43838730, 0.29962261, 0.18439668],\n [0.13390118, 0.08356608, 0.04956679],\n [0.08356733, 0.05794634, 0.03910007],\n [0.21637988, 0.12469189, 0.04842559],\n [0.37899204, 0.22130821, 0.07365608],\n [0.07733610, 0.04256869, 0.02300063],\n [0.25696432, 0.14119282, 0.04740500],\n [0.51960474, 0.41409496, 0.25643556],\n [0.32241564, 0.19954021, 0.08051276],\n [0.05811798, 0.03389661, 0.02553745],\n [0.03192572, 0.02139972, 0.01894908],\n [0.24605476, 0.17854962, 0.09147038],\n 
[0.20624731, 0.10555152, 0.01675508],\n [0.31255107, 0.19334840, 0.05143990],\n [0.11006219, 0.06057155, 0.01700794],\n [0.20509764, 0.12555310, 0.03594860],\n [0.38058683, 0.30396093, 0.16256996],\n [0.34354473, 0.23964048, 0.06111316],\n [0.62251344, 0.54770879, 0.34634977],\n [0.21294652, 0.14470338, 0.03492000],\n [0.22064317, 0.19656587, 0.11907643],\n [0.23955073, 0.19768225, 0.08595970],\n [0.12377361, 0.08353105, 0.01434151],\n [0.52378659, 0.40757502, 0.10242337],\n [0.09732322, 0.07735501, 0.03254246],\n [0.41081884, 0.30127969, 0.04240016],\n [0.32946008, 0.27129095, 0.05232655],\n [0.19870991, 0.18701769, 0.09764509],\n [0.31867743, 0.25717029, 0.02158054],\n [0.67745549, 0.64283785, 0.31268426],\n [0.43182429, 0.39425828, 0.13198410],\n [0.19075096, 0.16573196, 0.01845293],\n [0.47578930, 0.43714747, 0.07974541],\n [0.08420865, 0.08615579, 0.06605406],\n [0.47306132, 0.43488423, 0.05262924],\n [0.28242654, 0.28638349, 0.19186089],\n [0.37367384, 0.38524079, 0.13498637],\n [0.49536547, 0.51027091, 0.15645211],\n [0.63680942, 0.67272132, 0.19642820],\n [0.43790684, 0.39093965, 0.02518505],\n [0.63216527, 0.66425603, 0.07124985],\n [0.28682848, 0.29807036, 0.14308787],\n [0.78666095, 0.83181391, 0.53110094],\n [0.54475049, 0.57280425, 0.43240766],\n [0.65555915, 0.68992930, 0.10030198],\n [0.10560623, 0.10992647, 0.06863885],\n [0.40588908, 0.43345904, 0.08589490],\n [0.69824760, 0.76446843, 0.23843395],\n [0.27951451, 0.30869595, 0.13310650],\n [0.28351930, 0.32278417, 0.09130925],\n [0.51144946, 0.58985649, 0.11409286],\n [0.16769668, 0.19357639, 0.04824163],\n [0.64027510, 0.74864980, 0.24145602],\n [0.51533750, 0.64418491, 0.09390029],\n [0.10903312, 0.13420204, 0.04403235],\n [0.03916991, 0.04755109, 0.02410291],\n [0.12726285, 0.16825903, 0.03705646],\n [0.34079923, 0.44119883, 0.10621489],\n [0.08299513, 0.10226271, 0.04607974],\n [0.10117617, 0.12690940, 0.05211600],\n [0.20673305, 0.25456362, 0.11244267],\n [0.05040081, 0.06702198, 0.02944861],\n [0.05809758, 0.07896803, 0.03312583],\n [0.07202711, 0.09383365, 0.03453490],\n [0.06392748, 0.07896740, 0.03860393],\n [0.08851258, 0.11174080, 0.04873213],\n [0.09821259, 0.13743849, 0.03901353],\n [0.12253000, 0.18989034, 0.03327101],\n [0.15082798, 0.25948217, 0.03805919],\n [0.41476613, 0.56455709, 0.26988900],\n [0.25043710, 0.40869656, 0.12211755],\n [0.17536685, 0.28765326, 0.10166502],\n [0.12038544, 0.19242328, 0.07754636],\n [0.14661345, 0.23524743, 0.09334793],\n [0.29469553, 0.41056592, 0.23093160],\n [0.13015693, 0.19492122, 0.09333495],\n [0.04081181, 0.08280292, 0.03122401],\n [0.06569736, 0.13553353, 0.05266408],\n [0.12177383, 0.20160583, 0.11621774],\n [0.08354206, 0.11970984, 0.08207175],\n [0.02834645, 0.06259404, 0.03135058],\n [0.20884161, 0.29927365, 0.20553553],\n [0.23180119, 0.33870071, 0.24267407],\n [0.04413521, 0.05398934, 0.04862030],\n [0.13068910, 0.19470885, 0.15073584],\n [0.16108644, 0.18484544, 0.17474649],\n [0.06206737, 0.12873462, 0.09368693],\n [0.05126858, 0.06722639, 0.05961970],\n [0.25534374, 0.31335090, 0.27780291],\n [0.48369629, 0.63319069, 0.57347864],\n [0.06066266, 0.09712274, 0.09253437],\n [0.27940216, 0.41909220, 0.39351159],\n [0.44664100, 0.54665344, 0.55342931],\n [0.03590889, 0.06959304, 0.07535965],\n [0.16621092, 0.30339106, 0.29722885],\n [0.12909138, 0.22008859, 0.22690521],\n [0.31015553, 0.42498221, 0.42044232],\n [0.33970423, 0.42779997, 0.43883150],\n [0.10000582, 0.19440825, 0.23393750],\n [0.16694758, 0.26056864, 0.32541934],\n [0.43598087, 0.55484571, 
0.63089871],\n [0.10305166, 0.13633951, 0.16650820],\n [0.12725465, 0.19404057, 0.30068226],\n [0.44450660, 0.54666776, 0.64220554],\n [0.25312549, 0.31346831, 0.38485942],\n [0.24557618, 0.34698805, 0.51328941],\n [0.13585660, 0.18761687, 0.36302217],\n [0.32288492, 0.39652004, 0.54579104],\n [0.08400465, 0.11889755, 0.34519851],\n [0.06038029, 0.07936884, 0.16393180],\n [0.47840043, 0.53070661, 0.64043584],\n [0.16727376, 0.19048161, 0.27055547],\n [0.14740952, 0.19227205, 0.44545300],\n [0.03953792, 0.04540593, 0.10766386],\n [0.16200092, 0.18995251, 0.41003367],\n [0.53147895, 0.57554326, 0.74787983],\n [0.17107460, 0.19285623, 0.48157477],\n [0.11394187, 0.12139868, 0.21928748],\n [0.10838799, 0.11193347, 0.34884682],\n [0.10390937, 0.10854555, 0.22459293],\n [0.28493924, 0.30349174, 0.54832107],\n [0.13572090, 0.13988801, 0.43412229],\n [0.36141619, 0.37929776, 0.62919317],\n [0.04527113, 0.04612919, 0.09028801],\n [0.05164102, 0.04505136, 0.17732932],\n [0.18148861, 0.19085005, 0.29528314],\n [0.37792382, 0.39238764, 0.61357669],\n [0.08148672, 0.06054619, 0.27321036],\n [0.13431208, 0.12118937, 0.35762939],\n [0.19932157, 0.19328547, 0.37878896],\n [0.09456787, 0.08094285, 0.25785832],\n [0.29868476, 0.28967149, 0.54786550],\n [0.09582629, 0.06156148, 0.27163852],\n [0.25053785, 0.23630807, 0.40751054],\n [0.56821117, 0.57452018, 0.72419232],\n [0.16116009, 0.13379410, 0.28760107],\n [0.37816205, 0.32564214, 0.64945876],\n [0.19440630, 0.16599850, 0.31684298],\n [0.24229817, 0.19698372, 0.51538353],\n [0.08104904, 0.06295569, 0.15738669],\n [0.48808364, 0.46372832, 0.69336648],\n [0.01983575, 0.01538929, 0.03252398],\n [0.13468770, 0.08473328, 0.25136965],\n [0.08762890, 0.06560340, 0.13804375],\n [0.20192043, 0.12939477, 0.36343630],\n [0.24231283, 0.19018859, 0.36604686],\n [0.28784724, 0.21105155, 0.46114703],\n [0.12549222, 0.07471177, 0.17126268],\n [0.20910983, 0.18235419, 0.22475458],\n [0.43032307, 0.32727171, 0.49574549],\n [0.39105442, 0.32475758, 0.42885925],\n [0.60567491, 0.57928897, 0.64030251],\n [0.15645417, 0.12986348, 0.17171885],\n [0.50025055, 0.32646202, 0.51899239],\n [0.29822363, 0.19839451, 0.27397060],\n [0.63136923, 0.55375993, 0.63816664],\n [0.10261977, 0.05754107, 0.07473368],\n [0.30325538, 0.21976283, 0.29171854],\n [0.46794841, 0.39368920, 0.44286306],\n [0.54326558, 0.36319029, 0.41127862],\n [0.52355493, 0.42261205, 0.43529051],\n [0.39852212, 0.37568122, 0.37825751],\n [0.10892106, 0.06698290, 0.07939788],\n [0.68780223, 0.58022018, 0.54422258],\n [0.18984448, 0.09051898, 0.12104133],\n [0.41991006, 0.29457037, 0.40780639],\n ]\n ),\n decimal=7,\n )\n\n training_data = sds_and_msds_to_msds(\n SDS_COLOURCHECKERS[\"BabelColor Average\"].values()\n )\n\n np.testing.assert_almost_equal(\n training_data_sds_to_XYZ(\n training_data,\n MSDS_CMFS[\"CIE 1931 2 Degree Standard Observer\"],\n SDS_ILLUMINANTS[\"D55\"],\n ),\n np.array(\n [\n [0.11386016, 0.10184316, 0.06318332],\n [0.38043230, 0.34842093, 0.23582246],\n [0.17359019, 0.18707491, 0.31848244],\n [0.10647823, 0.13300376, 0.06486355],\n [0.24658643, 0.23417740, 0.40546447],\n [0.30550003, 0.42171110, 0.41928361],\n [0.38409200, 0.30325611, 0.05955461],\n [0.13149767, 0.11720378, 0.35673016],\n [0.28717811, 0.19215580, 0.12514286],\n [0.08401031, 0.06423349, 0.12782115],\n [0.33990604, 0.44124555, 0.10834694],\n [0.46443889, 0.42686462, 0.07340585],\n [0.07650085, 0.06051409, 0.26167301],\n [0.14598990, 0.23185071, 0.09380297],\n [0.20642710, 0.12162691, 0.04673088],\n [0.57371755, 0.59896814, 
0.08930486],\n [0.30208819, 0.19714705, 0.28492050],\n [0.14184323, 0.19554336, 0.36653731],\n [0.86547610, 0.91241348, 0.88583082],\n [0.55802432, 0.58852191, 0.59042758],\n [0.34102067, 0.35951875, 0.36251375],\n [0.18104441, 0.19123509, 0.19353380],\n [0.08461047, 0.08944605, 0.09150081],\n [0.03058273, 0.03200953, 0.03277947],\n ]\n ),\n decimal=7,\n )\n\n np.testing.assert_almost_equal(\n training_data_sds_to_XYZ(\n training_data,\n MSDS_CMFS[\"CIE 1931 2 Degree Standard Observer\"],\n SDS_ILLUMINANTS[\"D55\"],\n chromatic_adaptation_transform=\"Bradford\",\n ),\n np.array(\n [\n [0.11386557, 0.10185906, 0.06306965],\n [0.38044920, 0.34846911, 0.23548776],\n [0.17349711, 0.18690409, 0.31901794],\n [0.10656174, 0.13314825, 0.06450454],\n [0.24642109, 0.23388536, 0.40625776],\n [0.30564803, 0.42194543, 0.41894818],\n [0.38414010, 0.30337780, 0.05881558],\n [0.13128440, 0.11682332, 0.35780551],\n [0.28707604, 0.19200780, 0.12518610],\n [0.08392779, 0.06409174, 0.12816180],\n [0.34028525, 0.44190577, 0.10665985],\n [0.46462806, 0.42722924, 0.07207641],\n [0.07631823, 0.06018898, 0.26258457],\n [0.14620929, 0.23222248, 0.09296807],\n [0.20635082, 0.12152088, 0.04669974],\n [0.57410962, 0.59968182, 0.08713069],\n [0.30185180, 0.19675858, 0.28565273],\n [0.14177898, 0.19541060, 0.36711242],\n [0.86550834, 0.91247072, 0.88567193],\n [0.55803077, 0.58853268, 0.59040518],\n [0.34102300, 0.35952246, 0.36250826],\n [0.18104563, 0.19123690, 0.19353274],\n [0.08461039, 0.08944568, 0.09150425],\n [0.03058222, 0.03200864, 0.03278183],\n ]\n ),\n decimal=7,\n )\n\n\nclass TestOptimizationFactoryRawtoacesV1(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.\\\noptimisation_factory_rawtoaces_v1` definition unit tests methods.\n \"\"\"\n\n def test_optimisation_factory_rawtoaces_v1(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.\\\noptimisation_factory_rawtoaces_v1` definition.\n \"\"\"\n\n self.assertEqual(len(optimisation_factory_rawtoaces_v1()), 2)\n\n\nclass TestOptimizationFactoryJzazbz(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.\\\noptimisation_factory_Jzazbz` definition unit tests methods.\n \"\"\"\n\n def test_optimisation_factory_Jzazbz(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.\\\noptimisation_factory_Jzazbz` definition.\n \"\"\"\n\n self.assertEqual(len(optimisation_factory_Jzazbz()), 2)\n\n\nclass TestMatrixIdt(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.matrix_idt`\n definition unit tests methods.\n \"\"\"\n\n def test_matrix_idt(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.matrix_idt`\n definition.\n \"\"\"\n\n # The *RAW to ACES* v1 matrix for the same camera and optimized by\n # `Ceres Solver <http://ceres-solver.org/>`__ is as follows:\n #\n # 0.864994 -0.026302 0.161308\n # 0.056527 1.122997 -0.179524\n # 0.023683 -0.202547 1.178864\n np.testing.assert_allclose(\n matrix_idt(MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS[\"D55\"])[0],\n np.array(\n [\n [0.84993207, -0.01605594, 0.15143504],\n [0.05090392, 1.12559930, -0.18498249],\n [0.02006825, -0.19445149, 1.16206549],\n ]\n ),\n rtol=0.0001,\n atol=0.0001,\n )\n\n # The *RAW to ACES* v1 matrix for the same camera and optimized by\n # `Ceres Solver <http://ceres-solver.org/>`__ is as follows:\n #\n # 0.888492 -0.077505 0.189014\n # 0.021805 1.066614 -0.088418\n # -0.019718 -0.206664 1.226381\n np.testing.assert_allclose(\n matrix_idt(\n MSDS_CANON_EOS_5DMARK_II, SD_AMPAS_ISO7589_STUDIO_TUNGSTEN\n 
)[0],\n np.array(\n [\n [0.85895300, -0.04381920, 0.15978620],\n [0.01024800, 1.08825364, -0.11392229],\n [-0.02327674, -0.18044292, 1.15903609],\n ]\n ),\n rtol=0.0001,\n atol=0.0001,\n )\n\n M, RGB_w = matrix_idt(\n MSDS_CANON_EOS_5DMARK_II,\n SDS_ILLUMINANTS[\"D55\"],\n optimisation_factory=optimisation_factory_Jzazbz,\n )\n np.testing.assert_allclose(\n M,\n np.array(\n [\n [0.84841492, -0.01569765, 0.15799332],\n [0.05333075, 1.11428542, -0.17523500],\n [0.02262287, -0.22527728, 1.19646895],\n ]\n ),\n rtol=0.0001,\n atol=0.0001,\n )\n np.testing.assert_allclose(\n RGB_w,\n np.array([2.34141541, 1.00000000, 1.51633759]),\n rtol=0.0001,\n atol=0.0001,\n )\n\n M, RGB_w = matrix_idt(\n MSDS_CANON_EOS_5DMARK_II,\n SDS_ILLUMINANTS[\"D55\"],\n optimisation_kwargs={\"method\": \"Nelder-Mead\"},\n )\n np.testing.assert_allclose(\n M,\n np.array(\n [\n [0.71327381, 0.19213397, 0.11115511],\n [-0.05788252, 1.31165598, -0.21730625],\n [-0.05913103, -0.02787107, 1.10737947],\n ]\n ),\n rtol=0.0001,\n atol=0.0001,\n )\n np.testing.assert_allclose(\n RGB_w,\n np.array([2.34141541, 1.00000000, 1.51633759]),\n rtol=0.0001,\n atol=0.0001,\n )\n\n training_data = sds_and_msds_to_msds(\n SDS_COLOURCHECKERS[\"BabelColor Average\"].values()\n )\n\n # pylint: disable=E1102\n np.testing.assert_allclose(\n matrix_idt(\n reshape_msds(\n MSDS_CAMERA_SENSITIVITIES[\"Nikon 5100 (NPL)\"],\n SpectralShape(400, 700, 10),\n ),\n SD_AMPAS_ISO7589_STUDIO_TUNGSTEN,\n training_data=training_data,\n )[0],\n np.array(\n [\n [0.74041064, 0.10951105, 0.11963256],\n [-0.00467360, 1.09238438, -0.11398966],\n [0.06728533, -0.29530438, 1.18589793],\n ]\n ),\n rtol=0.0001,\n atol=0.0001,\n )\n\n np.testing.assert_allclose(\n matrix_idt(\n MSDS_CANON_EOS_5DMARK_II,\n SDS_ILLUMINANTS[\"D55\"],\n chromatic_adaptation_transform=\"Bradford\",\n )[0],\n np.array(\n [\n [0.85020607, -0.01371074, 0.14907913],\n [0.05074081, 1.12898863, -0.18800656],\n [0.02095822, -0.20110079, 1.16769711],\n ]\n ),\n rtol=0.0001,\n atol=0.0001,\n )\n\n _M, RGB_w, XYZ, RGB = matrix_idt(\n MSDS_CANON_EOS_5DMARK_II,\n SDS_ILLUMINANTS[\"D55\"],\n additional_data=True,\n )\n\n np.testing.assert_almost_equal(\n RGB_w, np.array([2.34141541, 1.00000000, 1.51633759])\n )\n\n np.testing.assert_almost_equal(\n XYZ[:5, ...],\n np.array(\n [\n [0.01743160, 0.01794927, 0.01960625],\n [0.08556139, 0.08957352, 0.09017387],\n [0.74560311, 0.78175547, 0.78350814],\n [0.19005289, 0.19950000, 0.20126062],\n [0.56264334, 0.59145486, 0.58950505],\n ]\n ),\n )\n\n np.testing.assert_almost_equal(\n RGB[:5, ...],\n np.array(\n [\n [0.02075823, 0.01968577, 0.02139352],\n [0.08957758, 0.08919227, 0.08910910],\n [0.78102307, 0.78019384, 0.77643020],\n [0.19950000, 0.19950000, 0.19950000],\n [0.58984787, 0.59040152, 0.58510766],\n ]\n ),\n )\n\n\nclass TestCamera_RGB_to_ACES2065_1(unittest.TestCase):\n \"\"\"\n Define :func:`colour.characterisation.aces_it.\\\ncamera_RGB_to_ACES2065_1` definition unit tests methods.\n \"\"\"\n\n def test_camera_RGB_to_ACES2065_1(self):\n \"\"\"\n Test :func:`colour.characterisation.aces_it.\\\ncamera_RGB_to_ACES2065_1` definition.\n \"\"\"\n\n B, b = matrix_idt(MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS[\"D55\"])\n np.testing.assert_almost_equal(\n camera_RGB_to_ACES2065_1(np.array([0.1, 0.2, 0.3]), B, b),\n np.array([0.26468115, 0.15288980, 0.49443355]),\n )\n\n np.testing.assert_almost_equal(\n camera_RGB_to_ACES2065_1(np.array([1.5, 1.5, 1.5]), B, b),\n np.array([3.30542136, 1.44643555, 2.42192985]),\n )\n\n np.testing.assert_almost_equal(\n 
camera_RGB_to_ACES2065_1(np.array([1.0, 1.0, 1.0]), B, b, True),\n np.array([2.20361424, 0.96429036, 1.61461990]),\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nOptical Society of America Uniform Colour Scales (OSA UCS)\n==========================================================\n\nDefines the *OSA UCS* colourspace:\n\n- :func:`colour.XYZ_to_OSA_UCS`\n- :func:`colour.OSA_UCS_to_XYZ`\n\nReferences\n----------\n- :cite:`Cao2013` : Cao, R., Trussell, H. J., & Shamey, R. (2013). Comparison\n of the performance of inverse transformation methods from OSA-UCS to\n CIEXYZ. Journal of the Optical Society of America A, 30(8), 1508.\n doi:10.1364/JOSAA.30.001508\n- :cite:`Moroney2003` : Moroney, N. (2003). A Radial Sampling of the OSA\n Uniform Color Scales. Color and Imaging Conference, 2003(1), 175-180.\n ISSN:2166-9635\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nfrom scipy.optimize import fmin\n\nfrom colour.algebra import spow, vector_dot\nfrom colour.hints import ArrayLike, Dict, FloatingOrNDArray, NDArray, Optional\nfrom colour.models import XYZ_to_xyY\nfrom colour.utilities import (\n as_float,\n as_float_array,\n domain_range_scale,\n from_range_100,\n to_domain_100,\n tsplit,\n tstack,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"XYZ_to_OSA_UCS\",\n \"OSA_UCS_to_XYZ\",\n]\n\nMATRIX_XYZ_TO_RGB_OSA_UCS: NDArray = np.array(\n [\n [0.799, 0.4194, -0.1648],\n [-0.4493, 1.3265, 0.0927],\n [-0.1149, 0.3394, 0.717],\n ]\n)\n\"\"\"\n*OSA UCS* matrix converting from *CIE XYZ* tristimulus values to *RGB*\ncolourspace.\n\"\"\"\n\n\ndef XYZ_to_OSA_UCS(XYZ: ArrayLike) -> NDArray:\n \"\"\"\n Convert from *CIE XYZ* tristimulus values under the\n *CIE 1964 10 Degree Standard Observer* to *OSA UCS* colourspace.\n\n The lightness axis, *L* is usually in range [-9, 5] and centered around\n middle gray (Munsell N/6). The yellow-blue axis, *j* is usually in range\n [-15, 15]. 
The red-green axis, *g* is usually in range [-20, 15].\n\n Parameters\n ----------\n XYZ\n *CIE XYZ* tristimulus values under the\n *CIE 1964 10 Degree Standard Observer*.\n\n Returns\n -------\n :class:`numpy.ndarray`\n *OSA UCS* :math:`Ljg` lightness, jaune (yellowness), and greenness.\n\n Notes\n -----\n +------------+-----------------------+--------------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+====================+\n | ``XYZ`` | [0, 100] | [0, 1] |\n +------------+-----------------------+--------------------+\n\n +------------+-----------------------+--------------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+====================+\n | ``Ljg`` | ``L`` : [-100, 100] | ``L`` : [-1, 1] |\n | | | |\n | | ``j`` : [-100, 100] | ``j`` : [-1, 1] |\n | | | |\n | | ``g`` : [-100, 100] | ``g`` : [-1, 1] |\n +------------+-----------------------+--------------------+\n\n - *OSA UCS* uses the *CIE 1964 10 Degree Standard Observer*.\n\n References\n ----------\n :cite:`Cao2013`, :cite:`Moroney2003`\n\n Examples\n --------\n >>> import numpy as np\n >>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100\n >>> XYZ_to_OSA_UCS(XYZ) # doctest: +ELLIPSIS\n array([-3.0049979..., 2.9971369..., -9.6678423...])\n \"\"\"\n\n XYZ = to_domain_100(XYZ)\n x, y, Y = tsplit(XYZ_to_xyY(XYZ))\n\n Y_0 = Y * (\n 4.4934 * x**2\n + 4.3034 * y**2\n - 4.276 * x * y\n - 1.3744 * x\n - 2.5643 * y\n + 1.8103\n )\n\n o_3 = 1 / 3\n Y_0_es = spow(Y_0, o_3) - 2 / 3\n # Gracefully handles Y_0 < 30.\n Y_0_s = Y_0 - 30\n Lambda = 5.9 * (Y_0_es + 0.042 * spow(Y_0_s, o_3))\n\n RGB = vector_dot(MATRIX_XYZ_TO_RGB_OSA_UCS, XYZ)\n RGB_3 = spow(RGB, 1 / 3)\n\n C = Lambda / (5.9 * Y_0_es)\n L = (Lambda - 14.4) / spow(2, 1 / 2)\n j = C * np.dot(RGB_3, np.array([1.7, 8, -9.7]))\n g = C * np.dot(RGB_3, np.array([-13.7, 17.7, -4]))\n\n Ljg = tstack([L, j, g])\n\n return from_range_100(Ljg)\n\n\ndef OSA_UCS_to_XYZ(\n Ljg: ArrayLike, optimisation_kwargs: Optional[Dict] = None\n) -> NDArray:\n \"\"\"\n Convert from *OSA UCS* colourspace to *CIE XYZ* tristimulus values under\n the *CIE 1964 10 Degree Standard Observer*.\n\n Parameters\n ----------\n Ljg\n *OSA UCS* :math:`Ljg` lightness, jaune (yellowness), and greenness.\n optimisation_kwargs\n Parameters for :func:`scipy.optimize.fmin` definition.\n\n Returns\n -------\n :class:`numpy.ndarray`\n *CIE XYZ* tristimulus values under the\n *CIE 1964 10 Degree Standard Observer*.\n\n Warnings\n --------\n There is no analytical inverse transformation from *OSA UCS* to :math:`Ljg`\n lightness, jaune (yellowness), and greenness to *CIE XYZ* tristimulus\n values, the current implementation relies on optimization using\n :func:`scipy.optimize.fmin` definition and thus has reduced precision and\n poor performance.\n\n Notes\n -----\n +------------+-----------------------+--------------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+====================+\n | ``Ljg`` | ``L`` : [-100, 100] | ``L`` : [-1, 1] |\n | | | |\n | | ``j`` : [-100, 100] | ``j`` : [-1, 1] |\n | | | |\n | | ``g`` : [-100, 100] | ``g`` : [-1, 1] |\n +------------+-----------------------+--------------------+\n\n +------------+-----------------------+--------------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+====================+\n | ``XYZ`` | [0, 100] | [0, 1] |\n 
+------------+-----------------------+--------------------+\n\n - *OSA UCS* uses the *CIE 1964 10 Degree Standard Observer*.\n\n References\n ----------\n :cite:`Cao2013`, :cite:`Moroney2003`\n\n Examples\n --------\n >>> import numpy as np\n >>> Ljg = np.array([-3.00499790, 2.99713697, -9.66784231])\n >>> OSA_UCS_to_XYZ(Ljg) # doctest: +ELLIPSIS\n array([ 20.6540240..., 12.1972369..., 5.1369372...])\n \"\"\"\n\n Ljg = to_domain_100(Ljg)\n shape = Ljg.shape\n Ljg = np.atleast_1d(Ljg.reshape([-1, 3]))\n\n optimisation_settings = {\"disp\": False}\n if optimisation_kwargs is not None:\n optimisation_settings.update(optimisation_kwargs)\n\n def error_function(XYZ: ArrayLike, Ljg: ArrayLike) -> FloatingOrNDArray:\n \"\"\"Error function.\"\"\"\n\n # Error must be computed in \"reference\" domain and range.\n with domain_range_scale(\"ignore\"):\n error = np.linalg.norm(XYZ_to_OSA_UCS(XYZ) - as_float_array(Ljg))\n\n return as_float(error)\n\n x_0 = np.array([30, 30, 30])\n XYZ = as_float_array(\n [\n fmin(error_function, x_0, (Ljg_i,), **optimisation_settings)\n for Ljg_i in as_float_array(Ljg)\n ]\n )\n\n return from_range_100(XYZ.reshape(shape))\n", "# !/usr/bin/env python\n\"\"\"Defines the unit tests for the :mod:`colour.appearance.rlab` module.\"\"\"\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.appearance import (\n D_FACTOR_RLAB,\n VIEWING_CONDITIONS_RLAB,\n XYZ_to_RLAB,\n)\nfrom colour.utilities import (\n as_float_array,\n domain_range_scale,\n ignore_numpy_errors,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"TestXYZ_to_RLAB\",\n]\n\n\nclass TestXYZ_to_RLAB(unittest.TestCase):\n \"\"\"\n Define :func:`colour.appearance.rlab.XYZ_to_RLAB` definition unit\n tests methods.\n \"\"\"\n\n def test_XYZ_to_RLAB(self):\n \"\"\"\n Test :func:`colour.appearance.rlab.XYZ_to_RLAB` definition.\n\n Notes\n -----\n - The test values have been generated from data of the following file\n by *Fairchild (2013)*:\n http://rit-mcsl.org/fairchild//files/AppModEx.xls\n \"\"\"\n\n XYZ = np.array([19.01, 20.00, 21.78])\n XYZ_n = np.array([95.05, 100.00, 108.88])\n Y_n = 318.31\n sigma = 0.4347\n np.testing.assert_allclose(\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma),\n np.array([49.67, 0.01, 270, 0, np.nan, 0, -0.01]),\n rtol=0.01,\n atol=0.01,\n )\n\n XYZ = np.array([57.06, 43.06, 31.96])\n Y_n = 31.83\n np.testing.assert_allclose(\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma),\n np.array([69.33, 49.74, 21.3, 0.72, np.nan, 46.33, 18.09]),\n rtol=0.01,\n atol=0.01,\n )\n\n XYZ = np.array([3.53, 6.56, 2.14])\n XYZ_n = np.array([109.85, 100.00, 35.58])\n Y_n = 318.31\n np.testing.assert_allclose(\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma),\n np.array([30.78, 41.02, 176.9, 1.33, np.nan, -40.96, 2.25]),\n rtol=0.01,\n atol=0.01,\n )\n\n XYZ = np.array([19.01, 20.00, 21.78])\n Y_n = 31.83\n np.testing.assert_allclose(\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma),\n np.array([49.83, 54.87, 286.5, 1.1, np.nan, 15.57, -52.61]),\n rtol=0.01,\n atol=0.01,\n )\n\n def test_n_dimensional_XYZ_to_RLAB(self):\n \"\"\"\n Test :func:`colour.appearance.rlab.XYZ_to_RLAB` definition\n n-dimensional support.\n \"\"\"\n\n XYZ = np.array([19.01, 20.00, 21.78])\n XYZ_n = np.array([95.05, 100.00, 108.88])\n Y_n = 318.31\n sigma = 0.4347\n specification = 
XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma)\n\n XYZ = np.tile(XYZ, (6, 1))\n specification = np.tile(specification, (6, 1))\n np.testing.assert_almost_equal(\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma), specification, decimal=7\n )\n\n XYZ_n = np.tile(XYZ_n, (6, 1))\n np.testing.assert_almost_equal(\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma), specification, decimal=7\n )\n\n XYZ = np.reshape(XYZ, (2, 3, 3))\n XYZ_n = np.reshape(XYZ_n, (2, 3, 3))\n specification = np.reshape(specification, (2, 3, 7))\n np.testing.assert_almost_equal(\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma), specification, decimal=7\n )\n\n def test_domain_range_scale_XYZ_to_RLAB(self):\n \"\"\"\n Test :func:`colour.appearance.rlab.XYZ_to_RLAB` definition domain and\n range scale support.\n \"\"\"\n\n XYZ = np.array([19.01, 20.00, 21.78])\n XYZ_n = np.array([109.85, 100, 35.58])\n Y_n = 31.83\n sigma = VIEWING_CONDITIONS_RLAB[\"Average\"]\n D = D_FACTOR_RLAB[\"Hard Copy Images\"]\n specification = XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma, D)\n\n d_r = (\n (\"reference\", 1, 1),\n (\"1\", 0.01, np.array([1, 1, 1 / 360, 1, np.nan, 1, 1])),\n (\"100\", 1, np.array([1, 1, 100 / 360, 1, np.nan, 1, 1])),\n )\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n XYZ_to_RLAB(\n XYZ * factor_a, XYZ_n * factor_a, Y_n, sigma, D\n ),\n as_float_array(specification) * factor_b,\n decimal=7,\n )\n\n @ignore_numpy_errors\n def test_nan_XYZ_to_RLAB(self):\n \"\"\"\n Test :func:`colour.appearance.rlab.XYZ_to_RLAB` definition nan\n support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n XYZ = np.array(case)\n XYZ_n = np.array(case)\n Y_n = case[0]\n sigma = case[0]\n D = case[0]\n XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma, D)\n", "\"\"\"Showcases Cyan-Magenta-Yellow (Black) (CMY(K)) colour transformations.\"\"\"\n\nimport numpy as np\n\nimport colour\nfrom colour.utilities import message_box\n\nmessage_box(\"Cyan-Magenta-Yellow (Black) (CMY(K)) Colour Transformations\")\n\nRGB = np.array([0.45620519, 0.03081071, 0.04091952])\nmessage_box(\n f'Converting to the \"CMY\" colourspace from given \"RGB\" colourspace '\n f\"values:\\n\\n\\t{RGB}\"\n)\nprint(colour.RGB_to_CMY(RGB))\n\nprint(\"\\n\")\n\nCMY = np.array([0.54379481, 0.96918929, 0.95908048])\nmessage_box(\n f'Converting to the \"RGB\" colourspace from given \"CMY\" colourspace '\n f\"values:\\n\\n\\t{CMY}\"\n)\nprint(colour.CMY_to_RGB(CMY))\n\nprint(\"\\n\")\n\nmessage_box(\n f'Converting to the \"CMYK\" colourspace from given \"CMY\" colourspace '\n f\"values:\\n\\n\\t{CMY}\"\n)\nprint(colour.CMY_to_CMYK(CMY))\n\nprint(\"\\n\")\n\nCMYK = np.array([0.00000000, 0.93246304, 0.91030457, 0.54379481])\nmessage_box(\n f'Converting to the \"CMY\" colourspace from given \"CMYK\" colourspace '\n f\"values:\\n\\n\\t{CMYK}\"\n)\nprint(colour.CMYK_to_CMY(CMYK))\n", "\"\"\"\nColour Rendering Index\n======================\n\nDefines the *Colour Rendering Index* (CRI) computation objects:\n\n- :class:`colour.quality.ColourRendering_Specification_CRI`\n- :func:`colour.colour_rendering_index`\n\nReferences\n----------\n- :cite:`Ohno2008a` : Ohno, Yoshiro, & Davis, W. (2008). 
NIST CQS simulation\n (Version 7.4) [Computer software].\n https://drive.google.com/file/d/1PsuU6QjUJjCX6tQyCud6ul2Tbs8rYWW9/view?\\\nusp=sharing\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nfrom dataclasses import dataclass\n\nfrom colour.algebra import euclidean_distance, spow\nfrom colour.colorimetry import (\n MSDS_CMFS,\n MultiSpectralDistributions,\n SPECTRAL_SHAPE_DEFAULT,\n SpectralDistribution,\n reshape_msds,\n reshape_sd,\n sd_CIE_illuminant_D_series,\n sd_blackbody,\n sd_to_XYZ,\n)\nfrom colour.hints import (\n Boolean,\n Dict,\n Floating,\n FloatingOrNDArray,\n Integer,\n NDArray,\n Tuple,\n Union,\n)\nfrom colour.models import UCS_to_uv, XYZ_to_UCS, XYZ_to_xyY\nfrom colour.quality.datasets.tcs import INDEXES_TO_NAMES_TCS, SDS_TCS\nfrom colour.temperature import CCT_to_xy_CIE_D, uv_to_CCT_Robertson1968\nfrom colour.utilities import domain_range_scale, as_float_scalar\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"TCS_ColorimetryData\",\n \"TCS_ColourQualityScaleData\",\n \"ColourRendering_Specification_CRI\",\n \"colour_rendering_index\",\n \"tcs_colorimetry_data\",\n \"colour_rendering_indexes\",\n]\n\n\n@dataclass\nclass TCS_ColorimetryData:\n \"\"\"Define the class storing *test colour samples* colorimetry data.\"\"\"\n\n name: str\n XYZ: NDArray\n uv: NDArray\n UVW: NDArray\n\n\n@dataclass\nclass TCS_ColourQualityScaleData:\n \"\"\"\n Define the class storing *test colour samples* colour rendering\n index data.\n \"\"\"\n\n name: str\n Q_a: Floating\n\n\n@dataclass()\nclass ColourRendering_Specification_CRI:\n \"\"\"\n Define the *Colour Rendering Index* (CRI) colour quality specification.\n\n Parameters\n ----------\n name\n Name of the test spectral distribution.\n Q_a\n *Colour Rendering Index* (CRI) :math:`Q_a`.\n Q_as\n Individual *colour rendering indexes* data for each sample.\n colorimetry_data\n Colorimetry data for the test and reference computations.\n\n References\n ----------\n :cite:`Ohno2008a`\n \"\"\"\n\n name: str\n Q_a: Floating\n Q_as: Dict[Integer, TCS_ColourQualityScaleData]\n colorimetry_data: Tuple[\n Tuple[TCS_ColorimetryData, ...], Tuple[TCS_ColorimetryData, ...]\n ]\n\n\ndef colour_rendering_index(\n sd_test: SpectralDistribution, additional_data: Boolean = False\n) -> Union[Floating, ColourRendering_Specification_CRI]:\n \"\"\"\n Return the *Colour Rendering Index* (CRI) :math:`Q_a` of given spectral\n distribution.\n\n Parameters\n ----------\n sd_test\n Test spectral distribution.\n additional_data\n Whether to output additional data.\n\n Returns\n -------\n :class:`numpy.floating` or \\\n:class:`colour.quality.ColourRendering_Specification_CRI`\n *Colour Rendering Index* (CRI).\n\n References\n ----------\n :cite:`Ohno2008a`\n\n Examples\n --------\n >>> from colour import SDS_ILLUMINANTS\n >>> sd = SDS_ILLUMINANTS['FL2']\n >>> colour_rendering_index(sd) # doctest: +ELLIPSIS\n 64.2337241...\n \"\"\"\n\n # pylint: disable=E1102\n cmfs = reshape_msds(\n MSDS_CMFS[\"CIE 1931 2 Degree Standard Observer\"],\n SPECTRAL_SHAPE_DEFAULT,\n )\n\n shape = cmfs.shape\n sd_test = reshape_sd(sd_test, shape)\n tcs_sds = {sd.name: reshape_sd(sd, shape) for sd in SDS_TCS.values()}\n\n with domain_range_scale(\"1\"):\n XYZ = sd_to_XYZ(sd_test, cmfs)\n\n uv = UCS_to_uv(XYZ_to_UCS(XYZ))\n CCT, 
_D_uv = uv_to_CCT_Robertson1968(uv)\n\n if CCT < 5000:\n sd_reference = sd_blackbody(CCT, shape)\n else:\n xy = CCT_to_xy_CIE_D(CCT)\n sd_reference = sd_CIE_illuminant_D_series(xy)\n sd_reference.align(shape)\n\n test_tcs_colorimetry_data = tcs_colorimetry_data(\n sd_test, sd_reference, tcs_sds, cmfs, chromatic_adaptation=True\n )\n\n reference_tcs_colorimetry_data = tcs_colorimetry_data(\n sd_reference, sd_reference, tcs_sds, cmfs\n )\n\n Q_as = colour_rendering_indexes(\n test_tcs_colorimetry_data, reference_tcs_colorimetry_data\n )\n\n Q_a = as_float_scalar(\n np.average(\n [v.Q_a for k, v in Q_as.items() if k in (1, 2, 3, 4, 5, 6, 7, 8)]\n )\n )\n\n if additional_data:\n return ColourRendering_Specification_CRI(\n sd_test.name,\n Q_a,\n Q_as,\n (test_tcs_colorimetry_data, reference_tcs_colorimetry_data),\n )\n else:\n return Q_a\n\n\ndef tcs_colorimetry_data(\n sd_t: SpectralDistribution,\n sd_r: SpectralDistribution,\n sds_tcs: Dict[str, SpectralDistribution],\n cmfs: MultiSpectralDistributions,\n chromatic_adaptation: Boolean = False,\n) -> Tuple[TCS_ColorimetryData, ...]:\n \"\"\"\n Return the *test colour samples* colorimetry data.\n\n Parameters\n ----------\n sd_t\n Test spectral distribution.\n sd_r\n Reference spectral distribution.\n sds_tcs\n *Test colour samples* spectral distributions.\n cmfs\n Standard observer colour matching functions.\n chromatic_adaptation\n Perform chromatic adaptation.\n\n Returns\n -------\n :class:`tuple`\n *Test colour samples* colorimetry data.\n \"\"\"\n\n XYZ_t = sd_to_XYZ(sd_t, cmfs)\n uv_t = UCS_to_uv(XYZ_to_UCS(XYZ_t))\n u_t, v_t = uv_t[0], uv_t[1]\n\n XYZ_r = sd_to_XYZ(sd_r, cmfs)\n uv_r = UCS_to_uv(XYZ_to_UCS(XYZ_r))\n u_r, v_r = uv_r[0], uv_r[1]\n\n tcs_data = []\n for _key, value in sorted(INDEXES_TO_NAMES_TCS.items()):\n sd_tcs = sds_tcs[value]\n XYZ_tcs = sd_to_XYZ(sd_tcs, cmfs, sd_t)\n xyY_tcs = XYZ_to_xyY(XYZ_tcs)\n uv_tcs = UCS_to_uv(XYZ_to_UCS(XYZ_tcs))\n u_tcs, v_tcs = uv_tcs[0], uv_tcs[1]\n\n if chromatic_adaptation:\n\n def c(\n x: FloatingOrNDArray, y: FloatingOrNDArray\n ) -> FloatingOrNDArray:\n \"\"\"Compute the :math:`c` term.\"\"\"\n\n return (4 - x - 10 * y) / y\n\n def d(\n x: FloatingOrNDArray, y: FloatingOrNDArray\n ) -> FloatingOrNDArray:\n \"\"\"Compute the :math:`d` term.\"\"\"\n\n return (1.708 * y + 0.404 - 1.481 * x) / y\n\n c_t, d_t = c(u_t, v_t), d(u_t, v_t)\n c_r, d_r = c(u_r, v_r), d(u_r, v_r)\n tcs_c, tcs_d = c(u_tcs, v_tcs), d(u_tcs, v_tcs)\n u_tcs = (\n 10.872 + 0.404 * c_r / c_t * tcs_c - 4 * d_r / d_t * tcs_d\n ) / (16.518 + 1.481 * c_r / c_t * tcs_c - d_r / d_t * tcs_d)\n v_tcs = 5.52 / (\n 16.518 + 1.481 * c_r / c_t * tcs_c - d_r / d_t * tcs_d\n )\n\n W_tcs = 25 * spow(xyY_tcs[-1], 1 / 3) - 17\n U_tcs = 13 * W_tcs * (u_tcs - u_r)\n V_tcs = 13 * W_tcs * (v_tcs - v_r)\n\n tcs_data.append(\n TCS_ColorimetryData(\n sd_tcs.name, XYZ_tcs, uv_tcs, np.array([U_tcs, V_tcs, W_tcs])\n )\n )\n\n return tuple(tcs_data)\n\n\ndef colour_rendering_indexes(\n test_data: Tuple[TCS_ColorimetryData, ...],\n reference_data: Tuple[TCS_ColorimetryData, ...],\n) -> Dict[Integer, TCS_ColourQualityScaleData]:\n \"\"\"\n Return the *test colour samples* rendering indexes :math:`Q_a`.\n\n Parameters\n ----------\n test_data\n Test data.\n reference_data\n Reference data.\n\n Returns\n -------\n :class:`dict`\n *Test colour samples* *Colour Rendering Index* (CRI).\n \"\"\"\n\n Q_as = {}\n for i in range(len(test_data)):\n Q_as[i + 1] = TCS_ColourQualityScaleData(\n test_data[i].name,\n 100\n - 4.6\n * as_float_scalar(\n 
euclidean_distance(reference_data[i].UVW, test_data[i].UVW)\n ),\n )\n\n return Q_as\n", "\"\"\"\nPre-Computed Matrices for simulation of CVD - Machado (2009)\n============================================================\n\nDefines the pre-computed matrices for simulation of colour vision deficiency\nfrom Machado (2010).\n\nReferences\n----------\n- :cite:`Machado2010a` : Machado, Gustavo Mello. (2010). A model for\n simulation of color vision deficiency and a color contrast enhancement\n technique for dichromats. (pp. 1-94).\n http://www.lume.ufrgs.br/handle/10183/26950\n\"\"\"\n\nimport numpy as np\n\nfrom colour.utilities import CaseInsensitiveMapping\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"CVD_MATRICES_MACHADO2010\",\n]\n\nCVD_MATRICES_MACHADO2010: CaseInsensitiveMapping = CaseInsensitiveMapping(\n {\n \"Protanomaly\": {\n 0.0: np.array(\n [\n [1.000000, 0.000000, -0.000000],\n [0.000000, 1.000000, 0.000000],\n [-0.000000, -0.000000, 1.000000],\n ]\n ),\n 0.1: np.array(\n [\n [0.856167, 0.182038, -0.038205],\n [0.029342, 0.955115, 0.015544],\n [-0.002880, -0.001563, 1.004443],\n ]\n ),\n 0.2: np.array(\n [\n [0.734766, 0.334872, -0.069637],\n [0.051840, 0.919198, 0.028963],\n [-0.004928, -0.004209, 1.009137],\n ]\n ),\n 0.3: np.array(\n [\n [0.630323, 0.465641, -0.095964],\n [0.069181, 0.890046, 0.040773],\n [-0.006308, -0.007724, 1.014032],\n ]\n ),\n 0.4: np.array(\n [\n [0.539009, 0.579343, -0.118352],\n [0.082546, 0.866121, 0.051332],\n [-0.007136, -0.011959, 1.019095],\n ]\n ),\n 0.5: np.array(\n [\n [0.458064, 0.679578, -0.137642],\n [0.092785, 0.846313, 0.060902],\n [-0.007494, -0.016807, 1.024301],\n ]\n ),\n 0.6: np.array(\n [\n [0.385450, 0.769005, -0.154455],\n [0.100526, 0.829802, 0.069673],\n [-0.007442, -0.022190, 1.029632],\n ]\n ),\n 0.7: np.array(\n [\n [0.319627, 0.849633, -0.169261],\n [0.106241, 0.815969, 0.077790],\n [-0.007025, -0.028051, 1.035076],\n ]\n ),\n 0.8: np.array(\n [\n [0.259411, 0.923008, -0.182420],\n [0.110296, 0.804340, 0.085364],\n [-0.006276, -0.034346, 1.040622],\n ]\n ),\n 0.9: np.array(\n [\n [0.203876, 0.990338, -0.194214],\n [0.112975, 0.794542, 0.092483],\n [-0.005222, -0.041043, 1.046265],\n ]\n ),\n 1.0: np.array(\n [\n [0.152286, 1.052583, -0.204868],\n [0.114503, 0.786281, 0.099216],\n [-0.003882, -0.048116, 1.051998],\n ]\n ),\n },\n \"Deuteranomaly\": {\n 0.0: np.array(\n [\n [1.000000, 0.000000, -0.000000],\n [0.000000, 1.000000, 0.000000],\n [-0.000000, -0.000000, 1.000000],\n ]\n ),\n 0.1: np.array(\n [\n [0.866435, 0.177704, -0.044139],\n [0.049567, 0.939063, 0.011370],\n [-0.003453, 0.007233, 0.996220],\n ]\n ),\n 0.2: np.array(\n [\n [0.760729, 0.319078, -0.079807],\n [0.090568, 0.889315, 0.020117],\n [-0.006027, 0.013325, 0.992702],\n ]\n ),\n 0.3: np.array(\n [\n [0.675425, 0.433850, -0.109275],\n [0.125303, 0.847755, 0.026942],\n [-0.007950, 0.018572, 0.989378],\n ]\n ),\n 0.4: np.array(\n [\n [0.605511, 0.528560, -0.134071],\n [0.155318, 0.812366, 0.032316],\n [-0.009376, 0.023176, 0.986200],\n ]\n ),\n 0.5: np.array(\n [\n [0.547494, 0.607765, -0.155259],\n [0.181692, 0.781742, 0.036566],\n [-0.010410, 0.027275, 0.983136],\n ]\n ),\n 0.6: np.array(\n [\n [0.498864, 0.674741, -0.173604],\n [0.205199, 0.754872, 0.039929],\n [-0.011131, 0.030969, 0.980162],\n ]\n ),\n 0.7: 
np.array(\n [\n [0.457771, 0.731899, -0.189670],\n [0.226409, 0.731012, 0.042579],\n [-0.011595, 0.034333, 0.977261],\n ]\n ),\n 0.8: np.array(\n [\n [0.422823, 0.781057, -0.203881],\n [0.245752, 0.709602, 0.044646],\n [-0.011843, 0.037423, 0.974421],\n ]\n ),\n 0.9: np.array(\n [\n [0.392952, 0.823610, -0.216562],\n [0.263559, 0.690210, 0.046232],\n [-0.011910, 0.040281, 0.971630],\n ]\n ),\n 1.0: np.array(\n [\n [0.367322, 0.860646, -0.227968],\n [0.280085, 0.672501, 0.047413],\n [-0.011820, 0.042940, 0.968881],\n ]\n ),\n },\n \"Tritanomaly\": {\n 0.0: np.array(\n [\n [1.000000, 0.000000, -0.000000],\n [0.000000, 1.000000, 0.000000],\n [-0.000000, -0.000000, 1.000000],\n ]\n ),\n 0.1: np.array(\n [\n [0.926670, 0.092514, -0.019184],\n [0.021191, 0.964503, 0.014306],\n [0.008437, 0.054813, 0.936750],\n ]\n ),\n 0.2: np.array(\n [\n [0.895720, 0.133330, -0.029050],\n [0.029997, 0.945400, 0.024603],\n [0.013027, 0.104707, 0.882266],\n ]\n ),\n 0.3: np.array(\n [\n [0.905871, 0.127791, -0.033662],\n [0.026856, 0.941251, 0.031893],\n [0.013410, 0.148296, 0.838294],\n ]\n ),\n 0.4: np.array(\n [\n [0.948035, 0.089490, -0.037526],\n [0.014364, 0.946792, 0.038844],\n [0.010853, 0.193991, 0.795156],\n ]\n ),\n 0.5: np.array(\n [\n [1.017277, 0.027029, -0.044306],\n [-0.006113, 0.958479, 0.047634],\n [0.006379, 0.248708, 0.744913],\n ]\n ),\n 0.6: np.array(\n [\n [1.104996, -0.046633, -0.058363],\n [-0.032137, 0.971635, 0.060503],\n [0.001336, 0.317922, 0.680742],\n ]\n ),\n 0.7: np.array(\n [\n [1.193214, -0.109812, -0.083402],\n [-0.058496, 0.979410, 0.079086],\n [-0.002346, 0.403492, 0.598854],\n ]\n ),\n 0.8: np.array(\n [\n [1.257728, -0.139648, -0.118081],\n [-0.078003, 0.975409, 0.102594],\n [-0.003316, 0.501214, 0.502102],\n ]\n ),\n 0.9: np.array(\n [\n [1.278864, -0.125333, -0.153531],\n [-0.084748, 0.957674, 0.127074],\n [-0.000989, 0.601151, 0.399838],\n ]\n ),\n 1.0: np.array(\n [\n [1.255528, -0.076749, -0.178779],\n [-0.078411, 0.930809, 0.147602],\n [0.004733, 0.691367, 0.303900],\n ]\n ),\n },\n }\n)\nCVD_MATRICES_MACHADO2010.__doc__ = \"\"\"\nMachado (2010) Simulation matrices :math:`\\\\Phi_{CVD}`.\n\"\"\"\n", "\"\"\"\nColour Temperature & Correlated Colour Temperature Plotting\n===========================================================\n\nDefines the colour temperature and correlated colour temperature plotting\nobjects:\n\n- :func:`colour.plotting.\\\nplot_planckian_locus_in_chromaticity_diagram_CIE1931`\n- :func:`colour.plotting.\\\nplot_planckian_locus_in_chromaticity_diagram_CIE1960UCS`\n\"\"\"\n\nfrom __future__ import annotations\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.collections import LineCollection\n\nfrom colour.algebra import normalise_maximum\nfrom colour.colorimetry import MSDS_CMFS, CCS_ILLUMINANTS\nfrom colour.hints import (\n Any,\n ArrayLike,\n Callable,\n Dict,\n Floating,\n List,\n Literal,\n NDArray,\n Optional,\n Sequence,\n Tuple,\n Union,\n cast,\n)\nfrom colour.models import (\n UCS_to_uv,\n UCS_uv_to_xy,\n XYZ_to_UCS,\n xy_to_Luv_uv,\n xy_to_XYZ,\n)\nfrom colour.temperature import CCT_to_uv\nfrom colour.plotting import (\n CONSTANTS_COLOUR_STYLE,\n CONSTANTS_ARROW_STYLE,\n XYZ_to_plotting_colourspace,\n artist,\n plot_chromaticity_diagram_CIE1931,\n plot_chromaticity_diagram_CIE1960UCS,\n filter_passthrough,\n override_style,\n render,\n update_settings_collection,\n)\nfrom colour.plotting.diagrams import plot_chromaticity_diagram\nfrom colour.utilities import (\n as_int_scalar,\n full,\n optional,\n tstack,\n 
validate_method,\n zeros,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"plot_planckian_locus\",\n \"plot_planckian_locus_in_chromaticity_diagram\",\n \"plot_planckian_locus_in_chromaticity_diagram_CIE1931\",\n \"plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS\",\n]\n\n\n@override_style()\ndef plot_planckian_locus(\n planckian_locus_colours: Optional[Union[ArrayLike, str]] = None,\n planckian_locus_opacity: Floating = 1,\n planckian_locus_labels: Optional[Sequence] = None,\n method: Union[\n Literal[\"CIE 1931\", \"CIE 1960 UCS\", \"CIE 1976 UCS\"], str\n ] = \"CIE 1931\",\n **kwargs: Any,\n) -> Tuple[plt.Figure, plt.Axes]:\n \"\"\"\n Plot the *Planckian Locus* according to given method.\n\n Parameters\n ----------\n planckian_locus_colours\n *Planckian Locus* colours, if ``planckian_locus_colours`` is set to\n *RGB*, the colours will be computed according to the corresponding\n chromaticity coordinates.\n planckian_locus_opacity\n Opacity of the *Planckian Locus*.\n planckian_locus_labels\n Array of labels used to customise which iso-temperature lines will be\n drawn along the *Planckian Locus*. Passing an empty array will result\n in no iso-temperature lines being drawn.\n method\n *Chromaticity Diagram* method.\n\n Other Parameters\n ----------------\n kwargs\n {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},\n See the documentation of the previously listed definitions.\n\n Returns\n -------\n :class:`tuple`\n Current figure and axes.\n\n Examples\n --------\n >>> plot_planckian_locus(planckian_locus_colours='RGB')\n ... # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, <...AxesSubplot...>)\n\n .. 
image:: ../_static/Plotting_Plot_Planckian_Locus.png\n :align: center\n :alt: plot_planckian_locus\n \"\"\"\n\n method = validate_method(\n method, [\"CIE 1931\", \"CIE 1960 UCS\", \"CIE 1976 UCS\"]\n )\n\n planckian_locus_colours = optional(\n planckian_locus_colours, CONSTANTS_COLOUR_STYLE.colour.dark\n )\n\n labels = cast(\n Tuple,\n optional(\n planckian_locus_labels,\n (10**6 / 600, 2000, 2500, 3000, 4000, 6000, 10**6 / 100),\n ),\n )\n D_uv = 0.05\n\n settings: Dict[str, Any] = {\"uniform\": True}\n settings.update(kwargs)\n\n _figure, axes = artist(**settings)\n\n if method == \"cie 1931\":\n\n def uv_to_ij(uv: NDArray) -> NDArray:\n \"\"\"\n Convert given *uv* chromaticity coordinates to *ij* chromaticity\n coordinates.\n \"\"\"\n\n return UCS_uv_to_xy(uv)\n\n elif method == \"cie 1960 ucs\":\n\n def uv_to_ij(uv: NDArray) -> NDArray:\n \"\"\"\n Convert given *uv* chromaticity coordinates to *ij* chromaticity\n coordinates.\n \"\"\"\n\n return uv\n\n elif method == \"cie 1976 ucs\":\n\n def uv_to_ij(uv: NDArray) -> NDArray:\n \"\"\"\n Convert given *uv* chromaticity coordinates to *ij* chromaticity\n coordinates.\n \"\"\"\n\n return xy_to_Luv_uv(UCS_uv_to_xy(uv))\n\n def CCT_D_uv_to_plotting_colourspace(CCT_D_uv):\n \"\"\"\n Convert given *uv* chromaticity coordinates to the default plotting\n colourspace.\n \"\"\"\n\n return normalise_maximum(\n XYZ_to_plotting_colourspace(\n xy_to_XYZ(UCS_uv_to_xy(CCT_to_uv(CCT_D_uv, \"Robertson 1968\")))\n ),\n axis=-1,\n )\n\n start, end = 10**6 / 600, 10**6 / 10\n CCT = np.arange(start, end + 100, 100)\n CCT_D_uv = tstack([CCT, zeros(CCT.shape)]).reshape(-1, 1, 2)\n ij = uv_to_ij(CCT_to_uv(CCT_D_uv, \"Robertson 1968\"))\n\n use_RGB_planckian_locus_colours = (\n str(planckian_locus_colours).upper() == \"RGB\"\n )\n if use_RGB_planckian_locus_colours:\n pl_colours = CCT_D_uv_to_plotting_colourspace(CCT_D_uv)\n else:\n pl_colours = planckian_locus_colours\n\n line_collection = LineCollection(\n np.concatenate([ij[:-1], ij[1:]], axis=1),\n colors=pl_colours,\n alpha=planckian_locus_opacity,\n zorder=CONSTANTS_COLOUR_STYLE.zorder.foreground_line,\n )\n axes.add_collection(line_collection)\n\n for label in labels:\n CCT_D_uv = tstack(\n [full(10, label), np.linspace(-D_uv, D_uv, 10)]\n ).reshape(-1, 1, 2)\n\n if use_RGB_planckian_locus_colours:\n itl_colours = CCT_D_uv_to_plotting_colourspace(CCT_D_uv)\n else:\n itl_colours = planckian_locus_colours\n\n ij = uv_to_ij(CCT_to_uv(CCT_D_uv, \"Robertson 1968\"))\n\n line_collection = LineCollection(\n np.concatenate([ij[:-1], ij[1:]], axis=1),\n colors=itl_colours,\n alpha=planckian_locus_opacity,\n zorder=CONSTANTS_COLOUR_STYLE.zorder.foreground_line,\n )\n axes.add_collection(line_collection)\n axes.annotate(\n f\"{as_int_scalar(label)}K\",\n xy=(ij[-1, :, 0], ij[-1, :, 1]),\n xytext=(0, CONSTANTS_COLOUR_STYLE.geometry.long / 2),\n textcoords=\"offset points\",\n size=\"x-small\",\n zorder=CONSTANTS_COLOUR_STYLE.zorder.foreground_label,\n )\n\n settings = {\"axes\": axes}\n settings.update(kwargs)\n\n return render(**settings)\n\n\n@override_style()\ndef plot_planckian_locus_in_chromaticity_diagram(\n illuminants: Union[str, Sequence[str]],\n chromaticity_diagram_callable: Callable = (\n plot_chromaticity_diagram # type: ignore[has-type]\n ),\n method: Union[Literal[\"CIE 1931\", \"CIE 1960 UCS\"], str] = \"CIE 1931\",\n annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,\n plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,\n **kwargs: Any,\n) -> Tuple[plt.Figure, plt.Axes]:\n \"\"\"\n Plot 
the *Planckian Locus* and given illuminants in the\n *Chromaticity Diagram* according to given method.\n\n Parameters\n ----------\n illuminants\n Illuminants to plot. ``illuminants`` elements can be of any\n type or form supported by the\n :func:`colour.plotting.filter_passthrough` definition.\n chromaticity_diagram_callable\n Callable responsible for drawing the *Chromaticity Diagram*.\n method\n *Chromaticity Diagram* method.\n annotate_kwargs\n Keyword arguments for the :func:`matplotlib.pyplot.annotate`\n definition, used to annotate the resulting chromaticity coordinates\n with their respective spectral distribution names. ``annotate_kwargs``\n can be either a single dictionary applied to all the arrows with same\n settings or a sequence of dictionaries with different settings for each\n spectral distribution. The following special keyword arguments can also\n be used:\n\n - ``annotate`` : Whether to annotate the spectral distributions.\n plot_kwargs\n Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,\n used to control the style of the plotted illuminants. ``plot_kwargs``\n can be either a single dictionary applied to all the plotted\n illuminants with the same settings or a sequence of dictionaries with\n different settings for eachplotted illuminant.\n\n Other Parameters\n ----------------\n kwargs\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,\n :func:`colour.plotting.temperature.plot_planckian_locus`,\n :func:`colour.plotting.render`},\n See the documentation of the previously listed definitions.\n\n Returns\n -------\n :class:`tuple`\n Current figure and axes.\n\n Examples\n --------\n >>> annotate_kwargs = [\n ... {'xytext': (-25, 15), 'arrowprops':{'arrowstyle':'-'}},\n ... {'arrowprops':{'arrowstyle':'-['}},\n ... {},\n ... ]\n >>> plot_kwargs = [\n ... {\n ... 'markersize' : 15,\n ... },\n ... { 'color': 'r'},\n ... {},\n ... ]\n >>> plot_planckian_locus_in_chromaticity_diagram(\n ... ['A', 'B', 'C'],\n ... annotate_kwargs=annotate_kwargs,\n ... plot_kwargs=plot_kwargs\n ... ) # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, <...AxesSubplot...>)\n\n .. 
image:: ../_static/Plotting_\\\nPlot_Planckian_Locus_In_Chromaticity_Diagram.png\n :align: center\n :alt: plot_planckian_locus_in_chromaticity_diagram\n \"\"\"\n\n cmfs = MSDS_CMFS[\"CIE 1931 2 Degree Standard Observer\"]\n\n illuminants_filtered = filter_passthrough(\n CCS_ILLUMINANTS.get(cmfs.name), illuminants # type: ignore[arg-type]\n )\n\n settings: Dict[str, Any] = {\"uniform\": True}\n settings.update(kwargs)\n\n _figure, axes = artist(**settings)\n\n method = method.upper()\n\n settings = {\"axes\": axes, \"method\": method}\n settings.update(kwargs)\n settings[\"standalone\"] = False\n\n chromaticity_diagram_callable(**settings)\n\n plot_planckian_locus(**settings)\n\n if method == \"CIE 1931\":\n\n def xy_to_ij(xy: NDArray) -> NDArray:\n \"\"\"\n Convert given *CIE xy* chromaticity coordinates to *ij*\n chromaticity coordinates.\n \"\"\"\n\n return xy\n\n bounding_box = (-0.1, 0.9, -0.1, 0.9)\n elif method == \"CIE 1960 UCS\":\n\n def xy_to_ij(xy: NDArray) -> NDArray:\n \"\"\"\n Convert given *CIE xy* chromaticity coordinates to *ij*\n chromaticity coordinates.\n \"\"\"\n\n return UCS_to_uv(XYZ_to_UCS(xy_to_XYZ(xy)))\n\n bounding_box = (-0.1, 0.7, -0.2, 0.6)\n else:\n raise ValueError(\n f'Invalid method: \"{method}\", must be one of '\n f'[\"CIE 1931\", \"CIE 1960 UCS\"]'\n )\n\n annotate_settings_collection = [\n {\n \"annotate\": True,\n \"xytext\": (-50, 30),\n \"textcoords\": \"offset points\",\n \"arrowprops\": CONSTANTS_ARROW_STYLE,\n \"zorder\": CONSTANTS_COLOUR_STYLE.zorder.foreground_annotation,\n }\n for _ in range(len(illuminants_filtered))\n ]\n\n if annotate_kwargs is not None:\n update_settings_collection(\n annotate_settings_collection,\n annotate_kwargs,\n len(illuminants_filtered),\n )\n\n plot_settings_collection = [\n {\n \"color\": CONSTANTS_COLOUR_STYLE.colour.brightest,\n \"label\": f\"{illuminant}\",\n \"marker\": \"o\",\n \"markeredgecolor\": CONSTANTS_COLOUR_STYLE.colour.dark,\n \"markeredgewidth\": CONSTANTS_COLOUR_STYLE.geometry.short * 0.75,\n \"markersize\": (\n CONSTANTS_COLOUR_STYLE.geometry.short * 6\n + CONSTANTS_COLOUR_STYLE.geometry.short * 0.75\n ),\n \"zorder\": CONSTANTS_COLOUR_STYLE.zorder.foreground_line,\n }\n for illuminant in illuminants_filtered\n ]\n\n if plot_kwargs is not None:\n update_settings_collection(\n plot_settings_collection, plot_kwargs, len(illuminants_filtered)\n )\n\n for i, (illuminant, xy) in enumerate(illuminants_filtered.items()):\n plot_settings = plot_settings_collection[i]\n\n ij = xy_to_ij(xy)\n\n axes.plot(ij[0], ij[1], **plot_settings)\n\n if annotate_settings_collection[i][\"annotate\"]:\n annotate_settings = annotate_settings_collection[i]\n annotate_settings.pop(\"annotate\")\n\n axes.annotate(illuminant, xy=ij, **annotate_settings)\n\n title = (\n (\n f\"{', '.join(illuminants_filtered)} Illuminants - Planckian Locus\\n\"\n f\"{method.upper()} Chromaticity Diagram - \"\n \"CIE 1931 2 Degree Standard Observer\"\n )\n if illuminants_filtered\n else (\n f\"Planckian Locus\\n{method.upper()} Chromaticity Diagram - \"\n f\"CIE 1931 2 Degree Standard Observer\"\n )\n )\n\n settings.update(\n {\n \"axes\": axes,\n \"standalone\": True,\n \"bounding_box\": bounding_box,\n \"title\": title,\n }\n )\n settings.update(kwargs)\n\n return render(**settings)\n\n\n@override_style()\ndef plot_planckian_locus_in_chromaticity_diagram_CIE1931(\n illuminants: Union[str, Sequence[str]],\n chromaticity_diagram_callable_CIE1931: Callable = (\n plot_chromaticity_diagram_CIE1931 # type: ignore[has-type]\n ),\n annotate_kwargs: 
Optional[Union[Dict, List[Dict]]] = None,\n plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,\n **kwargs: Any,\n) -> Tuple[plt.Figure, plt.Axes]:\n \"\"\"\n Plot the *Planckian Locus* and given illuminants in\n *CIE 1931 Chromaticity Diagram*.\n\n Parameters\n ----------\n illuminants\n Illuminants to plot. ``illuminants`` elements can be of any\n type or form supported by the\n :func:`colour.plotting.filter_passthrough` definition.\n chromaticity_diagram_callable_CIE1931\n Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*.\n annotate_kwargs\n Keyword arguments for the :func:`matplotlib.pyplot.annotate`\n definition, used to annotate the resulting chromaticity coordinates\n with their respective spectral distribution names. ``annotate_kwargs``\n can be either a single dictionary applied to all the arrows with same\n settings or a sequence of dictionaries with different settings for each\n spectral distribution. The following special keyword arguments can also\n be used:\n\n - ``annotate`` : Whether to annotate the spectral distributions.\n plot_kwargs\n Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,\n used to control the style of the plotted illuminants. ``plot_kwargs``\n can be either a single dictionary applied to all the plotted\n illuminants with the same settings or a sequence of dictionaries with\n different settings for eachplotted illuminant.\n\n Other Parameters\n ----------------\n kwargs\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,\n :func:`colour.plotting.temperature.plot_planckian_locus`,\n :func:`colour.plotting.temperature.\\\nplot_planckian_locus_in_chromaticity_diagram`,\n :func:`colour.plotting.render`},\n See the documentation of the previously listed definitions.\n\n Returns\n -------\n :class:`tuple`\n Current figure and axes.\n\n Examples\n --------\n >>> plot_planckian_locus_in_chromaticity_diagram_CIE1931(['A', 'B', 'C'])\n ... # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, <...AxesSubplot...>)\n\n .. image:: ../_static/Plotting_\\\nPlot_Planckian_Locus_In_Chromaticity_Diagram_CIE1931.png\n :align: center\n :alt: plot_planckian_locus_in_chromaticity_diagram_CIE1931\n \"\"\"\n\n settings = dict(kwargs)\n settings.update({\"method\": \"CIE 1931\"})\n\n return plot_planckian_locus_in_chromaticity_diagram(\n illuminants,\n chromaticity_diagram_callable_CIE1931,\n annotate_kwargs=annotate_kwargs,\n plot_kwargs=plot_kwargs,\n **settings,\n )\n\n\n@override_style()\ndef plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(\n illuminants: Union[str, Sequence[str]],\n chromaticity_diagram_callable_CIE1960UCS: Callable = (\n plot_chromaticity_diagram_CIE1960UCS # type: ignore[has-type]\n ),\n annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,\n plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,\n **kwargs: Any,\n) -> Tuple[plt.Figure, plt.Axes]:\n \"\"\"\n Plot the *Planckian Locus* and given illuminants in\n *CIE 1960 UCS Chromaticity Diagram*.\n\n Parameters\n ----------\n illuminants\n Illuminants to plot. ``illuminants`` elements can be of any\n type or form supported by the\n :func:`colour.plotting.filter_passthrough` definition.\n chromaticity_diagram_callable_CIE1960UCS\n Callable responsible for drawing the\n *CIE 1960 UCS Chromaticity Diagram*.\n annotate_kwargs\n Keyword arguments for the :func:`matplotlib.pyplot.annotate`\n definition, used to annotate the resulting chromaticity coordinates\n with their respective spectral distribution names. 
``annotate_kwargs``\n can be either a single dictionary applied to all the arrows with same\n settings or a sequence of dictionaries with different settings for each\n spectral distribution. The following special keyword arguments can also\n be used:\n\n - ``annotate`` : Whether to annotate the spectral distributions.\n plot_kwargs\n Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,\n used to control the style of the plotted illuminants. ``plot_kwargs``\n can be either a single dictionary applied to all the plotted\n illuminants with the same settings or a sequence of dictionaries with\n different settings for eachplotted illuminant.\n\n Other Parameters\n ----------------\n kwargs\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,\n :func:`colour.plotting.temperature.plot_planckian_locus`,\n :func:`colour.plotting.temperature.\\\nplot_planckian_locus_in_chromaticity_diagram`,\n :func:`colour.plotting.render`},\n See the documentation of the previously listed definitions.\n\n Returns\n -------\n :class:`tuple`\n Current figure and axes.\n\n Examples\n --------\n >>> plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(\n ... ['A', 'C', 'E']) # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, <...AxesSubplot...>)\n\n .. image:: ../_static/Plotting_\\\nPlot_Planckian_Locus_In_Chromaticity_Diagram_CIE1960UCS.png\n :align: center\n :alt: plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS\n \"\"\"\n\n settings = dict(kwargs)\n settings.update({\"method\": \"CIE 1960 UCS\"})\n\n return plot_planckian_locus_in_chromaticity_diagram(\n illuminants,\n chromaticity_diagram_callable_CIE1960UCS,\n annotate_kwargs=annotate_kwargs,\n plot_kwargs=plot_kwargs,\n **settings,\n )\n" ]
[ [ "numpy.reshape", "numpy.array", "numpy.tile" ], [ "numpy.array" ], [ "numpy.array" ], [ "numpy.array" ], [ "scipy.optimize.fmin", "numpy.array" ], [ "numpy.reshape", "numpy.array", "numpy.tile" ], [ "numpy.array" ], [ "numpy.array" ], [ "numpy.array" ], [ "numpy.concatenate", "numpy.arange", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ExternalRepositories/shroud
[ "86c39d2324d947d28055f9024f52cc493eb0c813", "86c39d2324d947d28055f9024f52cc493eb0c813" ]
[ "regression/run/struct-numpy-c/python/test.py", "regression/run/vectors-numpy/python/test.py" ]
[ "# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and\n# other Shroud Project Developers.\n# See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (BSD-3-Clause)\n# #######################################################################\n#\n# Test Python API generated from struct.yaml.\n# struct-numpy-c\n#\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport cstruct\n\nclass Struct(unittest.TestCase):\n \"\"\"Test struct problem\"\"\"\n \n def XXsetUp(self):\n \"\"\" Setting up for the test \"\"\"\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")\n \n def XXtearDown(self):\n \"\"\"Cleaning up after the test\"\"\"\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")\n\n def test_dtype(self):\n dt = cstruct.Cstruct1_dtype\n #print(\"Byte order is:\",dt.byteorder) \n #print(\"Size is:\",dt.itemsize) \n self.assertEqual(dt.names, ('ifield', 'dfield'))\n self.assertEqual(dt.char, 'V')\n self.assertEqual(\"void128\", dt.name) \n self.assertEqual(\"int32\", dt[\"ifield\"].name)\n self.assertEqual(\"float64\", dt[\"dfield\"].name)\n a = np.array([(1, 1.5), (2, 2.6)], dtype=dt) \n self.assertEqual(1, a.ndim)\n self.assertEqual(2, a.size)\n self.assertEqual(1, a[0][\"ifield\"])\n self.assertEqual(1.5, a[0][\"dfield\"])\n self.assertEqual(2, a[1][\"ifield\"])\n self.assertEqual(2.6, a[1][\"dfield\"])\n\n def test_passStructByValue(self):\n i = cstruct.passStructByValue((2, 2.0))\n self.assertEqual(4, i)\n\n i = cstruct.passStructByValue((2.0, 2.0))\n self.assertEqual(4, i)\n\n with self.assertRaises(ValueError) as context:\n i = cstruct.passStructByValue((2.0, \"two\"))\n self.assertTrue(\"arg must be a 1-D array of Cstruct1\" in str(context.exception))\n\n def test_passStruct1(self):\n i = cstruct.passStruct1((12,12.6))\n self.assertEqual(12, i)\n\n dt = cstruct.Cstruct1_dtype\n a = np.array((1, 1.5), dtype=dt)\n i = cstruct.passStruct1(a)\n self.assertEqual(1, i)\n\n def test_passStruct2(self):\n i, name = cstruct.passStruct2((22,22.8))\n self.assertEqual(22, i)\n self.assertEqual(\"passStruct2\", name)\n\n dt = cstruct.Cstruct1_dtype\n a = np.array((1, 1.5), dtype=dt)\n i, name = cstruct.passStruct2(a)\n self.assertEqual(1, i)\n self.assertEqual(\"passStruct2\", name)\n\n def test_acceptStructInPtr(self):\n s = np.array((3,3.0), dtype=cstruct.Cstruct1_dtype)\n cstruct.acceptStructInPtr(s)\n\n def test_acceptStructOutPtr(self):\n str = cstruct.acceptStructOutPtr(4, 4.5)\n self.assertIsInstance(str, np.ndarray)\n self.assertIs(str.dtype, cstruct.Cstruct1_dtype)\n self.assertEqual(4, str[\"ifield\"])\n self.assertEqual(4.5, str[\"dfield\"])\n\n def test_acceptStructInOutPtr(self):\n out = cstruct.acceptStructInOutPtr((22,22.8))\n self.assertIsInstance(out, np.ndarray)\n self.assertIs(out.dtype, cstruct.Cstruct1_dtype)\n self.assertEqual(23, out[\"ifield\"])\n self.assertEqual(23.8, out[\"dfield\"])\n\n dt = cstruct.Cstruct1_dtype\n a = np.array((4, 4.0), dtype=dt)\n out = cstruct.acceptStructInOutPtr(a)\n self.assertIsInstance(out, np.ndarray)\n self.assertIs(out.dtype, cstruct.Cstruct1_dtype)\n self.assertEqual(5, out[\"ifield\"])\n self.assertEqual(5.0, out[\"dfield\"])\n\n def test_returnStructByValue(self):\n out = cstruct.returnStructByValue(1, 2.5)\n self.assertIsInstance(out, np.ndarray)\n self.assertIs(out.dtype, cstruct.Cstruct1_dtype)\n self.assertEqual(0, out.ndim)\n self.assertEqual(1, out.size)\n self.assertEqual(1, out[\"ifield\"])\n 
self.assertEqual(2.5, out[\"dfield\"])\n\n def test_returnStructPtr1(self):\n out = cstruct.returnStructPtr1(33, 33.5)\n self.assertIsInstance(out, np.ndarray)\n self.assertIs(out.dtype, cstruct.Cstruct1_dtype)\n self.assertEqual(0, out.ndim)\n self.assertEqual(1, out.size)\n self.assertEqual(33, out[\"ifield\"])\n self.assertEqual(33.5, out[\"dfield\"])\n\n def test_returnStructPtr2(self):\n out, name = cstruct.returnStructPtr2(35, 35.5)\n self.assertIsInstance(out, np.ndarray)\n self.assertIs(out.dtype, cstruct.Cstruct1_dtype)\n self.assertEqual(0, out.ndim)\n self.assertEqual(1, out.size)\n self.assertEqual(35, out[\"ifield\"])\n self.assertEqual(35.5, out[\"dfield\"])\n self.assertEqual(\"returnStructPtr2\", name)\n\n\n# creating a new test suite\nnewSuite = unittest.TestSuite()\n \n# adding a test case\nnewSuite.addTest(unittest.makeSuite(Struct))\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and\n# other Shroud Project Developers.\n# See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (BSD-3-Clause)\n# #######################################################################\n#\n# Test Python API generated from vectors.yaml.\n# vectors-numpy\n#\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport vectors\n\nclass NotTrue:\n \"\"\"Test bool arguments errors\"\"\"\n def __bool__(self):\n raise NotImplementedError\n \nclass Vectors(unittest.TestCase):\n \"\"\"Test vectors problem\"\"\"\n \n def XXsetUp(self):\n \"\"\" Setting up for the test \"\"\"\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")\n \n def XXtearDown(self):\n \"\"\"Cleaning up after the test\"\"\"\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")\n\n def test_vector_sum(self):\n irv = vectors.vector_sum([1,2,3,4,5])\n self.assertEqual(15, irv)\n\n arg = np.array([10,20,30,40,50], dtype=np.intc)\n irv = vectors.vector_sum(arg)\n self.assertEqual(150, irv)\n\n def test_vector_iota_out(self):\n # The intent(out) argument is returned from the function.\n arg = vectors.vector_iota_out()\n self.assertTrue(all(np.equal(arg, [1,2,3,4,5])))\n#\n# ! inta is intent(out), so it will be deallocated upon entry to vector_iota_out_alloc\n# call vector_iota_out_alloc(inta)\n# call assert_true(allocated(inta))\n# call assert_equals(5 , size(inta))\n# call assert_true( all(inta == [1,2,3,4,5]), &\n# \"vector_iota_out_alloc value\")\n#\n# ! inta is intent(inout), so it will NOT be deallocated upon entry to vector_iota_inout_alloc\n# ! Use previous value to append\n# call vector_iota_inout_alloc(inta)\n# call assert_true(allocated(inta))\n# call assert_equals(10 , size(inta))\n# call assert_true( all(inta == [1,2,3,4,5,11,12,13,14,15]), &\n# \"vector_iota_inout_alloc value\")\n# deallocate(inta)\n#\n# intv = [1,2,3,4,5]\n# call vector_increment(intv)\n# call assert_true(all(intv(:) .eq. 
[2,3,4,5,6]))\n\n def test_vector_iota_out_d(self):\n # The intent(out) argument is returned from the function.\n # As double.\n arg = vectors.vector_iota_out_d()\n self.assertTrue(np.allclose(arg, [1,2,3,4,5]))\n\n def test_returnVectorAlloc(self):\n rv = vectors.ReturnVectorAlloc(10)\n\n self.assertIsInstance(rv, np.ndarray)\n self.assertEqual('int32', rv.dtype.name)\n self.assertEqual(10, rv.size)\n self.assertTrue(all(np.equal(rv, [1,2,3,4,5,6,7,8,9,10])))\n# self.assertTrue(np.allclose(rv, outarray))\n\n\n# creating a new test suite\nnewSuite = unittest.TestSuite()\n \n# adding a test case\nnewSuite.addTest(unittest.makeSuite(Vectors))\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array" ], [ "numpy.array", "numpy.equal", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ecreager/jax
[ "948def817fd7cc2ee7a988b5142401a580b1bbd3" ]
[ "tests/lax_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nfrom functools import partial\nimport itertools\nfrom unittest import skip, SkipTest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as onp\nimport numpy.random as npr\n\nfrom jax import api\nfrom jax import core\nfrom jax import lax\nfrom jax import test_util as jtu\nfrom jax import lax_reference\nfrom jax.test_util import check_grads\nfrom jax.interpreters import xla\nfrom jax.lib import xla_bridge\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\n\ndef num_float_bits(dtype):\n return onp.finfo(xla_bridge.canonicalize_dtype(dtype)).bits\n\n\n### lax tests\n\n# For standard unops and binops, we can generate a large number of tests on\n# arguments of appropriate shapes and dtypes using the following table.\n\nfloat_dtypes = [onp.float32, onp.float64]\ncomplex_dtypes = [onp.complex64, onp.complex128]\ninexact_dtypes = float_dtypes + complex_dtypes\nint_dtypes = [onp.int32, onp.int64]\nbool_dtypes = [onp.bool_]\ndefault_dtypes = float_dtypes + int_dtypes\nall_dtypes = float_dtypes + complex_dtypes + int_dtypes + bool_dtypes\n\ncompatible_shapes = [[(3,)], [(3, 4), (3, 1), (1, 4)], [(2, 3, 4), (2, 1, 4)]]\n\nOpRecord = collections.namedtuple(\"OpRecord\",\n [\"op\", \"nargs\", \"dtypes\", \"rng\", \"tol\"])\n\n\ndef op_record(op, nargs, dtypes, rng, tol=1e-5):\n return OpRecord(op, nargs, dtypes, rng, tol)\n\nLAX_OPS = [\n op_record(lax.neg, 1, default_dtypes + complex_dtypes, jtu.rand_small()),\n op_record(lax.sign, 1, default_dtypes, jtu.rand_small()),\n op_record(lax.floor, 1, float_dtypes, jtu.rand_small()),\n op_record(lax.ceil, 1, float_dtypes, jtu.rand_small()),\n op_record(lax.round, 1, float_dtypes, jtu.rand_default()),\n\n op_record(lax.is_finite, 1, float_dtypes, jtu.rand_small()),\n\n op_record(lax.exp, 1, float_dtypes + complex_dtypes, jtu.rand_small()),\n op_record(lax.expm1, 1, float_dtypes + complex_dtypes, jtu.rand_small()),\n op_record(lax.log, 1, float_dtypes + complex_dtypes, jtu.rand_positive()),\n op_record(lax.log1p, 1, float_dtypes + complex_dtypes, jtu.rand_positive()),\n op_record(lax.tanh, 1, float_dtypes + complex_dtypes, jtu.rand_small()),\n op_record(lax.sin, 1, float_dtypes + complex_dtypes, jtu.rand_default()),\n op_record(lax.cos, 1, float_dtypes + complex_dtypes, jtu.rand_default()),\n op_record(lax.atan2, 2, float_dtypes, jtu.rand_default()),\n\n op_record(lax.sqrt, 1, float_dtypes + complex_dtypes, jtu.rand_positive()),\n op_record(lax.rsqrt, 1, float_dtypes + complex_dtypes, jtu.rand_positive()),\n op_record(lax.square, 1, float_dtypes + complex_dtypes, jtu.rand_default()),\n op_record(lax.reciprocal, 1, float_dtypes + complex_dtypes, jtu.rand_positive()),\n op_record(lax.tan, 1, float_dtypes, jtu.rand_default()),\n op_record(lax.asin, 1, 
float_dtypes, jtu.rand_small()),\n op_record(lax.acos, 1, float_dtypes, jtu.rand_small()),\n op_record(lax.atan, 1, float_dtypes, jtu.rand_small()),\n op_record(lax.sinh, 1, float_dtypes + complex_dtypes, jtu.rand_default()),\n op_record(lax.cosh, 1, float_dtypes + complex_dtypes, jtu.rand_default()),\n op_record(lax.asinh, 1, float_dtypes + complex_dtypes, jtu.rand_positive()),\n op_record(lax.acosh, 1, float_dtypes + complex_dtypes, jtu.rand_positive()),\n\n op_record(lax.lgamma, 1, float_dtypes, jtu.rand_positive()),\n op_record(lax.digamma, 1, float_dtypes, jtu.rand_positive()),\n op_record(lax.erf, 1, float_dtypes, jtu.rand_small()),\n op_record(lax.erfc, 1, float_dtypes, jtu.rand_small()),\n op_record(lax.erf_inv, 1, float_dtypes, jtu.rand_small(), tol=1e-2),\n\n op_record(lax.real, 1, complex_dtypes, jtu.rand_default()),\n op_record(lax.imag, 1, complex_dtypes, jtu.rand_default()),\n op_record(lax.complex, 2, [onp.float32], jtu.rand_default()),\n op_record(lax.conj, 1, [onp.float32] + complex_dtypes, jtu.rand_default()),\n op_record(lax.abs, 1, default_dtypes + complex_dtypes, jtu.rand_default()),\n op_record(lax.pow, 2, float_dtypes + complex_dtypes, jtu.rand_positive()),\n\n op_record(lax.bitwise_and, 2, bool_dtypes, jtu.rand_small()),\n op_record(lax.bitwise_not, 1, bool_dtypes, jtu.rand_small()),\n op_record(lax.bitwise_or, 2, bool_dtypes, jtu.rand_small()),\n op_record(lax.bitwise_xor, 2, bool_dtypes, jtu.rand_small()),\n\n op_record(lax.add, 2, default_dtypes + complex_dtypes, jtu.rand_small()),\n op_record(lax.sub, 2, default_dtypes + complex_dtypes, jtu.rand_small()),\n op_record(lax.mul, 2, default_dtypes + complex_dtypes, jtu.rand_small()),\n op_record(lax.div, 2, default_dtypes + complex_dtypes, jtu.rand_nonzero()),\n op_record(lax.rem, 2, default_dtypes, jtu.rand_nonzero()),\n\n op_record(lax.max, 2, all_dtypes, jtu.rand_small()),\n op_record(lax.min, 2, all_dtypes, jtu.rand_small()),\n\n op_record(lax.eq, 2, all_dtypes, jtu.rand_some_equal()),\n op_record(lax.ne, 2, all_dtypes, jtu.rand_small()),\n op_record(lax.ge, 2, default_dtypes, jtu.rand_small()),\n op_record(lax.gt, 2, default_dtypes, jtu.rand_small()),\n op_record(lax.le, 2, default_dtypes, jtu.rand_small()),\n op_record(lax.lt, 2, default_dtypes, jtu.rand_small()),\n]\n\nCombosWithReplacement = itertools.combinations_with_replacement\n\n\nclass LaxTest(jtu.JaxTestCase):\n \"\"\"Numerical tests for LAX operations.\"\"\"\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.op.__name__, shapes, itertools.repeat(dtype)),\n \"op\": rec.op, \"rng\": rec.rng, \"shapes\": shapes, \"dtype\": dtype}\n for shape_group in compatible_shapes\n for shapes in CombosWithReplacement(shape_group, rec.nargs)\n for dtype in rec.dtypes)\n for rec in LAX_OPS))\n def testOp(self, op, rng, shapes, dtype):\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.op.__name__, shapes, itertools.repeat(dtype)),\n \"op\": rec.op, \"rng\": rec.rng, \"shapes\": shapes, \"dtype\": dtype,\n \"tol\": rec.tol}\n for shape_group in compatible_shapes\n for shapes in CombosWithReplacement(shape_group, rec.nargs)\n for dtype in rec.dtypes)\n for rec in LAX_OPS))\n def testOpAgainstNumpy(self, op, rng, shapes, dtype, tol):\n args_maker = lambda: 
[rng(shape, dtype) for shape in shapes]\n numpy_op = getattr(lax_reference, op.__name__)\n self._CheckAgainstNumpy(op, numpy_op, args_maker, tol=tol)\n\n # TODO test shift_left, shift_right_arithmetic, shift_right_logical\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\".format(\n from_dtype, to_dtype),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype, \"rng\": rng}\n for from_dtype, to_dtype in itertools.product(\n [onp.float32, onp.int32, \"float32\", \"int32\"], repeat=2)\n for rng in [jtu.rand_default()]))\n def testConvertElementType(self, from_dtype, to_dtype, rng):\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax.convert_element_type(x, to_dtype)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\"\n .format(from_dtype, to_dtype),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype, \"rng\": rng}\n for from_dtype, to_dtype in itertools.product(\n [onp.float32, onp.int32, \"float32\", \"int32\"], repeat=2)\n for rng in [jtu.rand_default()]))\n def testConvertElementTypeAgainstNumpy(self, from_dtype, to_dtype, rng):\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax.convert_element_type(x, to_dtype)\n numpy_op = lambda x: lax_reference.convert_element_type(x, to_dtype)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\"\n .format(from_dtype, to_dtype),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype, \"rng\": rng}\n for from_dtype, to_dtype in itertools.product(\n [onp.float32, onp.int32, \"float32\", \"int32\"], repeat=2)\n for rng in [jtu.rand_default()]))\n def testBitcastConvertType(self, from_dtype, to_dtype, rng):\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax.bitcast_convert_type(x, to_dtype)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\"\n .format(from_dtype, to_dtype),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype, \"rng\": rng}\n for from_dtype, to_dtype in itertools.product(\n [onp.float32, onp.int32, \"float32\", \"int32\"], repeat=2)\n for rng in [jtu.rand_default()]))\n def testBitcastConvertTypeAgainstNumpy(self, from_dtype, to_dtype, rng):\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax.bitcast_convert_type(x, to_dtype)\n numpy_op = lambda x: lax_reference.bitcast_convert_type(x, to_dtype)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_min_shape={}_operand_shape={}_max_shape={}\".format(\n jtu.format_shape_dtype_string(min_shape, dtype),\n jtu.format_shape_dtype_string(operand_shape, dtype),\n jtu.format_shape_dtype_string(max_shape, dtype)),\n \"min_shape\": min_shape, \"operand_shape\": operand_shape,\n \"max_shape\": max_shape, \"dtype\": dtype, \"rng\": rng}\n for min_shape, operand_shape, max_shape in [\n [(), (2, 3), ()],\n [(2, 3), (2, 3), ()],\n [(), (2, 3), (2, 3)],\n [(2, 3), (2, 3), (2, 3)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testClamp(self, min_shape, operand_shape, max_shape, dtype, rng):\n shapes = [min_shape, operand_shape, max_shape]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n 
self._CompileAndCheck(lax.clamp, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_min_shape={}_operand_shape={}_max_shape={}\".format(\n jtu.format_shape_dtype_string(min_shape, dtype),\n jtu.format_shape_dtype_string(operand_shape, dtype),\n jtu.format_shape_dtype_string(max_shape, dtype)),\n \"min_shape\": min_shape, \"operand_shape\": operand_shape,\n \"max_shape\": max_shape, \"dtype\": dtype, \"rng\": rng}\n for min_shape, operand_shape, max_shape in [\n [(), (2, 3), ()],\n [(2, 3), (2, 3), ()],\n [(), (2, 3), (2, 3)],\n [(2, 3), (2, 3), (2, 3)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testClampAgainstNumpy(self, min_shape, operand_shape, max_shape, dtype,\n rng):\n shapes = [min_shape, operand_shape, max_shape]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n self._CheckAgainstNumpy(lax.clamp, lax_reference.clamp, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dim={}_baseshape=[{}]_dtype={}_narrs={}\".format(\n dim, \",\".join(str(d) for d in base_shape), onp.dtype(dtype).name,\n num_arrs),\n \"dim\": dim, \"base_shape\": base_shape, \"dtype\": dtype,\n \"num_arrs\": num_arrs, \"rng\": rng}\n for num_arrs in [3]\n for dtype in default_dtypes\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for dim in range(len(base_shape))\n for rng in [jtu.rand_default()]))\n def testConcatenate(self, dim, base_shape, dtype, num_arrs, rng):\n shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n op = lambda *args: lax.concatenate(args, dim)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dim={}_baseshape=[{}]_dtype={}_narrs={}\".format(\n dim, \",\".join(str(d) for d in base_shape), onp.dtype(dtype).name,\n num_arrs),\n \"dim\": dim, \"base_shape\": base_shape, \"dtype\": dtype,\n \"num_arrs\": num_arrs, \"rng\": rng}\n for num_arrs in [3]\n for dtype in default_dtypes\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for dim in range(len(base_shape))\n for rng in [jtu.rand_default()]))\n def testConcatenateAgainstNumpy(self, dim, base_shape, dtype, num_arrs, rng):\n shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n op = lambda *args: lax.concatenate(args, dim)\n numpy_op = lambda *args: lax_reference.concatenate(args, dim)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rng\": rng}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([2, 3], repeat=3)]\n for dtype in [onp.float32]\n for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [\"VALID\", \"SAME\"]\n for rng in [jtu.rand_small()]))\n def testConv(self, lhs_shape, rhs_shape, dtype, strides, padding, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, 
rhs):\n return lax.conv(lhs, rhs, strides, padding)\n\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rng\": rng}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([2, 3], repeat=3)]\n for dtype in [onp.float32]\n for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [\"VALID\", \"SAME\"]\n for rng in [jtu.rand_small()]))\n def testConvAgainstNumpy(self, lhs_shape, rhs_shape, dtype, strides, padding,\n rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n op = lambda lhs, rhs: lax.conv(lhs, rhs, strides, padding)\n numpy_op = lambda lhs, rhs: lax_reference.conv(lhs, rhs, strides, padding)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\"\n \"_lhs_dilation={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dilation, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dilation\": lhs_dilation,\n \"rhs_dilation\": rhs_dilation, \"rng\": rng}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([1, 2, 3], repeat=3)]\n for dtype in [onp.float32] for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]\n for lhs_dilation, rhs_dilation in itertools.product(\n [(1, 1), (1, 2), (2, 2)], repeat=2)\n for rng in [jtu.rand_small()]))\n def testConvWithGeneralPadding(self, lhs_shape, rhs_shape, dtype, strides,\n padding, lhs_dilation, rhs_dilation, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_with_general_padding(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)\n\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\"\n \"_lhs_dilation={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dilation, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dilation\": lhs_dilation,\n \"rhs_dilation\": rhs_dilation, \"rng\": rng}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([1, 2, 3], repeat=3)]\n for dtype in [onp.float32] for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]\n for lhs_dilation, rhs_dilation in itertools.product(\n [(1, 1), (1, 2), (2, 2)], repeat=2)\n for rng in [jtu.rand_small()]))\n def DISABLED_testConvWithGeneralPaddingAgainstNumpy(\n self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dilation,\n rhs_dilation, rng):\n # TODO(mattjj): make this test pass\n return SkipTest(\"this test is incomplete\")\n args_maker = lambda: 
[rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_with_general_padding(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)\n\n def numpy_fun(lhs, rhs):\n return lax_reference.conv_with_general_padding(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)\n\n self._CheckAgainstNumpy(fun, numpy_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\"\n \"_lhs_dilation={}_rhs_dilation={}\"\n \"_dims={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dilation, rhs_dilation,\n \",\".join(dim_nums)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dilation\": lhs_dilation,\n \"rhs_dilation\": rhs_dilation, \"dimension_numbers\": dim_nums,\n \"perms\": perms, \"rng\": rng}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([2, 3], repeat=3)]\n for dtype in [onp.float32] for strides in [(1, 1), (2, 1)]\n for padding in [((1, 2), (2, 0))]\n for lhs_dilation, rhs_dilation in itertools.product(\n [(1, 1), (1, 2)], repeat=2)\n for rng in [jtu.rand_small()]\n for dim_nums, perms in [\n ((\"NCHW\", \"OIHW\", \"NCHW\"), ([0, 1, 2, 3], [0, 1, 2, 3])),\n ((\"NHWC\", \"HWIO\", \"NHWC\"), ([0, 2, 3, 1], [2, 3, 1, 0])),\n ((\"NCHW\", \"HWIO\", \"NHWC\"), ([0, 1, 2, 3], [2, 3, 1, 0])),\n ]))\n def testConvGeneralDilated(self, lhs_shape, rhs_shape, dtype, strides,\n padding, lhs_dilation, rhs_dilation,\n dimension_numbers, perms, rng):\n lhs_perm, rhs_perm = perms # permute to compatible shapes\n\n def args_maker():\n return [lax.transpose(rng(lhs_shape, dtype), lhs_perm),\n lax.transpose(rng(rhs_shape, dtype), rhs_perm)]\n\n def fun(lhs, rhs):\n return lax.conv_general_dilated(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers)\n\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n # TODO(mattjj): test conv_general_dilated against numpy\n\n @staticmethod\n def _conv_transpose_via_grad(data, kernel, strides, padding,\n dimension_numbers=None):\n \"\"\"Helper method: calculates conv transpose via grad for testing.\"\"\"\n assert len(data.shape) == len(kernel.shape)\n nspatial = len(data.shape) - 2\n one = (1,) * nspatial\n dn = lax.conv_dimension_numbers(data.shape, kernel.shape,\n dimension_numbers)\n in_shape = onp.take(data.shape, dn.lhs_spec)\n in_sdims = in_shape[2:]\n k_shape = onp.take(kernel.shape, dn.rhs_spec)\n k_sdims = k_shape[2:]\n if padding == 'VALID':\n o_sdims = [in_sdims[i]*strides[i] + max(k_sdims[i]-strides[i],0)\n for i in range(nspatial)]\n elif padding == 'SAME':\n o_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]\n o_shape = [in_shape[0], k_shape[1]] + o_sdims\n out_spec_inv = [x[0] for x in\n sorted(enumerate(dn.out_spec), key=lambda x: x[1])]\n o_layout = onp.take(onp.array(o_shape), out_spec_inv)\n placeholder = onp.ones(o_layout, data.dtype)\n conv = lambda x: lax.conv_general_dilated(x, kernel, strides, padding,\n one, one, dn)\n _, g = api.vjp(conv, placeholder)\n return g(data)[0]\n\n @staticmethod\n def _transpose_conv_kernel(data, kernel, dimension_numbers):\n dn = lax.conv_dimension_numbers(data.shape, kernel.shape,\n dimension_numbers)\n spatial_axes = onp.array(dn.rhs_spec)[2:]\n for axis in spatial_axes:\n kernel = onp.flip(kernel, axis)\n kernel = onp.swapaxes(kernel, 
dn.rhs_spec[0], dn.rhs_spec[1])\n return kernel\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rng\": rng, 'dspec': dspec}\n for lhs_shape, rhs_shape in [\n ((b, 9, 10, i), (k, k, j, i)) # NB: i,j flipped in RHS for transpose\n for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n for dtype in [onp.float32]\n for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]\n for padding in [\"VALID\", \"SAME\"]\n for dspec in [('NHWC', 'HWIO', 'NHWC'),]\n for rng in [jtu.rand_small()]))\n def testConvTranspose2DT(self, lhs_shape, rhs_shape, dtype, strides,\n padding, dspec, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n # NB: this test calculates conv_transpose performing identically to the\n # lhs-grad of conv.\n def fun(lhs, rhs):\n return lax.conv_transpose(lhs, rhs, strides, padding,\n dimension_numbers=dspec,\n transpose_kernel=True)\n\n def fun_via_grad(lhs, rhs):\n return self._conv_transpose_via_grad(lhs, rhs, strides, padding,\n dimension_numbers=dspec)\n\n # NB: below just checks for agreement, we're not calling numpy.\n self._CheckAgainstNumpy(fun, fun_via_grad, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rng\": rng, 'dspec': dspec}\n for lhs_shape, rhs_shape in [\n ((b, 9, 10, i), (k, k, i, j))\n for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n for dtype in [onp.float32]\n for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]\n for padding in [\"VALID\", \"SAME\"]\n for dspec in [('NHWC', 'HWIO', 'NHWC'),]\n for rng in [jtu.rand_small()]))\n def testConvTranspose2D(self, lhs_shape, rhs_shape, dtype, strides,\n padding, dspec, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_transpose(lhs, rhs, strides, padding,\n dimension_numbers=dspec,\n transpose_kernel=False)\n\n def fun_via_grad(lhs, rhs):\n rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)\n return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,\n dimension_numbers=dspec)\n\n # NB: below just checks for agreement, we're not calling numpy.\n self._CheckAgainstNumpy(fun, fun_via_grad, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rng\": rng, 'dspec': dspec}\n for lhs_shape, rhs_shape in [\n ((b, 10, i), (k, i, j))\n for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n for dtype in [onp.float32]\n for strides in [(1,), (2,), (3,)]\n for padding in [\"VALID\", \"SAME\"]\n for dspec in [('NHC', 'HIO', 'NHC'),]\n for rng in [jtu.rand_small()]))\n 
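# 1-D analogue of the transposed-convolution checks above: ('NHC', 'HIO', 'NHC') is\n  # the rank-3 counterpart of the NHWC/HWIO layouts, with a single spatial dim H.\n  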
def testConvTranspose1D(self, lhs_shape, rhs_shape, dtype, strides,\n padding, dspec, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_transpose(lhs, rhs, strides, padding,\n dimension_numbers=dspec,\n transpose_kernel=False)\n\n def fun_via_grad(lhs, rhs):\n rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)\n return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,\n dimension_numbers=dspec)\n\n # NB: below just checks for agreement, we're not calling numpy.\n self._CheckAgainstNumpy(fun, fun_via_grad, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"rng\": rng}\n for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testDot(self, lhs_shape, rhs_shape, dtype, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n self._CompileAndCheck(lax.dot, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"rng\": rng}\n for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n self._CheckAgainstNumpy(lax.dot, lax_reference.dot, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_lhs_contracting={}_rhs_contracting={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n lhs_contracting, rhs_contracting),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"lhs_contracting\": lhs_contracting, \"rhs_contracting\": rhs_contracting,\n \"rng\": rng}\n for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [\n # these all fail with \"RuntimeError: Unimplemented: Dot with\n # non-standard contracting dimensions not implemented.\"\n # [(3, 5), (2, 5), [1], [1]],\n # [(5, 3), (5, 2), [0], [0]],\n # [(5, 3, 2), (5, 2, 4), [0], [0]],\n # [(5, 3, 2), (5, 2, 4), [0,2], [0,1]],\n # [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]],\n [(3, 2), (2, 4), [1], [0]],\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_small()]))\n def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype,\n lhs_contracting, rhs_contracting, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))\n\n def fun(lhs, rhs):\n return lax.dot_general(lhs, rhs, dimension_numbers)\n\n self._CompileAndCheck(fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n 
\"dimension_numbers\": dimension_numbers, \"rng\": rng}\n for lhs_shape, rhs_shape, dimension_numbers in [\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_small()]))\n def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.dot_general(lhs, rhs, dimension_numbers)\n\n self._CompileAndCheck(fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers, \"rng\": rng}\n for lhs_shape, rhs_shape, dimension_numbers in [\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_small()]))\n def testDotGeneralAgainstNumpy(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers, rng):\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n op = lambda x, y: lax.dot_general(x, y, dimension_numbers)\n numpy_op = lambda x, y: lax_reference.dot_general(x, y, dimension_numbers)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_dtype={}_broadcast_sizes={}\".format(\n shape, onp.dtype(dtype).name, broadcast_sizes),\n \"shape\": shape, \"dtype\": dtype, \"broadcast_sizes\": broadcast_sizes,\n \"rng\": rng}\n for shape in [(), (2, 3)]\n for dtype in default_dtypes\n for broadcast_sizes in [(), (2,), (1, 2)]\n for rng in [jtu.rand_default()]))\n def testBroadcast(self, shape, dtype, broadcast_sizes, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.broadcast(x, broadcast_sizes)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_broadcast_sizes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), broadcast_sizes),\n \"shape\": shape, \"dtype\": dtype, \"broadcast_sizes\": broadcast_sizes,\n \"rng\": rng}\n for shape in [(), (2, 3)]\n for dtype in default_dtypes\n for broadcast_sizes in [(), (2,), (1, 2)]\n for rng in [jtu.rand_default()]))\n def testBroadcastAgainstNumpy(self, shape, dtype, broadcast_sizes, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.broadcast(x, broadcast_sizes)\n numpy_op = lambda x: lax_reference.broadcast(x, broadcast_sizes)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}_bcdims={}\".format(\n jtu.format_shape_dtype_string(inshape, dtype),\n outshape, broadcast_dimensions),\n \"inshape\": inshape, \"dtype\": dtype, \"outshape\": outshape,\n \"dimensions\": broadcast_dimensions, \"rng\": rng}\n for inshape, outshape, broadcast_dimensions in [\n ([2], [2, 2], [0]),\n ([2], [2, 2], [1]),\n ([2], [2, 3], [0]),\n ([], [2, 3], []),\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testBroadcastInDim(self, inshape, dtype, outshape, dimensions, rng):\n args_maker = lambda: 
[rng(inshape, dtype)]\n op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}_bcdims={}\".format(\n jtu.format_shape_dtype_string(inshape, dtype),\n outshape, broadcast_dimensions),\n \"inshape\": inshape, \"dtype\": dtype, \"outshape\": outshape,\n \"dimensions\": broadcast_dimensions, \"rng\": rng}\n for inshape, outshape, broadcast_dimensions in [\n ([2], [2, 2], [0]),\n ([2], [2, 2], [1]),\n ([2], [2, 3], [0]),\n ([], [2, 3], []),\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testBroadcastInDimAgainstNumpy(self, inshape, dtype, outshape,\n dimensions, rng):\n args_maker = lambda: [rng(inshape, dtype)]\n op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)\n numpy_op = lambda x: lax_reference.broadcast_in_dim(x, outshape, dimensions)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"rng\": rng}\n for dtype in default_dtypes\n for arg_shape, out_shape in [\n [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]\n ]\n for rng in [jtu.rand_default()]))\n def testReshape(self, arg_shape, out_shape, dtype, rng):\n args_maker = lambda: [rng(arg_shape, dtype)]\n op = lambda x: lax.reshape(x, out_shape)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"rng\": rng}\n for dtype in default_dtypes\n for arg_shape, out_shape in [\n [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]\n ]\n for rng in [jtu.rand_default()]))\n def testReshapeAgainstNumpy(self, arg_shape, out_shape, dtype, rng):\n args_maker = lambda: [rng(arg_shape, dtype)]\n op = lambda x: lax.reshape(x, out_shape)\n numpy_op = lambda x: lax_reference.reshape(x, out_shape)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_pads={}\"\n .format(jtu.format_shape_dtype_string(shape, dtype), pads),\n \"shape\": shape, \"dtype\": dtype, \"pads\": pads, \"rng\": jtu.rand_small()}\n for shape in [(2, 3)]\n for dtype in default_dtypes\n for pads in [[(1, 2, 1), (0, 1, 0)]]))\n def testPad(self, shape, dtype, pads, rng):\n args_maker = lambda: [rng(shape, dtype)]\n fun = lambda operand: lax.pad(operand, onp.array(0, dtype), pads)\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_pads={}\"\n .format(jtu.format_shape_dtype_string(shape, dtype), pads),\n \"shape\": shape, \"dtype\": dtype, \"pads\": pads, \"rng\": jtu.rand_small()}\n for shape in [(2, 3)]\n for dtype in default_dtypes\n for pads in [[(1, 2, 1), (0, 1, 0)]]))\n def testPadAgainstNumpy(self, shape, dtype, pads, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.pad(x, onp.array(0, dtype), pads)\n numpy_op = lambda x: lax_reference.pad(x, onp.array(0, dtype), pads)\n 
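# Each padding config entry is (low, high, interior); e.g. (1, 2, 1) on an axis of\n    # length 2 yields 1 + 2 + 2 + (2 - 1)*1 = 6 elements, padded with the value 0.\n    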
self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n def testReverse(self):\n rev = api.jit(lambda operand: lax.rev(operand, dimensions))\n\n dimensions = [0]\n self.assertAllClose(onp.array([3, 2, 1]), rev(onp.array([1, 2, 3])),\n check_dtypes=False)\n\n dimensions = [0, 1]\n self.assertAllClose(onp.array([[6, 5, 4], [3, 2, 1]]),\n rev(onp.array([[1, 2, 3], [4, 5, 6]])),\n check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_predshape={}_argshapes={}\".format(\n jtu.format_shape_dtype_string(pred_shape, onp.bool_),\n jtu.format_shape_dtype_string(arg_shape, arg_dtype)),\n \"pred_shape\": pred_shape, \"arg_shape\": arg_shape, \"arg_dtype\": arg_dtype,\n \"rng\": rng}\n for arg_shape in [(), (3,), (2, 3)]\n for pred_shape in ([(), arg_shape] if arg_shape else [()])\n for arg_dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testSelect(self, pred_shape, arg_shape, arg_dtype, rng):\n\n def args_maker():\n return [rng(pred_shape, onp.bool_), rng(arg_shape, arg_dtype),\n rng(arg_shape, arg_dtype)]\n\n return self._CompileAndCheck(lax.select, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_predshape={}_argshapes={}\".format(\n jtu.format_shape_dtype_string(pred_shape, onp.bool_),\n jtu.format_shape_dtype_string(arg_shape, arg_dtype)),\n \"pred_shape\": pred_shape, \"arg_shape\": arg_shape, \"arg_dtype\": arg_dtype,\n \"rng\": rng}\n for arg_shape in [(), (3,), (2, 3)]\n for pred_shape in ([(), arg_shape] if arg_shape else [()])\n for arg_dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testSelectAgainstNumpy(self, pred_shape, arg_shape, arg_dtype, rng):\n\n def args_maker():\n return [rng(pred_shape, onp.bool_), rng(arg_shape, arg_dtype),\n rng(arg_shape, arg_dtype)]\n\n return self._CheckAgainstNumpy(lax.select, lax_reference.select, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_start_indices={}_limit_indices={}_strides={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, limit_indices, strides),\n \"shape\": shape, \"dtype\": dtype, \"starts\": start_indices,\n \"limits\": limit_indices, \"strides\": strides, \"rng\": rng}\n for shape, start_indices, limit_indices, strides in [\n [(3,), (1,), (2,), None],\n [(7,), (4,), (7,), None],\n [(5,), (1,), (5,), (2,)],\n [(8,), (1,), (6,), (2,)],\n [(5, 3), (1, 1), (3, 2), None],\n [(5, 3), (1, 1), (3, 1), None],\n [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],\n [(5, 3), (1, 1), (2, 1), (1, 1)],\n [(5, 3), (1, 1), (5, 3), (2, 1)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testSlice(self, shape, dtype, starts, limits, strides, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.slice(x, starts, limits, strides)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_start_indices={}_limit_indices={}_strides={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, limit_indices, strides),\n \"shape\": shape, \"dtype\": dtype, \"starts\": start_indices,\n \"limits\": limit_indices, \"strides\": strides, \"rng\": rng}\n for shape, start_indices, limit_indices, strides in [\n [(3,), (1,), (2,), None],\n [(7,), (4,), (7,), None],\n [(5,), (1,), (5,), (2,)],\n [(8,), (1,), (6,), (2,)],\n [(5, 3), (1, 1), (3, 2), None],\n [(5, 3), (1, 1), (3, 1), None],\n [(7, 5, 3), (4, 0, 
1), (7, 1, 3), None],\n [(5, 3), (1, 1), (2, 1), (1, 1)],\n [(5, 3), (1, 1), (5, 3), (2, 1)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testSliceAgainstNumpy(self, shape, dtype, starts, limits,\n strides, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.slice(x, starts, limits, strides)\n numpy_op = lambda x: lax_reference.slice(x, starts, limits, strides)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_start_indices={}_size_indices={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, size_indices),\n \"shape\": shape, \"dtype\": dtype, \"start_indices\": start_indices,\n \"size_indices\": size_indices, \"rng\": rng}\n for shape, start_indices, size_indices in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testDynamicSlice(self, shape, dtype, start_indices, size_indices, rng):\n args_maker = lambda: [rng(shape, dtype), onp.array(start_indices)]\n op = lambda x, starts: lax.dynamic_slice(x, starts, size_indices)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_start_indices={}_size_indices={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, size_indices),\n \"shape\": shape, \"dtype\": dtype, \"start_indices\": start_indices,\n \"size_indices\": size_indices, \"rng\": rng}\n for shape, start_indices, size_indices in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testDynamicSliceAgainstNumpy(self, shape, dtype, start_indices,\n size_indices, rng):\n args_maker = lambda: [rng(shape, dtype), onp.array(start_indices)]\n op = lambda x, s: lax.dynamic_slice(x, s, size_indices)\n numpy_op = lambda x, s: lax_reference.dynamic_slice(x, s, size_indices)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_start_indices={}_update_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, update_shape),\n \"shape\": shape, \"dtype\": dtype, \"start_indices\": start_indices,\n \"update_shape\": update_shape, \"rng\": rng}\n for shape, start_indices, update_shape in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testDynamicUpdateSlice(self, shape, dtype, start_indices, update_shape,\n rng):\n\n def args_maker():\n return [rng(shape, dtype), rng(update_shape, dtype),\n onp.array(start_indices)]\n\n self._CompileAndCheck(lax.dynamic_update_slice, args_maker,\n check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_start_indices={}_update_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, update_shape),\n \"shape\": shape, \"dtype\": dtype, \"start_indices\": start_indices,\n \"update_shape\": update_shape, \"rng\": rng}\n for shape, start_indices, update_shape in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testDynamicUpdateSliceAgainstNumpy(self, 
shape, dtype, start_indices,\n update_shape, rng):\n\n def args_maker():\n return [rng(shape, dtype), rng(update_shape, dtype),\n onp.array(start_indices)]\n\n self._CheckAgainstNumpy(lax.dynamic_update_slice,\n lax_reference.dynamic_update_slice, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_perm={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), perm),\n \"shape\": shape, \"dtype\": dtype, \"perm\": perm, \"rng\": rng}\n for shape, perm in [\n [(3, 4), (1, 0)],\n [(3, 4), (0, 1)],\n [(3, 4, 5), (2, 1, 0)],\n [(3, 4, 5), (1, 0, 2)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testTranspose(self, shape, dtype, perm, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.transpose(x, perm)\n self._CompileAndCheck(op, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_perm={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), perm),\n \"shape\": shape, \"dtype\": dtype, \"perm\": perm, \"rng\": rng}\n for shape, perm in [\n [(3, 4), (1, 0)],\n [(3, 4), (0, 1)],\n [(3, 4, 5), (2, 1, 0)],\n [(3, 4, 5), (1, 0, 2)],\n ]\n for dtype in default_dtypes\n for rng in [jtu.rand_default()]))\n def testTransposeAgainstNumpy(self, shape, dtype, perm, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.transpose(x, perm)\n numpy_op = lambda x: lax_reference.transpose(x, perm)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_inshape={}_reducedims={}\"\n .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims),\n \"op\": op, \"init_val\": init_val, \"shape\": shape, \"dtype\": dtype,\n \"dims\": dims, \"rng\": rng}\n for init_val, op, dtypes in [\n (0, lax.add, default_dtypes),\n (-onp.inf, lax.max, float_dtypes),\n (onp.iinfo(onp.int32).min, lax.max, [onp.int32]),\n (onp.iinfo(onp.int64).min, lax.max, [onp.int64]),\n (onp.iinfo(onp.uint32).min, lax.max, [onp.uint32]),\n (onp.iinfo(onp.uint64).min, lax.max, [onp.uint64]),\n (onp.inf, lax.min, float_dtypes),\n (onp.iinfo(onp.int32).max, lax.min, [onp.int32]),\n (onp.iinfo(onp.int64).max, lax.min, [onp.int64]),\n (onp.iinfo(onp.uint32).max, lax.min, [onp.uint32]),\n (onp.iinfo(onp.uint64).max, lax.min, [onp.uint64]),\n ]\n for dtype in dtypes\n for shape, dims in [\n [(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)],\n [(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)]\n ]\n for rng in [jtu.rand_small()]))\n def testReduce(self, op, init_val, shape, dtype, dims, rng):\n init_val = onp.asarray(init_val, dtype=dtype)\n fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims)\n args_maker = lambda: [rng(shape, dtype), init_val]\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n # we separately test the version that uses a concrete init_val because it\n # can hit different code paths\n fun = lambda operand: lax.reduce(operand, init_val, op, dims)\n args_maker = lambda: [rng(shape, dtype)]\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_dtype={}_padding={}\"\n .format(op.__name__, onp.dtype(dtype).name, padding),\n \"op\": op, \"init_val\": init_val, \"dtype\": dtype, \"padding\": padding,\n \"rng\": rng}\n for init_val, op, dtypes in [\n (0, lax.add, [onp.float32]),\n (-onp.inf, lax.max, [onp.float32]),\n (onp.inf, lax.min, [onp.float32]),\n ]\n 
for dtype in dtypes\n for padding in [\"VALID\", \"SAME\"]\n for rng in [jtu.rand_small()]))\n def testReduceWindow(self, op, init_val, dtype, padding, rng):\n init_val = onp.asarray(init_val, dtype=dtype)\n\n all_configs = itertools.chain(\n itertools.product(\n [(4, 6)],\n [(2, 1), (1, 2)],\n [(1, 1), (2, 1), (1, 2)]),\n itertools.product(\n [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],\n [(1, 2, 2, 1), (1, 1, 1, 1)]))\n\n def fun(operand, init_val):\n return lax.reduce_window(operand, init_val, op, dims, strides, padding)\n\n # pylint: disable=cell-var-from-loop\n for shape, dims, strides in all_configs:\n args_maker = lambda: [rng(shape, dtype), init_val]\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n # pylint: enable=cell-var-from-loop\n\n # we separately test the version that uses a concrete init_val because it\n # can hit different code paths\n def fun(operand):\n return lax.reduce_window(operand, init_val, op, dims, strides, padding)\n\n # pylint: disable=cell-var-from-loop\n for shape, dims, strides in all_configs:\n args_maker = lambda: [rng(shape, dtype)]\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n # pylint: enable=cell-var-from-loop\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for dtype in [onp.float32, onp.int32, onp.uint32]\n for shape in [(5,), (5, 7)]\n for axis in [-1, len(shape) - 1]\n for rng in [jtu.rand_default()]))\n def testSort(self, shape, dtype, axis, rng):\n args_maker = lambda: [rng(shape, dtype)]\n fun = lambda x: lax.sort(x, axis)\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for dtype in [onp.float32, onp.int32, onp.uint32]\n for shape in [(5,), (5, 7)]\n for axis in [-1, len(shape) - 1]\n for rng in [jtu.rand_default()]))\n def testSortAgainstNumpy(self, shape, dtype, axis, rng):\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.sort(x, axis)\n numpy_op = lambda x: lax_reference.sort(x, axis)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_keyshape={}_valshape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, key_dtype),\n jtu.format_shape_dtype_string(shape, val_dtype),\n axis),\n \"rng\": rng, \"shape\": shape,\n \"key_dtype\": key_dtype, \"val_dtype\": val_dtype, \"axis\": axis}\n for key_dtype in [onp.float32, onp.int32, onp.uint32]\n for val_dtype in [onp.float32, onp.int32, onp.uint32]\n for shape in [(3,), (5, 3)]\n for axis in [-1, len(shape) - 1]\n for rng in [jtu.rand_default()]))\n def testSortKeyVal(self, shape, key_dtype, val_dtype, axis, rng):\n # This test relies on the property that wherever keys are tied, values are\n # too, since we don't guarantee the same ordering of values with equal keys.\n # To avoid that case, we generate unique keys (globally in the key array).\n perm_rng = onp.random.RandomState(0)\n def args_maker():\n flat_keys = onp.arange(onp.prod(shape, dtype=int), dtype=key_dtype)\n keys = perm_rng.permutation(flat_keys).reshape(shape)\n values = rng(shape, val_dtype)\n return keys, values\n\n fun = lambda keys, values: lax.sort_key_val(keys, values, axis)\n 
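# sort_key_val sorts the keys along `axis` and applies the same permutation to the\n    # values, so globally unique keys keep the expected output independent of how\n    # ties would otherwise be broken.\n    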
self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_keyshape={}_valshape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, key_dtype),\n jtu.format_shape_dtype_string(shape, val_dtype),\n axis),\n \"rng\": rng, \"shape\": shape,\n \"key_dtype\": key_dtype, \"val_dtype\": val_dtype, \"axis\": axis}\n for key_dtype in [onp.float32, onp.int32, onp.uint32]\n for val_dtype in [onp.float32, onp.int32, onp.uint32]\n for shape in [(3,), (5, 3)]\n for axis in [-1, len(shape) - 1]\n for rng in [jtu.rand_default()]))\n def testSortKeyValAgainstNumpy(self, shape, key_dtype, val_dtype, axis, rng):\n # This test relies on the property that wherever keys are tied, values are\n # too, since we don't guarantee the same ordering of values with equal keys.\n # To avoid that case, we generate unique keys (globally in the key array).\n perm_rng = onp.random.RandomState(0)\n def args_maker():\n flat_keys = onp.arange(onp.prod(shape, dtype=int), dtype=key_dtype)\n keys = perm_rng.permutation(flat_keys).reshape(shape)\n values = rng(shape, val_dtype)\n return keys, values\n\n op = lambda ks, vs: lax.sort_key_val(ks, vs, axis)\n numpy_op = lambda ks, vs: lax_reference.sort_key_val(ks, vs, axis)\n self._CheckAgainstNumpy(op, numpy_op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"rng\": rng}\n for lhs_shape, rhs_shape in [((3, 2), (2, 4)),\n ((5, 3, 2), (5, 2, 4)),\n ((1, 2, 2, 3), (1, 2, 3, 1))]\n for dtype in float_dtypes\n for rng in [jtu.rand_small()]))\n def testBatchMatMul(self, lhs_shape, rhs_shape, dtype, rng):\n arg_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n self._CompileAndCheck(lax.batch_matmul, arg_maker, check_dtypes=True)\n\n def testCollapse(self):\n\n @api.jit\n def collapse_first_two(x):\n return lax.collapse(x, 0, 2)\n\n self.assertEqual((6,), collapse_first_two(onp.zeros((2, 3))).shape)\n self.assertEqual((6, 4), collapse_first_two(onp.zeros((2, 3, 4))).shape)\n self.assertEqual((2, 3, 4),\n collapse_first_two(onp.zeros((1, 2, 3, 4))).shape)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), idxs, axes),\n \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"axes\": axes, \"rng\": rng}\n for dtype in all_dtypes\n for shape, idxs, axes in [\n [(3, 4, 5), (onp.array([0, 2, 1]),), (0,)],\n [(3, 4, 5), (onp.array([-1, -2]),), (0,)],\n [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 1)],\n [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 2)],\n ]\n for rng in [jtu.rand_default()]))\n def testIndexTake(self, shape, dtype, idxs, axes, rng):\n rand_idxs = lambda: tuple(rng(e.shape, e.dtype) for e in idxs)\n args_maker = lambda: [rng(shape, dtype), rand_idxs()]\n fun = lambda src, idxs: lax.index_take(src, idxs, axes)\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_dnums={}_slice_sizes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), idxs, dnums,\n slice_sizes),\n \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"dnums\": dnums,\n \"slice_sizes\": slice_sizes, \"rng\": rng, 
\"rng_idx\": rng_idx}\n for dtype in all_dtypes\n for shape, idxs, dnums, slice_sizes in [\n ((5,), onp.array([[0], [2]]), lax.GatherDimensionNumbers(\n offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),\n (1,)),\n ((10,), onp.array([[0], [0], [0]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),\n (2,)),\n ((10, 5,), onp.array([[0], [2], [1]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),\n (1, 3)),\n ((10, 5), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),\n (1, 3)),\n ]\n for rng_idx in [jtu.rand_int(max(shape))]\n for rng in [jtu.rand_default()]))\n def testGather(self, shape, dtype, idxs, dnums, slice_sizes, rng, rng_idx):\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n args_maker = lambda: [rng(shape, dtype), rand_idxs()]\n fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums, \"rng\": rng,\n \"rng_idx\": rng_idx}\n for dtype in float_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ]\n for rng_idx in [jtu.rand_int(max(arg_shape))]\n for rng in [jtu.rand_default()]))\n def testScatterAdd(self, arg_shape, dtype, idxs, update_shape, dnums, rng,\n rng_idx):\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),\n rng(update_shape, dtype)]\n fun = partial(lax.scatter_add, dimension_numbers=dnums)\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums, \"rng\": rng,\n \"rng_idx\": rng_idx}\n for dtype in float_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ]\n for rng_idx in [jtu.rand_int(max(arg_shape))]\n for rng in [jtu.rand_default()]))\n def testScatter(self, arg_shape, dtype, idxs, update_shape, dnums, rng,\n 
rng_idx):\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),\n rng(update_shape, dtype)]\n fun = partial(lax.scatter, dimension_numbers=dnums)\n self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n\n\nclass DeviceConstantTest(jtu.JaxTestCase):\n def _CheckDeviceConstant(self, make_const, expected):\n # check casting to ndarray works\n asarray_result = onp.asarray(make_const())\n\n # check passing as an argument works (should hit constant handler)\n zero = onp.array(0, expected.dtype)\n argument_result = lax.add(zero, make_const())\n\n # check looping into a compiled computation works\n jit_result = api.jit(lambda x: lax.add(x, make_const()))(zero)\n\n # ensure they're all the same\n self.assertAllClose(asarray_result, expected, check_dtypes=True)\n self.assertAllClose(argument_result, expected, check_dtypes=True)\n self.assertAllClose(jit_result, expected, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_fill={}\".format(\n jtu.format_shape_dtype_string(shape, dtype) if dtype else shape,\n fill_value),\n \"shape\": shape, \"dtype\": dtype, \"fill_value\": fill_value}\n for dtype in itertools.chain(default_dtypes, [None])\n for shape in [(), (3,), (2, 3), (2, 3, 4)]\n for fill_value in [0, 1, onp.pi]))\n def testFilledConstant(self, shape, fill_value, dtype):\n make_const = lambda: lax.full(shape, fill_value, dtype)\n expected = onp.full(shape, fill_value, dtype)\n self._CheckDeviceConstant(make_const, expected)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_dim={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), dimension),\n \"shape\": shape, \"dtype\": dtype, \"dimension\": dimension}\n for dtype in default_dtypes\n for shape in [(), (3,), (2, 3), (2, 3, 4)]\n for dimension in range(len(shape))))\n def testIotaConstant(self, dtype, shape, dimension):\n make_const = lambda: lax.broadcasted_iota(dtype, shape, dimension)\n\n arr = onp.arange(shape[dimension], dtype=xla_bridge.canonicalize_dtype(dtype))\n singleton_shape = [1] * len(shape)\n singleton_shape[dimension] = shape[dimension]\n expected = onp.broadcast_to(arr.reshape(singleton_shape), shape)\n\n self._CheckDeviceConstant(make_const, expected)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axes),\n \"shape\": shape, \"dtype\": dtype, \"axes\": axes}\n for dtype in default_dtypes\n for shape, axes in [\n [(2, 3), (0, 1)],\n [(2, 3, 4), (0, 1)],\n [(2, 3, 4), (0, 2)],\n [(2, 3, 4), (1, 2)],\n [(2, 3, 4), (0, 1, 2)],\n [(2, 3, 4, 2), (0, 1, 2)],\n [(2, 3, 4, 2), (0, 2, 3)],\n ]))\n def testEyeConstant(self, dtype, shape, axes):\n make_const = lambda: lax.broadcasted_eye(dtype, shape, axes)\n\n # don't check the asarray case, just assume it's right\n expected = onp.asarray(make_const())\n\n self._CheckDeviceConstant(make_const, expected)\n\n\nGradTestSpec = collections.namedtuple(\n \"GradTestSpec\", [\"op\", \"nargs\", \"order\", \"rng\", \"dtypes\"])\n\nLAX_GRAD_OPS = [\n GradTestSpec(lax.neg, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.floor, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64]),\n GradTestSpec(lax.ceil, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64]),\n GradTestSpec(lax.round, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64]),\n # 
GradTestSpec(lax.rem, nargs=2, order=2, rng=jtu.rand_default(),\n # dtypes=[onp.float64]), # TODO(mattjj): enable\n\n GradTestSpec(lax.exp, nargs=1, order=2, rng=jtu.rand_small(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.expm1, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.log, nargs=1, order=2, rng=jtu.rand_positive(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.log1p, nargs=1, order=2, rng=jtu.rand_positive(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.tanh, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.sin, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.cos, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n # TODO(proteneer): atan2 input is already a representation of a\n # complex number. Need to think harder about what this even means\n # if each input itself is a complex number.\n GradTestSpec(lax.atan2, nargs=2, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64]),\n\n GradTestSpec(lax.erf, nargs=1, order=2, rng=jtu.rand_small(),\n dtypes=[onp.float64]),\n GradTestSpec(lax.erfc, nargs=1, order=2, rng=jtu.rand_small(),\n dtypes=[onp.float64]),\n GradTestSpec(lax.erf_inv, nargs=1, order=2, rng=jtu.rand_small(),\n dtypes=[onp.float64]),\n # GradTestSpec(lax.lgamma, nargs=1, order=2, rng=jtu.rand_small(),\n # dtypes=[onp.float64]), # TODO(mattjj): enable\n\n GradTestSpec(lax.real, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.complex64]),\n GradTestSpec(lax.imag, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.complex64]),\n # GradTestSpec(lax.complex, nargs=2, order=2, rng=jtu.rand_default(),\n # dtypes=[onp.float32]), # TODO(mattjj): enable\n GradTestSpec(lax.conj, nargs=1, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float32, onp.complex64]),\n GradTestSpec(lax.abs, nargs=1, order=2, rng=jtu.rand_positive(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.pow, nargs=2, order=2, rng=jtu.rand_positive(),\n dtypes=[onp.float64, onp.complex64]),\n\n GradTestSpec(lax.add, nargs=2, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.sub, nargs=2, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.mul, nargs=2, order=2, rng=jtu.rand_default(),\n dtypes=[onp.float64, onp.complex64]),\n GradTestSpec(lax.div, nargs=2, order=1, rng=jtu.rand_not_small(),\n dtypes=[onp.float64, onp.complex64]),\n\n GradTestSpec(lax.max, nargs=2, order=2, rng=jtu.rand_some_equal(),\n dtypes=[onp.float64]),\n GradTestSpec(lax.min, nargs=2, order=2, rng=jtu.rand_some_equal(),\n dtypes=[onp.float64]),\n]\n\n\ndef check_grads_bilinear(f, args, order, atol=None, rtol=None):\n # Can use large eps to make up for numerical inaccuracies since the op is\n # bilinear (relying on the fact that we only check one arg at a time)\n lhs, rhs = args\n check_grads(lambda lhs: f(lhs, rhs), (lhs,), order, atol, rtol, eps=1.)\n check_grads(lambda rhs: f(lhs, rhs), (rhs,), order, atol, rtol, eps=1.)\n\n\nclass LaxAutodiffTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.op.__name__, shapes, itertools.repeat(dtype)),\n \"op\": rec.op, \"rng\": rec.rng, \"shapes\": shapes, \"dtype\": dtype,\n \"order\": rec.order}\n for shape_group in compatible_shapes\n for shapes 
in CombosWithReplacement(shape_group, rec.nargs)\n for dtype in rec.dtypes)\n for rec in LAX_GRAD_OPS))\n def testOpGrad(self, op, rng, shapes, dtype, order):\n if FLAGS.jax_test_dut and FLAGS.jax_test_dut.startswith(\"tpu\"):\n if op is lax.pow:\n raise SkipTest(\"pow grad imprecise on tpu\")\n tol = 1e-1 if num_float_bits(dtype) == 32 else None\n args = tuple(rng(shape, dtype) for shape in shapes)\n check_grads(op, args, order, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\".format(\n jtu.dtype_str(from_dtype), jtu.dtype_str(to_dtype)),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype, \"rng\": rng}\n for from_dtype, to_dtype in itertools.product(\n float_dtypes + complex_dtypes, repeat=2)\n for rng in [jtu.rand_default()]))\n def testConvertElementTypeGrad(self, from_dtype, to_dtype, rng):\n args = (rng((2, 3), from_dtype),)\n convert_element_type = lambda x: lax.convert_element_type(x, to_dtype)\n check_grads(convert_element_type, args, 1, 1e-3, 1e-3, 1e-3)\n check_grads(convert_element_type, args, 2, 1e-3, 1e-3, 1e-3)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_min_shape={}_operand_shape={}_max_shape={}\".format(\n jtu.format_shape_dtype_string(min_shape, dtype),\n jtu.format_shape_dtype_string(operand_shape, dtype),\n jtu.format_shape_dtype_string(max_shape, dtype)),\n \"min_shape\": min_shape, \"operand_shape\": operand_shape,\n \"max_shape\": max_shape, \"dtype\": dtype, \"rng\": rng}\n for min_shape, operand_shape, max_shape in [\n [(), (), ()],\n [(), (2, 3), ()],\n [(2, 3), (2, 3), (2, 3)],\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testClampGrad(self, min_shape, operand_shape, max_shape, dtype, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n shapes = [min_shape, operand_shape, max_shape]\n min, operand, max = (rng(shape, dtype) for shape in shapes)\n min, max = onp.minimum(min, max), onp.maximum(min, max) # broadcast\n check_grads(lax.clamp, (min, operand, max), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dim={}_baseshape=[{}]_dtype={}_narrs={}\".format(\n dim, \",\".join(str(d) for d in base_shape), onp.dtype(dtype).name,\n num_arrs),\n \"dim\": dim, \"base_shape\": base_shape, \"dtype\": dtype,\n \"num_arrs\": num_arrs, \"rng\": rng}\n for num_arrs in [3]\n for dtype in float_dtypes\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for dim in range(len(base_shape))\n for rng in [jtu.rand_default()]))\n def testConcatenateGrad(self, dim, base_shape, dtype, num_arrs, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]\n operands = tuple(rng(shape, dtype) for shape in shapes)\n concatenate = lambda *args: lax.concatenate(args, dim)\n check_grads(concatenate, operands, 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rng\": rng,}\n for lhs_shape, rhs_shape, all_strides in itertools.chain(\n [((b, i, 3, 4), (j, i, 1, 2), [(1, 1), (1, 2), (2, 1)])\n for b, i, j in itertools.product([2, 3], 
repeat=3)],\n [((4, 2, 1), (3, 2, 1), [(1,)])])\n for strides in all_strides\n for dtype in [onp.float32]\n for padding in [\"VALID\", \"SAME\"]\n for rng in [jtu.rand_small()]))\n @jtu.skip_on_devices(\"tpu\")\n def testConvGrad(self, lhs_shape, rhs_shape, dtype, strides, padding, rng):\n lhs = rng(lhs_shape, dtype)\n rhs = rng(rhs_shape, dtype)\n conv = partial(lax.conv, window_strides=strides, padding=padding)\n check_grads_bilinear(conv, (lhs, rhs), order=2, atol=1e-2, rtol=1e-2)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_\"\n \"rhs_dilation={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dil, rhs_dil),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dil\": lhs_dil,\n \"rhs_dil\": rhs_dil, \"rng\": rng}\n for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in\n itertools.chain(\n [((b, i, 3, 4), (j, i, 1, 2), [(1, 1), (1, 2), (2, 1)],\n [((0, 0), (0, 0)), ((-1, 0), (0, -1)), ((1, 0), (0, 1))],\n [(1, 1), (2, 1)], [(1, 1)])\n for b, i, j in itertools.product([2, 3], repeat=3)],\n [((4, 2, 1), (3, 2, 1), [(1,)], [((1, 1),), ((0, 0),)],\n [(1,), (2,)], [(1,), (2,)])])\n for strides in all_strides\n for rhs_dil in rhs_dils\n for lhs_dil in lhs_dils\n for dtype in [onp.float32]\n for padding in all_pads\n for rng in [jtu.rand_small()]))\n @jtu.skip_on_devices(\"tpu\")\n def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, strides,\n padding, lhs_dil, rhs_dil, rng):\n lhs = rng(lhs_shape, dtype)\n rhs = rng(rhs_shape, dtype)\n conv = partial(lax.conv_with_general_padding, window_strides=strides,\n padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil)\n check_grads_bilinear(conv, (lhs, rhs), order=2, atol=1e-2, rtol=1e-2)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_\"\n \"rhs_dilation={}_dims={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dil, rhs_dil, \",\".join(dim_nums)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dil\": lhs_dil,\n \"rhs_dil\": rhs_dil, \"rng\": rng, \"dimension_numbers\": dim_nums,\n \"perms\": perms}\n for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in [\n ((b, i, 6, 7), # lhs_shape\n (j, i, 1, 2), # rhs_shape\n [(1, 1), (1, 2), (2, 1)], # strides\n [((0, 0), (0, 0)), ((1, 0), (0, 1)), ((0, -1), (0, 0))], # pads\n [(1, 1), (2, 1)], # lhs_dils\n [(1, 1), (2, 2)]) # rhs_dils\n for b, i, j in itertools.product([1, 2], repeat=3)]\n for strides in all_strides\n for rhs_dil in rhs_dils\n for lhs_dil in lhs_dils\n for dtype in [onp.float32]\n for padding in all_pads\n for rng in [jtu.rand_default()]\n for dim_nums, perms in [\n ((\"NCHW\", \"OIHW\", \"NCHW\"), ([0, 1, 2, 3], [0, 1, 2, 3])),\n ((\"NHWC\", \"HWIO\", \"NHWC\"), ([0, 2, 3, 1], [2, 3, 1, 0])),\n ((\"NHWC\", \"OIHW\", \"NCHW\"), ([0, 2, 3, 1], [0, 1, 2, 3]))\n ]))\n @jtu.skip_on_devices(\"tpu\")\n def testConvGeneralDilatedGrad(self, lhs_shape, rhs_shape, dtype, strides,\n padding, lhs_dil, rhs_dil, dimension_numbers,\n perms, rng):\n tol = 1e-1 if onp.finfo(dtype).bits == 32 else 1e-3\n lhs_perm, rhs_perm = perms # 
permute to compatible shapes\n lhs = onp.transpose(rng(lhs_shape, dtype), lhs_perm)\n rhs = onp.transpose(rng(rhs_shape, dtype), rhs_perm)\n conv = partial(lax.conv_general_dilated, window_strides=strides,\n padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil,\n dimension_numbers=dimension_numbers)\n check_grads_bilinear(conv, (lhs, rhs), order=2, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"rng\": jtu.rand_default()}\n for lhs_shape in [(2,), (3, 2)] for rhs_shape in [(2,), (2, 4)]\n for dtype in float_dtypes))\n @jtu.skip_on_flag(\"jax_xla_backend\", \"xrt\")\n @jtu.skip_on_devices(\"tpu\")\n def testDotGrad(self, lhs_shape, rhs_shape, dtype, rng):\n tol = 1e-1 if num_float_bits(dtype) == 32 else 1e-3\n lhs = rng(lhs_shape, dtype)\n rhs = rng(rhs_shape, dtype)\n check_grads_bilinear(lax.dot, (lhs, rhs), order=2, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers, \"rng\": jtu.rand_small()}\n for lhs_shape, rhs_shape, dimension_numbers in [\n ((3, 2), (2, 4), (([1], [0]), ([], []))),\n ((3, 5), (2, 5), (([1], [1]), ([], []))),\n ((5, 3), (5, 2), (([0], [0]), ([], []))),\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),\n ]\n for dtype in float_dtypes))\n @jtu.skip_on_devices(\"tpu\")\n def testDotGeneralContractAndBatchGrads(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers, rng):\n tol = 1e-1 if onp.finfo(dtype).bits == 32 else 1e-2\n lhs = rng(lhs_shape, dtype)\n rhs = rng(rhs_shape, dtype)\n dot_general = partial(lax.dot_general, dimension_numbers=dimension_numbers)\n check_grads_bilinear(dot_general, (lhs, rhs), order=2, atol=tol, rtol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_dtype={}_broadcast_sizes={}\".format(\n shape, onp.dtype(dtype).name, broadcast_sizes),\n \"shape\": shape, \"dtype\": dtype, \"broadcast_sizes\": broadcast_sizes,\n \"rng\": rng}\n for shape in [(), (2, 3)]\n for dtype in float_dtypes\n for broadcast_sizes in [(), (2,), (1, 2)]\n for rng in [jtu.rand_default()]))\n def testBroadcastGrad(self, shape, dtype, broadcast_sizes, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n args = (rng(shape, dtype),)\n broadcast = lambda x: lax.broadcast(x, broadcast_sizes)\n check_grads(broadcast, args, 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}_bcdims={}\".format(\n jtu.format_shape_dtype_string(inshape, dtype),\n outshape, broadcast_dimensions),\n \"inshape\": inshape, \"dtype\": dtype, \"outshape\": outshape,\n \"dimensions\": broadcast_dimensions, \"rng\": rng}\n for inshape, outshape, broadcast_dimensions in [\n ([2], [2, 2], [0]),\n ([2], [2, 2], [1]),\n ([2], [2, 3], [0]),\n ([], [2, 3], []),\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testBroadcastInDimGrad(self, inshape, dtype, outshape, dimensions, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = 
rng(inshape, dtype)\n broadcast_in_dim = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)\n check_grads(broadcast_in_dim, (operand,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype,\n \"rng\": rng}\n for dtype in float_dtypes\n for arg_shape, out_shape in [\n [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]\n ]\n for rng in [jtu.rand_default()]))\n def testReshapeGrad(self, arg_shape, out_shape, dtype, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = rng(arg_shape, dtype)\n reshape = lambda x: lax.reshape(x, out_shape)\n check_grads(reshape, (operand,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_pads={}\"\n .format(jtu.format_shape_dtype_string(shape, dtype), pads),\n \"shape\": shape, \"dtype\": dtype, \"pads\": pads, \"rng\": jtu.rand_small()}\n for shape in [(2, 3)]\n for dtype in float_dtypes\n for pads in [[(1, 2, 1), (0, 1, 0)], [(-1, 0, 0), (-1, 0, 2)]]))\n def testPadGrad(self, shape, dtype, pads, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n\n operand = rng(shape, dtype)\n pad = lambda operand: lax.pad(operand, onp.array(0, dtype), pads)\n check_grads(pad, (operand,), 2, tol, tol, tol)\n\n operand = rng(shape, dtype)\n padding_value = onp.array(0., dtype)\n pad = lambda operand, padding_value: lax.pad(operand, padding_value, pads)\n check_grads(pad, (operand, padding_value), 2, tol, tol, tol)\n\n def testReverseGrad(self):\n rev = lambda operand: lax.rev(operand, dimensions)\n\n dimensions = [0]\n check_grads(rev, (onp.array([3., 2., 1.]),), 2)\n\n dimensions = [0, 1]\n check_grads(rev, (onp.array([[6., 5., 4.], [3., 2., 1.]]),), 2)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_predshape={}_argshapes={}\".format(\n jtu.format_shape_dtype_string(pred_shape, onp.bool_),\n jtu.format_shape_dtype_string(arg_shape, dtype)),\n \"pred_shape\": pred_shape, \"arg_shape\": arg_shape, \"dtype\": dtype,\n \"rng\": rng}\n for arg_shape in [(), (3,), (2, 3)]\n for pred_shape in ([(), arg_shape] if arg_shape else [()])\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testSelectGrad(self, pred_shape, arg_shape, dtype, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n pred = rng(pred_shape, onp.bool_)\n on_true = rng(arg_shape, dtype)\n on_false = rng(arg_shape, dtype)\n select = lambda on_true, on_false: lax.select(pred, on_true, on_false)\n check_grads(select, (on_true, on_false), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_start_indices={}_limit_indices={}_strides={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, limit_indices, strides),\n \"shape\": shape, \"dtype\": dtype, \"starts\": start_indices,\n \"limits\": limit_indices, \"strides\": strides, \"rng\": rng}\n for shape, start_indices, limit_indices, strides in [\n [(3,), (1,), (2,), None],\n [(7,), (4,), (7,), None],\n [(5,), (1,), (5,), (2,)],\n [(8,), (1,), (6,), (2,)],\n [(5, 3), (1, 1), (3, 2), None],\n [(5, 3), (1, 1), (3, 1), None],\n [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],\n [(5, 3), (1, 1), (2, 1), (1, 1)],\n [(5, 3), (1, 1), (5, 3), (2, 1)],\n ]\n for dtype in float_dtypes\n for rng in 
[jtu.rand_default()]))\n def testSliceGrad(self, shape, dtype, starts, limits, strides, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = rng(shape, dtype)\n slice = lambda x: lax.slice(x, starts, limits, strides)\n check_grads(slice, (operand,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_start_indices={}_size_indices={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, size_indices),\n \"shape\": shape, \"dtype\": dtype, \"start_indices\": start_indices,\n \"size_indices\": size_indices, \"rng\": rng}\n for shape, start_indices, size_indices in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testDynamicSliceGrad(self, shape, dtype, start_indices, size_indices,\n rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = rng(shape, dtype)\n dynamic_slice = lambda x: lax.dynamic_slice(x, start_indices, size_indices)\n check_grads(dynamic_slice, (operand,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_start_indices={}_update_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n start_indices, update_shape),\n \"shape\": shape, \"dtype\": dtype, \"start_indices\": start_indices,\n \"update_shape\": update_shape, \"rng\": rng}\n for shape, start_indices, update_shape in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testDynamicUpdateSliceGrad(self, shape, dtype, start_indices,\n update_shape, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = rng(shape, dtype)\n update = rng(update_shape, dtype)\n start_indices = onp.array(start_indices)\n\n dus = lambda x, y: lax.dynamic_update_slice(x, y, start_indices)\n check_grads(dus, (operand, update), 2, tol, tol, tol)\n\n dus = lambda x: lax.dynamic_update_slice(x, update, start_indices)\n check_grads(dus, (operand,), 2, tol, tol, tol)\n\n dus = lambda y: lax.dynamic_update_slice(operand, y, start_indices)\n check_grads(dus, (update,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_perm={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), perm),\n \"shape\": shape, \"dtype\": dtype, \"perm\": perm, \"rng\": rng}\n for shape, perm in [\n [(3, 4), (1, 0)],\n [(3, 4), (0, 1)],\n [(3, 4, 5), (2, 1, 0)],\n [(3, 4, 5), (1, 0, 2)],\n ]\n for dtype in float_dtypes\n for rng in [jtu.rand_default()]))\n def testTransposeGrad(self, shape, dtype, perm, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = rng(shape, dtype)\n transpose = lambda x: lax.transpose(x, perm)\n check_grads(transpose, (operand,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_inshape={}_reducedims={}\"\n .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims),\n \"op\": op, \"init_val\": init_val, \"shape\": shape, \"dtype\": dtype,\n \"dims\": dims, \"rng\": rng}\n for init_val, op, dtypes in [\n (0, lax.add, inexact_dtypes),\n (-onp.inf, lax.max, inexact_dtypes),\n (onp.inf, lax.min, inexact_dtypes),\n ]\n for dtype in dtypes\n for shape, dims in [\n [(3, 4, 5), (0,)],\n [(3, 4, 5), (1, 2)],\n [(3, 4, 5), (0, 2)],\n [(3, 4, 5), (0, 1, 2)]\n ]\n for rng in [jtu.rand_small()]))\n 
def testReduceGrad(self, op, init_val, shape, dtype, dims, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = rng(shape, dtype)\n init_val = onp.asarray(init_val, dtype=dtype)\n reduce = lambda operand: lax.reduce(operand, init_val, op, dims)\n check_grads(reduce, (operand,), 1, tol, tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_dtype={}_padding={}\"\n .format(op.__name__, onp.dtype(dtype).name, padding),\n \"op\": op, \"init_val\": init_val, \"dtype\": dtype, \"padding\": padding,\n \"rng\": rng}\n for init_val, op, dtypes, rng in [\n (0, lax.add, [onp.float32], jtu.rand_small()),\n (-onp.inf, lax.max, [onp.float32], jtu.rand_default()),\n (onp.inf, lax.min, [onp.float32], jtu.rand_default()),\n ]\n for dtype in dtypes\n for padding in [\"VALID\", \"SAME\"]\n for rng in [jtu.rand_default()]))\n def testReduceWindowGrad(self, op, init_val, dtype, padding, rng):\n init_val = onp.asarray(init_val, dtype=dtype)\n\n # We need this conditional and the corresponding loop logic to be in the\n # test method, rather than at the parameterized test level, because it\n # depends on FLAGS for the device under test.\n # TODO(b/31565929): enable when fixed.\n if FLAGS.jax_test_dut == \"tpu\" and op is not lax.add:\n all_configs = [((6, 5, 4, 3), (2, 2, 1, 1), (1, 2, 1, 1))]\n else:\n all_configs = itertools.chain(\n itertools.product(\n [(4, 6)], # shapes\n [(2, 1), (1, 2)], # window_dimensions\n [(1, 1), (2, 1), (1, 2)] # strides\n ),\n itertools.product(\n [(3, 2, 4, 6)], # shapes\n [(1, 1, 2, 1), (2, 1, 2, 1)], # window_dimensions\n [(1, 2, 2, 1), (1, 1, 1, 1)]), # strides\n )\n\n def fun(operand):\n return lax.reduce_window(operand, init_val, op, dims, strides, padding)\n\n # pylint: disable=cell-var-from-loop\n for shape, dims, strides in all_configs:\n operand = rng(shape, dtype)\n if op is not lax.add:\n # this test can fail if there are duplicates in operand\n self.assertEqual(onp.unique(operand).size, operand.size,\n msg=\"test requires operand elements to be unique.\")\n jtu.check_vjp(fun, partial(api.vjp, fun), (operand,), 1e-2, 1e-2, 1e-2)\n check_grads(fun, (operand,), 3, 1e-2, 1e-2, 1e-2)\n # pylint: enable=cell-var-from-loop\n\n # TODO(b/205052657): enable more tests when supported\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis),\n \"rng\": rng, \"shape\": shape, \"dtype\": dtype, \"axis\": axis}\n for dtype in [onp.float32]\n for shape in [(5,), (5, 7)]\n for axis in [len(shape) - 1]\n for rng in [jtu.rand_default()]))\n def testSortGrad(self, shape, dtype, axis, rng):\n tol = 1e-2 if onp.finfo(dtype).bits == 32 else None\n operand = rng(shape, dtype)\n sort = lambda x: lax.sort(x, axis)\n check_grads(sort, (operand,), 2, tol, tol, tol)\n\n # TODO(b/205052657): enable more tests when supported\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_keyshape={}_valshape={}_axis={}\".format(\n jtu.format_shape_dtype_string(shape, key_dtype),\n jtu.format_shape_dtype_string(shape, val_dtype),\n axis),\n \"rng\": rng, \"shape\": shape,\n \"key_dtype\": key_dtype, \"val_dtype\": val_dtype, \"axis\": axis}\n for key_dtype in [onp.float32]\n for val_dtype in [onp.float32]\n for shape in [(3,), (5, 3)]\n for axis in [len(shape) - 1]\n for rng in [jtu.rand_default()]))\n def testSortKeyValGrad(self, shape, key_dtype, val_dtype, axis, rng):\n # This test relies on the property that wherever keys 
are tied, values are\n # too, since we don't guarantee the same ordering of values with equal keys.\n # To avoid that case, we generate unique keys (globally in the key array).\n perm_rng = onp.random.RandomState(0)\n def args_maker():\n flat_keys = onp.arange(onp.prod(shape, dtype=int), dtype=key_dtype)\n keys = perm_rng.permutation(flat_keys).reshape(shape)\n values = rng(shape, val_dtype)\n return keys, values\n keys, values = args_maker()\n\n fun = lambda keys, values: lax.sort_key_val(keys, values, axis)\n check_grads(fun, (keys, values), 2, 1e-2, 1e-2, 1e-2)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), idxs, axes),\n \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"axes\": axes, \"rng\": rng}\n for dtype in float_dtypes\n for shape, idxs, axes in [\n [(3, 4, 5), (onp.array([0, 2, 1]),), (0,)],\n [(3, 4, 5), (onp.array([-1, -2]),), (0,)],\n [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 1)],\n [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 2)],\n ]\n for rng in [jtu.rand_default()]))\n def testIndexTakeGrad(self, shape, dtype, idxs, axes, rng):\n idxs = tuple(rng(e.shape, e.dtype) for e in idxs)\n src = rng(shape, dtype)\n index_take = lambda src: lax.index_take(src, idxs, axes)\n check_grads(index_take, (src,), 2, 1e-2, 1e-2, 1)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_dnums={}_slice_sizes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), idxs, dnums,\n slice_sizes),\n \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"dnums\": dnums,\n \"slice_sizes\": slice_sizes, \"rng\": rng, \"rng_idx\": rng_idx}\n for dtype in float_dtypes\n for shape, idxs, dnums, slice_sizes in [\n ((5,), onp.array([[0], [2]]), lax.GatherDimensionNumbers(\n offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),\n (1,)),\n ((10,), onp.array([[0], [0], [0]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),\n (2,)),\n ((10, 5,), onp.array([[0], [2], [1]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),\n (1, 3)),\n ]\n for rng_idx in [jtu.rand_int(max(shape))]\n for rng in [jtu.rand_default()]))\n def testGatherGrad(self, shape, dtype, idxs, dnums, slice_sizes, rng, rng_idx):\n idxs = rng_idx(idxs.shape, idxs.dtype)\n gather = lambda x: lax.gather(x, idxs, dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n x = rng(shape, dtype)\n check_grads(gather, (x,), 2, 1e-2, 1e-2, 1.)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums, \"rng\": rng,\n \"rng_idx\": rng_idx}\n for dtype in float_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n 
]\n for rng_idx in [jtu.rand_int(max(arg_shape))]\n for rng in [jtu.rand_default()]))\n def testScatterAddGrad(self, arg_shape, dtype, idxs, update_shape, dnums, rng,\n rng_idx):\n idxs = rng_idx(idxs.shape, idxs.dtype)\n scatter_add = lambda x, y: lax.scatter_add(x, idxs, y,\n dimension_numbers=dnums)\n x = rng(arg_shape, dtype)\n y = rng(update_shape, dtype)\n check_grads(scatter_add, (x, y), 2, 1e-2, 1e-2, 1.)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums, \"rng\": rng,\n \"rng_idx\": rng_idx}\n for dtype in float_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ]\n for rng_idx in [jtu.rand_int(max(arg_shape))]\n for rng in [jtu.rand_default()]))\n def testScatterGrad(self, arg_shape, dtype, idxs, update_shape, dnums, rng,\n rng_idx):\n idxs = rng_idx(idxs.shape, idxs.dtype)\n scatter = lambda x, y: lax.scatter(x, idxs, y, dimension_numbers=dnums)\n x = rng(arg_shape, dtype)\n y = rng(update_shape, dtype)\n check_grads(scatter, (x, y), 2, 1e-2, 1e-2, 1.)\n\n def testStopGradient(self):\n def f(x):\n return lax.sin(x) * lax.cos(lax.stop_gradient(x))\n\n def f2(x, y):\n return lax.sin(x) * lax.cos(y)\n\n x = 3.14\n ans = api.grad(f)(x)\n expected = api.grad(f2)(x, x)\n self.assertAllClose(ans, expected, check_dtypes=True)\n\n ans = api.grad(api.grad(f))(x)\n expected = api.grad(api.grad(f2))(x, x)\n self.assertAllClose(ans, expected, check_dtypes=True)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.swapaxes", "numpy.minimum", "numpy.take", "numpy.maximum", "numpy.unique", "numpy.asarray", "numpy.dtype", "numpy.ones", "numpy.full", "numpy.finfo", "numpy.iinfo", "numpy.prod", "numpy.array", "numpy.random.RandomState", "numpy.flip", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tmay-sarsaparilla/advent-of-code-2021
[ "3cd827df57d315dd96627544b9f5c31b7db1aa11" ]
[ "23/day_twentythree.py" ]
[ "\nimport numpy as np\nfrom numpy.lib.index_tricks import index_exp\n\n\ndef create_burrow(lines):\n longest = max(len(l) for l in lines)\n normalised_lines = []\n for line in lines:\n if len(line) == longest:\n normalised_lines.append(line)\n continue\n missing = longest - len(line)\n for _ in range(missing):\n line += \" \"\n normalised_lines.append(line)\n return np.array([list(l) for l in normalised_lines])\n\n\ndef find_amphopods(burrow):\n amphopods = []\n for idx, elem in np.ndenumerate(burrow):\n if elem not in \"ABCD\":\n continue\n amphopods.append(idx)\n return amphopods\n\n\ndef find_rooms(burrow):\n amphopods = \"ABCD\"\n rooms = []\n for idx, elem in np.ndenumerate(burrow):\n if elem not in amphopods:\n continue\n x, y = idx\n if burrow[x+1][y] not in amphopods:\n continue\n room = [(x, y), (x+1, y)]\n rooms.append(room)\n return rooms\n\n\ndef find_target_room(amphopod, rooms):\n amphopods = \"ABCD\"\n room_index = amphopods.index(amphopod)\n return rooms[room_index]\n\n\ndef find_possible_moves(burrow, position, searched=None):\n if not searched:\n searched = set()\n searched.add(position)\n x, y = position\n adjacent = {(x-1, y), (x+1, y), (x, y-1), (x, y+1)}\n possible_moves = set()\n for i, j in {a for a in adjacent if a not in searched}:\n try:\n value = burrow[i][j]\n except IndexError:\n continue\n if value == \".\":\n possible_moves.add((i, j))\n possible_moves.update(find_possible_moves(burrow, (i, j), searched))\n return possible_moves\n\n\ndef is_in_room(position, rooms):\n if any(position in room for room in rooms):\n return True\n return False\n\n\ndef is_in_hallway(position):\n if position[0] == 1 and 1 <= position[1] <= 11:\n return True\n return False\n\n\ndef is_outside_room(position, rooms):\n if not is_in_hallway(position):\n return False\n for room in rooms:\n entrance = room[0]\n if position[1] == entrance[1]:\n return True\n return False\n\n\ndef find_energy_costs(burrow):\n rooms = find_rooms(burrow)\n positions = find_amphopods(burrow)\n for position in positions:\n x, y = position\n amphopod = burrow[x][y]\n target_room = find_target_room(amphopod, rooms)\n possible_moves = find_possible_moves(burrow, position)\n for move in possible_moves:\n if is_outside_room(move, rooms):\n continue # can't move to position directly outside room\n if is_in_hallway(position) and move not in target_room:\n continue # can't move from hallway into a room which isn't the target room\n \n\n\ndef main():\n with open(\"test.txt\") as f:\n lines = [l.replace(\"\\n\", \"\") for l in f.readlines()]\n burrow = create_burrow(lines)\n print(burrow)\n rooms = find_rooms(burrow)\n print(find_possible_moves(burrow, (3, 9)))\n\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.ndenumerate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Angericky/CenterPoint-KITTI
[ "093a4a352e8b14cd1fb430770d9b4ccd7d3b0803" ]
[ "pcdet/datasets/dataset.py" ]
[ "from collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nfrom numpy.lib.arraysetops import unique\nimport torch.utils.data as torch_data\n\nfrom ..utils import common_utils\nfrom .augmentor.data_augmentor import DataAugmentor\nfrom .processor.data_processor import DataProcessor\nfrom .processor.point_feature_encoder import PointFeatureEncoder\nfrom .processor.cylind_feat import CylinderFeatureEncoder\n\ndef cart2polar(xyz):\n rho = np.sqrt(xyz[:, 0] ** 2 + xyz[:, 1] ** 2)\n phi = np.arctan2(xyz[:, 1], xyz[:, 0])\n return np.stack((rho, phi, xyz[:, 2]), axis=1)\n\ndef polar2cat(xyz_polar):\n x = xyz_polar[0] * np.cos(xyz_polar[1])\n y = xyz_polar[0] * np.sin(xyz_polar[1])\n return np.stack((x, y, xyz_polar[2]), axis=0)\n\ndef get_distance(x, y):\n return np.sqrt(x ** 2 + y ** 2)\n\nclass DatasetTemplate(torch_data.Dataset):\n def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):\n super().__init__()\n self.dataset_cfg = dataset_cfg\n self.training = training\n self.class_names = class_names\n self.logger = logger\n self.root_path = root_path if root_path is not None else Path(self.dataset_cfg.DATA_PATH)\n self.logger = logger\n if self.dataset_cfg is None or class_names is None:\n return\n\n self.point_cloud_range = np.array(self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32)\n self.cylind_range = np.array(self.dataset_cfg.CYLIND_RANGE, dtype=np.float32) if hasattr(self.dataset_cfg, 'CYLIND_RANGE') else None\n self.cylind_size = np.array(self.dataset_cfg.CYLIND_SIZE, dtype=np.float32) if hasattr(self.dataset_cfg, 'CYLIND_SIZE') else None\n self.cylind_feats = self.dataset_cfg.CYLIND_FEATS if hasattr(self.dataset_cfg, 'CYLIND_FEATS') else False\n self.cart_feats = self.dataset_cfg.CART_FEATS if hasattr(self.dataset_cfg, 'CART_FEATS') else False\n self.cy_grid_size = np.round((self.cylind_range[3:6] - self.cylind_range[0:3]) / np.array(self.cylind_size)).astype(np.int64) if hasattr(self.dataset_cfg, 'CYLIND_RANGE') else None\n \n self.voxel_centers = self.dataset_cfg.VOXEL_CENTERS if hasattr(self.dataset_cfg, 'VOXEL_CENTERS') else False\n\n self.point_feature_encoder = PointFeatureEncoder(\n self.dataset_cfg.POINT_FEATURE_ENCODING,\n point_cloud_range=self.point_cloud_range\n )\n\n self.num_point_features = self.point_feature_encoder.num_point_features\n # self.num_point_features += 2\n # self.cylind_fea_model = CylinderFeatureEncoder(fea_dim=4, fea_compre=16)\n\n if self.cylind_feats and self.cart_feats:\n self.num_point_features = 6\n if self.voxel_centers:\n self.num_point_features += 3\n\n self.data_augmentor = DataAugmentor(\n self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger\n ) if self.training else None\n self.data_processor = DataProcessor(\n self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range, training=self.training\n )\n\n self.grid_size = self.data_processor.grid_size\n self.voxel_size = self.data_processor.voxel_size\n self.total_epochs = 0\n self._merge_all_iters_to_one_epoch = False\n\n self.sum_grid_ind = np.zeros((0, 3))\n\n @property\n def mode(self):\n return 'train' if self.training else 'test'\n\n def __getstate__(self):\n d = dict(self.__dict__)\n del d['logger']\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n @staticmethod\n def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):\n \"\"\"\n To support a custom dataset, implement this function to receive the predicted results from 
the model, and then\n transform the unified normative coordinate to your required coordinate, and optionally save them to disk.\n\n Args:\n batch_dict: dict of original data from the dataloader\n pred_dicts: dict of predicted results from the model\n pred_boxes: (N, 7), Tensor\n pred_scores: (N), Tensor\n pred_labels: (N), Tensor\n class_names:\n output_path: if it is not None, save the results to this path\n Returns:\n\n \"\"\"\n\n def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):\n if merge:\n self._merge_all_iters_to_one_epoch = True\n self.total_epochs = epochs\n else:\n self._merge_all_iters_to_one_epoch = False\n\n def __len__(self):\n raise NotImplementedError\n\n def __getitem__(self, index):\n \"\"\"\n To support a custom dataset, implement this function to load the raw data (and labels), then transform them to\n the unified normative coordinate and call the function self.prepare_data() to process the data and send them\n to the model.\n\n Args:\n index:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n \n def statistics(grid_ind, grid_size, nonempty_voxel_num):\n all_voxels = grid_size[0] / 8 * grid_size[1] * grid_size[2]\n for i in range(8):\n distance = (i + 1) * grid_size[0] / 8\n nonempty_voxel_num[i] += grid_ind[grid_ind[:, 0] < distance].shape\n \n\n def prepare_data(self, data_dict):\n \"\"\"\n Args:\n data_dict:\n points: (N, 3 + C_in)\n gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n gt_names: optional, (N), string\n ...\n\n Returns:\n data_dict:\n frame_id: string\n points: (N, 3 + C_in)\n gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n gt_names: optional, (N), string\n use_lead_xyz: bool\n voxels: optional (num_voxels, max_points_per_voxel, 3 + C)\n voxel_coords: optional (num_voxels, 3)\n voxel_num_points: optional (num_voxels)\n ...\n \"\"\"\n if self.training:\n assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'\n gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)\n\n data_dict = self.data_augmentor.forward(\n data_dict={\n **data_dict,\n 'gt_boxes_mask': gt_boxes_mask\n }\n )\n if len(data_dict['gt_boxes']) == 0:\n new_index = np.random.randint(self.__len__())\n return self.__getitem__(new_index)\n\n if data_dict.get('gt_boxes', None) is not None:\n selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)\n data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]\n data_dict['gt_names'] = data_dict['gt_names'][selected]\n gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)\n gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)\n data_dict['gt_boxes'] = gt_boxes\n\n data_dict = self.point_feature_encoder.forward(data_dict)\n\n data_dict = self.data_processor.forward(\n data_dict=data_dict\n )\n\n if self.cylind_size is not None:\n # replace 'voxels'(V, max_num, C=4) and 'voxel_coords'(V, C=3) (L, W, H) in data_dicts\n xyz = data_dict['points'][:, :3]\n intensity = data_dict['points'][:, 3][:, np.newaxis]\n\n xyz_pol = cart2polar(xyz) # (N, 3)\n\n z_feats = xyz[:, 2:3]\n\n if self.cart_feats and self.cylind_feats:\n pol_feats = np.concatenate((xyz_pol[:, :2], xyz[:, :2], z_feats, intensity), axis=1)\n elif self.cart_feats:\n pol_feats = np.concatenate((xyz[:, :2], z_feats, intensity), axis=1)\n elif self.cylind_feats:\n pol_feats = np.concatenate((xyz_pol[:, :2], z_feats, intensity), axis=1)\n\n max_bound = 
np.array(self.cylind_range[3:6])\n min_bound = np.array(self.cylind_range[0:3])\n max_bound_1e4 = np.round(max_bound * 1e4).astype(np.int64)\n min_bound_1e4 = np.round(min_bound * 1e4).astype(np.int64)\n\n crop_range = self.cylind_range[3:6] - self.cylind_range[0:3]\n grid_size = crop_range / np.array(self.cylind_size)\n self.grid_size = np.round(grid_size).astype(np.int64)\n\n intervals = self.cylind_size\n intervals_1e4 = (self.cylind_size * 1e4).astype(np.int)\n\n if (intervals_1e4 == 0).any(): print(\"Zero interval!\")\n \n remove_index = np.concatenate((np.where(xyz_pol * 1e4 < min_bound_1e4 + 1e-20)[0], np.where(xyz_pol * 1e4 > max_bound_1e4 - 1e-20)[0]))\n xyz_pol = np.delete(xyz_pol, remove_index, axis=0)\n pol_feats = np.delete(pol_feats, remove_index, axis=0)\n\n cy_grid_ind = (np.floor((np.clip(xyz_pol.astype(np.float64) * 1e4, min_bound_1e4, max_bound_1e4) - min_bound_1e4) / intervals_1e4)).astype(np.int)\n \n if self.voxel_centers:\n voxel_centers = (cy_grid_ind.astype(np.float32) + 0.5) * intervals + min_bound\n data_dict['voxel_centers'] = voxel_centers\n \n # unq, cy_feats = self.cylind_fea_model(pol_feats, cy_grid_ind)\n # sort potential repeated grid_inds first by the 1st col, then 3nd col, then 3rd col. \n sorted_indices = np.lexsort((cy_grid_ind[:, 2], cy_grid_ind[:, 1], cy_grid_ind[:, 0]))\n sorted_pol_feats = pol_feats[sorted_indices]\n sorted_cy_grid_ind = cy_grid_ind[sorted_indices]\n\n unique_grid_ind, first_indexes, grid_cnts = np.unique(sorted_cy_grid_ind, axis=0, return_index=True, return_counts=True)\n \n # get a list of all indices of unique elements in a numpy array\n sector_feats = np.split(sorted_pol_feats, first_indexes[1:])\n voxel_max_num = 5 #data_dict['voxels'].shape[1]\n sectors = np.zeros((unique_grid_ind.shape[0], voxel_max_num, (sector_feats[0].shape[1])))\n\n for i in range(len(sector_feats)):\n if sector_feats[i].shape[0] > 5:\n grid_cnts[i] = 5\n sectors[i, :, :sector_feats[i].shape[1]] = sector_feats[i][np.random.choice(sector_feats[i].shape[0], 5, replace=False)]\n #sectors[i, :, sector_feats[i].shape[1]:] = np.expand_dims(unique_grid_ind[i, [1,0]], 0).repeat(5, 0)\n else:\n point_num_in_sector = sector_feats[i].shape[0]\n sectors[i, :point_num_in_sector, :sector_feats[i].shape[1]] = sector_feats[i]\n #sectors[i, :point_num_in_sector, sector_feats[i].shape[1]:] = np.expand_dims(unique_grid_ind[i, [1,0]], 0).repeat(point_num_in_sector, 0)\n\n data_dict['voxel_coords'] = unique_grid_ind[:, [2, 1, 0]]\n data_dict['voxels'] = sectors\n data_dict['voxel_num_points'] = grid_cnts\n if self.voxel_centers:\n cy_fea = np.concatenate((voxel_centers, pol_feats), axis=1)\n else:\n cy_fea = pol_feats\n data_dict['pt_fea'] = cy_fea\n data_dict['xy_ind'] = cy_grid_ind\n\n\n # self.sum_grid_ind = np.unique(np.concatenate((self.sum_grid_ind, unique_grid_ind)), axis=0)\n # input_sp_tensor = spconv.SparseConvTensor(\n # features=voxel_features,\n # indices=voxel_coords.int(),\n # spatial_shape=self.sparse_shape,\n # batch_size=batch_size\n # )\n\n # voxel_position = np.zeros(self.grid_size, dtype=np.float32)\n # dim_array = np.ones(len(self.grid_size) + 1, int)\n # dim_array[0] = -1\n # # voxel_position (3, H, W, L)\n # voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array) \n \n # import pdb\n # pdb.set_trace()\n\n # # get xyz of different polar: voxel_position: (3, H, W, Z)\n # voxel_position = polar2cat(voxel_position)\n\n # processed_label = np.ones(self.grid_size, dtype=np.uint8) #* self.ignore_label\n # 
#label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)\n # #label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]\n # #processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)\n \n # # processed_label: (H, W, Z)\n # data_tuple = (voxel_position, processed_label)\n\n # # center data on each voxel for PTnet\n # voxel_centers = (cy_grid_ind.astype(np.float32) + 0.5) * intervals + min_bound\n # return_xyz = xyz_pol - voxel_centers # (N, 3) points' cylinder offset to voxel centers\n # return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1) # (N, 8)\n\n # return_fea = return_xyz\n\n # if self.return_test:\n # data_tuple += (grid_ind, return_fea, index)\n # else:\n # data_tuple += (grid_ind, return_fea)\n\n data_dict.pop('gt_names', None)\n\n return data_dict\n\n @staticmethod\n def collate_batch(batch_list, _unused=False):\n data_dict = defaultdict(list)\n for cur_sample in batch_list:\n for key, val in cur_sample.items():\n data_dict[key].append(val)\n batch_size = len(batch_list)\n ret = {}\n\n for key, val in data_dict.items():\n try:\n if key in ['voxels', 'voxel_num_points', 'voxel_centers']:\n ret[key] = np.concatenate(val, axis=0)\n elif key in ['pt_fea', 'xy_ind']:\n ret[key] = val\n elif key in ['points', 'voxel_coords']:\n coors = []\n for i, coor in enumerate(val):\n coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)\n coors.append(coor_pad)\n ret[key] = np.concatenate(coors, axis=0)\n elif key in ['gt_boxes']:\n max_gt = max([len(x) for x in val])\n batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)\n for k in range(batch_size):\n batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]\n ret[key] = batch_gt_boxes3d\n else:\n ret[key] = np.stack(val, axis=0)\n except:\n print('Error in collate_batch: key=%s' % key)\n raise TypeError\n\n ret['batch_size'] = batch_size\n\n return ret\n" ]
[ [ "numpy.split", "numpy.sqrt", "numpy.pad", "numpy.unique", "numpy.random.choice", "numpy.cos", "numpy.stack", "numpy.sin", "numpy.arctan2", "numpy.lexsort", "numpy.delete", "numpy.concatenate", "numpy.round", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bbartling/bacnet-people-counter
[ "db269b6b62e4e3e207c443dfb89164af587e9baf" ]
[ "restful_people_count.py" ]
[ "# To read from webcam and write back out to disk:\n# py -3.9 people_counter.py \n\n# import the necessary packages\nfrom pyimagesearch.centroidtracker import CentroidTracker\nfrom pyimagesearch.trackableobject import TrackableObject\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\n\n\nfrom flask import Flask, request, jsonify, render_template, Response\nimport flask\nimport time\nimport threading\n\n\n\nclass mycomputer_vision(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n self.netPeopleCount = 0\n self.netCountDown = 0\n self.netCountUp = 0\n self.totalFrames = 0\n self.killswitch = False\n\n\n def kill(self):\n\n print(\"kill switch hit\")\n self.killswitch = True\n\n\n def run(self):\n global framecopy, vs\n \n model = \"./mobilenet_ssd/MobileNetSSD_deploy.caffemodel\" \n prototxt = \"./mobilenet_ssd/MobileNetSSD_deploy.prototxt\"\n\n # load our serialized model from disk\n print(\"[INFO] loading model...\")\n \n # net = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n net = cv2.dnn.readNetFromCaffe(prototxt, model)\n\n # load our serialized model from disk\n print(\"[INFO] model loaded success...\")\n\n\n\n # initialize the list of class labels MobileNet SSD was trained to\n # detect\n CLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\n\n\n\n print(\"[INFO] starting video stream...\")\n vs = VideoStream(src=0).start()\n time.sleep(0.0)\n\n # initialize the video writer (we'll instantiate later if need be)\n writer = None\n\n # initialize the frame dimensions (we'll set them as soon as we read\n # the first frame from the video)\n W = None\n H = None\n\n # instantiate our centroid tracker, then initialize a list to store\n # each of our dlib correlation trackers, followed by a dictionary to\n # map each unique object ID to a TrackableObject\n ct = CentroidTracker(maxDisappeared=40, maxDistance=50)\n trackers = []\n trackableObjects = {}\n\n\n # start the frames per second throughput estimator\n fps = FPS().start()\n \n \n def countUp():\n print(\"countUp()\")\n self.netCountUp += 1\n netPeopleCount = self.netCountUp - self.netCountDown\n if netPeopleCount <= 0:\n netPeopleCount = 0\n to.counted = True\n self.netPeopleCount = netPeopleCount\n\n\n def countDown():\n print(\"countDown()\")\n self.netCountDown += 1\n netPeopleCount = self.netCountUp - self.netCountDown\n if netPeopleCount <= 0:\n netPeopleCount = 0\n to.counted = True\n self.netPeopleCount = netPeopleCount\n\n\n # loop over frames from the video stream\n while self.killswitch == False:\n # grab the next frame and handle if we are reading from either\n # VideoCapture or VideoStream\n frame = vs.read()\n\n # resize the frame to have a maximum width of 500 pixels (the\n # less data we have, the faster we can process it), then convert\n # the frame from BGR to RGB for dlib\n frame = imutils.resize(frame, width=500)\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # if the frame dimensions are empty, set them\n if W is None or H is None:\n (H, W) = frame.shape[:2]\n\n # if we are supposed to be writing a video to disk, initialize\n # the writer\n if args[\"output\"] is not None and writer is None:\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = 
cv2.VideoWriter(args[\"output\"], fourcc, 30,\n (W, H), True)\n\n # initialize the current status along with our list of bounding\n # box rectangles returned by either (1) our object detector or\n # (2) the correlation trackers\n status = \"Waiting\"\n rects = []\n\n # check to see if we should run a more computationally expensive\n # object detection method to aid our tracker\n if self.totalFrames % args[\"skip_frames\"] == 0:\n # set the status and initialize our new set of object trackers\n status = \"Detecting\"\n trackers = []\n\n # convert the frame to a blob and pass the blob through the\n # network and obtain the detections\n blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)\n net.setInput(blob)\n detections = net.forward()\n\n # loop over the detections\n for i in np.arange(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated\n # with the prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections by requiring a minimum\n # confidence\n if confidence > args[\"confidence\"]:\n # extract the index of the class label from the\n # detections list\n idx = int(detections[0, 0, i, 1])\n\n # if the class label is not a person, ignore it\n if CLASSES[idx] != \"person\":\n continue\n\n # compute the (x, y)-coordinates of the bounding box\n # for the object\n box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # construct a dlib rectangle object from the bounding\n # box coordinates and then start the dlib correlation\n # tracker\n tracker = dlib.correlation_tracker()\n rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))\n tracker.start_track(rgb, rect)\n\n # add the tracker to our list of trackers so we can\n # utilize it during skip frames\n trackers.append(tracker)\n\n # otherwise, we should utilize our object *trackers* rather than\n # object *detectors* to obtain a higher frame processing throughput\n else:\n # loop over the trackers\n for tracker in trackers:\n # set the status of our system to be 'tracking' rather\n # than 'waiting' or 'detecting'\n status = \"Tracking\"\n\n # update the tracker and grab the updated position\n tracker.update(rgb)\n pos = tracker.get_position()\n\n # unpack the position object\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n\n # add the bounding box coordinates to the rectangles list\n rects.append((startX, startY, endX, endY))\n\n # draw a horizontal line in the center of the frame -- once an\n # object crosses this line we will determine whether they were\n # moving 'up' or 'down'\n\n if not args[\"vertical\"]:\n cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)\n \n else:\n cv2.line(frame, (W//2,0), (W//2, H) , (0,255,255), 2)\n\n\n # use the centroid tracker to associate the (1) old object\n # centroids with (2) the newly computed object centroids\n objects = ct.update(rects)\n\n # loop over the tracked objects\n for (objectID, centroid) in objects.items():\n # check to see if a trackable object exists for the current\n # object ID\n to = trackableObjects.get(objectID, None)\n\n # if there is no existing trackable object, create one\n if to is None:\n to = TrackableObject(objectID, centroid)\n\n # otherwise, there is a trackable object so we can utilize it\n # to determine direction\n else:\n # the difference between the y-coordinate of the *current*\n # centroid and the mean of *previous* centroids will tell\n # us in which direction the 
object is moving (negative for\n # 'up' and positive for 'down')\n y = [c[1] for c in to.centroids]\n direction = centroid[1] - np.mean(y)\n to.centroids.append(centroid)\n\n # check to see if the object has been counted or not\n if not to.counted:\n # if the direction is negative (indicating the object\n # is moving up) AND the centroid is above the center\n # line, count the object\n if direction < 0 and centroid[1] < H // 2:\n countUp()\n\n # if the direction is positive (indicating the object\n # is moving down) AND the centroid is below the\n # center line, count the object\n elif direction > 0 and centroid[1] > H // 2:\n countDown()\n\n # store the trackable object in our dictionary\n trackableObjects[objectID] = to\n\n # draw both the ID of the object and the centroid of the\n # object on the output frame\n text = \"ID {}\".format(objectID)\n cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n\n # construct a tuple of information we will be displaying on the\n # frame\n info = [\n (\"Up\", self.netCountUp),\n (\"Down\", self.netCountDown),\n (\"Status\", status),\n ]\n\n # loop over the info tuples and draw them on our frame\n for (i, (k, v)) in enumerate(info):\n text = \"{}: {}\".format(k, v)\n cv2.putText(frame, text, (10, H - ((i * 20) + 20)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\n # check to see if we should write the frame to disk\n if writer is not None:\n writer.write(frame)\n\n # concat frame one by one and show result\n # web app\n framecopy = frame.copy() \n\n # increment the total number of frames processed thus far and\n # then update the FPS counter\n self.totalFrames += 1\n fps.update()\n\n # stop the timer and display FPS information\n fps.stop()\n print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n # check to see if we need to release the video writer pointer\n if writer is not None:\n writer.release()\n\n # kill switch kill computer vision\n vs.stop()\n\n\n'''\nRestful Web APP SETUP BELOW\n'''\n\n\napp = Flask(__name__)\ncomputer_vision = mycomputer_vision()\n\n# used to render computer vision in browser\ndef gen_frames():\n\n global framecopy\n while True:\n if framecopy is None:\n continue\n\n ret, buffer = cv2.imencode('.jpg', framecopy)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n') \n\n\[email protected]('/people') \ndef get_updates(): \n\n info = {\"count\":computer_vision.netPeopleCount,\n \"out\":computer_vision.netCountDown,\n \"in\":computer_vision.netCountUp} \n\n response_obj = {'status':'success','info':info}\n \n return response_obj\n\n\[email protected]('/people/count') \ndef get_updates_count(): \n return jsonify(computer_vision.netPeopleCount)\n\n\[email protected]('/people/out') \ndef get_updates_out(): \n return jsonify(computer_vision.netCountDown)\n\n\[email protected]('/people/in') \ndef get_updates_in(): \n return jsonify(computer_vision.netCountUp)\n\n\[email protected]('/reset') \ndef reset_params(): \n\n computer_vision.netPeopleCount = 0\n computer_vision.netCountDown = 0\n computer_vision.netCountUp = 0\n\n response_obj = {'status':'success'}\n\n return response_obj\n\n\n\[email protected]('/video_feed')\ndef video_feed():\n #Video streaming route. 
Put this in the src attribute of an img tag\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n\[email protected]('/')\ndef index():\n \"\"\"Video streaming home page.\"\"\"\n return render_template('index.html')\n\n\n\n'''\nMAIN LOOP BELOW\n'''\n\n\nif __name__ == \"__main__\":\n\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-o\",\n \"--output\",\n type=str,\n help=\"path to optional output video file\")\n\n ap.add_argument(\"-c\",\n \"--confidence\",\n type=float,\n default=0.4,\n help=\"minimum probability to filter weak detections\")\n\n ap.add_argument(\"-s\",\n \"--skip-frames\",\n type=int,\n default=30,\n help=\"# of skip frames between detections\")\n\n ap.add_argument(\"-v\",\n \"--vertical\",\n type=bool,\n default=False,\n help=\"specify vertical or horizontal line\")\n\n ap.add_argument('-p',\n '--port',\n required=False,\n type=int,\n default=5000,\n help='port number to run web app on default is 5000')\n\n\n args = vars(ap.parse_args())\n print('Port for the Flask App Is ' + str(args[\"port\"]))\n\n # start computer vision on seperate thread\n computer_vision.start()\n\n # start flask app \n app.run(debug=True,\n host=\"0.0.0.0\",\n port=args[\"port\"],\n use_reloader=False,\n threaded=True)\n\n\n# CNTRL - C to kill computer vision\ncomputer_vision.kill()\n\n\n" ]
[ [ "numpy.arange", "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ezzaimsoufiane/keras-ocr
[ "df98046b9ed2a506311515db830d3fd7357ca45a" ]
[ "keras_ocr/evaluation.py" ]
[ "# pylint: disable=invalid-name,too-many-locals\nimport copy\nimport warnings\n\nimport editdistance\nimport numpy as np\nimport pyclipper\nimport cv2\n\n\n# Adapted from https://github.com/andreasveit/coco-text/blob/master/coco_evaluation.py\ndef iou_score(box1, box2):\n \"\"\"Returns the Intersection-over-Union score, defined as the area of\n the intersection divided by the intersection over the union of\n the two bounding boxes. This measure is symmetric.\n\n Args:\n box1: The coordinates for box 1 as a list of (x, y) coordinates\n box2: The coordinates for box 2 in same format as box1.\n \"\"\"\n if len(box1) == 2:\n x1, y1 = box1[0]\n x2, y2 = box1[1]\n box1 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])\n if len(box2) == 2:\n x1, y1 = box2[0]\n x2, y2 = box2[1]\n box2 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])\n if any(cv2.contourArea(np.int32(box)[:, np.newaxis, :]) == 0 for box in [box1, box2]):\n warnings.warn('A box with zero area was detected.')\n return 0\n pc = pyclipper.Pyclipper()\n pc.AddPath(np.int32(box1), pyclipper.PT_SUBJECT, closed=True)\n pc.AddPath(np.int32(box2), pyclipper.PT_CLIP, closed=True)\n intersection_solutions = pc.Execute(pyclipper.CT_INTERSECTION, pyclipper.PFT_EVENODD,\n pyclipper.PFT_EVENODD)\n union_solutions = pc.Execute(pyclipper.CT_UNION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD)\n union = sum(cv2.contourArea(np.int32(points)[:, np.newaxis, :]) for points in union_solutions)\n intersection = sum(\n cv2.contourArea(np.int32(points)[:, np.newaxis, :]) for points in intersection_solutions)\n return intersection / union\n\n\ndef score(true, pred, iou_threshold=0.5, similarity_threshold=0.5, translator=None):\n \"\"\"\n Args:\n true: The ground truth boxes provided as a dictionary of {image_id: annotations}\n mappings. `annotations` should be lists of dicts with a `text` and `vertices` key.\n `vertices` should be a list of (x, y) coordinates. Optionally, an \"ignore\" key can be\n added to indicate that detecting an annotation should neither count as a false positive\n nor should failure to detect it count as a false negative.\n pred: The predicted boxes in the same format as `true`.\n iou_threshold: The minimum IoU to qualify a box as a match.\n similarity_threshold: The minimum texg similarity required to qualify\n a text string as a match.\n translator: A translator acceptable by `str.translate`. Used to\n modify ground truth / predicted strings. 
For example,\n `str.maketrans(string.ascii_uppercase, string.ascii_lowercase,\n string.punctuation)` would yield a translator that changes all\n strings to lowercase and removes punctuation.\n\n Returns:\n A results dictionary reporting false positives, false negatives, true positives\n and near matches (IoU > iou_threshold but similarity < similarity_threshold) along\n with the compute precision and recall.\n \"\"\"\n true_ids = sorted(true)\n pred_ids = sorted(pred)\n assert all(true_id == pred_id for true_id, pred_id in zip(\n true_ids, pred_ids)), 'true and pred dictionaries must have the same keys'\n results = {\n 'true_positives': [],\n 'false_positives': [],\n 'near_true_positives': [],\n 'false_negatives': []\n }\n for image_id in true_ids:\n true_anns = true[image_id]\n pred_anns = copy.deepcopy(pred[image_id])\n pred_matched = set()\n for true_index, true_ann in enumerate(true_anns):\n match = None\n for pred_index, pred_ann in enumerate(pred_anns):\n iou = iou_score(true_ann['vertices'], pred_ann['vertices'])\n if iou >= iou_threshold:\n match = {'true_idx': true_index, 'pred_idx': pred_index, 'image_id': image_id}\n pred_matched.add(pred_index)\n true_text = true_ann['text']\n pred_text = pred_ann['text']\n if true_ann.get('ignore', False):\n # We recorded that this prediction matched something,\n # so it won't be a false positive. But we're also ignoring\n # this ground truth label so we won't count it as a true\n # positive or a near true positive.\n continue\n if translator is not None:\n true_text = true_text.translate(translator)\n pred_text = pred_text.translate(translator)\n edit_distance_norm = max(len(true_text), len(pred_text))\n if edit_distance_norm == 0:\n similarity = 1\n else:\n similarity = 1 - (editdistance.eval(true_text, pred_text) /\n max(len(true_text), len(pred_text)))\n if similarity >= similarity_threshold:\n results['true_positives'].append(match)\n else:\n results['near_true_positives'].append(match)\n if match is None and not true_ann.get('ignore', False):\n results['false_negatives'].append({'image_id': image_id, 'true_idx': true_index})\n results['false_positives'].extend({\n 'pred_index': pred_index,\n 'image_id': image_id\n } for pred_index, _ in enumerate(pred_anns) if pred_index not in pred_matched)\n fns = len(results['false_negatives'])\n fps = len(results['false_positives'])\n tps = len(\n set((true_positive['image_id'], true_positive['true_idx'])\n for true_positive in results['true_positives']))\n precision = tps / (tps + fps)\n recall = tps / (tps + fns)\n return results, (precision, recall)\n" ]
[ [ "numpy.array", "numpy.int32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XinGuoZJU/SPFN
[ "e7fc2fb40e42c39c1a9329b2495127d2b945cef8" ]
[ "pointnet_plusplus/architectures.py" ]
[ "import os, sys\nBASE_DIR = os.path.normpath(\n os.path.join(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(os.path.join(BASE_DIR, 'utils'))\n\nfrom pointnet_util import pointnet_sa_module, pointnet_fp_module\nimport tensorflow as tf\nimport tf_util\n\ndef build_pointnet2_seg(scope, X, out_dims, is_training, bn_decay):\n with tf.variable_scope(scope):\n l0_xyz = tf.slice(X, [0,0,0], [-1,-1,3])\n l0_points = tf.slice(X, [0,0,3], [-1,-1,0])\n\n # Set Abstraction layers\n l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,\n npoint=512, radius=0.2, nsample=64, mlp=[64,64,128],\n mlp2=None, group_all=False, is_training=is_training,\n bn_decay=bn_decay, scope='layer1')\n\n l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,\n npoint=128, radius=0.4, nsample=64, mlp=[128,128,256],\n mlp2=None, group_all=False, is_training=is_training,\n bn_decay=bn_decay, scope='layer2')\n\n l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,\n npoint=None, radius=None, nsample=None, mlp=[256,512,1024],\n mlp2=None, group_all=True, is_training=is_training,\n bn_decay=bn_decay, scope='layer3')\n\n # Feature Propagation layers\n l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,\n [256,256], is_training, bn_decay, scope='fa_layer1')\n\n l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,\n [256,128], is_training, bn_decay, scope='fa_layer2')\n\n l0_points = pointnet_fp_module(l0_xyz, l1_xyz,\n tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128],\n is_training, bn_decay, scope='fa_layer3')\n\n # FC layers\n net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,\n scope='dp1')\n\n results = []\n for idx, out_dim in enumerate(out_dims):\n current_result = tf_util.conv1d(net, out_dim, 1, padding='VALID', activation_fn=None, scope='fc2_{}'.format(idx))\n results.append(current_result)\n\n return results\n\ndef build_pointnet2_cls(scope, point_cloud, out_dims, is_training, bn_decay):\n with tf.variable_scope(scope):\n batch_size = tf.shape(point_cloud)[0]\n l0_xyz = point_cloud\n l0_points = None\n\n # Set abstraction layers\n # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).\n # So we only use NCHW for layer 1 until this issue can be resolved.\n l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)\n l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')\n l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')\n\n # Fully connected layers\n net = tf.reshape(l3_points, [batch_size, 1024])\n net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')\n net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, 
scope='dp2')\n\n results = []\n for idx, out_dim in enumerate(out_dims):\n current_result = tf_util.fully_connected(net, out_dim, activation_fn=None, scope='fc3_{}'.format(idx))\n results.append(current_result)\n\n return results\n" ]
[ [ "tensorflow.concat", "tensorflow.shape", "tensorflow.slice", "tensorflow.reshape", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
nishanth-/goldenowl
[ "7e45d1e5274366e90332bb6fd06aa87f818217e3" ]
[ "goldenowl/portfolio/simplePut.py" ]
[ "import itertools\nimport pandas as pd\nimport datetime as dt\nfrom xirr.math import xirr\nimport goldenowl.asset.asset\nfrom goldenowl.portfolio.holding import Holding\n\nclass SimplePut(Holding):\n def __init__(self, aName, aAsset, aStrike, aExpiry, aOTMCostFactor):\n Holding.__init__(self, aName, aAsset);\n self.m_otm_cost_factor = aOTMCostFactor;\n self.m_strike = aStrike;\n self.m_expiry = pd.to_datetime(aExpiry);\n\n\n def _getAssetValue(self, aDate):\n norm_date = pd.to_datetime(aDate);\n underlying_val = -1;\n if (norm_date < self.m_expiry):\n underlying_val = self.m_inst_pr_map.getValue(aDate);\n prem_val = self.m_otm_cost_factor*underlying_val;\n if (underlying_val < self.m_strike):\n return prem_val + self.m_strike - underlying_val;\n else:\n return prem_val;\n else:\n underlying_val = self.m_inst_pr_map.getValue(self.m_expiry);\n if (underlying_val < self.m_strike):\n return self.m_strike - underlying_val;\n else:\n return 0;\n\n\n" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
JieZheng-ShanghaiTech/KG4SL
[ "dc52424aaf36de28124013f080dfa59083c314a8" ]
[ "src/train.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom model import KG4SL\nimport pandas as pd\nfrom sklearn.model_selection import ShuffleSplit\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\ndef reindexid2geneName(test_data):\n entity2id = pd.read_csv('../data/entity2id.txt', sep='\\t')\n entity_dict = {}\n for index, row in entity2id.iterrows():\n entity_dict[row['b']] = row['a']\n\n test_data.columns = ['gene_a', 'gene_b', 'label']\n\n db_ida = []\n db_idb = []\n for index, row in test_data.iterrows():\n db_ida.append(entity_dict[row['gene_a']])\n db_idb.append(entity_dict[row['gene_b']])\n test_data['db_ida'] = db_ida\n test_data['db_idb'] = db_idb\n\n dbid2name = pd.read_csv('../data/dbid2name.csv', sep=',', header=0)\n id2name_dict = {}\n for index, row in dbid2name.iterrows():\n id2name_dict[row['_id']] = row['name']\n\n name_a = []\n name_b = []\n for index, row in test_data.iterrows():\n name_a.append(id2name_dict[row['db_ida']])\n name_b.append(id2name_dict[row['db_idb']])\n test_data['name_a'] = name_a\n test_data['name_b'] = name_b\n\n col_order = ['gene_a', 'gene_b', 'db_ida', 'db_idb', 'name_a', 'name_b', 'label']\n test_data = test_data[col_order]\n\n return test_data\n\ndef train(args, data, string):\n\n # data: # n_nodea(0), n_nodeb(1), n_entity(2), n_relation(3), adj_entity(4), adj_relation(5), train_data(6), test_data(7)\n n_nodea, n_nodeb, n_entity, n_relation = data[0], data[1], data[2], data[3]\n adj_entity, adj_relation = data[4], data[5]\n\n test_data = data[7]\n \n test_data_mapping = reindexid2geneName(pd.DataFrame(test_data)) # Add gene names\n \n pd.DataFrame(test_data).to_csv('../results/test_data_' + string + '.csv',header=False, index=False)\n test_data_mapping.to_csv('../results/test_data_mapping_' + string + '.csv')\n\n kf = ShuffleSplit(n_splits=9,test_size=0.2,random_state=43)\n cross_validation = 1\n train_auc_kf_list = []\n train_f1_kf_list = []\n train_aupr_kf_list = []\n\n eval_auc_kf_list = []\n eval_f1_kf_list = []\n eval_aupr_kf_list = []\n\n test_auc_kf_list = []\n test_f1_kf_list = []\n test_aupr_kf_list = []\n\n loss_kf_list = []\n loss_curve = pd.DataFrame(columns=['epoch', 'loss', 'train_auc', 'train_f1', 'train_aupr', 'eval_auc', 'eval_f1', 'eval_aupr', 'test_auc','test_f1', 'test_aupr'])\n kk=1\n\n for train_kf, eval_kf in kf.split(data[6]):\n if (cross_validation == 6):\n print(str(cross_validation-1)+' cross validation stop!')\n break\n else:\n tf.reset_default_graph()\n\n train_data = data[6][train_kf]\n eval_data = data[6][eval_kf]\n pd.DataFrame(train_data).to_csv('../results/train_data_' + string + '_' + str(kk) + '.csv',header=False, index=False)\n pd.DataFrame(eval_data).to_csv('../results/eval_data_'+ string + '_' + str(kk) + '.csv',header=False, index=False)\n\n model = KG4SL(args,n_entity, n_relation, adj_entity, adj_relation)\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n best_loss_flag = 1000000\n early_stopping_flag = 2\n best_eval_auc_flag = 0\n for step in range(args.n_epochs):\n # training\n loss_list = []\n start = 0\n # skip the last incomplete minibatch if its size < batch size\n while start + args.batch_size <= train_data.shape[0]:\n _, loss = model.train(sess, get_feed_dict(model, train_data, start, start + args.batch_size))\n start += args.batch_size\n loss_list.append(loss)\n loss_mean = np.mean(loss_list)\n\n train_nodea_emb_list, train_nodeb_emb_list, train_score, train_score_binary, train_auc, train_f1, train_aupr = 
ctr_eval(sess, args, model, train_data,args.batch_size)\n eval_nodea_emb_list, eval_nodeb_emb_list, eval_score, eval_score_binary, eval_auc, eval_f1, eval_aupr = ctr_eval(sess, args, model, eval_data, args.batch_size)\n test_nodea_emb_list, test_nodeb_emb_list, test_score, test_score_binary, test_auc, test_f1, test_aupr = ctr_eval(sess, args, model, test_data,args.batch_size)\n\n # print('epoch %d train auc: %.4f f1: %.4f train_aupr: %.4f eval auc: %.4f f1: %.4f eval aupr: %.4f test auc: %.4f f1: %.4f test_aupr: %.4f loss: %.4f'\n # % (step, train_auc, train_f1, train_aupr, eval_auc, eval_f1,eval_aupr, test_auc, test_f1, test_aupr, loss_mean))\n\n print(\"-\"*50)\n print('Epoch %d' % step + ':')\n print('The AUC, AUPR and F1 values on the training data are: %.4f, %.4f, %.4f' %(train_auc, train_aupr, train_f1))\n print('The AUC, AUPR and F1 values on the validation data are: %.4f, %.4f, %.4f' % (eval_auc, eval_aupr, eval_f1))\n print('The AUC, AUPR and F1 values on the testing data are: %.4f, %.4f, %.4f' % (test_auc, test_aupr, test_f1))\n print('The training loss is: %.4f' % loss_mean)\n\n loss_curve.loc[step] = [step, loss_mean, train_auc, train_f1, train_aupr,eval_auc, eval_f1, eval_aupr, test_auc, test_f1,test_aupr]\n\n # save the models with the highest eval_acu\n if (eval_auc > best_eval_auc_flag):\n best_eval_auc_flag = eval_auc\n best_k = int(string[-1])\n best_kk = kk\n best_iteration = step\n\n best_train_auc = train_auc\n best_train_f1 = test_f1\n best_train_aupr = train_aupr\n\n best_eval_auc = eval_auc\n best_eval_f1 = eval_f1\n best_eval_aupr = eval_aupr\n\n best_test_auc = test_auc\n best_test_f1 = test_f1\n best_test_aupr = test_aupr\n best_loss = loss_mean\n\n best_test_score = test_score\n best_test_score_binary = test_score_binary\n\n best_train_nodea_emb_list = train_nodea_emb_list\n best_train_nodeb_emb_list = train_nodeb_emb_list\n best_eval_nodea_emb_list = eval_nodea_emb_list\n best_eval_nodeb_emb_list = eval_nodeb_emb_list\n best_test_nodea_emb_list = test_nodea_emb_list\n best_test_nodeb_emb_list = test_nodeb_emb_list\n\n # saver.save(sess,'../best_models/best_model_' + str(best_k) + '_' + str(best_kk) + '.ckpt', global_step=best_iteration)\n\n\n\n\n # early_stopping\n if(args.earlystop_flag):\n if (loss_mean < best_loss_flag):\n stopping_step = 0\n best_loss_flag = loss_mean\n else:\n stopping_step += 1\n if (stopping_step >= early_stopping_flag):\n print('Early stopping is trigger at step: %.4f loss: %.4f test_auc: %.4f test_f1: %.4f test_aupr: %.4f' % (step, loss_mean, test_auc, test_f1, test_aupr))\n break\n\n # draw training curve\n loss_curve.to_csv('../results/loss_curve_' + string +'_' + str(kk) + '.csv', index=0)\n\n np.savetxt('../results/' + string + '_' + str(best_kk) + '_' + str(best_iteration) + '_train_nodea_emb.csv', (best_train_nodea_emb_list.eval())[args.batch_size:], delimiter='\\t')\n np.savetxt('../results/' + string + '_' + str(best_kk) + '_' + str(best_iteration) + '_train_nodeb_emb.csv', (best_train_nodeb_emb_list.eval())[args.batch_size:], delimiter='\\t')\n np.savetxt('../results/' + string + '_' + str(best_kk) + '_' + str(best_iteration) + '_eval_nodea_emb.csv', (best_eval_nodea_emb_list.eval())[args.batch_size:], delimiter='\\t')\n np.savetxt('../results/' + string + '_' + str(best_kk) + '_' + str(best_iteration) + '_eval_nodeb_emb.csv', (best_eval_nodeb_emb_list.eval())[args.batch_size:], delimiter='\\t')\n np.savetxt('../results/' + string + '_' + str(best_kk) + '_' + str(best_iteration) + '_test_nodea_emb.csv', 
(best_test_nodea_emb_list.eval())[args.batch_size:], delimiter='\\t')\n np.savetxt('../results/' + string + '_' + str(best_kk) + '_' + str(best_iteration) + '_test_nodeb_emb.csv', (best_test_nodeb_emb_list.eval())[args.batch_size:], delimiter='\\t')\n\n pd.DataFrame((np.array(best_test_score)).reshape(-1,1)).to_csv('../results/'+string+'_'+str(best_kk)+'_'+str(best_iteration)+'_scores.csv',header=False, index=False)\n pd.DataFrame((np.array(best_test_score_binary)).reshape(-1,1)).to_csv('../results/'+string+'_'+str(best_kk)+'_'+str(best_iteration)+'_scores_binary.csv',header=False, index=False)\n\n font = {\n 'family': 'SimHei',\n 'weight': 'normal',\n 'size': 20,\n 'color': 'black'\n }\n\n pl = plt.figure(figsize=(10, 10))\n # plt.suptitle('training curve', fontsize=25)\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace = 0.3, hspace = 0.3)\n plt.rcParams['savefig.dpi'] = 300\n plt.rcParams['figure.dpi'] = 300\n\n # add the first subplot\n pl.add_subplot(2, 2, 1)\n plt.plot(loss_curve['epoch'], loss_curve['loss'], linestyle='solid', color='#FF8C00', alpha=0.8, linewidth=3,label='loss')\n\n plt.legend(loc=\"upper right\")\n plt.rcParams['legend.title_fontsize'] = 14\n plt.rcParams['legend.fontsize'] = 14\n plt.xlabel('Number Of Epochs', font)\n plt.ylabel('Training Loss', font)\n # plt.ylim(0.3, 0.9)\n plt.tick_params(labelsize=20)\n # plt.xticks([0, 1, 2, 3, 4, 5])\n # plt.xticks([0,5,10,15,20,25,30])\n # plt.title('loss_mean: ' + str(round(best_loss, 4)), font)\n\n # add the second subplot\n pl.add_subplot(2, 2, 2)\n plt.plot(loss_curve['epoch'], loss_curve['train_auc'], linestyle='dotted', color='#4169E1', alpha=0.8, linewidth=3,label='Train')\n plt.plot(loss_curve['epoch'], loss_curve['eval_auc'], linestyle='dashed', color='#FF4500', alpha=0.8, linewidth=3,label='Eval')\n plt.plot(loss_curve['epoch'], loss_curve['test_auc'], linestyle='dashdot', color='#228B22', alpha=0.8, linewidth=3,label='Test')\n\n plt.legend(loc=\"lower right\")\n plt.rcParams['legend.title_fontsize'] = 14\n plt.rcParams['legend.fontsize'] = 14\n plt.xlabel('Number Of Epochs', font)\n plt.ylabel('AUC', font)\n # plt.ylim(0.84, 1.0)\n plt.tick_params(labelsize=20)\n # plt.xticks([0, 1, 2, 3, 4, 5])\n plt.xticks([0,5,10,15,20,25,30])\n # plt.title('test_auc: ' + str(round(best_test_auc, 4)), font)\n\n # add the third subplot\n pl.add_subplot(2, 2, 3)\n plt.plot(loss_curve['epoch'], loss_curve['train_f1'], linestyle='dotted', color='#4169E1', alpha=0.8, linewidth=3,label='Train',markerfacecolor='none')\n plt.plot(loss_curve['epoch'], loss_curve['eval_f1'], linestyle='dashed', color='#FF4500', alpha=0.8, linewidth=3,label='Eval',markerfacecolor='none')\n plt.plot(loss_curve['epoch'], loss_curve['test_f1'], linestyle='dashdot', color='#228B22', alpha=0.8, linewidth=3,label='Test',markerfacecolor='none')\n\n plt.legend(loc=\"lower right\")\n plt.rcParams['legend.title_fontsize'] = 14\n plt.rcParams['legend.fontsize'] = 14\n plt.xlabel('Number Of Epochs', font)\n plt.ylabel('F1', font)\n # plt.ylim(0.84, 1.0)\n plt.tick_params(labelsize=20)\n # plt.xticks([0, 1, 2, 3, 4, 5])\n plt.xticks([0,5,10,15,20,25,30])\n # plt.title('test_f1: ' + str(round(best_test_f1, 4)), font)\n\n # add the fourth subplot\n pl.add_subplot(2, 2, 4)\n plt.plot(loss_curve['epoch'], loss_curve['train_aupr'], linestyle='dotted', color='#4169E1', alpha=0.8, linewidth=3,label='Train',markerfacecolor='none')\n plt.plot(loss_curve['epoch'], loss_curve['eval_aupr'], linestyle='dashed', color='#FF4500', alpha=0.8, 
linewidth=3,label='Eval',markerfacecolor='none')\n plt.plot(loss_curve['epoch'], loss_curve['test_aupr'], linestyle='dashdot', color='#228B22', alpha=0.8, linewidth=3,label='Test',markerfacecolor='none')\n\n plt.legend(loc=\"lower right\")\n plt.rcParams['legend.title_fontsize'] = 14\n plt.rcParams['legend.fontsize'] = 14\n plt.xlabel('Number Of Epochs', font)\n plt.ylabel('AUPR', font)\n # plt.ylim(0.84, 1.0)\n plt.tick_params(labelsize=20)\n # plt.xticks([0, 1, 2, 3, 4, 5])\n plt.xticks([0,5,10,15,20,25,30])\n # plt.title('test_aupr: ' + str(round(best_test_aupr, 4)), font)\n\n # save curve\n pl.tight_layout()\n pl.savefig('../results/training_curve_' + string + '_' + str(kk)+'.png', bbox_inches='tight')\n pl.clf()\n\n tf.get_default_graph().finalize()\n\n kk=kk+1\n\n train_auc_kf_list.append(best_train_auc)\n train_f1_kf_list.append(best_train_f1)\n train_aupr_kf_list.append(best_train_aupr)\n\n eval_auc_kf_list.append(best_eval_auc)\n eval_f1_kf_list.append(best_eval_f1)\n eval_aupr_kf_list.append(best_eval_aupr)\n\n test_auc_kf_list.append(best_test_auc)\n test_f1_kf_list.append(best_test_f1)\n test_aupr_kf_list.append(best_test_aupr)\n\n loss_kf_list.append(best_loss)\n\n cross_validation = cross_validation + 1\n\n\n train_auc_kf_mean = np.mean(train_auc_kf_list)\n train_f1_kf_mean = np.mean(train_f1_kf_list)\n train_aupr_kf_mean = np.mean(train_aupr_kf_list)\n\n eval_auc_kf_mean = np.mean(eval_auc_kf_list)\n eval_f1_kf_mean = np.mean(eval_f1_kf_list)\n eval_aupr_kf_mean = np.mean(eval_aupr_kf_list)\n\n test_auc_kf_mean = np.mean(test_auc_kf_list)\n test_f1_kf_mean = np.mean(test_f1_kf_list)\n test_aupr_kf_mean = np.mean(test_aupr_kf_list)\n\n loss_kf_mean = np.mean(loss_kf_list)\n\n # print('train auc_std: %.4f train f1_std: %.4f train_aupr_std: %.4f eval auc_std: %.4f eval f1_std: %.4f eval_aupr_std: %.4f test auc_std: %.4f test f1_std: %.4f test_aupr_std: %.4f loss_std: %.4f'\n # % (np.std(train_auc_kf_list), np.std(train_f1_kf_list), np.std(train_aupr_kf_list), np.std(eval_auc_kf_list), np.std(eval_f1_kf_list),\n # np.std(eval_aupr_kf_list), np.std(test_auc_kf_list), np.std(test_f1_kf_list), np.std(test_aupr_kf_list), np.std(loss_kf_list)))\n\n print(\"-\" * 50)\n print('final results')\n print('The std of AUC, AUPR and F1 values on the training data are: %.4f, %.4f, %.4f' % (np.std(train_auc_kf_list), np.std(train_aupr_kf_list), np.std(train_f1_kf_list)))\n print('The std of AUC, AUPR and F1 values on the validation data are: %.4f, %.4f, %.4f' % (np.std(eval_auc_kf_list), np.std(eval_aupr_kf_list), np.std(eval_f1_kf_list)))\n print('The std of AUC, AUPR and F1 values on the testing data are: %.4f, %.4f, %.4f' % (np.std(test_auc_kf_list), np.std(test_aupr_kf_list), np.std(test_f1_kf_list)))\n print('The std of training loss is: %.4f' % np.std(loss_kf_list))\n\n return loss_kf_mean, train_auc_kf_mean, train_f1_kf_mean, train_aupr_kf_mean, eval_auc_kf_mean, eval_f1_kf_mean, eval_aupr_kf_mean, test_auc_kf_mean, test_f1_kf_mean, test_aupr_kf_mean\n\ndef get_feed_dict(model, data, start, end):\n feed_dict = {model.nodea_indices: data[start:end, 0],\n model.nodeb_indices: data[start:end, 1],\n model.labels: data[start:end, 2]}\n return feed_dict\n\n\ndef ctr_eval(sess, args, model, data, batch_size):\n start = 0\n auc_list = []\n f1_list = []\n aupr_list = []\n scores_list = []\n scores_binary_list = []\n nodea_emb_list = tf.zeros([args.batch_size, args.dim])\n nodeb_emb_list = tf.zeros([args.batch_size, args.dim])\n\n while start + batch_size <= data.shape[0]:\n nodea_emb, nodeb_emb, 
scores_output, scores_binary_output, auc, f1, aupr = model.eval(sess, get_feed_dict(model, data, start, start + batch_size))\n\n nodea_emb_list = tf.concat([nodea_emb_list, nodea_emb], 0)\n nodeb_emb_list = tf.concat([nodeb_emb_list, nodeb_emb], 0)\n\n scores_list.append(scores_output)\n scores_binary_list.append(scores_binary_output)\n auc_list.append(auc)\n f1_list.append(f1)\n aupr_list.append(aupr)\n start += batch_size\n return nodea_emb_list, nodeb_emb_list, scores_list, scores_binary_list, float(np.mean(auc_list)), float(np.mean(f1_list)), float(np.mean(aupr_list))\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.concat", "tensorflow.zeros", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.mean", "tensorflow.get_default_graph", "pandas.read_csv", "sklearn.model_selection.ShuffleSplit", "numpy.std", "tensorflow.reset_default_graph", "tensorflow.Session", "matplotlib.pyplot.subplots_adjust", "tensorflow.train.Saver", "matplotlib.pyplot.figure", "tensorflow.global_variables_initializer", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.tick_params" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
gregbugaj/form-processor
[ "0c803de43a98b4a02efa956803e64793995256ff" ]
[ "hsv_selector.py" ]
[ "import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\n# Load image\nimg_path ='./assets/forms-seg/001_fake.png'\nimg_path ='/tmp/form-segmentation/PID_10_5_0_2787.original.redacted.tif/work/resized_mask.png'\nimg_path ='/tmp/form-segmentation/269692_202006290005214_001.tif/work/resized_mask.png'\nimg_path ='/tmp/form-segmentation/mask.png'\nimg_path ='/tmp/form-segmentation/272946_0031516169334_001.tif/debug/filtered_1627313939110508043.png'\nimg_path ='/home/greg/tmp/hicfa_mask/PID_10_5_0_3101.original_fake_processed.png'\nimage = cv2.imread(img_path)\n\n# Create a window\ncv2.namedWindow('image', cv2.WINDOW_FREERATIO)\n\n# Create trackbars for color change\n# Hue is from 0-179 for Opencv\ncv2.createTrackbar('HMin', 'image', 0, 179, nothing)\ncv2.createTrackbar('SMin', 'image', 0, 255, nothing)\ncv2.createTrackbar('VMin', 'image', 0, 255, nothing)\ncv2.createTrackbar('HMax', 'image', 0, 179, nothing)\ncv2.createTrackbar('SMax', 'image', 0, 255, nothing)\ncv2.createTrackbar('VMax', 'image', 0, 255, nothing)\n\n# Set default value for Max HSV trackbars\ncv2.setTrackbarPos('HMax', 'image', 179)\ncv2.setTrackbarPos('SMax', 'image', 255)\ncv2.setTrackbarPos('VMax', 'image', 255)\n\n# Initialize HSV min/max values\nhMin = sMin = vMin = hMax = sMax = vMax = 0\nphMin = psMin = pvMin = phMax = psMax = pvMax = 0\n\nwhile(1):\n # Get current positions of all trackbars\n hMin = cv2.getTrackbarPos('HMin', 'image')\n sMin = cv2.getTrackbarPos('SMin', 'image')\n vMin = cv2.getTrackbarPos('VMin', 'image')\n hMax = cv2.getTrackbarPos('HMax', 'image')\n sMax = cv2.getTrackbarPos('SMax', 'image')\n vMax = cv2.getTrackbarPos('VMax', 'image')\n\n # Set minimum and maximum HSV values to display\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n\n # Convert to HSV format and color threshold\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, lower, upper)\n result = cv2.bitwise_and(image, image, mask=mask)\n\n # Print if there is a change in HSV value\n if((phMin != hMin) | (psMin != sMin) | (pvMin != vMin) | (phMax != hMax) | (psMax != sMax) | (pvMax != vMax) ):\n print(\"(hMin = %d , sMin = %d, vMin = %d), (hMax = %d , sMax = %d, vMax = %d)\" % (hMin , sMin , vMin, hMax, sMax , vMax))\n phMin = hMin\n psMin = sMin\n pvMin = vMin\n phMax = hMax\n psMax = sMax\n pvMax = vMax\n\n # Display result image\n cv2.imshow('image', result)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\ncv2.destroyAllWindows()" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
covid-19-impact-lab/sid-germany
[ "aef4bbfb326adaf9190c6d8880e15b3d6f150d28", "aef4bbfb326adaf9190c6d8880e15b3d6f150d28" ]
[ "src/create_initial_states/task_build_full_params.py", "src/plotting/task_create_scenario_comparison_tables.py" ]
[ "from pathlib import Path\n\nimport pandas as pd\nimport pytask\nimport sid\n\nfrom src.config import BLD\nfrom src.config import SRC\nfrom src.contact_models.get_contact_models import get_all_contact_models\n\nEPI_PARAMS_PATH = Path(sid.__file__).parent.joinpath(\"covid_epi_params.csv\").resolve()\n\n_DEPENDENCIES = {\n \"dist_other_non_recurrent\": BLD\n / \"contact_models\"\n / \"empirical_distributions\"\n / \"other_non_recurrent.pkl\",\n \"dist_work_non_recurrent\": BLD\n / \"contact_models\"\n / \"empirical_distributions\"\n / \"work_non_recurrent.pkl\",\n \"assort_other_non_recurrent\": BLD\n / \"contact_models\"\n / \"age_assort_params\"\n / \"other_non_recurrent.pkl\",\n \"assort_work_non_recurrent\": BLD\n / \"contact_models\"\n / \"age_assort_params\"\n / \"work_non_recurrent.pkl\",\n \"vacations\": BLD / \"data\" / \"vacations.pkl\",\n \"infection_probs\": SRC / \"simulation\" / \"infection_probs.pkl\",\n \"susceptibility\": SRC / \"original_data\" / \"susceptibility.csv\",\n \"contact_models.py\": SRC / \"contact_models\" / \"get_contact_models.py\",\n \"covid_epi_params\": EPI_PARAMS_PATH,\n}\n\n\[email protected]_on(_DEPENDENCIES)\[email protected](BLD / \"params.pkl\")\ndef task_create_full_params(depends_on, produces):\n epi_params = sid.load_epidemiological_parameters()\n vacations = pd.read_pickle(depends_on[\"vacations\"])\n infection_probs = pd.read_pickle(depends_on[\"infection_probs\"])\n susceptibility = pd.read_csv(\n depends_on[\"susceptibility\"], index_col=[\"category\", \"subcategory\", \"name\"]\n )\n\n distributions = {\n name[5:]: path for name, path in depends_on.items() if name.startswith(\"dist_\")\n }\n dist_params = []\n for category, path in distributions.items():\n dist = pd.read_pickle(path)\n dist = _make_mergable_with_params(dist, category)\n dist_params.append(dist)\n dist_params = pd.concat(dist_params, axis=0)\n\n age_assort_params = {}\n for name, path in depends_on.items():\n if name.startswith(\"assort\"):\n age_assort_params[name[7:]] = pd.read_pickle(path)\n\n contact_models = get_all_contact_models()\n\n assort_params = _build_assort_params(contact_models, age_assort_params)\n\n reaction_params = _build_reaction_params(contact_models)\n share_known_cases_params = _build_share_known_cases_params()\n param_slices = [\n infection_probs,\n reaction_params,\n dist_params,\n assort_params,\n epi_params,\n vacations,\n share_known_cases_params,\n susceptibility,\n ]\n params = pd.concat(param_slices, axis=0)\n\n # number of available tests is implemented in the test demand model.\n # therefore, we set the \"sid\" limit, which is time invariant to one test\n # per individual\n params.loc[(\"testing\", \"allocation\", \"rel_available_tests\"), \"value\"] = 100_000\n params.loc[(\"testing\", \"processing\", \"rel_available_capacity\"), \"value\"] = 100_000\n\n params = _add_virus_strain_params(params)\n params = _add_vacation_model_distribution_params(params)\n\n # Share of individuals refusing to be vaccinated.\n # 80% of Germans are somewhat or definitely willing to be vaccinated.\n # 12% are undecided. 
8% are opposed to being vaccinated.\n # We assume that 15% will refuse to be vaccinated.\n # source: https://bit.ly/3c9mTgX (publication date: 2021-03-02)\n params.loc[(\"vaccinations\", \"share_refuser\", \"share_refuser\"), \"value\"] = 0.15\n\n # source: https://bit.ly/3gHlcKd (section 3.5, 2021-03-09, accessed 2021-04-28)\n # 82% say they would get a PCR test after a positive rapid test\n loc = (\"test_demand\", \"shares\", \"share_w_positive_rapid_test_requesting_test\")\n params.loc[loc, \"value\"] = 0.82\n\n params = _add_work_rapid_test_params(params)\n params = _add_educ_rapid_test_fade_in_params(params)\n params = _add_private_rapid_test_demand_fade_in_params(params)\n params = _add_rapid_test_reaction_params(params)\n params = _add_event_params(params)\n\n # seasonality parameter\n params.loc[(\"seasonality_effect\", \"seasonality_effect\", \"weak\"), \"value\"] = 0.21\n params.loc[(\"seasonality_effect\", \"seasonality_effect\", \"strong\"), \"value\"] = 0.42\n\n params = _convert_index_to_int_where_possible(params)\n assert params[\"value\"].notnull().all(), \"Params contains NaNs.\"\n params.to_pickle(produces)\n\n\ndef _make_mergable_with_params(dist, category):\n \"\"\"Change the index and Series name to easily merge it to params.\n\n Args:\n dist (pandas.Series): distribution of number of contacts. The\n index is the support, the values the probabilities.\n category (str): name of the contact model to which the distribution\n belongs. This is set as the category index level of the\n returned Series.\n Returns:\n pandas.Series: Series with triple index category, subcategory, name.\n the name index level is the support. the value column contains\n the probabilities.\n\n \"\"\"\n dist.name = \"value\"\n dist = dist.to_frame()\n dist[\"category\"] = category\n dist[\"subcategory\"] = \"n_contacts\"\n dist[\"name\"] = dist.index\n dist = dist.set_index([\"category\", \"subcategory\", \"name\"], drop=True)\n return dist\n\n\ndef _build_assort_params(contact_models, age_assort_params):\n df = pd.DataFrame(columns=[\"category\", \"subcategory\", \"name\", \"value\"])\n sr = df.set_index([\"category\", \"subcategory\", \"name\"])[\"value\"]\n for name, model in contact_models.items():\n if not model[\"is_recurrent\"]:\n for var in model[\"assort_by\"]:\n if var == \"county\":\n sr[(\"assortative_matching\", name, var)] = 0.8\n else:\n sr = pd.concat([sr, age_assort_params[name]], axis=0)\n return sr.to_frame()\n\n\ndef _build_reaction_params(contact_models):\n df = pd.DataFrame(columns=[\"category\", \"subcategory\", \"name\", \"value\"])\n df = df.set_index([\"category\", \"subcategory\", \"name\"])\n multipliers = [\n # source: The COSMO Study of 2021-03-09: 85% of individuals would isolate\n # after a positive rapid test.\n (\"symptomatic_multiplier\", 0.15, 0.7),\n (\"positive_test_multiplier\", 0.05, 0.5),\n ]\n for name, multiplier, hh_multiplier in multipliers:\n for cm in contact_models:\n if \"household\" in cm:\n df.loc[(cm, name, name)] = hh_multiplier\n else:\n df.loc[(cm, name, name)] = multiplier\n return df\n\n\ndef _add_virus_strain_params(params):\n \"\"\"Add parameters governing the infectiousness of the virus strains.\n\n source: https://doi.org/10.1126/science.abg3055\n\n \"We estimate that this variant has a 43–90% (range of 95% credible\n intervals 38–130%) higher reproduction number than preexisting variants\"\n\n We take the midpoint of 67%.\n\n \"\"\"\n params = params.copy(deep=True)\n params.loc[(\"virus_strain\", \"base_strain\", \"factor\"), \"value\"] 
= 1.0\n params.loc[(\"virus_strain\", \"b117\", \"factor\"), \"value\"] = 1.67\n\n # for Delta:\n # - CDC\n # (https://www.cdc.gov/coronavirus/2019-ncov/variants/delta-variant.html):\n # \"more than 2x as contagious as previous variants.\" (Sep 2)\n # - meta-analysis (10.1093/jtm/taab124):\n # basic replication number estimated from 3.2 to 8.\n # - BBC (https://www.bbc.com/news/health-57431420):\n # basic replication number from 5-8 vs. 4-5 for Alpha.\n params.loc[(\"virus_strain\", \"delta\", \"factor\"), \"value\"] = 3.33 # 2.0 * 1.67\n return params\n\n\ndef _add_vacation_model_distribution_params(params):\n params = params.copy(deep=True)\n loc = (\"additional_other_vacation_contact\", \"probability\")\n # 2020\n params.loc[(*loc, \"Winterferien\"), \"value\"] = 0.5\n params.loc[(*loc, \"Osterferien\"), \"value\"] = 0.5\n params.loc[(*loc, \"Pfingstferien\"), \"value\"] = 0.5\n params.loc[(*loc, \"Sommerferien\"), \"value\"] = 0.5\n params.loc[(*loc, \"Herbstferien\"), \"value\"] = 0.5\n params.loc[(*loc, \"Weihnachtsferien\"), \"value\"] = 0.5\n # 2021\n params.loc[(*loc, \"Winterferien2021\"), \"value\"] = 0.5\n params.loc[(*loc, \"Osterferien2021\"), \"value\"] = 0.5\n params.loc[(*loc, \"Pfingstferien2021\"), \"value\"] = 0.5\n params.loc[(*loc, \"Sommerferien2021\"), \"value\"] = 0.5\n return params\n\n\ndef _add_work_rapid_test_params(params):\n \"\"\"Add parameters governing the rapid test demand at work.\n\n Only 60% of workers receiving a test offer accept it regularly\n (https://bit.ly/3t1z0lf (COSMO, 2021-04-21))\n\n We assume rapid tests in firms on Jan 01 2021.\n\n 2021-03-17-19: 20% of employers offer weekly test (https://bit.ly/3eu0meK)\n second half of March: 23% of workers report test offer (https://bit.ly/3gANaan)\n\n 2021-04-05: 60% of workers get weekly test (https://bit.ly/2RWCDMz)\n\n 2021-04-15: 70% of workers expected to get weekly tests (https://bit.ly/32BqKhd)\n COSMO (https://bit.ly/3t1z0lf, 2021-04-20) report <2/3 of people having\n work contacts receiving a test offer. 
We summarize this as 2/3 getting a test.\n\n 2021-04-19: employers are required by law to offer two weekly tests\n (https://bit.ly/3tJNUh1, https://bit.ly/2QfNctJ)\n There is no data available on compliance or take-up yet.\n\n \"\"\"\n params = params.copy(deep=True)\n\n accept_loc = (\"rapid_test_demand\", \"share_accepting_work_offer\")\n # constant by default\n params.loc[(*accept_loc, \"2020-01-01\"), \"value\"] = 0.6\n params.loc[(*accept_loc, \"2021-01-01\"), \"value\"] = 0.6\n params.loc[(*accept_loc, \"2021-04-06\"), \"value\"] = 0.6\n params.loc[(*accept_loc, \"2025-12-31\"), \"value\"] = 0.6\n\n offer_loc = (\"rapid_test_demand\", \"share_workers_receiving_offer\")\n params.loc[(*offer_loc, \"2020-01-01\"), \"value\"] = 0.0\n params.loc[(*offer_loc, \"2021-01-01\"), \"value\"] = 0.0\n params.loc[(*offer_loc, \"2021-03-17\"), \"value\"] = 0.23\n params.loc[(*offer_loc, \"2021-04-05\"), \"value\"] = 0.6\n params.loc[(*offer_loc, \"2021-04-15\"), \"value\"] = 0.66\n params.loc[(*offer_loc, \"2021-06-15\"), \"value\"] = 0.80\n params.loc[(*offer_loc, \"2025-12-31\"), \"value\"] = 0.80\n return params\n\n\ndef _add_educ_rapid_test_fade_in_params(params):\n \"\"\"Add the shares how many people with educ contacts get a rapid test.\n\n Sources:\n - 17-24 of March 2021 (Mon, 2021-03-22):\n - NRW had 80% tests for students before Easter (https://bit.ly/3u7z8Rx)\n - BY: test offers to educ_workers (https://bit.ly/3tbVX5u)\n - BW: only tests for educ workers (https://bit.ly/2S7251M)\n\n - federal level:\n \"In Kitas und Schulen sollen die Testmöglichkeiten \"mit der\n steigenden Verfügbarkeit von Schnell- und Selbsttests\"\n ausgebaut werden\" (https://bit.ly/3nuCSKi)\n - Some KiTa workers are being tested (https://bit.ly/3nyGyus)\n - Self tests for students in Berlin (https://bit.ly/2ScGu8m)\n - Schleswig-Holstein: test offer (https://bit.ly/3eVfkuv)\n - mandatory tests in Saxony (https://bit.ly/3eEQGhn)\n - no tests yet for students in Hessia, but already ordered\n (https://bit.ly/3gMGJB4)\n - Niedersachsen had one test week before Easter (https://bit.ly/3gOOC96)\n\n => assume 90% of teachers and 40% of students do rapid tests\n\n - After Easter (2021-04-07):\n - NRW: tests are mandatory starting April 12th (https://bit.ly/3xqVbUn)\n - Bavaria: tests are mandatory for all (https://bit.ly/3nz5fXS,\n https://bit.ly/2QHilX3)\n - BW: voluntary tests for students (https://bit.ly/3vuetaD)\n - Brandenburg starts with tests (https://bit.ly/3xAihZB)\n - Schleswig-Holstein: mandatory tests (https://bit.ly/3eVfkuv)\n\n => assume 95% of teachers and 75% of students get tested\n\n - BW: tests mandatory starting 2021-04-19 (https://bit.ly/3vuetaD)\n\n => assume 95% of teachers and 95% of students get tested\n\n \"\"\"\n params = params.copy(deep=True)\n\n loc = (\"rapid_test_demand\", \"educ_worker_shares\")\n params.loc[(*loc, \"2020-01-01\"), \"value\"] = 0.0\n params.loc[(*loc, \"2021-01-01\"), \"value\"] = 0.0\n # this is arbitrary to have a more convex shape\n params.loc[(*loc, \"2021-03-01\"), \"value\"] = 0.3\n params.loc[(*loc, \"2021-03-22\"), \"value\"] = 0.9\n params.loc[(*loc, \"2021-04-07\"), \"value\"] = 0.95\n params.loc[(*loc, \"2021-04-19\"), \"value\"] = 0.95\n params.loc[(*loc, \"2021-06-01\"), \"value\"] = 0.95\n params.loc[(*loc, \"2025-12-31\"), \"value\"] = 0.95\n\n loc = (\"rapid_test_demand\", \"student_shares\")\n params.loc[(*loc, \"2020-01-01\"), \"value\"] = 0.0\n params.loc[(*loc, \"2021-02-01\"), \"value\"] = 0.0\n params.loc[(*loc, \"2021-03-01\"), \"value\"] = 0.1\n 
params.loc[(*loc, \"2021-03-22\"), \"value\"] = 0.4\n params.loc[(*loc, \"2021-04-07\"), \"value\"] = 0.75\n params.loc[(*loc, \"2021-04-19\"), \"value\"] = 0.95\n params.loc[(*loc, \"2021-06-01\"), \"value\"] = 1.0\n params.loc[(*loc, \"2025-12-31\"), \"value\"] = 1.0\n\n # Assume weekly tests before Easter and twice weekly tests after Easter\n # We should get a fade-in through different ends of Easter vaccation\n params.loc[(\"rapid_test_demand\", \"educ_frequency\", \"before_easter\"), \"value\"] = 7\n params.loc[(\"rapid_test_demand\", \"educ_frequency\", \"after_easter\"), \"value\"] = 3\n\n return params\n\n\ndef _add_private_rapid_test_demand_fade_in_params(params):\n \"\"\"Add the share of people demanding a rapid test after a Covid household event.\n\n Bürgertests started in mid March but demand was very low initially\n (https://bit.ly/3ehmGcj). Anecdotally, the demand continues to be limited.\n\n First tests to self-administer became available starting March 6.\n However, supply was very limited in the beginning (https://bit.ly/3xJCIn8).\n\n According to the COSMO study (https://bit.ly/2QSFAgR, 2021-05-25) 63% would\n have been willing to take a rapid test in the round of 23rd of February 2021 and\n 60% in mid December 2020 when an acquaintance would have tested positive.\n For own symptoms that share was 70%.\n\n We assume that for Easter visits many people demanded tests for the first\n time and are more likely to test themselves after knowing where to get them.\n\n \"\"\"\n params = params.copy(deep=True)\n loc = (\"rapid_test_demand\", \"private_demand\")\n params.loc[(*loc, \"2020-01-01\"), \"value\"] = 0\n params.loc[(*loc, \"2021-02-28\"), \"value\"] = 0\n params.loc[(*loc, \"2021-03-20\"), \"value\"] = 0.1\n params.loc[(*loc, \"2021-03-31\"), \"value\"] = 0.225\n params.loc[(*loc, \"2021-04-06\"), \"value\"] = 0.225\n params.loc[(*loc, \"2021-05-04\"), \"value\"] = 0.63\n params.loc[(*loc, \"2025-12-31\"), \"value\"] = 0.63\n\n return params\n\n\ndef _add_rapid_test_reaction_params(params):\n \"\"\"Add rapid test reaction params.\n\n source: The COSMO Study of 2021-03-09 (https://bit.ly/3gHlcKd)\n In section 3.5 \"Verhalten nach positivem Selbsttest\"\n 85% claim they would isolate (\"isoliere mich und beschränke meine Kontakte\n bis zur Klärung\") => We use this multiplier of 0.15 here.\n\n We assume households are only reduced by 30%, i.e. 
have a multiplier of 0.7.\n\n \"\"\"\n params = params.copy(deep=True)\n params.loc[\n (\"rapid_test_demand\", \"reaction\", \"hh_contacts_multiplier\"), \"value\"\n ] = 0.7\n params.loc[\n (\"rapid_test_demand\", \"reaction\", \"not_hh_contacts_multiplier\"), \"value\"\n ] = 0.15\n return params\n\n\ndef _convert_index_to_int_where_possible(params):\n params = params.reset_index().copy(deep=True)\n params[\"name\"] = params[\"name\"].apply(_convert_to_int_if_possible)\n params = params.set_index([\"category\", \"subcategory\", \"name\"])\n return params\n\n\ndef _build_share_known_cases_params():\n params_slice = pd.Series(\n {\n # from dunkelzifferradar\n \"2020-01-01\": 0.07,\n \"2020-03-01\": 0.07,\n \"2020-03-17\": 0.2,\n \"2020-06-10\": 0.2,\n \"2020-07-05\": 0.46,\n \"2020-08-20\": 0.60,\n \"2020-09-28\": 0.60,\n \"2020-10-28\": 0.38,\n # \"2020-11-04\": 0.36,\n # \"2020-11-14\": 0.39,\n # \"2020-11-17\": 0.39,\n \"2020-11-28\": 0.31,\n \"2020-12-22\": 0.31,\n \"2020-12-24\": 0.22,\n # free parameters\n \"2020-12-27\": 0.22,\n \"2021-01-02\": 0.28,\n \"2021-01-07\": 0.31,\n \"2021-03-28\": 0.31,\n \"2021-04-01\": 0.22,\n \"2021-04-05\": 0.22,\n \"2021-04-07\": 0.31,\n \"2021-08-15\": 0.31,\n },\n name=\"value\",\n ).to_frame()\n params = pd.concat([params_slice], keys=[\"share_known_cases\"])\n params = pd.concat(\n [params], keys=[\"share_known_cases\"], names=[\"category\", \"subcategory\", \"name\"]\n )\n return params\n\n\ndef _convert_to_int_if_possible(x):\n \"\"\"pd.to_numeric did not correctly work.\"\"\"\n try:\n return int(x)\n except ValueError:\n return x\n\n\ndef _add_event_params(params):\n params = params.copy(deep=True)\n params.loc[(\"events\", \"b117_cases_per_100_000\", \"2021-01-01\"), \"value\"] = 0\n params.loc[(\"events\", \"b117_cases_per_100_000\", \"2021-01-31\"), \"value\"] = 0.986\n\n params.loc[(\"events\", \"delta_cases_per_100_000\", \"2021-04-15\"), \"value\"] = 0\n params.loc[(\"events\", \"delta_cases_per_100_000\", \"2021-06-01\"), \"value\"] = 0.06\n\n return params\n", "import pandas as pd\nimport pytask\n\nfrom src.config import BLD\nfrom src.config import FAST_FLAG\nfrom src.config import N_HOUSEHOLDS\nfrom src.config import POPULATION_GERMANY\nfrom src.config import SRC\nfrom src.plotting.plotting import make_scenario_name_nice\nfrom src.plotting.task_plot_scenario_comparisons import SCHOOL_SCENARIOS\nfrom src.policies.policy_tools import combine_dictionaries\nfrom src.policies.policy_tools import filter_dictionary\nfrom src.simulation.scenario_config import create_path_to_scenario_outcome_time_series\nfrom src.simulation.scenario_config import get_available_scenarios\nfrom src.simulation.scenario_config import get_named_scenarios\n\n\ndef _create_table_path(name):\n return BLD / \"tables\" / f\"{name}_table.tex\"\n\n\ndef _create_deps(scenarios, groupby):\n available_scenarios = get_available_scenarios(get_named_scenarios())\n\n py_dependencies = {\n \"config.py\": SRC / \"config.py\",\n \"scenario_config.py\": SRC / \"simulation\" / \"scenario_config.py\",\n \"plotting.py\": SRC / \"plotting\" / \"plotting.py\",\n }\n\n data_dependencies = {}\n scenarios_to_compare = [s for s in scenarios if s in available_scenarios]\n for scenario in scenarios_to_compare:\n outcome = (\n \"newly_infected\" if groupby is None else f\"newly_infected_by_{groupby}\"\n )\n data_dependencies[scenario] = create_path_to_scenario_outcome_time_series(\n scenario, outcome\n )\n dependencies = combine_dictionaries([py_dependencies, data_dependencies])\n return 
dependencies\n\n\nWORK_RAPID_TEST_SCENARIOS = [\n \"spring_without_work_rapid_tests\",\n \"spring_baseline\",\n]\n\n\n_PARAMETRIZATION = [\n (\n _create_deps(SCHOOL_SCENARIOS, \"age_group_rki\"),\n \"2021-04-06\" if FAST_FLAG != \"debug\" else None,\n \"5-14\",\n \"predicted total infections among 5-14 year olds from Easter until {end_date}\",\n _create_table_path(\"student_infections\"),\n ),\n (\n _create_deps(WORK_RAPID_TEST_SCENARIOS, None),\n None,\n None,\n \"predicted total infections from {start_date} until {end_date}\",\n _create_table_path(\"role_of_work_rapid_tests\"),\n ),\n]\n\n\[email protected](\n \"depends_on, start_date, group, name, produces\", _PARAMETRIZATION\n)\ndef task_create_infections_across_scenarios_table(\n depends_on, start_date, group, name, produces\n):\n data_dependencies = filter_dictionary(lambda x: not x.endswith(\".py\"), depends_on)\n scenario_to_data = {\n scenario: pd.read_pickle(path) for scenario, path in data_dependencies.items()\n }\n\n if scenario_to_data:\n scenario_infections = _create_table_with_total_infections(\n scenario_to_data=scenario_to_data,\n start_date=start_date,\n group=group,\n name=name,\n )\n else:\n scenario_infections = pd.DataFrame()\n\n with open(produces, \"w\") as f:\n f.write(scenario_infections.to_latex())\n\n\ndef _create_table_with_total_infections(scenario_to_data, start_date, group, name):\n scenario_to_infections = pd.Series(dtype=float)\n scenario_to_infections.index.name = \"scenario\"\n\n scaling_factor = POPULATION_GERMANY / N_HOUSEHOLDS\n\n for scenario, data in scenario_to_data.items():\n if start_date is not None:\n data = data.loc[start_date:]\n if group is not None:\n data = data.unstack().swaplevel(axis=1)\n total_infected = int(data[group].sum().mean() * scaling_factor)\n else:\n total_infected = int(scaling_factor * data.sum().mean())\n nice_name = make_scenario_name_nice(scenario).replace(\"\\n\", \" \")\n scenario_to_infections[nice_name] = total_infected\n\n scenario_to_infections.name = name.format(\n start_date=data.index.min().date(), end_date=data.index.max().date()\n )\n return scenario_to_infections\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Series", "pandas.DataFrame", "pandas.read_pickle" ], [ "pandas.read_pickle", "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
quynhu-d/asr_project_template
[ "1f2cebcf7d516b66a2d049b3e67611141866b0e7" ]
[ "hw_asr/text_encoder/ctc_char_text_encoder.py" ]
[ "from typing import List, Tuple\n\nimport torch\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\nfrom hw_asr.text_encoder.char_text_encoder import CharTextEncoder\nimport numpy as np\n\nclass CTCCharTextEncoder(CharTextEncoder):\n EMPTY_TOK = \"^\"\n\n def __init__(self, alphabet: List[str]):\n super().__init__(alphabet)\n self.ind2char = {\n 0: self.EMPTY_TOK\n }\n for text in alphabet:\n self.ind2char[max(self.ind2char.keys()) + 1] = text\n self.char2ind = {v: k for k, v in self.ind2char.items()}\n\n def ctc_decode(self, inds: List[int]) -> str:\n res = []\n last_blank = False\n if isinstance(inds, torch.Tensor):\n inds = inds.squeeze().tolist()\n for ind in inds:\n if ind == self.char2ind[self.EMPTY_TOK]:\n last_blank = True\n else:\n if len(res) == 0 or last_blank or res[-1] != ind:\n res.append(ind)\n last_blank = False\n return ''.join([self.ind2char[c] for c in res])\n\n def ctc_beam_search(self, probs: torch.tensor, probs_length: int = None, beam_size: int = 100, verbose: bool = False) -> List[Tuple[str, float]]:\n \"\"\"\n Performs beam search and returns a list of pairs (hypothesis, hypothesis probability).\n \"\"\"\n assert len(probs.shape) == 2\n log_probs = np.log(probs[:probs_length, :])\n char_length, voc_size = log_probs.shape\n assert voc_size == len(self.ind2char)\n\n def extend_and_merge(next_char_probs, src_paths):\n new_paths = defaultdict(float)\n for next_char_ind, next_char_prob in enumerate(next_char_probs):\n next_char = self.ind2char[next_char_ind]\n for (text, last_char), path_prob in src_paths.items():\n new_prefix = text if next_char == last_char else (text + next_char)\n new_prefix = new_prefix.replace(self.EMPTY_TOK, '')\n new_paths[(new_prefix, next_char)] += path_prob * next_char_prob\n return new_paths\n\n def truncate_beam(paths, beam_size):\n return dict(sorted(paths.items(), key=lambda x: x[1])[-beam_size:])\n\n # hypos = []\n paths = {('', self.EMPTY_TOK): 1.0}\n for next_char_probs in tqdm(log_probs, desc=\"Beam search\", leave=False) if verbose else probs:\n paths = extend_and_merge(next_char_probs, paths)\n paths = truncate_beam(paths, beam_size)\n hypos = [(prefix, score.item()) for (prefix, _), score in paths.items()]\n return sorted(hypos, key=lambda x: x[1], reverse=True)\n" ]
[ [ "numpy.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lkruitwagen/global-fossil-fuel-supply-chain
[ "f5d804a5f7cee19af46d2f31e635590d3930bacd", "f5d804a5f7cee19af46d2f31e635590d3930bacd" ]
[ "ffsc/flow/make_network.py", "ffsc/pipeline/datasets/gpkg_dataset.py" ]
[ "import logging, json, os, sys\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\nimport pandas as pd\nfrom tqdm import tqdm\ntqdm.pandas()\n\nfrom ffsc.flow.recipes import recipes\n\ndef get_edges(df,dup_1, dup_2, reverse):\n edges = ['START','END']\n if dup_1:\n edges[0] = 'START_B'\n if dup_2:\n edges[1] = 'END_B'\n if reverse:\n edges = [edges[1], edges[0]]\n\n return df[edges+['IMPEDANCE']].to_records(index=False).tolist()\n\ndef make_coal_network(\n df_cities, \n df_powerstations,\n df_coalmines,\n df_edges_cities,\n df_edges_powerstations,\n df_edges_coalmines,\n df_edges_other_railways,\n df_edges_railways_other,\n df_edges_shippingroutes_other,\n df_edges_railways_railways,\n df_edges_shippingroutes_shippingroutes, flow_parameters):\n \n logger = logging.getLogger(f'flow_edges_coal')\n \n for df in [df_cities, \n df_powerstations,\n df_coalmines,\n df_edges_cities,\n df_edges_powerstations,\n df_edges_coalmines,\n df_edges_other_railways,\n df_edges_railways_other,\n df_edges_shippingroutes_other,\n df_edges_railways_railways,\n df_edges_shippingroutes_shippingroutes]:\n print (df.head(5))\n \n edge_dfs = [df_edges_cities,\n df_edges_powerstations,\n df_edges_coalmines,\n df_edges_other_railways,\n df_edges_railways_other,\n df_edges_shippingroutes_other,\n df_edges_railways_railways,\n df_edges_shippingroutes_shippingroutes]\n \n names = ['df_edges_cities',\n 'df_edges_powerstations',\n 'df_edges_coalmines',\n 'df_edges_other_railways',\n 'df_edges_railways_other',\n 'df_edges_shippingroutes_other',\n 'df_edges_other_shippingroutes',\n 'df_edges_railways_railways',\n 'df_edges_shippingroutes_shippingroutes']\n \n for df, name in zip(edge_dfs, names):\n logger.info(f'{name}, {df[\"START\"].str.split(\"_\").str[0].unique()}, {df[\"END\"].str.split(\"_\").str[0].unique()}')\n \n ## trim for coal\n logger.info('Trimming for coal')\n powerstations_noncoal = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Coal'),'unique_id'].values\n df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_noncoal)]\n df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]\n df_edges_railways_other = df_edges_railways_other[~df_edges_railways_other['END'].isin(powerstations_noncoal)]\n df_edges_shippingroutes_other = df_edges_shippingroutes_other[~df_edges_shippingroutes_other['END'].str.split('_').str[0].isin(['LNGTERMINAL','SHIPPINGROUTE'])]\n\n ### get ingredients\n df_ingredients = {\n 'coalmines-railways':df_edges_other_railways.copy(),\n 'coalmines-firstmile':df_edges_coalmines.copy(),\n 'railways-railways':df_edges_railways_railways.copy(),\n 'railways-ports':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='PORT'].copy(),\n 'shipping-ports':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT'].copy(),\n 'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),\n 'railways-powerstations':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),\n 'railways-cities':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='CITY'].copy(),\n 'lastmile-powerstations': df_edges_powerstations.copy(),\n 'cities-lastmile':df_edges_cities.copy()\n }\n \n ### add impendances\n logger.info('Adding impedances')\n df_ingredients['coalmines-railways']['IMPEDANCE']= 
(df_ingredients['coalmines-railways']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']\n df_ingredients['coalmines-firstmile']['IMPEDANCE']=(df_ingredients['coalmines-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'] + flow_parameters['ROADLOAD']/2)*flow_parameters['tperTJ']['coal']\n df_ingredients['railways-railways']['IMPEDANCE']=(df_ingredients['railways-railways']['DISTANCE']/1000*flow_parameters['RAILCOST'])*flow_parameters['tperTJ']['coal']\n df_ingredients['railways-ports']['IMPEDANCE']=(df_ingredients['railways-ports']['DISTANCE']/1000*flow_parameters['RAILCOST']+ flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']\n df_ingredients['shipping-ports']['IMPEDANCE']=(df_ingredients['shipping-ports']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['coal']\n df_ingredients['shipping-shipping']['IMPEDANCE']=(df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['coal']\n df_ingredients['railways-powerstations']['IMPEDANCE']=(df_ingredients['railways-powerstations']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']\n df_ingredients['railways-cities']['IMPEDANCE']=(df_ingredients['railways-cities']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']\n df_ingredients['lastmile-powerstations']['IMPEDANCE']=(df_ingredients['lastmile-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'] + flow_parameters['ROADLOAD']/2)*flow_parameters['tperTJ']['coal']\n df_ingredients['cities-lastmile']['IMPEDANCE']=(df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['coal']\n \n for step in recipes['coal']:\n if step['dup_1']==True:\n df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'\n if step['dup_2']==True:\n df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'\n \n ### assemble\n logger.info('assembling edge dataframe')\n all_edges = []\n for step in recipes['coal']:\n all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])\n \n print (len(all_edges))\n \n return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE'])\n\ndef make_oil_network(\n df_cities, \n df_powerstations,\n df_oilfields,\n df_oilwells,\n df_edges_cities,\n df_edges_powerstations,\n df_edges_oilfields,\n df_edges_oilwells,\n df_edges_other_pipelines,\n df_edges_pipelines_other,\n df_edges_pipelines_pipelines,\n df_edges_shippingroutes_other,\n df_edges_shippingroutes_shippingroutes, \n flow_parameters):\n \n\n \n logger = logging.getLogger(f'flow_edges_oil')\n \n edge_dfs = [df_edges_cities,\n df_edges_powerstations,\n df_edges_oilfields,\n df_edges_oilwells,\n df_edges_other_pipelines,\n df_edges_pipelines_other,\n df_edges_pipelines_pipelines,\n df_edges_shippingroutes_other,\n df_edges_shippingroutes_shippingroutes]\n \n names = ['df_edges_cities',\n 'df_edges_powerstations',\n 'df_edges_oilfields',\n 'df_edges_oilwells',\n 'df_edges_other_pipelines',\n 'df_edges_pipelines_other',\n 'df_edges_pipelines_pipelines',\n 'df_edges_shippingroutes_other',\n 'df_edges_shippingroutes_shippingroutes']\n \n for df, name in zip(edge_dfs, names):\n logger.info(f'{name}, {df[\"START\"].str.split(\"_\").str[0].unique()}, 
{df[\"END\"].str.split(\"_\").str[0].unique()}')\n \n ## trim for oil\n logger.info('Trimming for oil')\n print ('step 1')\n print (df_edges_powerstations)\n powerstations_nonoil = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Oil'),'unique_id'].values\n df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_nonoil)]\n print ('step 2')\n print (df_edges_powerstations)\n df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]\n print ('step 3')\n print (df_edges_powerstations)\n df_edges_pipelines_other = df_edges_pipelines_other[~df_edges_pipelines_other['END'].isin(powerstations_nonoil)]\n #print (df_edges_pipelines_other)\n #print (df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL')\n df_edges_pipelines_other = df_edges_pipelines_other[~(df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL')]\n df_edges_shippingroutes_other = df_edges_shippingroutes_other[~(df_edges_shippingroutes_other['END'].str.split('_').str[0]=='LNGTERMINAL')]\n \n \n ### get ingredients\n df_ingredients = {\n 'pipelines-oilfields':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILFIELD'].copy(),\n 'pipelines-oilwells':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILWELL'].copy(),\n 'oilfields-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILFIELD'].copy(),\n 'oilwells-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILWELL'].copy(),\n 'pipelines-ports':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='PORT'].copy(),\n 'pipelines-refineries':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='REFINERY'].copy(),\n 'shipping-ports':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT'].copy(),\n 'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),\n 'pipelines-pipelines':df_edges_pipelines_pipelines.copy(),\n 'pipelines-cities':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='CITY'].copy(),\n 'pipelines-powerstations':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),\n 'lmports-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='PORT'].copy(),\n 'lmcities-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='CITY'].copy(),\n 'cities-lastmile':df_edges_cities.copy()\n }\n \n\n \n ### add impendances\n logger.info('Adding impedances')\n df_ingredients['pipelines-oilfields']['IMPEDANCE'] = (df_ingredients['pipelines-oilfields']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-oilwells']['IMPEDANCE'] = (df_ingredients['pipelines-oilwells']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['oilfields-firstmile']['IMPEDANCE'] = (df_ingredients['oilfields-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['oilwells-firstmile']['IMPEDANCE'] = (df_ingredients['oilwells-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-ports']['IMPEDANCE'] = 
(df_ingredients['pipelines-ports']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-refineries']['IMPEDANCE'] = (df_ingredients['pipelines-refineries']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['shipping-ports']['IMPEDANCE'] = (df_ingredients['shipping-ports']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['oil']\n df_ingredients['shipping-shipping']['IMPEDANCE'] = (df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-pipelines']['IMPEDANCE'] = (df_ingredients['pipelines-pipelines']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-cities']['IMPEDANCE'] = (df_ingredients['pipelines-cities']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-powerstations']['IMPEDANCE']= (df_ingredients['pipelines-powerstations']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['lmcities-powerstations']['IMPEDANCE'] = (df_ingredients['lmcities-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['lmports-powerstations']['IMPEDANCE'] = (df_ingredients['lmports-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['cities-lastmile']['IMPEDANCE'] = (df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n \n for step in recipes['oil']:\n if step['dup_1']==True:\n df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'\n if step['dup_2']==True:\n df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'\n \n ### assemble\n logger.info('assembling edge dataframe')\n all_edges = []\n for step in recipes['oil']:\n all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])\n \n print (len(all_edges))\n \n return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE'])\n\n\ndef make_gas_network(\n df_cities, \n df_powerstations,\n df_oilfields,\n df_oilwells,\n df_edges_cities,\n df_edges_powerstations,\n df_edges_oilfields,\n df_edges_oilwells,\n df_edges_other_pipelines,\n df_edges_pipelines_other,\n df_edges_pipelines_pipelines,\n df_edges_shippingroutes_other,\n df_edges_shippingroutes_shippingroutes, \n flow_parameters):\n \n\n \n logger = logging.getLogger(f'flow_edges_Gas')\n \n edge_dfs = [df_edges_cities,\n df_edges_powerstations,\n df_edges_oilfields,\n df_edges_oilwells,\n df_edges_other_pipelines,\n df_edges_pipelines_other,\n df_edges_pipelines_pipelines,\n df_edges_shippingroutes_other,\n df_edges_shippingroutes_shippingroutes]\n \n names = ['df_edges_cities',\n 'df_edges_powerstations',\n 'df_edges_oilfields',\n 'df_edges_oilwells',\n 'df_edges_other_pipelines',\n 'df_edges_pipelines_other',\n 'df_edges_pipelines_pipelines',\n 'df_edges_shippingroutes_other',\n 'df_edges_shippingroutes_shippingroutes']\n \n for df, name in zip(edge_dfs, names):\n logger.info(f'{name}, {df[\"START\"].str.split(\"_\").str[0].unique()}, {df[\"END\"].str.split(\"_\").str[0].unique()}')\n \n ## trim for oil\n logger.info('Trimming for gas')\n powerstations_nonoil = df_powerstations.loc[~df_powerstations['features'].apply(lambda 
el: json.loads(el)['fuel1']=='Gas'),'unique_id'].values\n df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_nonoil)]\n df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]\n df_edges_pipelines_other = df_edges_pipelines_other[~df_edges_pipelines_other['END'].isin(powerstations_nonoil)]\n df_edges_pipelines_other = df_edges_pipelines_other[~(df_edges_pipelines_other['END'].str.split('_').str[0]=='PORT')]\n df_edges_shippingroutes_other = df_edges_shippingroutes_other[~(df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT')]\n \n \n ### get ingredients\n df_ingredients = {\n 'pipelines-oilfields':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILFIELD'].copy(),\n 'pipelines-oilwells':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILWELL'].copy(),\n 'oilfields-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILFIELD'].copy(),\n 'oilwells-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILWELL'].copy(),\n 'pipelines-lng':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL'].copy(),\n 'pipelines-refineries':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='REFINERY'].copy(),\n 'shipping-lng':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='LNGTERMINAL'].copy(),\n 'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),\n 'pipelines-pipelines':df_edges_pipelines_pipelines.copy(),\n 'pipelines-cities':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='CITY'].copy(),\n 'pipelines-powerstations':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),\n 'lmports-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='PORT'].copy(),\n 'lmcities-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='CITY'].copy(),\n 'cities-lastmile':df_edges_cities.copy()\n }\n \n\n \n ### add impendances\n logger.info('Adding impedances')\n df_ingredients['pipelines-oilfields']['IMPEDANCE'] = (df_ingredients['pipelines-oilfields']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-oilwells']['IMPEDANCE'] = (df_ingredients['pipelines-oilwells']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['oilfields-firstmile']['IMPEDANCE'] = (df_ingredients['oilfields-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['oilwells-firstmile']['IMPEDANCE'] = (df_ingredients['oilwells-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-lng']['IMPEDANCE'] = (df_ingredients['pipelines-lng']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-refineries']['IMPEDANCE'] = (df_ingredients['pipelines-refineries']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['shipping-lng']['IMPEDANCE'] = (df_ingredients['shipping-lng']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['oil']\n 
df_ingredients['shipping-shipping']['IMPEDANCE'] = (df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-pipelines']['IMPEDANCE'] = (df_ingredients['pipelines-pipelines']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-cities']['IMPEDANCE'] = (df_ingredients['pipelines-cities']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['pipelines-powerstations']['IMPEDANCE']= (df_ingredients['pipelines-powerstations']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']\n df_ingredients['lmcities-powerstations']['IMPEDANCE'] = (df_ingredients['lmcities-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['lmports-powerstations']['IMPEDANCE'] = (df_ingredients['lmports-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n df_ingredients['cities-lastmile']['IMPEDANCE'] = (df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']\n \n for step in recipes['gas']:\n if step['dup_1']==True:\n df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'\n if step['dup_2']==True:\n df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'\n \n ### assemble\n logger.info('assembling edge dataframe')\n all_edges = []\n for step in recipes['gas']:\n all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])\n \n print (len(all_edges))\n \n return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE'])", "from kedro.io.core import AbstractDataSet\nfrom pathlib import Path\nfrom typing import Any, Dict\nimport geopandas as gpd\nimport pandas as pd\n\n\nclass GPKGDataSet(AbstractDataSet):\n def __init__(self, dirpath: str, filelistpath: str):\n self.dir_path = dirpath\n self.file_list_path = filelistpath\n super().__init__()\n\n def _save(self):\n raise NotImplementedError\n\n def _load(self) -> Any:\n load_file_list_path = Path(self.file_list_path)\n entity_list = pd.read_csv(load_file_list_path.open(\"rb\")).dropna(\n subset=[\"Code\"]\n )\n all_entities_list = []\n for _, country in entity_list.iterrows():\n load_path = Path(self.dir_path + \"/\" + str(country[\"Code\"]) + \".gpkg\")\n if load_path.is_file():\n with load_path.open(\"rb\") as local_file:\n all_entities_list.append(gpd.read_file(local_file))\n\n all_entities_data = pd.concat(all_entities_list, ignore_index=True)\n # load_path = Path(self.file_path)\n # with load_path.open(\"rb\") as local_file:\n # return gpd.read_file(local_file)\n return all_entities_data\n\n def _describe(self) -> Dict[str, Any]:\n return \"GPKG Dataset.\"\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tyjiang1997/NonLocalProp_MVD
[ "5cf5a5b422fd20e710429363447dc36a90f12b18", "5cf5a5b422fd20e710429363447dc36a90f12b18" ]
[ "core/utils/inverse_warp_d.py", "ddptest.py" ]
[ "from __future__ import division\nimport torch\nfrom torch.autograd import Variable\nimport pdb\n\npixel_coords = None\n\n\ndef set_id_grid(depth):\n global pixel_coords\n b, d, h, w = depth.size()\n i_range = Variable(torch.arange(0, h).view(1, 1, h, 1).expand(1,d,h,w)).type_as(depth) # [1, H, W]\n j_range = Variable(torch.arange(0, w).view(1, 1, 1, w).expand(1,d,h,w)).type_as(depth) # [1, H, W]\n ones = Variable(torch.ones(1,d,h,w)).type_as(depth)\n\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) \n\n\ndef check_sizes(input, input_name, expected):\n condition = [input.ndimension() == len(expected)]\n for i,size in enumerate(expected):\n if size.isdigit():\n condition.append(input.size(i) == int(size))\n assert(all(condition)), \"wrong size for {}, expected {}, got {}\".format(input_name, 'x'.join(expected), list(input.size()))\n\n\ndef pixel2cam(depth, intrinsics_inv):\n global pixel_coords\n \"\"\"Transform coordinates in the pixel frame to the camera frame.\n Args:\n depth: depth maps -- [B, D, H, W]\n intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]\n Returns:\n array of (u,v,1) cam coordinates -- [B, 3, H, W]\n \"\"\"\n b, d, h, w = depth.size()\n if (pixel_coords is None) or pixel_coords.size(3) != h:\n set_id_grid(depth)\n current_pixel_coords = pixel_coords[:,:,:,:h,:w].expand(b,3,d,h,w).contiguous().view(b, 3, -1).cuda() # [B, 3, D*H*W]\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, d, h, w)\n \n e_cam_coords = cam_coords * depth.unsqueeze(1) #extended camcoords\n stack_cam_coords = []\n stack_cam_coords.append(e_cam_coords[:,:,:,0:h,0:w])\n \n return torch.stack(stack_cam_coords, dim = 5)\n\n\ndef cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):\n \"\"\"Transform coordinates in the camera frame to the pixel frame.\n Args:\n cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 3, D, H, W, 1]\n proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]\n proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]\n Returns:\n array of [-1,1] coordinates -- [B, 2, H, W]\n \"\"\"\n b, _, d, h, w, _ = cam_coords.size()\n\n cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, DHW]\n \n if proj_c2p_rot is not None:\n pcoords = (proj_c2p_rot.bmm(cam_coords_flat)).view(b,3,d,h,w,-1)\n else:\n pcoords = cam_coords\n\n if proj_c2p_tr is not None :\n pcoords = pcoords + proj_c2p_tr.view(b,3,1,1,1,1)\n\n X = pcoords[:, 0]\n Y = pcoords[:, 1]\n Z = pcoords[:, 2].clamp(min=1e-3)\n\n\n X_norm = 2*(X / Z)/(w-1) - 1 \n Y_norm = 2*(Y / Z)/(h-1) - 1 \n if padding_mode == 'zeros': \n X_mask = ((X_norm > 1)+(X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray\n Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n src_pixel_coords = torch.stack([X_norm, Y_norm, Variable(torch.linspace(0,d-1,d).view(1,d,1,1,1).expand(b,d,h,w,1)).type_as(X_norm)], dim=5)\n \n return src_pixel_coords\n\n\ndef inverse_warp_d(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):\n \"\"\"\n Inverse warp a source image to the target image plane.\n\n Args:\n feat: the source feature (where to sample pixels) -- [B, CH, H, W]\n depth: depth map of the target image -- [B, H, W]\n pose: 6DoF pose parameters from target to source -- [B, 6]\n intrinsics: camera intrinsic matrix -- [B, 3, 3]\n intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]\n Returns:\n Source image warped to the target image plane\n \"\"\"\n 
check_sizes(depth, 'depth', 'BDHW')\n check_sizes(pose, 'pose', 'B34')\n check_sizes(intrinsics, 'intrinsics', 'B33')\n check_sizes(intrinsics_inv, 'intrinsics', 'B33')\n \n assert(intrinsics_inv.size() == intrinsics.size())\n\n batch_size, ch, feat_height, feat_width = feat.size()\n\n cam_coords = pixel2cam(depth, intrinsics_inv) \n\n pose_mat = pose\n pose_mat = pose_mat.cuda()\n\n\n # Get projection matrix for tgt camera frame to source pixel frame\n proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]\n\n src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,D,H,W,1,3]\n\n \n projected_feat = torch.nn.functional.grid_sample(feat.unsqueeze(2), src_pixel_coords.view(batch_size,src_pixel_coords.size(1),feat_height,-1,3), padding_mode=padding_mode,align_corners=True)\n #projected_feat = torch.nn.functional.grid_sample(feat.unsqueeze(2), src_pixel_coords.view(batch_size,src_pixel_coords.size(1),feat_height,-1,3), padding_mode=padding_mode, align_corners=True,align_corners=True)\n\n return projected_feat.view(batch_size,ch,projected_feat.size(2),feat_height,feat_width,-1)#[B,CH,D,H,W,1]\n\n", "from bdb import set_trace\nimport imp\nfrom operator import is_\nimport os\nfrom re import I\nimport time\nimport csv\nimport numpy as np\nfrom path import Path\nimport argparse\nimport matplotlib.pyplot as plt\nfrom tensorboardX import SummaryWriter\nimport cv2\nimport torch\nimport torch.nn.functional as F\nfrom core.dataset import custom_transforms\nfrom core.networks.MVDNet_conf import MVDNet_conf\nfrom core.networks.MVDNet_joint import MVDNet_joint\nfrom core.networks.MVDNet_nslpn import MVDNet_nslpn\nfrom core.networks.MVDNet_prop import MVDNet_prop\nfrom core.utils.utils import load_config_file\nfrom core.networks.loss_functions import compute_errors_test_batch, compute_angles, cross_entropy\n\nfrom core.utils.logger import AverageMeter\nfrom core.dataset import SequenceFolder, NoisySequenceFolder\nimport torch.distributed as dist\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom utils import *\nfrom core.utils.utils import load_config_file, normalize_depth_for_display, vis_normal\n\nparser = argparse.ArgumentParser(description='Iterative solver for multi-view depth and normal',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('config_file', metavar='DIR', help='path to config file')\nparser.add_argument(\"--local_rank\", default=-1)\nparser.add_argument(\"--seed\", type=int, default=1, metavar='S', help='random seed')\n\nargs = parser.parse_args()\nset_random_seed(args.seed)\n\nnum_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\nis_distributed = num_gpus > 1\n\nlocal_rank = int(args.local_rank)\ncfg = load_config_file(args.config_file)\ncfg.local_rank = args.local_rank\ndevice = torch.device(local_rank)\nif is_distributed:\n torch.cuda.set_device(local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n# save writer\nsave_path = Path(cfg.output_dir)\n\ndef main(cfg):\n # os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(cfg.cuda) \n\n global n_iter\n\n # Loading data\n normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n \n valid_transform = custom_transforms.Compose([custom_transforms.ArrayToTensor(), normalize])\n if local_rank == 0 or not is_distributed:\n print(\"=> fetching scenes in '{}'\".format(cfg.dataset_path))\n \n if cfg.dataset == 
'scannet': \n test_set = SequenceFolder(cfg.dataset_path, transform=valid_transform, ttype=cfg.test_list) \n else:\n raise NotImplementedError\n\n if local_rank == 0 or not is_distributed:\n print('{} samples found in {} test scenes'.format(len(test_set), len(test_set.scenes)))\n\n if is_distributed:\n test_sampler = torch.utils.data.DistributedSampler(test_set, num_replicas=dist.get_world_size(),\n rank=dist.get_rank()) \n test_loader = torch.utils.data.DataLoader(test_set, batch_size=cfg.batch_size, shuffle=False,\n num_workers=cfg.num_workers, pin_memory=True, sampler=test_sampler)\n else:\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=cfg.batch_size, shuffle=False,\n num_workers=cfg.num_workers, pin_memory=True)\n\n # create model\n if local_rank == 0 or not is_distributed:\n print(\"=> creating model\")\n if cfg.model_name == 'MVDNet_conf':\n mvdnet = MVDNet_conf(cfg).cuda()\n elif cfg.model_name == 'MVDNet_joint':\n mvdnet = MVDNet_joint(cfg).cuda()\n elif cfg.model_name == 'MVDNet_nslpn':\n mvdnet = MVDNet_nslpn(cfg).cuda()\n elif cfg.model_name == 'MVDNet_prop':\n mvdnet = MVDNet_prop(cfg).cuda()\n else:\n raise NotImplementedError\n \n \n if not os.path.isfile(cfg.pretrained_mvdn):\n pretrained_mvdn = save_path / 'checkpoints' / sorted(os.listdir((save_path / 'checkpoints')))[-1]\n else:\n pretrained_mvdn = cfg.pretrained_mvdn\n \n if local_rank == 0 or not is_distributed:\n print(f\"=> loading weights for MVDNet: {pretrained_mvdn}\")\n weights = torch.load(pretrained_mvdn) \n mvdnet.load_state_dict(weights['state_dict'], strict=True)\n\n torch.backends.cudnn.benchmark = True\n\n mvdnet.to(device)\n if is_distributed:\n mvdnet = DDP(mvdnet,device_ids=[local_rank], output_device=local_rank) \n else:\n mvdnet = torch.nn.DataParallel(mvdnet)\n\n errors, error_names = validate_with_gt(cfg, test_loader, mvdnet)\n\n if local_rank == 0 or not is_distributed:\n\n decisive_error = errors[0]\n with open(save_path/'eval_log_summary.txt', 'a') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n writer.writerow([decisive_error, errors[1], errors[2], errors[3], errors[4], errors[5], errors[6], errors[7]])\n \n\n\ndef validate_with_gt(cfg, test_loader, mvdnet):\n batch_time = AverageMeter()\n test_error_names = ['abs_rel','abs_diff','sq_rel','rms','log_rms','a1','a2','a3', 'dconf', 'nconf', 'mean_angle']\n test_errors = AverageMeter(i=len(test_error_names))\n\n mvdnet.eval()\n\n end = time.time()\n with torch.no_grad(): \n for i, (tgt_img, ref_imgs, gt_nmap, ref_poses, intrinsics, intrinsics_inv, tgt_depth, ref_depths, tgt_id) in enumerate(test_loader):\n tgt_img_var = tgt_img.cuda()\n ref_imgs_var = [img.cuda() for img in ref_imgs]\n gt_nmap_var = gt_nmap.cuda()\n ref_poses_var = [pose.cuda() for pose in ref_poses]\n intrinsics_var = intrinsics.cuda()\n intrinsics_inv_var = intrinsics_inv.cuda()\n tgt_depth_var = tgt_depth.cuda()\n\n pose = torch.cat(ref_poses_var,1)\n if (pose != pose).any():\n continue\n \n if cfg.model_name == 'MVDNet_conf':\n outputs = mvdnet(tgt_img_var, ref_imgs_var, pose, intrinsics_var, intrinsics_inv_var)\n elif cfg.model_name == 'MVDNet_joint':\n outputs = mvdnet(tgt_img_var, ref_imgs_var, pose, tgt_depth_var, gt_nmap_var, intrinsics_var, intrinsics_inv_var)\n elif cfg.model_name == 'MVDNet_nslpn':\n outputs = mvdnet(tgt_img_var, ref_imgs_var, pose, intrinsics_var, intrinsics_inv_var)\n elif cfg.model_name == 'MVDNet_prop':\n outputs = mvdnet(tgt_img_var, ref_imgs_var, pose, tgt_depth_var, gt_nmap_var, intrinsics_var, intrinsics_inv_var)\n 
else:\n raise NotImplementedError\n \n output_depth = outputs[0].data.cpu()\n nmap = outputs[1].permute(0,2,3,1)\n dconf, nconf = outputs[-2], outputs[-1]\n \n mask = (tgt_depth <= 10) & (tgt_depth >= 0.5) & (tgt_depth == tgt_depth)\n\n if not mask.any():\n continue\n \n n = tgt_depth.shape[0]\n test_errors_ = list(compute_errors_test_batch(tgt_depth[mask], output_depth[mask]))\n gt_dconf = 1.0 - cfg.conf_dgamma * torch.abs(tgt_depth - output_depth) / (tgt_depth + 1e-6)\n dconf_e = torch.abs(dconf.cpu()[mask] - gt_dconf[mask]).mean()\n test_errors_.append(dconf_e.item())\n\n n_mask = (gt_nmap_var.permute(0,2,3,1)[0,:,:] != 0)\n n_mask = n_mask[:,:,0] | n_mask[:,:,1] | n_mask[:,:,2]\n\n total_angles_m = compute_angles(gt_nmap_var.permute(0,2,3,1)[0], nmap[0])\n gt_nconf = 1.0 - cfg.conf_ngamma * total_angles_m / 180.0\n nconf_e = torch.abs(nconf[0][n_mask] - gt_nconf[n_mask]).mean()\n test_errors_.append(nconf_e.item())\n \n mask_angles = total_angles_m[n_mask]\n total_angles_m[~ n_mask] = 0\n test_errors_.append(torch.mean(mask_angles).item())\n \n # from pdb import set_trace; set_trace()\n test_errors.update(test_errors_, n)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if (i % cfg.print_freq == 0 or i == len(test_loader)-1) and (local_rank==0 or not is_distributed):\n print('valid: Time {} Rel Error {:.4f} ({:.4f}) DConf Error {:.4f} ({:.4f}) Iter {}/{}'.format(batch_time, test_errors.val[0], test_errors.avg[0], test_errors.val[-3], test_errors.avg[-3], i, len(test_loader)))\n if cfg.save_samples:\n output_dir = Path(os.path.join(cfg.output_dir, 'evalvis'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n output_depth = output_depth.numpy()\n nmap_ = nmap.cpu().numpy()\n for picid, imgsave, normal in zip(tgt_id, output_depth, nmap_):\n # from pdb import set_trace; set_trace()\n depth_nor = normalize_depth_for_display(imgsave)\n plt.imsave(output_dir/ f'{picid}_depth.png', depth_nor)\n\n normal_n = normal / (np.linalg.norm(normal, axis=2, keepdims=True) + 1e-10)\n normal_img = ((normal_n + 1.0) / 2.0) * 255.0\n cv2.imwrite(output_dir/ f'{picid}_normal.png', normal_img[:,:,::-1].astype(np.uint8))\n\n \n if is_distributed :\n rank, world_size = get_dist_info()\n errors = merge_results_dist(test_errors, world_size, tmpdir= output_dir / 'tmpdir')\n else:\n errors = test_errors.avg\n # print(f'local{rank}',errors)\n return errors, test_error_names\n\n\nif __name__ == '__main__':\n\n n_iter = 0\n main(cfg)\n" ]
[ [ "torch.stack", "torch.linspace", "torch.ones", "torch.arange" ], [ "torch.abs", "matplotlib.pyplot.imsave", "torch.mean", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.load", "torch.cat", "torch.utils.data.DataLoader", "numpy.linalg.norm", "torch.no_grad", "torch.device", "torch.distributed.get_rank", "torch.nn.DataParallel", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
ludwig-ai/ludwig
[ "b9d95bbdb474bc22260269de1bc094bc5455f37c", "b9d95bbdb474bc22260269de1bc094bc5455f37c", "b9d95bbdb474bc22260269de1bc094bc5455f37c" ]
[ "tests/integration_tests/test_sequence_sampled_softmax.py", "ludwig/modules/mlp_mixer_modules.py", "ludwig/utils/loss_utils.py" ]
[ "# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom ludwig.experiment import experiment_cli\nfrom tests.integration_tests.utils import generate_data, run_experiment, sequence_feature\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nlogging.getLogger(\"ludwig\").setLevel(logging.INFO)\n\n\[email protected](reason=\"Issue #1333: Sequence output generation.\")\[email protected](scope=\"module\")\ndef generate_deterministic_sequence(num_records=200):\n in_vocab = [x + str(d) for x in list(\"abcde\") for d in range(1, 4)]\n\n def generate_output(x):\n letter = x[0]\n repeat = int(x[1])\n return \" \".join(repeat * letter)\n\n input_list = []\n output_list = []\n for _ in range(num_records):\n n = np.random.randint(1, 7, 1)\n input_seq = np.random.choice(in_vocab, n, replace=True)\n output_seq = [generate_output(x) for x in input_seq]\n input_list.append(\" \".join(input_seq))\n output_list.append(\" \".join(output_seq))\n\n train = {\"in_seq\": input_list, \"out_seq\": output_list}\n\n return pd.DataFrame(train)\n\n\n# testing only a subset of options to reduce test run time\n# combinations selected to test are the major tensor structures/sizes expected\n# to be encountered: AttentionWrapperState, BeamSearchDecoderState, None\[email protected](reason=\"Issue #1333: Sequence output generation.\")\[email protected](\"loss_sampler\", [\"learned_unigram\", \"fixed_unigram\", \"log_uniform\", \"uniform\"])\[email protected](\"dec_attention\", [None, \"luong\"])\[email protected](\"dec_cell_type\", [\"gru\", \"lstm\"])\[email protected](\"enc_cell_type\", [\"rnn\", \"lstm\"])\[email protected](\"enc_encoder\", [\"rnn\"])\[email protected](\"dec_beam_width\", [1, 2])\[email protected](\"dec_num_layers\", [1, 2])\ndef test_sequence_generator(\n enc_encoder,\n enc_cell_type,\n dec_cell_type,\n dec_attention,\n dec_beam_width,\n dec_num_layers,\n loss_sampler,\n generate_deterministic_sequence,\n):\n # Define input and output features\n input_features = [\n {\n \"name\": \"in_seq\",\n \"type\": \"sequence\",\n \"encoder\": enc_encoder,\n \"cell_type\": enc_cell_type,\n \"reduce_output\": None,\n }\n ]\n output_features = [\n {\n \"name\": \"out_seq\",\n \"type\": \"sequence\",\n \"cell_type\": dec_cell_type,\n \"num_layers\": dec_num_layers,\n \"beam_width\": dec_beam_width,\n \"decoder\": \"generator\",\n \"attention\": dec_attention,\n \"reduce_input\": None,\n \"loss\": {\"type\": \"sampled_softmax_cross_entropy\", \"negative_samples\": 10, \"sampler\": loss_sampler},\n }\n ]\n model_definition = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"fc_size\": 14}, # 'concat'\n \"training\": {\n \"\" \"epochs\": 2,\n \"early_stop\": 5,\n \"batch_size\": 80,\n \"optimizer\": {\"type\": \"adam\"},\n 
\"learning_rate\": 0.001,\n },\n }\n args = {\n \"config\": model_definition,\n \"skip_save_processed_input\": True,\n \"skip_save_progress\": True,\n \"skip_save_unprocessed_output\": True,\n \"skip_save_model\": True,\n \"skip_save_log\": True,\n \"debug\": False,\n }\n # Generate test data\n np.random.seed(42) # 13\n df = generate_deterministic_sequence\n\n # run the experiment\n experiment_cli(dataset=df, **args)\n\n\[email protected](reason=\"Issue #1333: Sequence output generation.\")\[email protected](\"enc_cell_type\", [\"rnn\", \"gru\", \"lstm\"])\[email protected](\"attention\", [False, True])\ndef test_sequence_tagger(enc_cell_type, attention, csv_filename):\n # Define input and output features\n input_features = [sequence_feature(max_len=10, encoder=\"rnn\", cell_type=enc_cell_type, reduce_output=None)]\n output_features = [\n sequence_feature(\n max_len=10,\n decoder=\"tagger\",\n attention=attention,\n reduce_input=None,\n )\n ]\n\n # Generate test data\n rel_path = generate_data(input_features, output_features, csv_filename)\n\n # setup sampled softmax loss\n output_features[0].update({\"loss\": {\"type\": \"sampled_softmax_cross_entropy\", \"negative_samples\": 7}})\n\n # run the experiment\n run_experiment(input_features, output_features, dataset=rel_path)\n", "# Copyright (c) 2021 Linux Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Tuple, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom ludwig.modules.activation_modules import gelu\nfrom ludwig.utils.torch_utils import LudwigModule\n\n\nclass MLP(LudwigModule):\n def __init__(\n self,\n in_features: Union[int, Tuple[int]],\n hidden_size: int,\n out_features: Union[int, Tuple[int]] = None,\n dropout: float = 0.0,\n ):\n super().__init__()\n\n out_features = out_features or in_features\n\n self._input_shape = in_features\n self._output_shape = out_features\n\n self.linear1 = nn.Linear(in_features=in_features, out_features=hidden_size)\n self.linear2 = nn.Linear(in_features=hidden_size, out_features=out_features)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n def forward(self, inputs, **kwargs):\n hidden = self.dropout1(gelu(self.linear1(inputs)))\n return self.dropout2(self.linear2(hidden))\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size([self._input_shape])\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size([self._output_shape])\n\n\nclass MixerBlock(LudwigModule):\n def __init__(self, embed_size: int, n_patches: int, token_dim: int, channel_dim: int, dropout: float = 0.0):\n super().__init__()\n self._input_shape = (n_patches, embed_size)\n self._output_shape = (n_patches, embed_size)\n\n self.mlp1 = MLP(in_features=n_patches, hidden_size=token_dim, dropout=dropout)\n\n self.mlp2 = MLP(in_features=embed_size, hidden_size=channel_dim, dropout=dropout)\n\n self.layernorm1 = nn.LayerNorm(normalized_shape=embed_size)\n 
self.layernorm2 = nn.LayerNorm(normalized_shape=embed_size)\n\n def forward(self, inputs: torch.Tensor, **kwargs):\n assert inputs.shape[1:] == self.input_shape\n\n hidden = inputs\n hidden = self.layernorm1(hidden).transpose(1, 2)\n hidden = self.mlp1(hidden).transpose(1, 2)\n\n mid = hidden + inputs\n\n hidden = self.layernorm2(mid)\n hidden = self.mlp2(hidden)\n\n output = hidden + mid\n assert output.shape[1:] == self.output_shape\n return output\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size(self._input_shape)\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size(self._output_shape)\n\n\nclass MLPMixer(LudwigModule):\n \"\"\"MLPMixer.\n\n Implements\n MLP-Mixer: An all-MLP Architecture for Vision\n https://arxiv.org/abs/2105.01601\n \"\"\"\n\n def __init__(\n self,\n img_height: int,\n img_width: int,\n in_channels: int,\n patch_size: int = 16,\n embed_size: int = 512,\n token_size: int = 2048,\n channel_dim: int = 256,\n num_layers: int = 8,\n dropout: float = 0.0,\n avg_pool: bool = True,\n ):\n super().__init__()\n assert (img_height % patch_size == 0) and (img_width % patch_size == 0)\n\n self._input_shape = (in_channels, img_height, img_width)\n n_patches = int(img_height * img_width / (patch_size ** 2))\n\n self.patch_conv = nn.Conv2d(\n in_channels=in_channels, out_channels=embed_size, kernel_size=patch_size, stride=patch_size\n )\n\n self.mixer_blocks = nn.ModuleList(\n [\n MixerBlock(\n embed_size=embed_size,\n n_patches=n_patches,\n token_dim=token_size,\n channel_dim=channel_dim,\n dropout=dropout,\n )\n for _ in range(num_layers)\n ]\n )\n\n self.layer_norm = nn.LayerNorm(normalized_shape=embed_size)\n\n self.avg_pool = avg_pool\n if self.avg_pool:\n self._output_shape = torch.Size((embed_size,))\n else:\n self._output_shape = torch.Size((n_patches, embed_size))\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n assert inputs.shape[1:] == self.input_shape\n hidden = self.patch_conv(inputs)\n hidden = hidden.flatten(2).transpose(1, 2)\n\n for mixer_block in self.mixer_blocks:\n hidden = mixer_block(hidden)\n hidden = self.layer_norm(hidden)\n\n if self.avg_pool:\n hidden = torch.mean(hidden, dim=1)\n\n assert hidden.shape[1:] == self.output_shape\n\n return hidden\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size(self._input_shape)\n\n @property\n def output_shape(self) -> torch.Size:\n return self._output_shape\n", "import torch\n\n\ndef rmspe_loss(targets: torch.Tensor, predictions: torch.Tensor) -> torch.Tensor:\n \"\"\"Root mean square percentage error.\"\"\"\n loss = torch.sqrt(torch.mean(((targets - predictions).float() / targets) ** 2))\n return loss\n\n\ndef mean_confidence_penalty(probabilities: torch.Tensor, num_classes: int) -> torch.Tensor:\n max_entropy = torch.log(torch.tensor(num_classes))\n # clipping needed for avoiding log(0) = -inf\n entropy_per_class = torch.maximum(-probabilities * torch.log(torch.clamp(probabilities, 1e-10, 1)), 0)\n entropy = torch.sum(entropy_per_class, -1)\n penalty = (max_entropy - entropy) / max_entropy\n return torch.mean(penalty)\n\n\n# # used for categorical and sequence features\n# def sample_values_from_classes(\n# labels,\n# sampler,\n# num_classes,\n# negative_samples,\n# unique,\n# class_counts,\n# distortion,\n# ):\n# \"\"\"returns sampled_values using the chosen sampler\"\"\"\n# if sampler == \"fixed_unigram\":\n# sampled_values = tf.random.fixed_unigram_candidate_sampler(\n# true_classes=labels,\n# num_true=1,\n# 
num_sampled=negative_samples,\n# unique=unique,\n# range_max=num_classes,\n# unigrams=class_counts,\n# distortion=distortion,\n# )\n# elif sampler == \"uniform\":\n# sampled_values = tf.random.uniform_candidate_sampler(\n# true_classes=labels,\n# num_true=1,\n# num_sampled=negative_samples,\n# unique=unique,\n# range_max=num_classes,\n# )\n# elif sampler == \"log_uniform\":\n# sampled_values = tf.random.log_uniform_candidate_sampler(\n# true_classes=labels,\n# num_true=1,\n# num_sampled=negative_samples,\n# unique=unique,\n# range_max=num_classes,\n# )\n# elif sampler == \"learned_unigram\":\n# sampled_values = tf.random.learned_unigram_candidate_sampler(\n# true_classes=labels,\n# num_true=1,\n# num_sampled=negative_samples,\n# unique=unique,\n# range_max=num_classes,\n# )\n# else:\n# raise ValueError(\"Unsupported sampler {}\".format(sampler))\n# return sampled_values\n\n\n#\n# # For categorical feature\n# def sampled_softmax_cross_entropy(\n# labels,\n# last_hidden,\n# num_classes=1,\n# decoder_weights=None,\n# decoder_biases=None,\n# sampler=None,\n# negative_samples=0,\n# class_counts=0,\n# distortion=1,\n# unique=False,\n# **kwargs\n# ):\n# labels = tf.cast(tf.expand_dims(labels, -1), tf.int64)\n#\n# sampled_values = sample_values_from_classes(\n# labels,\n# sampler,\n# num_classes,\n# negative_samples,\n# unique,\n# class_counts,\n# distortion,\n# )\n# train_loss = tf.nn.sampled_softmax_loss(\n# weights=tf.transpose(decoder_weights),\n# biases=decoder_biases,\n# labels=labels,\n# inputs=last_hidden,\n# num_sampled=negative_samples,\n# num_classes=num_classes,\n# sampled_values=sampled_values,\n# )\n#\n# return train_loss\n#\n#\n# # custom class to support Laplace smoothing of Fixed Unigram candidate sampler\n# # Required because of zeros returned in the true_expected_count for\n# # <PAD> and <UNK> tokens in loss['class_counts'] list\n# class FixedUnigramCandidateSampler(\n# collections.namedtuple(\n# \"FixedUnigramCandidateSampler\",\n# (\n# \"sampled_candidates\",\n# \"true_expected_count\",\n# \"sampled_expected_count\",\n# ),\n# )\n# ):\n# pass\n#\n#\n# # For sequence feature\n# def sequence_sampled_softmax_cross_entropy(\n# targets, train_logits, decoder_weights, decoder_biases, num_classes, **loss\n# ):\n# batch_max_targets_sequence_length = tf.shape(targets)[1]\n# targets_sequence_length = sequence_length_2D(tf.cast(targets, tf.int64))\n# batch_max_train_logits_sequence_length = tf.shape(train_logits)[1]\n#\n# logits_pad_len = tf.maximum(\n# 0,\n# batch_max_targets_sequence_length\n# - batch_max_train_logits_sequence_length,\n# )\n# targets_pad_len = tf.maximum(\n# 0,\n# batch_max_train_logits_sequence_length\n# - batch_max_targets_sequence_length,\n# )\n#\n# padded_logits = tf.pad(train_logits, [[0, 0], [0, logits_pad_len], [0, 0]])\n# padded_targets = tf.pad(targets, [[0, 0], [0, targets_pad_len]])\n#\n# output_exp = tf.cast(tf.reshape(padded_targets, [-1, 1]), tf.int64)\n# sampled_values = sample_values_from_classes(\n# output_exp,\n# loss[\"sampler\"],\n# num_classes,\n# loss[\"negative_samples\"],\n# loss[\"unique\"],\n# loss[\"class_counts\"],\n# loss[\"distortion\"],\n# )\n#\n# if loss[\"sampler\"] == \"fixed_unigram\":\n# # regenerate sampled_values structure for specified samplers\n# # to handle any zero values in true_expected_count tensor\n# sampled_values = FixedUnigramCandidateSampler(\n# sampled_values.sampled_candidates,\n# # add smoothing constant EPSILON to handle any zero values\n# tf.add(sampled_values.true_expected_count, EPSILON),\n# 
sampled_values.sampled_expected_count,\n# )\n#\n# def _sampled_loss(labels, logits):\n# labels = tf.cast(labels, tf.int64)\n# labels = tf.reshape(labels, [-1, 1])\n# logits = tf.cast(logits, tf.float32)\n#\n# return tf.cast(\n# tf.nn.sampled_softmax_loss(\n# weights=tf.transpose(decoder_weights),\n# biases=decoder_biases,\n# labels=labels,\n# inputs=logits,\n# num_sampled=loss[\"negative_samples\"],\n# num_classes=num_classes,\n# sampled_values=sampled_values,\n# ),\n# tf.float32,\n# )\n#\n# train_loss = tfa.seq2seq.sequence_loss(\n# padded_logits,\n# padded_targets,\n# tf.sequence_mask(\n# targets_sequence_length,\n# tf.shape(padded_targets)[1],\n# dtype=tf.float32,\n# ),\n# average_across_timesteps=True,\n# average_across_batch=False,\n# softmax_loss_function=_sampled_loss,\n# )\n#\n# return train_loss\n#\n#\n" ]
[ [ "numpy.random.choice", "numpy.random.seed", "pandas.DataFrame", "numpy.random.randint" ], [ "torch.mean", "torch.Size", "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.LayerNorm", "torch.nn.Linear" ], [ "torch.mean", "torch.clamp", "torch.sum", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nishiwen1214/GLUE-bert4keras
[ "4ac2477192471001cd2b98ec9ff329ad1c20e767" ]
[ "MRPC.py" ]
[ "#! -*- coding:utf-8 -*-\n# https://github.com/nishiwen1214/GLUE-bert4keras\n# 句子对分类任务,MRPC数据集\n# val_acc: 84.174, F1: 88.525\n\nimport numpy as np\nfrom bert4keras.backend import keras, set_gelu, K\nfrom bert4keras.tokenizers import Tokenizer\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.optimizers import Adam\nfrom bert4keras.snippets import sequence_padding, DataGenerator\nfrom bert4keras.snippets import open\nfrom keras.layers import Dropout, Dense\nfrom sklearn import metrics\nimport numpy as np\nfrom tqdm import tqdm\nimport csv\nimport os\n# 使用第二张GPU卡,'0'为第一张\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nset_gelu('tanh') # 切换gelu版本\n\nmaxlen = 128\nbatch_size = 32\nepochs = 10\nlr = 2e-5\n\nconfig_path = './uncased_L-12_H-768_A-12/bert_config.json'\ncheckpoint_path = './uncased_L-12_H-768_A-12/bert_model.ckpt'\ndict_path = './uncased_L-12_H-768_A-12/vocab.txt'\n\n\ndef load_data(filename):\n \"\"\"加载数据\n 单条格式:(文本1, 文本2, 标签id)\n \"\"\"\n D = []\n i = 1\n with open(filename, encoding='utf-8') as f:\n for l in f:\n if i == 1: # 跳过数据第一行\n i = 2\n else:\n label,_,_, text1, text2 = l.strip().split('\\t')\n D.append((text1, text2, int(label)))\n return D\n\n\ndef load_data_test(filename):\n \"\"\"加载test数据\n 单条格式:(文本1, 文本2, label)\n \"\"\"\n D = []\n i = 1\n with open(filename, encoding='utf-8') as f:\n for l in f:\n if i == 1: # 跳过数据第一行\n i = 2\n else:\n _,_,_, text1, text2 = l.strip().split('\\t')\n D.append((text1, text2, 0))\n return D\n\n# 加载数据集\ntrain_data = load_data(\n './datasets/MRPC/msr_paraphrase_train.txt'\n)\nvalid_data = load_data(\n './datasets/MRPC/msr_paraphrase_test.txt'\n)\n\n# 建立分词器\ntokenizer = Tokenizer(dict_path, do_lower_case=True)\n\n\nclass data_generator(DataGenerator):\n \"\"\"数据生成器\n \"\"\"\n def __iter__(self, random=False):\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n for is_end, (text1, text2, label) in self.sample(random):\n token_ids, segment_ids = tokenizer.encode(\n text1, text2, maxlen=maxlen\n )\n batch_token_ids.append(token_ids)\n batch_segment_ids.append(segment_ids)\n batch_labels.append([label])\n if len(batch_token_ids) == self.batch_size or is_end:\n batch_token_ids = sequence_padding(batch_token_ids)\n batch_segment_ids = sequence_padding(batch_segment_ids)\n batch_labels = sequence_padding(batch_labels)\n yield [batch_token_ids, batch_segment_ids], batch_labels\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n\n\n# 加载预训练模型\nbert = build_transformer_model(\n config_path=config_path,\n checkpoint_path=checkpoint_path,\n with_pool=True,\n return_keras_model=False,\n)\n\noutput = Dropout(rate=0.1)(bert.model.output)\noutput = Dense(\n units=2, activation='softmax', kernel_initializer=bert.initializer\n)(output)\n\nmodel = keras.models.Model(bert.model.input, output)\nmodel.summary()\n\nmodel.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=Adam(lr), # 用足够小的学习率\n metrics=['accuracy'],\n)\n\n# 转换数据集\ntrain_generator = data_generator(train_data, batch_size)\nvalid_generator = data_generator(valid_data, batch_size)\n\ndef evaluate(data):\n total, right = 0., 0.\n y_true_all = np.array([], dtype=int)\n y_pred_all = np.array([], dtype=int)\n for x_true, y_true in data:\n y_pred = model.predict(x_true).argmax(axis=1)\n y_true = y_true[:, 0]\n y_pred_all = np.append(y_pred_all, y_pred)\n y_true_all = np.append(y_true_all, y_true)\n total += len(y_true)\n right += (y_true == y_pred).sum()\n f1 = metrics.f1_score(y_true_all,y_pred_all)\n return right / total, f1\n\n\nclass 
Evaluator(keras.callbacks.Callback):\n \"\"\"评估与保存\n \"\"\"\n def __init__(self):\n self.best_val_acc = 0.\n\n def on_epoch_end(self, epoch, logs=None):\n val_acc, f1 = evaluate(valid_generator)\n if val_acc > self.best_val_acc:\n self.best_val_acc = val_acc\n model.save_weights('best_model_MRPC.weights')\n print(\n u'val_acc: %.5f, best_val_acc: %.5f, F1: %.5f\\n' %\n (val_acc, self.best_val_acc, f1)\n )\n\ndef test_predict(in_file, out_file):\n \"\"\"输出测试结果到文件\n 结果文件可以提交到 https://gluebenchmark.com 评测。\n \"\"\"\n test_data = load_data_test(in_file)\n test_generator = data_generator(test_data, batch_size)\n\n results = []\n for x_true, _ in tqdm(test_generator, ncols=0):\n y_pred = model.predict(x_true).argmax(axis=1)\n results.extend(y_pred)\n \n with open(out_file,'w',encoding='utf-8') as f:\n csv_writer = csv.writer(f, delimiter='\\t')\n csv_writer.writerow([\"index\",\"prediction\"])\n # 写入tsv文件内容\n for i, pred in enumerate(results):\n csv_writer.writerow([i,pred])\n # 关闭文件\n f.close()\n \nif __name__ == '__main__':\n\n evaluator = Evaluator()\n\n model.fit(\n train_generator.forfit(),\n steps_per_epoch=len(train_generator),\n epochs=epochs,\n callbacks=[evaluator]\n )\n model.load_weights('best_model_MRPC.weights')\n # 预测测试集,输出到结果文件\n test_predict(\n in_file = './datasets/MRPC/test.tsv',\n out_file = './results/MRPC.tsv'\n )\n \nelse:\n\n model.load_weights('best_model_MRPC.weights')\n" ]
[ [ "sklearn.metrics.f1_score", "numpy.array", "numpy.append" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hammer-wang/FOCAL-ICLR
[ "4d19149f86acc1d6b987c93cdd3a9d957535c5e3", "4d19149f86acc1d6b987c93cdd3a9d957535c5e3" ]
[ "rlkit/launchers/launcher_util.py", "rlkit/torch/brac/utils.py" ]
[ "import json\nimport os\nimport os.path as osp\nimport shutil\nimport pickle\nimport random\nimport sys\nimport time\nimport uuid\nimport click\nfrom collections import namedtuple\n\nimport __main__ as main\nimport datetime\nimport dateutil.tz\nimport numpy as np\n\nfrom rlkit.core import logger\nfrom rlkit.launchers import config\nfrom rlkit.torch.pytorch_util import set_gpu_mode\n\nGitInfo = namedtuple('GitInfo', ['code_diff', 'commit_hash', 'branch_name'])\n\n\ndef recursive_items(dictionary):\n \"\"\"\n Get all (key, item) recursively in a potentially recursive dictionary.\n Usage:\n\n ```\n x = {\n 'foo' : {\n 'bar' : 5\n }\n }\n recursive_items(x)\n # output:\n # ('foo', {'bar' : 5})\n # ('bar', 5)\n ```\n :param dictionary:\n :return:\n \"\"\"\n for key, value in dictionary.items():\n yield key, value\n if type(value) is dict:\n yield from recursive_items(value)\n\n\ndef create_mounts(\n mode,\n base_log_dir,\n sync_interval=180,\n local_input_dir_to_mount_point_dict=None,\n):\n if local_input_dir_to_mount_point_dict is None:\n local_input_dir_to_mount_point_dict = {}\n else:\n raise NotImplementedError(\"TODO(vitchyr): Implement this\")\n\n mounts = [m for m in CODE_MOUNTS]\n for dir, mount_point in local_input_dir_to_mount_point_dict.items():\n mounts.append(mount.MountLocal(\n local_dir=dir,\n mount_point=mount_point,\n pythonpath=False,\n ))\n\n if mode != 'local':\n for m in NON_CODE_MOUNTS:\n mounts.append(m)\n\n if mode == 'ec2':\n output_mount = mount.MountS3(\n s3_path='',\n mount_point=config.OUTPUT_DIR_FOR_DOODAD_TARGET,\n output=True,\n sync_interval=sync_interval,\n )\n elif mode == 'local':\n output_mount = mount.MountLocal(\n local_dir=base_log_dir,\n mount_point=None, # For purely local mode, skip mounting.\n output=True,\n )\n elif mode == 'local_docker':\n output_mount = mount.MountLocal(\n local_dir=base_log_dir,\n mount_point=config.OUTPUT_DIR_FOR_DOODAD_TARGET,\n output=True,\n )\n else:\n raise NotImplementedError(\"Mode not supported: {}\".format(mode))\n mounts.append(output_mount)\n return mounts\n\n\ndef save_experiment_data(dictionary, log_dir):\n with open(log_dir + '/experiment.pkl', 'wb') as handle:\n pickle.dump(dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef run_experiment_here(\n experiment_function,\n variant=None,\n exp_id=0,\n seed=0,\n use_gpu=True,\n # Logger params:\n exp_prefix=\"default\",\n snapshot_mode='last',\n snapshot_gap=1,\n git_info=None,\n script_name=None,\n base_log_dir=None,\n log_dir=None,\n):\n \"\"\"\n Run an experiment locally without any serialization.\n\n :param experiment_function: Function. `variant` will be passed in as its\n only argument.\n :param exp_prefix: Experiment prefix for the save file.\n :param variant: Dictionary passed in to `experiment_function`.\n :param exp_id: Experiment ID. Should be unique across all\n experiments. Note that one experiment may correspond to multiple seeds,.\n :param seed: Seed used for this experiment.\n :param use_gpu: Run with GPU. By default False.\n :param script_name: Name of the running script\n :param log_dir: If set, set the log directory to this. 
Otherwise,\n the directory will be auto-generated based on the exp_prefix.\n :return:\n \"\"\"\n if variant is None:\n variant = {}\n variant['exp_id'] = str(exp_id)\n\n if seed is None and 'seed' not in variant:\n seed = random.randint(0, 100000)\n variant['seed'] = str(seed)\n reset_execution_environment()\n\n actual_log_dir = setup_logger(\n exp_prefix=exp_prefix,\n variant=variant,\n exp_id=exp_id,\n seed=seed,\n snapshot_mode=snapshot_mode,\n snapshot_gap=snapshot_gap,\n base_log_dir=base_log_dir,\n log_dir=log_dir,\n git_info=git_info,\n script_name=script_name,\n )\n\n set_seed(seed)\n set_gpu_mode(use_gpu)\n\n run_experiment_here_kwargs = dict(\n variant=variant,\n exp_id=exp_id,\n seed=seed,\n use_gpu=use_gpu,\n exp_prefix=exp_prefix,\n snapshot_mode=snapshot_mode,\n snapshot_gap=snapshot_gap,\n git_info=git_info,\n script_name=script_name,\n base_log_dir=base_log_dir,\n )\n save_experiment_data(\n dict(\n run_experiment_here_kwargs=run_experiment_here_kwargs\n ),\n actual_log_dir\n )\n return experiment_function(variant)\n\n\ndef create_exp_name(exp_prefix, exp_id=0, seed=0):\n \"\"\"\n Create a semi-unique experiment name that has a timestamp\n :param exp_prefix:\n :param exp_id:\n :return:\n \"\"\"\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\n return \"%s_%s_%04d--s-%d\" % (exp_prefix, timestamp, exp_id, seed)\n\n\ndef create_simple_exp_name():\n \"\"\"\n Create a unique experiment name with a timestamp\n \"\"\"\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\n return timestamp\n\n\ndef create_log_dir(exp_prefix, exp_id=None, seed=None, base_log_dir=None):\n \"\"\"\n Creates and returns a unique log directory.\n\n :param exp_prefix: name of log directory\n :param exp_id: name of experiment category (e.g. 
the env)\n :return:\n \"\"\"\n if base_log_dir is None:\n base_log_dir = config.LOCAL_LOG_DIR\n exp_name = exp_id\n if exp_name is None:\n exp_name = create_simple_exp_name()\n if seed is not None:\n log_dir = os.path.join(base_log_dir, exp_prefix.replace(\"_\", \"-\"), exp_name + f\"_seed{seed}\")\n else:\n log_dir = os.path.join(base_log_dir, exp_prefix.replace(\"_\", \"-\"), exp_name)\n os.makedirs(log_dir, exist_ok=True)\n return log_dir\n\n\ndef setup_logger(\n exp_prefix=\"default\",\n exp_id=0,\n seed=None,\n variant=None,\n base_log_dir=None,\n text_log_file=\"debug.log\",\n variant_log_file=\"variant.json\",\n tabular_log_file=\"progress.csv\",\n snapshot_mode=\"last\",\n snapshot_gap=1,\n log_tabular_only=False,\n log_dir=None,\n git_info=None,\n script_name=None,\n):\n \"\"\"\n Set up logger to have some reasonable default settings.\n\n Will save log output to\n\n based_log_dir/exp_prefix/exp_name.\n\n exp_name will be auto-generated to be unique.\n\n If log_dir is specified, then that directory is used as the output dir.\n\n :param exp_prefix: The sub-directory for this specific experiment.\n :param exp_id: The number of the specific experiment run within this\n experiment.\n :param variant:\n :param base_log_dir: The directory where all log should be saved.\n :param text_log_file:\n :param variant_log_file:\n :param tabular_log_file:\n :param snapshot_mode:\n :param log_tabular_only:\n :param snapshot_gap:\n :param log_dir:\n :param git_info:\n :param script_name: If set, save the script name to this.\n :return:\n \"\"\"\n first_time = log_dir is None\n if first_time:\n log_dir = create_log_dir(exp_prefix, exp_id=exp_id, seed=seed,\n base_log_dir=base_log_dir)\n\n if variant is not None:\n logger.log(\"Variant:\")\n logger.log(json.dumps(dict_to_safe_json(variant), indent=2))\n variant_log_path = osp.join(log_dir, variant_log_file)\n logger.log_variant(variant_log_path, variant)\n\n tabular_log_path = osp.join(log_dir, tabular_log_file)\n text_log_path = osp.join(log_dir, text_log_file)\n\n logger.add_text_output(text_log_path)\n if first_time:\n logger.add_tabular_output(tabular_log_path)\n else:\n logger._add_output(tabular_log_path, logger._tabular_outputs,\n logger._tabular_fds, mode='a')\n for tabular_fd in logger._tabular_fds:\n logger._tabular_header_written.add(tabular_fd)\n logger.set_snapshot_dir(log_dir)\n logger.set_snapshot_mode(snapshot_mode)\n logger.set_snapshot_gap(snapshot_gap)\n logger.set_log_tabular_only(log_tabular_only)\n exp_name = log_dir.split(\"/\")[-1]\n logger.push_prefix(\"[%s] \" % exp_name)\n\n if git_info is not None:\n code_diff, commit_hash, branch_name = git_info\n if code_diff is not None:\n with open(osp.join(log_dir, \"code.diff\"), \"w\") as f:\n f.write(code_diff)\n with open(osp.join(log_dir, \"git_info.txt\"), \"w\") as f:\n f.write(\"git hash: {}\".format(commit_hash))\n f.write('\\n')\n f.write(\"git branch name: {}\".format(branch_name))\n if script_name is not None:\n with open(osp.join(log_dir, \"script_name.txt\"), \"w\") as f:\n f.write(script_name)\n return log_dir\n\n\ndef dict_to_safe_json(d):\n \"\"\"\n Convert each value in the dictionary into a JSON'able primitive.\n :param d:\n :return:\n \"\"\"\n new_d = {}\n for key, item in d.items():\n if safe_json(item):\n new_d[key] = item\n else:\n if isinstance(item, dict):\n new_d[key] = dict_to_safe_json(item)\n else:\n new_d[key] = str(item)\n return new_d\n\n\ndef safe_json(data):\n if data is None:\n return True\n elif isinstance(data, (bool, int, float)):\n return True\n 
elif isinstance(data, (tuple, list)):\n return all(safe_json(x) for x in data)\n elif isinstance(data, dict):\n return all(isinstance(k, str) and safe_json(v) for k, v in data.items())\n return False\n\n\ndef set_seed(seed):\n \"\"\"\n Set the seed for all the possible random number generators.\n\n :param seed:\n :return: None\n \"\"\"\n seed = int(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef reset_execution_environment():\n \"\"\"\n Call this between calls to separate experiments.\n :return:\n \"\"\"\n import importlib\n importlib.reload(logger)\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for offline RL.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport datetime\nimport re\n\nimport numpy as np\n\n\nimport torch\nimport torch\n\ndef clip_by_eps(x, spec, eps=0.0):\n return torch.clamp(\n x, min=spec.minimum + eps, max=spec.maximum - eps)\n\n# TODO: add customized gradient\ndef clip_v2(x, low, high):\n \"\"\"Clipping with modified gradient behavior.\"\"\"\n value = torch.min(torch.max(x, low * torch.ones_like((x))), high * torch.ones_like(x))\n# def grad(dy):\n# if_y_pos = torch.gt(dy, 0.0).type(torch.float32)\n# if_x_g_low = torch.gt(x, low).type(torch.float32)\n# if_x_l_high = torch.le(x, high).type(torch.float32)\n# return (if_y_pos * if_x_g_low +\n# (1.0 - if_y_pos) * if_x_l_high) * dy\n# return value, grad\n return value\n\n\n# class clip_v2(torch.autograd.Function):\n# @staticmethod\n# def forward(ctx, x):\n# ctx.save_for_backward(x)\n# return torch.min(torch.max(x, 0. * torch.ones_like((x))), 500. 
* torch.ones_like(x))\n# @staticmethod\n# def backward(ctx, grad_output):\n# x, = ctx.saved_tensors\n# grad_cpy = grad_output.clone()\n# if_y_pos = torch.gt(grad_cpy, 0.0).type(torch.float32)\n# if_x_g_low = torch.gt(x, 0.).type(torch.float32)\n# if_x_l_high = torch.le(x, 500.).type(torch.float32)\n# return (if_y_pos * if_x_g_low +\n# (1.0 - if_y_pos) * if_x_l_high) * grad_cpy\n\ndef soft_relu(x):\n \"\"\"Compute log(1 + exp(x)).\"\"\"\n # Note: log(sigmoid(x)) = x - soft_relu(x) = - soft_relu(-x).\n # log(1 - sigmoid(x)) = - soft_relu(x)\n return torch.log(1.0 + torch.exp(-torch.abs(x))) + torch.max(x, torch.zeros_like(x))\n\n" ]
[ [ "numpy.random.seed" ], [ "torch.abs", "torch.clamp", "torch.zeros_like", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ronner1234/BERT-for-IBC-TF1
[ "e2b8f628974017df159ab50ba615aeb2ea1d363c" ]
[ "data/generate_political_corpus_test.py" ]
[ "import pandas as pd\nimport spacy\nimport numpy as np\n\nnlp=spacy.load(\"en_core_web_md\") # load sentence tokenzation\n\ninput_data=pd.read_csv(\"ideological_books_corpus.csv\", header=None, sep=\"@\", names=['label', 'sentence'])\n\nprint(input_data)\n\nmapping = {'Liberal': 1, 'Conservative': 2, 'Neutral': 3}\n\noutput = input_data.replace({'label': mapping})\n\nprint(output)\n\noutput.to_csv(\"idc_raw.csv\", sep='@', index=False)\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
balakhonoff/catalyst
[ "82d904aee97045efbaef3963e36c2ce5173ddac4", "82d904aee97045efbaef3963e36c2ce5173ddac4", "82d904aee97045efbaef3963e36c2ce5173ddac4", "82d904aee97045efbaef3963e36c2ce5173ddac4" ]
[ "catalyst/contrib/scripts/find_thresholds.py", "catalyst/dl/utils/torch.py", "catalyst/contrib/utils/visualization.py", "catalyst/data/scripts/split_dataframe.py" ]
[ "from typing import Any, Callable, Dict, List, Tuple\nimport argparse\nfrom itertools import repeat\nimport json\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.special import expit\nfrom sklearn import metrics\nfrom sklearn.model_selection import RepeatedStratifiedKFold\n\nfrom catalyst import utils\n\nBINARY_PER_CLASS_METRICS = [\n \"accuracy_score\",\n \"precision_score\",\n \"recall_score\",\n \"f1_score\",\n \"roc_auc_score\",\n]\n\nRANK_METRICS = [\n \"ndcg_score\",\n \"coverage_error\",\n \"label_ranking_loss\",\n \"label_ranking_average_precision_score\",\n]\n\n\ndef build_args(parser):\n \"\"\"Constructs the command-line arguments.\"\"\"\n parser.add_argument(\n \"--in-csv\",\n type=Path,\n help=\"Path to .csv with labels column\",\n required=True,\n )\n parser.add_argument(\n \"--in-label-column\",\n type=str,\n help=\"Column to get labels\",\n required=False,\n default=\"labels\",\n )\n parser.add_argument(\n \"--in-npy\",\n type=Path,\n help=\"Path to .npy with class logits\",\n required=True,\n )\n parser.add_argument(\n \"--out-thresholds\",\n type=Path,\n help=\"Path to save .json with thresholds\",\n required=True,\n )\n\n parser.add_argument(\n \"--metric\",\n type=str,\n help=\"Metric to use\",\n required=False,\n choices=BINARY_PER_CLASS_METRICS,\n default=\"roc_auc_score\",\n )\n # parser.add_argument(\n # \"--ignore-label\", type=int,\n # required=False,\n # default=None\n # )\n parser.add_argument(\n \"--num-splits\", type=int, help=\"NUM_SPLITS\", required=False, default=5\n )\n parser.add_argument(\n \"--num-repeats\",\n type=int,\n help=\"NUM_REPEATS\",\n required=False,\n default=1,\n )\n parser.add_argument(\n \"--num-workers\",\n type=int,\n help=\"CPU pool size\",\n required=False,\n default=1,\n )\n\n utils.boolean_flag(parser, \"verbose\", default=False)\n utils.boolean_flag(parser, \"sigmoid\", default=False)\n\n return parser\n\n\ndef parse_args():\n \"\"\"Parses the command line arguments for the main method.\"\"\"\n parser = argparse.ArgumentParser()\n build_args(parser)\n args = parser.parse_args()\n return args\n\n\ndef get_binary_labels(labels: np.array, label: int, ignore_label: int = None):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n binary_labels = labels == label\n if ignore_label is not None:\n binary_labels[labels == ignore_label] = 0\n return (binary_labels).astype(int)\n\n\ndef find_best_split_threshold(\n y_pred: np.array, y_true: np.array, metric: Callable,\n):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n thresholds = np.linspace(0.0, 1.0, num=100)\n metric_values = []\n for t in thresholds:\n predictions = (y_pred >= t).astype(int)\n if sum(predictions) > 0:\n metric_values.append(metric(y_true, predictions))\n else:\n metric_values.append(0.0)\n\n best_threshold = thresholds[np.argmax(metric_values)]\n return best_threshold\n\n\ndef find_best_threshold(\n y_pred: np.ndarray,\n y_true: np.ndarray,\n metric_fn: Callable = metrics.roc_auc_score,\n num_splits: int = 5,\n num_repeats: int = 1,\n random_state: int = 42,\n):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n rkf = RepeatedStratifiedKFold(\n n_splits=num_splits, n_repeats=num_repeats, random_state=random_state\n )\n fold_thresholds = []\n fold_metrics = {k: [] for k in BINARY_PER_CLASS_METRICS}\n\n for train_index, test_index in rkf.split(y_true, y_true):\n y_pred_train, y_pred_test = y_pred[train_index], y_pred[test_index]\n y_true_train, y_true_test = y_true[train_index], y_true[test_index]\n\n best_threshold = find_best_split_threshold(\n y_pred_train, y_true_train, metric=metric_fn\n )\n best_predictions = (y_pred_test >= best_threshold).astype(int)\n\n for metric_name in BINARY_PER_CLASS_METRICS:\n try:\n metric_value = metrics.__dict__[metric_name](\n y_true_test, best_predictions\n )\n except ValueError:\n metric_value = 0.0\n\n fold_metrics[metric_name].append(metric_value)\n fold_thresholds.append(best_threshold)\n\n fold_best_threshold = np.mean(fold_thresholds)\n for metric_name in fold_metrics:\n fold_metrics[metric_name] = np.mean(fold_metrics[metric_name])\n\n return fold_best_threshold, fold_metrics\n\n\ndef wrap_find_best_threshold(args: Tuple[Any]):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n class_id, function_args = args[0], args[1:]\n threshold, metrics = find_best_threshold(*function_args)\n return class_id, threshold, metrics\n\n\ndef optimize_thresholds(\n predictions: np.ndarray,\n labels: np.ndarray,\n classes: List[int],\n metric_fn: Callable = metrics.roc_auc_score,\n num_splits: int = 5,\n num_repeats: int = 1,\n num_workers: int = 0,\n ignore_label: int = None,\n) -> Tuple[Dict, Dict]:\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n pool = utils.get_pool(num_workers)\n\n predictions_ = predictions.copy()\n\n predictions_list, labels_list = [], []\n for cls in classes:\n predictions_list.append(predictions_[:, cls])\n labels_list.append(\n get_binary_labels(labels, cls, ignore_label=ignore_label)\n )\n\n results = utils.tqdm_parallel_imap(\n wrap_find_best_threshold,\n zip(\n classes,\n predictions_list,\n labels_list,\n repeat(metric_fn),\n repeat(num_splits),\n repeat(num_repeats),\n ),\n pool,\n )\n results = [(r[1], r[2]) for r in sorted(results, key=lambda x: x[0])]\n\n result_thresholds = [r[0] for r in results]\n result_metrics = [r[1] for r in results]\n class_thresholds = {c: t for (c, t) in zip(classes, result_thresholds)}\n class_metrics = {c: m for (c, m) in zip(classes, result_metrics)}\n return class_thresholds, class_metrics\n\n\ndef get_model_confidences(\n confidences: np.ndarray,\n thresholds: Dict[int, float] = None,\n classes: List[int] = None,\n):\n \"\"\"\n @TODO: Docs (add description). Contribution is welcome\n\n Args:\n confidences (np.ndarray): model predictions of shape\n [dataset_len; class_confidences]\n thresholds (Dict[int, float]): thresholds for each class\n classes (List[int]): classes of interest for evaluation\n \"\"\"\n if classes is not None:\n classes = np.array(classes)\n confidences = confidences[:, classes]\n\n confidences_th = confidences.copy()\n if thresholds is not None:\n assert confidences.shape[1] == len(thresholds)\n thresholds = np.array(list(thresholds.values()))\n confidences_th = confidences - thresholds\n\n return confidences_th\n\n\ndef score_model_coverage(confidences: np.ndarray, labels: np.ndarray):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n candidates = np.argsort(-confidences, axis=1)\n confidences = -np.sort(-confidences, axis=1)\n candidates[confidences < 0] = -1\n labels = labels[:, None]\n\n coverage_metrics = {}\n\n for top_k in [1, 3, 5]:\n metric = (candidates[:, :top_k] == labels).sum(axis=1).mean()\n coverage_metrics[f\"Recall@{top_k:02d}\"] = metric\n\n return coverage_metrics\n\n\ndef _sort_dict_by_keys(disordered: Dict):\n key = lambda item: item[0]\n sorted_dict = {k: v for k, v in sorted(disordered.items(), key=key)}\n return sorted_dict\n\n\ndef _save_json(dct: Dict, outpath: Path, suffix: str = None):\n outpath = str(outpath)\n if suffix is not None:\n outpath = outpath.replace(\".json\", f\"{suffix}.json\")\n dct = _sort_dict_by_keys({str(k): v for k, v in dct.copy().items()})\n with open(outpath, \"w\") as fout:\n json.dump(dct, fout, ensure_ascii=False, indent=4)\n\n\ndef main(args, _=None):\n \"\"\"Run ``catalyst-contrib find-thresholds`` script.\"\"\"\n predictions = expit(np.load(args.in_npy))\n if args.sigmoid:\n predictions = expit(predictions)\n labels = pd.read_csv(args.in_csv)[args.in_label_column].values\n classes = list(set(labels)) # - set([args.ignore_label]))\n\n assert args.metric in metrics.__dict__.keys()\n metric_fn = metrics.__dict__[args.metric]\n\n class_thresholds, class_metrics = optimize_thresholds(\n predictions=predictions,\n labels=labels,\n classes=classes,\n metric_fn=metric_fn,\n num_splits=args.num_splits,\n num_repeats=args.num_repeats,\n ignore_label=None, # args.ignore_label,\n num_workers=args.num_workers,\n )\n _save_json(class_thresholds, outpath=args.out_thresholds)\n\n class_metrics[\"_mean\"] = {\n key_metric: np.mean(\n [\n class_metrics[key_class][key_metric]\n for key_class in class_metrics.keys()\n ]\n )\n for key_metric in BINARY_PER_CLASS_METRICS\n }\n\n _save_json(class_metrics, args.out_thresholds, suffix=\".class.metrics\")\n\n if args.verbose:\n print(\"CLASS METRICS\")\n pprint(class_metrics)\n print(\"CLASS THRESHOLDS\")\n pprint(class_thresholds)\n\n labels_scores = np.zeros(predictions.shape)\n labels_scores[:, labels] = 1.0\n for class_thresholds_ in [None, class_thresholds]:\n thresholds_used = class_thresholds_ is not None\n\n confidences = get_model_confidences(\n confidences=predictions,\n thresholds=class_thresholds_,\n classes=classes,\n )\n\n rank_metrics = {\n key: metrics.__dict__[key](labels_scores, confidences)\n for key in RANK_METRICS\n }\n postfix = (\n \".rank.metrics\"\n if not thresholds_used\n else \".rank.metrics.thresholds\"\n )\n _save_json(rank_metrics, args.out_thresholds, suffix=postfix)\n\n coverage_metrics = score_model_coverage(confidences, labels)\n postfix = (\n \".coverage.metrics.json\"\n if not thresholds_used\n else \".coverage.metrics.thresholds.json\"\n )\n _save_json(coverage_metrics, args.out_thresholds, suffix=postfix)\n\n if args.verbose:\n print(\n \"RANK METRICS\"\n if not thresholds_used\n else \"RANK METRICS WITH THRESHOLD\"\n )\n pprint(rank_metrics)\n print(\n \"COVERAGE METRICS\"\n if not thresholds_used\n else \"COVERAGE METRICS WITH THRESHOLD\"\n )\n pprint(coverage_metrics)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n", "from typing import Callable, Iterable\n\nimport torch\nfrom torch.utils.data.dataloader import default_collate as default_collate_fn\n\nfrom catalyst.data import ListDataset\n\n\ndef get_loader(\n data_source: Iterable[dict],\n open_fn: Callable,\n dict_transform: Callable = None,\n sampler=None,\n collate_fn: Callable = 
default_collate_fn,\n batch_size: int = 32,\n num_workers: int = 4,\n shuffle: bool = False,\n drop_last: bool = False,\n):\n \"\"\"Creates a DataLoader from given source and its open/transform params.\n\n Args:\n data_source (Iterable[dict]): and iterable containing your\n data annotations,\n (for example path to images, labels, bboxes, etc)\n open_fn (Callable): function, that can open your\n annotations dict and\n transfer it to data, needed by your network\n (for example open image by path, or tokenize read string)\n dict_transform (callable): transforms to use on dict\n (for example normalize image, add blur, crop/resize/etc)\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset\n collate_fn (callable, optional): merges a list of samples to form a\n mini-batch of Tensor(s). Used when using batched loading from a\n map-style dataset\n batch_size (int, optional): how many samples per batch to load\n num_workers (int, optional): how many subprocesses to use for data\n loading. ``0`` means that the data will be loaded\n in the main process\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: ``False``).\n drop_last (bool, optional): set to ``True`` to drop\n the last incomplete batch, if the dataset size is not divisible\n by the batch size. If ``False`` and the size of dataset\n is not divisible by the batch size, then the last batch\n will be smaller. (default: ``False``)\n\n Returns:\n DataLoader with ``catalyst.data.ListDataset``\n \"\"\"\n dataset = ListDataset(\n list_data=data_source, open_fn=open_fn, dict_transform=dict_transform,\n )\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n sampler=sampler,\n collate_fn=collate_fn,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=shuffle,\n pin_memory=torch.cuda.is_available(),\n drop_last=drop_last,\n )\n return loader\n\n\n__all__ = [\"get_loader\"]\n", "import itertools\n\nimport numpy as np\n\nfrom .image import tensor_from_rgb_image\n\n\ndef plot_confusion_matrix(\n cm,\n class_names=None,\n normalize=False,\n title=\"confusion matrix\",\n fname=None,\n show=True,\n figsize=12,\n fontsize=32,\n colormap=\"Blues\",\n):\n \"\"\"\n Render the confusion matrix and return matplotlib\"s figure with it.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n import matplotlib\n\n matplotlib.use(\"Agg\")\n import matplotlib.pyplot as plt\n\n plt.ioff()\n\n cmap = plt.cm.__dict__[colormap]\n\n if class_names is None:\n class_names = [str(i) for i in range(len(np.diag(cm)))]\n\n if normalize:\n cm = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]\n\n plt.rcParams.update(\n {\"font.size\": int(fontsize / np.log2(len(class_names)))}\n )\n\n f = plt.figure(figsize=(figsize, figsize))\n plt.title(title)\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.colorbar()\n\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45, ha=\"right\")\n\n plt.yticks(tick_marks, class_names)\n\n fmt = \".2f\" if normalize else \"d\"\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n\n if fname is not None:\n plt.savefig(fname=fname)\n\n if show:\n plt.show()\n\n return f\n\n\ndef render_figure_to_tensor(figure):\n 
\"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n import matplotlib\n\n matplotlib.use(\"Agg\")\n import matplotlib.pyplot as plt\n\n plt.ioff()\n\n figure.canvas.draw()\n\n image = np.array(figure.canvas.renderer._renderer)\n plt.close(figure)\n del figure\n\n image = tensor_from_rgb_image(image)\n return image\n\n\n__all__ = [\"plot_confusion_matrix\", \"render_figure_to_tensor\"]\n", "import argparse\nimport json\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom catalyst.utils import folds_to_list, split_dataframe\n\n\ndef build_args(parser):\n \"\"\"Constructs the command-line arguments for\n ``catalyst-data split-dataframe``.\n \"\"\"\n parser.add_argument(\n \"--in-csv\",\n type=Path,\n dest=\"in_csv\",\n help=\"Path to the csv to split\",\n required=True,\n )\n parser.add_argument(\n \"-n\", \"--num-folds\", type=int, default=5, help=\"Number of result folds\"\n )\n parser.add_argument(\n \"-t\",\n \"--train-folds\",\n type=str,\n dest=\"train_folds\",\n help=\"Numbers separated by commas. They represent train folds\",\n required=True,\n )\n parser.add_argument(\n \"-v\",\n \"--valid-folds\",\n type=str,\n dest=\"valid_folds\",\n default=None,\n help=\"Numbers separated by commas. They represent valid folds\",\n )\n parser.add_argument(\n \"-i\",\n \"--infer-folds\",\n type=str,\n dest=\"infer_folds\",\n default=None,\n help=\"Numbers separated by commas. They represent infer folds\",\n )\n\n parser.add_argument(\n \"--out-csv\",\n type=str,\n help=\"Output CSV path for train and valid parts\",\n required=True,\n )\n\n parser.add_argument(\n \"--tag2class\",\n type=str,\n default=None,\n help=\"Path to YAML or JSON of label mappings\",\n )\n parser.add_argument(\n \"--tag-column\",\n type=str,\n default=None,\n dest=\"tag_column\",\n help=\"Column of labels (works in pair with `--tag2class` flag)\",\n )\n parser.add_argument(\n \"--class-column\",\n type=str,\n default=None,\n dest=\"class_column\",\n help=\"Column of classes\",\n )\n\n parser.add_argument(\n \"--seed\", type=int, default=42, help=\"Random seed for split folds\"\n )\n\n return parser\n\n\ndef parse_args():\n \"\"\"Parses the command line arguments for the main method.\"\"\"\n parser = argparse.ArgumentParser()\n build_args(parser)\n args, uargs = parser.parse_known_args()\n return args, uargs\n\n\ndef main(args, uargs=None):\n \"\"\"Run the ``catalyst-data split-dataframe`` script.\"\"\"\n dataframe = pd.read_csv(args.in_csv)\n\n train_folds = (\n folds_to_list(args.train_folds)\n if args.train_folds is not None\n else None\n )\n valid_folds = (\n folds_to_list(args.valid_folds)\n if args.valid_folds is not None\n else None\n )\n infer_folds = (\n folds_to_list(args.infer_folds)\n if args.infer_folds is not None\n else None\n )\n\n tag2class = (\n json.load(open(args.tag2class)) if args.tag2class is not None else None\n )\n\n df_all, train, valid, infer = split_dataframe(\n dataframe,\n train_folds=train_folds,\n valid_folds=valid_folds,\n infer_folds=infer_folds,\n tag2class=tag2class,\n tag_column=args.tag_column,\n class_column=args.class_column,\n seed=args.seed,\n n_folds=args.num_folds,\n )\n\n out_csv: str = args.out_csv\n if out_csv.endswith(\".csv\"):\n out_csv = out_csv[:-4]\n\n df_all.to_csv(f\"{out_csv}.csv\", index=False)\n train.to_csv(f\"{out_csv}_train.csv\", index=False)\n valid.to_csv(f\"{out_csv}_valid.csv\", index=False)\n infer.to_csv(f\"{out_csv}_infer.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n args, uargs = parse_args()\n main(args, uargs)\n" ]
[ [ "sklearn.metrics.__dict__.keys", "pandas.read_csv", "numpy.linspace", "scipy.special.expit", "numpy.load", "numpy.sort", "sklearn.model_selection.RepeatedStratifiedKFold", "numpy.mean", "numpy.argmax", "numpy.argsort", "numpy.array", "numpy.zeros" ], [ "torch.cuda.is_available" ], [ "numpy.diag", "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ioff", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
raviddoss/ActivityNet
[ "eba0ec905d831802e131ecae6fea58d376da49dd" ]
[ "Evaluation/ava/per_image_evaluation.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Evaluate Object Detection result on a single image.\n\nAnnotate each detected result as true positives or false positive according to\na predefined IOU ratio. Non Maximum Supression is used by default. Multi class\ndetection is supported by default.\nBased on the settings, per image evaluation is either performed on boxes or\non object masks.\n\"\"\"\nimport numpy as np\n\nimport np_box_list\nimport np_box_list_ops\nimport np_box_mask_list\nimport np_box_mask_list_ops\n\n\nclass PerImageEvaluation(object):\n \"\"\"Evaluate detection result of a single image.\"\"\"\n\n def __init__(self,\n num_groundtruth_classes,\n matching_iou_threshold=0.5):\n \"\"\"Initialized PerImageEvaluation by evaluation parameters.\n\n Args:\n num_groundtruth_classes: Number of ground truth object classes\n matching_iou_threshold: A ratio of area intersection to union, which is\n the threshold to consider whether a detection is true positive or not\n \"\"\"\n self.matching_iou_threshold = matching_iou_threshold\n self.num_groundtruth_classes = num_groundtruth_classes\n\n def compute_object_detection_metrics(\n self, detected_boxes, detected_scores, detected_class_labels,\n groundtruth_boxes, groundtruth_class_labels,\n groundtruth_is_difficult_list, groundtruth_is_group_of_list,\n detected_masks=None, groundtruth_masks=None):\n \"\"\"Evaluates detections as being tp, fp or ignored from a single image.\n\n The evaluation is done in two stages:\n 1. All detections are matched to non group-of boxes; true positives are\n determined and detections matched to difficult boxes are ignored.\n 2. Detections that are determined as false positives are matched against\n group-of boxes and ignored if matched.\n\n Args:\n detected_boxes: A float numpy array of shape [N, 4], representing N\n regions of detected object regions.\n Each row is of the format [y_min, x_min, y_max, x_max]\n detected_scores: A float numpy array of shape [N, 1], representing\n the confidence scores of the detected N object instances.\n detected_class_labels: A integer numpy array of shape [N, 1], repreneting\n the class labels of the detected N object instances.\n groundtruth_boxes: A float numpy array of shape [M, 4], representing M\n regions of object instances in ground truth\n groundtruth_class_labels: An integer numpy array of shape [M, 1],\n representing M class labels of object instances in ground truth\n groundtruth_is_difficult_list: A boolean numpy array of length M denoting\n whether a ground truth box is a difficult instance or not\n groundtruth_is_group_of_list: A boolean numpy array of length M denoting\n whether a ground truth box has group-of tag\n detected_masks: (optional) A uint8 numpy array of shape\n [N, height, width]. 
If not None, the metrics will be computed based\n on masks.\n groundtruth_masks: (optional) A uint8 numpy array of shape\n [M, height, width].\n\n Returns:\n scores: A list of C float numpy arrays. Each numpy array is of\n shape [K, 1], representing K scores detected with object class\n label c\n tp_fp_labels: A list of C boolean numpy arrays. Each numpy array\n is of shape [K, 1], representing K True/False positive label of\n object instances detected with class label c\n \"\"\"\n detected_boxes, detected_scores, detected_class_labels, detected_masks = (\n self._remove_invalid_boxes(detected_boxes, detected_scores,\n detected_class_labels, detected_masks))\n scores, tp_fp_labels = self._compute_tp_fp(\n detected_boxes=detected_boxes,\n detected_scores=detected_scores,\n detected_class_labels=detected_class_labels,\n groundtruth_boxes=groundtruth_boxes,\n groundtruth_class_labels=groundtruth_class_labels,\n groundtruth_is_difficult_list=groundtruth_is_difficult_list,\n groundtruth_is_group_of_list=groundtruth_is_group_of_list,\n detected_masks=detected_masks,\n groundtruth_masks=groundtruth_masks)\n\n return scores, tp_fp_labels\n\n def _compute_tp_fp(self, detected_boxes, detected_scores,\n detected_class_labels, groundtruth_boxes,\n groundtruth_class_labels, groundtruth_is_difficult_list,\n groundtruth_is_group_of_list,\n detected_masks=None, groundtruth_masks=None):\n \"\"\"Labels true/false positives of detections of an image across all classes.\n\n Args:\n detected_boxes: A float numpy array of shape [N, 4], representing N\n regions of detected object regions.\n Each row is of the format [y_min, x_min, y_max, x_max]\n detected_scores: A float numpy array of shape [N, 1], representing\n the confidence scores of the detected N object instances.\n detected_class_labels: A integer numpy array of shape [N, 1], repreneting\n the class labels of the detected N object instances.\n groundtruth_boxes: A float numpy array of shape [M, 4], representing M\n regions of object instances in ground truth\n groundtruth_class_labels: An integer numpy array of shape [M, 1],\n representing M class labels of object instances in ground truth\n groundtruth_is_difficult_list: A boolean numpy array of length M denoting\n whether a ground truth box is a difficult instance or not\n groundtruth_is_group_of_list: A boolean numpy array of length M denoting\n whether a ground truth box has group-of tag\n detected_masks: (optional) A np.uint8 numpy array of shape\n [N, height, width]. If not None, the scores will be computed based\n on masks.\n groundtruth_masks: (optional) A np.uint8 numpy array of shape\n [M, height, width].\n\n Returns:\n result_scores: A list of float numpy arrays. Each numpy array is of\n shape [K, 1], representing K scores detected with object class\n label c\n result_tp_fp_labels: A list of boolean numpy array. 
Each numpy array is of\n shape [K, 1], representing K True/False positive label of object\n instances detected with class label c\n\n Raises:\n ValueError: If detected masks is not None but groundtruth masks are None,\n or the other way around.\n \"\"\"\n if detected_masks is not None and groundtruth_masks is None:\n raise ValueError(\n 'Detected masks is available but groundtruth masks is not.')\n if detected_masks is None and groundtruth_masks is not None:\n raise ValueError(\n 'Groundtruth masks is available but detected masks is not.')\n\n result_scores = []\n result_tp_fp_labels = []\n for i in range(self.num_groundtruth_classes):\n groundtruth_is_difficult_list_at_ith_class = (\n groundtruth_is_difficult_list[groundtruth_class_labels == i])\n groundtruth_is_group_of_list_at_ith_class = (\n groundtruth_is_group_of_list[groundtruth_class_labels == i])\n (gt_boxes_at_ith_class, gt_masks_at_ith_class,\n detected_boxes_at_ith_class, detected_scores_at_ith_class,\n detected_masks_at_ith_class) = self._get_ith_class_arrays(\n detected_boxes, detected_scores, detected_masks,\n detected_class_labels, groundtruth_boxes, groundtruth_masks,\n groundtruth_class_labels, i)\n scores, tp_fp_labels = self._compute_tp_fp_for_single_class(\n detected_boxes=detected_boxes_at_ith_class,\n detected_scores=detected_scores_at_ith_class,\n groundtruth_boxes=gt_boxes_at_ith_class,\n groundtruth_is_difficult_list=\n groundtruth_is_difficult_list_at_ith_class,\n groundtruth_is_group_of_list=\n groundtruth_is_group_of_list_at_ith_class,\n detected_masks=detected_masks_at_ith_class,\n groundtruth_masks=gt_masks_at_ith_class)\n result_scores.append(scores)\n result_tp_fp_labels.append(tp_fp_labels)\n return result_scores, result_tp_fp_labels\n\n def _get_overlaps_and_scores_box_mode(\n self,\n detected_boxes,\n detected_scores,\n groundtruth_boxes,\n groundtruth_is_group_of_list):\n \"\"\"Computes overlaps and scores between detected and groudntruth boxes.\n\n Args:\n detected_boxes: A numpy array of shape [N, 4] representing detected box\n coordinates\n detected_scores: A 1-d numpy array of length N representing classification\n score\n groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth\n box coordinates\n groundtruth_is_group_of_list: A boolean numpy array of length M denoting\n whether a ground truth box has group-of tag. If a groundtruth box\n is group-of box, every detection matching this box is ignored.\n\n Returns:\n iou: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If\n gt_non_group_of_boxlist.num_boxes() == 0 it will be None.\n ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes]. 
If\n gt_group_of_boxlist.num_boxes() == 0 it will be None.\n scores: The score of the detected boxlist.\n num_boxes: Number of non-maximum suppressed detected boxes.\n \"\"\"\n detected_boxlist = np_box_list.BoxList(detected_boxes)\n detected_boxlist.add_field('scores', detected_scores)\n gt_non_group_of_boxlist = np_box_list.BoxList(\n groundtruth_boxes[~groundtruth_is_group_of_list])\n iou = np_box_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist)\n scores = detected_boxlist.get_field('scores')\n num_boxes = detected_boxlist.num_boxes()\n return iou, None, scores, num_boxes\n\n def _compute_tp_fp_for_single_class(\n self, detected_boxes, detected_scores, groundtruth_boxes,\n groundtruth_is_difficult_list, groundtruth_is_group_of_list,\n detected_masks=None, groundtruth_masks=None):\n \"\"\"Labels boxes detected with the same class from the same image as tp/fp.\n\n Args:\n detected_boxes: A numpy array of shape [N, 4] representing detected box\n coordinates\n detected_scores: A 1-d numpy array of length N representing classification\n score\n groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth\n box coordinates\n groundtruth_is_difficult_list: A boolean numpy array of length M denoting\n whether a ground truth box is a difficult instance or not. If a\n groundtruth box is difficult, every detection matching this box\n is ignored.\n groundtruth_is_group_of_list: A boolean numpy array of length M denoting\n whether a ground truth box has group-of tag. If a groundtruth box\n is group-of box, every detection matching this box is ignored.\n detected_masks: (optional) A uint8 numpy array of shape\n [N, height, width]. If not None, the scores will be computed based\n on masks.\n groundtruth_masks: (optional) A uint8 numpy array of shape\n [M, height, width].\n\n Returns:\n Two arrays of the same size, containing all boxes that were evaluated as\n being true positives or false positives; if a box matched to a difficult\n box or to a group-of box, it is ignored.\n\n scores: A numpy array representing the detection scores.\n tp_fp_labels: a boolean numpy array indicating whether a detection is a\n true positive.\n \"\"\"\n if detected_boxes.size == 0:\n return np.array([], dtype=float), np.array([], dtype=bool)\n\n (iou, _, scores,\n num_detected_boxes) = self._get_overlaps_and_scores_box_mode(\n detected_boxes=detected_boxes,\n detected_scores=detected_scores,\n groundtruth_boxes=groundtruth_boxes,\n groundtruth_is_group_of_list=groundtruth_is_group_of_list)\n\n if groundtruth_boxes.size == 0:\n return scores, np.zeros(num_detected_boxes, dtype=bool)\n\n tp_fp_labels = np.zeros(num_detected_boxes, dtype=bool)\n is_matched_to_difficult_box = np.zeros(num_detected_boxes, dtype=bool)\n is_matched_to_group_of_box = np.zeros(num_detected_boxes, dtype=bool)\n\n # The evaluation is done in two stages:\n # 1. All detections are matched to non group-of boxes; true positives are\n # determined and detections matched to difficult boxes are ignored.\n # 2. 
Detections that are determined as false positives are matched against\n # group-of boxes and ignored if matched.\n\n # Tp-fp evaluation for non-group of boxes (if any).\n if iou.shape[1] > 0:\n groundtruth_nongroup_of_is_difficult_list = groundtruth_is_difficult_list[\n ~groundtruth_is_group_of_list]\n max_overlap_gt_ids = np.argmax(iou, axis=1)\n is_gt_box_detected = np.zeros(iou.shape[1], dtype=bool)\n for i in range(num_detected_boxes):\n gt_id = max_overlap_gt_ids[i]\n if iou[i, gt_id] >= self.matching_iou_threshold:\n if not groundtruth_nongroup_of_is_difficult_list[gt_id]:\n if not is_gt_box_detected[gt_id]:\n tp_fp_labels[i] = True\n is_gt_box_detected[gt_id] = True\n else:\n is_matched_to_difficult_box[i] = True\n\n return scores[~is_matched_to_difficult_box\n & ~is_matched_to_group_of_box], tp_fp_labels[\n ~is_matched_to_difficult_box\n & ~is_matched_to_group_of_box]\n\n def _get_ith_class_arrays(self, detected_boxes, detected_scores,\n detected_masks, detected_class_labels,\n groundtruth_boxes, groundtruth_masks,\n groundtruth_class_labels, class_index):\n \"\"\"Returns numpy arrays belonging to class with index `class_index`.\n\n Args:\n detected_boxes: A numpy array containing detected boxes.\n detected_scores: A numpy array containing detected scores.\n detected_masks: A numpy array containing detected masks.\n detected_class_labels: A numpy array containing detected class labels.\n groundtruth_boxes: A numpy array containing groundtruth boxes.\n groundtruth_masks: A numpy array containing groundtruth masks.\n groundtruth_class_labels: A numpy array containing groundtruth class\n labels.\n class_index: An integer index.\n\n Returns:\n gt_boxes_at_ith_class: A numpy array containing groundtruth boxes labeled\n as ith class.\n gt_masks_at_ith_class: A numpy array containing groundtruth masks labeled\n as ith class.\n detected_boxes_at_ith_class: A numpy array containing detected boxes\n corresponding to the ith class.\n detected_scores_at_ith_class: A numpy array containing detected scores\n corresponding to the ith class.\n detected_masks_at_ith_class: A numpy array containing detected masks\n corresponding to the ith class.\n \"\"\"\n selected_groundtruth = (groundtruth_class_labels == class_index)\n gt_boxes_at_ith_class = groundtruth_boxes[selected_groundtruth]\n if groundtruth_masks is not None:\n gt_masks_at_ith_class = groundtruth_masks[selected_groundtruth]\n else:\n gt_masks_at_ith_class = None\n selected_detections = (detected_class_labels == class_index)\n detected_boxes_at_ith_class = detected_boxes[selected_detections]\n detected_scores_at_ith_class = detected_scores[selected_detections]\n if detected_masks is not None:\n detected_masks_at_ith_class = detected_masks[selected_detections]\n else:\n detected_masks_at_ith_class = None\n return (gt_boxes_at_ith_class, gt_masks_at_ith_class,\n detected_boxes_at_ith_class, detected_scores_at_ith_class,\n detected_masks_at_ith_class)\n\n def _remove_invalid_boxes(self, detected_boxes, detected_scores,\n detected_class_labels, detected_masks=None):\n \"\"\"Removes entries with invalid boxes.\n\n A box is invalid if either its xmax is smaller than its xmin, or its ymax\n is smaller than its ymin.\n\n Args:\n detected_boxes: A float numpy array of size [num_boxes, 4] containing box\n coordinates in [ymin, xmin, ymax, xmax] format.\n detected_scores: A float numpy array of size [num_boxes].\n detected_class_labels: A int32 numpy array of size [num_boxes].\n detected_masks: A uint8 numpy array of size [num_boxes, height, 
width].\n\n Returns:\n valid_detected_boxes: A float numpy array of size [num_valid_boxes, 4]\n containing box coordinates in [ymin, xmin, ymax, xmax] format.\n valid_detected_scores: A float numpy array of size [num_valid_boxes].\n valid_detected_class_labels: A int32 numpy array of size\n [num_valid_boxes].\n valid_detected_masks: A uint8 numpy array of size\n [num_valid_boxes, height, width].\n \"\"\"\n valid_indices = np.logical_and(detected_boxes[:, 0] < detected_boxes[:, 2],\n detected_boxes[:, 1] < detected_boxes[:, 3])\n detected_boxes = detected_boxes[valid_indices]\n detected_scores = detected_scores[valid_indices]\n detected_class_labels = detected_class_labels[valid_indices]\n if detected_masks is not None:\n detected_masks = detected_masks[valid_indices]\n return [\n detected_boxes, detected_scores, detected_class_labels, detected_masks\n ]\n" ]
[ [ "numpy.array", "numpy.logical_and", "numpy.zeros", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zrobertson466920/RL_Baselines_BCO
[ "0287305a926864e6c685a9c46aa2b9094da1e213" ]
[ "enjoy_double_Q.py" ]
[ "import gym\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nimport numpy as np\nimport random\nfrom matplotlib import pyplot as plt\nimport pickle\nfrom tensorflow import convert_to_tensor\nimport tensorflow as tf\n\n# CARTPOLE GAME SETTINGS\nOBSERVATION_SPACE_DIMS = 4\nACTION_SPACE = [0, 1]\n\n# AGENT/NETWORK HYPERPARAMETERS\nEPSILON_INITIAL = 0.5 # exploration rate\nEPSILON_DECAY = 0.99\nEPSILON_MIN = 0.01\nALPHA = 0.001 # learning rate\nGAMMA = 0.99 # discount factor\nTAU = 0.1 # target network soft update hyperparameter\nBETA = 0.0 # Threshold for accepting off-policy actions\nEXPERIENCE_REPLAY_BATCH_SIZE = 32\nAGENT_MEMORY_LIMIT = 10000\nMIN_MEMORY_FOR_EXPERIENCE_REPLAY = 500\n\n\ndef create_dqn():\n # not actually that deep\n nn = Sequential()\n nn.add(Dense(64, input_dim=OBSERVATION_SPACE_DIMS, activation='relu'))\n nn.add(Dense(64, activation='relu'))\n nn.add(Dense(len(ACTION_SPACE), activation='linear'))\n nn.compile(loss='mse', optimizer=Adam(lr=ALPHA))\n return nn\n\n\ndef create_bc():\n # not actually that deep\n nn = Sequential()\n nn.add(Dense(64, input_dim=OBSERVATION_SPACE_DIMS, activation='relu'))\n nn.add(Dense(64, activation='relu'))\n nn.add(Dense(len(ACTION_SPACE), activation='softmax'))\n nn.compile(loss='categorical_crossentropy', optimizer=Adam(lr=ALPHA))\n return nn\n\n\nclass DoubleDQNAgent(object):\n\n def __init__(self):\n self.memory = []\n self.online_network = create_dqn()\n self.target_network = create_dqn()\n # Create clone agent\n self.bc_network = create_bc()\n self.threshold = 0\n self.epsilon = EPSILON_INITIAL\n self.has_talked = False\n\n def act(self, state):\n if self.epsilon > np.random.rand():\n # explore\n return np.random.choice(ACTION_SPACE)\n else:\n # exploit\n state = self._reshape_state_for_net(state)\n q_values = self.online_network.predict(state)[0]\n return np.argmax(q_values)\n\n def select_action(self, next_state):\n\n # Select action according to policy with probability (1-eps)\n # otherwise, select random action\n if np.random.uniform(0, 1) > 0.001:\n q_val = self.online_network.predict(next_state)[0]\n ratio = self.bc_network.predict(next_state)[0] / np.max(self.bc_network.predict(next_state)[0])\n mask = np.ma.masked_where(ratio >= BETA, ratio).mask\n # Use large negative number to mask actions from argmax\n return np.argmax(q_val*mask)\n else:\n return np.random.randint(len(ACTION_SPACE))\n\n # Note: this performs an update for BCQ\n def experience_replay(self):\n\n minibatch = random.sample(self.memory, EXPERIENCE_REPLAY_BATCH_SIZE)\n minibatch_new_q_values = []\n\n # 4. Sample mini-batch M of N transitions (s,a,r,s') from B\n for experience in minibatch:\n state, action, reward, next_state, done = experience\n state = self._reshape_state_for_net(state)\n experience_new_q_values = self.online_network.predict(state)[0]\n if done:\n q_update = reward\n else:\n next_state = self._reshape_state_for_net(next_state)\n # using online network to SELECT action\n # 5. 
a' = argmax_{a' | pi(a'|s') / max_{a} pi(a | s') > tau} Q(s',a') <-- Implement this\n #online_net_selected_action = np.argmax(self.online_network.predict(next_state))\n online_net_selected_action = self.select_action(next_state)\n # using target network to EVALUATE action\n target_net_evaluated_q_value = self.target_network.predict(next_state)[0][online_net_selected_action]\n # Build the target prediction for 6.\n q_update = reward + GAMMA * target_net_evaluated_q_value\n experience_new_q_values[action] = q_update\n # Collect into mini-batch for 6.\n minibatch_new_q_values.append(experience_new_q_values)\n minibatch_states = np.array([e[0] for e in minibatch])\n minibatch_new_q_values = np.array(minibatch_new_q_values)\n # 6. Update parameters for batch (r + gamma * Q_target - Q_online)\n self.online_network.fit(minibatch_states, minibatch_new_q_values, verbose=False, epochs=1)\n # 7. Update parameters for (s,a,_,_) in batch for log(pi(a | s)) <-- Implement this\n self.bc_update()\n\n def bc_update(self):\n\n minibatch = random.sample(self.memory, EXPERIENCE_REPLAY_BATCH_SIZE)\n #minibatch_new_q_values = []\n minibatch_actions = []\n\n # 4. Sample mini-batch M of N transitions (s,a,r,s') from B\n for experience in minibatch:\n state, action, reward, next_state, done = experience\n state = self._reshape_state_for_net(state)\n #experience_new_q_values[action] = q_update\n #experience_actions = action\n # Collect into mini-batch for 6.\n #minibatch_new_q_values.append(experience_new_q_values)\n minibatch_actions.append(action)\n minibatch_states = np.array([e[0] for e in minibatch])\n #minibatch_new_q_values = np.array(minibatch_new_q_values)\n minibatch_actions = np.array(minibatch_actions)\n # 6. Update parameters for batch (r + gamma * Q_target - Q_online)\n # self.online_network.fit(minibatch_states, minibatch_new_q_values, verbose=False, epochs=1)\n # 7. 
Update parameters for (s,a,_,_) in batch for log(pi(a | s)) <-- Implement this\n\n self.bc_network.fit(minibatch_states,to_categorical(minibatch_actions, len(ACTION_SPACE)), verbose = False, epochs = 1)\n\n def update_target_network(self):\n q_network_theta = self.online_network.get_weights()\n target_network_theta = self.target_network.get_weights()\n counter = 0\n for q_weight, target_weight in zip(q_network_theta, target_network_theta):\n target_weight = target_weight * (1 - TAU) + q_weight * TAU\n target_network_theta[counter] = target_weight\n counter += 1\n self.target_network.set_weights(target_network_theta)\n\n def remember(self, state, action, reward, next_state, done):\n if len(self.memory) <= AGENT_MEMORY_LIMIT:\n experience = (state, action, reward, next_state, done)\n self.memory.append(experience)\n else:\n experience = (state, action, reward, next_state, done)\n self.memory = self.memory[1:] + [experience]\n\n def update_epsilon(self):\n self.epsilon = max(self.epsilon * EPSILON_DECAY, EPSILON_MIN)\n\n def _reshape_state_for_net(self, state):\n return np.reshape(state, (1, OBSERVATION_SPACE_DIMS))\n\n\ndef test_agent(flag = False):\n env = gym.make('CartPole-v0')\n env.seed(1)\n trials = []\n NUMBER_OF_TRIALS = 1\n MAX_TRAINING_EPISODES = 100\n MAX_STEPS_PER_EPISODE = 200\n\n for trial_index in range(NUMBER_OF_TRIALS):\n agent = DoubleDQNAgent()\n trial_episode_scores = []\n\n for episode_index in range(1, MAX_TRAINING_EPISODES + 1):\n state = env.reset()\n episode_score = 0\n\n for _ in range(MAX_STEPS_PER_EPISODE):\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n episode_score += reward\n agent.remember(state, action, reward, next_state, done)\n state = next_state\n if len(agent.memory) > MIN_MEMORY_FOR_EXPERIENCE_REPLAY:\n if not flag:\n agent.experience_replay()\n agent.update_target_network()\n agent.bc_update()\n else:\n agent.bc_update()\n if done:\n break\n\n trial_episode_scores.append(episode_score)\n agent.update_epsilon()\n last_100_avg = np.mean(trial_episode_scores[-100:])\n print('E %d scored %d, avg %.2f' % (episode_index, episode_score, last_100_avg))\n if len(trial_episode_scores) >= 100 and last_100_avg >= 195.0:\n print('Trial %d solved in %d episodes!' 
% (trial_index, (episode_index - 100)))\n break\n\n # Save experience replay buffer\n with open(\"cart_pole_experience_expert_replay.pkl\", \"wb\") as fp: # Pickling\n pickle.dump(agent.memory, fp)\n trials.append(np.array(trial_episode_scores))\n return np.array(trials)\n\n\n# Loads experience replay buffer and uses that for training\n# Evaluates in the environment\ndef test_BCQ_agent(flag = False):\n env = gym.make('CartPole-v0')\n env.seed(1)\n trials = []\n NUMBER_OF_TRIALS = 1\n MAX_TRAINING_EPISODES = 100\n MAX_STEPS_PER_EPISODE = 200\n\n for trial_index in range(NUMBER_OF_TRIALS):\n agent = DoubleDQNAgent()\n trial_episode_scores = []\n\n for episode_index in range(1, MAX_TRAINING_EPISODES + 1):\n state = env.reset()\n episode_score = 0\n with open(\"cart_pole_experience_expert_replay.pkl\", \"rb\") as fp: # Unpickling\n agent.memory = pickle.load(fp)\n\n for _ in range(MAX_STEPS_PER_EPISODE):\n if len(agent.memory) > MIN_MEMORY_FOR_EXPERIENCE_REPLAY:\n if not flag:\n agent.experience_replay()\n agent.update_target_network()\n agent.bc_update()\n else:\n agent.bc_update()\n\n '''indices = np.random.choice(len(agent.memory), 100)\n count = 0\n for index in indices:\n state, action, reward, next_state, done = agent.memory[index]\n state = agent._reshape_state_for_net(state)\n q_values = agent.online_network.predict(state)[0]\n pred_action = np.argmax(q_values)\n count += (pred_action == action) / 100\n print(count)'''\n\n for _ in range(MAX_STEPS_PER_EPISODE):\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n episode_score += reward\n #agent.remember(state, action, reward, next_state, done)\n state = next_state\n if len(agent.memory) > MIN_MEMORY_FOR_EXPERIENCE_REPLAY:\n if not flag:\n agent.experience_replay()\n agent.update_target_network()\n agent.bc_update()\n else:\n agent.bc_update()\n if done:\n break\n\n trial_episode_scores.append(episode_score)\n agent.update_epsilon()\n last_100_avg = np.mean(trial_episode_scores[-100:])\n print('E %d scored %d, avg %.2f' % (episode_index, episode_score, last_100_avg))\n if len(trial_episode_scores) >= 100 and last_100_avg >= 195.0:\n print('Trial %d solved in %d episodes!' 
% (trial_index, (episode_index - 100)))\n break\n\n # Save experience replay buffer\n with open(\"cart_pole_experience_replay.pkl\", \"wb\") as fp: # Pickling\n pickle.dump(agent.memory, fp)\n trials.append(np.array(trial_episode_scores))\n return np.array(trials)\n\n\n# Loads experience replay buffer and uses that for training\n# Evaluates in the environment\ndef offline_BCQ_agent(flag = False):\n NUMBER_OF_TRIALS = 1\n MAX_TRAINING_EPISODES = 100\n MAX_STEPS_PER_EPISODE = 200\n\n for trial_index in range(NUMBER_OF_TRIALS):\n agent = DoubleDQNAgent()\n\n for episode_index in range(1, MAX_TRAINING_EPISODES + 1):\n with open(\"cart_pole_experience_expert_replay.pkl\", \"rb\") as fp: # Unpickling\n agent.memory = pickle.load(fp)\n\n for _ in range(MAX_STEPS_PER_EPISODE):\n if len(agent.memory) > MIN_MEMORY_FOR_EXPERIENCE_REPLAY:\n if not flag:\n agent.experience_replay()\n agent.update_target_network()\n agent.bc_update()\n else:\n agent.bc_update()\n\n indices = np.random.choice(len(agent.memory), 100)\n count = 0\n for index in indices:\n state, action, reward, next_state, done = agent.memory[index]\n state = agent._reshape_state_for_net(state)\n q_values = agent.online_network.predict(state)[0]\n pred_action = np.argmax(q_values)\n count += (pred_action == action) / 100\n print(count)\n\n return\n\n\ndef plot_trials(trials):\n _, axis = plt.subplots()\n\n for i, trial in enumerate(trials):\n steps_till_solve = trial.shape[0] - 100\n # stop trials at 2000 steps\n if steps_till_solve < 1900:\n bar_color = 'b'\n bar_label = steps_till_solve\n else:\n bar_color = 'r'\n bar_label = 'Stopped at 2000'\n plt.bar(np.arange(i, i + 1), steps_till_solve, 0.5, color=bar_color, align='center', alpha=0.5)\n axis.text(i - .25, steps_till_solve + 20, bar_label, color=bar_color)\n\n plt.ylabel('Episodes Till Solve')\n plt.xlabel('Trial')\n trial_labels = [str(i + 1) for i in range(len(trials))]\n plt.xticks(np.arange(len(trials)), trial_labels)\n # remove y axis labels and ticks\n axis.yaxis.set_major_formatter(plt.NullFormatter())\n plt.tick_params(axis='both', left='off')\n\n plt.title('Double DQN CartPole v-0 Trials')\n plt.show()\n\n\ndef plot_individual_trial(trial):\n plt.plot(trial)\n plt.ylabel('Steps in Episode')\n plt.xlabel('Episode')\n plt.title('BCQ CartPole v-0 Steps in Select Trial')\n plt.show()\n\n\nif __name__ == '__main__':\n trials = offline_BCQ_agent(flag = False)\n #print(list(trials[0]))\n #np.save('cartpole_BCQ_beta_0.5_trials.npy', trials)\n #trials = np.load('cartpole_BCQ_beta_0.5_trials.npy')\n #plot_trials(trials)\n #plot_individual_trial(trials[0])" ]
[ [ "matplotlib.pyplot.title", "numpy.random.choice", "numpy.reshape", "numpy.arange", "numpy.ma.masked_where", "matplotlib.pyplot.subplots", "matplotlib.pyplot.plot", "matplotlib.pyplot.NullFormatter", "numpy.random.uniform", "numpy.argmax", "numpy.mean", "numpy.random.rand", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anjunhu/inter-rel-net
[ "c25fbee3ef4607a492e37728a80323137d83e368" ]
[ "src/datasets/YMJA.py" ]
[ "import pandas as pd\nimport numpy as np\nimport os\nimport random\nimport glob\n\nfrom misc import data_io\n\nDATA_DIR = 'data/YMJA/'\n\n\"\"\" Folder structure\naction/\n clip0_positions.json\n clip1_positions.json\n clip2_positions.json\n \nEx: DATA_DIR + 'Tripping/_2017-11-06-det-van-home15.json'\n\"\"\"\n\nFOLD_MECH = 'Uniform' # Can either be Random or Uniform\nNUM_FOLDS = 5\nACTIONS = ['No_penalty', 'Holding', 'Hooking', 'Slashing', 'Tripping'] # ['No_penalty', 'Cross_Checking', 'Hi_sticking', 'Holding', 'Hooking', 'Interference', 'Roughing', 'Slashing', 'Tripping']\n\n# Determine folds once for entire directory\nFOLDS = []\nFILES = []\nnextint = 0\nfor subdir, dirs, files in os.walk(DATA_DIR):\n for file in files:\n if file.endswith(\".json\"):\n FILES.append(os.path.join(subdir, file))\n if FOLD_MECH == 'Random':\n FOLDS.append(random.randint(0, NUM_FOLDS))\n else: # Uniform distribution\n FOLDS.append(nextint)\n nextint = (nextint + 1) % NUM_FOLDS\n\ndef get_ground_truth(data_dir=DATA_DIR):\n action_lst = []\n for file in FILES:\n penalty_class = file.split(\"/\")[2]\n action_lst.append(ACTIONS.index(penalty_class))\n \n dataframe_dict = {'fold': FOLDS,\n 'path': FILES,\n 'action': action_lst}\n\n ground_truth = pd.DataFrame(dataframe_dict)\n return ground_truth\n\ndef get_folds():\n folds = np.arange(NUM_FOLDS)\n \n return folds\n\ndef get_train_gt(fold_num):\n if fold_num < 0 or fold_num > NUM_FOLDS:\n raise ValueError(\"fold_num must be within 0 and \" + NUM_FOLDS + \", value entered: \"+str(fold_num))\n \n ground_truth = get_ground_truth()\n gt_split = ground_truth[ground_truth.fold != fold_num]\n \n return gt_split\n\ndef get_val_gt(fold_num):\n if fold_num < 0 or fold_num > NUM_FOLDS:\n raise ValueError(\"fold_num must be within 0 and \" + NUM_FOLDS + \", value entered: \"+str(fold_num))\n \n ground_truth = get_ground_truth()\n gt_split = ground_truth[ground_truth.fold == fold_num]\n \n return gt_split\n\ndef get_train(fold_num, **kwargs):\n if fold_num < 0 or fold_num > NUM_FOLDS:\n raise ValueError(\"fold_num must be within 0 and \" + NUM_FOLDS + \", value entered: \"+str(fold_num))\n \n ground_truth = get_ground_truth()\n gt_split = ground_truth[ground_truth.fold != fold_num]\n \n X, Y = data_io.get_data(gt_split, pose_style='YMJA', **kwargs)\n \n return X, Y\n \ndef get_val(fold_num, **kwargs):\n if fold_num < 0 or fold_num > NUM_FOLDS:\n raise ValueError(\"fold_num must be within 0 and \" + NUM_FOLDS + \", value entered: \"+str(fold_num))\n \n ground_truth = get_ground_truth()\n gt_split = ground_truth[ground_truth.fold == fold_num]\n \n X, Y = data_io.get_data(gt_split, pose_style='YMJA', **kwargs)\n \n return X, Y\n\n" ]
[ [ "numpy.arange", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
amitbcp/tsai-vision
[ "14a66d4c3295714fdcc97db13804ffba9d6f06cc" ]
[ "assignment_6/src/test.py" ]
[ "import torch\nimport torch.nn.functional as F\n\n\ndef test(model, device, test_loader, test_acc, test_losses):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n # sum up batch loss\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n # get the index of the max log-probability\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n test_losses.append(test_loss)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n test_acc.append(100. * correct / len(test_loader.dataset))\n\n" ]
[ [ "torch.no_grad", "torch.nn.functional.nll_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pnnl/deimos
[ "3f5e8d67a698818679bea91b7605c6418ef02265" ]
[ "tests/test_calibration.py" ]
[ "import deimos\nimport numpy as np\nimport pytest\n\n\[email protected]()\ndef ccs_cal():\n return deimos.calibration.CCSCalibration()\n\n\[email protected]()\ndef pos():\n return {'mz': [118.086255, 322.048121, 622.028960, 922.009798, 1221.990636, 1521.971475],\n 'ta': [13.72, 18.65, 25.20, 30.44, 35.36, 39.83],\n 'ccs': [121.3, 153.7, 203, 243.6, 282.2, 317],\n 'q': [1, 1, 1, 1, 1, 1]}\n\n\[email protected]()\ndef neg():\n return {'mz': [301.998139, 601.978977, 1033.988109, 1333.968947, 1633.949786],\n 'ta': [17.04, 22.53, 32.13, 36.15, 40.70],\n 'ccs': [140, 180.8, 255.3, 284.8, 319],\n 'q': [1, 1, 1, 1, 1]}\n\n\nclass TestCCSCalibration:\n\n def test_init(self, ccs_cal):\n for attr, expected in zip(['buffer_mass', 'beta', 'tfix', 'fit'],\n [None, None, None, dict]):\n assert hasattr(ccs_cal, attr)\n\n tmp = getattr(ccs_cal, attr)\n\n if expected is None:\n assert tmp is expected\n\n else:\n assert type(tmp) is expected\n\n for k in ['r', 'p', 'se']:\n assert k in tmp.keys()\n assert tmp[k] is None\n\n @pytest.mark.parametrize('beta,tfix',\n [(1, 0)])\n def test__check(self, ccs_cal, beta, tfix):\n ccs_cal.beta = beta\n ccs_cal.tfix = tfix\n ccs_cal._check()\n\n @pytest.mark.parametrize('beta,tfix',\n [(1, None),\n (None, 0),\n (None, None)])\n def test__check_fail(self, ccs_cal, beta, tfix):\n ccs_cal.beta = beta\n ccs_cal.tfix = tfix\n\n with pytest.raises(ValueError):\n ccs_cal._check()\n\n @pytest.mark.parametrize('calc,beta,tfix,beta_exp,tfix_exp',\n [(False, 1, 0, 1, 0),\n (True, 1, 0, 0.12722, -0.11387),\n (True, None, None, 0.12722, -0.11387)])\n def test_calibrate(self, ccs_cal, pos, calc, beta, tfix, beta_exp, tfix_exp):\n if calc is True:\n ccs_cal.calibrate(beta=beta, tfix=tfix, **pos)\n for k in ['r', 'p', 'se']:\n assert ccs_cal.fit[k] is not None\n else:\n ccs_cal.calibrate(beta=beta, tfix=tfix)\n\n assert abs(ccs_cal.beta - beta_exp) <= 1E-3\n assert abs(ccs_cal.tfix - tfix_exp) <= 1E-3\n\n def test_arrival2ccs(self, ccs_cal, pos, neg):\n for data in [pos, neg]:\n ccs_cal.calibrate(**data)\n ccs = ccs_cal.arrival2ccs(data['mz'], data['ta'], q=data['q'])\n\n error = np.abs(ccs - data['ccs']) / data['ccs']\n\n assert (error <= 0.005).all()\n\n def test_ccs2arrival(self, ccs_cal, pos, neg):\n for data in [pos, neg]:\n ccs_cal.calibrate(**data)\n ta = ccs_cal.ccs2arrival(data['mz'], data['ccs'], q=data['q'])\n\n error = np.abs(ta - data['ta']) / data['ta']\n\n assert (error <= 0.005).all()\n\n\[email protected]('calc,beta,tfix,beta_exp,tfix_exp',\n [(False, 1, 0, 1, 0),\n (True, 1, 0, 0.12722, -0.11387),\n (True, None, None, 0.12722, -0.11387)])\ndef test_calibrate_ccs(pos, calc, beta, tfix, beta_exp, tfix_exp):\n if calc is True:\n ccs_cal = deimos.calibration.calibrate_ccs(beta=beta, tfix=tfix, **pos)\n for k in ['r', 'p', 'se']:\n assert ccs_cal.fit[k] is not None\n else:\n ccs_cal = deimos.calibration.calibrate_ccs(beta=beta, tfix=tfix)\n\n assert type(ccs_cal) is deimos.calibration.CCSCalibration\n assert abs(ccs_cal.beta - beta_exp) <= 1E-3\n assert abs(ccs_cal.tfix - tfix_exp) <= 1E-3\n" ]
[ [ "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bvsk35/Linear-Regression-
[ "0b4791c99dd97a99f8f4309f204b95307d3e21f6" ]
[ "GenerateData.py" ]
[ "# This file generates data required for Linear Regression\r\n\r\n# Import required libraries\r\nimport numpy\r\n\r\n# To generate X\r\na = numpy.arange(1, 51)\r\nb = numpy.ones(50)\r\nX = numpy.concatenate((b[:, numpy.newaxis], a[:, numpy.newaxis]), axis=1)\r\nnumpy.savetxt('X.txt', X)\r\n\r\n# To generate Y\r\nA = numpy.arange(1, 51)\r\nB = numpy.random.uniform(-1, 1, (50))\r\nY = A+B\r\nnumpy.savetxt('Y.txt', Y)\r\n\r\n# Inital weights for Y = W0 + W1 * X\r\nW = numpy.random.uniform(-1, 1, (1, 2))\r\nnumpy.savetxt('W.txt', W)" ]
[ [ "numpy.arange", "numpy.ones", "numpy.concatenate", "numpy.savetxt", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
parthgajjar4/infertrade
[ "2eebf2286f5cc669759de632970e4f8f8a40f232" ]
[ "tests/test_performance.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\nfrom pathlib import Path\nfrom examples.my_first_infertrade_strategy import buy_on_small_rises\nfrom infertrade.PandasEnum import PandasEnum\nfrom infertrade.utilities.performance import calculate_allocation_from_cash\nimport infertrade.utilities.performance\n\n\ndef test_calculate_allocation_from_cash1():\n \"\"\" Checks current allocation is 0 when last_cash_after_trade and last_securities_after_transaction are both 0 \"\"\"\n last_cash_after_trade = 0.0\n last_securities_after_transaction = 0.0\n spot_price = 30\n\n out_actual = calculate_allocation_from_cash(last_cash_after_trade, last_securities_after_transaction, spot_price)\n out_expect = 0.0\n\n assert out_actual == out_expect\n\n\ndef test_calculate_allocation_from_cash2():\n \"\"\" Checks current allocation is 0 when spot_price is 0 (ie; bankrupt) \"\"\"\n last_cash_after_trade = 30.12\n last_securities_after_transaction = 123.56\n spot_price = 0.0\n\n out_actual = calculate_allocation_from_cash(last_cash_after_trade, last_securities_after_transaction, spot_price)\n out_expect = 0.0\n\n assert out_actual == out_expect\n\n\ndef test_calculate_allocation_from_cash3():\n \"\"\" Checks current allocation is 0 when spot_price is less than 0 (ie; bankrupt) \"\"\"\n last_cash_after_trade = 30.12\n last_securities_after_transaction = 123.56\n spot_price = -5.12\n\n out_actual = calculate_allocation_from_cash(last_cash_after_trade, last_securities_after_transaction, spot_price)\n out_expect = 0.0\n\n assert out_actual == out_expect\n\n\ndef test_calculate_allocation_from_cash4():\n \"\"\" Checks current allocation is correct value \"\"\"\n last_cash_after_trade = 30.12\n last_securities_after_transaction = 123.56\n spot_price = 20\n\n out_actual = calculate_allocation_from_cash(last_cash_after_trade, last_securities_after_transaction, spot_price)\n out_expect = 0.9879\n\n assert pytest.approx(out_actual, 0.0001) == out_expect\n\n\ndef test_assert_after_target_position_not_valid():\n \"\"\"Test checks the functionality of assert found after target_position_not_valid is false\"\"\"\n lbma_gold_location = Path(Path(__file__).absolute().parent, \"LBMA_Gold.csv\")\n my_dataframe = pd.read_csv(lbma_gold_location)\n my_dataframe_without_allocations = my_dataframe.rename(columns={\"LBMA/GOLD usd (pm)\": \"price\", \"Date\": \"date\"})\n my_dataframe_with_allocations = pd.DataFrame(buy_on_small_rises(my_dataframe_without_allocations))\n\n my_dataframe_with_allocations = pd.DataFrame(buy_on_small_rises(my_dataframe_without_allocations))\n for ii_period in range(0, len(my_dataframe_with_allocations[PandasEnum.ALLOCATION.value])):\n my_dataframe_with_allocations[PandasEnum.ALLOCATION.value][ii_period] = np.inf\n try:\n infertrade.utilities.performance.calculate_portfolio_performance_python(\n df_with_positions=my_dataframe_with_allocations, annual_strategy_fee=0.5,\n )\n except AssertionError:\n print(\"zapravo radim\")\n pass\n\n\ndef test_calculate_portfolio_performance_python():\n \"\"\"Test is used to determine the functionality of checks found in calculate_portfolio_performance_python\"\"\"\n lbma_gold_location = Path(Path(__file__).absolute().parent, \"LBMA_Gold.csv\")\n my_dataframe = pd.read_csv(lbma_gold_location)\n my_dataframe_without_allocations = my_dataframe.rename(columns={\"LBMA/GOLD usd (pm)\": \"price\", \"Date\": \"date\"})\n my_dataframe_with_allocations = pd.DataFrame(buy_on_small_rises(my_dataframe_without_allocations))\n\n try:\n 
infertrade.utilities.performance.calculate_portfolio_performance_python(\n df_with_positions=my_dataframe_with_allocations, annual_strategy_fee=int(1)\n )\n except TypeError:\n pass\n\n try:\n infertrade.utilities.performance.calculate_portfolio_performance_python(\n df_with_positions=my_dataframe_with_allocations, daily_spread_percent_override=int(1)\n )\n except TypeError:\n pass\n\n returned_df = infertrade.utilities.performance.calculate_portfolio_performance_python(\n df_with_positions=my_dataframe_with_allocations, minimum_allocation_change_to_adjust=np.inf\n )\n assert isinstance(returned_df, pd.DataFrame)\n\n returned_df = infertrade.utilities.performance.calculate_portfolio_performance_python(\n df_with_positions=my_dataframe_with_allocations\n )\n assert isinstance(returned_df, pd.DataFrame)\n\n my_dataframe_with_allocations[PandasEnum.MID.value] = [\n -0.1 for i in range(0, len(my_dataframe_with_allocations[\"price\"]))\n ]\n returned_df = infertrade.utilities.performance.calculate_portfolio_performance_python(\n df_with_positions=my_dataframe_with_allocations\n )\n assert isinstance(returned_df, pd.DataFrame)\n\n\ndef test_get_percentage_bid_offer():\n \"\"\"Test checks functionality and reliability of function and returned values\"\"\"\n lbma_gold_location = Path(Path(__file__).absolute().parent, \"LBMA_Gold.csv\")\n my_dataframe = pd.read_csv(lbma_gold_location)\n my_dataframe_without_allocations = my_dataframe.rename(columns={\"LBMA/GOLD usd (pm)\": \"price\", \"Date\": \"date\"})\n my_dataframe_with_allocations = pd.DataFrame(buy_on_small_rises(my_dataframe_without_allocations))\n\n returned_float = infertrade.utilities.performance._get_percentage_bid_offer(\n df_with_positions=my_dataframe_with_allocations, day=0, daily_spread_percent_override=1.0\n )\n assert isinstance(returned_float, float)\n\n try:\n returned_float = infertrade.utilities.performance._get_percentage_bid_offer(\n df_with_positions=my_dataframe_with_allocations, day=0, daily_spread_percent_override=None\n )\n except (KeyError, IndexError):\n pass\n\n\ndef test_check_still_valid():\n \"\"\"Tests if check_still_valid encounters exception when working with integers instead of expected floats\"\"\"\n try:\n infertrade.utilities.performance._check_still_valid(\n annual_strategy_fee=int(1),\n cumulative_portfolio_return=int(1),\n daily_spread_percentage=int(1),\n last_cash_after_trade=int(1),\n last_good_position=int(1),\n last_securities_after_transaction=int(1),\n skip_checks=False,\n spot_price=int(1),\n todays_position=int(1),\n )\n except TypeError:\n pass\n\n\ndef test_check_if_should_skip_return_calculation():\n \"\"\"Tests if returned values are the correct data type when working with multiple different parameters\"\"\"\n returned_tuple = infertrade.utilities.performance.check_if_should_skip_return_calculation(\n previous_portfolio_return=0.0,\n spot_price=1.0,\n day=1,\n day_of_return_to_calculate=1,\n show_absolute_bankruptcies=False,\n )\n returned_tuple_value = returned_tuple[0]\n assert isinstance(returned_tuple_value, bool)\n returned_tuple_value = returned_tuple[1]\n assert isinstance(returned_tuple_value, str) or isinstance(returned_tuple_value, float)\n returned_tuple_value = returned_tuple[2]\n assert isinstance(returned_tuple_value, bool)\n\n returned_tuple = infertrade.utilities.performance.check_if_should_skip_return_calculation(\n previous_portfolio_return=1,\n spot_price=1.0,\n day=2,\n day_of_return_to_calculate=1,\n show_absolute_bankruptcies=False,\n )\n returned_tuple_value = 
returned_tuple[0]\n assert isinstance(returned_tuple_value, bool)\n returned_tuple_value = returned_tuple[1]\n assert isinstance(returned_tuple_value, str) or isinstance(returned_tuple_value, float)\n returned_tuple_value = returned_tuple[2]\n assert isinstance(returned_tuple_value, bool)\n\n returned_tuple = infertrade.utilities.performance.check_if_should_skip_return_calculation(\n previous_portfolio_return=1,\n spot_price=1.0,\n day=2,\n day_of_return_to_calculate=1,\n show_absolute_bankruptcies=False,\n bankrupt=True,\n )\n returned_tuple_value = returned_tuple[0]\n assert isinstance(returned_tuple_value, bool)\n returned_tuple_value = returned_tuple[1]\n assert isinstance(returned_tuple_value, str) or isinstance(returned_tuple_value, float)\n returned_tuple_value = returned_tuple[2]\n assert isinstance(returned_tuple_value, bool)\n\n\ndef test_cumulative_return_if_bankrupt():\n \"\"\"Tests if the returned value is the correct data type\"\"\"\n returned_float = infertrade.utilities.performance._cumulative_return_if_bankrupt(\n prior_portfolio_return=1.0, show_absolute_bankruptcies=True\n )\n assert isinstance(returned_float, float)\n\n\ndef test_portfolio_index():\n \"\"\"Tests checks found in portfolio_index, if they are the correct data types and if the returned values\n are NaN\"\"\"\n try:\n infertrade.utilities.performance.portfolio_index(\n position_on_last_good_price=int(1),\n spot_price_usd=int(1),\n last_good_price_usd=int(1),\n current_bid_offer_spread_percent=int(1),\n target_allocation_perc=int(1),\n annual_strategy_fee_perc=int(1),\n last_securities_volume=int(1),\n last_cash_after_trade_usd=int(1),\n show_working=False,\n )\n except TypeError:\n pass\n\n returned_tuple = infertrade.utilities.performance.portfolio_index(\n position_on_last_good_price=0.5,\n spot_price_usd=0.5,\n last_good_price_usd=0.5,\n current_bid_offer_spread_percent=0.5,\n target_allocation_perc=0.5,\n annual_strategy_fee_perc=0.5,\n last_securities_volume=0.5,\n last_cash_after_trade_usd=0.5,\n show_working=True,\n )\n assert isinstance(returned_tuple[0], float)\n assert not np.isnan(returned_tuple[0])\n assert isinstance(returned_tuple[1], float)\n assert not np.isnan(returned_tuple[1])\n assert isinstance(returned_tuple[2], float)\n assert not np.isnan(returned_tuple[2])\n\n returned_tuple = infertrade.utilities.performance.portfolio_index(\n position_on_last_good_price=0.5,\n spot_price_usd=0.5,\n last_good_price_usd=np.NAN,\n current_bid_offer_spread_percent=0.5,\n target_allocation_perc=0.5,\n annual_strategy_fee_perc=0.5,\n last_securities_volume=0.5,\n last_cash_after_trade_usd=0.5,\n show_working=False,\n )\n assert isinstance(returned_tuple[0], float)\n assert not np.isnan(returned_tuple[0])\n assert isinstance(returned_tuple[1], float)\n assert not np.isnan(returned_tuple[1])\n assert isinstance(returned_tuple[2], float)\n assert not np.isnan(returned_tuple[2])\n\n returned_tuple = infertrade.utilities.performance.portfolio_index(\n position_on_last_good_price=np.inf,\n spot_price_usd=0.5,\n last_good_price_usd=0.5,\n current_bid_offer_spread_percent=0.5,\n target_allocation_perc=0.5,\n annual_strategy_fee_perc=0.5,\n last_securities_volume=0.5,\n last_cash_after_trade_usd=0.5,\n show_working=False,\n )\n assert isinstance(returned_tuple[0], float)\n assert not np.isnan(returned_tuple[0])\n assert isinstance(returned_tuple[1], float)\n assert not np.isnan(returned_tuple[1])\n assert isinstance(returned_tuple[2], float)\n assert not np.isnan(returned_tuple[2])\n\n returned_tuple = 
infertrade.utilities.performance.portfolio_index(\n position_on_last_good_price=-0.5,\n spot_price_usd=-0.5,\n last_good_price_usd=-0.5,\n current_bid_offer_spread_percent=-0.5,\n target_allocation_perc=-0.5,\n annual_strategy_fee_perc=-0.5,\n last_securities_volume=-0.5,\n last_cash_after_trade_usd=-0.5,\n show_working=False,\n )\n assert isinstance(returned_tuple[0], float)\n assert not np.isnan(returned_tuple[0])\n assert isinstance(returned_tuple[1], float)\n assert not np.isnan(returned_tuple[1])\n assert isinstance(returned_tuple[2], float)\n assert not np.isnan(returned_tuple[2])\n\n\ndef test_rounded_allocation_target():\n \"\"\"Test to ensure that the returned rounded allocations fit the expected returned\n types in cases of NaN and float\"\"\"\n returned_float = infertrade.utilities.performance.rounded_allocation_target(\n unconstrained_target_position=np.NAN, minimum_allocation_change_to_adjust=np.NAN\n )\n assert np.isnan(returned_float)\n\n returned_float = infertrade.utilities.performance.rounded_allocation_target(\n unconstrained_target_position=1.0, minimum_allocation_change_to_adjust=1.0\n )\n assert isinstance(returned_float, float)\n assert returned_float == 1\n" ]
[ [ "numpy.isnan", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
DLPerf/Open3D-ML
[ "6e8c3160642749936b7fe0c40e1f1aa72960eab2" ]
[ "ml3d/tf/models/point_rcnn.py" ]
[ "import tensorflow as tf\n\nimport numpy as np\nimport os\nimport pickle\n\nfrom .base_model_objdet import BaseModel\nfrom ..modules.losses.smooth_L1 import SmoothL1Loss\nfrom ..modules.losses.focal_loss import FocalLoss\nfrom ..modules.losses.cross_entropy import CrossEntropyLoss\nfrom ..modules.pointnet import Pointnet2MSG, PointnetSAModule\nfrom ..utils.objdet_helper import xywhr_to_xyxyr\nfrom open3d.ml.tf.ops import nms\nfrom ..utils.tf_utils import gen_CNN\nfrom ...datasets.utils import BEVBox3D, DataProcessing, ObjdetAugmentation\nfrom ...datasets.utils.operations import filter_by_min_points, points_in_box\n\nfrom ...utils import MODEL\nfrom ..modules.schedulers import OneCycleScheduler\n\nfrom ..utils.roipool3d import roipool3d_utils\nfrom ...metrics import iou_3d\n\n\nclass PointRCNN(BaseModel):\n \"\"\"Object detection model. Based on the PoinRCNN architecture\n https://github.com/sshaoshuai/PointRCNN.\n\n The network is not trainable end-to-end, it requires pre-training of the RPN\n module, followed by training of the RCNN module. For this the mode must be\n set to 'RPN', with this, the network only outputs intermediate results. If\n the RPN module is trained, the mode can be set to 'RCNN' (default), with\n this, the second module can be trained and the output are the final\n predictions.\n\n For inference use the 'RCNN' mode.\n\n Args:\n name (string): Name of model.\n Default to \"PointRCNN\".\n device (string): 'cuda' or 'cpu'.\n Default to 'cuda'.\n classes (string[]): List of classes used for object detection:\n Default to ['Car'].\n score_thres (float): Min confindence score for prediction.\n Default to 0.3.\n npoints (int): Number of processed input points.\n Default to 16384.\n rpn (dict): Config of RPN module.\n Default to {}.\n rcnn (dict): Config of RCNN module.\n Default to {}.\n mode (string): Execution mode, 'RPN' or 'RCNN'.\n Default to 'RCNN'.\n \"\"\"\n\n def __init__(self,\n name=\"PointRCNN\",\n classes=['Car'],\n score_thres=0.3,\n npoints=16384,\n rpn={},\n rcnn={},\n mode=\"RCNN\",\n **kwargs):\n super().__init__(name=name, **kwargs)\n assert mode == \"RPN\" or mode == \"RCNN\"\n self.mode = mode\n\n self.npoints = npoints\n self.classes = classes\n self.name2lbl = {n: i for i, n in enumerate(classes)}\n self.lbl2name = {i: n for i, n in enumerate(classes)}\n self.score_thres = score_thres\n\n self.rpn = RPN(**rpn)\n self.rcnn = RCNN(num_classes=len(self.classes), **rcnn)\n\n if self.mode == \"RCNN\":\n self.rpn.trainable = False\n else:\n self.rcnn.trainable = False\n\n def call(self, inputs, training=True):\n cls_score, reg_score, backbone_xyz, backbone_features = self.rpn(\n inputs[0], training=self.mode == \"RPN\" and training)\n\n if self.mode != \"RPN\":\n cls_score = tf.stop_gradient(cls_score)\n reg_score = tf.stop_gradient(reg_score)\n backbone_xyz = tf.stop_gradient(backbone_xyz)\n backbone_features = tf.stop_gradient(backbone_features)\n\n rpn_scores_raw = tf.stop_gradient(cls_score[:, :, 0])\n rois, _ = self.rpn.proposal_layer(rpn_scores_raw,\n reg_score,\n backbone_xyz,\n training=training) # (B, M, 7)\n rois = tf.stop_gradient(rois)\n\n output = {\"rois\": rois, \"cls\": cls_score, \"reg\": reg_score}\n\n if self.mode == \"RCNN\":\n rpn_scores_norm = tf.sigmoid(rpn_scores_raw)\n\n seg_mask = tf.cast((rpn_scores_norm > self.score_thres), tf.float32)\n pts_depth = tf.norm(backbone_xyz, ord=2, axis=2)\n\n seg_mask = tf.stop_gradient(seg_mask)\n pts_depth = tf.stop_gradient(pts_depth)\n\n gt_boxes = None\n if training or self.mode == \"RPN\":\n 
gt_boxes = inputs[1]\n\n output = self.rcnn(rois,\n gt_boxes,\n backbone_xyz,\n tf.transpose(backbone_features, (0, 2, 1)),\n seg_mask,\n pts_depth,\n training=training)\n\n return output\n\n def get_optimizer(self, cfg):\n\n beta1, beta2 = cfg.get('betas', [0.9, 0.99])\n lr_scheduler = OneCycleScheduler(40800, cfg.lr, cfg.div_factor)\n\n optimizer = tf.optimizers.Adam(learning_rate=lr_scheduler,\n beta_1=beta1,\n beta_2=beta2)\n\n return optimizer\n\n def load_gt_database(self, pickle_path, min_points_dict, sample_dict):\n \"\"\"Load ground truth object database.\n\n Args:\n pickle_path: Path of pickle file generated using `scripts/collect_bbox.py`.\n min_points_dict: A dictionary to filter objects based on number of points inside.\n sample_dict: A dictionary to decide number of objects to sample.\n\n \"\"\"\n db_boxes = pickle.load(open(pickle_path, 'rb'))\n\n if min_points_dict is not None:\n db_boxes = filter_by_min_points(db_boxes, min_points_dict)\n\n db_boxes_dict = {}\n for key in sample_dict.keys():\n db_boxes_dict[key] = []\n\n for db_box in db_boxes:\n if db_box.label_class in sample_dict.keys():\n db_boxes_dict[db_box.label_class].append(db_box)\n\n self.db_boxes_dict = db_boxes_dict\n\n def augment_data(self, data, attr):\n \"\"\"Augment object detection data.\n\n Available augmentations are:\n `ObjectSample`: Insert objects from ground truth database.\n `ObjectRangeFilter`: Filter pointcloud from given bounds.\n `PointShuffle`: Shuffle the pointcloud.\n\n Args:\n data: A dictionary object returned from the dataset class.\n attr: Attributes for current pointcloud.\n\n Returns:\n Augmented `data` dictionary.\n\n \"\"\"\n cfg = self.cfg.augment\n\n if 'ObjectSample' in cfg.keys():\n if not hasattr(self, 'db_boxes_dict'):\n data_path = attr['path']\n # remove tail of path to get root data path\n for _ in range(3):\n data_path = os.path.split(data_path)[0]\n pickle_path = os.path.join(data_path, 'bboxes.pkl')\n self.load_gt_database(pickle_path, **cfg['ObjectSample'])\n\n data = ObjdetAugmentation.ObjectSample(\n data,\n db_boxes_dict=self.db_boxes_dict,\n sample_dict=cfg['ObjectSample']['sample_dict'])\n\n if cfg.get('ObjectRangeFilter', False):\n data = ObjdetAugmentation.ObjectRangeFilter(\n data, self.cfg.point_cloud_range)\n\n if cfg.get('PointShuffle', False):\n data = ObjdetAugmentation.PointShuffle(data)\n\n return data\n\n def loss(self, results, inputs, training=True):\n if self.mode == \"RPN\":\n return self.rpn.loss(results, inputs)\n else:\n if not training:\n return {\"loss\": tf.constant(0.0)}\n return self.rcnn.loss(results, inputs)\n\n def filter_objects(self, bbox_objs):\n \"\"\"Filter objects based on classes to train.\n\n Args:\n bbox_objs: Bounding box objects from dataset class.\n\n Returns:\n Filtered bounding box objects.\n\n \"\"\"\n filtered = []\n for bb in bbox_objs:\n if bb.label_class in self.classes:\n filtered.append(bb)\n return filtered\n\n def preprocess(self, data, attr):\n if attr['split'] in ['train', 'training']:\n data = self.augment_data(data, attr)\n\n data['bounding_boxes'] = self.filter_objects(data['bounding_boxes'])\n\n # remove intensity\n points = np.array(data['point'][..., :3], dtype=np.float32)\n calib = data['calib']\n\n # transform in cam space\n points = DataProcessing.world2cam(points, calib['world_cam'])\n\n new_data = {'point': points, 'calib': calib}\n\n # bounding_boxes are objects of type BEVBox3D. 
It is renamed to\n # bbox_objs to clarify them as objects and not matrix of type [N, 7].\n if attr['split'] not in ['test', 'testing']:\n new_data['bbox_objs'] = data['bounding_boxes']\n\n return new_data\n\n @staticmethod\n def generate_rpn_training_labels(points, bboxes, bboxes_world, calib=None):\n \"\"\"Generates labels for RPN network.\n\n Classifies each point as foreground/background based on points inside bbox.\n We don't train on ambiguous points which are just outside bounding boxes(calculated\n by `extended_boxes`).\n Also computes regression labels for bounding box proposals(in bounding box frame).\n\n Args:\n points: Input pointcloud.\n bboxes: bounding boxes in camera frame.\n bboxes_world: bounding boxes in world frame.\n calib: Calibration file for cam_to_world matrix.\n\n Returns:\n Classification and Regression labels.\n\n \"\"\"\n cls_label = np.zeros((points.shape[0]), dtype=np.int32)\n reg_label = np.zeros((points.shape[0], 7),\n dtype=np.float32) # dx, dy, dz, ry, h, w, l\n\n if len(bboxes) == 0:\n return cls_label, reg_label\n\n pts_idx = points_in_box(points.copy(),\n bboxes_world,\n camera_frame=True,\n cam_world=DataProcessing.invT(\n calib['world_cam']))\n\n # enlarge the bbox3d, ignore nearby points\n extended_boxes = bboxes_world.copy()\n # Enlarge box by 0.4m (from PointRCNN paper).\n extended_boxes[3:6] += 0.4\n # Decrease z coordinate, as z_center is at bottom face of box.\n extended_boxes[:, 2] -= 0.2\n\n pts_idx_ext = points_in_box(points.copy(),\n extended_boxes,\n camera_frame=True,\n cam_world=DataProcessing.invT(\n calib['world_cam']))\n\n for k in range(bboxes.shape[0]):\n fg_pt_flag = pts_idx[:, k]\n fg_pts_rect = points[fg_pt_flag]\n cls_label[fg_pt_flag] = 1\n\n fg_enlarge_flag = pts_idx_ext[:, k]\n ignore_flag = np.logical_xor(fg_pt_flag, fg_enlarge_flag)\n cls_label[ignore_flag] = -1\n\n # pixel offset of object center\n center3d = bboxes[k][0:3].copy() # (x, y, z)\n center3d[1] -= bboxes[k][3] / 2\n reg_label[fg_pt_flag, 0:3] = center3d - fg_pts_rect\n\n # size and angle encoding\n reg_label[fg_pt_flag, 3] = bboxes[k][3] # h\n reg_label[fg_pt_flag, 4] = bboxes[k][4] # w\n reg_label[fg_pt_flag, 5] = bboxes[k][5] # l\n reg_label[fg_pt_flag, 6] = bboxes[k][6] # ry\n\n return cls_label, reg_label\n\n def transform(self, data, attr):\n points = data['point']\n\n if attr['split'] not in ['test', 'testing']: #, 'val', 'validation']:\n if self.npoints < len(points):\n pts_depth = points[:, 2]\n pts_near_flag = pts_depth < 40.0\n far_idxs_choice = np.where(pts_near_flag == 0)[0]\n near_idxs = np.where(pts_near_flag == 1)[0]\n near_idxs_choice = np.random.choice(near_idxs,\n self.npoints -\n len(far_idxs_choice),\n replace=False)\n\n choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \\\n if len(far_idxs_choice) > 0 else near_idxs_choice\n np.random.shuffle(choice)\n else:\n choice = np.arange(0, len(points), dtype=np.int32)\n if self.npoints > len(points):\n extra_choice = np.random.choice(choice,\n self.npoints - len(points),\n replace=False)\n choice = np.concatenate((choice, extra_choice), axis=0)\n np.random.shuffle(choice)\n\n points = points[choice, :]\n\n t_data = {'point': points, 'calib': data['calib']}\n\n if attr['split'] not in ['test', 'testing']:\n labels = []\n bboxes = []\n bboxes_world = []\n if len(data['bbox_objs']) != 0:\n labels = np.stack([\n self.name2lbl.get(bb.label_class, len(self.classes))\n for bb in data['bbox_objs']\n ])\n\n bboxes = np.stack([bb.to_camera() for bb in data['bbox_objs']\n ]) # Camera 
frame.\n bboxes_world = np.stack(\n [bb.to_xyzwhlr() for bb in data['bbox_objs']])\n\n if self.mode == \"RPN\":\n labels, bboxes = PointRCNN.generate_rpn_training_labels(\n points, bboxes, bboxes_world, data['calib'])\n t_data['labels'] = np.array(labels)\n t_data['bbox_objs'] = data['bbox_objs'] # Objects of type BEVBox3D.\n if attr['split'] in ['train', 'training'] or self.mode == \"RPN\":\n t_data['bboxes'] = bboxes\n\n return t_data\n\n def inference_end(self, results, inputs):\n if self.mode == 'RPN':\n return [[]]\n\n roi_boxes3d = results['rois'] # (B, M, 7)\n batch_size = roi_boxes3d.shape[0]\n\n rcnn_cls = tf.reshape(results['cls'],\n (batch_size, -1, results['cls'].shape[1]))\n rcnn_reg = tf.reshape(results['reg'],\n (batch_size, -1, results['reg'].shape[1]))\n\n pred_boxes3d, rcnn_cls = self.rcnn.proposal_layer(rcnn_cls,\n rcnn_reg,\n roi_boxes3d,\n training=False)\n\n inference_result = []\n for calib, bboxes, scores in zip(inputs[3], pred_boxes3d, rcnn_cls):\n # scoring\n if scores.shape[-1] == 1:\n scores = tf.sigmoid(scores)\n labels = tf.cast(scores < self.score_thres, tf.int64)\n else:\n labels = tf.argmax(scores)\n scores = tf.nn.softmax(scores, axis=0)\n scores = scores[labels]\n\n fltr = tf.reshape(scores > self.score_thres, (-1))\n bboxes = bboxes[fltr]\n labels = labels[fltr]\n scores = scores[fltr]\n\n bboxes = bboxes.numpy()\n scores = scores.numpy()\n labels = labels.numpy()\n inference_result.append([])\n\n world_cam, cam_img = calib.numpy()\n\n for bbox, score, label in zip(bboxes, scores, labels):\n pos = bbox[:3]\n dim = bbox[[4, 3, 5]]\n # transform into world space\n pos = DataProcessing.cam2world(pos.reshape((1, -1)),\n world_cam).flatten()\n pos = pos + [0, 0, dim[1] / 2]\n yaw = bbox[-1]\n\n name = self.lbl2name.get(label[0], \"ignore\")\n inference_result[-1].append(\n BEVBox3D(pos, dim, yaw, name, score, world_cam, cam_img))\n\n return inference_result\n\n def get_batch_gen(self, dataset, steps_per_epoch=None, batch_size=1):\n\n def batcher():\n count = len(dataset) if steps_per_epoch is None else steps_per_epoch\n p1 = tf.zeros((0, 7), dtype=tf.float32)\n p2 = tf.zeros((0,), dtype=tf.int32)\n for i in np.arange(0, count, batch_size):\n batch = [dataset[i + bi]['data'] for bi in range(batch_size)]\n points = tf.stack([b['point'] for b in batch], axis=0)\n\n bboxes = [\n b.get('bboxes', p1)\n for b in batch\n ]\n max_gt = 0\n for bbox in bboxes:\n max_gt = max(max_gt, bbox.shape[0])\n pad_bboxes = np.zeros((len(bboxes), max_gt, 7),\n dtype=np.float32)\n for j in range(len(bboxes)):\n pad_bboxes[j, :bboxes[j].shape[0], :] = bboxes[j]\n bboxes = tf.constant(pad_bboxes)\n\n labels = [\n b.get('labels', p2)\n for b in batch\n ]\n max_lab = 0\n for lab in labels:\n max_lab = max(max_lab, lab.shape[0])\n\n if 'labels' in batch[\n 0] and labels[0].shape[0] != points.shape[1]:\n pad_labels = np.ones(\n (len(labels), max_lab), dtype=np.int32) * (-1)\n for j in range(len(labels)):\n pad_labels[j, :labels[j].shape[0]] = labels[j]\n labels = tf.constant(pad_labels)\n\n else:\n labels = tf.stack(labels, axis=0)\n\n calib = [\n tf.constant([\n b.get('calib', {}).get('world_cam', np.eye(4)),\n b.get('calib', {}).get('cam_img', np.eye(4))\n ]) for b in batch\n ]\n yield (points, bboxes, labels, calib)\n\n gen_func = batcher\n gen_types = (tf.float32, tf.float32, tf.int32, tf.float32)\n gen_shapes = ([batch_size, None, 3], [batch_size, None,\n 7], [batch_size,\n None], [batch_size, 2, 4, 4])\n\n return gen_func, gen_types, gen_shapes\n\n\nMODEL._register_module(PointRCNN, 
'tf')\n\n\ndef get_reg_loss(pred_reg,\n reg_label,\n loc_scope,\n loc_bin_size,\n num_head_bin,\n anchor_size,\n get_xz_fine=True,\n get_y_by_bin=False,\n loc_y_scope=0.5,\n loc_y_bin_size=0.25,\n get_ry_fine=False):\n \"\"\"Bin-based 3D bounding boxes regression loss. See\n https://arxiv.org/abs/1812.04244 for more details.\n\n Args:\n pred_reg: (N, C)\n reg_label: (N, 7) [dx, dy, dz, h, w, l, ry]\n loc_scope: Constant\n loc_bin_size: Constant\n num_head_bin: Constant\n anchor_size: (N, 3) or (3)\n get_xz_fine: Whether to get fine xz loss.\n get_y_by_bin: Whether to divide y coordinate into bin.\n loc_y_scope: Scope length for y coordinate.\n loc_y_bin_size: Bin size for classifying y coordinate.\n get_ry_fine: Whether to use fine yaw loss.\n \"\"\"\n per_loc_bin_num = int(loc_scope / loc_bin_size) * 2\n loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2\n\n reg_loss_dict = {}\n loc_loss = 0\n\n # xz localization loss\n x_offset_label, y_offset_label, z_offset_label = reg_label[:,\n 0], reg_label[:,\n 1], reg_label[:,\n 2]\n x_shift = tf.clip_by_value(x_offset_label + loc_scope, 0,\n loc_scope * 2 - 1e-3)\n z_shift = tf.clip_by_value(z_offset_label + loc_scope, 0,\n loc_scope * 2 - 1e-3)\n x_bin_label = tf.cast(tf.floor(x_shift / loc_bin_size), tf.int64)\n z_bin_label = tf.cast(tf.floor(z_shift / loc_bin_size), tf.int64)\n\n x_bin_l, x_bin_r = 0, per_loc_bin_num\n z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2\n start_offset = z_bin_r\n\n loss_x_bin = CrossEntropyLoss()(pred_reg[:, x_bin_l:x_bin_r], x_bin_label)\n loss_z_bin = CrossEntropyLoss()(pred_reg[:, z_bin_l:z_bin_r], z_bin_label)\n reg_loss_dict['loss_x_bin'] = loss_x_bin.numpy()\n reg_loss_dict['loss_z_bin'] = loss_z_bin.numpy()\n loc_loss += loss_x_bin + loss_z_bin\n\n if get_xz_fine:\n x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3\n z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4\n start_offset = z_res_r\n\n x_res_label = x_shift - (\n tf.cast(x_bin_label, tf.float32) * loc_bin_size + loc_bin_size / 2)\n z_res_label = z_shift - (\n tf.cast(z_bin_label, tf.float32) * loc_bin_size + loc_bin_size / 2)\n x_res_norm_label = x_res_label / loc_bin_size\n z_res_norm_label = z_res_label / loc_bin_size\n\n x_bin_onehot = tf.one_hot(x_bin_label, per_loc_bin_num)\n z_bin_onehot = tf.one_hot(z_bin_label, per_loc_bin_num)\n\n loss_x_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, x_res_l:x_res_r] *\n x_bin_onehot,\n axis=1), x_res_norm_label)\n loss_z_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, z_res_l:z_res_r] *\n z_bin_onehot,\n axis=1), z_res_norm_label)\n reg_loss_dict['loss_x_res'] = loss_x_res.numpy()\n reg_loss_dict['loss_z_res'] = loss_z_res.numpy()\n loc_loss += loss_x_res + loss_z_res\n\n # y localization loss\n if get_y_by_bin:\n y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num\n y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num\n start_offset = y_res_r\n\n y_shift = tf.clip_by_value(y_offset_label + loc_y_scope, 0,\n loc_y_scope * 2 - 1e-3)\n y_bin_label = tf.cast(tf.floor(y_shift / loc_y_bin_size), tf.int64)\n y_res_label = y_shift - (tf.cast(y_bin_label, tf.float32) *\n loc_y_bin_size + loc_y_bin_size / 2)\n y_res_norm_label = y_res_label / loc_y_bin_size\n\n y_bin_onehot = tf.one_hot(y_bin_label, loc_y_bin_num)\n\n loss_y_bin = CrossEntropyLoss()(pred_reg[:, y_bin_l:y_bin_r],\n y_bin_label)\n loss_y_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, y_res_l:y_res_r] *\n y_bin_onehot,\n axis=1), y_res_norm_label)\n\n reg_loss_dict['loss_y_bin'] = loss_y_bin.numpy()\n 
reg_loss_dict['loss_y_res'] = loss_y_res.numpy()\n\n loc_loss += loss_y_bin + loss_y_res\n else:\n y_offset_l, y_offset_r = start_offset, start_offset + 1\n start_offset = y_offset_r\n\n loss_y_offset = SmoothL1Loss()(tf.reduce_sum(\n pred_reg[:, y_offset_l:y_offset_r], axis=1), y_offset_label)\n reg_loss_dict['loss_y_offset'] = loss_y_offset.numpy()\n loc_loss += loss_y_offset\n\n # angle loss\n ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin\n ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin\n\n ry_label = reg_label[:, 6]\n\n if get_ry_fine:\n # divide pi/2 into several bins\n angle_per_class = (np.pi / 2) / num_head_bin\n\n ry_label = ry_label % (2 * np.pi) # 0 ~ 2pi\n ry_label = tf.where((ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5),\n (ry_label + np.pi) % (2 * np.pi),\n ry_label) # (0 ~ pi/2, 3pi/2 ~ 2pi)\n shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi) # (0 ~ pi)\n\n shift_angle = tf.clip_by_value(shift_angle - np.pi * 0.25, 1e-3,\n np.pi * 0.5 - 1e-3) # (0, pi/2)\n\n # bin center is (5, 10, 15, ..., 85)\n ry_bin_label = tf.cast(tf.floor(shift_angle / angle_per_class),\n tf.int64)\n ry_res_label = shift_angle - (tf.cast(ry_bin_label, tf.float32) *\n angle_per_class + angle_per_class / 2)\n ry_res_norm_label = ry_res_label / (angle_per_class / 2)\n\n else:\n # divide 2pi into several bins\n angle_per_class = (2 * np.pi) / num_head_bin\n heading_angle = ry_label % (2 * np.pi) # 0 ~ 2pi\n\n shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)\n ry_bin_label = tf.cast(tf.floor(shift_angle / angle_per_class),\n tf.int64)\n ry_res_label = shift_angle - (tf.cast(ry_bin_label, tf.float32) *\n angle_per_class + angle_per_class / 2)\n ry_res_norm_label = ry_res_label / (angle_per_class / 2)\n\n ry_bin_onehot = tf.one_hot(ry_bin_label, num_head_bin)\n loss_ry_bin = CrossEntropyLoss()(pred_reg[:, ry_bin_l:ry_bin_r],\n ry_bin_label)\n loss_ry_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, ry_res_l:ry_res_r] *\n ry_bin_onehot,\n axis=1), ry_res_norm_label)\n\n reg_loss_dict['loss_ry_bin'] = loss_ry_bin.numpy()\n reg_loss_dict['loss_ry_res'] = loss_ry_res.numpy()\n angle_loss = loss_ry_bin + loss_ry_res\n\n # size loss\n size_res_l, size_res_r = ry_res_r, ry_res_r + 3\n assert pred_reg.shape[1] == size_res_r, '%d vs %d' % (pred_reg.shape[1],\n size_res_r)\n\n size_res_norm_label = (reg_label[:, 3:6] - anchor_size) / anchor_size\n size_res_norm = pred_reg[:, size_res_l:size_res_r]\n size_loss = SmoothL1Loss()(size_res_norm, size_res_norm_label)\n\n # Total regression loss\n reg_loss_dict['loss_loc'] = loc_loss\n reg_loss_dict['loss_angle'] = angle_loss\n reg_loss_dict['loss_size'] = size_loss\n\n return loc_loss, angle_loss, size_loss, reg_loss_dict\n\n\nclass RPN(tf.keras.layers.Layer):\n\n def __init__(self,\n backbone={},\n cls_in_ch=128,\n cls_out_ch=[128],\n reg_in_ch=128,\n reg_out_ch=[128],\n db_ratio=0.5,\n head={},\n focal_loss={},\n loss_weight=[1.0, 1.0],\n **kwargs):\n\n super().__init__()\n\n # backbone\n self.backbone = Pointnet2MSG(**backbone)\n self.proposal_layer = ProposalLayer(**head)\n\n # classification branch\n layers = []\n for i in range(len(cls_out_ch)):\n layers.extend([\n tf.keras.layers.Conv1D(cls_out_ch[i],\n 1,\n use_bias=False,\n data_format=\"channels_first\"),\n tf.keras.layers.BatchNormalization(axis=1,\n momentum=0.9,\n epsilon=1e-05),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Dropout(db_ratio)\n ])\n layers.append(\n tf.keras.layers.Conv1D(\n 1,\n 1,\n use_bias=True,\n 
bias_initializer=tf.keras.initializers.Constant(-np.log(\n (1 - 0.01) / 0.01)),\n data_format=\"channels_first\"))\n\n self.cls_blocks = tf.keras.Sequential(layers)\n\n # regression branch\n per_loc_bin_num = int(self.proposal_layer.loc_scope /\n self.proposal_layer.loc_bin_size) * 2\n if self.proposal_layer.loc_xz_fine:\n reg_channel = per_loc_bin_num * 4 + self.proposal_layer.num_head_bin * 2 + 3\n else:\n reg_channel = per_loc_bin_num * 2 + self.proposal_layer.num_head_bin * 2 + 3\n reg_channel = reg_channel + 1 # reg y\n\n layers = []\n for i in range(len(reg_out_ch)):\n layers.extend([\n tf.keras.layers.Conv1D(reg_out_ch[i],\n 1,\n use_bias=False,\n data_format=\"channels_first\"),\n tf.keras.layers.BatchNormalization(axis=1,\n momentum=0.9,\n epsilon=1e-05),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Dropout(db_ratio)\n ])\n layers.append(\n tf.keras.layers.Conv1D(\n reg_channel,\n 1,\n use_bias=True,\n kernel_initializer=tf.keras.initializers.RandomNormal(\n stddev=0.001),\n data_format=\"channels_first\"))\n\n self.reg_blocks = tf.keras.Sequential(layers)\n\n self.loss_cls = FocalLoss(**focal_loss)\n self.loss_weight = loss_weight\n\n def call(self, x, training=True):\n backbone_xyz, backbone_features = self.backbone(\n x, training=training) # (B, N, 3), (B, C, N)\n\n rpn_cls = tf.transpose(\n self.cls_blocks(backbone_features, training=training),\n (0, 2, 1)) # (B, N, 1)\n rpn_reg = tf.transpose(\n self.reg_blocks(backbone_features, training=training),\n (0, 2, 1)) # (B, N, C)\n\n return rpn_cls, rpn_reg, backbone_xyz, backbone_features\n\n def loss(self, results, inputs):\n rpn_cls = results['cls']\n rpn_reg = results['reg']\n\n rpn_reg_label = inputs[1]\n rpn_cls_label = inputs[2]\n\n rpn_cls_label_flat = tf.reshape(rpn_cls_label, (-1))\n rpn_cls_flat = tf.reshape(rpn_cls, (-1))\n fg_mask = (rpn_cls_label_flat > 0)\n\n # focal loss\n rpn_cls_target = tf.cast((rpn_cls_label_flat > 0), tf.int32)\n pos = tf.cast((rpn_cls_label_flat > 0), tf.float32)\n neg = tf.cast((rpn_cls_label_flat == 0), tf.float32)\n cls_weights = pos + neg\n pos_normalizer = tf.reduce_sum(pos)\n cls_weights = cls_weights / tf.maximum(pos_normalizer, 1.0)\n rpn_loss_cls = self.loss_cls(rpn_cls_flat,\n rpn_cls_target,\n cls_weights,\n avg_factor=1.0)\n\n # RPN regression loss\n point_num = rpn_reg.shape[0] * rpn_reg.shape[1]\n fg_sum = tf.reduce_sum(tf.cast(fg_mask, tf.int64)).numpy()\n if fg_sum != 0:\n loss_loc, loss_angle, loss_size, reg_loss_dict = \\\n get_reg_loss(tf.reshape(rpn_reg, (point_num, -1))[fg_mask],\n tf.reshape(rpn_reg_label, (point_num, 7))[fg_mask],\n loc_scope=self.proposal_layer.loc_scope,\n loc_bin_size=self.proposal_layer.loc_bin_size,\n num_head_bin=self.proposal_layer.num_head_bin,\n anchor_size=self.proposal_layer.mean_size,\n get_xz_fine=self.proposal_layer.loc_xz_fine,\n get_y_by_bin=False,\n get_ry_fine=False)\n\n loss_size = 3 * loss_size\n rpn_loss_reg = loss_loc + loss_angle + loss_size\n else:\n rpn_loss_reg = tf.reduce_mean(rpn_reg * 0)\n\n return {\n \"cls\": rpn_loss_cls * self.loss_weight[0],\n \"reg\": rpn_loss_reg * self.loss_weight[1]\n }\n\n\nclass RCNN(tf.keras.layers.Layer):\n\n def __init__(\n self,\n num_classes,\n in_channels=128,\n SA_config={\n \"npoints\": [128, 32, -1],\n \"radius\": [0.2, 0.4, 100],\n \"nsample\": [64, 64, 64],\n \"mlps\": [[128, 128, 128], [128, 128, 256], [256, 256, 512]]\n },\n cls_out_ch=[256, 256],\n reg_out_ch=[256, 256],\n db_ratio=0.5,\n use_xyz=True,\n xyz_up_layer=[128, 128],\n head={},\n target_head={},\n loss={}):\n\n 
super().__init__()\n self.rcnn_input_channel = 5\n\n self.pool_extra_width = target_head.get(\"pool_extra_width\", 1.0)\n self.num_points = target_head.get(\"num_points\", 512)\n\n self.proposal_layer = ProposalLayer(**head)\n\n self.SA_modules = []\n for i in range(len(SA_config[\"npoints\"])):\n mlps = [in_channels] + SA_config[\"mlps\"][i]\n npoint = SA_config[\"npoints\"][\n i] if SA_config[\"npoints\"][i] != -1 else None\n self.SA_modules.append(\n PointnetSAModule(npoint=npoint,\n radius=SA_config[\"radius\"][i],\n nsample=SA_config[\"nsample\"][i],\n mlp=mlps,\n use_xyz=use_xyz,\n use_bias=True))\n in_channels = mlps[-1]\n\n self.xyz_up_layer = gen_CNN([self.rcnn_input_channel] + xyz_up_layer,\n conv=tf.keras.layers.Conv2D)\n c_out = xyz_up_layer[-1]\n self.merge_down_layer = gen_CNN([c_out * 2, c_out],\n conv=tf.keras.layers.Conv2D)\n\n # classification layer\n cls_channel = 1 if num_classes == 2 else num_classes\n\n layers = []\n for i in range(len(cls_out_ch)):\n layers.extend([\n tf.keras.layers.Conv1D(\n cls_out_ch[i],\n 1,\n use_bias=True,\n data_format=\"channels_first\",\n kernel_initializer=tf.keras.initializers.GlorotNormal(),\n bias_initializer=tf.keras.initializers.Constant(0.0)),\n tf.keras.layers.ReLU()\n ])\n layers.append(\n tf.keras.layers.Conv1D(\n cls_channel,\n 1,\n use_bias=True,\n data_format=\"channels_first\",\n kernel_initializer=tf.keras.initializers.GlorotNormal(),\n bias_initializer=tf.keras.initializers.Constant(0.0)))\n\n self.cls_blocks = tf.keras.Sequential(layers)\n\n self.loss_cls = tf.keras.losses.BinaryCrossentropy()\n\n # regression branch\n per_loc_bin_num = int(self.proposal_layer.loc_scope /\n self.proposal_layer.loc_bin_size) * 2\n loc_y_bin_num = int(self.proposal_layer.loc_y_scope /\n self.proposal_layer.loc_y_bin_size) * 2\n reg_channel = per_loc_bin_num * 4 + self.proposal_layer.num_head_bin * 2 + 3\n reg_channel += (1 if not self.proposal_layer.get_y_by_bin else\n loc_y_bin_num * 2)\n\n layers = []\n for i in range(len(reg_out_ch)):\n layers.extend([\n tf.keras.layers.Conv1D(\n reg_out_ch[i],\n 1,\n use_bias=True,\n data_format=\"channels_first\",\n kernel_initializer=tf.keras.initializers.GlorotNormal(),\n bias_initializer=tf.keras.initializers.Constant(0.0)),\n tf.keras.layers.ReLU()\n ])\n layers.append(\n tf.keras.layers.Conv1D(\n reg_channel,\n 1,\n use_bias=True,\n data_format=\"channels_first\",\n kernel_initializer=tf.keras.initializers.RandomNormal(\n stddev=0.001),\n bias_initializer=tf.keras.initializers.Constant(0.0)))\n\n self.reg_blocks = tf.keras.Sequential(layers)\n\n self.proposal_target_layer = ProposalTargetLayer(**target_head)\n\n def _break_up_pc(self, pc):\n xyz = pc[..., 0:3]\n features = (tf.transpose(pc[..., 3:],\n (0, 2, 1)) if pc.shape[-1] > 3 else None)\n\n return xyz, features\n\n def call(self,\n roi_boxes3d,\n gt_boxes3d,\n rpn_xyz,\n rpn_features,\n seg_mask,\n pts_depth,\n training=True):\n pts_extra_input_list = [tf.expand_dims(seg_mask, axis=2)]\n pts_extra_input_list.append(\n tf.expand_dims(pts_depth / 70.0 - 0.5, axis=2))\n pts_extra_input = tf.concat(pts_extra_input_list, axis=2)\n pts_feature = tf.concat((pts_extra_input, rpn_features), axis=2)\n\n if gt_boxes3d is not None:\n target = self.proposal_target_layer(\n [roi_boxes3d, gt_boxes3d, rpn_xyz, pts_feature])\n for k in target:\n target[k] = tf.stop_gradient(target[k])\n pts_input = tf.concat(\n (target['sampled_pts'], target['pts_feature']), axis=2)\n target['pts_input'] = pts_input\n else:\n pooled_features, pooled_empty_flag = 
roipool3d_utils.roipool3d_gpu(\n rpn_xyz,\n pts_feature,\n roi_boxes3d,\n self.pool_extra_width,\n sampled_pt_num=self.num_points)\n\n # canonical transformation\n batch_size = roi_boxes3d.shape[0]\n roi_center = roi_boxes3d[:, :, 0:3]\n poss = []\n for k in range(batch_size):\n pos = pooled_features[k, :, :, :3] - tf.expand_dims(\n roi_center[k], axis=1)\n pos = rotate_pc_along_y_tf(pos, roi_boxes3d[k, :, 6])\n poss.append(pos)\n pooled_features = tf.concat(\n [tf.stack(poss), pooled_features[:, :, :, 3:]], axis=3)\n\n pts_input = tf.reshape(\n pooled_features,\n (-1, pooled_features.shape[2], pooled_features.shape[3]))\n\n xyz, features = self._break_up_pc(pts_input)\n\n xyz_input = tf.expand_dims(tf.transpose(\n pts_input[..., 0:self.rcnn_input_channel], (0, 2, 1)),\n axis=3)\n xyz_feature = self.xyz_up_layer(xyz_input, training=training)\n\n rpn_feature = tf.expand_dims(tf.transpose(\n pts_input[..., self.rcnn_input_channel:], (0, 2, 1)),\n axis=3)\n\n merged_feature = tf.concat((xyz_feature, rpn_feature), axis=1)\n merged_feature = self.merge_down_layer(merged_feature,\n training=training)\n l_xyz, l_features = [xyz], [tf.squeeze(merged_feature, axis=3)]\n\n for i in range(len(self.SA_modules)):\n li_xyz, li_features = self.SA_modules[i](l_xyz[i],\n l_features[i],\n training=training)\n l_xyz.append(li_xyz)\n l_features.append(li_features)\n\n rcnn_cls = tf.squeeze(tf.transpose(\n self.cls_blocks(l_features[-1], training=training), (0, 2, 1)),\n axis=1) # (B, 1 or 2)\n rcnn_reg = tf.squeeze(tf.transpose(\n self.reg_blocks(l_features[-1], training=training), (0, 2, 1)),\n axis=1) # (B, C)\n\n ret_dict = {'rois': roi_boxes3d, 'cls': rcnn_cls, 'reg': rcnn_reg}\n\n if gt_boxes3d is not None:\n ret_dict.update(target)\n return ret_dict\n\n def loss(self, results, inputs):\n rcnn_cls = results['cls']\n rcnn_reg = results['reg']\n\n cls_label = tf.cast(results['cls_label'], tf.float32)\n reg_valid_mask = results['reg_valid_mask']\n gt_boxes3d_ct = results['gt_of_rois']\n pts_input = results['pts_input']\n\n cls_label_flat = tf.reshape(cls_label, (-1))\n\n # binary cross entropy\n rcnn_cls_flat = tf.reshape(rcnn_cls, (-1))\n batch_loss_cls = tf.keras.losses.BinaryCrossentropy(reduction=\"none\")(\n tf.sigmoid(rcnn_cls_flat), cls_label)\n cls_valid_mask = tf.cast((cls_label_flat >= 0), tf.float32)\n rcnn_loss_cls = tf.reduce_sum(\n batch_loss_cls * cls_valid_mask) / tf.maximum(\n tf.reduce_sum(cls_valid_mask), 1.0)\n\n # rcnn regression loss\n batch_size = pts_input.shape[0]\n fg_mask = (reg_valid_mask > 0)\n fg_sum = tf.reduce_sum(tf.cast(fg_mask, tf.int64)).numpy()\n if fg_sum != 0:\n anchor_size = self.proposal_layer.mean_size\n\n loss_loc, loss_angle, loss_size, reg_loss_dict = \\\n get_reg_loss(tf.reshape(rcnn_reg, (batch_size, -1))[fg_mask],\n tf.reshape(gt_boxes3d_ct, (batch_size, 7))[fg_mask],\n loc_scope=self.proposal_layer.loc_scope,\n loc_bin_size=self.proposal_layer.loc_bin_size,\n num_head_bin=self.proposal_layer.num_head_bin,\n anchor_size=anchor_size,\n get_xz_fine=True, get_y_by_bin=self.proposal_layer.get_y_by_bin,\n loc_y_scope=self.proposal_layer.loc_y_scope, loc_y_bin_size=self.proposal_layer.loc_y_bin_size,\n get_ry_fine=True)\n\n loss_size = 3 * loss_size # consistent with old codes\n rcnn_loss_reg = loss_loc + loss_angle + loss_size\n else:\n # Regression loss is zero when no point is classified as foreground.\n rcnn_loss_reg = tf.reduce_mean(rcnn_reg * 0)\n\n return {\"cls\": rcnn_loss_cls, \"reg\": rcnn_loss_reg}\n\n\ndef rotate_pc_along_y(pc, rot_angle):\n \"\"\"\n 
Args:\n pc: (N, 3+C), (N, 3) is in the rectified camera coordinate.\n rot_angle: rad scalar\n\n Returns:\n pc: updated pc with XYZ rotated.\n \"\"\"\n cosval = np.cos(rot_angle)\n sinval = np.sin(rot_angle)\n rotmat = np.array([[cosval, -sinval], [sinval, cosval]])\n pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))\n return pc\n\n\nclass ProposalLayer(tf.keras.layers.Layer):\n\n def __init__(self,\n nms_pre=9000,\n nms_post=512,\n nms_thres=0.85,\n nms_post_val=None,\n nms_thres_val=None,\n mean_size=[1.0],\n loc_xz_fine=True,\n loc_scope=3.0,\n loc_bin_size=0.5,\n num_head_bin=12,\n get_y_by_bin=False,\n get_ry_fine=False,\n loc_y_scope=0.5,\n loc_y_bin_size=0.25,\n post_process=True):\n super().__init__()\n self.nms_pre = nms_pre\n self.nms_post = nms_post\n self.nms_thres = nms_thres\n self.nms_post_val = nms_post_val\n self.nms_thres_val = nms_thres_val\n self.mean_size = tf.constant(mean_size)\n self.loc_scope = loc_scope\n self.loc_bin_size = loc_bin_size\n self.num_head_bin = num_head_bin\n self.loc_xz_fine = loc_xz_fine\n self.get_y_by_bin = get_y_by_bin\n self.get_ry_fine = get_ry_fine\n self.loc_y_scope = loc_y_scope\n self.loc_y_bin_size = loc_y_bin_size\n self.post_process = post_process\n\n def call(self, rpn_scores, rpn_reg, xyz, training=True):\n batch_size = xyz.shape[0]\n proposals = decode_bbox_target(\n tf.reshape(xyz, (-1, xyz.shape[-1])),\n tf.reshape(rpn_reg, (-1, rpn_reg.shape[-1])),\n anchor_size=self.mean_size,\n loc_scope=self.loc_scope,\n loc_bin_size=self.loc_bin_size,\n num_head_bin=self.num_head_bin,\n get_xz_fine=self.loc_xz_fine,\n get_y_by_bin=self.get_y_by_bin,\n get_ry_fine=self.get_ry_fine,\n loc_y_scope=self.loc_y_scope,\n loc_y_bin_size=self.loc_y_bin_size) # (N, 7)\n\n proposals = tf.reshape(proposals, (batch_size, -1, 7))\n\n nms_post = self.nms_post\n nms_thres = self.nms_thres\n if not training:\n if self.nms_post_val is not None:\n nms_post = self.nms_post_val\n if self.nms_thres_val is not None:\n nms_thres = self.nms_thres_val\n\n if self.post_process:\n proposals = tf.concat([\n proposals[..., :1], proposals[..., 1:2] +\n proposals[..., 3:4] / 2, proposals[..., 2:]\n ],\n axis=-1) # set y as the center of bottom\n scores = rpn_scores\n sorted_idxs = tf.argsort(scores, axis=1, direction=\"DESCENDING\")\n\n batch_size = scores.shape[0]\n ret_bbox3d = []\n ret_scores = []\n for k in range(batch_size):\n scores_single = scores[k]\n proposals_single = proposals[k]\n order_single = sorted_idxs[k]\n\n scores_single, proposals_single = self.distance_based_proposal(\n scores_single, proposals_single, order_single, training)\n\n proposals_tot = proposals_single.shape[0]\n\n ret_bbox3d.append(\n tf.concat([\n proposals_single,\n tf.zeros((nms_post - proposals_tot, 7))\n ],\n axis=0))\n ret_scores.append(\n tf.concat(\n [scores_single,\n tf.zeros((nms_post - proposals_tot,))],\n axis=0))\n ret_bbox3d = tf.stack(ret_bbox3d)\n ret_scores = tf.stack(ret_scores)\n else:\n batch_size = rpn_scores.shape[0]\n ret_bbox3d = []\n ret_scores = []\n for k in range(batch_size):\n bev = xywhr_to_xyxyr(\n tf.stack([proposals[k, :, i] for i in [0, 2, 3, 5, 6]],\n axis=-1))\n keep_idx = nms(bev, rpn_scores[k, :, 0], nms_thres)\n\n ret_bbox3d.append(tf.gather(proposals[k], keep_idx))\n ret_scores.append(tf.gather(rpn_scores[k], keep_idx))\n\n return ret_bbox3d, ret_scores\n\n def distance_based_proposal(self, scores, proposals, order, training=True):\n \"\"\"Propose ROIs in two area based on the distance.\n\n Args:\n scores: (N)\n proposals: (N, 7)\n order: (N)\n 
training (bool): Whether we are training?\n \"\"\"\n nms_post = self.nms_post\n nms_thres = self.nms_thres\n if not training:\n if self.nms_post_val is not None:\n nms_post = self.nms_post_val\n if self.nms_thres_val is not None:\n nms_thres = self.nms_thres_val\n\n nms_range_list = [0, 40.0, 80.0]\n pre_top_n_list = [\n 0,\n int(self.nms_pre * 0.7), self.nms_pre - int(self.nms_pre * 0.7)\n ]\n post_top_n_list = [\n 0, int(nms_post * 0.7), nms_post - int(nms_post * 0.7)\n ]\n\n scores_single_list, proposals_single_list = [], []\n\n # sort by score\n scores_ordered = tf.gather(scores, order)\n proposals_ordered = tf.gather(proposals, order)\n\n dist = proposals_ordered[:, 2]\n first_mask = (dist > nms_range_list[0]) & (dist <= nms_range_list[1])\n for i in range(1, len(nms_range_list)):\n # get proposal distance mask\n dist_mask = ((dist > nms_range_list[i - 1]) &\n (dist <= nms_range_list[i]))\n\n if tf.reduce_any(dist_mask):\n # this area has points\n # reduce by mask\n cur_scores = scores_ordered[dist_mask]\n cur_proposals = proposals_ordered[dist_mask]\n\n # fetch pre nms top K\n cur_scores = cur_scores[:pre_top_n_list[i]]\n cur_proposals = cur_proposals[:pre_top_n_list[i]]\n else:\n assert i == 2, '%d' % i\n # this area doesn't have any points, so use rois of first area\n cur_scores = scores_ordered[first_mask]\n cur_proposals = proposals_ordered[first_mask]\n\n # fetch top K of first area\n cur_scores = cur_scores[pre_top_n_list[i -\n 1]:][:pre_top_n_list[i]]\n cur_proposals = cur_proposals[\n pre_top_n_list[i - 1]:][:pre_top_n_list[i]]\n\n # oriented nms\n bev = xywhr_to_xyxyr(\n tf.gather(cur_proposals, [0, 2, 3, 5, 6], axis=1))\n keep_idx = nms(bev, cur_scores, nms_thres)\n\n # Fetch post nms top k\n keep_idx = keep_idx[:post_top_n_list[i]]\n\n scores_single_list.append(tf.gather(cur_scores, keep_idx))\n proposals_single_list.append(tf.gather(cur_proposals, keep_idx))\n\n scores_single = tf.concat(scores_single_list, axis=0)\n proposals_single = tf.concat(proposals_single_list, axis=0)\n return scores_single, proposals_single\n\n\ndef decode_bbox_target(roi_box3d,\n pred_reg,\n loc_scope,\n loc_bin_size,\n num_head_bin,\n anchor_size,\n get_xz_fine=True,\n get_y_by_bin=False,\n loc_y_scope=0.5,\n loc_y_bin_size=0.25,\n get_ry_fine=False):\n \"\"\"\n Args:\n roi_box3d: (N, 7)\n pred_reg: (N, C)\n loc_scope:\n loc_bin_size:\n num_head_bin:\n anchor_size:\n get_xz_fine:\n get_y_by_bin:\n loc_y_scope:\n loc_y_bin_size:\n get_ry_fine:\n \"\"\"\n per_loc_bin_num = int(loc_scope / loc_bin_size) * 2\n loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2\n\n # recover xz localization\n x_bin_l, x_bin_r = 0, per_loc_bin_num\n z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2\n start_offset = z_bin_r\n\n x_bin = tf.argmax(pred_reg[:, x_bin_l:x_bin_r], axis=1)\n z_bin = tf.argmax(pred_reg[:, z_bin_l:z_bin_r], axis=1)\n\n pos_x = tf.cast(x_bin,\n tf.float32) * loc_bin_size + loc_bin_size / 2 - loc_scope\n pos_z = tf.cast(z_bin,\n tf.float32) * loc_bin_size + loc_bin_size / 2 - loc_scope\n\n if get_xz_fine:\n x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3\n z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4\n start_offset = z_res_r\n\n x_res_norm = tf.gather(pred_reg[:, x_res_l:x_res_r],\n x_bin,\n batch_dims=1)\n z_res_norm = tf.gather(pred_reg[:, z_res_l:z_res_r],\n z_bin,\n batch_dims=1)\n x_res = x_res_norm * loc_bin_size\n z_res = z_res_norm * loc_bin_size\n\n pos_x += x_res\n pos_z += z_res\n\n # recover y localization\n if get_y_by_bin:\n y_bin_l, y_bin_r = 
start_offset, start_offset + loc_y_bin_num\n y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num\n start_offset = y_res_r\n\n y_bin = tf.argmax(pred_reg[:, y_bin_l:y_bin_r], axis=1)\n y_res_norm = tf.gather(pred_reg[:, y_res_l:y_res_r],\n y_bin,\n batch_dims=1)\n y_res = y_res_norm * loc_y_bin_size\n pos_y = tf.cast(\n y_bin, tf.float32\n ) * loc_y_bin_size + loc_y_bin_size / 2 - loc_y_scope + y_res\n pos_y = pos_y + roi_box3d[:, 1]\n else:\n y_offset_l, y_offset_r = start_offset, start_offset + 1\n start_offset = y_offset_r\n\n pos_y = roi_box3d[:, 1] + pred_reg[:, y_offset_l]\n\n # recover ry rotation\n ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin\n ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin\n\n ry_bin = tf.argmax(pred_reg[:, ry_bin_l:ry_bin_r], axis=1)\n ry_res_norm = tf.gather(pred_reg[:, ry_res_l:ry_res_r],\n ry_bin,\n batch_dims=1)\n if get_ry_fine:\n # divide pi/2 into several bins\n angle_per_class = (np.pi / 2) / num_head_bin\n ry_res = ry_res_norm * (angle_per_class / 2)\n ry = (tf.cast(ry_bin, tf.float32) * angle_per_class +\n angle_per_class / 2) + ry_res - np.pi / 4\n else:\n angle_per_class = (2 * np.pi) / num_head_bin\n ry_res = ry_res_norm * (angle_per_class / 2)\n\n # bin_center is (0, 30, 60, 90, 120, ..., 270, 300, 330)\n ry = (tf.cast(ry_bin, tf.float32) * angle_per_class + ry_res) % (2 *\n np.pi)\n ry = tf.where(ry > np.pi, ry - 2 * np.pi, ry)\n\n # recover size\n size_res_l, size_res_r = ry_res_r, ry_res_r + 3\n assert size_res_r == pred_reg.shape[1]\n\n size_res_norm = pred_reg[:, size_res_l:size_res_r]\n hwl = size_res_norm * anchor_size + anchor_size\n\n # shift to original coords\n roi_center = roi_box3d[:, 0:3]\n shift_ret_box3d = tf.concat(\n (tf.reshape(pos_x, (-1, 1)), tf.reshape(\n pos_y, (-1, 1)), tf.reshape(pos_z,\n (-1, 1)), hwl, tf.reshape(ry, (-1, 1))),\n axis=1)\n ret_box3d = shift_ret_box3d\n if roi_box3d.shape[1] == 7:\n roi_ry = roi_box3d[:, 6:7]\n ret_box3d = rotate_pc_along_y_tf(shift_ret_box3d, -roi_ry)\n ret_box3d = tf.concat([ret_box3d[:, :6], ret_box3d[:, 6:7] + roi_ry],\n axis=1)\n ret_box3d = tf.concat([\n ret_box3d[:, :1] + roi_center[:, :1], ret_box3d[:, 1:2],\n ret_box3d[:, 2:3] + roi_center[:, 2:3], ret_box3d[:, 3:]\n ],\n axis=1)\n\n return ret_box3d\n\n\ndef rotate_pc_along_y_tf(pc, rot_angle):\n \"\"\"\n :param pc: (N, 3 + C)\n :param rot_angle: (N)\n :return:\n \"\"\"\n cosa = tf.reshape(tf.cos(rot_angle), (-1, 1)) # (N, 1)\n sina = tf.reshape(tf.sin(rot_angle), (-1, 1)) # (N, 1)\n\n raw_1 = tf.concat([cosa, -sina], axis=1) # (N, 2)\n raw_2 = tf.concat([sina, cosa], axis=1) # (N, 2)\n R = tf.concat(\n (tf.expand_dims(raw_1, axis=1), tf.expand_dims(raw_2, axis=1)),\n axis=1) # (N, 2, 2)\n\n pc_temp = tf.reshape(tf.stack([pc[..., 0], pc[..., 2]], axis=-1),\n ((pc.shape[0], -1, 2))) # (N, 512, 2)\n pc_temp = tf.matmul(pc_temp, tf.transpose(R, (0, 2, 1)))\n pc_temp = tf.reshape(pc_temp, (pc.shape[:-1] + (2,))) # (N, 512, 2)\n\n pc = tf.concat(\n [pc_temp[..., :1], pc[..., 1:2], pc_temp[..., 1:2], pc[..., 3:]],\n axis=-1)\n\n return pc\n\n\nclass ProposalTargetLayer(tf.keras.layers.Layer):\n\n def __init__(self,\n pool_extra_width=1.0,\n num_points=512,\n reg_fg_thresh=0.55,\n cls_fg_thresh=0.6,\n cls_bg_thresh=0.45,\n cls_bg_thresh_lo=0.05,\n fg_ratio=0.5,\n roi_per_image=64,\n aug_rot_range=18,\n hard_bg_ratio=0.8,\n roi_fg_aug_times=10):\n super().__init__()\n self.pool_extra_width = pool_extra_width\n self.num_points = num_points\n self.reg_fg_thresh = reg_fg_thresh\n self.cls_fg_thresh = cls_fg_thresh\n 
self.cls_bg_thresh = cls_bg_thresh\n self.cls_bg_thresh_lo = cls_bg_thresh_lo\n self.fg_ratio = fg_ratio\n self.roi_per_image = roi_per_image\n self.aug_rot_range = aug_rot_range\n self.hard_bg_ratio = hard_bg_ratio\n self.roi_fg_aug_times = roi_fg_aug_times\n\n def call(self, x):\n roi_boxes3d, gt_boxes3d, rpn_xyz, pts_feature = x\n batch_rois, batch_gt_of_rois, batch_roi_iou = self.sample_rois_for_rcnn(\n roi_boxes3d, gt_boxes3d)\n\n # point cloud pooling\n pooled_features, pooled_empty_flag = \\\n roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, self.pool_extra_width,\n sampled_pt_num=self.num_points)\n\n sampled_pts, sampled_features = pooled_features[:, :, :, 0:\n 3], pooled_features[:, :, :,\n 3:]\n\n # data augmentation\n sampled_pts, batch_rois, batch_gt_of_rois = \\\n self.data_augmentation(sampled_pts, batch_rois, batch_gt_of_rois)\n\n # canonical transformation\n batch_size = batch_rois.shape[0]\n roi_ry = batch_rois[:, :, 6:7] % (2 * np.pi)\n roi_center = batch_rois[:, :, 0:3]\n sampled_pts = sampled_pts - tf.expand_dims(roi_center,\n axis=2) # (B, M, 512, 3)\n batch_gt_of_rois = tf.concat([\n batch_gt_of_rois[:, :, :3] - roi_center,\n batch_gt_of_rois[:, :, 3:6], batch_gt_of_rois[:, :, 6:] - roi_ry\n ],\n axis=2)\n\n sampled_pts = tf.unstack(sampled_pts)\n batch_gt_of_rois = tf.unstack(batch_gt_of_rois)\n for k in range(batch_size):\n sampled_pts[k] = rotate_pc_along_y_tf(sampled_pts[k],\n batch_rois[k, :, 6])\n batch_gt_of_rois[k] = tf.squeeze(rotate_pc_along_y_tf(\n tf.expand_dims(batch_gt_of_rois[k], axis=1), roi_ry[k]),\n axis=1)\n sampled_pts = tf.stack(sampled_pts)\n batch_gt_of_rois = tf.stack(batch_gt_of_rois)\n\n # regression valid mask\n valid_mask = (pooled_empty_flag == 0)\n reg_valid_mask = tf.cast(\n ((batch_roi_iou > self.reg_fg_thresh) & valid_mask), tf.int64)\n\n # classification label\n batch_cls_label = tf.cast((batch_roi_iou > self.cls_fg_thresh),\n tf.int64)\n invalid_mask = (batch_roi_iou > self.cls_bg_thresh) & (\n batch_roi_iou < self.cls_fg_thresh)\n batch_cls_label = tf.where(\n tf.reduce_any([tf.logical_not(valid_mask), invalid_mask], axis=0),\n -1, batch_cls_label)\n\n output_dict = {\n 'sampled_pts':\n tf.reshape(sampled_pts, (-1, self.num_points, 3)),\n 'pts_feature':\n tf.reshape(sampled_features,\n (-1, self.num_points, sampled_features.shape[3])),\n 'cls_label':\n tf.reshape(batch_cls_label, (-1)),\n 'reg_valid_mask':\n tf.reshape(reg_valid_mask, (-1)),\n 'gt_of_rois':\n tf.reshape(batch_gt_of_rois, (-1, 7)),\n 'gt_iou':\n tf.reshape(batch_roi_iou, (-1)),\n 'roi_boxes3d':\n tf.reshape(batch_rois, (-1, 7))\n }\n\n return output_dict\n\n def sample_rois_for_rcnn(self, roi_boxes3d, gt_boxes3d):\n \"\"\"\n\n Args:\n roi_boxes3d: (B, M, 7)\n gt_boxes3d: (B, N, 8) [x, y, z, h, w, l, ry, cls]\n\n Returns:\n batch_rois: (B, N, 7)\n batch_gt_of_rois: (B, N, 8)\n batch_roi_iou: (B, N)\n \"\"\"\n batch_size = roi_boxes3d.shape[0]\n\n fg_rois_per_image = int(np.round(self.fg_ratio * self.roi_per_image))\n\n batch_rois, batch_gt_of_rois, batch_roi_iou = [], [], []\n for idx in range(batch_size):\n cur_roi, cur_gt = roi_boxes3d[idx], gt_boxes3d[idx]\n\n k = cur_gt.__len__() - 1\n while tf.reduce_sum(cur_gt[k]) == 0:\n k -= 1\n cur_gt = cur_gt[:k + 1]\n\n # include gt boxes in the candidate rois\n iou3d = iou_3d(cur_roi.numpy()[:, [0, 1, 2, 5, 3, 4, 6]],\n cur_gt[:,\n 0:7].numpy()[:,\n [0, 1, 2, 5, 3, 4, 6]]) # (M, N)\n iou3d = tf.constant(iou3d)\n\n gt_assignment = tf.argmax(iou3d, axis=1)\n max_overlaps = tf.gather(iou3d, gt_assignment, 
batch_dims=1)\n\n # sample fg, easy_bg, hard_bg\n fg_thresh = min(self.reg_fg_thresh, self.cls_fg_thresh)\n fg_inds = tf.reshape(tf.where((max_overlaps >= fg_thresh)), (-1))\n\n # TODO: this will mix the fg and bg when CLS_BG_THRESH_LO < iou < CLS_BG_THRESH\n # fg_inds = tf.concat((fg_inds, roi_assignment), axis=0) # consider the roi which has max_iou with gt as fg\n\n easy_bg_inds = tf.reshape(\n tf.where((max_overlaps < self.cls_bg_thresh_lo)), (-1))\n hard_bg_inds = tf.reshape(\n tf.where((max_overlaps < self.cls_bg_thresh) &\n (max_overlaps >= self.cls_bg_thresh_lo)), (-1))\n\n fg_num_rois = len(fg_inds.shape)\n bg_num_rois = len(hard_bg_inds.shape) + len(easy_bg_inds.shape)\n\n if fg_num_rois > 0 and bg_num_rois > 0:\n # sampling fg\n fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)\n\n rand_num = tf.constant(np.random.permutation(fg_num_rois),\n dtype=tf.int64)\n fg_inds = tf.gather(fg_inds, rand_num[:fg_rois_per_this_image])\n\n # sampling bg\n bg_rois_per_this_image = self.roi_per_image - fg_rois_per_this_image\n bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,\n bg_rois_per_this_image)\n\n elif fg_num_rois > 0 and bg_num_rois == 0:\n # sampling fg\n rand_num = np.floor(\n np.random.rand(self.roi_per_image) * fg_num_rois)\n rand_num = tf.constant(rand_num, dtype=tf.int64)\n fg_inds = fg_inds[rand_num]\n fg_rois_per_this_image = self.roi_per_image\n bg_rois_per_this_image = 0\n elif bg_num_rois > 0 and fg_num_rois == 0:\n # sampling bg\n bg_rois_per_this_image = self.roi_per_image\n bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,\n bg_rois_per_this_image)\n\n fg_rois_per_this_image = 0\n else:\n import pdb\n pdb.set_trace()\n raise NotImplementedError\n\n # augment the rois by noise\n roi_list, roi_iou_list, roi_gt_list = [], [], []\n if fg_rois_per_this_image > 0:\n fg_rois_src = tf.gather(cur_roi, fg_inds)\n gt_of_fg_rois = tf.gather(cur_gt,\n tf.gather(gt_assignment, fg_inds))\n iou3d_src = tf.gather(max_overlaps, fg_inds)\n fg_rois, fg_iou3d = self.aug_roi_by_noise_torch(\n fg_rois_src,\n gt_of_fg_rois,\n iou3d_src,\n aug_times=self.roi_fg_aug_times)\n roi_list.append(fg_rois)\n roi_iou_list.append(fg_iou3d)\n roi_gt_list.append(gt_of_fg_rois)\n\n if bg_rois_per_this_image > 0:\n bg_rois_src = tf.gather(cur_roi, bg_inds)\n gt_of_bg_rois = tf.gather(cur_gt,\n tf.gather(gt_assignment, bg_inds))\n iou3d_src = tf.gather(max_overlaps, bg_inds)\n aug_times = 1 if self.roi_fg_aug_times > 0 else 0\n bg_rois, bg_iou3d = self.aug_roi_by_noise_torch(\n bg_rois_src, gt_of_bg_rois, iou3d_src, aug_times=aug_times)\n roi_list.append(bg_rois)\n roi_iou_list.append(bg_iou3d)\n roi_gt_list.append(gt_of_bg_rois)\n\n rois = tf.concat(roi_list, axis=0)\n iou_of_rois = tf.concat(roi_iou_list, axis=0)\n gt_of_rois = tf.concat(roi_gt_list, axis=0)\n\n batch_rois.append(rois)\n batch_gt_of_rois.append(gt_of_rois)\n batch_roi_iou.append(iou_of_rois)\n\n return tf.stack(batch_rois), tf.stack(batch_gt_of_rois), tf.stack(\n batch_roi_iou)\n\n def sample_bg_inds(self, hard_bg_inds, easy_bg_inds,\n bg_rois_per_this_image):\n if len(hard_bg_inds.shape) > 0 and len(easy_bg_inds.shape) > 0:\n hard_bg_rois_num = int(bg_rois_per_this_image * self.hard_bg_ratio)\n easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num\n\n # sampling hard bg\n rand_idx = tf.constant(np.random.randint(low=0,\n high=len(\n hard_bg_inds.shape),\n size=(hard_bg_rois_num,)),\n dtype=tf.int64)\n hard_bg_inds = tf.gather(hard_bg_inds, rand_idx)\n\n # sampling easy bg\n rand_idx = 
tf.constant(np.random.randint(low=0,\n high=len(\n easy_bg_inds.shape),\n size=(easy_bg_rois_num,)),\n dtype=tf.int64)\n easy_bg_inds = tf.gather(easy_bg_inds, rand_idx)\n\n bg_inds = tf.concat([hard_bg_inds, easy_bg_inds], axis=0)\n elif len(hard_bg_inds.shape) > 0 and len(easy_bg_inds.shape) == 0:\n hard_bg_rois_num = bg_rois_per_this_image\n # sampling hard bg\n rand_idx = tf.constant(np.random.randint(low=0,\n high=len(\n hard_bg_inds.shape),\n size=(hard_bg_rois_num,)),\n dtype=tf.int64)\n bg_inds = tf.gather(hard_bg_inds, rand_idx)\n elif len(hard_bg_inds.shape) == 0 and len(easy_bg_inds.shape) > 0:\n easy_bg_rois_num = bg_rois_per_this_image\n # sampling easy bg\n rand_idx = tf.constant(np.random.randint(low=0,\n high=len(\n easy_bg_inds.shape),\n size=(easy_bg_rois_num,)),\n dtype=tf.int64)\n bg_inds = tf.gather(easy_bg_inds, rand_idx)\n else:\n raise NotImplementedError\n\n return bg_inds\n\n def aug_roi_by_noise_torch(self,\n roi_boxes3d,\n gt_boxes3d,\n iou3d_src,\n aug_times=10):\n pos_thresh = min(self.reg_fg_thresh, self.cls_fg_thresh)\n\n aug_boxes = []\n iou_of_rois = []\n for k in range(roi_boxes3d.shape[0]):\n temp_iou = cnt = 0\n roi_box3d = roi_boxes3d[k]\n\n gt_box3d = tf.reshape(gt_boxes3d[k], (1, 7))\n aug_box3d = roi_box3d\n keep = True\n while temp_iou < pos_thresh and cnt < aug_times:\n if np.random.rand() < 0.2:\n aug_box3d = roi_box3d # p=0.2 to keep the original roi box\n keep = True\n else:\n aug_box3d = self.random_aug_box3d(roi_box3d)\n keep = False\n aug_box3d = tf.reshape(aug_box3d, ((1, 7)))\n\n iou3d = iou_3d(aug_box3d.numpy()[:, [0, 1, 2, 5, 3, 4, 6]],\n gt_box3d.numpy()[:, [0, 1, 2, 5, 3, 4, 6]])\n iou3d = tf.constant(iou3d)\n temp_iou = iou3d[0][0]\n cnt += 1\n aug_boxes.append(tf.reshape(aug_box3d, (-1)))\n if cnt == 0 or keep:\n iou_of_rois.append(iou3d_src[k])\n else:\n iou_of_rois.append(temp_iou)\n return tf.stack(aug_boxes), tf.stack(iou_of_rois)\n\n @staticmethod\n def random_aug_box3d(box3d):\n \"\"\"\n Random shift, scale, orientation.\n\n Args:\n box3d: (7) [x, y, z, h, w, l, ry]\n \"\"\"\n # pos_range, hwl_range, angle_range, mean_iou\n range_config = [[0.2, 0.1, np.pi / 12,\n 0.7], [0.3, 0.15, np.pi / 12, 0.6],\n [0.5, 0.15, np.pi / 9,\n 0.5], [0.8, 0.15, np.pi / 6, 0.3],\n [1.0, 0.15, np.pi / 3, 0.2]]\n idx = tf.constant(np.random.randint(low=0,\n high=len(range_config),\n size=(1,))[0],\n dtype=tf.int64)\n\n pos_shift = ((tf.random.uniform(\n (3,)) - 0.5) / 0.5) * range_config[idx][0]\n hwl_scale = ((tf.random.uniform(\n (3,)) - 0.5) / 0.5) * range_config[idx][1] + 1.0\n angle_rot = ((tf.random.uniform(\n (1,)) - 0.5) / 0.5) * range_config[idx][2]\n\n aug_box3d = tf.concat([\n box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale,\n box3d[6:7] + angle_rot\n ],\n axis=0)\n return aug_box3d\n\n def data_augmentation(self, pts, rois, gt_of_rois):\n \"\"\"\n Args:\n pts: (B, M, 512, 3)\n rois: (B, M. 
7)\n gt_of_rois: (B, M, 7)\n \"\"\"\n batch_size, boxes_num = pts.shape[0], pts.shape[1]\n\n # rotation augmentation\n angles = (tf.random.uniform(\n (batch_size, boxes_num)) - 0.5 / 0.5) * (np.pi / self.aug_rot_range)\n\n # calculate gt alpha from gt_of_rois\n temp_x, temp_z, temp_ry = gt_of_rois[:, :,\n 0], gt_of_rois[:, :,\n 2], gt_of_rois[:, :,\n 6]\n temp_beta = tf.atan2(temp_z, temp_x)\n gt_alpha = -tf.sign(\n temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)\n\n temp_x, temp_z, temp_ry = rois[:, :, 0], rois[:, :, 2], rois[:, :, 6]\n temp_beta = tf.atan2(temp_z, temp_x)\n roi_alpha = -tf.sign(\n temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)\n\n pts = tf.unstack(pts)\n gt_of_rois = tf.unstack(gt_of_rois)\n rois = tf.unstack(rois)\n for k in range(batch_size):\n pts[k] = rotate_pc_along_y_tf(pts[k], angles[k])\n gt_of_rois[k] = tf.squeeze(rotate_pc_along_y_tf(\n tf.expand_dims(gt_of_rois[k], axis=1), angles[k]),\n axis=1)\n rois[k] = tf.squeeze(rotate_pc_along_y_tf(\n tf.expand_dims(rois[k], axis=1), angles[k]),\n axis=1)\n\n pts = tf.stack(pts)\n gt_of_rois = tf.stack(gt_of_rois)\n rois = tf.stack(rois)\n\n # calculate the ry after rotation\n temp_x, temp_z = gt_of_rois[:, :, :1], gt_of_rois[:, :, 2:3]\n temp_beta = tf.atan2(temp_z, temp_x)\n gt_of_rois = tf.concat([\n gt_of_rois[:, :, :6],\n tf.sign(temp_beta) * np.pi / 2 + tf.expand_dims(gt_alpha, axis=-1) -\n temp_beta\n ],\n axis=2)\n\n temp_x, temp_z = rois[:, :, :1], rois[:, :, 2:3]\n temp_beta = tf.atan2(temp_z, temp_x)\n rois = tf.concat([\n rois[:, :, :6],\n tf.sign(temp_beta) * np.pi / 2 +\n tf.expand_dims(roi_alpha, axis=-1) - temp_beta\n ],\n axis=2)\n\n # scaling augmentation\n scales = 1 + ((tf.random.uniform(\n (batch_size, boxes_num)) - 0.5) / 0.5) * 0.05\n pts = pts * tf.expand_dims(tf.expand_dims(scales, axis=2), axis=3)\n gt_of_rois = tf.concat([\n gt_of_rois[:, :, :6] * tf.expand_dims(scales, axis=2),\n gt_of_rois[:, :, 6:]\n ],\n axis=2)\n rois = tf.concat(\n [rois[:, :, :6] * tf.expand_dims(scales, axis=2), rois[:, :, 6:]],\n axis=2)\n\n # flip augmentation\n flip_flag = tf.sign(tf.random.uniform((batch_size, boxes_num, 1)) - 0.5)\n pts = tf.concat([\n pts[:, :, :, :1] * tf.expand_dims(flip_flag, axis=3), pts[:, :, :,\n 1:]\n ],\n axis=3)\n gt_of_rois = tf.concat(\n [gt_of_rois[:, :, :1] * flip_flag, gt_of_rois[:, :, 1:]], axis=2)\n # flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry\n src_ry = gt_of_rois[:, :, 6:7]\n ry = tf.cast((flip_flag == 1), tf.float32) * src_ry + tf.cast(\n (flip_flag == -1), tf.float32) * (tf.sign(src_ry) * np.pi - src_ry)\n gt_of_rois = tf.concat([gt_of_rois[:, :, :6], ry], axis=2)\n\n rois = tf.concat([rois[:, :, :1] * flip_flag, rois[:, :, 1:]], axis=2)\n # flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry\n src_ry = rois[:, :, 6:7]\n ry = tf.cast((flip_flag == 1), tf.float32) * src_ry + tf.cast(\n (flip_flag == -1), tf.float32) * (tf.sign(src_ry) * np.pi - src_ry)\n rois = tf.concat([rois[:, :, :6], ry], axis=2)\n\n return pts, rois, gt_of_rois\n" ]
[ [ "numpy.logical_xor", "tensorflow.sign", "tensorflow.concat", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.keras.Sequential", "numpy.round", "numpy.concatenate", "tensorflow.argsort", "tensorflow.where", "numpy.where", "tensorflow.keras.initializers.GlorotNormal", "numpy.arange", "numpy.eye", "tensorflow.floor", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.squeeze", "numpy.sin", "tensorflow.stop_gradient", "tensorflow.gather", "tensorflow.argmax", "numpy.zeros", "tensorflow.logical_not", "tensorflow.norm", "numpy.log", "tensorflow.keras.layers.ReLU", "tensorflow.unstack", "tensorflow.reduce_any", "tensorflow.random.uniform", "tensorflow.atan2", "tensorflow.one_hot", "numpy.random.rand", "numpy.transpose", "tensorflow.optimizers.Adam", "numpy.array", "tensorflow.clip_by_value", "tensorflow.keras.initializers.Constant", "tensorflow.sin", "tensorflow.constant", "tensorflow.cos", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.nn.softmax", "tensorflow.maximum", "tensorflow.keras.layers.Conv1D", "tensorflow.reshape", "tensorflow.sigmoid", "numpy.cos", "tensorflow.expand_dims", "numpy.random.shuffle", "tensorflow.keras.layers.BatchNormalization", "numpy.random.permutation", "tensorflow.keras.layers.Dropout", "tensorflow.keras.initializers.RandomNormal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jmaces/rde
[ "d71169d697c322695901653fedd2ccf97413f018", "d71169d697c322695901653fedd2ccf97413f018" ]
[ "stl10/script_lime.py", "mnist/data/keras_generators.py" ]
[ "import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom lime.lime_image import LimeImageExplainer\nfrom tqdm import tqdm\n\nimport instances\n\nfrom models import load_model\n\n\n# PARAMETERS\nINDICES = range(0, 7999, 160) # data samples\n\n\nif __name__ == \"__main__\":\n # LOAD MODEL\n model = load_model()\n\n # LOAD INSTANCES\n generator = instances.load_generator()\n for INDEX in tqdm(INDICES, \"data sample\"):\n x = generator[INDEX][0, ...]\n xname = os.path.splitext(os.path.split(generator.filenames[INDEX])[1])[\n 0\n ]\n savedir = os.path.join(\"results\", xname)\n os.makedirs(savedir, exist_ok=True)\n pred = model.predict(np.expand_dims(x, 0))\n node = np.argmax(pred[0, ...])\n\n # RUN LIME METHOD\n explainer = LimeImageExplainer()\n explanations = explainer.explain_instance(x, model.predict,)\n explanation = explanations.local_exp[node]\n seg = explanations.segments\n mapping = np.zeros(x.shape)\n for pos, val in explanation:\n mapping[seg == pos] = val\n mapping = np.reshape(mapping, x.shape)\n\n plt.imsave(\n os.path.join(savedir, \"lime.png\",),\n np.mean(mapping, axis=-1),\n cmap=\"RdBu_r\" if mapping.min() < 0 else \"Reds\",\n vmin=-np.abs(mapping).max() if mapping.min() < 0 else 0.0,\n vmax=np.abs(mapping).max(),\n format=\"png\",\n )\n np.savez_compressed(\n os.path.join(savedir, \"lime.npz\",),\n **{\n \"mapping\": mapping,\n \"method\": \"lime\",\n \"index\": INDEX,\n \"node\": node,\n \"prediction\": pred,\n }\n )\n", "import os\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\n# default parameters\nif K.image_data_format() == \"channels_first\":\n INPUT_SHAPE = (1, 28, 28)\n IMAGE_SHAPE = INPUT_SHAPE[1:]\nelse:\n INPUT_SHAPE = (28, 28, 1)\n IMAGE_SHAPE = INPUT_SHAPE[:-1]\n\nBATCH_SIZE_STAT = 10000 # batch size for statistics calculations\nBATCH_SIZE_TRAIN = 128 # batch size for model training\nBATCH_SIZE_TEST = 1 # batch size for model evaluation\n\n\n# loader functions\ndef load_train_data(\n batch_size=BATCH_SIZE_STAT,\n class_mode=\"categorical\",\n rescale=None,\n shuffle=False,\n samplewise_center=True,\n):\n train_data_preprocessor = ImageDataGenerator(\n preprocessing_function=None,\n samplewise_center=samplewise_center,\n horizontal_flip=False,\n rescale=rescale,\n )\n train_generator = train_data_preprocessor.flow_from_directory(\n os.path.join(os.path.split(__file__)[0], \"training\"),\n target_size=IMAGE_SHAPE,\n batch_size=batch_size,\n class_mode=class_mode,\n color_mode=\"grayscale\",\n shuffle=shuffle,\n )\n return train_generator\n\n\ndef load_train_data_augmented(\n batch_size=BATCH_SIZE_TRAIN,\n class_mode=\"categorical\",\n rescale=None,\n shuffle=True,\n samplewise_center=True,\n):\n train_data_preprocessor = ImageDataGenerator(\n preprocessing_function=None,\n samplewise_center=samplewise_center,\n rotation_range=5,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=0.05,\n zoom_range=0.05,\n horizontal_flip=False,\n rescale=rescale,\n )\n train_generator = train_data_preprocessor.flow_from_directory(\n os.path.join(os.path.split(__file__)[0], \"training\"),\n target_size=IMAGE_SHAPE,\n batch_size=batch_size,\n class_mode=class_mode,\n color_mode=\"grayscale\",\n shuffle=shuffle,\n )\n return train_generator\n\n\ndef load_val_data(\n batch_size=BATCH_SIZE_TRAIN,\n class_mode=\"categorical\",\n rescale=None,\n shuffle=False,\n samplewise_center=True,\n):\n val_data_preprocessor = ImageDataGenerator(\n preprocessing_function=None,\n 
samplewise_center=samplewise_center,\n rescale=rescale,\n )\n val_generator = val_data_preprocessor.flow_from_directory(\n os.path.join(os.path.split(__file__)[0], \"validation\"),\n target_size=IMAGE_SHAPE,\n batch_size=batch_size,\n class_mode=class_mode,\n color_mode=\"grayscale\",\n shuffle=shuffle,\n )\n return val_generator\n\n\ndef load_test_data(\n batch_size=BATCH_SIZE_TEST,\n class_mode=None,\n rescale=None,\n shuffle=False,\n samplewise_center=True,\n):\n test_data_preprocessor = ImageDataGenerator(\n preprocessing_function=None,\n samplewise_center=samplewise_center,\n rescale=rescale,\n )\n test_generator = test_data_preprocessor.flow_from_directory(\n os.path.join(os.path.split(__file__)[0], \"testing\"),\n target_size=IMAGE_SHAPE,\n batch_size=batch_size,\n class_mode=class_mode,\n color_mode=\"grayscale\",\n shuffle=shuffle,\n )\n return test_generator\n" ]
[ [ "numpy.expand_dims", "numpy.abs", "numpy.reshape", "numpy.argmax", "numpy.mean", "numpy.zeros" ], [ "tensorflow.keras.preprocessing.image.ImageDataGenerator", "tensorflow.keras.backend.image_data_format" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
reliapy/reliapy
[ "3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d", "3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d", "3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d" ]
[ "src/reliapy/distributions/continuous/_exponnorm.py", "src/reliapy/distributions/continuous/_chi.py", "src/reliapy/distributions/continuous/_erlang.py" ]
[ "from reliapy.distributions.continuous import _Continuous\nfrom scipy.stats import exponnorm as prob\n\n\nclass ExponNorm(_Continuous):\n\n def __init__(self, K=None, loc=None, scale=None, random_state=None):\n self.K = K\n self.loc = loc\n self.scale = scale\n self.stats = prob.stats(K=self.K, loc=self.loc, scale=self.scale, moments='mv')\n self.random_state = random_state\n\n super().__init__()\n\n def pdf(self, X=None):\n \"\"\"\n PDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n PDF of X.\n \"\"\"\n return prob.pdf(X, K=self.K, loc=self.loc, scale=self.scale)\n\n def cdf(self, X=None):\n \"\"\"\n CDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n CDF of X.\n \"\"\"\n return prob.cdf(X, K=self.K, loc=self.loc, scale=self.scale)\n\n def icdf(self, y=None):\n \"\"\"\n Inverse CDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n Inverse CDF of X.\n \"\"\"\n return prob.ppf(y, K=self.K, loc=self.loc, scale=self.scale)\n\n def moment(self, n=1):\n \"\"\"\n Get the non-central moments of order n.\n\n **Input:**\n * **n** (`float`)\n Order of the moment.\n\n **Output**\n non central moment.\n \"\"\"\n return prob.moment(n, K=self.K, loc=self.loc, scale=self.scale)\n\n def rvs(self, n_sim=1):\n \"\"\"\n Get `n_sim` random samples.\n\n **Input:**\n * **n_sim** (`float`)\n Number of random samples.\n\n **Output**\n Samples.\n \"\"\"\n return prob.rvs(K=self.K, loc=self.loc, scale=self.scale, size=n_sim, random_state=self.random_state)\n", "from reliapy.distributions.continuous import _Continuous\nfrom scipy.stats import chi as prob\n\n\nclass Chi(_Continuous):\n\n def __init__(self, df=None, loc=None, scale=None, random_state=None):\n self.df = df\n self.loc = loc\n self.scale = scale\n self.stats = prob.stats(df=self.df, loc=self.loc, scale=self.scale, moments='mv')\n self.random_state = random_state\n\n super().__init__()\n\n def pdf(self, X=None):\n \"\"\"\n PDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n PDF of X.\n \"\"\"\n return prob.pdf(X, df=self.df, loc=self.loc, scale=self.scale)\n\n def cdf(self, X=None):\n \"\"\"\n CDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n CDF of X.\n \"\"\"\n return prob.cdf(X, df=self.df, loc=self.loc, scale=self.scale)\n\n def icdf(self, y=None):\n \"\"\"\n Inverse CDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n Inverse CDF of X.\n \"\"\"\n return prob.ppf(y, df=self.df, loc=self.loc, scale=self.scale)\n\n def moment(self, n=1):\n \"\"\"\n Get the non-central moments of order n.\n\n **Input:**\n * **n** (`float`)\n Order of the moment.\n\n **Output**\n non central moment.\n \"\"\"\n return prob.moment(n, df=self.df, loc=self.loc, scale=self.scale)\n\n def rvs(self, n_sim=1):\n \"\"\"\n Get `n_sim` random samples.\n\n **Input:**\n * **n_sim** (`float`)\n Number of random samples.\n\n **Output**\n Samples.\n \"\"\"\n return prob.rvs(df=self.df, loc=self.loc, scale=self.scale, size=n_sim, random_state=self.random_state)\n", "from reliapy.distributions.continuous import _Continuous\nfrom scipy.stats import erlang as prob\n\n\nclass Erlang(_Continuous):\n\n def __init__(self, a=None, loc=None, scale=None, random_state=None):\n self.a = a\n self.loc = loc\n self.scale = scale\n self.stats = prob.stats(a=self.a, loc=self.loc, scale=self.scale, moments='mv')\n self.random_state = random_state\n\n super().__init__()\n\n def pdf(self, X=None):\n \"\"\"\n PDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n PDF of X.\n \"\"\"\n return 
prob.pdf(X, a=self.a, loc=self.loc, scale=self.scale)\n\n def cdf(self, X=None):\n \"\"\"\n CDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n CDF of X.\n \"\"\"\n return prob.cdf(X, a=self.a, loc=self.loc, scale=self.scale)\n\n def icdf(self, y=None):\n \"\"\"\n Inverse CDF.\n\n **Input:**\n * **X** (`float`)\n Argument.\n\n **Output**\n Inverse CDF of X.\n \"\"\"\n return prob.ppf(y, a=self.a, loc=self.loc, scale=self.scale)\n\n def moment(self, n=1):\n \"\"\"\n Get the non-central moments of order n.\n\n **Input:**\n * **n** (`float`)\n Order of the moment.\n\n **Output**\n non central moment.\n \"\"\"\n return prob.moment(n, a=self.a, loc=self.loc, scale=self.scale)\n\n def rvs(self, n_sim=1):\n \"\"\"\n Get `n_sim` random samples.\n\n **Input:**\n * **n_sim** (`float`)\n Number of random samples.\n\n **Output**\n Samples.\n \"\"\"\n return prob.rvs(a=self.a, loc=self.loc, scale=self.scale, size=n_sim, random_state=self.random_state)\n" ]
[ [ "scipy.stats.exponnorm.cdf", "scipy.stats.exponnorm.pdf", "scipy.stats.exponnorm.stats", "scipy.stats.exponnorm.ppf", "scipy.stats.exponnorm.rvs", "scipy.stats.exponnorm.moment" ], [ "scipy.stats.chi.pdf", "scipy.stats.chi.cdf", "scipy.stats.chi.stats", "scipy.stats.chi.moment", "scipy.stats.chi.ppf", "scipy.stats.chi.rvs" ], [ "scipy.stats.erlang.cdf", "scipy.stats.erlang.rvs", "scipy.stats.erlang.stats", "scipy.stats.erlang.pdf", "scipy.stats.erlang.moment", "scipy.stats.erlang.ppf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fkwai/geolearn
[ "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0", "30cb4353d22af5020a48100d07ab04f465a315b0" ]
[ "app/paper/presentation/temp.py", "app/wqFull/legacy/temp.py", "hydroDL/model/layers.py", "app/waterQual/newSite/prep/countSitePlot.py", "app/waterQual/model/HBN_CQ_click.py", "hydroDL/data/usgs/read.py", "app/waterQual/WRTDS-L5/geoCorr.py", "app/streamflow/regional/box_region_lev0.py", "app/region/ecoRegion.py", "app/paper/paperSigma/sigmabin_HUC.py", "app/wqFull/paper/multiModel.py", "app/waterQual/model/basinRef_single.py", "app/waterQual/30yr/reason/attr_single.py", "app/closeLoop/ForcastDiffFactor.py", "app/waterQual/30yr/reason/box_region.py", "app/waterQual/CQ/All/geoWRTDS_eco.py", "app/waterQual/legacy/silica/linear.py", "app/closeLoop/DigTime.py" ]
[ "from scipy.stats import invgamma\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\nx = np.linspace(invgamma.ppf(0.01, a),\n invgamma.ppf(0.99, a), 100)\nrv = invgamma(a)\nfig, ax = plt.subplots(1, 1)\n\nax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')\n", "import numpy as np\nimport pandas as pd\nfrom hydroDL import kPath\nfrom hydroDL.data import usgs, gageII, gridMET, ntn, transform, GLASS\nfrom hydroDL.app import waterQuality\nimport matplotlib.pyplot as plt\nfrom hydroDL.post import axplot, figplot\n\nsiteNo = '10343500'\ndfV = GLASS.readBasin(siteNo)\n\nvarF = gridMET.varLst+ntn.varLst+['distNTN']\nvarC = usgs.varC\nvarQ = usgs.varQ\nvarG = gageII.lstWaterQuality\nvarLst = varQ+varC+varF\ndf = waterQuality.readSiteTS(siteNo, varLst=varLst, freq='D')\ndf = df.join(dfV)\n\npVar = ['00915', 'NPP', '00060']\nfig, axes = plt.subplots(len(pVar), 1)\nfor k, var in enumerate(pVar):\n axplot.plotTS(axes[k], df.index, df[var])\nfig.show()\n\n\n# interpolation of R\nvar = 'NPP'\nsdStr = '1982-01-01'\nedStr = '2018-12-31'\ntR = pd.date_range(np.datetime64(sdStr), np.datetime64(edStr))\ndfVP = pd.DataFrame({'date': tR}).set_index('date').join(df[var])\ndfVP = dfVP.interpolate(method='cubicspline')\nfig, ax = plt.subplots(1, 1)\naxplot.plotTS(ax, dfV.index, dfV[var], styLst='*', cLst='r')\naxplot.plotTS(ax, dfVP.index, dfVP[var], styLst='-', cLst='b')\nfig.show()\n\nfig, ax = plt.subplots(1, 1)\nax2 = ax.twinx()\nax.plot(df.index, df['00915'], '*r')\nax2.plot(dfVP.index, dfVP['NPP'],'-b')\nfig.show()\n", "import math\nimport torch\nfrom torch import nn, Tensor\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model: int, dropout: float = 0.5, max_len: int = 20000):\n super().__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n position = torch.arange(max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2)\n * (-math.log(3650.0) / d_model))\n pe = torch.zeros(max_len, 1, d_model)\n pe[:, 0, 0::2] = torch.sin(position * div_term)\n pe[:, 0, 1::2] = torch.cos(position * div_term)\n self.register_buffer('pe', pe)\n\n def forward(self, x: Tensor) -> Tensor:\n x = x + self.pe[:x.size(0)]\n return self.dropout(x)\n", "from hydroDL import kPath, utils\nfrom hydroDL.app import waterQuality\nfrom hydroDL.data import gageII, usgs\nfrom hydroDL.master import basins\nfrom hydroDL.post import axplot, figplot\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\nimport time\nimport scipy\nimport json\n\n# all gages\ndirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')\nfileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')\nsiteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()\ncodeLst = sorted(usgs.codeLst)\ncountMatD = np.load(os.path.join(dirInv, 'matCountDaily.npy'))\ncountMatW = np.load(os.path.join(dirInv, 'matCountWeekly.npy'))\n\n\nny = 3\nnsLst = np.arange(5, 20)*ny\n# nsLst = [20, 24, 28, 32, 36, 40, 44, 45,\n# 46, 47, 48, 52, 56, 60, 64, 68, 72, 76]\noutMat = np.ndarray([len(codeLst), len(nsLst)])\nfor i, code in enumerate(codeLst):\n ic = codeLst.index(code)\n count = np.sum(countMatW[:, -ny:, ic], axis=1)\n for j, ns in enumerate(nsLst):\n outMat[i, j] = np.sum(count >= ns)\n\n# plot\nfig, ax = plt.subplots(1, 1, figsize=(6, 6))\naxplot.plotHeatMap(ax, outMat, labLst=[codeLst, nsLst])\nfig.show()\n", "from hydroDL.master import basins\nfrom hydroDL.app import waterQuality, relaCQ\nfrom hydroDL import kPath\nfrom hydroDL.model import 
trainTS\nfrom hydroDL.data import gageII, usgs\nfrom hydroDL.post import axplot, figplot\n\nimport torch\nimport os\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nwqData = waterQuality.DataModelWQ('HBN')\n\n\noutLst = ['HBN-first50-opt1', 'HBN-first50-opt2']\ntrainSet = 'first50'\ntestSet = 'last50'\npLst1, pLst2, errMatLst1, errMatLst2 = [list() for x in range(4)]\nfor outName in outLst:\n p1, o1 = basins.testModel(outName, trainSet, wqData=wqData)\n p2, o2 = basins.testModel(outName, testSet, wqData=wqData)\n errMat1 = wqData.errBySite(p1, subset=trainSet)\n errMat2 = wqData.errBySite(p2, subset=testSet)\n pLst1.append(p1)\n pLst2.append(p2)\n errMatLst1.append(errMat1)\n errMatLst2.append(errMat2)\n\n# plot\ncodeSel = ['00955', '00940', '00915']\n# codeSel = ['00600', '00605', '00405']\nsiteNoLst = wqData.info['siteNo'].unique().tolist()\ndfCrd = gageII.readData(\n varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)\nlat = dfCrd['LAT_GAGE'].values\nlon = dfCrd['LNG_GAGE'].values\nicLst = [wqData.varC.index(code) for code in codeSel]\ncodePdf = usgs.codePdf\npdfArea = gageII.readData(varLst=['DRAIN_SQKM'], siteNoLst=siteNoLst)\nunitConv = 0.3048**3*365*24*60*60/1000**2\n\n\ndef funcMap():\n figM, axM = plt.subplots(len(codeSel), 1, figsize=(8, 6))\n for k, code in enumerate(codeSel):\n ic = wqData.varC.index(code)\n shortName = codePdf.loc[code]['shortName']\n title = 'correlation of {} {}'.format(shortName, code)\n axplot.mapPoint(axM[k], lat, lon,\n errMat2[:, ic, 1], s=12, title=title)\n figP, axP = plt.subplots(len(codeSel), 3, figsize=(8, 6))\n return figM, axM, figP, axP, lon, lat\n\n\ndef funcPoint(iP, axP):\n siteNo = siteNoLst[iP]\n info1 = wqData.subsetInfo(trainSet)\n info2 = wqData.subsetInfo(testSet)\n ind1 = info1[info1['siteNo'] == siteNo].index\n ind2 = info2[info2['siteNo'] == siteNo].index\n # cp = np.concatenate([p1[ind1], p2[ind2]])\n # ct = np.concatenate([o1[ind1], o2[ind2]])\n # q = wqData.q[-1, np.concatenate([ind1, ind2]), 0]\n cLst = [o2[ind2]]+[p[ind2] for p in pLst2]\n area = pdfArea.loc[siteNo]['DRAIN_SQKM']\n q = wqData.q[-1, ind2, 0]/area*unitConv\n\n x = 10**np.linspace(np.log10(np.min(q[q > 0])),\n np.log10(np.max(q[~np.isnan(q)])), 20)\n for k, code in enumerate(codeSel):\n ic = wqData.varC.index(code)\n for j, c, s in zip([0, 1, 2], cLst, ['obs', 'op1', 'opt2']):\n sa, sb, ys = relaCQ.slopeModel(q, c[:, ic], x)\n ceq, dw, yk = relaCQ.kateModel(q, c[:, ic], x)\n title = '{} [{:.2f},{:.2f}], [{:.2f},{:.2f}]'.format(\n s, sa, sb, ceq, dw)\n axP[k, j].plot(np.log10(q), c[:, ic], '*k', label='obs')\n axP[k, j].plot(np.log10(x), ys, '-b', label='slope')\n axP[k, j].plot(np.log10(x), yk, '-r', label='kate')\n axP[k, j].set_title(title)\n # axP[k, j].set_xticks([])\n\n\nfigplot.clickMap(funcMap, funcPoint)\n", "import os\nimport pandas as pd\nimport numpy as np\nfrom hydroDL import kPath\n\n# fileName = r'C:\\Users\\geofk\\work\\waterQuality\\USGS\\dailyTS\\08075400'\n\n__all__ = ['readSample', 'readStreamflow', 'readUsgsText', 'removeFlag']\n\n\ndef readSample(siteNo, codeLst, startDate=None, csv=True, flag=0):\n \"\"\"read USGS sample data, did:\n 1. extract data of interested code and date\n 2. 
average repeated daily observation\n Arguments:\n siteNo {str} -- site number\n Keyword Arguments:\n codeLst {list} -- usgs code of interesting fields (default: {sampleCodeLst})\n startDate {date} -- start date (default: {None})\n flag {int} -- 0 no flag; 1 str flag; 2 num flag \n Returns:\n pandas.DataFrame -- [description]\n \"\"\"\n if csv is False:\n fileC = os.path.join(kPath.dirData, 'USGS', 'sample', siteNo)\n dfC = readUsgsText(fileC, dataType='sample')\n if startDate is not None:\n dfC = dfC[dfC['date'] >= startDate]\n dfC = dfC.set_index('date')\n codeSel = list(set(codeLst) & set(dfC.columns.tolist()))\n codeSel_cd = [code + '_cd' for code in codeSel]\n dfC = dfC[codeSel+codeSel_cd].dropna(how='all')\n if len(dfC) == 0:\n return None if flag == 0 else (None, None)\n dfC1 = dfC[codeSel]\n dfC2 = dfC[codeSel_cd]\n bx = dfC1.notna().values & dfC2.isna().values\n dfC2[bx] = 'x'\n dfC2 = dfC2.fillna('')\n bDup = dfC.index.duplicated(keep=False)\n indUni = dfC.index[~bDup]\n indDup = dfC.index[bDup].unique()\n indAll = dfC.index.unique()\n dfO1 = pd.DataFrame(index=indAll, columns=codeSel)\n dfO2 = pd.DataFrame(index=indAll, columns=codeSel_cd)\n dfO1.loc[indUni] = dfC1.loc[indUni][codeSel]\n dfO2.loc[indUni] = dfC2.loc[indUni][codeSel_cd]\n for ind in indDup:\n temp1 = dfC1.loc[ind]\n temp2 = dfC2.loc[ind]\n for code in codeSel:\n if 'x' in temp2[code+'_cd'].tolist():\n dfO1.loc[ind][code] = temp1[code][temp2[code+'_cd']\n == 'x'].mean()\n if temp2[code+'_cd'].tolist().count('x') > 1:\n dfO2.loc[ind][code+'_cd'] = 'X'\n else:\n dfO2.loc[ind][code+'_cd'] = 'x'\n else:\n dfO1.loc[ind][code] = temp1[code].mean()\n dfO2.loc[ind][code+'_cd'] = ''.join(temp2[code+'_cd'])\n else:\n dirC = os.path.join(kPath.dirData, 'USGS', 'sample', 'csv')\n fileC1 = os.path.join(dirC, siteNo)\n if not os.path.exists(fileC1):\n return None if flag == 0 else (None, None)\n dfO1 = pd.read_csv(fileC1)\n dfO1['date'] = pd.to_datetime(dfO1['date'], format='%Y-%m-%d')\n dfO1 = dfO1.set_index('date')\n if flag > 0:\n fileC2 = os.path.join(dirC, siteNo+'_flag')\n dfO2 = pd.read_csv(fileC2)\n dfO2['date'] = pd.to_datetime(dfO2['date'], format='%Y-%m-%d')\n dfO2 = dfO2.set_index('date')\n if startDate is not None:\n dfO1 = dfO1[dfO1.index >= startDate]\n if flag > 0:\n dfO2 = dfO2[dfO2.index >= startDate]\n if flag > 0:\n if flag == 2:\n dfO3 = pd.DataFrame(\n index=dfO2.index, columns=dfO2.columns, dtype=int)\n dfO3[(dfO2 == 'x') | (dfO2 == 'X')] = 0\n dfO3[(dfO2 != 'x') & (dfO2 != 'X') & (dfO2.notna())] = 1\n dfO2 = dfO3\n codeLst_cd = [code + '_cd' for code in codeLst]\n return (dfO1.reindex(columns=codeLst), dfO2.reindex(columns=codeLst_cd))\n else:\n return dfO1.reindex(columns=codeLst)\n\n\ndef readStreamflow(siteNo, startDate=None, csv=True):\n \"\"\"read USGS streamflow (00060) data, did:\n 1. 
fill missing average observation (00060_00003) by available max and min.\n Arguments:\n siteNo {str} -- site number\n Keyword Arguments:\n startDate {date} -- start date (default: {None})\n Returns:\n pandas.DataFrame -- [description]\n \"\"\"\n if csv is False:\n fileQ = os.path.join(kPath.dirData, 'USGS', 'streamflow', siteNo)\n dfQ = readUsgsText(fileQ, dataType='streamflow')\n if dfQ is None:\n return None\n if startDate is not None:\n dfQ = dfQ[dfQ['date'] >= startDate]\n if '00060_00001' in dfQ.columns and '00060_00002' in dfQ.columns:\n # fill nan using other two fields\n avgQ = dfQ[['00060_00001', '00060_00002']].mean(\n axis=1, skipna=False)\n dfQ['00060_00003'] = dfQ['00060_00003'].fillna(avgQ)\n dfQ = dfQ[['date', '00060_00003']]\n else:\n dfQ = dfQ[['date', '00060_00003']]\n else:\n fileQ = os.path.join(kPath.dirData, 'USGS',\n 'streamflow', 'csv', siteNo)\n dfQ = pd.read_csv(fileQ)\n dfQ['date'] = pd.to_datetime(dfQ['date'], format='%Y-%m-%d')\n if startDate is not None:\n dfQ = dfQ[dfQ['date'] >= startDate]\n return dfQ.set_index('date')\n\n\ndef readUsgsText(fileName, dataType=None):\n \"\"\"read usgs text file, rename head for given dataType\n Arguments:\n fileName {str} -- file name\n Keyword Arguments:\n dataType {str} -- dailyTS, streamflow or sample (default: {None})\n \"\"\"\n with open(fileName) as f:\n k = 0\n line = f.readline()\n while line[0] == \"#\":\n line = f.readline()\n k = k + 1\n headLst = line[:-1].split('\\t')\n typeLst = f.readline()[:-1].split('\\t')\n if k == 0:\n return None\n\n pdf = pd.read_table(fileName, header=k, dtype=str).drop(0)\n for i, x in enumerate(typeLst):\n if x[-1] == 'n':\n pdf[headLst[i]] = pd.to_numeric(pdf[headLst[i]], errors='coerce')\n if x[-1] == 'd':\n pdf[headLst[i]] = pd.to_datetime(pdf[headLst[i]], errors='coerce')\n # modify - only rename head or add columns, will not modify values\n if dataType == 'dailyTS':\n out = renameDailyTS(pdf)\n elif dataType == 'sample':\n out = renameSample(pdf)\n elif dataType == 'streamflow':\n out = renameStreamflow(pdf)\n else:\n out = pdf\n return out\n\n\ndef renameDailyTS(pdf):\n # rename observation fields\n headLst = pdf.columns.tolist()\n for i, head in enumerate(headLst):\n temp = head.split('_')\n if temp[0].isdigit():\n if len(temp) == 3:\n headLst[i] = temp[1] + '_' + temp[2]\n pdf[head] = pdf[head].astype(np.float)\n else:\n headLst[i] = temp[1] + '_' + temp[2] + '_cd'\n pdf.columns = headLst\n # time field\n pdf['date'] = pd.to_datetime(pdf['datetime'], format='%Y-%m-%d')\n return pdf\n\n\ndef renameStreamflow(pdf):\n # pick the longest average Q field\n headLst = pdf.columns.tolist()\n tempS = [head.split('_') for head in headLst if head[-1].isdigit()]\n codeLst = list(set([int(s[0])-int(s[2]) for s in tempS]))\n tempN = list()\n for code in codeLst:\n for k in range(3):\n head = '{}_00060_{:05n}'.format(code+k+1, k+1)\n if head not in headLst:\n pdf[head] = np.nan\n pdf[head+'_cd'] = 'N'\n tempLst = ['{}_00060_{:05n}'.format(code+k+1, k+1) for k in range(3)]\n temp = ((~pdf[tempLst[0]].isna()) & (~pdf[tempLst[1]].isna())) | (\n ~pdf[tempLst[2]].isna())\n tempN.append(temp.sum())\n code = codeLst[tempN.index(max(tempN))]\n # (searched and no code of leading zero)\n pdf = pdf.rename(columns={'{}_00060_{:05n}'.format(\n code+x+1, x+1): '00060_{:05n}'.format(x+1) for x in range(3)})\n pdf = pdf.rename(columns={'{}_00060_{:05n}_cd'.format(\n code+x+1, x+1): '00060_{:05n}_cd'.format(x+1) for x in range(3)})\n\n # time field\n pdf['date'] = pd.to_datetime(pdf['datetime'], 
format='%Y-%m-%d')\n return pdf\n\n\ndef renameSample(pdf):\n # rename observation fields\n headLst = pdf.columns.tolist()\n for i, head in enumerate(headLst):\n if head[1:].isdigit():\n if head.startswith('p'):\n headLst[i] = head[1:]\n pdf[head] = pdf[head].astype(np.float)\n else:\n headLst[i] = head[1:] + '_cd'\n pdf.columns = headLst\n # time field - not work for nan time, use date for current\n # temp = pdf['sample_dt'] + ' ' + pdf['sample_tm']\n # pdf['datetime'] = pd.to_datetime(temp, format='%Y-%m-%d %H:%M')\n pdf['date'] = pd.to_datetime(pdf['sample_dt'], format='%Y-%m-%d')\n return pdf\n\n\ndef removeFlag(dfC, dfCF):\n codeLstF = dfCF.columns.tolist()\n codeLst = [code[:5] for code in codeLstF]\n dfOut = dfC.copy()\n data = dfC[codeLst].values\n dataF = dfCF[codeLstF].values\n data[dataF == 1] = np.nan\n dfOut[codeLst] = data\n return dfOut\n", "from hydroDL import kPath, utils\nfrom hydroDL.app import waterQuality\nfrom hydroDL.master import basins\nfrom hydroDL.data import usgs, gageII, gridMET, ntn\nfrom hydroDL.master import slurm\nfrom hydroDL.post import axplot, figplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport json\n\n# load WRTDS results\ndirRoot1 = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS_weekly')\ndirRoot2 = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS_weekly_rmq')\n\ncode = '00955'\ndfRes1 = pd.read_csv(os.path.join(dirRoot1, 'result', code), dtype={\n 'siteNo': str}).set_index('siteNo')\ndfRes2 = pd.read_csv(os.path.join(dirRoot2, 'result', code), dtype={\n 'siteNo': str}).set_index('siteNo')\n\n# dfRes1[dfRes1 == -9999] = np.nan\ndfGeo = gageII.readData(siteNoLst=dfRes1.index.tolist())\ndfGeo = gageII.updateCode(dfGeo)\n\n# select sites\nnS = 200\ndfR1 = dfRes1[dfRes1['count'] > nS]\nsiteNoLst = dfR1.index.tolist()\ndfR2 = dfRes2.loc[siteNoLst]\ndfG = dfGeo.loc[siteNoLst]\n\n\n\n\nvarGLst = dfG.columns.tolist()\ndfRsq = pd.DataFrame(index=varGLst, columns=['Rsq1', 'Rsq2'])\nfor varG in varGLst:\n x = dfG[varG].values\n y1 = dfR1['corr'].values\n y2 = dfR1['corr'].values\n (xx, yy1, yy2), _ = utils.rmNan([x, y1, y2])\n r1 = np.corrcoef(xx, yy1)[0, 1]\n dfRsq.at[varG, 'Rsq1'] = r1**2\n r2 = np.corrcoef(xx, yy2)[0, 1]\n dfRsq.at[varG, 'Rsq2'] = r2**2\n\ndfRsq.to_csv('temp')\ndfRsq.sort_values('Rsq1', ascending=False)\n\n# varG = 'SLOPE_PCT'\nvarG = 'HLR_BAS_PCT_100M'\nx = dfG[varG].values\ny = dfR1['corr'].values\nx[x < -900] = np.nan\nfig, ax = plt.subplots(1, 1)\nax.plot(x, y, '*')\nax.set_xlabel(varG)\nax.set_ylabel('WRTDS corr')\nfig.show()\n\n# map\nlat = dfG['LAT_GAGE'].values\nlon = dfG['LNG_GAGE'].values\nfigM, axM = plt.subplots(1, 2, figsize=(12, 4))\naxplot.mapPoint(axM[0], lat, lon, x, s=16)\naxplot.mapPoint(axM[1], lat, lon, y, vRange=[0, 1], s=16)\nshortName = usgs.codePdf.loc[code]['shortName']\naxM[0].set_title(varG)\naxM[1].set_title('WRTDS corr, {}'.format(shortName))\nfigM.show()\n", "import matplotlib.pyplot as plt\nfrom hydroDL.post import axplot, figplot\nimport scipy\nfrom hydroDL.data import dbBasin\nfrom hydroDL.master import basinFull\nimport os\nimport pandas as pd\nfrom hydroDL import kPath, utils\nimport importlib\nimport time\nimport numpy as np\nfrom hydroDL.data import usgs, gageII, gridMET, ntn, transform\n\n\ncaseLst = ['08', '09']\n\n\ndataName = 'Q90'\ndm = dbBasin.DataModelFull(dataName)\noutName = '{}-B10'.format(dataName)\nyP, ycP = basinFull.testModel(\n outName, DM=dm, batchSize=20, testSet='all')\nyO, ycO = basinFull.getObs(outName, 'all', DM=dm)\nindT = np.where(dm.t == 
np.datetime64('2010-01-01'))[0][0]\nnash0 = utils.stat.calNash(yP[indT:, :, 0], yO[indT:, :, 0])\nrmse0 = utils.stat.calRmse(yP[indT:, :, 0], yO[indT:, :, 0])\ncorr0 = utils.stat.calCorr(yP[indT:, :, 0], yO[indT:, :, 0])\nbias0 = utils.stat.calBias(yP[indT:, :, 0], yO[indT:, :, 0])\n\nnashLst = list()\nrmseLst = list()\ncorrLst = list()\nbiasLst = list()\n\n\nfor case in caseLst:\n testSet = 'Eco'+case\n nashLstTemp = list()\n rmseLstTemp = list()\n corrLstTemp = list()\n biasLstTemp = list()\n\n # global model\n indS = [dm.siteNoLst.index(siteNo) for siteNo in dm.subset[testSet]]\n nashLstTemp.append(nash0[indS])\n rmseLstTemp.append(rmse0[indS])\n corrLstTemp.append(corr0[indS])\n biasLstTemp.append(bias0[indS])\n\n trainLst = [case[:2]]\n outLst = ['{}-Eco{}-B10-gs'.format(dataName, x)\n for x in trainLst]\n for outName in outLst:\n yP, ycP = basinFull.testModel(\n outName, DM=dm, batchSize=20, testSet=testSet)\n yO, ycO = basinFull.getObs(outName, testSet, DM=dm)\n nash2 = utils.stat.calNash(yP[indT:, :, 0], yO[indT:, :, 0])\n rmse2 = utils.stat.calRmse(yP[indT:, :, 0], yO[indT:, :, 0])\n corr2 = utils.stat.calCorr(yP[indT:, :, 0], yO[indT:, :, 0])\n bias2 = utils.stat.calBias(yP[indT:, :, 0], yO[indT:, :, 0])\n nashLstTemp.append(nash2)\n rmseLstTemp.append(rmse2)\n corrLstTemp.append(corr2)\n biasLstTemp.append(bias2)\n nashLst.append(nashLstTemp)\n rmseLst.append(rmseLstTemp)\n corrLst.append(corrLstTemp)\n biasLst.append(biasLstTemp)\n\n\n# plot box\nlabel1 = caseLst\nlabel2 = ['CONUS', 'lev0']\ndataBox = rmseLst\nfig = figplot.boxPlot(dataBox, widths=0.5, cLst='brgk', label1=label1,\n label2=label2, figsize=(6, 4))\nfig.show()\n", "# initial\nfrom hydroDL import pathSMAP, master\nimport os\nfrom hydroDL.data import dbCsv\nfrom hydroDL.post import plot, stat\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nsubsetLst = ['ecoRegion{0:0>2}_v2f1'.format(x) for x in range(1, 18)]\ncaseLst1 = ['Local', 'CONUS']\ncaseLst2 = ['Forcing', 'Soilm']\nsaveFolder = os.path.join(pathSMAP['dirResult'], 'regionalization')\n\n# load data and calculate stat\nstatLst = list()\ntRange = [20160401, 20180401]\nfor k in range(len(subsetLst)):\n testName = subsetLst[k]\n tempLst = list()\n for case1 in caseLst1:\n for case2 in caseLst2:\n if case1 == 'Local':\n out = os.path.join(pathSMAP['Out_L3_NA'], 'ecoRegion',\n subsetLst[k] + '_' + case2)\n elif case1 == 'CONUS':\n out = os.path.join(pathSMAP['Out_L3_NA'], 'CONUSv2f1_' + case2)\n df, yp, yt = master.test(out, tRange=tRange, subset=testName)\n temp = stat.statError(yp[:, :, 0], yt[:, :, 0])\n tempLst.append(temp)\n statLst.append(tempLst)\n\n# plot box\nkeyLst = stat.keyLst\ncaseLst = list()\nfor case1 in caseLst1:\n for case2 in caseLst2:\n caseLst.append(case1 + ' ' + case2)\necoLst = ['{0:0>2}'.format(x) for x in range(1, 18)]\nfor k in range(len(keyLst)):\n dataBox = list()\n key = keyLst[k]\n for ss in statLst:\n temp = list()\n for s in ss:\n temp.append(s[key])\n dataBox.append(temp)\n fig = plot.plotBoxFig(dataBox, ecoLst, caseLst, title=key, figsize=(12, 4))\n plt.subplots_adjust(wspace=0, hspace=0)\n fig.show()\n saveFile = os.path.join(saveFolder, 'ecoRegion_box_' + key)\n fig.savefig(saveFile)\n\n# improvement from model vs CONUS\nkeyLst = ['RMSE', 'Corr']\nfig, axes = plt.subplots(1, len(keyLst), figsize=(12, 6))\nfor kk in range(len(keyLst)):\n key = keyLst[kk]\n px = list()\n py = list()\n for ss in statLst:\n a = ss[0][key]\n b = ss[1][key]\n c = ss[2][key]\n if key == 'Corr':\n px.append(np.nanmean((b / a)))\n 
py.append(np.nanmean((c / a)))\n titleStr = 'Correlation ratio'\n elif key == 'RMSE':\n px.append(np.nanmean((b - a)))\n py.append(np.nanmean((c - a)))\n titleStr = 'RMSE difference'\n plot.plotVS(px,\n py,\n ax=axes[kk],\n title=titleStr,\n xlabel='improve from CONUS',\n ylabel='improve from Model')\n dist = np.square(px - np.mean(px)) + np.square(py - np.mean(py))\n ind = np.argsort(-dist)[:4]\n for k in ind:\n axes[kk].text(px[k], py[k], '{:02d}'.format(k + 1))\nsaveFile = os.path.join(saveFolder, 'ecoRegion_vs')\nfig.savefig(saveFile)\nfig.show()\n", "import os\nimport rnnSMAP\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\n\nimport imp\nimp.reload(rnnSMAP)\nrnnSMAP.reload()\n\n\nhucLst = ['04051118', '03101317', '02101114',\n '01020304', '02030406', '14151617']\nhucTitleLst = ['HUC 04+05+11+18', 'HUC 03+10+13+17', 'HUC 02+10+11+14',\n 'HUC 02+03+04+06', 'HUC 01+02+03+04', 'HUC 14+15+16+17']\n\n# hucLst = ['04051118', '03101317', '14151617', '02030406']\n# hucTitleLst = ['HUC 04+05+11+18', 'HUC 03+10+13+17',\n# 'HUC 14+15+16+17', 'HUC 02+03+04+06']\nrootDB = rnnSMAP.kPath['DB_L3_NA']\nrootOut = rnnSMAP.kPath['OutSigma_L3_NA']\nsaveFolder = os.path.join(\n rnnSMAP.kPath['dirResult'], 'paperSigma')\nmatplotlib.rcParams.update({'font.size': 16})\nmatplotlib.rcParams.update({'lines.linewidth': 2})\nmatplotlib.rcParams.update({'lines.markersize': 10})\ncLst = 'myrgcb'\n\n\ndoOpt = []\ndoOpt.append('test')\ndoOpt.append('plotBin')\n# doOpt.append('plotProb')\n\n#################################################\nif 'test' in doOpt:\n dsLst = list()\n statErrLst = list()\n statSigmaLst = list()\n statConfLst = list()\n for k in range(0, len(hucLst)):\n trainName = hucLst[k]+'_v2f1'\n out = trainName+'_y15_Forcing_dr60'\n testName = 'ex_'+hucLst[k]+'_v2f1'\n\n ds = rnnSMAP.classDB.DatasetPost(\n rootDB=rootDB, subsetName=testName, yrLst=[2015])\n ds.readData(var='SMAP_AM', field='SMAP')\n ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')\n dsLst.append(ds)\n statErr = ds.statCalError(predField='LSTM', targetField='SMAP')\n statErrLst.append(statErr)\n statSigma = ds.statCalSigma(field='LSTM')\n statSigmaLst.append(statSigma)\n statConf = ds.statCalConf(\n predField='LSTM', targetField='SMAP', rmBias=True)\n statConfLst.append(statConf)\n\n#################################################\n# plot confidence figure\nif 'plotBin' in doOpt:\n fig, ax = plt.subplots(1, 1, figsize=(12, 6))\n for iHuc in range(0, len(hucLst)):\n sigmaMC = getattr(statSigmaLst[iHuc], 'sigmaMC_mat')\n sigmaX = getattr(statSigmaLst[iHuc], 'sigmaX_mat')\n dataBin = sigmaMC/sigmaX\n # dataBin = sigmaX\n sigma = getattr(statSigmaLst[iHuc], 'sigma')\n ubRMSE = getattr(statErrLst[iHuc], 'ubRMSE')\n confMat = getattr(statConfLst[iHuc], 'conf_sigma')\n nbin = 10\n xbin = np.percentile(dataBin, range(0, 101, int(100/nbin)))\n xbinMean = (xbin[0:nbin]+xbin[1:nbin+1])/2\n corrLst = list()\n distLst = list()\n for k in range(0, nbin):\n ind = (dataBin > xbin[k]) & (dataBin <= xbin[k+1])\n conf = rnnSMAP.funPost.flatData(confMat[ind])\n if k == 0:\n print(iHuc, len(conf))\n yRank = np.arange(len(conf))/float(len(conf)-1)\n dist = np.abs(conf - yRank).max()\n distLst.append(dist)\n ax.plot(xbinMean, distLst, marker='*',\n color=cLst[iHuc], label=hucLst[iHuc])\n ax.set_ylabel(r'd($p_{mc}$, 1-to-1)')\n ax.set_xlabel(r'$\\sigma_{mc}$ / $\\sigma_{x}$')\n # ax.set_xlabel(r'$\\sigma_{x}$')\n ax.legend()\n fig.show()\n # saveFile = os.path.join(saveFolder, 'CONUS_sigmaRatioBin')\n saveFile 
= os.path.join(saveFolder, 'CONUS_sigmaMCBin')\n fig.savefig(saveFile, dpi=100)\n fig.savefig(saveFile+'.eps')\n", "\nimport pandas as pd\nfrom hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom hydroDL.post import axplot, figplot\nfrom hydroDL import kPath, utils\nimport json\nimport os\nimport importlib\nfrom hydroDL.master import basinFull\nfrom hydroDL.app.waterQuality import WRTDS\n\ndataNameLst = ['G200N', 'G200']\nlabelLst = ['FPRT2QC', 'QFPRT2C', 'QFRT2C', 'QFPT2C', 'QT2C']\ntrainLst = ['rmR20', 'rmL20', 'rmRT20', 'rmYr5', 'B10']\ntestLst = ['pkR20', 'pkL20', 'pkRT20', 'pkYr5', 'A10']\n\nDF = dbBasin.DataFrameBasin('G200')\nep = 500\ncodeLst = usgs.newC\n\n# count matrix\nmatB = (~np.isnan(DF.c)).astype(int).astype(float)\n# for trainSet, testSet in zip(trainLst, testLst):\ntrainSet = 'rmR20'\ntestSet = 'pkR20'\nmatB1 = DF.extractSubset(matB, trainSet)\nmatB2 = DF.extractSubset(matB, testSet)\ncount1 = np.nansum(matB1, axis=0)\ncount2 = np.nansum(matB2, axis=0)\nmatRm = (count1 < 160) & (count2 < 40)\ncorrLst1 = list()\ncorrLst2 = list()\nfor label in labelLst:\n for dataName in dataNameLst:\n outName = '{}-{}-{}'.format(dataName, label, trainSet)\n outFolder = basinFull.nameFolder(outName)\n corrName1 = 'corrQ-{}-Ep{}.npy'.format(trainSet, ep)\n corrName2 = 'corrQ-{}-Ep{}.npy'.format(testSet, ep)\n corrFile1 = os.path.join(outFolder, corrName1)\n corrFile2 = os.path.join(outFolder, corrName2)\n corr1 = np.load(corrFile1)\n corr1[matRm] = np.nan\n corrLst1.append(corr1)\n corr2 = np.load(corrFile2)\n corr2[matRm] = np.nan\n corrLst2.append(corr2)\n\n# label name\ncaseLst = list()\nfor label in labelLst:\n labelStr = label.split('T')[0]\n for dataName in dataNameLst:\n if dataName[-1] == 'N':\n caseLst.append('{}-LN'.format(labelStr))\n else:\n caseLst.append(labelStr)\n\n# WRTDS\ndirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-dbBasin')\ncorrName1 = 'corr-{}-{}-{}.npy'.format('G200N', trainSet, testSet)\ncorrName2 = 'corr-{}-{}-{}.npy'.format('G200N', testSet, testSet)\ncorrFile1 = os.path.join(dirWRTDS, corrName1)\ncorrFile2 = os.path.join(dirWRTDS, corrName2)\ncorrW1 = np.load(corrFile1)\ncorrW1[matRm] = np.nan\ncorrLst1.append(corrW1)\ncorrW2 = np.load(corrFile2)\ncorrW2[matRm] = np.nan\ncorrLst2.append(corrW2)\ncaseLst.append('WRTDS')\n\n# plot\nfigFolder = r'C:\\Users\\geofk\\work\\waterQuality\\paper\\G200'\ncodeStrLst = [usgs.codePdf.loc[code]['shortName'] for code in codeLst]\n\nmatPlot = np.full([len(corrLst2), len(codeLst)], np.nan)\nfor k, corr in enumerate(corrLst2):\n matPlot[k, :] = np.nanmean(corr, axis=0)\nfig, ax = plt.subplots(1, 1)\naxplot.plotHeatMap(ax, matPlot*100, labLst=[caseLst, codeStrLst])\ntitle = 'Median Testing Correlation of Models'\nax.set_title(title)\nplt.tight_layout()\nfig.show()\nplt.savefig(os.path.join(figFolder, 'heatmap_AllModel'))\n", "from hydroDL.master import basins\nfrom hydroDL.app import waterQuality\nfrom hydroDL import kPath\nfrom hydroDL.model import trainTS\nfrom hydroDL.data import gageII, usgs\nfrom hydroDL.post import axplot, figplot\n\nimport torch\nimport os\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nwqData = waterQuality.DataModelWQ('basinRef')\n\ndoLst = list()\ndoLst.append('subset')\n\nif 'subset' in doLst:\n # find ind have SiO4, NO3\n codeLst = ['00618', '00955']\n icLst = [wqData.varC.index(code) for code in codeLst]\n indAll = np.where(~np.isnan(wqData.c[:, icLst]).all(axis=1))[0]\n 
indAny = np.where(~np.isnan(wqData.c[:, icLst]).any(axis=1))[0]\n # print number of samples\n for code in codeLst:\n ic = wqData.varC.index(code)\n indC = np.where(~np.isnan(wqData.c[:, ic]))[0]\n # seperate index by years\n for ind, lab in zip([indAll, indAny], ['all', 'any']):\n indYr = waterQuality.indYr(\n wqData.info.iloc[ind], yrLst=[1979, 2000])[0]\n indYrCmp = np.setdiff1d(ind, indYr)\n wqData.saveSubset('-'.join(sorted(codeLst)+[lab, 'Y8090']), indYr)\n wqData.saveSubset('-'.join(sorted(codeLst)+[lab, 'rmY8090']), indYrCmp)\n for code in codeLst:\n ic = wqData.varC.index(code)\n indC = np.where(~np.isnan(wqData.c[:, ic]))[0]\n indYr = waterQuality.indYr(\n wqData.info.iloc[indC], yrLst=[1979, 2000])[0]\n indYrCmp = np.setdiff1d(indC, indYr)\n wqData.saveSubset(code+'-Y8090', indYr)\n wqData.saveSubset(code+'-rmY8090', indYrCmp)\n # d=wqData.info.iloc[wqData.subset['00618-00955-any-Y10']]['date']\n # np.sort(pd.DatetimeIndex(d).year.unique())\n # ind=wqData.info.iloc[wqData.subset['00618-00955-any-Y10']].index.values\n # wqData.c[ind, wqData.varC.index('00618')]\n\n# train local\n\n", "import importlib\nfrom hydroDL import kPath, utils\nfrom hydroDL.app import waterQuality\nfrom hydroDL.master import basins\nfrom hydroDL.data import usgs, gageII, gridMET, ntn\nfrom hydroDL.master import slurm\nfrom hydroDL.post import axplot, figplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport json\nimport scipy\nfrom astropy.timeseries import LombScargle\nimport matplotlib.gridspec as gridspec\n\ndirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')\nwith open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:\n dictSite = json.load(f)\n\ncodeLst = sorted(usgs.newC)\nep = 500\nreTest = False\ndataName = 'rbWN5'\nsiteNoLst = dictSite['comb']\nnSite = len(siteNoLst)\n\n# load all sequence\nif False:\n dictLSTMLst = list()\n # LSTM\n labelLst = ['QTFP_C']\n for label in labelLst:\n dictLSTM = dict()\n trainSet = 'comb-B10'\n outName = '{}-{}-{}-{}'.format(dataName, 'comb', label, trainSet)\n for k, siteNo in enumerate(siteNoLst):\n print('\\t site {}/{}'.format(k, len(siteNoLst)), end='\\r')\n df = basins.loadSeq(outName, siteNo)\n dictLSTM[siteNo] = df\n dictLSTMLst.append(dictLSTM)\n # WRTDS\n dictWRTDS = dict()\n dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat',\n 'WRTDS-W', 'B10', 'output')\n for k, siteNo in enumerate(siteNoLst):\n print('\\t site {}/{}'.format(k, len(siteNoLst)), end='\\r')\n saveFile = os.path.join(dirWRTDS, siteNo)\n df = pd.read_csv(saveFile, index_col=None).set_index('date')\n # df = utils.time.datePdf(df)\n dictWRTDS[siteNo] = df\n # Observation\n dictObs = dict()\n for k, siteNo in enumerate(siteNoLst):\n print('\\t site {}/{}'.format(k, len(siteNoLst)), end='\\r')\n df = waterQuality.readSiteTS(\n siteNo, varLst=['00060']+codeLst, freq='W')\n dictObs[siteNo] = df\n\n # calculate correlation\n tt = np.datetime64('2010-01-01')\n t0 = np.datetime64('1980-01-01')\n ind1 = np.where((df.index.values < tt) & (df.index.values >= t0))[0]\n ind2 = np.where(df.index.values >= tt)[0]\n dictLSTM = dictLSTMLst[0]\n corrMat = np.full([len(siteNoLst), len(codeLst), 3], np.nan)\n for ic, code in enumerate(codeLst):\n for siteNo in dictSite[code]:\n indS = siteNoLst.index(siteNo)\n v1 = dictLSTM[siteNo][code].iloc[ind2].values\n v2 = dictWRTDS[siteNo][code].iloc[ind2].values\n v3 = dictObs[siteNo][code].iloc[ind2].values\n vv1, vv2, vv3 = utils.rmNan([v1, v2, v3], returnInd=False)\n rmse1, corr1 = utils.stat.calErr(vv1, vv2)\n 
rmse2, corr2 = utils.stat.calErr(vv1, vv3)\n rmse3, corr3 = utils.stat.calErr(vv2, vv3)\n corrMat[indS, ic, 0] = corr1\n corrMat[indS, ic, 1] = corr2\n corrMat[indS, ic, 2] = corr3\n\n # load basin attributes\n regionLst = ['ECO2_BAS_DOM', 'NUTR_BAS_DOM',\n 'HLR_BAS_DOM_100M', 'PNV_BAS_DOM']\n dfG = gageII.readData(siteNoLst=siteNoLst)\n fileT = os.path.join(gageII.dirTab, 'lookupPNV.csv')\n tabT = pd.read_csv(fileT).set_index('PNV_CODE')\n for code in range(1, 63):\n siteNoTemp = dfG[dfG['PNV_BAS_DOM'] == code].index\n dfG.at[siteNoTemp, 'PNV_BAS_DOM2'] = tabT.loc[code]['PNV_CLASS_CODE']\n dfG = gageII.updateCode(dfG)\n\n# color mat\n# cVar = 'CONTACT'\n# cMat = dfG[cVar].values\n# cMat = np.log(cMat+1)\n# cR = [np.nanpercentile(cMat, 10), np.nanpercentile(cMat, 90)]\n# cR = [np.nanmin(cMat), np.nanmax(cMat)]\n\n# estimate travel time\nd = dfG['ROCKDEPAVE'].values # inches\na = dfG['DRAIN_SQKM'].values # sqkm\nc = dfG['AWCAVE'].values # []\nq = np.ndarray(len(siteNoLst))\nfor k, siteNo in enumerate(siteNoLst):\n q[k] = dictObs[siteNo]['00060'].mean() # cubic feet / s\nunitCov = 0.0254*10**6/0.3048**3/24/60/60/365 # year\ncMat = d*a*c/q * unitCov\ncMatLog = np.log10(cMat)\n\ncVar = 'Estimated Travel time'\ncR = [np.nanmin(cMat), np.nanmax(cMat)]\ncR = [np.nanpercentile(cMat, 10), np.nanpercentile(cMat, 90)]\n\ncode = '00660'\ncVar = 'DEVOPENNLCD06'\nth = 4.755000114\ncMat = dfG[cVar].values\n\n# attr vs diff\ncMatLog = np.log(cMat+1)\nfig, axes = plt.subplots(1, 2, figsize=(10, 4))\nic = codeLst.index(code)\nsc = axplot.scatter121(axes[0], corrMat[:, ic, 1],\n corrMat[:, ic, 2], cMatLog, size=30)\naxes[0].set_xlabel('Corr LSTM')\naxes[0].set_ylabel('Corr WRTDS')\nfig.colorbar(sc, ax=axes[0])\nx = cMat\ny = corrMat[:, ic, 1]**2-corrMat[:, ic, 2]**2\naxes[1].plot(x, y, '*')\naxes[1].plot([np.nanmin(x), np.nanmax(x)], [0, 0], 'k-')\naxes[1].set_ylim([-0.5, 0.5])\naxes[1].set_xlabel(cVar)\naxes[1].set_ylabel('Rsq LSTM - Rsq WRTDS')\nfig.suptitle('affect of {} on {} {}'.format(\n cVar, code, usgs.codePdf.loc[code]['shortName']))\nfig.show()\n\n# threshold\nind1 = np.where(cMat <= th)\nind2 = np.where(cMat > th)\ndataBox = list()\npLst = list()\nfor ind in [ind1, ind2]:\n a = corrMat[ind, ic, 1].flatten()\n b = corrMat[ind, ic, 2].flatten()\n aa, bb = utils.rmNan([a, b], returnInd=False)\n s, p = scipy.stats.ttest_ind(aa, bb)\n dataBox.append([a, b])\n pLst.append(p)\nlabel1 = ['<={:.3f}\\np-value={:.0e}'.format(th, pLst[0]),\n '>{:.3f}\\np-value={:.0e}'.format(th, pLst[1])]\nfig = figplot.boxPlot(dataBox, label1=label1, label2=['LSTM', 'WRTDS'],\n widths=0.5, figsize=(6, 4), yRange=[0, 1])\nfig.suptitle('affect of {} on {} {}'.format(\n cVar, code, usgs.codePdf.loc[code]['shortName']))\nfig.show()\n", "from hydroDL import pathSMAP, master, utils\nfrom hydroDL.master import default\nfrom hydroDL.post import plot, stat\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport pandas as pd\n\ndoLst = list()\n# doLst.append('train')\ndoLst.append('test')\ndoLst.append('post')\nsaveDir = os.path.join(pathSMAP['dirResult'], 'DA')\n\n# test\nif 'test' in doLst:\n torch.cuda.set_device(2)\n\n subset = 'CONUSv2f1'\n tRange = [20150402, 20180401]\n yrStrLst = ['2015', '2016', '2017']\n yfLst = list()\n ypLst = list()\n for yrStr in yrStrLst:\n out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_DA' + yrStr)\n df, yf, obs = master.test(\n out, tRange=tRange, subset=subset, batchSize=100)\n out = os.path.join(pathSMAP['Out_L3_NA'], 'DA',\n 'CONUSv2f1_LSTM' + yrStr)\n df, 
yp, obs = master.test(out, tRange=tRange, subset=subset)\n yf = yf.squeeze()\n yp = yp.squeeze()\n yfLst.append(yf)\n ypLst.append(yp)\n obs = obs.squeeze()\n\n# figure out how many days observation lead\nmaskObs = 1 * ~np.isnan(obs.squeeze())\nmaskDay = np.zeros(maskObs.shape).astype(int)\nngrid, nt = maskObs.shape\nfor j in range(ngrid):\n temp = 0\n for i in range(nt):\n maskDay[j, i] = temp\n if maskObs[j, i] == 1:\n temp = 1\n else:\n if temp != 0:\n temp = temp + 1\nind = np.random.randint(0, ngrid)\nmaskObsDay = maskObs * maskDay\nunique, counts = np.unique(maskObsDay, return_counts=True)\nmaskF = (maskDay >= 1) & (maskDay <= 3)\nstatPLst = list()\nstatFLst = list()\nfor k in range(3):\n statP = stat.statError(\n utils.fillNan(ypLst[k], maskF), utils.fillNan(obs, maskF))\n statF = stat.statError(\n utils.fillNan(yfLst[k], maskF), utils.fillNan(obs, maskF))\n statPLst.append(statP)\n statFLst.append(statF)\n\ncropFile = r'/mnt/sdb/Data/Crop/cropRate_CONUSv2f1.csv'\ncropRate = pd.read_csv(cropFile, dtype=np.float, header=None).values\n# croprate - 0 corn, 4 soybean, 22 spring wheat, 23 winter wheat\ndataGrid = [(statPLst[0]['RMSE'] - statFLst[0]['RMSE']) / statPLst[0]['RMSE'],\n (statPLst[1]['RMSE'] - statFLst[1]['RMSE']) / statPLst[1]['RMSE'],\n (statPLst[2]['RMSE'] - statFLst[2]['RMSE']) / statPLst[2]['RMSE'], \n ]\nprcp = df.getDataTs('APCP_FORA').squeeze()\ndataTs = [[obs, ypLst[0], yfLst[0]], [obs, ypLst[1], yfLst[1]],\n [obs, ypLst[2], yfLst[2]], [prcp]]\ncrd = df.getGeo()\nt = df.getT()\nmapNameLst = ['dRMSE 2015', 'dRMSE 2016', 'dRMSE 2017']\ntsNameLst = ['obs', 'prj', 'fore']\ntBar = [utils.time.t2dt(20160401), utils.time.t2dt(20170401)]\n#plt.tight_layout()\nplot.plotTsMap(\n dataGrid,\n dataTs,\n lat=crd[0],\n lon=crd[1],\n t=t,\n mapNameLst=mapNameLst,\n isGrid=True,\n multiTS=True,\n linewidth=1,\n figsize=(10, 10),\n tBar=tBar)\n\n# see result for different seasons\ntRangeLst = [[20180101, 20180201], [20180201, 20180301], [20180301, 20180401],\n [20160401, 20160501], [20160501, 20160601], [20160601, 20160701],\n [20160701, 20160801], [20160801, 20160901], [20160901, 20161001],\n [20161001, 20161101], [20161101, 20161201], [20161201, 20170101],\n [20170101, 20170201], [20170201, 20170301], [20170301, 20170401],\n [20170401, 20170501], [20170501, 20170601], [20170601, 20170701],\n [20170701, 20170801], [20170801, 20170901], [20170901, 20171001],\n [20171001, 20171101], [20171101, 20171201], [20171201, 20180101]]\ntAllR = [20150402, 20180401]\ntAllA = utils.time.tRange2Array(tAllR)\nstatPLst = list()\nstatFLst = list()\nfor k in range(12):\n tRLst = [tRangeLst[k], tRangeLst[k + 12]]\n temp = list()\n for tR in tRLst:\n tA = utils.time.tRange2Array(tR)\n ind0 = np.array(range(nt))\n ind1, ind2 = utils.time.intersect(tAllA, tA)\n temp.append(ind1)\n indT = np.concatenate(temp)\n yfTemp = utils.fillNan(yf, maskF)[:, indT]\n ypTemp = utils.fillNan(yp, maskF)[:, indT]\n obsTemp = utils.fillNan(obs, maskF)[:, indT]\n statPLst.append(stat.statError(ypTemp, obsTemp))\n statFLst.append(stat.statError(yfTemp, obsTemp))\n\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 14})\nmatplotlib.rcParams.update({'lines.linewidth': 2})\nmatplotlib.rcParams.update({'lines.markersize': 6})\n\nlabCrop = ['Corn', 'Spring wheat', 'Winter wheat']\nindCrop = [0, 22, 23]\ncropFile = r'/mnt/sdb/Data/Crop/cropRate_CONUSv2f1.csv'\ncropRate = pd.read_csv(cropFile, dtype=np.float, header=None).values\nkey = 'RMSE'\n[lat, lon] = df.getGeo()\nfig, axes = plt.subplots(1, 3, figsize=[12, 5])\nfor k 
in range(3):\n grid, uy, ux = utils.grid.array2grid(\n cropRate[:, indCrop[k]], lat=lat, lon=lon)\n plot.plotMap(\n grid, ax=axes[k], lat=uy, lon=ux, title=labCrop[k] + ' percentage')\n plt.tight_layout()\n fig.show()\n\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 14})\nmatplotlib.rcParams.update({'lines.linewidth': 2})\nmatplotlib.rcParams.update({'lines.markersize': 6})\nindLst = [cropRate[:, 0] > 30, cropRate[:, 22] > 5, cropRate[:, 23] > 10]\nlabMonth = [\n 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Agu', 'Sep', 'Oct',\n 'Nov', 'Dec'\n]\nlabCrop = ['Corn', 'Spring wheat', 'Winter wheat']\ncLst = 'rgb'\ndataBox = list()\nfor iC in range(len(indLst)):\n dataBox = list()\n for k in range(12):\n data = statPLst[k]['RMSE'][indLst[iC]] - statFLst[k]['RMSE'][\n indLst[iC]]\n if len(data[~np.isnan(data)]) < 20:\n data = []\n dataBox.append(data)\n fig = plot.plotBoxFig(\n dataBox,\n label1=labMonth,\n label2=[labCrop[iC]],\n sharey=True,\n figsize=[8, 3],\n colorLst=cLst[iC])\n plt.subplots_adjust(wspace=0, hspace=0)\n plt.ylim(-0.02, 0.04)\n fig.show()\n", "\nfrom hydroDL import kPath, utils\nfrom hydroDL.app import waterQuality\nfrom hydroDL.master import basins\nfrom hydroDL.data import usgs, gageII, gridMET, ntn\nfrom hydroDL.master import slurm\nfrom hydroDL.post import axplot, figplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport json\nimport scipy\n\ndirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')\nwith open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:\n dictSite = json.load(f)\n\ncodeLst = sorted(usgs.newC)\nep = 500\nreTest = False\ndataName = 'rbWN5'\nsiteNoLst = dictSite['comb']\nnSite = len(siteNoLst)\n\n# load all sequence\ndictLSTMLst = list()\n# LSTM\nlabelLst = ['QT_C', 'QTFP_C', 'FP_QC']\nfor label in labelLst:\n dictLSTM = dict()\n trainSet = 'comb-B10'\n outName = '{}-{}-{}-{}'.format(dataName, 'comb', label, trainSet)\n for k, siteNo in enumerate(siteNoLst):\n print('\\t site {}/{}'.format(k, len(siteNoLst)), end='\\r')\n df = basins.loadSeq(outName, siteNo)\n dictLSTM[siteNo] = df\n dictLSTMLst.append(dictLSTM)\n# WRTDS\ndictWRTDS = dict()\ndirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-W', 'B10', 'output')\nfor k, siteNo in enumerate(siteNoLst):\n print('\\t site {}/{}'.format(k, len(siteNoLst)), end='\\r')\n saveFile = os.path.join(dirWRTDS, siteNo)\n df = pd.read_csv(saveFile, index_col=None).set_index('date')\n # df = utils.time.datePdf(df)\n dictWRTDS[siteNo] = df\n# Observation\ndictObs = dict()\nfor k, siteNo in enumerate(siteNoLst):\n print('\\t site {}/{}'.format(k, len(siteNoLst)), end='\\r')\n df = waterQuality.readSiteTS(siteNo, varLst=codeLst, freq='W')\n dictObs[siteNo] = df\n\n# calculate correlation\ntt = np.datetime64('2010-01-01')\nind1 = np.where(df.index.values < tt)[0]\nind2 = np.where(df.index.values >= tt)[0]\ndictLSTM = dictLSTMLst[1]\ncorrMat = np.full([len(siteNoLst), len(codeLst), 3], np.nan)\nfor ic, code in enumerate(codeLst):\n for siteNo in dictSite[code]:\n indS = siteNoLst.index(siteNo)\n v1 = dictLSTM[siteNo][code].iloc[ind2].values\n v2 = dictWRTDS[siteNo][code].iloc[ind2].values\n v3 = dictObs[siteNo][code].iloc[ind2].values\n vv1, vv2, vv3 = utils.rmNan([v1, v2, v3], returnInd=False)\n rmse1, corr1 = utils.stat.calErr(vv1, vv2)\n rmse2, corr2 = utils.stat.calErr(vv1, vv3)\n rmse3, corr3 = utils.stat.calErr(vv2, vv3)\n corrMat[indS, ic, 0] = corr1\n corrMat[indS, ic, 1] = corr2\n corrMat[indS, ic, 2] = corr3\n\n# bSite\ndirSel = 
os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')\nwith open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:\n dictSite = json.load(f)\nbSiteMat = np.full([len(siteNoLst), len(codeLst)], False)\nfor k, code in enumerate(codeLst):\n siteNoCode = dictSite[code]\n indS = [siteNoLst.index(siteNo) for siteNo in siteNoCode]\n bSiteMat[indS, k] = True\n\n\n# load regions\nregionLst = [\n 'ECO2_BAS_DOM',\n 'NUTR_BAS_DOM',\n 'HLR_BAS_DOM_100M',\n 'PNV_BAS_DOM',\n]\ndfG = gageII.readData(\n varLst=regionLst+['LAT_GAGE', 'LNG_GAGE', 'CLASS'], siteNoLst=siteNoLst)\nfileT = os.path.join(gageII.dirTab, 'lookupPNV.csv')\ntabT = pd.read_csv(fileT).set_index('PNV_CODE')\nfor code in range(1, 63):\n siteNoTemp = dfG[dfG['PNV_BAS_DOM'] == code].index\n dfG.at[siteNoTemp, 'PNV_BAS_DOM2'] = tabT.loc[code]['PNV_CLASS_CODE']\n\n# define what to show\nv = dfG['PNV_BAS_DOM2'].values\nfor vv in [2, 3, 4, 5, 9, 11]:\n # vv = 2\n vStr = 'PNV{}'.format(vv)\n labLst2 = list()\n for s1 in ['', 'Non-']:\n for s2 in ['LSTM', 'WRTDS']:\n labLst2.append('{}{} {}'.format(s1, vStr, s2))\n cLst = 'rbkgcmyrbkgcmy'\n\n # plot box\n labLst1 = [usgs.codePdf.loc[code]['shortName'] +\n '\\n'+code for code in codeLst]\n dataBox = list()\n for k, code in enumerate(codeLst):\n bSite = bSiteMat[:, k]\n temp = list()\n bv = v == vv\n for i in [1, 2]:\n indS = np.where(bSite & bv)[0]\n temp.append(corrMat[indS, k, i])\n bv = v != vv\n for i in [1, 2]:\n indS = np.where(bSite & bv)[0]\n temp.append(corrMat[indS, k, i])\n dataBox.append(temp)\n fig = figplot.boxPlot(dataBox, label1=labLst1, widths=0.5,\n label2=labLst2, figsize=(12, 4), yRange=[0, 1])\n fig.show()\n", "import matplotlib.cm as cm\nfrom sklearn import decomposition\nfrom astropy.timeseries import LombScargle\nimport matplotlib.gridspec as gridspec\nimport importlib\nfrom hydroDL import kPath, utils\nfrom hydroDL.app import waterQuality\nfrom hydroDL.master import basins\nfrom hydroDL.data import usgs, gageII, gridMET, ntn\nfrom hydroDL.master import slurm\nfrom hydroDL.post import axplot, figplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport json\nimport scipy\n\n# count\nfileSiteNo = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteNoLst-1979')\nsiteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()\ncodeCount = sorted(usgs.codeLst)\ndirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')\ncountMatAll = np.load(os.path.join(dirInv, 'matCountWeekly.npy'))\ncountMat = np.ndarray([len(siteNoLstAll), len(codeCount)])\nfor ic, code in enumerate(codeCount):\n countMat[:, ic] = np.sum(countMatAll[:, :, ic], axis=1)\n\n\n# load WRTDS performance\ndirWrtds = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-D', 'All')\nfileC = os.path.join(dirWrtds, 'corr')\ndfCorr = pd.read_csv(fileC, dtype={'siteNo': str}).set_index('siteNo')\nfileR = os.path.join(dirWrtds, 'rmse')\ndfRmse = pd.read_csv(fileR, dtype={'siteNo': str}).set_index('siteNo')\n\n# load geo attr\ndfGeo = gageII.readData(siteNoLst=siteNoLstAll)\ndfGeo = gageII.updateCode(dfGeo)\n\n# select site\nn = 40*2\ncode = '00915'\nbMat = countMat[:, codeCount.index(code)] > n\nindSel = np.where(countMat[:, codeCount.index(code)] > n)[0]\nsiteNoLst = [siteNoLstAll[ind] for ind in indSel]\ndfG = dfGeo.loc[siteNoLst]\n\n# eco region\ndirEco = os.path.join(kPath.dirData, 'USGS', 'inventory', 'ecoregion')\nfileEco = os.path.join(dirEco, 'basinEco')\ndfEcoAll = pd.read_csv(fileEco, dtype={'siteNo': str}).set_index('siteNo')\ndfEco = 
dfEcoAll.loc[siteNoLst]\nfor field in ['code'+str(k) for k in range(3)]:\n dfEco[field] = dfEco[field].astype(int).astype(str).str.zfill(2)\ndfEco['comb'] = dfEco[['code0', 'code1']].agg('-'.join, axis=1)\necoLst = sorted(dfEco['comb'].unique().tolist())\n\n# plot\ngeoField = 'FORESTNLCD06'\n# geoField = 'PLANTNLCD06'\ncorrMat = dfCorr.loc[siteNoLst][code].values\ngeoMat = dfG[geoField].values\ncLst = cm.rainbow(np.linspace(0, 1, len(ecoLst)))\nfig, ax = plt.subplots(1, 1)\nfor k, eco in enumerate(ecoLst):\n ind = np.where((dfEco['comb'] == eco).values)[0]\n ax.plot(geoMat[ind], corrMat[ind], c=cLst[k],\n label=eco, marker='*', ls='')\nax.legend()\nfig.show()\n\n# plot\ngeoField = 'FORESTNLCD06'\n# geoField = 'PLANTNLCD06'\ncorrMat = dfCorr.loc[siteNoLst][code].values\ngeoMat = dfG[geoField].values\neco = '10-01'\nfig, ax = plt.subplots(1, 1)\nind = np.where((dfEco['comb'] == eco).values)[0]\nax.plot(geoMat[ind], corrMat[ind],'*')\nfig.show()\n", "import importlib\nfrom hydroDL.master import basins\nfrom hydroDL.app import waterQuality\nfrom hydroDL import kPath, utils\nfrom hydroDL.model import trainTS\nfrom hydroDL.data import gageII, usgs, transform\nfrom hydroDL.post import axplot, figplot\n\nimport torch\nimport os\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\n\n# input\noutName = 'Silica64-Y8090-opt1'\ntestset = 'Y0010'\nwqData = waterQuality.DataModelWQ('Silica64')\n\nmaster = basins.loadMaster(outName)\ndataName = master['dataName']\nif wqData is None:\n wqData = waterQuality.DataModelWQ(dataName)\ntrainset = master['trainName']\ninfoTrain = wqData.info.iloc[wqData.subset[trainset]].reset_index()\ninfoTest = wqData.info.iloc[wqData.subset[testset]].reset_index()\n\n# linear reg data\nstatTup = basins.loadStat(outName)\nvarTup = (master['varX'], master['varXC'], master['varY'], master['varYC'])\ndataTup1 = wqData.transIn(subset=trainset, varTup=varTup, statTup=statTup)\ndataTup2 = wqData.transIn(subset=testset, varTup=varTup, statTup=statTup)\ndataTup1 = trainTS.dealNaN(dataTup1, master['optNaN'])\ndataTup2 = trainTS.dealNaN(dataTup2, master['optNaN'])\nvarYC = varTup[3]\nstatYC = statTup[3]\nx1 = dataTup1[0][-1, :, :]\nyc1 = dataTup1[3]\nx2 = dataTup2[0][-1, :, :]\n\n# point test l2 - linear\nnc = len(varYC)\nmatP1 = np.full([len(infoTrain), nc], np.nan)\nmatP2 = np.full([len(infoTest), nc], np.nan)\nsiteNoLst = infoTest['siteNo'].unique().tolist()\nfor siteNo in siteNoLst:\n ind1 = infoTrain[infoTrain['siteNo'] == siteNo].index\n ind2 = infoTest[infoTest['siteNo'] == siteNo].index\n xT1 = x1[ind1, :]\n ycT1 = yc1[ind1, :]\n for ic in range(nc):\n [xx, yy], iv = utils.rmNan([xT1, ycT1[:, ic]])\n if len(iv) > 0:\n modelYC = LinearRegression().fit(xx, yy)\n matP1[ind1, ic] = modelYC.predict(xT1)\n if len(ind2) > 0:\n xT2 = x2[ind2, :]\n matP1[ind2, ic] = modelYC.predict(xT2)\nmatO1 = wqData.transOut(matP1, statYC, varYC)\nmatO2 = wqData.transOut(matP2, statYC, varYC)\n\nerrMatL1 = wqData.errBySiteC(matO1, varYC, subset=trainset)\nerrMatL2 = wqData.errBySiteC(matO2, varYC, subset=testset)\n\n# box\ndataBox = list()\nfor k in range(nc):\n temp = [errMatL1[:, k, 1], errMatL2[:, k, 1]]\n dataBox.append(temp)\nfig = figplot.boxPlot(dataBox)\nfig.show()\n\n# auto regression\nx1 = dataTup1[0]\nyc1 = dataTup1[3]\nx2 = dataTup2[0]\n\nsiteNo = siteNoLst[0]\nind1 = infoTrain[infoTrain['siteNo'] == siteNo].index\nind2 = infoTest[infoTest['siteNo'] == siteNo].index\nxT1 = x1[:, ind1, :]\nycT1 = 
yc1[ind1, :]\nxT2 = x1[:, ind2, :]\nfor ic in range(nc):\n [xx, yy], iv = utils.rmNan([xT1, ycT1[:, ic]])\n if len(iv) > 0:\n modelYC = LinearRegression().fit(xx, yy)\n matP1[ind1, ic] = modelYC.predict(xT1)\n if len(ind2) > 0:\n xT2 = x2[ind2, :]\n matP1[ind2, ic] = modelYC.predict(xT2)\n", "from hydroDL import pathSMAP, master, utils\nfrom hydroDL.master import default\nfrom hydroDL.post import plot, stat\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport pandas as pd\n\ndoLst = list()\n# doLst.append('train')\ndoLst.append('test')\ndoLst.append('post')\nsaveDir = os.path.join(pathSMAP['dirResult'], 'DA')\nyrLst = ['2015', '2016', '2017']\ntRangeLst = [[20150402, 20160401], [20160402, 20170401], [20170402, 20180401]]\n\n# test\nif 'test' in doLst:\n torch.cuda.set_device(2)\n torch.cuda.empty_cache()\n subset = 'CONUSv2f1'\n tRange = [20150402, 20180401]\n yfLst = list()\n for yr in yrLst:\n out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_DA' + yr)\n df, yf, obs = master.test(\n out, tRange=tRange, subset=subset, batchSize=100)\n yfLst.append(yf)\n out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_LSTM')\n df, yp, obs = master.test(out, tRange=tRange, subset=subset)\n yf = yf.squeeze()\n yp = yp.squeeze()\n obs = obs.squeeze()\n\n# figure out how many days observation lead\nmaskObs = 1 * ~np.isnan(obs.squeeze())\nmaskDay = np.zeros(maskObs.shape).astype(int)\nngrid, nt = maskObs.shape\nfor j in range(ngrid):\n temp = 0\n for i in range(nt):\n maskDay[j, i] = temp\n if maskObs[j, i] == 1:\n temp = 1\n else:\n if temp != 0:\n temp = temp + 1\nind = np.random.randint(0, ngrid)\nmaskObsDay = maskObs * maskDay\nmaskF = (maskDay >= 1) & (maskDay <= 3)\n\n# figure out train and test time index\ntR0 = [20150402, 20180401]\ntA0 = utils.time.tRange2Array(tR0)\nnt = len(tA0)\ntTrainLst = list()\ntTestLst = list()\nfor k in range(len(yrLst)):\n tR = tRangeLst[k]\n tA = utils.time.tRange2Array(tR)\n ind0 = np.array(range(nt))\n ind1, ind2 = utils.time.intersect(tA0, tA)\n tTestLst.append(np.delete(ind0, ind1))\n tTrainLst.append(ind1)\n\n# calculate stat\nfor k in range(len(yrLst)):\n yfTemp = utils.fillNan(yfLst[k], maskF)\n yfTemp = yfTemp[:, tTestLst[k]]\n statP = stat.statError(yfTemp, utils.fillNan(obs, maskF))\n statF = stat.statError(yfTemp, utils.fillNan(obs, maskF))\n" ]
[ [ "scipy.stats.invgamma", "matplotlib.pyplot.subplots", "scipy.stats.invgamma.ppf" ], [ "matplotlib.pyplot.subplots", "pandas.DataFrame", "numpy.datetime64" ], [ "torch.nn.Dropout", "torch.sin", "torch.zeros", "torch.arange", "torch.cos" ], [ "numpy.arange", "pandas.read_csv", "numpy.sum", "matplotlib.pyplot.subplots" ], [ "numpy.isnan", "numpy.log10", "numpy.min" ], [ "pandas.to_datetime", "pandas.read_csv", "pandas.DataFrame", "pandas.read_table", "pandas.to_numeric" ], [ "numpy.corrcoef", "matplotlib.pyplot.subplots", "pandas.DataFrame" ], [ "numpy.datetime64" ], [ "numpy.argsort", "numpy.mean", "numpy.nanmean", "matplotlib.pyplot.subplots_adjust" ], [ "numpy.abs", "matplotlib.rcParams.update", "matplotlib.pyplot.subplots" ], [ "matplotlib.pyplot.tight_layout", "numpy.isnan", "matplotlib.pyplot.subplots", "numpy.nansum", "numpy.nanmean", "numpy.load" ], [ "numpy.isnan", "numpy.setdiff1d" ], [ "numpy.nanmax", "scipy.stats.ttest_ind", "numpy.log", "numpy.nanpercentile", "pandas.read_csv", "numpy.nanmin", "matplotlib.pyplot.subplots", "numpy.datetime64", "numpy.log10", "numpy.where" ], [ "pandas.read_csv", "matplotlib.pyplot.tight_layout", "torch.cuda.set_device", "numpy.unique", "numpy.isnan", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "numpy.concatenate", "matplotlib.rcParams.update", "matplotlib.pyplot.subplots_adjust", "numpy.zeros", "numpy.random.randint" ], [ "pandas.read_csv", "numpy.where", "numpy.datetime64" ], [ "pandas.read_csv", "numpy.where", "numpy.sum", "matplotlib.pyplot.subplots" ], [ "sklearn.linear_model.LinearRegression" ], [ "torch.cuda.set_device", "torch.cuda.empty_cache", "numpy.delete", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
toogy/mnist-em-bmm-gmm
[ "8a4f4d743cd0226a45a70464648ee8724757577e" ]
[ "visualize.py" ]
[ "import scipy.misc\nimport matplotlib.pyplot as plt\n\ndef plot_means(means):\n\n k = means.shape[0]\n\n rows = k // 5 + 1\n columns = min(k, 5)\n\n for i in range(k):\n plt.subplot(rows, columns, i + 1)\n plt.imshow(scipy.misc.toimage(means[i].reshape(28, 28),\n cmin=0.0, cmax=1.0))\n" ]
[ [ "matplotlib.pyplot.subplot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DavidXu9000/AdversarialAblation
[ "a692bfde3a9814bf6639a95ca870fd44f56efbb0" ]
[ "dump_hdf5_dataset.py" ]
[ "# Author: Wei-Ning Hsu\nimport h5py\nimport json\nimport librosa\nimport numpy as np\nimport os\nimport scipy\nimport time\nfrom pathlib import Path\nfrom PIL import Image\nfrom torchvision.transforms import transforms\n\nfrom dataloaders.utils import WINDOWS, compute_spectrogram\n\n\ndef run(json_path, hdf5_json_path, audio_path, image_path, audio_conf={}):\n with open(json_path, 'r') as f:\n data_and_dirs = json.load(f)\n data = data_and_dirs['data']\n audio_base = data_and_dirs['audio_base_path']\n image_base = data_and_dirs['image_base_path']\n print('Loaded %d data from %s' % (len(data), json_path))\n\n run_audio(data, audio_base, audio_path, audio_conf)\n run_image(data, image_base, image_path)\n \n Path(os.path.dirname(hdf5_json_path)).mkdir(parents=True, exist_ok=True)\n with open(hdf5_json_path, 'w') as f:\n d = {'audio_hdf5_path': audio_path, 'image_hdf5_path': image_path}\n json.dump(d, f)\n\n\n# Solution borrows from https://github.com/h5py/h5py/issues/745\ndef run_image(data, image_base, image_path):\n if os.path.exists(image_path):\n print('%s already exists. skip' % image_path)\n return\n\n print('Dumping image to HDF5 : %s' % image_path)\n n = len(data)\n Path(os.path.dirname(image_path)).mkdir(parents=True, exist_ok=True)\n f = h5py.File(image_path, 'w')\n dt = h5py.special_dtype(vlen=np.dtype('uint8'))\n dset_img = f.create_dataset('image', (n,), dtype=dt)\n \n start = time.time()\n for i, d in enumerate(data):\n with open('%s/%s' % (image_base, d['image']), 'rb') as f_img:\n binary_img = f_img.read()\n dset_img[i] = np.frombuffer(binary_img, dtype='uint8')\n\n if i % 100 == 0:\n t = time.time() - start\n print('processed %d / %d images (%.fs)' % (i, n, t))\n\n\ndef run_audio(data, audio_base, audio_path, audio_conf):\n if os.path.exists(audio_path):\n print('%s already exists. skip' % audio_path)\n return\n\n print('Dumping audio to HDF5 : %s' % audio_path)\n print(' audio_conf : %s' % audio_conf)\n\n audio_conf['num_mel_bins'] = audio_conf.get('num_mel_bins', 40)\n audio_conf['target_length'] = audio_conf.get('target_length', 2048)\n audio_conf['use_raw_length'] = audio_conf.get('use_raw_length', False)\n assert(not audio_conf['use_raw_length'])\n \n # dump audio\n n = len(data)\n Path(os.path.dirname(audio_path)).mkdir(parents=True, exist_ok=True)\n f = h5py.File(audio_path, 'w')\n dset_mel_shape = (n, audio_conf['num_mel_bins'],\n audio_conf['target_length'])\n dset_mel = f.create_dataset('melspec', dset_mel_shape, dtype='f')\n dset_len = f.create_dataset('melspec_len', (n,), dtype='i8')\n\n start = time.time()\n for i, d in enumerate(data):\n y, sr = librosa.load('%s/%s' % (audio_base, d['wav']), None)\n logspec, n_frames = compute_spectrogram(y, sr, audio_conf)\n dset_mel[i, :, :] = logspec\n dset_len[i] = n_frames\n\n if i % 100 == 0:\n t = time.time() - start\n print('processed %d / %d audios (%.fs)' % (i, n, t))\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('inp_json_path', type=str, help='input JSON file')\n parser.add_argument('out_json_path', type=str, help='path to save output json')\n parser.add_argument('audio_h5_path', type=str, help='path to save audio HDF5')\n parser.add_argument('image_h5_path', type=str, help='path to save image HDF5')\n args = parser.parse_args()\n print(args)\n\n run(args.inp_json_path, args.out_json_path,\n args.audio_h5_path, args.image_h5_path)\n" ]
[ [ "numpy.frombuffer", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
podismine/BrainAgeReg
[ "134ea1e088a330449c75ef732dc979ae126ca2cf" ]
[ "utils/visualize.py" ]
[ "#coding:utf8\nimport visdom\nimport time\nimport numpy as np\n\nclass Visualizer(object):\n def __init__(self, env='default', **kwargs):\n self.vis = visdom.Visdom(env=env, **kwargs)\n \n self.index = {} \n self.log_text = ''\n def reinit(self,env='default',**kwargs):\n\n self.vis = visdom.Visdom(env=env,**kwargs)\n return self\n\n def plot_many(self, d):\n\n for k, v in d.iteritems():\n self.plot(k, v)\n\n def img_many(self, d):\n for k, v in d.iteritems():\n self.img(k, v)\n\n def plot(self, name, y):\n\n x = self.index.get(name, 0)\n self.vis.line(Y=np.array([y]), X=np.array([x]),\n win=str(name),\n opts=dict(title=name),\n update=None if x == 0 else 'append'\n )\n self.index[name] = x + 1\n\n def img(self, name, img_,**kwargs):\n\n self.vis.images(img_.cpu().numpy(),\n win=str(name),\n opts=dict(title=name),\n **kwargs\n )\n\n\n def log(self,info,win='log_text'):\n\n self.log_text += ('[{time}] {info} <br>'.format(\n time=time.strftime('%m%d_%H%M%S'),\\\n info=info)) \n self.vis.text(self.log_text,win) \n\n def __getattr__(self, name):\n return getattr(self.vis, name)\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlbanOdot/DeepPhysics-article
[ "ff9da848873098396c30de78e5ef086bd9644d87" ]
[ "network_architecture.py" ]
[ "import torch\nimport torch.nn as nn\nfrom typing import List\n\n\nclass FCNN(nn.Module):\n \"\"\"Class that describe the architecture and behavior of the neural network\"\"\"\n neurons_per_layer: int = 0 # number of neurons per layer\n layers: List[nn.Module] # Ordered list of the network layers\n sequence: nn.Sequential # Sequential object that reduces code complexity\n\n def __init__(self, neurons_per_layer: int = 0):\n super(FCNN, self).__init__()\n self.neurons_per_layer = neurons_per_layer\n\n self.layers = [nn.Linear(in_features=self.neurons_per_layer, out_features=self.neurons_per_layer),\n nn.PReLU(num_parameters=self.neurons_per_layer),\n nn.Linear(in_features=self.neurons_per_layer, out_features=self.neurons_per_layer),\n nn.PReLU(num_parameters=self.neurons_per_layer),\n nn.Linear(in_features=self.neurons_per_layer, out_features=self.neurons_per_layer),\n nn.PReLU(num_parameters=self.neurons_per_layer),\n nn.Linear(in_features=self.neurons_per_layer, out_features=self.neurons_per_layer)]\n\n self.sequence = nn.Sequential(*self.layers)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\" Override nn.Module's forward function.\n Take the NN input and return the predicted tensor.\"\"\"\n return self.sequence(x)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sequential", "torch.nn.PReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UTokyo-ICEPP/multiml_htautau
[ "5f926c2291a55f57419aa0130d07e2a793fc7353", "5f926c2291a55f57419aa0130d07e2a793fc7353", "5f926c2291a55f57419aa0130d07e2a793fc7353" ]
[ "multiml_htautau/task/keras/higgsId_mass.py", "multiml_htautau/task/keras/tau4vec_zero.py", "examples/pytorch/run_utils.py" ]
[ "from . import HiggsID_BaseTask\n\n\nclass HiggsID_MassTask(HiggsID_BaseTask):\n ''' HiggsID MLP task\n '''\n def __init__(self,\n layers=None,\n activation=None,\n batch_norm=False,\n scale_mass=1.,\n **kwargs):\n \"\"\"\n\n Args:\n layers (list(int)): the number of nodes in hidden layers in MLP that used for mass transformation.\n activation (str): activation function for MLP.\n batch_norm (bool): use batch normalization\n scale_mass (float): scaling output of mass layer\n **kwargs: Arbitrary keyword arguments\n \"\"\"\n super().__init__(**kwargs)\n\n self._layers = layers\n self._activation = activation\n self._batch_norm = batch_norm\n self._scale_mass = scale_mass\n\n def mass_layer(self, tau_4vec):\n import tensorflow as tf\n from tensorflow.keras.layers import Concatenate\n from tensorflow.keras import backend as K\n tau_4vec = K.reshape(tau_4vec, (-1, self._njets, self._n_features))\n pt = K.exp(K.clip(tau_4vec[:, :, 0], -7., 7.)) - 0.1\n eta = tau_4vec[:, :, 1]\n phi = tau_4vec[:, :, 2]\n mass = 1.777\n\n px = pt * K.cos(phi)\n py = pt * K.sin(phi)\n pz = pt * tf.math.sinh(K.clip(eta, -5, 5))\n epsilon = 0.1 # avoid nan when e=0. sqrt(x)^' = -1/2 * 1/sqrt(x)\n e = K.sqrt(epsilon + px**2 + py**2 + pz**2 + mass**2)\n px = K.reshape(px, (-1, self._njets, 1))\n py = K.reshape(py, (-1, self._njets, 1))\n pz = K.reshape(pz, (-1, self._njets, 1))\n e = K.reshape(e, (-1, self._njets, 1))\n tau_4vec = Concatenate(axis=2)([px, py, pz, e])\n tau_4vec = K.sum(tau_4vec, axis=1)\n px = tau_4vec[:, 0]\n py = tau_4vec[:, 1]\n pz = tau_4vec[:, 2]\n e = tau_4vec[:, 3]\n masssq = e**2 - (px**2 + py**2 + pz**2)\n mass = K.sqrt(epsilon + masssq)\n mass = K.reshape(mass, [-1, 1])\n return mass\n\n def build_model(self):\n from tensorflow.keras.models import Model\n from tensorflow.keras.layers import Lambda\n from multiml.task.keras.modules import MLPBlock\n\n input = self.get_inputs()[0]\n x = input\n\n x = Lambda(self.mass_layer, output_shape=(1, ))(x)\n\n x *= self._scale_mass\n\n mlp = MLPBlock(layers=self._layers,\n activation=self._activation,\n activation_last=self._activation_last,\n batch_norm=self._batch_norm)\n x = mlp(x)\n\n self._model = Model(inputs=input, outputs=x)\n\n self.compile_model()\n\n def _get_custom_objects(self):\n return {\"mass_layer\": self.mass_layer}\n", "from . 
import Tau4vec_BaseTask\n\n\nclass Tau4vec_ZeroTask(Tau4vec_BaseTask):\n ''' Tau4vec Zero task\n '''\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self._trainable_model = False\n\n def build_model(self):\n from tensorflow.keras.models import Model\n from tensorflow.keras.layers import Lambda\n from tensorflow.keras import backend as K\n\n from .modules import zero_layer\n\n input_energy, input_jet = self.get_inputs()\n x = K.reshape(input_jet, (-1, self._n_features))[:, 0:3] # mass is not used\n x = K.reshape(x, (-1, self._njets * (self._n_features - 1)))\n\n x = Lambda(zero_layer)(x)\n x = K.reshape(x, (-1, len(self._output_var_names)))\n\n self._model = Model(inputs=[input_energy, input_jet], outputs=[x])\n\n self.compile_model()\n", "def preprocessing(save_dir,\n config,\n device='cpu',\n tau4vec_tasks=['MLP', 'conv2D', 'SF'],\n higgsId_tasks=['mlp', 'lstm', 'mass'],\n load_weights=False,\n truth_intermediate_inputs=True):\n\n from multiml import logger\n logger.set_level(config.log_level)\n\n from multiml.saver import Saver\n saver = Saver(save_dir, serial_id=config.seed)\n saver.add(\"seed\", config.seed)\n\n # Storegate\n from my_storegate import get_storegate\n \n storegate = get_storegate(\n data_path=config.dataset.params.data_path,\n max_events=config.dataset.params.max_events\n )\n\n # Task scheduler\n from multiml.task_scheduler import TaskScheduler\n\n from my_tasks import get_higgsId_subtasks, get_tau4vec_subtasks\n task_scheduler = TaskScheduler()\n\n if len(tau4vec_tasks) > 0 and len(higgsId_tasks) > 0:\n subtask1 = get_higgsId_subtasks(config.sub_task_params.higgsId,\n saver,\n device=device,\n subtask_names=higgsId_tasks,\n truth_input=truth_intermediate_inputs,\n load_weights=load_weights,\n use_logits = True)\n task_scheduler.add_task(task_id='higgsId',\n parents=['tau4vec'],\n children=[],\n subtasks=subtask1)\n\n subtask2 = get_tau4vec_subtasks(config.sub_task_params.tau4vec,\n saver,\n subtask_names=tau4vec_tasks,\n device=device,\n load_weights=load_weights)\n \n task_scheduler.add_task(task_id='tau4vec',\n parents=[],\n children=['higgsId'],\n subtasks=subtask2)\n\n elif len(higgsId_tasks) > 0:\n subtask = get_higgsId_subtasks(config.sub_task_params.higgsId,\n saver,\n device=device,\n subtask_names=higgsId_tasks,\n load_weights=load_weights,\n use_logits = True)\n task_scheduler.add_task(task_id='higgsId', subtasks=subtask)\n\n elif len(tau4vec_tasks) > 0:\n subtask = get_tau4vec_subtasks(config.sub_task_params.tau4vec,\n saver,\n subtask_names=tau4vec_tasks,\n device=device,\n load_weights=load_weights)\n task_scheduler.add_task(task_id='tau4vec', subtasks=subtask)\n\n else:\n raise ValueError(\"Strange task combination...\")\n\n # Metric\n if len(tau4vec_tasks) > 0 and len(higgsId_tasks) == 0:\n from multiml_htautau.task.metrics import CustomMSEMetric\n from my_tasks import corr_tau_4vec, truth_tau_4vec\n metric = CustomMSEMetric(\n pred_var_name=corr_tau_4vec,\n true_var_name=truth_tau_4vec,\n phase='test'\n )\n else:\n from multiml.agent.metric import AUCMetric\n metric = AUCMetric(pred_var_name='probability',\n true_var_name='label',\n phase='test')\n\n return saver, storegate, task_scheduler, metric\n\n\ndef get_multi_loss(X: float = None):\n if X is None:\n use_multi_loss = False\n loss_weights = None\n else:\n use_multi_loss = True\n loss_weights = {'higgsId': 1.0 - X, \"tau4vec\": X}\n\n return use_multi_loss, loss_weights\n\n\ndef set_seed(seed=1):\n import os\n import random\n\n import numpy as np\n import tensorflow as tf\n from 
tensorflow.random import set_seed\n import torch\n tf.random.set_seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n # optional\n # for numpy.random\n np.random.seed(seed)\n # for built-in random\n random.seed(seed)\n # for hash seed\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.backend.cos", "tensorflow.keras.backend.sin", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Lambda", "tensorflow.keras.backend.sqrt", "tensorflow.keras.backend.sum", "tensorflow.keras.backend.reshape", "tensorflow.keras.backend.clip" ], [ "tensorflow.keras.backend.reshape", "tensorflow.keras.layers.Lambda", "tensorflow.keras.models.Model" ], [ "torch.manual_seed", "numpy.random.seed", "tensorflow.random.set_seed", "torch.cuda.manual_seed_all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adithyabsk/foreshadow
[ "ca2e927c396ae0d61923b287d6e32e142f3ba96f", "ca2e927c396ae0d61923b287d6e32e142f3ba96f", "ca2e927c396ae0d61923b287d6e32e142f3ba96f", "ca2e927c396ae0d61923b287d6e32e142f3ba96f" ]
[ "foreshadow/tests/test_core/test_resolver.py", "foreshadow/steps/feature_summarizer.py", "foreshadow/tests/test_transformers/test_concrete/test_cleaners/test_data_cleaner.py", "foreshadow/intents/numeric.py" ]
[ "\"\"\"Test intent resolution steps.\"\"\"\n\n\ndef test_resolver_overall():\n \"\"\"Big picture intent resolution test.\"\"\"\n\n import numpy as np\n import pandas as pd\n from foreshadow.cachemanager import CacheManager\n from foreshadow.steps import IntentMapper\n\n columns = [\"financials\"]\n data = pd.DataFrame({\"financials\": np.arange(100)}, columns=columns)\n cs = CacheManager()\n ir = IntentMapper(cache_manager=cs)\n ir.fit(data)\n assert cs[\"intent\", \"financials\"] == \"Droppable\"\n", "# noqa\nimport json\n\nimport pandas as pd\n\nfrom foreshadow.utils import AcceptedKey, get_transformer, standard_col_summary\n\nfrom .preparerstep import PreparerStep\n\n\nclass FeatureSummarizerMapper(PreparerStep): # noqa\n def __init__(self, y_var=False, problem_type=None, **kwargs):\n \"\"\"Define the single step for FeatureSummarizer.\n\n Args:\n y_var: whether the summerizer will be applied to X or y\n problem_type: when y_var is True, indicate whether this\n is a regression or classification problem\n **kwargs: kwargs to PreparerStep initializer.\n\n \"\"\"\n super().__init__(**kwargs)\n self.y_var = y_var\n self.problem_type = problem_type\n\n def fit(self, X, *args, **kwargs):\n \"\"\"Fit this step.\n\n calls underlying parallel process.\n\n Args:\n X: input DataFrame\n *args: args to _fit\n **kwargs: kwargs to _fit\n\n Returns:\n transformed data handled by Pipeline._fit\n\n \"\"\"\n summary = self._summarize(X)\n\n if not self.y_var:\n json.dump(summary, open(\"X_train_summary.json\", \"w\"), indent=4)\n\n summary_frame = self._cache_data_summary(summary)\n self.cache_manager[AcceptedKey.SUMMARY] = summary_frame\n return self\n\n def transform(self, X, *args, **kwargs):\n \"\"\"Pass through transform.\n\n Args:\n X (:obj:`numpy.ndarray`): X data\n *args: positional args.\n **kwargs: key word args.\n\n Returns:\n :obj:`numpy.ndarray`: X\n\n \"\"\"\n return X\n\n def inverse_transform(self, X):\n \"\"\"Pass through transform.\n\n Args:\n X (:obj:`numpy.ndarray`): X data\n\n Returns:\n :obj:`numpy.ndarray`: X\n\n \"\"\"\n return X\n\n def _summarize(self, X_df):\n summary = {}\n if self.y_var:\n intent = \"Label\"\n data = standard_col_summary(X_df)\n summary[X_df.columns[0]] = {\"intent\": intent, \"data\": data}\n else:\n for k in X_df.columns.values.tolist():\n intent = self.cache_manager[AcceptedKey.INTENT, k]\n data = get_transformer(intent).column_summary(X_df[[k]])\n summary[k] = {\"intent\": intent, \"data\": data}\n return summary\n\n def _convert_top(self, tops):\n result = {}\n accumulated_frequency = 0\n for i, (value, count, frequency) in enumerate(tops):\n accumulated_frequency += frequency\n result[\"#%d_value\" % (i + 1)] = \"%s %3.2f%%\" % (\n value,\n accumulated_frequency * 100,\n )\n return result\n\n def _cache_data_summary(self, summary):\n records = {}\n for key, value in summary.items():\n rec = {\"intent\": value[\"intent\"]}\n rec.update(value[\"data\"])\n tops = rec.pop(\"top10\")\n rec.update(self._convert_top(tops))\n records[key] = rec\n result = pd.DataFrame(records)\n result.fillna(value={\"invalid_pct\": 0.0}, axis=0, inplace=True)\n result.fillna(\"\", inplace=True)\n result.sort_values(by=\"intent\", axis=1, inplace=True)\n return result\n", "\"\"\"Test data_cleaner.py\"\"\"\nimport pytest\n\n\ndef test_data_cleaner_transform_before_fit():\n import pandas as pd\n from foreshadow.steps import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n\n data = pd.DataFrame(\n {\"financials\": [\"$1.00\", \"$550.01\", \"$1234\", 
\"$12353.3345\"]},\n columns=[\"financials\"],\n )\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n\n with pytest.raises(ValueError) as e:\n dc.transform(data)\n\n assert str(e.value) == \"Cleaner has not been fitted yet.\"\n\n\n# TODO: This is no longer valid as we have separated data cleaner into\n# flattener and cleaner\[email protected](\"TODO: need to fix the flattener and cleaner issue.\")\ndef test_data_cleaner_fit():\n \"\"\"Test basic fit call.\"\"\"\n import pandas as pd\n import numpy as np\n from foreshadow.steps import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n\n data = pd.DataFrame(\n {\n \"dates\": [\"2019-02-11\", \"2019/03/12\", \"2000-04-15\", \"1900/01/55\"],\n \"json\": [\n '{\"date\": \"2019-04-11\"}',\n '{\"financial\": \"$1.0\"}',\n '{\"financial\": \"$1000.00\"}',\n '{\"random\": \"asdf\"}',\n ],\n \"financials\": [\"$1.00\", \"$550.01\", \"$1234\", \"$12353.3345\"],\n },\n columns=[\"dates\", \"json\", \"financials\"],\n )\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n dc.fit(data)\n data = dc.transform(data)\n check = pd.DataFrame(\n [\n [\"2019\", \"02\", \"11\", \"2019\", \"04\", \"11\", np.nan, np.nan, \"1.00\"],\n [\"2019\", \"03\", \"12\", np.nan, \"\", \"\", \"1.0\", np.nan, \"550.01\"],\n [\"2000\", \"04\", \"15\", np.nan, \"\", \"\", \"1000.00\", np.nan, \"1234\"],\n [\"1900\", \"01\", \"55\", np.nan, \"\", \"\", np.nan, \"asdf\", \"12353.3345\"],\n ],\n columns=[\n \"dates0\",\n \"dates1\",\n \"dates2\",\n \"json_date0\",\n \"json_date1\",\n \"json_date2\",\n \"json_financial\",\n \"json_random\",\n \"financials\",\n ],\n )\n print(data.values)\n print(check.values)\n assert np.all(\n np.equal(data.values[data.notna()], check.values[check.notna()])\n )\n\n\ndef test_financials():\n \"\"\"Test financial column cleaned correctly.\"\"\"\n import pandas as pd\n from foreshadow.preparer import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n import numpy as np\n\n data = pd.DataFrame(\n {\"financials\": [\"$1.00\", \"$550.01\", \"$1234\", \"$12353.3345\"]},\n columns=[\"financials\"],\n )\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n dc.fit(data)\n transformed_data = dc.transform(data)\n check = pd.DataFrame(\n {\"financials\": [\"1.00\", \"550.01\", \"1234\", \"12353.3345\"]},\n columns=[\"financials\"],\n )\n assert np.all(\n np.equal(\n transformed_data.values[data.notna()], check.values[check.notna()]\n )\n )\n\n\ndef test_json():\n \"\"\"Test json input cleaned correctly.\"\"\"\n import pandas as pd\n from foreshadow.preparer import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n import numpy as np\n\n data = pd.DataFrame(\n [\n [\"2019-04-11\", np.nan, np.nan],\n [np.nan, \"$1.0\", np.nan],\n [np.nan, \"$1000.00\", np.nan],\n [np.nan, np.nan, \"asdf\"],\n ],\n columns=[\"json_date\", \"json_financial\", \"json_random\"],\n )\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n dc.fit(data)\n data = dc.transform(data)\n check = pd.DataFrame(\n [\n [\"2019\", \"04\", \"11\", np.nan, np.nan],\n [np.nan, \"\", \"\", \"1.0\", np.nan],\n [np.nan, \"\", \"\", \"1000.00\", np.nan],\n [np.nan, \"\", \"\", np.nan, \"asdf\"],\n ],\n columns=[\n \"json_date0\",\n \"json_date1\",\n \"json_date2\",\n \"json_financial\",\n \"json_random\",\n ],\n )\n assert np.all(\n np.equal(data.values[data.notna()], check.values[check.notna()])\n )\n\n\ndef test_drop_entire_data_frame():\n \"\"\"Test drop called when expected to.\"\"\"\n import pandas as pd\n from 
foreshadow.preparer import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n\n columns = [\"financials\"]\n data = pd.DataFrame({\"financials\": [\"\", \"\", \"\", \"\"]}, columns=columns)\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n import pytest\n\n with pytest.raises(ValueError) as excinfo:\n dc.fit_transform(data)\n error_msg = (\n \"All columns are dropped since they all have over 90% of \"\n \"missing values. Aborting foreshadow.\"\n )\n assert error_msg in str(excinfo.value)\n\n\ndef test_drop_empty_columns():\n \"\"\"Test drop empty columns called when expected to.\"\"\"\n import pandas as pd\n from foreshadow.preparer import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n\n columns = [\"financials\", \"nums\"]\n data = pd.DataFrame(\n {\"financials\": [\"\", \"\", \"\", \"\"], \"nums\": [1, 2, 3, 4]}, columns=columns\n )\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n\n transformed_data = dc.fit_transform(data)\n assert len(transformed_data.columns) == 1\n assert list(transformed_data.columns)[0] == \"nums\"\n\n\ndef test_numerical_input():\n \"\"\"Test numerical input.\"\"\"\n import numpy as np\n import pandas as pd\n from foreshadow.preparer import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n\n columns = [\"financials\"]\n data = pd.DataFrame({\"financials\": np.arange(10)}, columns=columns)\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n dc.fit(data)\n transformed_data = dc.transform(data)\n assert np.array_equal(transformed_data, data)\n\n\ndef test_numerical_input_fittransform():\n \"\"\"Test numerical input.\"\"\"\n import numpy as np\n import pandas as pd\n from foreshadow.preparer import CleanerMapper\n from foreshadow.cachemanager import CacheManager\n\n columns = [\"financials\"]\n data = pd.DataFrame({\"financials\": np.arange(10)}, columns=columns)\n cs = CacheManager()\n dc = CleanerMapper(cache_manager=cs)\n transformed_data = dc.fit_transform(data)\n assert np.array_equal(transformed_data, data)\n\n\n# TODO test graph, could be implemented very wrong.\n", "\"\"\"Numeric intent.\"\"\"\n\nimport pandas as pd\n\nfrom foreshadow.metrics import (\n MetricWrapper,\n is_numeric,\n is_string,\n num_valid,\n unique_heur,\n)\nfrom foreshadow.utils import get_outliers, standard_col_summary\n\nfrom .base import BaseIntent\n\n\nclass Numeric(BaseIntent):\n \"\"\"Defines a numeric column type.\"\"\"\n\n confidence_computation = {\n MetricWrapper(num_valid): 0.3,\n MetricWrapper(unique_heur, invert=True): 0.2,\n MetricWrapper(is_numeric): 0.4,\n MetricWrapper(is_string, invert=True): 0.1,\n }\n\n def fit(self, X, y=None, **fit_params):\n \"\"\"Empty fit.\n\n Args:\n X: The input data\n y: The response variable\n **fit_params: Additional parameters for the fit\n\n Returns:\n self\n\n \"\"\"\n return self\n\n def transform(self, X, y=None):\n \"\"\"Convert a column to a numeric form.\n\n Args:\n X: The input data\n y: The response variable\n\n Returns:\n A column with all rows converted to numbers.\n\n \"\"\"\n return X.apply(pd.to_numeric, errors=\"coerce\")\n\n @classmethod\n def column_summary(cls, df): # noqa\n result = standard_col_summary(df)\n\n data_transformed = pd.to_numeric(df.iloc[:, 0], errors=\"coerce\")\n invalid_pct = (\n data_transformed.isnull().sum() * 100.0 / result[\"count\"]\n - result[\"nan_percent\"]\n )\n outliers = get_outliers(data_transformed, count=5).values.tolist()\n\n result.update(\n [\n (\"invalid_percent\", invalid_pct),\n (\"mean\", 
float(data_transformed.mean())),\n (\"std\", float(data_transformed.std())),\n (\"min\", float(data_transformed.min())),\n (\"25%\", float(data_transformed.quantile(0.25))),\n (\"50%\", float(data_transformed.quantile(0.5))),\n (\"75%\", float(data_transformed.quantile(0.75))),\n (\"max\", float(data_transformed.max())),\n (\"5_outliers\", outliers),\n ]\n )\n return result\n" ]
[ [ "numpy.arange" ], [ "pandas.DataFrame" ], [ "numpy.arange", "numpy.array_equal", "pandas.DataFrame" ], [ "pandas.to_numeric" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ZhiliangWu/etips
[ "e5bee81c498287005658f012912c27b491ef3892" ]
[ "tuning.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# etips\n#\n# Copyright (c) Siemens AG, 2020\n# Authors:\n# Zhiliang Wu <[email protected]>\n# License-Identifier: MIT\n\nimport gc\nfrom functools import partial\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom hyperopt import fmin, hp, rand, Trials, STATUS_OK, STATUS_FAIL\nfrom hyperopt.pyll import scope\n\nfrom bandit import LoggingPolicy\nfrom lstm import build_callbacks, build_lstm_classifier, \\\n build_direct_method_classifier, build_bandit_lstm_classifier\nfrom utils import fix_random_seed, InvokeTimes, load_counting_data, load_mnist_data\n\n\[email protected]\ndef roundup(a, decimals=1):\n return np.around(a, decimals)\n\n\ndef hyperopt_supervised(space, data, counter, fp):\n \"\"\"hyperopt for the supervised learning\n \"\"\"\n x_tr, y_tr, x_val, y_val, x_te, y_te = data\n counter.add_one()\n print(f'============= TRIAL NUMBER {counter.number} =============')\n print(space)\n\n batch_size = space.pop('batch_size', 32)\n\n model = build_lstm_classifier(timesteps=x_tr.shape[1], feature_size=x_tr.shape[2],\n output_shape=y_tr.shape[1], **space)\n\n cbs = build_callbacks(monitor='val_loss', save=False)\n try:\n print('fitting the model')\n history = model.fit(x=x_tr, y=y_tr, epochs=100, verbose=0, batch_size=batch_size,\n validation_data=(x_val, y_val), callbacks=cbs)\n model.save(fp / f'model_trial_{counter.number}.h5')\n print('model saved')\n except Exception as e:\n print(f'Exception: {e}')\n return {'status': STATUS_FAIL, 'exception': e, **space}\n else:\n loss = - history.history['val_acc'][-1]\n _, test_acc = model.evaluate(x_te, y_te, verbose=0)\n epoch_count = len(history.epoch)\n\n print(f'val_acc is {-loss}')\n print(f'test_acc: {test_acc}')\n print(f'number of epochs is {epoch_count}')\n\n # useful for runnining many experiments with tf\n tf.keras.backend.clear_session()\n del model\n gc.collect()\n\n return {'loss': loss,\n 'test_acc': test_acc,\n 'status': STATUS_OK,\n 'epoch': epoch_count,\n 'batch_size': batch_size,\n 'trial_index': counter.number,\n **space\n }\n\n\ndef hyperopt_logging_policy(space, data, counter, fp):\n \"\"\"hyperopt for the training on 5% data, use one of it as logging policy\n \"\"\"\n x_tr, y_tr, x_te, y_te = data\n counter.add_one()\n print(f'============= TRIAL NUMBER {counter.number} =============')\n print(space)\n\n lp = LoggingPolicy(model_path=None, x_train=x_tr, y_train=y_tr, x_test=x_te, y_test=y_te, rate=0.05)\n\n try:\n history = lp.train_the_policy(**space)\n lp.model.save(fp / f'model_trial_{counter.number}.h5')\n except Exception as e:\n print(f'Exception: {e}')\n return {'status': STATUS_FAIL, 'exception': e, **space}\n else:\n loss = - history.history['val_acc'][-1]\n epoch_count = len(history.epoch)\n test_acc = lp.compute_performance()\n print(f'val_acc: {-loss}, test_acc: {test_acc}')\n print(f'number of epochs is {epoch_count}')\n\n tf.keras.backend.clear_session()\n gc.collect()\n del lp.model\n\n return {'loss': loss,\n 'test_acc': test_acc,\n 'status': STATUS_OK,\n 'epoch': epoch_count,\n 'trial_index': counter.number,\n **space}\n\n\ndef hyperopt_score_estimation(space, data, counter, fp):\n \"\"\"hyperopt for estimating the propensity score\n \"\"\"\n x_tr, a_tr, x_val, a_val, x_te, y_te = data\n counter.add_one()\n print(f'============= TRIAL NUMBER {counter.number} =============')\n print(space)\n\n batch_size = 
space.pop('batch_size', 32)\n\n model = build_lstm_classifier(timesteps=x_tr.shape[1], feature_size=x_tr.shape[2],\n output_shape=a_tr.shape[1], **space)\n\n cbs = build_callbacks(monitor='val_loss', save=False)\n try:\n print('fitting the model')\n history = model.fit(x=x_tr, y=a_tr, epochs=75, verbose=0, batch_size=batch_size,\n validation_data=(x_val, a_val), callbacks=cbs)\n model.save(fp / f'model_trial_{counter.number}.h5')\n print('model saved')\n except Exception as e:\n print(f'Exception: {e}')\n return {'status': STATUS_FAIL, 'exception': e, **space}\n else:\n loss = - history.history['val_acc'][-1]\n _, test_acc = model.evaluate(x_te, y_te, verbose=0)\n epoch_count = len(history.epoch)\n\n print(f'val_acc is {-loss}')\n print(f'accuracy w.r.t ground-truth: {test_acc}')\n print(f'number of epochs is {epoch_count}')\n\n tf.keras.backend.clear_session()\n del model\n gc.collect()\n\n return {'loss': loss,\n 'test_acc': test_acc,\n 'status': STATUS_OK,\n 'epoch': epoch_count,\n 'batch_size': batch_size,\n 'trial_index': counter.number,\n **space\n }\n\n\ndef hyperopt_direct_method(space, data, counter, fp):\n \"\"\"hyperopt for direct method\n \"\"\"\n x_tr, a_tr, d_tr, x_val, a_val, d_val, x_te, y_te = data\n counter.add_one()\n print(f'============= TRIAL NUMBER {counter.number} =============')\n print(space)\n\n batch_size = space.pop('batch_size', 32)\n\n model = build_direct_method_classifier(timesteps=x_tr.shape[1], feature_size=x_tr.shape[2],\n action_size=a_tr.shape[1], **space)\n\n cbs = build_callbacks(monitor='val_loss', save=False)\n try:\n print('fitting the model')\n history = model.fit(x=[x_tr, a_tr], y=d_tr, epochs=60, verbose=0, batch_size=batch_size,\n validation_data=([x_val, a_val], d_val), callbacks=cbs)\n model.save(fp / f'model_trial_{counter.number}.h5')\n print('model saved')\n except Exception as e:\n print(f'Exception: {e}')\n return {'status': STATUS_FAIL, 'exception': e, **space}\n else:\n loss = - history.history['val_acc'][-1]\n\n # compute the test accuracy with the model\n p_list = []\n for i in range(a_tr.shape[1]):\n a_i = np.zeros(y_te.shape)\n a_i[:, i] = 1\n p = model.predict([x_te, a_i])\n # p is a 1D-array, length is the same as x_te.shape[0]\n # each value is the probability of being one\n p_list.append(p)\n\n p_pred = np.concatenate(p_list, axis=1)\n y_pred = np.argmax(p_pred, axis=1)\n y_true = np.argmax(y_te, axis=1)\n test_acc = accuracy_score(y_true, y_pred)\n\n epoch_count = len(history.epoch)\n\n print(f'val_acc is {-loss}')\n print(f'test_acc: {test_acc}')\n print(f'number of epochs is {epoch_count}')\n\n tf.keras.backend.clear_session()\n del model\n gc.collect()\n\n return {'loss': loss,\n 'test_acc': test_acc,\n 'status': STATUS_OK,\n 'epoch': epoch_count,\n 'batch_size': batch_size,\n 'trial_index': counter.number,\n **space\n }\n\n\ndef hyperopt_ips(space, data, counter, fp, translation=False):\n \"\"\"hyperopt for training with counterfactual risk minimization w/ or w/o\n translation\n \"\"\"\n x_tr, a_tr, p_tr, d_tr, x_val, a_val, p_val, d_val, x_te, y_te = data\n counter.add_one()\n print(f'============= TRIAL NUMBER {counter.number} =============')\n print(space)\n\n batch_size = space.pop('batch_size', 32)\n\n model, m_test = build_bandit_lstm_classifier(timesteps=x_tr.shape[1], feature_size=x_tr.shape[2],\n output_shape=a_tr.shape[1], inp_drop=0.0, re_drop=0.0, **space)\n\n cbs = build_callbacks(monitor='val_loss', save=False)\n try:\n print('fitting the model')\n history = model.fit(x=[x_tr, a_tr, p_tr, d_tr], y=None, 
epochs=60, verbose=0, batch_size=batch_size,\n validation_data=([x_val, a_val, p_val, d_val], None), callbacks=cbs)\n model.save(fp / f'model_trial_{counter.number}.h5')\n m_test.save(fp / f'test_model_trial_{counter.number}.h5')\n print('model saved')\n except Exception as e:\n print(f'Exception: {e}')\n return {'status': STATUS_FAIL, 'exception': e, **space}\n else:\n val_loss = - history.history['val_loss'][-1]\n epoch_count = len(history.epoch)\n\n # quickly compute some important values\n predictions = m_test.predict(x_tr)\n pro_a = predictions[a_tr.nonzero()]\n imp_ratio = np.divide(pro_a, p_tr)\n average_imp_ratio = np.mean(imp_ratio) #\n\n risks = np.multiply(d_tr, imp_ratio)\n ips_loss = np.mean(risks)\n # with translation, ips_loss will be very different from the loss during\n # training/validation\n if translation:\n sn_loss = ips_loss / average_imp_ratio #\n loss = sn_loss\n # without translation, ips_loss will be similar to the training/validation loss\n else:\n loss = ips_loss\n\n _, test_acc = m_test.evaluate(x_te, y_te, verbose=0)\n\n print(f'test_acc: {test_acc}')\n print(f'number of epochs is {epoch_count}')\n\n tf.keras.backend.clear_session()\n del model, m_test\n gc.collect()\n\n return {'loss': loss,\n 'val_loss': val_loss,\n 'test_acc': test_acc,\n 'average_imp_ratio': average_imp_ratio,\n 'status': STATUS_OK,\n 'epoch': epoch_count,\n 'batch_size': batch_size,\n 'trial_index': counter.number,\n **space\n }\n\n\ndef train_logging_policy(mpath, source_number=1):\n if source_number == 1:\n x, y = load_counting_data(fp=Path('./data'), fn='Dataset_10k.pickle')\n elif source_number == 2:\n x, y = load_mnist_data()\n else:\n raise ValueError('Source data is not found')\n\n X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=0)\n data = X_train, y_train, X_test, y_test\n it = InvokeTimes()\n\n temp_fp = mpath / 'logging'\n temp_fp.mkdir(parents=True, exist_ok=True)\n\n func = partial(hyperopt_logging_policy, data=data, counter=it, fp=temp_fp)\n config = {'repr_size': 16 * scope.int(hp.quniform('repr_size', 1, 8, 1)),\n 'activation': hp.choice('activation',\n ['sigmoid', 'relu', 'tanh']),\n # 'inp_drop': scope.roundup(hp.uniform('inp_drop', 0.1, 0.9)),\n # 're_drop': scope.roundup(hp.uniform('re_drop', 0.1, 0.9)),\n 'l2_coef': np.power(10, scope.int(\n hp.quniform('l2_coef', -10, -1, 1))),\n 'lr': np.power(10, scope.int(hp.quniform('lr', -10, -1, 1))),\n }\n trials = Trials()\n fmin(fn=func, space=config, algo=rand.suggest, max_evals=20, trials=trials,\n rstate=np.random.RandomState(0), return_argmin=False,\n show_progressbar=True)\n df = pd.DataFrame(trials.results)\n df.to_csv(mpath / 'trials.csv')\n\n\nif __name__ == '__main__':\n fix_random_seed(0)\n path = Path('./models')\n train_logging_policy(path)\n" ]
[ [ "numpy.multiply", "numpy.around", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "numpy.concatenate", "numpy.argmax", "tensorflow.keras.backend.clear_session", "numpy.mean", "numpy.random.RandomState", "numpy.zeros", "numpy.divide", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
SamGalanakis/FlowCompare
[ "ed26e48298fe42cf9ddcc252c19b502b4a71d54e" ]
[ "models/scene_seg_PAConv/model/pointnet2/pointnet2_paconv_seg.py" ]
[ "from collections import namedtuple\n\nimport torch\nimport torch.nn as nn\n\nfrom .pointnet2_paconv_modules import PointNet2FPModule\nfrom models.scene_seg_PAConv.util import block\nfrom models import MLP\n\n\n# Code adapted from : https://github.com/CVMI-Lab/PAConv\n\nclass PointNet2SSGSeg(nn.Module):\n r\"\"\"\n PointNet2 with single-scale grouping\n Semantic segmentation network that uses feature propogation layers\n Parameters\n ----------\n k: int\n Number of semantics classes to predict over -- size of softmax classifier that run for each point\n c: int = 6\n Number of input channels in the feature descriptor for each point. If the point cloud is Nx9, this\n value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors\n use_xyz: bool = True\n Whether or not to use the xyz position of a point as a feature\n \"\"\"\n\n def __init__(self, c=3, k=13, use_xyz=True, out_mlp_dims=[512,512,512], args={}):\n super().__init__()\n self.out_mlp_dims = out_mlp_dims\n self.nsamples = args.get('nsamples', [32, 32, 32, 32])\n self.npoints = args.get('npoints', [None, None, None, None])\n self.sa_mlps = args.get('sa_mlps', [[c, 32, 32, 64], [64, 64, 64, 128], [128, 128, 128, 256], [256, 256, 256, 512]])\n self.fp_mlps = args.get('fp_mlps', [[128 + c, 128, 128, 128], [256 + 64, 256, 128], [256 + 128, 256, 256], [512 + 256, 256, 256]])\n self.paconv = args.get('pointnet2_paconv', [True, True, True, True, False, False, False, False])\n \n\n if args.get('cuda', False):\n from .pointnet2_paconv_modules import PointNet2SAModuleCUDA as PointNet2SAModule\n else:\n from .pointnet2_paconv_modules import PointNet2SAModule\n\n self.SA_modules = nn.ModuleList()\n self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[0], nsample=self.nsamples[0], mlp=self.sa_mlps[0], use_xyz=use_xyz,\n use_paconv=self.paconv[0], args=args))\n self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[1], nsample=self.nsamples[1], mlp=self.sa_mlps[1], use_xyz=use_xyz,\n use_paconv=self.paconv[1], args=args))\n self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[2], nsample=self.nsamples[2], mlp=self.sa_mlps[2], use_xyz=use_xyz,\n use_paconv=self.paconv[2], args=args))\n self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[3], nsample=self.nsamples[3], mlp=self.sa_mlps[3], use_xyz=use_xyz,\n use_paconv=self.paconv[3], args=args))\n self.FP_modules = nn.ModuleList()\n self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[0], use_paconv=self.paconv[4], args=args))\n self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[1], use_paconv=self.paconv[5], args=args))\n self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[2], use_paconv=self.paconv[6], args=args))\n self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[3], use_paconv=self.paconv[7], args=args))\n self.out_mlp = MLP(128, out_mlp_dims, k, torch.nn.GELU())\n def _break_up_pc(self, pc):\n xyz = pc[..., 0:3].contiguous()\n features = (pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None)\n return xyz, features\n\n def forward(self, pointcloud: torch.cuda.FloatTensor):\n r\"\"\"\n Forward pass of the network\n Parameters\n ----------\n pointcloud: Variable(torch.cuda.FloatTensor)\n (B, N, 3 + input_channels) tensor\n Point cloud to run predicts on\n Each point in the point-cloud MUST\n be formated as (x, y, z, features...)\n \"\"\"\n xyz, features = self._break_up_pc(pointcloud)\n l_xyz, l_features = [xyz], [features]\n for i in range(len(self.SA_modules)):\n li_xyz, li_features 
= self.SA_modules[i](l_xyz[i], l_features[i])\n l_xyz.append(li_xyz)\n l_features.append(li_features)\n for i in range(-1, -(len(self.FP_modules) + 1), -1):\n l_features[i - 1] = self.FP_modules[i](l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])\n return self.out_mlp(l_features[0].permute((0,2,1)))\n\n " ]
[ [ "torch.nn.ModuleList", "torch.nn.GELU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Stelele/papi_iot
[ "d416276417244b05c42184e2daf619b14f0c5162" ]
[ "papi_iot/papi_storage_offline.py" ]
[ "import os\nimport glob\nfrom shutil import copy\nfrom os import listdir\nfrom os import makedirs\nfrom os import path\nfrom matplotlib import image\nfrom papi_iot.papi_exceptions import DirectoryCreationFail\n\nclass OfflineStorage (object):\n rootDir = 'home/pi'\n knownFaces = '/knownFaces'\n unknownFaces = '/unknownFaces'\n\n def __init__(self):\n \"\"\"\n Initial state of the object by assigning the values of the object’s properties.\n Create knownFaces and unknownFaces folders.\n \"\"\"\n self.setOfflinePhotoStorageLocation()\n self.setOfflineVideoStorageLocation()\n \n def setOfflinePhotoStorageLocation(self):\n \"\"\"\n Create the locations/folders for the known faces images and unknown faces images. \n Paths created are /home/pi/photos/knownFaces and /home/pi/photos/unknownFaces \n \"\"\"\n\n pathKnownFaces = self.rootDir + '/photos' + self.knownFaces\n pathUknownFaces = self.rootDir + '/photos' + self.unknownFaces\n \n if (path.isdir(pathKnownFaces) == False):\n try: \n makedirs(pathKnownFaces,exist_ok = True)\n except OSError: \n raise DirectoryCreationFail(pathKnownFaces)\n\n if (path.isdir(pathUknownFaces) == False):\n try: \n makedirs(pathUknownFaces,exist_ok = True)\n print(\"Directory '%s' created successfully\" %pathUknownFaces) \n except OSError: \n raise DirectoryCreationFail(pathUknownFaces)\n\n def getOfflinePhotoStorageLocation(self, category):\n \"\"\"\n Obtain the path to known and unknown faces based on given category.\n\n Parameters\n ----------\n category : str\n Path choice. Can either be knownFaces or unknownFaces\n\n Returns\n -------\n Path : str\n Can either be path to knownFaces or unknownFaces folder\n \"\"\"\n if category == 'knownFaces':\n return './' + self.rootDir + '/photos' + self.knownFaces\n else: \n return './' + self.rootDir + '/photos' + self.unknownFaces\n\n def setOfflineVideoStorageLocation(self):\n \"\"\"\n Create the locations/folder for videos. 
Path to video /home/pi/videos\n \"\"\"\n pathVideos = self.rootDir + '/videos'\n if (path.isdir(pathVideos) == False):\n try: \n makedirs(pathVideos, exist_ok = True)\n except OSError as error: \n raise DirectoryCreationFail(pathVideos)\n \n def getOfflineVideoStorageLocation(self):\n return self.rootDir + '/videos'\n\n def storeOfflinePhotos(self, filename, destination):\n \"\"\"\n Store photos from pi camera into the given folder\n\n args:\n filename : str\n filename for image\n destination : str\n location to store image\n \"\"\"\n copy(filename, destination)\n\n def storeOfflineVideos(self, filename):\n \"\"\"\n Store video from pi camera into the given video folder\n\n args:\n filename : str\n filename for video\n \"\"\"\n copy(filename, self.rootDir + '/videos')\n\n def getOfflinePhoto(self, destination):\n \"\"\"\n Obtain photo based on destination given.\n\n args: \n destination : str\n filename for image\n \n return:\n image as pixel array\n \"\"\"\n return image.imread(destination)\n\n def getOfflinePhotos(self):\n \"\"\"\n Obtain all photos from both knownFaces and unknownFace folders\n\n return:\n knownFacesImageList : list\n known faces image pixel array list\n unknownFacesImageList : list\n unknown faces image pixel array list\n \"\"\"\n knownFacesImageList = list()\n unknownFacesImageList = list()\n for filename in listdir('./' + self.rootDir + '/photos' + self.knownFaces):\n imgData = image.imread('./' + self.rootDir + '/photos' + self.knownFaces + '/' + filename)\n knownFacesImageList.append(imgData)\n\n for filename in listdir('./' + self.rootDir + '/photos' + self.unknownFaces):\n imgData = image.imread('./' + self.rootDir + '/photos' + self.unknownFaces + '/' + filename)\n unknownFacesImageList.append(imgData)\n\n return knownFacesImageList, unknownFacesImageList\n\n def getOfflinesVideo(self):\n \"\"\"\n Obtain list of vides in video folder\n\n Returns\n -------\n videoList : list\n list of videos \n \"\"\"\n videoList = list()\n for filename in listdir('./' + self.rootDir + '/videos'):\n videoData = image.imread('./' + self.rootDir + '/videos' + '/' + filename)\n videoList.append(videoData)\n\n return videoList\n\n def storeNewKnownUser(self, filename):\n \"\"\"\n Store the new known person in the knownFaces folder. 
\n \"\"\"\n newFileName = filename.split('/')[-1]\n self.storeOfflinePhotos(filename,self.getOfflinePhotoStorageLocation('knownFaces') + '/' + newFileName)\n\n def storeNewKnownUsers(self, sourceFolder, picType='.jpg'):\n \"\"\"\n\n Store known photos in known Faces folder\n\n \"\"\"\n\n picLocationList = glob.glob(sourceFolder + '/' + picType)\n\n for picLocation in picLocationList:\n self.storeNewKnownUser(picLocation)\n\n\n def storeUnknownPhotos(self, sourceFolder, picType='.jpg'):\n \"\"\"\n\n store unknown photos in unknown folder\n\n \"\"\"\n\n picLocationList = glob.glob(sourceFolder + '/*' + picType)\n\n for picLocation in picLocationList:\n name = picLocation.split('/')[-1]\n newLocation = self.getOfflinePhotoStorageLocation('unknown') + '/' + name\n self.storeOfflinePhotos(picLocation, newLocation)\n\n def removeKnownUser(self, userName):\n \"\"\"\n Remove the new known person in the knownFaces folder.\n\n Parameters\n ----------\n userName : str\n Name of the person to be removed\n\n Returns\n -------\n filename : bool\n removed file True or False\n \"\"\"\n fileName = self.getOfflinePhotoStorageLocation('knownFaces') + '/' + userName + '.jpg'\n return self.removeFile(fileName)\n\n def removeFile(self, fileName):\n \"\"\"\n Remove the file from given file name\n\n Parameters\n ----------\n filename : str\n remove file named filename \n\n Returns\n -------\n removed : bool\n removed file True or False\n \"\"\"\n removed = False\n\n if os.path.exists(fileName):\n os.remove(fileName)\n removed = True\n\n return removed\n \n\n" ]
[ [ "matplotlib.image.imread" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rominf/videoflow
[ "704ce6069a32332256264787d920bc296f2ca57c" ]
[ "videoflow/processors/vision/trackers.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport numpy as np\nfrom filterpy.kalman import KalmanFilter\nfrom sklearn.utils.linear_assignment_ import linear_assignment\nimport math\n\nfrom ...core.node import OneTaskProcessorNode\n\nclass BoundingBoxTracker(OneTaskProcessorNode):\n '''\n Tracks bounding boxes from one frame to another.\n It keeps an internal state representation that allows\n it to track across frames.\n '''\n def _track(self, dets : np.array) -> np.array:\n '''\n - Arguments: \n - dets: np.array of shape (nb_boxes, 6) \\\n Specifically (nb_boxes, [ymin, xmin, ymax, xmax, class_index, score])\n '''\n raise NotImplementedError(\"Subclass must implement _track method\")\n \n def process(self, dets : np.array) -> np.array:\n '''\n - Arguments: \n - dets: np.array of shape (nb_boxes, 6) \\\n Specifically (nb_boxes, [ymin, xmin, ymax, xmax, class_index, score])\n - Returns:\n - tracks: np.array of shape (nb_boxes, 5) \\\n Specifically (nb_boxes, [ymin, xmin, ymax, xmax, track_id])\n '''\n return self._track(dets)\n\ndef eucl(bb_test, bb_gt):\n '''\n Computes the euclidean distance between two boxes\n in the form [x1, y1, x2, y2]\n '''\n center_1 = [(bb_test[0] + bb_test[2]) / 2.0, (bb_test[1] + bb_test[3]) / 2.0]\n center_2 = [(bb_gt[0] + bb_gt[2]) / 2.0, (bb_gt[1] + bb_gt[3]) / 2.0]\n eucl = math.sqrt((center_1[0] - center_2[0])*(center_1[0] - center_2[0]) + (center_1[1] - center_2[1])*(center_1[1] - center_2[1]))\n return -eucl\n\ndef iou(bb_test, bb_gt):\n \"\"\"\n Computes IUO between two bboxes in the form [y1, x1, y2, x2]\n IOU is the intersection of areas.\n \"\"\"\n yy1 = np.maximum(bb_test[0], bb_gt[0])\n xx1 = np.maximum(bb_test[1], bb_gt[1])\n yy2 = np.minimum(bb_test[2], bb_gt[2])\n xx2 = np.minimum(bb_test[3], bb_gt[3])\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n wh = w * h\n o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])\n + (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1]) - wh)\n return(o)\n\ndef metric_factory(metric_type):\n if metric_type == \"iou\":\n return iou\n elif metric_type == \"euclidean\":\n return eucl\n else:\n raise ValueError(\"Cannot identify metric_type {}\".format(metric_type))\n\ndef convert_bbox_to_z(bbox):\n \"\"\"\n Takes a bounding box in the form [x1, y1, x2, y2] and returns z in the form\n [x, y, s, r] where x, y is the centre of the box and s is the scale/area and r is\n the aspect ratio\n \"\"\"\n h = bbox[2] - bbox[0]\n w = bbox[3] - bbox[1]\n x = bbox[1] + w/2.\n y = bbox[0] + h/2.\n s = w * h #scale is just area\n r = w / float(h)\n return np.array([x, y, s, r]).reshape((4, 1))\n\ndef convert_x_to_bbox(x, score=None):\n '''\n Takes a bounding box in the form [x, y, s, r] and returns it in the form\n [y1, x1, y2, x2] where x1, y1 is the top left and x2, y2 is the bottom right\n '''\n w = np.sqrt(x[2] * x[3])\n h = x[2] / w\n if score == None:\n return np.array([x[1] - h/2., x[0] - w/2., x[1] + h/2., x[0] + w/2.]).reshape((1, 4))\n else:\n return np.array([x[1] - h/2., x[0] - w/2., x[1] + h/2., x[0] + w/2., score]).reshape((1, 5))\n\ndef associate_detections_to_trackers(detections, trackers, metric_function, iou_threshold = 0.1):\n \"\"\"\n Assigns detections to tracked object (both represented as bounding boxes)\n Returns 3 lists of matches, unmatched_detections and unmatched_trackers\n \"\"\"\n distance_threshold = 500\n\n if len(trackers) == 0:\n return np.empty((0, 2), dtype = int), np.arange(len(detections)), 
np.empty((0, 5), dtype = int)\n iou_matrix = np.zeros((len(detections), len(trackers)), dtype=np.float32)\n\n for d, det in enumerate(detections):\n for t, trk in enumerate(trackers):\n iou_matrix[d, t] = metric_function(det, trk)\n matched_indices = linear_assignment(-iou_matrix)\n \n unmatched_detections = []\n for d,det in enumerate(detections):\n if(d not in matched_indices[:,0]):\n unmatched_detections.append(d)\n unmatched_trackers = []\n for t, trk in enumerate(trackers):\n if(t not in matched_indices[:,1]):\n unmatched_trackers.append(t)\n\n #filter out matched with low IOU\n matches = []\n for m in matched_indices:\n if iou_matrix[m[0], m[1]] < iou_threshold:\n #if(iou_matrix[m[0], m[1]] > distance_threshold):\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1, 2))\n if len(matches) == 0:\n matches = np.empty((0, 2), dtype = int)\n else:\n matches = np.concatenate(matches, axis = 0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\n\nclass KalmanBoxTracker(object):\n \"\"\"\n This class represents the internel state of individual tracked objects observed as bbox.\n \"\"\"\n count = 0\n def __init__(self,bbox):\n \"\"\"\n Initialises a tracker using initial bounding box.\n \"\"\"\n #define constant velocity model\n self.kf = KalmanFilter(dim_x=7, dim_z=4)\n self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])\n self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])\n\n self.kf.R[2:,2:] *= 10.\n self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities\n self.kf.P *= 10.\n self.kf.Q[-1,-1] *= 0.01\n self.kf.Q[4:,4:] *= 0.01\n\n self.kf.x[:4] = convert_bbox_to_z(bbox)\n self.time_since_update = 0\n self.id = KalmanBoxTracker.count\n KalmanBoxTracker.count += 1\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n\n def update(self, bbox):\n \"\"\"\n Updates the state vector with observed bbox.\n \"\"\"\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1\n self.kf.update(convert_bbox_to_z(bbox))\n\n def predict(self):\n \"\"\"\n Advances the state vector and returns the predicted bounding box estimate.\n \"\"\"\n if((self.kf.x[6] + self.kf.x[2]) <= 0):\n self.kf.x[6] *= 0.0\n self.kf.predict()\n self.age += 1\n if(self.time_since_update > 0):\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x))\n \n return self.history[-1]\n\n def get_state(self):\n \"\"\"\n Returns the current bounding box estimate.\n \"\"\"\n return convert_x_to_bbox(self.kf.x)\n\n\nclass KalmanFilterBoundingBoxTracker(BoundingBoxTracker):\n '''\n - Arguments:\n - max_age: If no bounding box is matched to an internal tracklet for ``max_age`` steps \\\n the internal tracklet is considered dead and is removed.\n - min_hits: A tracklet is considered a valid track if it has a hit streak larger \\\n than or equal to ``min_hits``\n - metric_function_type : str, one of ``iou`` or ``euclidean`` \n '''\n \n def __init__(self, max_age = 7, min_hits = 3, metric_function_type = 'iou'):\n self.max_age = max_age\n self.min_hits = min_hits\n self.trackers = []\n self.frame_count = 0\n self.metric_function_type = metric_function_type\n self.previous_fid = -1\n self.metric_function = metric_factory(metric_function_type)\n super(KalmanFilterBoundingBoxTracker, self).__init__()\n\n def _track(self, 
dets, fid = None):\n \"\"\"\n Requires: this method must be called once for each frame even with empty detections.\n\n - Arguments:\n - dets: a numpy array of detections in the format [[ymin,xmin,ymax,xmax,score],[ymin,xmin,ymax,xmax,score],...]\n \n - Returns:\n - A similar array, where the last column is the object or track id. The number of objects returned may differ from the number of detections provided.\n \"\"\"\n if fid is None:\n fid = self.previous_fid + 1\n \n self.frame_count += 1\n #get predicted locations from existing trackers.\n trks = np.zeros((len(self.trackers), 5))\n to_del = []\n ret = []\n for t, trk in enumerate(trks):\n pos = self.trackers[t].predict()[0]\n trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]\n if(np.any(np.isnan(pos))):\n to_del.append(t)\n trks = np.ma.compress_rows(np.ma.masked_invalid(trks))\n for t in reversed(to_del):\n self.trackers.pop(t)\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.metric_function)\n\n #update matched trackers with assigned detections\n for t, trk in enumerate(self.trackers):\n if(t not in unmatched_trks):\n d = matched[np.where(matched[:,1] == t)[0], 0] \n trk.update(dets[d,:][0])\n\n #create and initialise new trackers for unmatched detections\n for i in unmatched_dets:\n trk = KalmanBoxTracker(dets[i,:]) \n self.trackers.append(trk)\n i = len(self.trackers)\n for trk in reversed(self.trackers):\n d = trk.get_state()[0]\n if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):\n ret.append(np.concatenate((d,[trk.id + 1])).reshape(1, -1)) # +1 as MOT benchmark requires positive\n i -= 1\n \n #remove dead tracklet\n if(trk.time_since_update > self.max_age):\n self.trackers.pop(i)\n if(len(ret) > 0):\n return np.concatenate(ret)\n \n self.previous_fid = fid\n return np.empty((0, 5))\n " ]
[ [ "sklearn.utils.linear_assignment_.linear_assignment", "numpy.maximum", "numpy.sqrt", "numpy.minimum", "numpy.isnan", "numpy.concatenate", "numpy.ma.masked_invalid", "numpy.array", "numpy.where", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cclauss/SparseSC
[ "bd5c65f162a5431f92ed957df3385c803f2d3365" ]
[ "SparseSC/cross_validation.py" ]
[ "from SparseSC.fit_fold import fold_v_matrix, fold_score\nfrom SparseSC.fit_loo import loo_v_matrix, loo_score, loo_weights\nfrom SparseSC.fit_ct import ct_v_matrix, ct_score\n#-- from SparseSC.optimizers.cd_line_search import cdl_search\nfrom SparseSC.lambda_utils import get_max_lambda, L2_pen_guestimate\nimport atexit\nimport numpy as np\nimport itertools\nfrom concurrent import futures\nimport warnings\nfrom collections import namedtuple\n\ndef score_train_test(X, \n Y,\n train,\n test,\n X_treat=None,\n Y_treat=None,\n FoldNumber=None, # For consistency with score_train_test_sorted_lambdas()\n grad_splits=None, # If present, use k fold gradient descent. See fold_v_matrix for details\n **kwargs):\n \"\"\" presents a unified api for ct_v_matrix and loo_v_matrix\n and returns the v_mat, l2_pen_w (possibly calculated, possibly a parameter), and the score \n \"\"\"\n # to use `pdb.set_trace()` here, set `parallel = False` above\n if X_treat is None != Y_treat is None:\n raise ValueError(\"parameters `X_treat` and `Y_treat` must both be Matrices or None\")\n\n if X_treat is not None:\n # >> K-fold validation on the Treated units; assuming that Y and Y_treat are pre-intervention outcomes\n\n # PARAMETER QC\n try:\n X = np.asmatrix(X)\n except ValueError:\n raise ValueError(\"X is not coercible to a matrix\")\n try:\n Y = np.asmatrix(Y)\n except ValueError:\n raise ValueError(\"Y is not coercible to a matrix\")\n if X_treat.shape[1] == 0:\n raise ValueError(\"X_treat.shape[1] == 0\")\n if Y_treat.shape[1] == 0:\n raise ValueError(\"Y_treat.shape[1] == 0\")\n if X_treat.shape[0] != Y_treat.shape[0]:\n raise ValueError(\"X_treat and Y_treat have different number of rows (%s and %s)\" % \n (X.shape[0], Y.shape[0],))\n\n # FIT THE V-MATRIX AND POSSIBLY CALCULATE THE L2_PEN_W\n # note that the weights, score, and loss function value returned here are for the in-sample predictions\n _, v_mat, _, _, l2_pen_w, _ = \\\n ct_v_matrix(X = np.vstack((X,X_treat[train, :])),\n Y = np.vstack((Y,Y_treat[train, :])),\n treated_units = [X.shape[0] + i for i in range(len(train))],\n # method = cdl_search,\n **kwargs)\n\n # GET THE OUT-OF-SAMPLE PREDICTION ERROR\n s = ct_score(X = np.vstack((X,X_treat[test, :])),\n Y = np.vstack((Y,Y_treat[test, :])), \n treated_units = [X.shape[0] + i for i in range(len(test))],\n V = v_mat,\n L2_PEN_W = l2_pen_w)\n\n else: # X_treat *is* None\n # >> K-fold validation on the only control units; assuming that Y contains post-intervention outcomes \n if grad_splits is not None:\n\n\n try:\n iter(grad_splits)\n except TypeError:\n # not iterable\n pass\n else:\n # TRIM THE GRAD SPLITS NEED TO THE TRAINING SET\n match = lambda a, b: np.concatenate([np.where(a == x)[0] for x in b])# inspired by R's match() function\n grad_splits = [ (match(train,_X),match(train,_Y) ) for _X,_Y in grad_splits]\n\n # FIT THE V-MATRIX AND POSSIBLY CALCULATE THE L2_PEN_W\n # note that the weights, score, and loss function value returned here are for the in-sample predictions\n _, v_mat, _, _, l2_pen_w, _ = \\\n fold_v_matrix(X = X[train, :],\n Y = Y[train, :], \n # treated_units = [X.shape[0] + i for i in range(len(train))],\n # method = cdl_search,\n grad_splits = grad_splits,\n **kwargs)\n\n # GET THE OUT-OF-SAMPLE PREDICTION ERROR (could also use loo_score, actually...)\n s = ct_score(X = X, Y = Y, # formerly: fold_score\n treated_units = test,\n V = v_mat,\n L2_PEN_W = l2_pen_w)\n\n else:\n\n # FIT THE V-MATRIX AND POSSIBLY CALCULATE THE L2_PEN_W\n # note that the weights, score, and loss function 
value returned here are for the in-sample predictions\n try:\n _, v_mat, _, _, l2_pen_w, _ = \\\n loo_v_matrix(X = X[train, :],\n Y = Y[train, :], \n # treated_units = [X.shape[0] + i for i in range(len(train))],\n # method = cdl_search,\n **kwargs)\n except MemoryError as err:\n raise RuntimeError(\"MemoryError encountered. Try setting `grad_splits` parameter to reduce memory requirements.\")\n\n # GET THE OUT-OF-SAMPLE PREDICTION ERROR\n s = ct_score(X = X, Y = Y, \n treated_units = test,\n V = v_mat,\n L2_PEN_W = l2_pen_w)\n\n return v_mat, l2_pen_w, s\n\n\ndef score_train_test_sorted_lambdas(LAMBDA,\n start=None,\n cache=False,\n progress=False,\n FoldNumber=None,\n **kwargs):\n \"\"\" a wrapper which calls score_train_test() for each element of an\n array of `LAMBDA`'s, optionally caching the optimized v_mat and using it\n as the start position for the next iteration.\n \"\"\"\n\n # DEFAULTS\n values = [None]*len(LAMBDA)\n\n if progress > 0:\n import time\n t0 = time.time()\n\n for i,Lam in enumerate(LAMBDA):\n v_mat, _, _ = values[i] = score_train_test( LAMBDA = Lam, start = start, **kwargs)\n\n if cache: \n start = np.diag(v_mat)\n if progress > 0 and (i % progress) == 0:\n t1 = time.time() \n if FoldNumber is None:\n print(\"lambda: %0.4f, value %s of %s, time elapsed: %0.4f sec.\" % \n (Lam, i+1, len(LAMBDA), t1 - t0, ))\n #print(\"iteration %s of %s time: %0.4f ,lambda: %0.4f, diags: %s\" % \n # (i+1, len(LAMBDA), t1 - t0, Lam, np.diag(v_mat),))\n else:\n print(\"Fold %s,lambda: %0.4f, value %s of %s, time elapsed: %0.4f sec.\" % \n (FoldNumber, Lam, i+1, len(LAMBDA), t1 - t0, ))\n #print(\"Fold %s, iteration %s of %s, time: %0.4f ,lambda: %0.4f, diags: %s\" % \n # (FoldNumber, i+1, len(LAMBDA), t1 - t0, Lam, np.diag(v_mat),))\n t0 = time.time() \n\n return list(zip(*values))\n\n\ndef CV_score(X,Y,\n LAMBDA,\n X_treat=None,\n Y_treat=None,\n splits=5,\n sub_splits=None, # ignore pylint -- this is here for consistency...\n quiet=False,\n parallel=False,\n max_workers=None,\n **kwargs):\n \"\"\" Cross fold validation for 1 or more L1 Penalties, holding the L2 penalty fixed. 
\n \"\"\"\n\n # PARAMETER QC\n try:\n X = np.asmatrix(X)\n except ValueError:\n raise ValueError(\"X is not coercible to a matrix\")\n try:\n Y = np.asmatrix(Y)\n except ValueError:\n raise ValueError(\"X is not coercible to a matrix\")\n if X_treat is None != Y_treat is None:\n raise ValueError(\"parameters `X_treat` and `Y_treat` must both be Matrices or None\")\n if X.shape[1] == 0:\n raise ValueError(\"X.shape[1] == 0\")\n if Y.shape[1] == 0:\n raise ValueError(\"Y.shape[1] == 0\")\n if X.shape[0] != Y.shape[0]:\n raise ValueError(\"X and Y have different number of rows (%s and %s)\" % (X.shape[0], Y.shape[0],))\n\n try:\n _LAMBDA = iter(LAMBDA)\n except TypeError:\n # Lambda is a single value \n multi_lambda = False\n __score_train_test__ = score_train_test\n else:\n # Lambda is an iterable of values\n multi_lambda = True\n __score_train_test__ = score_train_test_sorted_lambdas\n\n if X_treat is not None:\n\n # PARAMETER QC\n if not isinstance(X_treat, np.matrix):\n raise TypeError(\"X_treat is not a matrix\")\n if not isinstance(Y_treat, np.matrix):\n raise TypeError(\"Y_treat is not a matrix\")\n if X_treat.shape[1] == 0:\n raise ValueError(\"X_treat.shape[1] == 0\")\n if Y_treat.shape[1] == 0:\n raise ValueError(\"Y_treat.shape[1] == 0\")\n if X_treat.shape[0] != Y_treat.shape[0]: \n raise ValueError(\"X_treat and Y_treat have different number of rows (%s and %s)\" % \n (X.shape[0], Y.shape[0],))\n\n try:\n iter(splits)\n except TypeError: \n from sklearn.model_selection import KFold\n splits = KFold(splits,True).split(np.arange(X_treat.shape[0]))\n train_test_splits = list(splits)\n n_splits = len(train_test_splits)\n\n # MESSAGING\n if not quiet: \n print(\"%s-fold validation with %s control and %s treated units %s predictors and %s outcomes, holding out one fold among Treated units; Assumes that `Y` and `Y_treat` are pre-intervention outcomes\" % \n (n_splits, X.shape[0] , X_treat.shape[0],X.shape[1],Y.shape[1],))\n\n if parallel: \n\n if max_workers is None:\n # CALCULATE A DEFAULT FOR MAX_WORKERS\n import multiprocessing\n multiprocessing.cpu_count()\n if n_splits == 1:\n print(\"WARNING: Using Parallel options with a single split is expected reduce performance\")\n max_workers = min(max(multiprocessing.cpu_count() - 2,1),len(train_test_splits))\n if max_workers == 1 and n_splits > 1:\n print(\"WARNING: Default for max_workers is 1 on a machine with %s cores is 1.\")\n\n _initialize_Global_worker_pool(max_workers)\n\n try:\n\n promises = [ _worker_pool.submit(__score_train_test__,\n X = X,\n Y = Y,\n LAMBDA = LAMBDA,\n X_treat = X_treat, \n Y_treat = Y_treat, \n train = train,\n test = test,\n FoldNumber = fold,\n **kwargs)\n for fold, (train,test) in enumerate(train_test_splits) ] \n results = [ promise.result() for promise in futures.as_completed(promises)]\n\n finally:\n\n _clean_up_worker_pool()\n\n else:\n\n results = [ __score_train_test__(X = X,\n Y = Y,\n X_treat = X_treat, \n Y_treat = Y_treat, \n LAMBDA = LAMBDA,\n train = train,\n test = test,\n FoldNumber = fold,\n **kwargs)\n for fold, (train,test) in enumerate(train_test_splits) ] \n\n\n else: # X_treat *is* None\n\n try:\n iter(splits)\n except TypeError: \n from sklearn.model_selection import KFold\n splits = KFold(splits).split(np.arange(X.shape[0]))\n train_test_splits = [ x for x in splits ]\n n_splits = len(train_test_splits)\n\n # MESSAGING\n if not quiet: \n print(\"%s-fold Cross Validation with %s control units, %s predictors and %s outcomes; Y may contain post-intervention outcomes\" % \n (n_splits, 
X.shape[0],X.shape[1],Y.shape[1],) )\n\n if parallel: \n\n if max_workers is None:\n # CALCULATE A DEFAULT FOR MAX_WORKERS\n import multiprocessing\n multiprocessing.cpu_count()\n if n_splits == 1:\n print(\"WARNING: Using Parallel options with a single split is expected reduce performance\")\n max_workers = min(max(multiprocessing.cpu_count() - 2,1),len(train_test_splits))\n if max_workers == 1 and n_splits > 1:\n print(\"WARNING: Default for max_workers is 1 on a machine with %s cores is 1.\")\n\n _initialize_Global_worker_pool(max_workers)\n\n try:\n\n promises = [ _worker_pool.submit(__score_train_test__,\n X = X,\n Y = Y,\n LAMBDA = LAMBDA,\n train = train,\n test = test,\n FoldNumber = fold,\n **kwargs)\n for fold, (train,test) in enumerate(train_test_splits) ] \n\n results = [ promise.result() for promise in futures.as_completed(promises)]\n\n finally:\n\n _clean_up_worker_pool()\n\n else:\n results = [ __score_train_test__(X = X,\n Y = Y,\n LAMBDA = LAMBDA,\n train = train,\n test = test,\n FoldNumber = fold,\n **kwargs)\n for fold, (train,test) in enumerate(train_test_splits) ] \n\n # extract the score.\n _, _, scores = list(zip(* results))\n\n if multi_lambda:\n total_score = [sum(s) for s in zip(*scores)]\n else:\n total_score = sum(scores)\n\n return total_score\n\n\ndef joint_penalty_optimzation(X, Y, L1_pen_start = None, L2_pen_start = None, bounds = ((-6,6,),)*2, X_treat = None, Y_treat = None):\n #TODO: Default bounds?\n # -----------------------------------------------------------------\n # Optimization of the L2 and L1 Penalties Simultaneously\n # -----------------------------------------------------------------\n from scipy.optimize import fmin_l_bfgs_b, differential_evolution \n import time\n\n if L2_pen_start is None:\n L2_pen_start = L2_pen_guestimate(X)\n\n L1_pen_start = get_max_lambda(X,Y,X_treat=X_treat,Y_treat=Y_treat) #TODO: is this right?\n\n # build the objective function to be minimized\n n_calls = [0,]\n temp_results =[]\n\n def L1_L2_obj_func (x): \n n_calls[0] += 1\n t1 = time.time()\n score = CV_score(X = X, Y = Y,\n X_treat = X_treat, Y_treat = Y_treat,\n # if LAMBDA is a single value, we get a single score, If it's an array of values, we get an array of scores.\n LAMBDA = L1_pen_start * np.exp(x[0]),\n L2_PEN_W = L2_pen_start * np.exp(x[1]),\n # suppress the analysis type message\n quiet = True)\n t2 = time.time()\n temp_results.append((n_calls[0],x,score))\n print(\"calls: %s, time: %0.4f, x0: %0.4f, Cross Validation Error: %s\" % (n_calls[0], t2 - t1, x[0], score))\n #print(\"calls: %s, time: %0.4f, x0: %0.4f, x1: %0.4f, Cross Validation Error: %s, R-Squared: %s\" % (n_calls[0], t2 - t1, x[0], x[1], score, 1 - score / SS ))\n return score\n\n # the actual optimization\n diff_results = differential_evolution(L1_L2_obj_func, bounds = bounds)\n diff_results.x[0] = L1_pen_start * np.exp(diff_results.x[0])\n diff_results.x[1] = L2_pen_start * np.exp(diff_results.x[1])\n return diff_results\n\n\ndef _ncr(n, r):\n #https://stackoverflow.com/questions/4941753/is-there-a-math-ncr-function-in-python\n import operator as op\n import functools\n r = min(r, n-r)\n numer = functools.reduce(op.mul, range(n, n-r, -1), 1) #from py2 xrange()\n denom = functools.reduce(op.mul, range(1, r+1), 1) #from py2 xrange()\n return numer//denom\n\ndef random_combination(iterable, r):\n \"Random selection from itertools.combinations(iterable, r)\"\n #https://stackoverflow.com/questions/22229796/choose-at-random-from-combinations\n import random\n\n pool = tuple(iterable)\n n = 
len(pool)\n indices = sorted(random.sample(range(n), r))\n return tuple(pool[i] for i in indices)\n\ndef repeatfunc(func, times=None, *args):\n \"\"\"Repeat calls to func with specified arguments.\n\n Example: repeatfunc(random.random)\n \"\"\"\n\n if times is None:\n return itertools.starmap(func, itertools.repeat(args))\n return itertools.starmap(func, itertools.repeat(args, times))\n\ndef _gen_placebo_stats_from_diffs(effect_vecs, pre_tr_rmspes,\n control_effect_vecs, pre_c_rmspes,\n max_n_pl = 1000000, ret_pl = False, ret_CI=False, level=0.95):\n N1 = effect_vecs.shape[0]\n N0 = control_effect_vecs.shape[0]\n T1 = effect_vecs.shape[1]\n #ret_p1s=False\n keep_pl = ret_pl or ret_CI\n\n #Get rest of the outcomes\n ##Get the joint effects\n joint_effects = np.sqrt(np.mean(np.square(effect_vecs), axis=1))\n control_joint_effects = np.sqrt(np.mean(np.square(control_effect_vecs), axis=1))\n ## Standardized effect vecs\n std_effect_vecs = np.diagflat(1/pre_tr_rmspes).dot(effect_vecs)\n control_std_effect_vecs = np.diagflat(1/ pre_c_rmspes).dot(control_effect_vecs)\n ##Get the standardized joint effects\n joint_std_effects = np.multiply((1 / pre_tr_rmspes), joint_effects)\n control_joint_std_effects = np.multiply((1/ pre_c_rmspes), control_joint_effects) \n\n #Compute the outcomes for treatment\n effect_vec = np.mean(effect_vecs, axis=0)\n std_effect_vec = np.mean(std_effect_vecs, axis=0)\n joint_effect = np.mean(joint_effects)\n joint_std_effect = np.mean(joint_std_effects)\n\n n_pl = _ncr(N0, N1)\n if (max_n_pl > 0 & n_pl > max_n_pl): #randomize\n comb_iter = itertools.combinations(range(N0), N1)\n comb_len = max_n_pl\n else:\n comb_iter = repeatfunc(random_combination, n_pl, range(N0), N1)\n comb_len = n_pl\n placebo_effect_vecs = None\n if keep_pl:\n placebo_effect_vecs = np.empty((comb_len,T1))\n p2s = np.zeros((1,T1))\n p2s_std = np.zeros((1,T1))\n #p1s = np.zero((1,T1))\n #p1s_std = np.zero((1,T1))\n #effect_vec_sgn = np.sign(effect_vec)\n joint_p = 0\n joint_std_p = 0\n for idx, comb in enumerate(comb_iter):\n placebo_effect_vec = np.mean(control_effect_vecs[comb,:], 0)\n placebo_std_effect_vec = np.mean(control_std_effect_vecs[comb,:], 0)\n placebo_joint_effect = np.mean(control_joint_effects[comb,:])\n placebo_joint_std_effect = np.mean(control_joint_std_effects[comb,:])\n\n p2s += (abs(placebo_effect_vec) >= abs(effect_vec))\n p2s_std += (abs(placebo_std_effect_vec) >= abs(std_effect_vec))\n #p1s += (effect_vec_sgn*placebo_effect_vec >= effect_vec_sgn*effect_vec)\n #p1s_std += (effect_vec_sgn*placebo_std_effect_vec >= effect_vec_sgn*std_effect_vec)\n joint_p += (placebo_joint_effect >= joint_effect)\n joint_std_p += (placebo_joint_std_effect >= joint_std_effect)\n if keep_pl:\n placebo_effect_vecs[idx,:] = placebo_effect_vec\n p2s = p2s/comb_len\n p2s_std = p2s_std/comb_len\n #p1s = p1s/comb_len\n #p1s_std = p1s_std/comb_len\n joint_p = joint_p/comb_len\n joint_std_p = joint_std_p/comb_len\n #p2s = 2*p1s #Ficher 2-sided p-vals (less common)\n if ret_CI:\n #CI - All hypothetical true effects (beta0) that would not be reject at the certain level\n # To test non-zero beta0, apply beta0 to get unexpected deviation beta_hat-beta0 and compare to permutation distribution\n # This means that we take the level-bounds of the permutation distribution then \"flip it around beta_hat\"\n # To make the math a bit nicer, I will reject a hypothesis if pval<=(1-level)\n assert level<=1; \"Use a level in [0,1]\"\n alpha = (1-level)\n p2min = 2/n_pl\n alpha_ind = max((1,round(alpha/p2min)))\n alpha = 
alpha_ind* p2min\n CIs = np.empty((2,T1))\n for t in range(T1):\n sorted_eff = np.sort(placebo_effect_vecs[:,t]) #TODO: check with Stata about sort order\n low_effect = sorted_eff[alpha_ind]\n high_effect = sorted_eff[(comb_len+1)-alpha_ind]\n if np.sign(low_effect)==np.sign(high_effect):\n warnings.warn(\"CI doesn't containt effect. You might not have enough placebo effects.\")\n CIs[:,t] = (effect_vec[t] - high_effect, effect_vec[t] - low_effect) \n else:\n CIs = None\n\n EstResultCI = namedtuple('EstResults', 'effect p ci')\n \n SparseSCEstResults = namedtuple('SparseSCEstResults', 'effect_vec_res std_p joint_p joint_std_p N_placebo placebo_effect_vecs')\n ret_struct = SparseSCEstResults(EstResultCI(effect_vec, p2s, CIs), p2s_std, joint_p, joint_std_p, comb_len, placebo_effect_vecs)\n return ret_struct\n\ndef estimate_effects(X, Y_pre, Y_post, treated_units, max_n_pl = 1000000, ret_pl = False, ret_CI=False, level=0.95, \n V_penalty = None, W_penalty=None, **kwargs):\n #TODO: Cleanup returning placebo distribution (incl pre?)\n #N1 = len(treated_units)\n X_and_Y_pre = np.hstack( ( X, Y_pre,) )\n N = X_and_Y_pre.shape[0]\n #N0 = N - N1\n #T1 = Y_post.shape[1]\n control_units = list(set(range(N)) - set(treated_units)) \n all_units = list(range(N))\n Y_post_c = Y_post[control_units, :]\n Y_post_tr = Y_post[treated_units, :]\n X_and_Y_pre_c = X_and_Y_pre[control_units, :]\n \n if V_penalty is None: #TODO (handle this case better)\n results = joint_penalty_optimzation(X = X_and_Y_pre_c, Y = Y_post_c, **kwargs)\n V_penalty,W_penalty = results.x[0],results.x[1]\n\n weights, V, ts_score, ts_loss, L2_PEN_W, opt = loo_v_matrix(X = X_and_Y_pre_c, \n Y = Y_post_c,\n LAMBDA = V_penalty, L2_PEN_W = W_penalty)\n\n weights = loo_weights(X = X_and_Y_pre,\n V = V,\n L2_PEN_W = W_penalty,\n treated_units = all_units,\n control_units = control_units)\n Y_post_sc = weights.dot(Y_post_c)\n # Get post effects\n Y_post_tr_sc = Y_post_sc[treated_units, :]\n Y_post_c_sc = Y_post_sc[control_units, :]\n effect_vecs = Y_post_tr - Y_post_tr_sc\n control_effect_vecs = Y_post_c - Y_post_c_sc\n \n # Get pre match MSE (match quality)\n Y_pre_tr = Y_pre[treated_units, :]\n Y_pre_c = Y_pre[control_units, :]\n Y_pre_sc = weights.dot(Y_pre_c)\n Y_pre_tr_sc = Y_pre_sc[treated_units, :]\n Y_pre_c_sc = Y_pre_sc[control_units, :]\n pre_tr_pes = Y_pre_tr - Y_pre_tr_sc\n pre_c_pes = Y_pre_c - Y_pre_c_sc\n pre_tr_rmspes = np.sqrt(np.mean(np.square(pre_tr_pes), axis=1))\n pre_c_rmspes = np.sqrt(np.mean(np.square(pre_c_pes), axis=1))\n\n\n return _gen_placebo_stats_from_diffs(effect_vecs, pre_tr_rmspes,\n control_effect_vecs, pre_c_rmspes,\n max_n_pl, ret_pl, ret_CI, level)\n\n# ------------------------------------------------------------\n# utilities for maintaining a worker pool\n# ------------------------------------------------------------\n\n_worker_pool = None\n\ndef _initialize_Global_worker_pool(n_workers):\n global _worker_pool\n\n if _worker_pool is not None:\n return # keep it itempotent, please\n\n _worker_pool = futures.ProcessPoolExecutor(max_workers=n_workers)\n\ndef _clean_up_worker_pool():\n global _worker_pool\n\n if _worker_pool is not None:\n _worker_pool.shutdown()\n _worker_pool = None\n\natexit.register(_clean_up_worker_pool)\n" ]
[ [ "numpy.diag", "numpy.hstack", "numpy.square", "scipy.optimize.differential_evolution", "numpy.multiply", "numpy.diagflat", "numpy.arange", "numpy.vstack", "numpy.sort", "sklearn.model_selection.KFold", "numpy.asmatrix", "numpy.sign", "numpy.mean", "numpy.exp", "numpy.zeros", "numpy.where", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
cmcuervol/Mojana
[ "e5491d6af0b6d5ac1900371ece561b8bf8835f02" ]
[ "Modules/Hidrografas.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nimport Graphs\n\nPath = os.getcwd()\n\ndef kmsq2misq(km2):\n \"\"\"\n Pass square kms to square milles\n INPUTS\n km2 : float of square kms\n \"\"\"\n if km2 is not None:\n return km2/2.58998811034\n else:\n return None\n\ndef km2mi(km):\n \"\"\"\n Pass kms to milles\n INPUTS\n km : float of kms\n \"\"\"\n if km is not None:\n return km/1.609\n else:\n return None\ndef km2ft(km):\n \"\"\"\n Pass kms to foot\n INPUTS\n km : float of kms\n \"\"\"\n if km is not None:\n return (km/1.609)*5280\n else:\n return None\ndef m2ft(m):\n \"\"\"\n Pass metres to foot\n INPUTS\n km : float of metres\n \"\"\"\n if m is not None:\n return m*3.28084\n else:\n return None\n\nHUFF = np.array([[0,50,85,93,95,97,98,99,100,100,100],\n [0,49,75,88,92,95,97,98, 99,100,100],\n [0,30,65,82,89,92,95,97, 97, 99,100],\n [0,22,58,78,85,90,93,95, 96, 98,100],\n [0,18,50,71,80,85,90,94, 95, 98,100],\n [0,14,45,65,75,80,85,91, 94, 97,100],\n [0,11,40,58,68,73,79,87, 92, 96,100],\n [0, 8,35,51,59,66,72,80, 90, 95,100],\n [0, 5,28,45,52,57,63,71, 82, 93,100]])\nclass Hidrografa:\n \"\"\"\n Class to calc unitarian hydrograph\n INPUTS\n Area : Área de drenaje de la cuenca [kmˆ2]\n Perimetro : Perímetro de la cuenca [km]\n CN_II : Numero de la curva\n Long_Cause : Longitud del cause principal [km]\n Long_RioDivi : Longitud río hasta la divisoria [km]\n Pend_Cause : Pendiente del cause principal [%]\n Pend_Cuenca : Pendiente de la cuenca [%]\n Cota_MaxCuenca : Cota mayor de la cuenca [m]\n Cota_MinCuenca : Cota menor de la cuenca [m]\n Cota_MaxRio : Cota mayor del río [m]\n Cota_MinRio : Cota menor del río [m]\n Long_Centroide : Longitud del cause al centroide [km]\n Long_Cuenca : Longitud al punto más alejado, longitud de la cuenca [km]\n Enlong_Ratio : Relación de enlongación\n Horton_FormFact : Factor de forma de Horton\n Compacidad_Coef : Coeficiente de compacidad\n TR : List or array with return times\n IDF : List or array with Intensity asociated with return times\n K1_TR : List or array with constants k valid for duratios < 105\n K2_TR : List or array with constants k valid for duratios > 105\n t_dist : List or array with time distribution\n Prob_Huff : Float of probability of Huff distribution\n \"\"\"\n def __init__(self, Area=None, Perimetro=None, CN_II=None,\n Long_Cause=None, Long_RioDivi=None,\n Pend_Cause=None, Pend_Cuenca=None,\n Cota_MaxCuenca=None, Cota_MinCuenca=None,\n Cota_MaxRio=None, Cota_MinRio=None,\n Long_Centroide=None, Long_Cuenca=None,\n Enlong_Ratio=None, Horton_FormFact=None, Compacidad_Coef=None,\n TR=None, IDF=None,t_dist=None, Prob_Huff=None,K1_TR=None, K2_TR=None, ):\n\n self.Area_km = Area\n self.Area_mi = kmsq2misq(Area)\n\n self.Perim = Perimetro\n self.CN_II = CN_II\n self.LongCause_km = Long_Cause\n self.LongCause_ft = km2ft(Long_Cause)\n self.LongCause_mi = km2mi(Long_Cause)\n\n self.LongCuenca_km = Long_Cuenca\n self.LongCuenca_ft = km2ft(Long_Cuenca)\n self.LongCuenca_mi = km2mi(Long_Cuenca)\n\n self.LongRioDivi_km = Long_RioDivi\n self.LongRioDivi_ft = km2ft(Long_RioDivi)\n\n self.PendCause = Pend_Cause\n self.PendCuenca = Pend_Cuenca\n\n self.HmaxCuenca_m = Cota_MaxCuenca\n self.HmaxCuenca_ft = m2ft(Cota_MaxCuenca)\n\n self.HminCuenca_m = Cota_MinCuenca\n self.HminCuenca_ft = m2ft(Cota_MinCuenca)\n\n self.HmaxRio_m = Cota_MaxRio\n self.HmaxRio_ft = m2ft(Cota_MaxRio)\n\n self.HminRio_m = Cota_MinRio\n self.HminRio_ft = m2ft(Cota_MinRio)\n\n 
self.Centroide = Long_Centroide\n self.Enlong = Enlong_Ratio\n self.HortonFF = Horton_FormFact\n self.Compacidad = Compacidad_Coef\n\n self.IDF = IDF\n\n if TR is None:\n self.TR = np.array([2.33,5,10,25,50,100])\n else:\n self.TR = TR\n\n # if K1_TR is None:\n # self.K1 = np.array([31.87,37.08,41.00,45.64,48.91,52.06])\n # else:\n # self.K1 = K1_TR\n #\n # if K2_TR is None:\n # self.K2 = np.array([1664.90,1937.23,2142.03,2384.33,2555.25,2719.43])\n # else:\n # self.K2 = K2_TR\n\n if Prob_Huff is None:\n self.Prob_Huff = 50\n else:\n self.Prob_Huff = Prob_Huff\n\n if t_dist is None:\n # self.t_dist = np.array([0,16,52,69,80,85,87,91,94,98,100])\n self.t_dist = HUFF[int(self.Prob_Huff/10 -1)]\n else:\n self.t_dist = t_dist\n\n def SCS(self, tiempos=None, Tc=None,\n graph=False, title_fig='', name_fig='Hidrografa_SCS',\n pdf_out=True, png_out=False, Path_Figs=Path,):\n \"\"\"\n Calcula hidrogrfa unitaria del SCS\n INPUTS\n tiempos : array with times to interpolate the flow\n graph : Boolean to do grahpic\n title_fig: Figure title\n name_fig : Name to save figure\n pdf_out : Boolean to save figure in pdf format\n png_out : Boolean to save figure in png format\n PathFigs : Aboslute route to directory where figure will be save\n\n \"\"\"\n if Tc is None:\n K = 3.28084*(self.LongCause_km**3)/(self.HmaxRio_ft - self.HminRio_ft)\n self.tc_SCS = 0.97*(K**0.385)\n else:\n self.tc_SCS = Tc\n\n self.Tr_SCS = self.tc_SCS*3/5\n self.T_SCS = 0.133*self.tc_SCS\n self.Tp_SCS = self.T_SCS/2. + self.Tr_SCS\n self.Up_SCS = (484*self.Area_mi/self.Tp_SCS)/(25.4*35.315)\n Adim= np.array([\n (0.0,0.000),(0.1,0.030),(0.2,0.100),(0.3,0.190),(0.4,0.310),(0.5,0.470),\n (0.6,0.660),(0.7,0.820),(0.8,0.930),(0.9,0.990),(1.0,1.000),(1.1,0.990),\n (1.2,0.930),(1.3,0.860),(1.4,0.780),(1.5,0.680),(1.6,0.560),(1.7,0.460),\n (1.8,0.390),(1.9,0.330),(2.0,0.280),(2.2,0.207),(2.4,0.147),(2.6,0.107),\n (2.8,0.077),(3.0,0.055),(3.2,0.040),(3.4,0.029),(3.6,0.021),(3.8,0.015),\n (4.0,0.011),(4.5,0.005),(5.0,0.000)\n ])\n\n self.t_SCS = self.Tp_SCS*Adim[:,0]\n self.Q_SCS = self.Up_SCS*Adim[:,1]\n\n if graph == True:\n Graphs.GraphHydrografa(self.t_SCS,self.Q_SCS, title_fig, name_fig, pdf_out, png_out, Path_Figs)\n if tiempos is None:\n return self.t_SCS ,self.Q_SCS\n else:\n return np.interp(tiempos, self.t_SCS ,self.Q_SCS)\n\n def Sneyder(self, tiempos=None, Tc= None, Cp=0.8,\n graph=False, title_fig='', name_fig='Hidrografa_Sneyder',\n pdf_out=True, png_out=False, Path_Figs=Path,):\n \"\"\"\n Calcula hidrogrfa unitaria del Sneyder\n INPUTS\n tiempos : array with times to interpolate the flow\n Tc : concentration time\n Cp : bolean between 0.5 to 0.8\n graph : Boolean to do grahpic\n title_fig: Figure title\n name_fig : Name to save figure\n pdf_out : Boolean to save figure in pdf format\n png_out : Boolean to save figure in png format\n PathFigs : Aboslute route to directory where figure will be save\n\n \"\"\"\n if Tc is None:\n K = 3.28084*(self.LongCause_km**3)/(self.HmaxRio_ft - self.HminRio_ft)\n self.tc_Sny = 0.97*(K**0.385)\n else:\n self.tc_Sny = Tc\n self.Tr_Sny = self.tc_Sny*3/5\n self.Ts_Sny = self.Tr_Sny/5.5\n self.Tp_Sny = self.tc_Sny/2. 
+ self.Tr_Sny\n\n self.u_Sny = Cp*640/(self.Tr_Sny+(self.tc_Sny-self.Ts_Sny)/4)\n self.Up_Sny = (self.Area_mi*self.u_Sny)/(25.4*35.315)\n\n self.Tb_Sny = 3+3*(self.Tr_Sny/24)\n if self.Tb_Sny > 3:\n self.Tb_Sny = 4*self.Tp_Sny\n\n self.W50_Sny = 770/(self.u_Sny**1.08)\n self.W75_Sny = 440/(self.u_Sny**1.08)\n\n self.t_Sny = np.array([0,\n self.Tp_Sny - self.W50_Sny/3,\n self.Tp_Sny - self.W75_Sny/3,\n self.Tp_Sny,\n self.Tp_Sny + 2*self.W75_Sny/3,\n self.Tp_Sny + 2*self.W50_Sny/3,\n self.Tb_Sny\n ])\n self.Q_Sny = np.array([0,\n self.Up_Sny*0.5,\n self.Up_Sny*0.75,\n self.Up_Sny,\n self.Up_Sny*0.75,\n self.Up_Sny*0.5,\n 0\n ])\n\n if graph == True:\n Graphs.GraphHydrografa(self.t_Sny,self.Q_Sny, title_fig, name_fig, pdf_out, png_out, Path_Figs)\n if tiempos is None:\n return self.t_Sny ,self.Q_Sny\n else:\n return np.interp(tiempos, self.t_Sny ,self.Q_Sny)\n\n def WilliansHann(self, tiempos=None, Tc=None,\n graph=False, title_fig='', name_fig='Hidrografa_WilliamsHann',\n pdf_out=True, png_out=False, Path_Figs=Path,):\n \"\"\"\n Calcula hidrogrfa unitaria del Willians & Hann\n INPUTS\n tiempos : array with times to interpolate the flow\n Tc : concentration time\n graph : Boolean to do grahpic\n title_fig: Figure title\n name_fig : Name to save figure\n pdf_out : Boolean to save figure in pdf format\n png_out : Boolean to save figure in png format\n PathFigs : Aboslute route to directory where figure will be save\n \"\"\"\n Wc = self.Area_mi/self.LongCuenca_mi\n Sc = (self.HmaxRio_ft-self.HminRio_ft)/self.LongCause_mi\n\n self.K_WH = 27*(self.Area_mi**0.231)*(Sc**-0.777)*((self.LongCuenca_mi/Wc)**0.124)\n if Tc is None:\n self.Tp_WH = 4.63*(self.Area_mi**0.422)*(Sc**-0.48)*((self.LongCuenca_mi/Wc)**0.133)\n else:\n self.Tp_WH = Tc\n\n frac = self.K_WH/self.Tp_WH\n\n n = 1 + (1/(2*frac)+((1/(4*(frac**2))+1/frac)**0.5))**2\n if n <= 1.27:\n y = np.poly1d([7527.3397824, - 28318.9594289, 35633.3593146,-14897.3755403])\n # elif n <= 12:\n else:\n y = np.poly1d([-0.0053450748, 0.1120132788, -0.1735395123, -12.7945848518, 163.3452557299,-85.1829993108])\n\n self.B_WH = y(n)\n\n self.t0_WH = self.Tp_WH*(1+ (1/((n-1)**0.5)))\n self.t1_WH = self.t0_WH+ 2*self.K_WH\n\n # self.Up_WH = (self.B_WH*self.Area_mi/self.Tp_WH)/(25.4*35.315)/25.4\n self.Up_WH = (self.B_WH*self.Area_mi/self.Tp_WH)*((0.3048)**3)*(1/25.4)\n self.U0_WH = self.Up_WH * ((self.t0_WH/self.Tp_WH)**(n-1)) * np.exp((1-n)*((self.t0_WH/self.Tp_WH)-1))\n self.U1_WH = self.U0_WH * np.exp((self.t0_WH-self.t1_WH)/self.K_WH)\n\n\n tr1 = np.arange(0,self.Tp_WH, self.Tp_WH/10.)\n tr2 = np.arange(self.Tp_WH, self.t0_WH, self.Tp_WH/10.)\n tr3 = np.arange(self.t0_WH, self.t1_WH+self.Tp_WH, self.Tp_WH/10.)\n self.t_WH = np.concatenate((tr1,tr2,tr3), axis=None)\n\n q = np.zeros(self.t_WH.shape, dtype=float)\n for i in range(len(self.t_WH)):\n if self.t_WH[i] <= self.t0_WH :\n q[i] = self.Up_WH * ((self.t_WH[i]/self.Tp_WH)**(n-1)) * np.exp((1-n)*((self.t_WH[i]/self.Tp_WH)-1))\n elif self.t_WH[i] <= self.t1_WH:\n q[i] = self.U0_WH * np.exp((self.t0_WH-self.t_WH[i])/self.K_WH)\n else:\n q[i] = self.U1_WH * np.exp((self.t1_WH-self.t_WH[i])/(2*self.K_WH))\n\n self.Q_WH = q\n\n if graph == True:\n Graphs.GraphHydrografa(self.t_WH,self.Q_WH, title_fig, name_fig, pdf_out, png_out, Path_Figs)\n if tiempos is None:\n return self.t_WH ,self.Q_WH\n else:\n return np.interp(tiempos, self.t_WH ,self.Q_WH)\n\n def Tc_Kirpich(self):\n \"\"\"\n Calculate concentration time of Kirpich (1990)\n \"\"\"\n self.tc_Kirpich = 
0.066*((self.LongCause_km/((self.PendCause/100)**0.5))**0.77)\n\n return self.tc_Kirpich\n\n\n def Tc_Temez(self):\n \"\"\"\n Calculate concentration time of Temez (1978)\n \"\"\"\n self.tc_Temez = 0.3*((self.LongCause_km/(self.PendCause**0.25))**0.76)\n\n return self.tc_Temez\n\n\n def Tc_Giandoti(self):\n \"\"\"\n Calculate concentration time of Giandoti (1990)\n \"\"\"\n self.tc_Giandoti = (4*(self.Area_km**0.5)+ 1.5*self.LongCause_km)/(25.3*((self.LongCause_km*self.PendCause/100)**0.5))\n\n return self.tc_Giandoti\n\n\n def Tc_Williams(self):\n \"\"\"\n Calculate concentration time of Williams\n \"\"\"\n self.tc_Williams = 0.272*self.LongCause_km*(self.Area_km**0.4)/(((self.PendCause/100)**0.2)*((4*self.Area_km/np.pi)**0.5))\n\n return self.tc_Williams\n\n\n def Tc_Johnstone(self):\n \"\"\"\n Calculate concentration time of Johnstone (1949)\n \"\"\"\n # self.tc_Johnstone = 5*self.LongCause_mi/(((self.PendCause/100)*1609/3.281)**0.5)\n self.tc_Johnstone = 5*self.LongCause_mi/(((self.HmaxRio_ft-self.HminRio_ft)/self.LongCause_mi)**0.5)\n\n return self.tc_Johnstone\n\n\n def Tc_California(self):\n \"\"\"\n Calculate concentration time of California Culverts Practice (1942)\n \"\"\"\n self.tc_California = ((0.87075*(self.LongCause_km**3)/(self.HmaxCuenca_m - self.HminCuenca_m))**0.385)\n return self.tc_California\n\n\n def Tc_Clark(self):\n \"\"\"\n Calculate concentration time of Clark\n \"\"\"\n self.tc_Clark = 0.0335*((self.Area_km/((self.PendCause/100)**0.5))**0.593)\n return self.tc_Clark\n\n\n def Tc_Passinni(self):\n \"\"\"\n Calculate concentration time of Passinni\n \"\"\"\n self.tc_Passinni = 0.108*((self.LongCause_km*self.Area_km)**(1/3))/(((self.PendCause/100)**0.5))\n\n return self.tc_Passinni\n\n\n def Tc_Pilgrim(self):\n \"\"\"\n Calculate concentration time of Pilgrim\n \"\"\"\n self.tc_Pilgrim = 0.76*(self.Area_km**0.38)\n\n return self.tc_Pilgrim\n\n\n def Tc_SCS(self):\n \"\"\"\n Calculate concentration time of SCS\n \"\"\"\n self.tc_SCS = 0.947*(((self.LongCause_km**3)/(self.HmaxRio_m-self.HminRio_m))**0.385)\n\n return self.tc_SCS\n\n\n def Tc_Valencia(self):\n \"\"\"\n Calculate concentration time of Valencia\n \"\"\"\n self.tc_Valencia = 1.7694*(self.Area_km**0.325)*(self.LongCause_km**-0.096)*(self.PendCause**-0.29)\n\n return self.tc_Valencia\n\n\n def Tc_Bransby(self):\n \"\"\"\n Calculate concentration time of Bransby\n \"\"\"\n self.tc_Bransby = (1/60.)*14.6*self.LongCause_km/((self.Area_km**0.1)*((self.PendCause/100)**0.2))\n\n return self.tc_Bransby\n\n def ConcentrationTimes(self,\n graph=False,title_fig='',\n name_fig='ConcentrationTimes',\n pdf_out=True, png_out=False, Path_Figs=Path,):\n \"\"\"\n Calculate concentration time with all methodologies\n INPUTS\n graph : Boolean to do grahpic\n title_fig: Figure title\n name_fig : Name to save figure\n pdf_out : Boolean to save figure in pdf format\n png_out : Boolean to save figure in png format\n PathFigs : Aboslute route to directory where figure will be save\n \"\"\"\n\n self.Tc = {'Kirpich' : self.Tc_Kirpich(),\n 'Temez' : self.Tc_Temez(),\n 'Giandoti' : self.Tc_Giandoti(),\n 'Williams' : self.Tc_Williams(),\n 'Johnstone' : self.Tc_Johnstone(),\n 'California' : self.Tc_California(),\n 'Clark' : self.Tc_Clark(),\n 'Passinni' : self.Tc_Passinni(),\n 'Pilgrim' : self.Tc_Pilgrim(),\n 'SCS' : self.Tc_SCS(),\n 'Valencia' : self.Tc_Valencia(),\n 'Bransby' : self.Tc_Bransby(),\n }\n\n vals = np.array(list(self.Tc.values()))\n lims = np.percentile(vals, [25,75])\n\n idx = np.where((vals>lims[0])& 
(vals<lims[1]))[0]\n\n self.Tc_mean = np.mean(vals[idx])\n if graph == True:\n Graphs.GraphTc(self.Tc, title_fig, name_fig, pdf_out, png_out, Path_Figs)\n\n self.Tc.update({'MEAN':self.Tc_mean})\n return self.Tc\n\n\n #\n # def IDF(self, Tr=None, K1=None, K2=None,\n # graph=False, cmap_name='jet',\n # name_fig='IDF', pdf_out=True, png_out=False, Path_Figs=Path):\n # \"\"\"\n # Calculate IDF\n # INPUTS\n # Tr : List or array with return times\n # K1 : List or array with constants k valid for duratios < 105\n # K2 : List or array with constants k valid for duratios > 105\n # graph : Boolean to do grahp\n # cmap_name: name of cmap\n # name_fig : Name to save figure\n # pdf_out : Boolean to save figure in pdf format\n # png_out : Boolean to save figure in png format\n # PathFigs : Aboslute route to directory where figure will be save\n # \"\"\"\n # if Tr is None:\n # Tr = self.TR\n # if K1 is None:\n # K1 = self.K1\n # if K2 is None:\n # K2 = self.K2\n # min_duration = 5\n # max_duration = 1440\n # d1 = np.tile(np.arange(min_duration,105,1), (len(Tr),1))\n # d2 = np.tile(np.arange(105,max_duration+1,1), (len(Tr),1))\n # duration = np.arange(min_duration,max_duration+1,1)\n # I1 = np.zeros(d1.shape)\n # I2 = np.zeros(d2.shape)\n # for i in range(len(Tr)):\n # I1[i,:] = K1[i]*(46.2/(d1[i,:]**(0.75))- 43.05/d1[i,:] )\n # I2[i,:] = K2[i]*(d2[i,:]**(-0.85))\n #\n # I = np.column_stack((I1,I2))\n # I = np.rollaxis(I, 1,0)\n #\n # if graph == True:\n # Graphs.GraphIDF(I, duration, Tr, cmap_name, name_fig, pdf_out, png_out, Path_Figs)\n # return I, duration, Tr\n #\n #\n # def IDF_value(self, duration, Tr=None, K1=None, K2=None,):\n # \"\"\"\n # Calculate IDF single value\n # INPUTS\n # duration : Float of duration [min]\n # Tr : List or array with return times\n # K1 : List or array with constants k valid for duratios < 105\n # K2 : List or array with constants k valid for duratios > 105\n # \"\"\"\n # if Tr is None:\n # Tr = self.TR\n # if K1 is None:\n # K1 = self.K1\n # if K2 is None:\n # K2 = self.K2\n #\n #\n # if (duration >=5 )&(duration <105):\n # I = K1*(46.2/(duration**(0.75))- 43.05/duration )\n # elif (duration >= 105) & (duration <= 1440):\n # I = K2*(duration**(-0.85))\n # else:\n # raise Exception(\"duration must be in the interval [5,1440]\")\n #\n # return I\n\n def PPT_total(self, Tc, Tr=None, t_rule=None):\n \"\"\"\n Calculate the total precipitation given concentration time for the return times given\n INPUTS\n Tc : Concetration time [min]\n Tr : List or array with return times\n t_rule :List or array with time distribution\n \"\"\"\n if t_rule is None:\n t_rule = self.t_dist\n\n T_acum = t_rule/100.\n # Int = self.IDF_value(Tc, Tr)\n Int = self.IDF\n PPT = Int*Tc\n P_acum = np.zeros((len(T_acum), len(PPT)),dtype=float)\n P_tota = np.zeros((len(T_acum), len(PPT)),dtype=float)\n\n for i in range(len(T_acum)):\n P_acum[i,:] = PPT*T_acum[i]\n if i != 0:\n P_tota[i,:] = P_acum[i,:] - P_acum[i-1,:]\n\n return P_acum, P_tota\n\n def Loses_SCS(self, Tc, Tr=None, t_rule=None):\n \"\"\"\n Calculate the loses SCS for a given concentration time for the return times given\n INPUTS\n Tc : Concetration time [min]\n Tr : List or array with return times\n t_rule :List or array with time distribution\n \"\"\"\n self.CN_III = 23*self.CN_II/(10+0.13*self.CN_II)\n\n S = 25.4*((1000/self.CN_III )-10)\n la = 0.2*S\n\n P_acum, P_tota = self.PPT_total(Tc, Tr)\n\n Pe_acum = np.zeros(P_acum.shape, dtype=float)\n Pe_tota = np.zeros(P_tota.shape, dtype=float)\n\n for i in range(Pe_acum.shape[0]):\n for j in 
range(Pe_acum.shape[1]):\n if (P_acum[i,j]-la) > 0:\n Pe_acum[i,j] = ((P_acum[i,j]-la)**2)/(P_acum[i,j]-la+S)\n\n if i !=0:\n Pe_tota[i,:] = Pe_acum[i,:] - Pe_acum[i-1,:]\n\n return Pe_acum, Pe_tota\n\n\n def Hietogram(self, Tc, Tr=None, t_rule=None,\n graph=False, title_fig='',name_fig='Hietogram',\n pdf_out=True, png_out=False, Path_Figs=Path):\n\n \"\"\"\n Make precipitation hietogram\n INPUTS\n Tc : Concetration time [min]\n Tr : List or array with return times\n t_rule : List or array with time distribution\n graph : Boolean to do grahpic\n name_fig : Name to save figure\n pdf_out : Boolean to save figure in pdf format\n png_out : Boolean to save figure in png format\n PathFigs : Aboslute route to directory where figure will be save\n \"\"\"\n if Tr is None:\n Tr = self.TR\n if t_rule is None:\n t_rule = self.t_dist\n\n self.P_acum, self.P_tota = self.PPT_total(Tc,Tr,t_rule)\n self.Pe_acum, self.Pe_tota = self.Loses_SCS(Tc,Tr,t_rule)\n t = Tc*np.linspace(0,1, self.P_tota.shape[0])\n if graph == True:\n Graphs.GraphHietogram(self.P_tota, self.Pe_tota, np.around(t,1), Tr, title_fig,name_fig, pdf_out, png_out, Path_Figs)\n return self.P_acum, self.P_tota, self.Pe_acum, self.Pe_tota\n\n def Hydrogram(self,Tc, Time, U_Hidrograph, Tr=None, t_rule=None,\n graph=False, join=True, title_fig='', name_fig='Hydrogram',\n cmap_name='jet',pdf_out=True, png_out=False, Path_Figs=Path):\n \"\"\"\n Make total Hidrogram\n INPUTS\n Tc : Concentratio time\n Time :\n U_Hidrograph: Unitarian hydrograph\n Tr : List or array with return times\n t_rule : List or array with time distribution\n graph : Boolean to do grahpic\n title_fig : Figure title\n name_fig : Name to save figure\n cmap_name : color map name\n pdf_out : Boolean to save figure in pdf format\n png_out : Boolean to save figure in png format\n PathFigs : Aboslute route to directory where figure will be save\n \"\"\"\n if Tr is None:\n Tr = self.TR\n if t_rule is None:\n t_rule = self.t_dist\n\n Pe_a, Pe_t = self.Loses_SCS(Tc,Tr,t_rule)\n n = 3\n C1 = np.zeros((Pe_a.shape[0]*(n+1)-1, Pe_a.shape[0]*n,len(Tr)),dtype=float)\n C1[:Pe_a.shape[0],0,:] = Pe_t\n for i in range(n*Pe_a.shape[0]-1):\n C1[:,i+1,:] = np.roll(C1[:,i,:],1,axis=0)\n\n Q = np.zeros(C1.shape[0], dtype=float)\n Q[Pe_a.shape[0]-1:Pe_a.shape[0]+U_Hidrograph.shape[0]-1] = U_Hidrograph\n\n C2 = np.zeros(C1.shape,dtype=float)\n for i in range(C1.shape[1]):\n for j in range(C1.shape[2]):\n C2[:,i,j] = Q*C1[:,i,j]\n\n self.H = np.sum(C2,axis=0)\n # self.t_hydrogram = np.arange(0,self.H.shape[0])*(Tc/(len(Time)-1))/60 #hours\n self.t_hydrogram = np.arange(0,self.H.shape[0])*(Tc/(len(Time)-1)) #hours\n if graph == True:\n Graphs.GraphHydrogram(self.t_hydrogram, self.H, Tr, join, title_fig, name_fig, cmap_name, pdf_out, png_out, Path_Figs)\n\n return self.H, self.t_hydrogram\n\n\n def Qmax(self, Tc=None, Time=None,\n graph=False, title_fig='', name_fig='MaxFlow',\n pdf_out=True, png_out=False, Path_Figs=Path,):\n \"\"\"\n Calculate Maximum flow foe each return time\n INPUTS\n Tc\n \"\"\"\n if Tc is None:\n Tc = self.Tc_mean\n\n if Time is None:\n Time = np.linspace(0,Tc,11) # make vector with 10% delta, can be any delta\n\n\n h_SCS, t_hydrogram = self.Hydrogram(Tc, Time, self.SCS(tiempos=Time,Tc=Tc))\n h_Sny, t_hydrogram = self.Hydrogram(Tc, Time, self.Sneyder(tiempos=Time,Tc=Tc))\n h_Wil, t_hydrogram = self.Hydrogram(Tc, Time, self.WilliansHann(tiempos=Time,))\n\n self.Qmax_SCS = np.max(h_SCS,axis=0)\n self.Qmax_Sny = np.max(h_Sny,axis=0)\n self.Qmax_Wil = np.max(h_Wil,axis=0)\n\n if graph 
== True:\n Graphs.GraphQmax(self.TR, self.Qmax_SCS, self.Qmax_Sny, self.Qmax_Wil,title_fig, name_fig, pdf_out, png_out, Path_Figs )\n\n return {'SCS': self.Qmax_SCS, 'Sny':self.Qmax_Sny, 'Wil':self.Qmax_Wil}\n" ]
[ [ "numpy.poly1d", "numpy.linspace", "numpy.arange", "numpy.around", "numpy.percentile", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.interp", "numpy.where", "numpy.exp", "numpy.roll", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NotAnyMike/stable-baselines
[ "dee991c673cfd52a81534bee033023e080517a21" ]
[ "tests/test_continuous.py" ]
[ "import subprocess\nimport os\n\nimport gym\nimport pytest\nimport numpy as np\n\nfrom stable_baselines import A2C, SAC\n# TODO: add support for continuous actions\n# from stable_baselines.acer import ACER\n# from stable_baselines.acktr import ACKTR\nfrom stable_baselines.ddpg import DDPG\nfrom stable_baselines.ppo1 import PPO1\nfrom stable_baselines.ppo2 import PPO2\nfrom stable_baselines.trpo_mpi import TRPO\nfrom stable_baselines.common import set_global_seeds\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.identity_env import IdentityEnvBox\nfrom stable_baselines.ddpg import AdaptiveParamNoiseSpec\nfrom tests.test_common import _assert_eq\n\n\nN_TRIALS = 1000\nNUM_TIMESTEPS = 15000\n\nMODEL_LIST = [\n A2C,\n # ACER,\n # ACKTR,\n DDPG,\n PPO1,\n PPO2,\n SAC,\n TRPO\n]\n\n\[email protected]\[email protected](\"model_class\", MODEL_LIST)\ndef test_model_manipulation(model_class):\n \"\"\"\n Test if the algorithm can be loaded and saved without any issues, the environment switching\n works and that the action prediction works\n\n :param model_class: (BaseRLModel) A model\n \"\"\"\n try:\n env = DummyVecEnv([lambda: IdentityEnvBox(eps=0.5)])\n\n # create and train\n model = model_class(policy=\"MlpPolicy\", env=env)\n model.learn(total_timesteps=NUM_TIMESTEPS, seed=0)\n\n # predict and measure the acc reward\n acc_reward = 0\n set_global_seeds(0)\n obs = env.reset()\n for _ in range(N_TRIALS):\n action, _ = model.predict(obs)\n obs, reward, _, _ = env.step(action)\n acc_reward += reward\n acc_reward = sum(acc_reward) / N_TRIALS\n\n # saving\n model.save(\"./test_model\")\n\n del model, env\n\n # loading\n model = model_class.load(\"./test_model\")\n\n # changing environment (note: this can be done at loading)\n env = DummyVecEnv([lambda: IdentityEnvBox(eps=0.5)])\n model.set_env(env)\n\n # predict the same output before saving\n loaded_acc_reward = 0\n set_global_seeds(0)\n obs = env.reset()\n for _ in range(N_TRIALS):\n action, _ = model.predict(obs)\n obs, reward, _, _ = env.step(action)\n loaded_acc_reward += reward\n loaded_acc_reward = sum(loaded_acc_reward) / N_TRIALS\n\n with pytest.warns(None) as record:\n act_prob = model.action_probability(obs)\n\n if model_class in [DDPG, SAC]:\n # check that only one warning was raised\n assert len(record) == 1, \"No warning was raised for {}\".format(model_class)\n assert act_prob is None, \"Error: action_probability should be None for {}\".format(model_class)\n else:\n assert act_prob[0].shape == (1, 1) and act_prob[1].shape == (1, 1), \\\n \"Error: action_probability not returning correct shape\"\n\n # test action probability for given (obs, action) pair\n # must return zero and raise a warning or raise an exception if not defined\n env = model.get_env()\n obs = env.reset()\n observations = np.array([obs for _ in range(10)])\n observations = np.squeeze(observations)\n observations = observations.reshape((-1, 1))\n actions = np.array([env.action_space.sample() for _ in range(10)])\n\n if model_class == DDPG:\n with pytest.raises(ValueError):\n model.action_probability(observations, actions=actions)\n else:\n with pytest.warns(UserWarning):\n actions_probas = model.action_probability(observations, actions=actions)\n assert actions_probas.shape == (len(actions), 1), actions_probas.shape\n assert np.all(actions_probas == 0.0), actions_probas\n\n # assert <15% diff\n assert abs(acc_reward - loaded_acc_reward) / max(acc_reward, loaded_acc_reward) < 0.15, \\\n \"Error: the prediction seems to have changed 
between loading and saving\"\n\n # learn post loading\n model.learn(total_timesteps=100, seed=0)\n\n # validate no reset post learning\n # This test was failing from time to time for no good reason\n # other than bad luck\n # We should change this test\n # loaded_acc_reward = 0\n # set_global_seeds(0)\n # obs = env.reset()\n # for _ in range(N_TRIALS):\n # action, _ = model.predict(obs)\n # obs, reward, _, _ = env.step(action)\n # loaded_acc_reward += reward\n # loaded_acc_reward = sum(loaded_acc_reward) / N_TRIALS\n # # assert <10% diff\n # assert abs(acc_reward - loaded_acc_reward) / max(acc_reward, loaded_acc_reward) < 0.1, \\\n # \"Error: the prediction seems to have changed between pre learning and post learning\"\n\n # predict new values\n obs = env.reset()\n for _ in range(N_TRIALS):\n action, _ = model.predict(obs)\n obs, _, _, _ = env.step(action)\n\n # Free memory\n del model, env\n\n finally:\n if os.path.exists(\"./test_model\"):\n os.remove(\"./test_model\")\n\n\ndef test_ddpg():\n args = ['--env-id', 'Pendulum-v0', '--num-timesteps', 1000, '--noise-type', 'ou_0.01']\n args = list(map(str, args))\n return_code = subprocess.call(['python', '-m', 'stable_baselines.ddpg.main'] + args)\n _assert_eq(return_code, 0)\n\n\ndef test_ddpg_eval_env():\n \"\"\"\n Additional test to check that everything is working when passing\n an eval env.\n \"\"\"\n eval_env = gym.make(\"Pendulum-v0\")\n model = DDPG(\"MlpPolicy\", \"Pendulum-v0\", nb_rollout_steps=5,\n nb_train_steps=2, nb_eval_steps=10,\n eval_env=eval_env, verbose=0)\n model.learn(1000)\n\n\ndef test_ddpg_normalization():\n \"\"\"\n Test that observations and returns normalizations are properly saved and loaded.\n \"\"\"\n param_noise = AdaptiveParamNoiseSpec(initial_stddev=0.05, desired_action_stddev=0.05)\n model = DDPG('MlpPolicy', 'Pendulum-v0', memory_limit=50000, normalize_observations=True,\n normalize_returns=True, nb_rollout_steps=128, nb_train_steps=1,\n batch_size=64, param_noise=param_noise)\n model.learn(1000)\n obs_rms_params = model.sess.run(model.obs_rms_params)\n ret_rms_params = model.sess.run(model.ret_rms_params)\n model.save('./test_ddpg')\n\n loaded_model = DDPG.load(\"test_ddpg\")\n obs_rms_params_2 = loaded_model.sess.run(loaded_model.obs_rms_params)\n ret_rms_params_2 = loaded_model.sess.run(loaded_model.ret_rms_params)\n\n for param, param_loaded in zip(obs_rms_params + ret_rms_params,\n obs_rms_params_2 + ret_rms_params_2):\n assert np.allclose(param, param_loaded)\n\n del model, loaded_model\n\n if os.path.exists(\"./test_ddpg\"):\n os.remove(\"./test_ddpg\")\n" ]
[ [ "numpy.all", "numpy.squeeze", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Asichurter/Few-Shot-Project
[ "865cd6aa7b996c518dfa48dcc9ffad90445f9efe" ]
[ "modules/model/SNAIL_.py" ]
[ "import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom modules.utils.dlUtils import get_block_1\n\n\nclass CasualConv1d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size,\n stride=1, dilation=1, groups=1, bias=True):\n super(CasualConv1d, self).__init__()\n self.dilation = dilation\n padding = dilation * (kernel_size - 1)\n self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size, stride,\n padding, dilation, groups, bias)\n\n def forward(self, input):\n # Takes something of shape (N, in_channels, T),\n # returns (N, out_channels, T)\n out = self.conv1d(input)\n return out[:, :, :-self.dilation] # TODO: make this correct for different strides/padding\n\n\nclass DenseBlock(nn.Module):\n def __init__(self, in_channels, dilation, filters, kernel_size=2):\n super(DenseBlock, self).__init__()\n self.casualconv1 = CasualConv1d(in_channels, filters, kernel_size, dilation=dilation)\n self.casualconv2 = CasualConv1d(in_channels, filters, kernel_size, dilation=dilation)\n\n def forward(self, input):\n # input is dimensions (N, in_channels, T)\n xf = self.casualconv1(input)\n xg = self.casualconv2(input)\n activations = torch.tanh(xf) * torch.sigmoid(xg) # shape: (N, filters, T)\n return torch.cat((input, activations), dim=1)\n\n\nclass TCBlock(nn.Module):\n def __init__(self, in_channels, seq_length, filters):\n super(TCBlock, self).__init__()\n self.dense_blocks = nn.ModuleList([DenseBlock(in_channels + i * filters, 2 ** (i + 1), filters)\n for i in range(int(math.ceil(math.log(seq_length, 2))))])\n\n def forward(self, input):\n # input is dimensions (N, T, in_channels)\n input = torch.transpose(input, 1, 2)\n for block in self.dense_blocks:\n input = block(input)\n return torch.transpose(input, 1, 2)\n\n\nclass AttentionBlock(nn.Module):\n def __init__(self, in_channels, key_size, value_size):\n super(AttentionBlock, self).__init__()\n self.linear_query = nn.Linear(in_channels, key_size)\n self.linear_keys = nn.Linear(in_channels, key_size)\n self.linear_values = nn.Linear(in_channels, value_size)\n self.sqrt_key_size = math.sqrt(key_size)\n\n def forward(self, input):\n # input is dim (N, T, in_channels) where N is the batch_size, and T is\n # the sequence length\n mask = np.array([[1 if i > j else 0 for i in range(input.shape[1])] for j in range(input.shape[1])])\n mask = torch.ByteTensor(mask).cuda()\n\n # import pdb; pdb.set_trace()\n keys = self.linear_keys(input) # shape: (N, T, key_size)\n query = self.linear_query(input) # shape: (N, T, key_size)\n values = self.linear_values(input) # shape: (N, T, value_size)\n temp = torch.bmm(query, torch.transpose(keys, 1, 2)) # shape: (N, T, T)\n temp.data.masked_fill_(mask.bool(), -float('inf'))\n temp = F.softmax(temp / self.sqrt_key_size,\n dim=1) # shape: (N, T, T), broadcasting over any slice [:, x, :], each row of the matrix\n temp = torch.bmm(temp, values) # shape: (N, T, value_size)\n return torch.cat((input, temp), dim=2) # shape: (N, T, in_channels + value_size)\n\nclass SNAIL(nn.Module):\n def __init__(self, N, K):\n # N-way, K-shot\n super(SNAIL, self).__init__()\n channels = [1, 64, 64, 64, 64]\n num_channels = channels[-1] + N\n strides = [2, 2, 2, 1]\n paddings = [1, 1, 1, 1]\n kernels = [3, 3, 3, 3]\n layers = [get_block_1(channels[i], channels[i+1],\n strides[i], kernels[i],\n paddings[i]) for i in range(4)]\n self.encoder = nn.Sequential(*layers)\n num_filters = int(math.ceil(math.log(N * K + 1, 2)))\n self.attention1 = AttentionBlock(num_channels, 
64, 32)\n num_channels += 32\n self.tc1 = TCBlock(num_channels, N * K + 1, 128)\n num_channels += num_filters * 128\n self.attention2 = AttentionBlock(num_channels, 256, 128)\n num_channels += 128\n self.tc2 = TCBlock(num_channels, N * K + 1, 128)\n num_channels += num_filters * 128\n self.attention3 = AttentionBlock(num_channels, 512, 256)\n num_channels += 256\n self.fc = nn.Linear(num_channels, N)\n self.N = N\n self.K = K\n\n def forward(self, x, labels):\n # batch_size = query.size(0)\n # support = self.encoder(support).view(self.K*self.N, -1).repeat((batch_size,1,1))\n # query = self.encoder(query).view(batch_size, 1, -1)\n # x = torch.cat((support, query), dim=1)\n\n # s_labels = s_labels.repeat((batch_size,1,1))\n # q_labels = torch.zeros((batch_size, 1, s_labels.size(2))).cuda()\n # labels = torch.cat((s_labels, q_labels), dim=1)\n\n # returned = None\n x = self.encoder(x).squeeze()\n batch_size = int(labels.size()[0] / (self.N * self.K + 1))\n last_idxs = [(i + 1) * (self.N * self.K + 1) - 1 for i in range(batch_size)]\n labels[last_idxs] = torch.Tensor(np.zeros((batch_size, labels.size()[1]))).cuda()\n x = torch.cat((x, labels), 1)\n x = x.view((batch_size, self.N * self.K + 1, -1))\n x = self.attention1(x)\n x = self.tc1(x)\n x = self.attention2(x)\n x = self.tc2(x)\n x = self.attention3(x)\n x = self.fc(x)\n return F.log_softmax(x, dim=2)" ]
[ [ "torch.nn.Sequential", "torch.nn.functional.softmax", "torch.transpose", "torch.sigmoid", "torch.ByteTensor", "torch.nn.functional.log_softmax", "torch.cat", "torch.tanh", "torch.nn.Linear", "torch.bmm", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RubensBritto/AlgoritmoGenetico
[ "dc79bdb46bccadbaf0ad851bb2844378f6400b62", "dc79bdb46bccadbaf0ad851bb2844378f6400b62" ]
[ "hibrido/main.py", "rede-neural/perceptron.py" ]
[ "from ga import *\nfrom perceptron import *\nimport time\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nepocasPlot = []\ntimePlot = []\nclf = Perceptron()\n\ndf = pd.read_csv('dataSetTrain2.csv')\ndf.head()\n\nX_train = df.iloc[0:,[0,1,2,3,4,5,6,7]].values\ny_train = df.iloc[0:,8].values\n\n\ndef train(population):\n scores = []\n for individual in population:\n score = clf.train(X_train, y_train, individual)\n #if score == 0:\n #print('Score 0')\n scores.append(score)\n return scores\n\ndef main ():\n ini = time.time()\n initial_population = get_initial_population(X_train)\n population = initial_population\n epocas = 0 \n while True:\n #ini = time.time()\n scores = train(population)\n scores.sort(reverse=True)\n if scores[0] == 0 or epocas == 20:\n print('Pesos: ', population[0])\n print(f'epocas - {epocas}')\n print(f'score: {scores}')\n \n return population\n population,scores = selectionFirst(population,scores )\n new_population = crossover([], population)\n new_population = mutation(new_population, X_train)\n new_scores = train(new_population)\n population,scores = selection(population, scores, new_population, new_scores)\n #print(scores)\n if new_scores[0] == 0 or epocas == 20:\n print('Pesos: ', population[0])\n print(f'epocas - {epocas}')\n print(f'score: {new_scores}')\n return population\n epocas+=1\n new_population.clear\n new_scores.clear\n fim = time.time()\n timePlot.append(fim-ini)\n #print(f'timePlot {timePlot}')\n epocasPlot.append(epocas)\n\ndef plot():\n plt.plot(epocasPlot,timePlot)\n plt.ylabel('Time(s)')\n plt.xlabel('Epocas')\n plt.show()\n\ndef test(population):\n df = pd.read_csv('dataSetTest2.csv')\n df.head()\n\n X_2 = df.iloc[0:,[0,1,2,3,4,5,6,7]].values\n y_2 = df.iloc[0:,8].values\n clf.test(X_2,y_2,population[0])\n \n print(f'Acertos: {clf.acertosApurado}')\n print(f'Erros: {clf.errosApurado}')\n print(f'Acuracia: {clf.acertosApurado/(clf.acertosApurado+clf.errosApurado)}')\n\nif __name__ == \"__main__\":\n pop = main()\n test(pop)\n #plot()", "import numpy as np\nimport time\nimport math\n\nclass Perceptron(object):\n def __init__(self, learningRate=0.2, epochs=1600):\n self.learningRate = learningRate\n self.epochs = epochs\n self.epocasPlot = []\n self.timePlot = []\n self.acertosApurado = 0\n self.errosApurado = 0\n self.u = 0\n def train(self, X, y):\n ini = time.time()\n self._weights = np.zeros(1 + X.shape[1])\n self.errors = []\n \n for _ in range (self.epochs):\n #ini = time.time()\n errors = 0\n for xi, target in zip(X, y):\n error = (target - self.predict(xi))\n errors += int(error != 0.0)\n \n update = self.learningRate * error\n self._weights[1:] += update * xi\n self._weights[0] += update\n self.errors.append(errors)\n \n fim = time.time()\n self.timePlot.append(fim-ini)\n #print(f'Tempo {self.timePlot}')\n self.epocasPlot.append(len(self.epocasPlot)+1)\n return self\n \n def net_input(self, X):\n bias = self._weights[0]\n output = 0\n i = 0\n for i in range(len(X)):\n output = X[i] * self._weights[i+1]\n output = bias\n return output\n \n def activation_function(self, X):\n x = self.net_input(X)\n if x >= 0:\n z = math.exp(-x)\n sig = 1 / (1 + z)\n return sig\n else:\n z = math.exp(x)\n sig = z / (1 + z)\n return sig\n def predict(self, X):\n return self.activation_function(X)\n \n def test(self, X, y): \n for xi, target in zip(X, y):\n i = 0\n output = 0\n bias = self._weights[0]\n for i in range(len(xi)):\n output = xi[i] * self._weights[i+1]\n self.u = output + bias\n print(f'Valor de u - {self.u}, TARGET - {target}')\n 
self.saida(self.u, target)\n self.u = 0\n \n def saida(self, u, target):\n a = 1 / (1 + math.exp(-u))\n print(f'sub {abs(target - a)}')\n if abs(target - a) == 0 or abs(target - a) == 1:\n self.acertosApurado+=1\n else:\n self.errosApurado+=1" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Diuner/sign-segmentation
[ "4b5bd72898d393913f6d1dd451ebde9ef6964179" ]
[ "4_pred_postprocessing.py" ]
[ "import cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nimport json\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\nimport argparse\n\nthis_dir = \"/\".join(os.path.realpath(__file__).split('/')[:-1]) + '/'\nparent_dir = '/'.join(this_dir.split('/')[:-2]) + '/'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', type=str, \n default=this_dir+'0_input/1_input_maps/',\n help='Input folder.')\nparser.add_argument('-m', '--masks', type=str, \n default=this_dir+'0_input/2_masks/',\n help='Masks folder.')\nparser.add_argument('-o', '--output', type=str, \n default=this_dir+'0_input/3_output/',\n help='Output folder.')\nparser.add_argument('-x', '--x_mod', type=float, default=0.9,\n help='Parameter for box stretching in x direction')\nparser.add_argument('-y', '--y_mod', type=float, default=0.7,\n help='Parameter for box stretching in y direction')\nparser.add_argument('-b', '--min_box_dim', type=int, default=10,\n help='Minimum bounding box dimension')\nFLAGS = parser.parse_args()\n\ndef list_of_files(working_directory, extension):\n '''Get list of paths to files for further OCR with certain extension'''\n file_to_check = []\n if type(extension) == list:\n extension = tuple(extension) \n for file in os.listdir(working_directory):\n if file.endswith(extension):\n file_to_check.append('{}/{}'.format(working_directory,file))\n return(file_to_check)\n\ndef main():\n before_pred = list_of_files(FLAGS.input, '.jpg')\n after_pred = list_of_files(FLAGS.masks, '.jpg')\n out_folder = FLAGS.output\n \n #Rectangle pred\n min_dim = FLAGS.min_box_dim\n for before in tqdm(before_pred[:]):\n for after in after_pred:\n if before.split('/')[-1] == after.split('/')[-1]:\n true_img = cv2.imread(before)\n\n shape_x = true_img.shape[1]\n shape_y = true_img.shape[0]\n x_multiplier = shape_x / 512\n y_multiplier = shape_y / 512\n\n im = cv2.imread(after)\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n contours, _ = cv2.findContours(gray, cv2.RETR_TREE, \n cv2.CHAIN_APPROX_SIMPLE)\n area_list = [cv2.contourArea(cnt) for cnt in contours]\n contours_filtered = []\n for cnt, area in zip(contours, area_list):\n if area > min_dim * x_multiplier * min_dim * y_multiplier:\n contours_filtered.append(cnt)\n\n true_pred = true_img.copy()\n boxes_list = []\n for cnt in contours_filtered:\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n boxes_list.append(box)\n\n mid_points = []\n length_width = []\n for cnt in boxes_list:\n x = [point[0] for point in cnt]\n y = [point[1] for point in cnt]\n M = cv2.moments(cnt)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX = (max(x) + min(x))/2\n cY = (max(y) + min(y))/2\n\n mid_points.append((cX, cY))\n\n length = max(x) - min(x)\n width = max(y) - min(y)\n length_width.append((length, width))\n\n\n expanded_contours = []\n x_mod = FLAGS.x_mod\n y_mod = FLAGS.y_mod\n for cnt, mid, dims in zip(boxes_list, mid_points, length_width):\n new_cnt = []\n x_mid, y_mid = mid\n length, width = dims\n for point in cnt:\n if point[0] < x_mid and point[1] < y_mid:\n new_point = [[int(point[0]-(((1/x_mod)-1)/2)*length), \n int(point[1]-(((1/y_mod)-1)/2)*width)]]\n elif point[0] < x_mid and point[1] > y_mid:\n new_point = [[int(point[0]-(((1/x_mod)-1)/2)*length), \n int(point[1]+(((1/y_mod)-1)/2)*width)]]\n elif point[0] > x_mid and point[1] > y_mid:\n new_point = [[int(point[0]+(((1/x_mod)-1)/2)*length), \n int(point[1]+(((1/y_mod)-1)/2)*width)]]\n 
elif point[0] > x_mid and point[1] < y_mid:\n new_point = [[int(point[0]+(((1/x_mod)-1)/2)*length), \n int(point[1]-(((1/y_mod)-1)/2)*width)]]\n elif point[0] == x_mid and point[1] > y_mid:\n new_point = [[int(point[0]), \n int(point[1]+(((1/y_mod)-1)/2)*width)]]\n elif point[0] == x_mid and point[1] < y_mid:\n new_point = [[int(point[0]), \n int(point[1]-(((1/y_mod)-1)/2)*width)]]\n elif point[0] > x_mid and point[1] == y_mid:\n new_point = [[int(point[0]+(((1/x_mod)-1)/2)*length), \n int(point[1])]]\n elif point[0] < x_mid and point[1] == y_mid:\n new_point = [[int(point[0]-(((1/x_mod)-1)/2)*length), \n int(point[1])]]\n\n new_cnt.append(new_point)\n new_cnt = np.array(new_cnt, dtype = 'int32')\n expanded_contours.append(new_cnt)\n\n all_rects = [Polygon([tuple(r[0][0]), \n tuple(r[1][0]), tuple(r[2][0]),\n tuple(r[3][0])]) for r in expanded_contours]\n\n\n boxes_to_remove = []\n for rect in expanded_contours:\n p1 = Point(tuple(rect[0][0]))\n p2 = Point(tuple(rect[1][0]))\n p3 = Point(tuple(rect[2][0]))\n p4 = Point(tuple(rect[3][0]))\n\n for poly in all_rects:\n check = [poly.contains(p1), poly.contains(p2), \n poly.contains(p3), poly.contains(p4)]\n if sum(check) == 4:\n p1_coords = list(p1.coords)\n p2_coords = list(p2.coords)\n p3_coords = list(p3.coords)\n p4_coords = list(p4.coords)\n\n box_to_remove = np.array([[list(p1_coords[0])], \n [list(p2_coords[0])], \n [list(p3_coords[0])], \n [list(p4_coords[0])]], \n dtype = 'int32')\n\n boxes_to_remove.append(box_to_remove)\n\n if boxes_to_remove == []:\n filtered_boxes = expanded_contours\n else:\n filtered_boxes = []\n for i in expanded_contours:\n checker = [np.array_equal(i, j) for j in boxes_to_remove]\n if sum(checker) == 0:\n filtered_boxes.append(i)\n\n json_out = []\n for rect in filtered_boxes:\n p1 = list(rect[0][0])\n p2 = list(rect[1][0])\n p3 = list(rect[2][0])\n p4 = list(rect[3][0])\n\n json_out.append((list(map(int, p1)), list(map(int, p2)), \n list(map(int, p3)), list(map(int, p4))))\n \n new_file = before.split('/')[-1].split('.')[0]\n with open(out_folder + '/' + new_file + '.json', 'w') as f:\n json.dump(json_out, f)\n\n dd = cv2.drawContours(true_pred, filtered_boxes, -1, (0,0,255), 3)\n cv2.imwrite(out_folder + '/' + before.split('/')[-1], true_pred) \n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.int0", "numpy.array", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rmgogogo/tfx
[ "8ed47f2570bd01d258d8ee9b1ab001e08d16af89", "8ed47f2570bd01d258d8ee9b1ab001e08d16af89", "8ed47f2570bd01d258d8ee9b1ab001e08d16af89", "8ed47f2570bd01d258d8ee9b1ab001e08d16af89" ]
[ "tfx/utils/io_utils.py", "tfx/components/pusher/executor_test.py", "tfx/components/pusher/component_test.py", "tfx/components/example_gen/big_query_example_gen/executor_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility class for I/O.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom typing import List, Text\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom google.protobuf.message import Message\nfrom tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow_metadata.proto.v0 import schema_pb2\n\n\n# Nano seconds per second.\nNANO_PER_SEC = 1000 * 1000 * 1000\n\n# If path starts with one of those, consider files are in remote filesystem.\n_REMOTE_FS_PREFIX = ['gs://', 'hdfs://', 's3://']\n\n\ndef ensure_local(file_path: Text) -> Text:\n \"\"\"Ensures that the given file path is made available locally.\"\"\"\n if not any([file_path.startswith(prefix) for prefix in _REMOTE_FS_PREFIX]):\n return file_path\n\n local_path = os.path.basename(file_path)\n copy_file(file_path, local_path, True)\n return local_path\n\n\ndef copy_file(src: Text, dst: Text, overwrite: bool = False):\n \"\"\"Copies a single file from source to destination.\"\"\"\n\n if overwrite and tf.io.gfile.exists(dst):\n tf.io.gfile.remove(dst)\n dst_dir = os.path.dirname(dst)\n tf.io.gfile.makedirs(dst_dir)\n tf.io.gfile.copy(src, dst, overwrite=overwrite)\n\n\ndef copy_dir(src: Text, dst: Text) -> None:\n \"\"\"Copies the whole directory recursively from source to destination.\"\"\"\n\n if tf.io.gfile.exists(dst):\n tf.io.gfile.rmtree(dst)\n tf.io.gfile.makedirs(dst)\n\n for dir_name, sub_dirs, leaf_files in tf.io.gfile.walk(src):\n for leaf_file in leaf_files:\n leaf_file_path = os.path.join(dir_name, leaf_file)\n new_file_path = os.path.join(dir_name.replace(src, dst, 1), leaf_file)\n tf.io.gfile.copy(leaf_file_path, new_file_path)\n\n for sub_dir in sub_dirs:\n tf.io.gfile.makedirs(os.path.join(dst, sub_dir))\n\n\ndef get_only_uri_in_dir(dir_path: Text) -> Text:\n \"\"\"Gets the only uri from given directory.\"\"\"\n\n files = tf.io.gfile.listdir(dir_path)\n if len(files) != 1:\n raise RuntimeError(\n 'Only one file per dir is supported: {}.'.format(dir_path))\n filename = os.path.dirname(os.path.join(files[0], ''))\n return os.path.join(dir_path, filename)\n\n\ndef delete_dir(path: Text) -> None:\n \"\"\"Deletes a directory if exists.\"\"\"\n\n if tf.io.gfile.isdir(path):\n tf.io.gfile.rmtree(path)\n\n\ndef write_string_file(file_name: Text, string_value: Text) -> None:\n \"\"\"Writes a string to file.\"\"\"\n\n tf.io.gfile.makedirs(os.path.dirname(file_name))\n file_io.write_string_to_file(file_name, string_value)\n\n\ndef write_pbtxt_file(file_name: Text, proto: Message) -> None:\n \"\"\"Writes a text protobuf to file.\"\"\"\n\n write_string_file(file_name, text_format.MessageToString(proto))\n\n\ndef write_tfrecord_file(file_name: Text, proto: Message) -> None:\n \"\"\"Writes a serialized tfrecord to file.\"\"\"\n\n 
tf.io.gfile.makedirs(os.path.dirname(file_name))\n with tf.io.TFRecordWriter(file_name) as writer:\n writer.write(proto.SerializeToString())\n\n\ndef parse_pbtxt_file(file_name: Text, message: Message) -> Message:\n \"\"\"Parses a protobuf message from a text file and return message itself.\"\"\"\n contents = file_io.read_file_to_string(file_name)\n text_format.Parse(contents, message)\n return message\n\n\ndef load_csv_column_names(csv_file: Text) -> List[Text]:\n \"\"\"Parse the first line of a csv file as column names.\"\"\"\n with file_io.FileIO(csv_file, 'r') as f:\n return f.readline().strip().split(',')\n\n\ndef all_files_pattern(file_pattern: Text) -> Text:\n \"\"\"Returns file pattern suitable for Beam to locate multiple files.\"\"\"\n return '{}*'.format(file_pattern)\n\n\ndef generate_fingerprint(split_name: Text, file_pattern: Text) -> Text:\n \"\"\"Generates a fingerprint for all files that match the pattern.\"\"\"\n files = tf.io.gfile.glob(file_pattern)\n total_bytes = 0\n # Checksum used here is based on timestamp (mtime).\n # Checksums are xor'ed and sum'ed over the files so that they are order-\n # independent.\n xor_checksum = 0\n sum_checksum = 0\n for f in files:\n stat = tf.io.gfile.stat(f)\n total_bytes += stat.length\n # Take mtime only up to second-granularity.\n mtime = int(stat.mtime_nsec / NANO_PER_SEC)\n xor_checksum ^= mtime\n sum_checksum += mtime\n\n return 'split:%s,num_files:%d,total_bytes:%d,xor_checksum:%d,sum_checksum:%d' % (\n split_name, len(files), total_bytes, xor_checksum, sum_checksum)\n\n\nclass SchemaReader(object):\n \"\"\"Schema reader.\"\"\"\n\n def read(self, schema_path: Text) -> schema_pb2.Schema:\n \"\"\"Gets a tf.metadata schema.\n\n Args:\n schema_path: Path to schema file.\n\n Returns:\n A tf.metadata schema.\n \"\"\"\n\n result = schema_pb2.Schema()\n contents = file_io.read_file_to_string(schema_path)\n text_format.Parse(contents, result)\n return result\n", "# Lint as: python2, python3\n# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.pusher.executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nfrom google.protobuf import json_format\nfrom tfx.components.pusher import executor\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import standard_artifacts\n\n\nclass ExecutorTest(tf.test.TestCase):\n\n def setUp(self):\n super(ExecutorTest, self).setUp()\n self._source_data_dir = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), 'testdata')\n self._output_data_dir = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self._testMethodName)\n tf.io.gfile.makedirs(self._output_data_dir)\n self._model_export = standard_artifacts.Model()\n self._model_export.uri = os.path.join(self._source_data_dir,\n 'trainer/current/')\n self._model_blessing = standard_artifacts.ModelBlessing()\n self._input_dict = {\n 'model_export': [self._model_export],\n 'model_blessing': [self._model_blessing],\n }\n\n self._model_push = standard_artifacts.PushedModel()\n self._model_push.uri = os.path.join(self._output_data_dir, 'model_push')\n tf.io.gfile.makedirs(self._model_push.uri)\n self._output_dict = {\n 'model_push': [self._model_push],\n }\n self._serving_model_dir = os.path.join(self._output_data_dir,\n 'serving_model_dir')\n tf.io.gfile.makedirs(self._serving_model_dir)\n self._exec_properties = {\n 'push_destination':\n json_format.MessageToJson(\n pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=self._serving_model_dir)),\n preserving_proto_field_name=True),\n }\n self._executor = executor.Executor()\n\n def testDoBlessed(self):\n self._model_blessing.uri = os.path.join(self._source_data_dir,\n 'model_validator/blessed/')\n self._model_blessing.set_int_custom_property('blessed', 1)\n self._executor.Do(self._input_dict, self._output_dict,\n self._exec_properties)\n self.assertNotEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir)))\n self.assertNotEqual(0, len(tf.io.gfile.listdir(self._model_push.uri)))\n self.assertEqual(\n 1, self._model_push.artifact.custom_properties['pushed'].int_value)\n\n def testDoNotBlessed(self):\n self._model_blessing.uri = os.path.join(self._source_data_dir,\n 'model_validator/not_blessed/')\n self._model_blessing.set_int_custom_property('blessed', 0)\n self._executor.Do(self._input_dict, self._output_dict,\n self._exec_properties)\n self.assertEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir)))\n self.assertEqual(0, len(tf.io.gfile.listdir(self._model_push.uri)))\n self.assertEqual(\n 0, self._model_push.artifact.custom_properties['pushed'].int_value)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Lint as: python2, python3\n# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.pusher.component.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Text\nimport tensorflow as tf\nfrom tfx.components.base import executor_spec\nfrom tfx.components.pusher import component\nfrom tfx.components.pusher import executor\nfrom tfx.orchestration import data_types\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import channel_utils\nfrom tfx.types import standard_artifacts\n\n\nclass ComponentTest(tf.test.TestCase):\n\n class _MyCustomPusherExecutor(executor.Executor):\n \"\"\"Mock class to test custom executor injection.\"\"\"\n pass\n\n def setUp(self):\n super(ComponentTest, self).setUp()\n self.model = channel_utils.as_channel([standard_artifacts.Model()])\n self.model_blessing = channel_utils.as_channel(\n [standard_artifacts.ModelBlessing()])\n\n def testConstruct(self):\n pusher = component.Pusher(\n model=self.model,\n model_blessing=self.model_blessing,\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory='push_destination')))\n self.assertEqual(standard_artifacts.PushedModel.TYPE_NAME,\n pusher.outputs['model_push'].type_name)\n\n def testConstructWithParameter(self):\n push_dir = data_types.RuntimeParameter(name='push-dir', ptype=Text)\n pusher = component.Pusher(\n model=self.model,\n model_blessing=self.model_blessing,\n push_destination={'filesystem': {\n 'base_directory': push_dir\n }})\n self.assertEqual(standard_artifacts.PushedModel.TYPE_NAME,\n pusher.outputs['model_push'].type_name)\n\n def testConstructNoDestination(self):\n with self.assertRaises(ValueError):\n _ = component.Pusher(\n model=self.model,\n model_blessing=self.model_blessing,\n )\n\n def testConstructNoDestinationCustomExecutor(self):\n pusher = component.Pusher(\n model=self.model,\n model_blessing=self.model_blessing,\n custom_executor_spec=executor_spec.ExecutorClassSpec(\n self._MyCustomPusherExecutor),\n )\n self.assertEqual(standard_artifacts.PushedModel.TYPE_NAME,\n pusher.outputs['model_push'].type_name)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Lint as: python2, python3\n# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.example_gen.big_query_example_gen.executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport apache_beam as beam\nfrom apache_beam.testing import util\nimport mock\nimport tensorflow as tf\nfrom google.cloud import bigquery\nfrom google.protobuf import json_format\nfrom tfx.components.example_gen.big_query_example_gen import executor\nfrom tfx.proto import example_gen_pb2\nfrom tfx.types import standard_artifacts\n\n\[email protected]_fn\ndef _MockReadFromBigQuery(pipeline, query): # pylint: disable=invalid-name, unused-argument\n mock_query_results = []\n for i in range(10000):\n mock_query_result = {\n 'i': None if random.randrange(10) == 0 else i,\n 'f': None if random.randrange(10) == 0 else float(i),\n 's': None if random.randrange(10) == 0 else str(i)\n }\n mock_query_results.append(mock_query_result)\n return pipeline | beam.Create(mock_query_results)\n\n\[email protected]_fn\ndef _MockReadFromBigQuery2(pipeline, query): # pylint: disable=invalid-name, unused-argument\n mock_query_results = [{\n 'i': 1,\n 'f': 2.0,\n 's': 'abc',\n }]\n return pipeline | beam.Create(mock_query_results)\n\n\nclass ExecutorTest(tf.test.TestCase):\n\n def setUp(self):\n # Mock BigQuery result schema.\n self._schema = [\n bigquery.SchemaField('i', 'INTEGER', mode='REQUIRED'),\n bigquery.SchemaField('f', 'FLOAT', mode='REQUIRED'),\n bigquery.SchemaField('s', 'STRING', mode='REQUIRED'),\n ]\n super(ExecutorTest, self).setUp()\n\n @mock.patch.multiple(\n executor,\n _ReadFromBigQuery=_MockReadFromBigQuery2, # pylint: disable=invalid-name, unused-argument\n )\n @mock.patch.object(bigquery, 'Client')\n def testBigQueryToExample(self, mock_client):\n # Mock query result schema for _BigQueryConverter.\n mock_client.return_value.query.return_value.result.return_value.schema = self._schema\n\n with beam.Pipeline() as pipeline:\n examples = (\n pipeline | 'ToTFExample' >> executor._BigQueryToExample(\n input_dict={},\n exec_properties={},\n split_pattern='SELECT i, f, s FROM `fake`'))\n\n feature = {}\n feature['i'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))\n feature['f'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=[2.0]))\n feature['s'] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes('abc')]))\n example_proto = tf.train.Example(\n features=tf.train.Features(feature=feature))\n util.assert_that(examples, util.equal_to([example_proto]))\n\n @mock.patch.multiple(\n executor,\n _ReadFromBigQuery=_MockReadFromBigQuery, # pylint: disable=invalid-name, unused-argument\n )\n @mock.patch.object(bigquery, 'Client')\n def testDo(self, mock_client):\n # Mock query result schema for _BigQueryConverter.\n mock_client.return_value.query.return_value.result.return_value.schema = self._schema\n\n output_data_dir = os.path.join(\n 
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self._testMethodName)\n\n # Create output dict.\n train_examples = standard_artifacts.Examples(split='train')\n train_examples.uri = os.path.join(output_data_dir, 'train')\n eval_examples = standard_artifacts.Examples(split='eval')\n eval_examples.uri = os.path.join(output_data_dir, 'eval')\n output_dict = {'examples': [train_examples, eval_examples]}\n\n # Create exe properties.\n exec_properties = {\n 'input_config':\n json_format.MessageToJson(\n example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(\n name='bq', pattern='SELECT i, f, s FROM `fake`'),\n ]),\n preserving_proto_field_name=True),\n 'output_config':\n json_format.MessageToJson(\n example_gen_pb2.Output(\n split_config=example_gen_pb2.SplitConfig(splits=[\n example_gen_pb2.SplitConfig.Split(\n name='train', hash_buckets=2),\n example_gen_pb2.SplitConfig.Split(\n name='eval', hash_buckets=1)\n ])),\n preserving_proto_field_name=True)\n }\n\n # Run executor.\n big_query_example_gen = executor.Executor()\n big_query_example_gen.Do({}, output_dict, exec_properties)\n\n # Check BigQuery example gen outputs.\n train_output_file = os.path.join(train_examples.uri,\n 'data_tfrecord-00000-of-00001.gz')\n eval_output_file = os.path.join(eval_examples.uri,\n 'data_tfrecord-00000-of-00001.gz')\n self.assertTrue(tf.io.gfile.exists(train_output_file))\n self.assertTrue(tf.io.gfile.exists(eval_output_file))\n self.assertGreater(\n tf.io.gfile.GFile(train_output_file).size(),\n tf.io.gfile.GFile(eval_output_file).size())\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.io.TFRecordWriter", "tensorflow.io.gfile.isdir", "tensorflow.io.gfile.walk", "tensorflow.io.gfile.exists", "tensorflow.python.lib.io.file_io.FileIO", "tensorflow.io.gfile.stat", "tensorflow.python.lib.io.file_io.write_string_to_file", "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.glob", "tensorflow.python.lib.io.file_io.read_file_to_string", "tensorflow.io.gfile.remove", "tensorflow.io.gfile.listdir", "tensorflow.io.gfile.rmtree", "tensorflow.io.gfile.copy" ], [ "tensorflow.io.gfile.listdir", "tensorflow.io.gfile.makedirs", "tensorflow.test.main" ], [ "tensorflow.test.main" ], [ "tensorflow.io.gfile.exists", "tensorflow.io.gfile.GFile", "tensorflow.test.main", "tensorflow.compat.as_bytes", "tensorflow.train.Features", "tensorflow.train.FloatList", "tensorflow.train.Int64List" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shashank3959/NAS-Projects
[ "5eed8101a78d223a20a43494176051298b24ac3a", "2c0577231a52375de5ebd7a588750899a8c7bf1c" ]
[ "others/GDAS/lib/nas_rnn/model_search.py", "lib/nas_infer_model/operations.py" ]
[ "import copy, torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import namedtuple\nfrom .genotypes import PRIMITIVES, STEPS, CONCAT, Genotype\nfrom .basemodel import DARTSCell, RNNModel\n\n\nclass DARTSCellSearch(DARTSCell):\n\n def __init__(self, ninp, nhid, dropouth, dropoutx):\n super(DARTSCellSearch, self).__init__(ninp, nhid, dropouth, dropoutx, genotype=None)\n self.bn = nn.BatchNorm1d(nhid, affine=False)\n self.check_zero = False\n\n def set_check(self, check_zero):\n self.check_zero = check_zero\n\n def cell(self, x, h_prev, x_mask, h_mask, arch_probs):\n s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)\n s0 = self.bn(s0)\n if self.check_zero:\n arch_probs_cpu = arch_probs.cpu().tolist()\n #arch_probs = F.softmax(self.weights, dim=-1)\n\n offset = 0\n states = s0.unsqueeze(0)\n for i in range(STEPS):\n if self.training:\n masked_states = states * h_mask.unsqueeze(0)\n else:\n masked_states = states\n ch = masked_states.view(-1, self.nhid).mm(self._Ws[i]).view(i+1, -1, 2*self.nhid)\n c, h = torch.split(ch, self.nhid, dim=-1)\n c = c.sigmoid()\n\n s = torch.zeros_like(s0)\n for k, name in enumerate(PRIMITIVES):\n if name == 'none':\n continue\n fn = self._get_activation(name)\n unweighted = states + c * (fn(h) - states)\n if self.check_zero:\n INDEX, INDDX = [], []\n for jj in range(offset, offset+i+1):\n if arch_probs_cpu[jj][k] > 0:\n INDEX.append(jj)\n INDDX.append(jj-offset)\n if len(INDEX) == 0: continue\n s += torch.sum(arch_probs[INDEX, k].unsqueeze(-1).unsqueeze(-1) * unweighted[INDDX, :, :], dim=0)\n else:\n s += torch.sum(arch_probs[offset:offset+i+1, k].unsqueeze(-1).unsqueeze(-1) * unweighted, dim=0)\n s = self.bn(s)\n states = torch.cat([states, s.unsqueeze(0)], 0)\n offset += i+1\n output = torch.mean(states[-CONCAT:], dim=0)\n return output\n\n\nclass RNNModelSearch(RNNModel):\n\n def __init__(self, *args):\n super(RNNModelSearch, self).__init__(*args)\n self._args = copy.deepcopy( args )\n\n k = sum(i for i in range(1, STEPS+1))\n self.arch_weights = nn.Parameter(torch.Tensor(k, len(PRIMITIVES)))\n nn.init.normal_(self.arch_weights, 0, 0.001)\n\n def base_parameters(self):\n lists = list(self.lockdrop.parameters())\n lists += list(self.encoder.parameters())\n lists += list(self.rnns.parameters())\n lists += list(self.decoder.parameters())\n return lists\n\n def arch_parameters(self):\n return [self.arch_weights]\n\n def genotype(self):\n\n def _parse(probs):\n gene = []\n start = 0\n for i in range(STEPS):\n end = start + i + 1\n W = probs[start:end].copy()\n #j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[0]\n j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) ))[0]\n k_best = None\n for k in range(len(W[j])):\n #if k != PRIMITIVES.index('none'):\n # if k_best is None or W[j][k] > W[j][k_best]:\n # k_best = k\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((PRIMITIVES[k_best], j))\n start = end\n return gene\n\n with torch.no_grad():\n gene = _parse(F.softmax(self.arch_weights, dim=-1).cpu().numpy())\n genotype = Genotype(recurrent=gene, concat=list(range(STEPS+1)[-CONCAT:]))\n return genotype\n", "##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #\n##################################################\nimport torch\nimport torch.nn as nn\n\nOPS = {\n 'none' : lambda C_in, C_out, stride, affine: Zero(stride),\n 'avg_pool_3x3' : lambda C_in, C_out, stride, affine: 
POOLING(C_in, C_out, stride, 'avg'),\n 'max_pool_3x3' : lambda C_in, C_out, stride, affine: POOLING(C_in, C_out, stride, 'max'),\n 'nor_conv_7x7' : lambda C_in, C_out, stride, affine: ReLUConvBN(C_in, C_out, (7,7), (stride,stride), (3,3), affine),\n 'nor_conv_3x3' : lambda C_in, C_out, stride, affine: ReLUConvBN(C_in, C_out, (3,3), (stride,stride), (1,1), affine),\n 'nor_conv_1x1' : lambda C_in, C_out, stride, affine: ReLUConvBN(C_in, C_out, (1,1), (stride,stride), (0,0), affine),\n 'skip_connect' : lambda C_in, C_out, stride, affine: Identity() if stride == 1 and C_in == C_out else FactorizedReduce(C_in, C_out, stride, affine),\n 'sep_conv_3x3' : lambda C_in, C_out, stride, affine: SepConv(C_in, C_out, 3, stride, 1, affine=affine),\n 'sep_conv_5x5' : lambda C_in, C_out, stride, affine: SepConv(C_in, C_out, 5, stride, 2, affine=affine),\n 'sep_conv_7x7' : lambda C_in, C_out, stride, affine: SepConv(C_in, C_out, 7, stride, 3, affine=affine),\n 'dil_conv_3x3' : lambda C_in, C_out, stride, affine: DilConv(C_in, C_out, 3, stride, 2, 2, affine=affine),\n 'dil_conv_5x5' : lambda C_in, C_out, stride, affine: DilConv(C_in, C_out, 5, stride, 4, 2, affine=affine),\n 'conv_7x1_1x7' : lambda C_in, C_out, stride, affine: Conv717(C_in, C_out, stride, affine),\n 'conv_3x1_1x3' : lambda C_in, C_out, stride, affine: Conv313(C_in, C_out, stride, affine)\n}\n\n\nclass POOLING(nn.Module):\n\n def __init__(self, C_in, C_out, stride, mode):\n super(POOLING, self).__init__()\n if C_in == C_out:\n self.preprocess = None\n else:\n self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0)\n if mode == 'avg' : self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)\n elif mode == 'max': self.op = nn.MaxPool2d(3, stride=stride, padding=1)\n\n def forward(self, inputs):\n if self.preprocess is not None:\n x = self.preprocess(inputs)\n else: x = inputs\n return self.op(x)\n\n\nclass Conv313(nn.Module):\n\n def __init__(self, C_in, C_out, stride, affine):\n super(Conv313, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in , C_out, (1,3), stride=(1, stride), padding=(0, 1), bias=False),\n nn.Conv2d(C_out, C_out, (3,1), stride=(stride, 1), padding=(1, 0), bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass Conv717(nn.Module):\n\n def __init__(self, C_in, C_out, stride, affine):\n super(Conv717, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in , C_out, (1,7), stride=(1, stride), padding=(0, 3), bias=False),\n nn.Conv2d(C_out, C_out, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass ReLUConvBN(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(ReLUConvBN, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass DilConv(nn.Module):\n \n def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):\n super(DilConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n 
)\n\n def forward(self, x):\n return self.op(x)\n\n\nclass SepConv(nn.Module):\n \n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(SepConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_in, affine=affine),\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride= 1, padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass Identity(nn.Module):\n\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass Zero(nn.Module):\n\n def __init__(self, stride):\n super(Zero, self).__init__()\n self.stride = stride\n\n def forward(self, x):\n if self.stride == 1:\n return x.mul(0.)\n return x[:,:,::self.stride,::self.stride].mul(0.)\n\n def extra_repr(self):\n return 'stride={stride}'.format(**self.__dict__)\n\n\nclass FactorizedReduce(nn.Module):\n\n def __init__(self, C_in, C_out, stride, affine=True):\n super(FactorizedReduce, self).__init__()\n self.stride = stride\n self.C_in = C_in \n self.C_out = C_out \n self.relu = nn.ReLU(inplace=False)\n if stride == 2:\n #assert C_out % 2 == 0, 'C_out : {:}'.format(C_out)\n C_outs = [C_out // 2, C_out - C_out // 2]\n self.convs = nn.ModuleList()\n for i in range(2):\n self.convs.append( nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False) )\n self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)\n elif stride == 4:\n assert C_out % 4 == 0, 'C_out : {:}'.format(C_out)\n self.convs = nn.ModuleList()\n for i in range(4):\n self.convs.append( nn.Conv2d(C_in, C_out // 4, 1, stride=stride, padding=0, bias=False) )\n self.pad = nn.ConstantPad2d((0, 3, 0, 3), 0)\n else:\n raise ValueError('Invalid stride : {:}'.format(stride))\n \n self.bn = nn.BatchNorm2d(C_out, affine=affine)\n\n def forward(self, x):\n x = self.relu(x)\n y = self.pad(x)\n if self.stride == 2:\n out = torch.cat([self.convs[0](x), self.convs[1](y[:,:,1:,1:])], dim=1)\n else:\n out = torch.cat([self.convs[0](x), self.convs[1](y[:,:,1:-2,1:-2]),\n self.convs[2](y[:,:,2:-1,2:-1]), self.convs[3](y[:,:,3:,3:])], dim=1)\n out = self.bn(out)\n return out\n\n def extra_repr(self):\n return 'C_in={C_in}, C_out={C_out}, stride={stride}'.format(**self.__dict__)\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.mean", "torch.nn.functional.softmax", "torch.zeros_like", "torch.nn.init.normal_", "torch.no_grad", "torch.split" ], [ "torch.nn.ConstantPad2d", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MarvinLavechin/daem
[ "2994297af7547510d00c64aa193a12021cd557e5" ]
[ "tools/reconstruct.py" ]
[ "import numpy as np\nfrom tifffile import imread as tifread\nfrom matplotlib import pyplot as plt\nfrom skimage.measure import label\n\n\ndef label_cytoplasma_and_overlap(data):\n \"\"\"\n Transform a volume of labels to a stack of two channels. The first channel is the cytoplasma label and the second\n channel is the overlap between adjacent labels.\n\n Note: This data structure can be used as output image for the GAN.\n\n Type signature: int (size, image_height, image_width) --> bool (size, image_height, image_width, 2)\n\n Args:\n data: int (size, image_height, image_width)\n\n Returns:\n two_channels: bool (size, image_height, image_width, 2)\n \"\"\"\n\n sz, sy, sx = data.shape\n two_channels = np.zeros((sz, sy, sx, 2))\n\n # Channel 0: Cytoplasma\n # If label is not membrane (label == 0)\n # and if label does not change in either y or x direction (without this condition labels touch each other)\n two_channels[:, :, :, 0] = data != 0\n two_channels[:, :-1, :, 0] *= np.diff(data, n=1, axis=1) == 0\n two_channels[:, :, :-1, 0] *= np.diff(data, n=1, axis=2) == 0\n\n # Channel 1: Overlap of cytoplasma with same label:\n # If label does not change in z direction\n # and if label is cytoplasma\n two_channels[:-1, :, :, 1] = np.diff(data, n=1, axis=0) == 0\n two_channels[:, :, :, 1] *= two_channels[:, :, :, 0]\n\n two_channels *= 255 # Gray scale values between 0 and 255\n return two_channels\n\n\ndef stack_cytoplasma_and_overlap(two_channels, method='zip'):\n \"\"\"\n Interleave the two channels in alternating fashing to obtain the following stack of cytoplasma slices and\n connecting slices:\n\n cytoplasma -> overlap -> cytoplasma -> overlap -> .. -> cytoplasma -> overlap\n\n Note: The last overlap section is empty.\n\n Type signature: bool (size, image_height, image_width, 2) --> bool (2 * size, image_height, image_width)\n\n Args:\n two_channels: bool (size, image_height, image_width, 2)\n method: (in the result it makes no difference)\n 'zip': using zip (default)\n 'swapchannels': using numpy.swapaxes (looks better)\n\n Returns:\n stack: bool (2 * size, image_height, image_width)\n \"\"\"\n\n sz, sy, sx, sc = two_channels.shape\n\n # TODO: Measure which method is faster.\n\n if method == 'zip': # (sz, sy, sx, sc) --> (sz, sc, sy, sx)\n stack = np.array( zip(two_channels[:, :, :, 0], two_channels[:, :, :, 1]) )\n\n if method == 'swapaxes': # (sz, sy, sx, sc) --> (sz, sc, sx, sy) --> (sz, sc, sy, sx)\n stack = two_channels.swapaxes(1, 3).swapaxes(2, 3)\n\n # (sz, sc, sy, sx) --> (sz * sc, sy, sx)\n stack = np.resize(stack,(2*sz, sy, sx))\n\n return stack\n\n\ndef relabel_and_slice(stack):\n \"\"\"\n Relabel the connected components that is the cytoplasma slices and the connecting slices.\n Returns only the cytoplasma labels by discarding the interleaving labelled connecting slices.\n\n stack relabeled stack relabel\n\n cytoplasma section [ ] [+] [+] [ ] [1] [1]\n : :\n overlap section [ ] [+] [ ] [ ] [1] [ ] [ ] [1] [1] relabel section\n | | :\n cytoplasma section [+]--[+]--[ ] --> [1]--[1]--[ ] --> [1]--[1]--[ ] relabel section\n | | :\n overlap section [ ] [ ] [ ] [ ] [ ] [ ] [ ] [2] [2] relabel section\n : :\n cytoplasma section [ ] [+] [+] [ ] [2] [2]\n\n\n Note: Only orthogonal connected voxels are treated as a neighbor (1-connectivity).\n See: http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.label\n\n Type signature: bool (2 * size, image_height, image_width) --> int (size, image_height, image_width)\n\n Args:\n stack: bool (2 * size, image_height, 
image_width)\n\n Returns:\n relabel: int (size, image_height, image_width)\n\n \"\"\"\n relabel = label(stack, connectivity=1)\n relabel = relabel [0::2]\n return relabel\n\n\ndef test_relabeling():\n # Test conversion of three dimensional region labelling into cytoplasma and overlap and reconstruction from that\n # TODO Prediction of cytoplasma (membranes) and overlap from (adjacent) EM images\n\n # Get 3D dataset as multi tiff file\n # TODO Load from series of png images (!)\n filename = 'cortex/temp/train-labels.tif'\n data = tifread('../datasets/' + filename)\n\n plt.subplot(241)\n plt.imshow(data[0])\n plt.title('label [z=0,:,:]')\n plt.subplot(245)\n plt.imshow(data[1])\n plt.title('label [z=1,:,:]')\n two_channels = label_cytoplasma_and_overlap(data)\n plt.subplot(242)\n plt.imshow(two_channels[0, :, :, 0])\n plt.title('cytoplasma [z=0,:,:]')\n plt.subplot(246)\n plt.imshow(two_channels[0, :, :, 1])\n plt.title('overlap [z=0,:,:]')\n stack = stack_cytoplasma_and_overlap(two_channels)\n plt.subplot(243)\n plt.imshow(stack[0, :, :])\n plt.title('stack [z=0,:,:]')\n plt.subplot(247)\n plt.imshow(stack[1, :, :])\n plt.title('stack [z=0+1/2,:,:]')\n relabel = relabel_and_slice(stack)\n plt.subplot(244)\n plt.imshow(relabel[0, :, :])\n plt.title('relabel [z=0,:,:]')\n plt.subplot(248)\n plt.imshow(relabel[1, :, :])\n plt.title('relabel [z=1,:,:]')\n plt.show()\n\n\nif __name__ == '__main__':\n test_relabeling()\n\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.resize", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "numpy.diff", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
8-prime/trading-bot
[ "531b13df0fda0603ebc272cc32ed4b4fbe45aa70" ]
[ "scrape.py" ]
[ "import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n\n'''\nTakes an integer specifiying how many of the top changers are to be returned\nTakes Yahoo finance and parses for a list which is the list of the top gainers and then returns the first n entries\n'''\ndef get_daily_top_n (top_n):\n URL = 'https://finance.yahoo.com/screener/predefined/day_gainers?guce_referrer=aHR0cHM6Ly93d3cuZ29vZ2xlLmNvbS8&guce_referrer_sig=AQAAAJPzhFJgI--8KNKwgdiM8Kk7WMl_EHqQdXXO5CTmur7k9dFFg15hppLMOhEDIO1kXDNbZHUeWbHd_C0YlFu7OQAvpyolavIM_C0mLJMi0KYNalp-jWYFz70rGmTjS96gm-8ZuMwPME_JOKIJPtZdeTnDHaBUvfar2oqZfEND0wIl&_guc_consent_skip=1596900194'\n page = requests.get(URL)\n\n df_list = pd.read_html(page.text)\n df = df_list[0]\n return(df['Symbol'][:top_n])" ]
[ [ "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
AaronJny/luwu
[ "05ee0bc605926661e42cada6cff5e281f4506291" ]
[ "luwu/run.py" ]
[ "# -*- coding: utf-8 -*-\n# @Author : AaronJny\n# @LastEditTime : 2021-01-29\n# @FilePath : /LuWu/luwu/run.py\n# @Desc :\nimport time\nimport traceback\nfrom multiprocessing import Process\nimport os\nimport sys\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\n\nimport tensorflow as tf\nfrom loguru import logger\n\n# 引入顺序不能变动,必须先执行此段代码,才能引入luwu下的模块\ngpus = tf.config.experimental.list_physical_devices(device_type=\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\nfrom luwu.backend import app\nfrom luwu.scripts import scheduler\nfrom luwu.backend.config import Config\n\n\ndef init_luwu_dir():\n dir_name = Config.LUWU_DIR\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef run():\n while True:\n processes = []\n init_luwu_dir()\n try:\n # 启动Web服务\n logger.info(\"正在启动 Web 服务进程...\")\n web_process = Process(target=app.run)\n processes.append(web_process)\n web_process.start()\n\n time.sleep(20)\n\n # 启动调度进程\n logger.info(\"正在启动任务调度进程...\")\n scheduler_process = Process(target=scheduler.run)\n processes.append(scheduler_process)\n scheduler_process.start()\n\n for process in processes:\n process.join()\n\n except KeyboardInterrupt:\n logger.info(\"收到终止信号,正在关闭所有进程...\")\n for process in processes:\n if process.is_alive():\n process.terminate()\n if process.is_alive():\n process.kill()\n logger.info(\"关闭完成!结束程序!\")\n break\n except Exception as e:\n logger.error(e)\n logger.error(traceback.format_exc())\n # 收到终止信号或抛出异常时,关闭所有进程后再退出\n logger.info(\"出现异常!正在关闭所有进程...\")\n for process in processes:\n if process.is_alive():\n process.terminate()\n if process.is_alive():\n process.kill()\n logger.info(\"关闭完成!正在重试...!\")\n time.sleep(10)\n\n\nif __name__ == \"__main__\":\n run()\n" ]
[ [ "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.set_memory_growth" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mohsenhariri/ml-simple-models
[ "32b469eadd2880f3f55cfe104cea314b326cedd6" ]
[ "data_augmentation/ds_builder_3visualization.py" ]
[ "from data_augmentation.ds_builder_2builder import train_loader\nimport torchvision\nimport matplotlib.pyplot as plt\n\nsample_dataset_batch = next(iter(train_loader))\nsample_input_batch = sample_dataset_batch[0]\nsample_label_batch = sample_dataset_batch[1]\n\nimg_grid = torchvision.utils.make_grid(sample_input_batch)\n# print(img_grid.size())\n# print(img_grid.permute(1, 2, 0).size())\nplt.imshow(img_grid.permute(1, 2, 0))\n\"\"\"\nabout permute: \nhttps://stackoverflow.com/questions/51329159/how-can-i-generate-and-display-a-grid-of-images-in-pytorch-with-plt-imshow-and-t\ntorchvision.utils.make_grid() returns a tensor which contains the grid of images. But the channel dimension has to be moved to the end since that's what matplotlib recognizes\n\"\"\"\nplt.show()" ]
[ [ "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
j-adamczyk/Numerical-Algorithms
[ "47cfa8154bab448d1bf87b892d83e45c68dd2e2a" ]
[ "lab4_simulated_annealing/task_1/plotter.py" ]
[ "import matplotlib.pyplot as plt\n\n\ndef plot_data(first_path, best_path, distances_plot_data, temperatures_plot_data):\n first_path_xs = []\n first_path_ys = []\n for city in first_path:\n first_path_xs.append(city[0])\n first_path_ys.append(city[1])\n\n first_path_xs.append(first_path_xs[0])\n first_path_ys.append(first_path_ys[0])\n\n best_path_xs = []\n best_path_ys = []\n for city in best_path:\n best_path_xs.append(city[0])\n best_path_ys.append(city[1])\n\n best_path_xs.append(best_path_xs[0])\n best_path_ys.append(best_path_ys[0])\n\n temperatures_xs = temperatures_plot_data[0]\n temperatures_ys = temperatures_plot_data[1]\n\n distances_xs = distances_plot_data[0]\n distances_ys = distances_plot_data[1]\n\n f, axarr = plt.subplots(2, 2)\n axarr[0, 0].plot(first_path_xs, first_path_ys, marker=\"o\", markerfacecolor=\"red\")\n axarr[0, 0].set_title(\"Before annealing\")\n axarr[0, 1].plot(best_path_xs, best_path_ys, marker=\"o\", markerfacecolor=\"red\")\n axarr[0, 1].set_title(\"After annealing\")\n axarr[1, 0].plot(temperatures_xs, temperatures_ys)\n axarr[1, 0].set_title(\"Temperature\")\n axarr[1, 1].plot(distances_xs, distances_ys)\n axarr[1, 1].set_title(\"Distance\")\n\n plt.show()\n\n\ndef plot_iterations_and_distances(iterations, distances):\n plt.plot(iterations, distances)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lukepfister/scico
[ "c849c4fa6089b99d9a4dec520c9a04cca426d2d7", "c849c4fa6089b99d9a4dec520c9a04cca426d2d7", "c849c4fa6089b99d9a4dec520c9a04cca426d2d7" ]
[ "scico/test/linop/test_matrix.py", "examples/scripts/pgm_stepsize_poisson.py", "scico/_generic_operators.py" ]
[ "import operator as op\n\nimport numpy as np\n\nimport jax\nfrom jax.interpreters.xla import DeviceArray\n\nimport pytest\n\nimport scico.numpy as snp\nfrom scico import linop\nfrom scico.linop import MatrixOperator\nfrom scico.random import randn\nfrom scico.test.linop.test_linop import AbsMatOp\n\n\nclass TestMatrix:\n def setup_method(self, method):\n self.key = jax.random.PRNGKey(12345)\n\n @pytest.mark.parametrize(\"input_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", [(3, 3), (3, 4)])\n def test_eval(self, input_shape, input_dtype):\n\n A, key = randn(input_shape, dtype=input_dtype, key=self.key)\n Ao = MatrixOperator(A)\n\n x, key = randn(Ao.input_shape, dtype=Ao.input_dtype, key=key)\n np.testing.assert_allclose(A @ x, Ao @ x)\n\n # Invalid shapes\n with pytest.raises(TypeError):\n y, key = randn((64,), dtype=Ao.input_dtype, key=key)\n Ao @ y\n\n @pytest.mark.parametrize(\"input_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", [(3, 3), (3, 4)])\n def test_adjoint(self, input_shape, input_dtype):\n\n A, key = randn(input_shape, dtype=input_dtype, key=self.key)\n Ao = MatrixOperator(A)\n\n x, key = randn(Ao.output_shape, dtype=Ao.input_dtype, key=key)\n np.testing.assert_allclose(A.conj().T @ x, Ao.conj().T @ x)\n\n @pytest.mark.parametrize(\"input_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", [(3, 3), (3, 4)])\n def test_adjoint_method(self, input_shape, input_dtype):\n A, key = randn(input_shape, dtype=input_dtype, key=self.key)\n Ao = MatrixOperator(A)\n x, key = randn(Ao.output_shape, dtype=Ao.input_dtype, key=key)\n np.testing.assert_allclose(Ao.adj(x), Ao.conj().T @ x)\n\n @pytest.mark.parametrize(\"input_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", [(3, 3), (3, 4)])\n def test_hermetian_method(self, input_shape, input_dtype):\n A, key = randn(input_shape, dtype=input_dtype, key=self.key)\n Ao = MatrixOperator(A)\n x, key = randn(Ao.output_shape, dtype=Ao.input_dtype, key=key)\n np.testing.assert_allclose(Ao.H @ x, Ao.conj().T @ x)\n\n @pytest.mark.parametrize(\"input_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", [(3, 3), (3, 4)])\n def test_gram_method(self, input_shape, input_dtype):\n A, key = randn(input_shape, dtype=input_dtype, key=self.key)\n Ao = MatrixOperator(A)\n x, key = randn(Ao.input_shape, dtype=Ao.input_dtype, key=key)\n np.testing.assert_allclose(Ao.gram(x), A.conj().T @ A @ x, rtol=5e-5)\n\n @pytest.mark.parametrize(\"input_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", [(3, 3), (3, 4)])\n def test_gram_op(self, input_shape, input_dtype):\n A, key = randn(input_shape, dtype=input_dtype, key=self.key)\n Ao = MatrixOperator(A)\n G = Ao.gram_op\n x, key = randn(Ao.input_shape, dtype=Ao.input_dtype, key=key)\n np.testing.assert_allclose(G @ x, A.conj().T @ A @ x, rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub])\n def test_add_sub(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 6), key=key)\n C, key = randn((4, 4), key=key)\n x, key = randn((6,), key=key)\n Ao = MatrixOperator(A)\n Bo = MatrixOperator(B)\n Co = MatrixOperator(C)\n\n ABx = operator(Ao, Bo) @ x\n AxBx = operator(Ao @ x, Bo @ x)\n np.testing.assert_allclose(ABx, AxBx, rtol=5e-5)\n\n with pytest.raises(ValueError):\n operator(Ao, Co)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub, op.mul, op.truediv])\n def test_scalar_left(self, 
operator):\n scalar = np.float32(np.random.randn())\n\n A, key = randn((4, 6), key=self.key)\n x, key = randn((6,), key=key)\n Ao = MatrixOperator(A)\n\n np.testing.assert_allclose(operator(scalar, Ao) @ x, operator(scalar, A) @ x, rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub, op.mul, op.truediv])\n def test_scalar_right(self, operator):\n scalar = np.float32(np.random.randn())\n\n A, key = randn((4, 6), key=self.key)\n x, key = randn((6,), key=key)\n Ao = MatrixOperator(A)\n\n np.testing.assert_allclose(operator(Ao, scalar) @ x, operator(A, scalar) @ x, rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub, op.mul, op.truediv])\n def test_elementwise_matops(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 6), key=key)\n\n Ao = MatrixOperator(A)\n Bo = MatrixOperator(B)\n\n np.testing.assert_allclose(operator(Ao, Bo).A, operator(A, B), rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub, op.mul, op.truediv])\n def test_elementwise_array_left(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 6), key=key)\n Ao = MatrixOperator(A)\n Bo = MatrixOperator(B)\n np.testing.assert_allclose(operator(Ao, B).A, operator(A, B), rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub, op.mul, op.truediv])\n def test_elementwise_array_right(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 6), key=key)\n Ao = MatrixOperator(A)\n Bo = MatrixOperator(B)\n np.testing.assert_allclose(operator(A, Bo).A, operator(A, B), rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub, op.mul, op.truediv])\n def test_elementwise_matop_shape_mismatch(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 4), key=key)\n Ao = MatrixOperator(A)\n Bo = MatrixOperator(B)\n with pytest.raises(ValueError):\n operator(Ao, Bo)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub, op.mul, op.truediv])\n def test_elementwise_array_shape_mismatch(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 4), key=key)\n Ao = MatrixOperator(A)\n Bo = MatrixOperator(B)\n with pytest.raises(ValueError):\n operator(Ao, B)\n\n with pytest.raises(ValueError):\n operator(B, Ao)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub])\n def test_elementwise_linop(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 6), key=key)\n Ao = MatrixOperator(A)\n Bo = AbsMatOp(B)\n x, key = randn(Ao.input_shape, dtype=Ao.input_dtype, key=key)\n\n np.testing.assert_allclose(operator(Ao, Bo) @ x, operator(Ao @ x, Bo @ x), rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub])\n def test_elementwise_linop_mismatch(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 4), key=key)\n Ao = MatrixOperator(A)\n Bo = AbsMatOp(B)\n with pytest.raises(ValueError):\n operator(Ao, Bo)\n\n @pytest.mark.parametrize(\"operator\", [op.mul, op.truediv])\n def test_elementwise_linop_invalid(self, operator):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((4, 6), key=key)\n Ao = MatrixOperator(A)\n Bo = AbsMatOp(B)\n with pytest.raises(TypeError):\n operator(Ao, Bo)\n\n with pytest.raises(TypeError):\n operator(Bo, Ao)\n\n def test_matmul(self):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((6, 3), key=key)\n Ao = MatrixOperator(A)\n Bo = MatrixOperator(B)\n x, key = randn(Bo.input_shape, dtype=Ao.input_dtype, key=key)\n\n AB = Ao @ Bo\n np.testing.assert_allclose((Ao @ Bo) 
@ x, Ao @ (Bo @ x), rtol=5e-5)\n\n def test_matmul_linop(self):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((6, 3), key=key)\n Ao = MatrixOperator(A)\n Bo = AbsMatOp(B)\n x, key = randn(Bo.input_shape, dtype=Ao.input_dtype, key=key)\n\n AB = Ao @ Bo\n np.testing.assert_allclose((Ao @ Bo) @ x, Ao @ (Bo @ x), rtol=5e-5)\n\n def test_matmul_linop_shape_mismatch(self):\n A, key = randn((4, 6), key=self.key)\n B, key = randn((5, 3), key=key)\n Ao = MatrixOperator(A)\n Bo = AbsMatOp(B)\n with pytest.raises(ValueError):\n Ao @ Bo\n\n def test_matmul_identity(self):\n A, key = randn((4, 6), key=self.key)\n Ao = MatrixOperator(A)\n I = linop.Identity(input_shape=(6,))\n assert Ao == Ao @ I\n\n def test_init_devicearray(self):\n A = np.random.randn(4, 6)\n Ao = MatrixOperator(A)\n assert isinstance(Ao.A, DeviceArray)\n\n with pytest.raises(TypeError):\n MatrixOperator([1.0, 3.0])\n\n @pytest.mark.parametrize(\"input_shape\", [(3,), (2, 3, 4)])\n def test_init_wrong_dims(self, input_shape):\n A = np.random.randn(*input_shape)\n with pytest.raises(TypeError):\n Ao = MatrixOperator(A)\n\n def test_to_array(self):\n A = np.random.randn(4, 6)\n Ao = MatrixOperator(A)\n A_array = Ao.to_array()\n assert isinstance(A_array, np.ndarray)\n np.testing.assert_allclose(A_array, A)\n\n @pytest.mark.parametrize(\"ord\", [\"fro\", 2])\n @pytest.mark.parametrize(\"axis\", [None, 0, 1])\n @pytest.mark.parametrize(\"keepdims\", [True, False])\n @pytest.mark.parametrize(\"input_dtype\", [np.float32, np.complex64])\n def test_norm(self, ord, axis, keepdims, input_dtype):\n A, key = randn((4, 6), dtype=input_dtype, key=self.key)\n Ao = MatrixOperator(A)\n\n if ord == \"fro\" and axis is not None:\n # Not defined;\n pass\n else:\n x = Ao.norm(ord=ord, axis=axis, keepdims=keepdims)\n y = snp.linalg.norm(A, ord=ord, axis=axis, keepdims=keepdims)\n np.testing.assert_allclose(x, y, rtol=5e-5)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This file is part of the SCICO package. Details of the copyright\n# and user license can be found in the 'LICENSE.txt' file distributed\n# with the package.\n\nr\"\"\"\nNon-negative Poisson Loss Reconstruction (AcceleratedPGM w/ adaptive PGMStepSize)\n=============================================\n\nThis example demonstrates the use of class [pgm.PGMStepSize](../_autosummary/scico.pgm.rst#scico.pgm.PGMStepSize) to solve the non-negative reconstruction problem with Poisson negative log likelihood loss\n\n $$\\mathrm{argmin}_{\\mathbf{x}} \\; \\frac{1}{2} \\left ( A \\mathbf{x} - \\mathbf{y} \\log\\left( A \\mathbf{x} \\right) + \\log(\\mathbf{y}!) 
\\right ) + I(\\mathbf{x} \\geq 0)\\;,$$\n\nwhere $A$ is the forward operator, $\\mathbf{y}$ is the measurement, $\\mathbf{x}$ is the signal reconstruction, and $I(\\mathbf{x} \\geq 0)$ is the non-negative indicator.\n\"\"\"\n\nimport jax\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\n\nimport scico.numpy as snp\nimport scico.random\nfrom scico import functional, linop, loss, plot\nfrom scico.pgm import (\n AcceleratedPGM,\n AdaptiveBBStepSize,\n BBStepSize,\n LineSearchStepSize,\n RobustLineSearchStepSize,\n)\nfrom scipy.linalg import dft\n\n\"\"\"\nConstruct a dictionary, a reference random reconstruction, and a test measurement signal consisting of the synthesis of the reference reconstruction.\n\"\"\"\nm = 1024 # Signal size\nn = 8 # Dictionary size\n\n# Create dictionary with bump-like features\nD = ((snp.real(dft(m))[1 : n + 1, :m]) ** 12).T\n\nx_gt, key = scico.random.uniform((n,), seed=12345) # true coefficients\n\nlam = D @ x_gt\ny, key = scico.random.poisson(lam, shape=lam.shape, key=key) # synthetic signal\n\nx_gt = jax.device_put(x_gt) # Convert to jax array, push to GPU\ny = jax.device_put(y) # Convert to jax array, push to GPU\n\n\"\"\"\nSet up the forward operator, the loss function and the regularization.\n\"\"\"\nA = linop.MatrixOperator(D)\nf = loss.PoissonLoss(y=y, A=A)\nf.is_smooth = True\ng = functional.NonNegativeIndicator()\n\n\"\"\"\nDefine common setup: maximum of iterations and initial estimation of solution.\n\"\"\"\nmaxiter = 50\n\nx0, key = scico.random.uniform((n,), key=key)\nx0 = jax.device_put(x0) # initial solution estimate\n\n\"\"\"\nDefine plotting functionality.\n\"\"\"\n\n\ndef plot_results(hist, str_ss, L0, xsol, xgt, Amat):\n # Plot signal, coefficients and convergence statistics.\n fig = plot.figure(\n figsize=(12, 6),\n tight_layout=True,\n )\n gs = gridspec.GridSpec(nrows=2, ncols=3)\n\n fig.suptitle(\n \"Results for PGM Solver and \" + str_ss + r\" ($L_0$: \" + \"{:4.2f}\".format(L0) + \")\",\n fontsize=16,\n )\n\n ax0 = fig.add_subplot(gs[0, 0])\n plot.plot(\n hist.Objective,\n ptyp=\"semilogy\",\n title=\"Objective\",\n xlbl=\"Iteration\",\n fig=fig,\n ax=ax0,\n )\n\n ax1 = fig.add_subplot(gs[0, 1])\n plot.plot(\n hist.Residual,\n ptyp=\"semilogy\",\n title=\"Residual\",\n xlbl=\"Iteration\",\n fig=fig,\n ax=ax1,\n )\n\n ax2 = fig.add_subplot(gs[0, 2])\n plot.plot(\n hist.L,\n ptyp=\"semilogy\",\n title=\"L\",\n xlbl=\"Iteration\",\n fig=fig,\n ax=ax2,\n )\n\n ax3 = fig.add_subplot(gs[1, 0])\n plt.stem(xgt, linefmt=\"C1-\", markerfmt=\"C1o\", basefmt=\"C1-\")\n plt.stem(xsol, linefmt=\"C2-\", markerfmt=\"C2x\", basefmt=\"C1-\")\n plt.legend([\"Ground Truth\", \"Recovered\"])\n plt.xlabel(\"Index\")\n plt.title(\"Coefficients\")\n\n ax4 = fig.add_subplot(gs[1, 1:])\n plot.plot(\n snp.vstack((y, Amat @ xgt, Amat @ xsol)).T,\n title=\"Fit\",\n xlbl=\"Index\",\n lgnd=(\"y\", \"A @ x_gt\", \"A @ x\"),\n fig=fig,\n ax=ax4,\n )\n fig.show()\n\n\n\"\"\"\nUse default PGMStepSize object, set L0 based on norm of Forward operator and set up AcceleratedPGM solver object. 
Run the solver and plot the recontructed signal and convergence statistics.\n\"\"\"\nL0 = snp.linalg.norm(D, 2) ** 2 # initial reciprocal of gradient descent step size\nstr_L0 = \"(Estimation based on norm of Forward operator)\"\n\nsolver = AcceleratedPGM(\n f=f,\n g=g,\n L0=L0,\n x0=x0,\n maxiter=maxiter,\n verbose=True,\n)\nstr_ss = type(solver.step_size).__name__\n\nprint(\"============================================================\")\nprint(\"Running solver with step size of class: \", str_ss)\nprint(\"L0 \" + str_L0 + \": \", L0, \"\\n\")\n\nx = solver.solve() # Run the solver.\nhist = solver.itstat_object.history(transpose=True)\nplot_results(hist, str_ss, L0, x, x_gt, A)\n\n\"\"\"\nUse BBStepSize object, set L0 with arbitary initial value and set up AcceleratedPGM solver object. Run the solver and plot the recontructed signal and convergence statistics.\n\"\"\"\nL0 = 90.0 # initial reciprocal of gradient descent step size\nstr_L0 = \"(Arbitrary Initialization)\"\n\nsolver = AcceleratedPGM(\n f=f,\n g=g,\n L0=L0,\n x0=x0,\n maxiter=maxiter,\n verbose=True,\n step_size=BBStepSize(),\n)\nstr_ss = type(solver.step_size).__name__\n\nprint(\"===================================================\")\nprint(\"Running solver with step size of class: \", str_ss)\nprint(\"L0 \" + str_L0 + \": \", L0, \"\\n\")\n\nx = solver.solve() # Run the solver.\nhist = solver.itstat_object.history(transpose=True)\nplot_results(hist, str_ss, L0, x, x_gt, A)\n\n\"\"\"\nUse AdaptiveBBStepSize object, set L0 with arbitary initial value and set up AcceleratedPGM solver object. Run the solver and plot the recontructed signal and convergence statistics.\n\"\"\"\nL0 = 90.0 # initial reciprocal of gradient descent step size\nstr_L0 = \"(Arbitrary Initialization)\"\n\nsolver = AcceleratedPGM(\n f=f,\n g=g,\n L0=L0,\n x0=x0,\n maxiter=maxiter,\n verbose=True,\n step_size=AdaptiveBBStepSize(kappa=0.75),\n)\nstr_ss = type(solver.step_size).__name__\n\nprint(\"===========================================================\")\nprint(\"Running solver with step size of class: \", str_ss)\nprint(\"L0 \" + str_L0 + \": \", L0, \"\\n\")\n\nx = solver.solve() # Run the solver.\nhist = solver.itstat_object.history(transpose=True)\nplot_results(hist, str_ss, L0, x, x_gt, A)\n\n\"\"\"\nUse LineSearchStepSize object, set L0 with arbitary initial value and set up AcceleratedPGM solver object. Run the solver and plot the recontructed signal and convergence statistics.\n\"\"\"\nL0 = 90.0 # initial reciprocal of gradient descent step size\nstr_L0 = \"(Arbitrary Initialization)\"\n\nsolver = AcceleratedPGM(\n f=f,\n g=g,\n L0=L0,\n x0=x0,\n maxiter=maxiter,\n verbose=True,\n step_size=LineSearchStepSize(gamma_u=1.01),\n)\nstr_ss = type(solver.step_size).__name__\n\nprint(\"===========================================================\")\nprint(\"Running solver with step size of class: \", str_ss)\nprint(\"L0 \" + str_L0 + \": \", L0, \"\\n\")\n\nx = solver.solve() # Run the solver.\nhist = solver.itstat_object.history(transpose=True)\nplot_results(hist, str_ss, L0, x, x_gt, A)\n\n\"\"\"\nUse RobustLineSearchStepSize object, set L0 with arbitary initial value and set up AcceleratedPGM solver object. 
Run the solver and plot the recontructed signal and convergence statistics.\n\"\"\"\nL0 = 90.0 # initial reciprocal of gradient descent step size\nstr_L0 = \"(Arbitrary Initialization)\"\n\nsolver = AcceleratedPGM(\n f=f,\n g=g,\n L0=L0,\n x0=x0,\n maxiter=maxiter,\n verbose=True,\n step_size=RobustLineSearchStepSize(),\n)\nstr_ss = type(solver.step_size).__name__\n\nprint(\"=================================================================\")\nprint(\"Running solver with step size of class: \", str_ss)\nprint(\"L0 \" + str_L0 + \": \", L0, \"\\n\")\n\nx = solver.solve() # Run the solver.\nhist = solver.itstat_object.history(transpose=True)\nplot_results(hist, str_ss, L0, x, x_gt, A)\n\ninput(\"\\nWaiting for input to close figures and exit\")\n", "# Copyright (C) 2020-2021 by SCICO Developers\n# All rights reserved. BSD 3-clause License.\n# This file is part of the SCICO package. Details of the copyright and\n# user license can be found in the 'LICENSE' file distributed with the\n# package.\n\n\"\"\"Operator and LinearOperator base class.\"\"\"\n\n\n# needed to annotate a class method that returns the encapsulating class\n# see https://www.python.org/dev/peps/pep-0563/\nfrom __future__ import annotations\n\nimport operator\nfrom functools import partial, wraps\nfrom typing import Callable, Optional, Tuple, Union\n\nimport numpy as np\n\nimport jax\nfrom jax.dtypes import result_type\nfrom jax.interpreters.xla import DeviceArray\n\nimport scico.numpy as snp\nfrom scico._autograd import linear_adjoint\nfrom scico.blockarray import BlockArray, block_sizes\nfrom scico.math import is_complex_dtype\nfrom scico.typing import BlockShape, DType, JaxArray, Shape\nfrom scico.util import is_nested\n\n\n# Wrapper function for defining mul, rmul, truediv between a scalar and a Operator\n# If one of these binary operations are called in the form binop(Operator, other)\n# and 'b' is a scalar, specialized Operator constructors can be called.\n# Otherwise, if other is not a scalar, an exception is raised\ndef _wrap_mul_div_scalar(func):\n @wraps(func)\n def wrapper(a, b):\n if np.isscalar(b) or isinstance(b, jax.core.Tracer):\n return func(a, b)\n else:\n raise TypeError(\n f\"Operation {func.__name__} not defined between {type(a)} and {type(b)}\"\n )\n\n return wrapper\n\n\nclass Operator:\n \"\"\"Generic Operator class\"\"\"\n\n def __repr__(self):\n return f\"\"\"{type(self)}\nshape : {self.shape}\nmatrix_shape : {self.matrix_shape}\ninput_dtype : {self.input_dtype}\noutput_dtype : {self.output_dtype}\n \"\"\"\n\n # See https://docs.scipy.org/doc/numpy-1.10.1/user/c-info.beyond-basics.html#ndarray.__array_priority__\n __array_priority__ = 1\n\n def __init__(\n self,\n input_shape: Union[Shape, BlockShape],\n output_shape: Optional[Union[Shape, BlockShape]] = None,\n eval_fn: Optional[Callable] = None,\n input_dtype: DType = np.float32,\n output_dtype: Optional[DType] = None,\n jit: bool = False,\n is_smooth: bool = None,\n ):\n r\"\"\"Operator init method.\n\n Args:\n input_shape: Shape of input array.\n output_shape: Shape of output array.\n Defaults to ``None``. If ``None``, `output_shape` is determined by evaluating\n `self.__call__` on an input array of zeros.\n eval_fn: Function used in evaluating this Operator.\n Defaults to ``None``. If ``None``, then `self.__call__` must be defined in any\n derived classes.\n input_dtype: `dtype` for input argument.\n Defaults to `float32`. 
If Operator implements complex-valued operations,\n this must be `complex64` for proper adjoint and gradient calculation.\n output_dtype: `dtype` for output argument.\n Defaults to ``None``. If ``None``, `output_shape` is determined by evaluating\n `self.__call__` on an input array of zeros.\n jit: If ``True``, call :meth:`Operator.jit()` on this Operator to jit the forward,\n adjoint, and gram functions. Same as calling :meth:`Operator.jit` after\n the Operator is created.\n \"\"\"\n\n #: Shape of input array or :class:`.BlockArray`.\n self.input_shape: Union[Shape, BlockShape]\n\n #: Size of flattened input. Sum of product of `input_shape` tuples.\n self.input_size: int\n\n #: Shape of output array or :class:`.BlockArray`\n self.output_shape: Union[Shape, BlockShape] # Something\n\n #: Size of flattened output. Sum of product of `output_shape` tuples.\n self.output_size: int\n\n #: Shape Operator would take if it operated on flattened arrays.\n #: Consists of (output_size, input_size)\n self.matrix_shape: Tuple[int, int]\n\n #: Shape of Operator. Consists of (output_shape, input_shape).\n self.shape: Tuple[Union[Shape, BlockShape], Union[Shape, BlockShape]]\n\n #: Dtype of input\n self.input_dtype: DType\n\n if isinstance(input_shape, int):\n self.input_shape = (input_shape,)\n else:\n self.input_shape = input_shape\n self.input_dtype = input_dtype\n\n # Allows for dynamic creation of new Operator/LinearOperator, eg for adjoints\n if eval_fn:\n self._eval = eval_fn # type: ignore\n\n # If the shape isn't specified by user we can infer it using by invoking the function\n if output_shape is None or output_dtype is None:\n tmp = self(snp.zeros(self.input_shape, dtype=input_dtype))\n if output_shape is None:\n self.output_shape = tmp.shape\n else:\n self.output_shape = (output_shape,) if isinstance(output_shape, int) else output_shape\n\n if output_dtype is None:\n self.output_dtype = tmp.dtype\n else:\n self.output_dtype = output_dtype\n\n # Determine the shape of the \"vectorized\" operator (as an element of ℝ^{n × m}\n # If the function returns a BlockArray we need to compute the size of each block,\n # then sum.\n self.input_size = int(np.sum(block_sizes(self.input_shape)))\n self.output_size = int(np.sum(block_sizes(self.output_shape)))\n\n self.shape = (self.output_shape, self.input_shape)\n self.matrix_shape = (self.output_size, self.input_size)\n\n #: True if this is a smooth mapping; false otherwise\n self.is_smooth = is_smooth\n\n if jit:\n self.jit()\n\n def jit(self):\n self._eval = jax.jit(self._eval)\n\n def __call__(\n self, x: Union[Operator, JaxArray, BlockArray]\n ) -> Union[Operator, JaxArray, BlockArray]:\n r\"\"\"Evaluates this Operator at the point :math:`\\mb{x}`.\n\n Args:\n x: Point at which to evaluate this Operator.\n If `x` is a :class:`DeviceArray` or :class:`.BlockArray`, must have\n `shape == self.input_shape`. 
If `x` is a :class:`.Operator` or :class:`.LinearOperator`, must have\n `x.output_shape == self.input_shape`.\n \"\"\"\n\n if isinstance(x, Operator):\n # Compose the two operators if shapes conform\n if self.input_shape == x.output_shape:\n return Operator(\n input_shape=x.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda z: self(x(z)),\n input_dtype=self.input_dtype,\n output_dtype=x.output_dtype,\n is_smooth=(self.is_smooth and x.is_smooth),\n )\n else:\n raise ValueError(f\"\"\"Incompatible shapes {self.shape}, {x.shape} \"\"\")\n\n elif isinstance(x, (np.ndarray, DeviceArray, BlockArray)):\n if self.input_shape == x.shape:\n return self._eval(x)\n else:\n raise ValueError(\n f\"\"\"Cannot evaluate {type(self)} with input_shape={self.input_shape} on array with shape={x.shape}\"\"\"\n )\n else:\n # What is the context under which this gets called?\n # Currently: in jit and grad tracers\n return self._eval(x)\n\n def __add__(self, other):\n if isinstance(other, Operator):\n if self.shape == other.shape:\n return Operator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(x) + other(x),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other.output_dtype),\n is_smooth=(self.is_smooth and other.is_smooth),\n )\n else:\n raise ValueError(f\"shapes {self.shape} and {other.shape} do not match\")\n else:\n raise TypeError(f\"Operation __add__ not defined between {type(self)} and {type(other)}\")\n\n def __sub__(self, other):\n if isinstance(other, Operator):\n if self.shape == other.shape:\n return Operator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(x) - other(x),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other.output_dtype),\n is_smooth=(self.is_smooth and other.is_smooth),\n )\n else:\n raise ValueError(f\"shapes {self.shape} and {other.shape} do not match\")\n else:\n raise TypeError(f\"Operation __sub__ not defined between {type(self)} and {type(other)}\")\n\n @_wrap_mul_div_scalar\n def __mul__(self, other):\n return Operator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: other * self(x),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other),\n is_smooth=self.is_smooth,\n )\n\n def __neg__(self):\n # -self = -1. * self\n return -1.0 * self\n\n @_wrap_mul_div_scalar\n def __rmul__(self, other):\n return Operator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: other * self(x),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other),\n is_smooth=self.is_smooth,\n )\n\n @_wrap_mul_div_scalar\n def __truediv__(self, other):\n return Operator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(x) / other,\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other),\n is_smooth=self.is_smooth,\n )\n\n def jvp(self, primals, tangents):\n return jax.jvp(self, primals, tangents)\n\n def vjp(self, *primals, has_aux: bool = False):\n primals, self_vjp = jax.vjp(self, *primals)\n return primals, self_vjp\n\n def freeze(self, argnum: int, val: Union[JaxArray, BlockArray]) -> Operator:\n \"\"\"Returns a new Operator with block argument `argnum` fixed to value `val`.\n\n Args:\n argnum: Index of block to freeze. 
Must be less than or equal to the\n number of blocks in an input array.\n val: Value to fix the `argnum`-th input to.\n\n \"\"\"\n\n if not is_nested(self.input_shape):\n raise ValueError(\n \"The `freeze` method can only be applied to Operators that take BlockArray inputs\"\n )\n\n input_ndim = len(self.input_shape)\n if argnum > input_ndim - 1:\n raise ValueError(\n f\"argnum to freeze must be less than the number of input arguments to \"\n f\"this operator ({input_ndim}); got {argnum}\"\n )\n\n if val.shape != self.input_shape[argnum]:\n raise ValueError(\n f\"value to be frozen at position {argnum} must have shape \"\n f\"{self.input_shape[argnum]}, got {val.shape}\"\n )\n\n input_shape = tuple(s for i, s in enumerate(self.input_shape) if i != argnum)\n\n if len(input_shape) == 1:\n input_shape = input_shape[0]\n\n def concat_args(args):\n # Creates a blockarray with args and the frozen value in the correct place\n # Eg if this operator takes a blockarray with two blocks, then\n # concat_args(args) = BlockArray.array([val, args]) if argnum = 0\n # concat_args(args) = BlockArray.array([args, val]) if argnum = 1\n\n if isinstance(args, DeviceArray) or isinstance(args, np.ndarray):\n # In the case that the original operator takes a blcokarray with two\n # blocks, wrap in a list so we can use the same indexing as >2 block case\n args = [args]\n\n arg_list = []\n for i in range(input_ndim):\n if i < argnum:\n arg_list.append(args[i])\n elif i > argnum:\n arg_list.append(args[i - 1])\n else:\n arg_list.append(val)\n return BlockArray.array(arg_list)\n\n return Operator(\n input_shape=input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(concat_args(x)),\n is_smooth=self.is_smooth,\n )\n\n\n# Wrapper function for defining __add__, __sub__ between LinearOperator and other objects\n# Handles shape checking and dispatching based on operand types.\n# If one of the two operands is an Operator, an Operator is returned.\n# If both operands are LinearOperators of different types, a generic LinearOperator is returned.\n# If both operands are LinearOperators of the same type, a special constructor can be called\n# see, eg, Convolve.__add__\ndef _wrap_add_sub(func: Callable, op: Callable) -> Callable:\n # func should be either .__add__() or .__sub__()\n # op should be the functional equivalent of the same (op.add for func = __add__)\n @wraps(func)\n def wrapper(\n a: LinearOperator, b: Union[Operator, LinearOperator]\n ) -> Union[Operator, LinearOperator]:\n if isinstance(b, Operator):\n if a.shape == b.shape:\n if isinstance(b, type(a)):\n # same type of linop, eg convolution can have special\n # behavior (see Conv2d.__add__)\n return func(a, b)\n elif isinstance(\n b, LinearOperator\n ): # LinearOperator + LinearOperator -> LinearOperator\n return LinearOperator(\n input_shape=a.input_shape,\n output_shape=a.output_shape,\n eval_fn=lambda x: op(a(x), b(x)),\n adj_fn=lambda x: op(a(x), b(x)),\n input_dtype=a.input_dtype,\n output_dtype=result_type(a.output_dtype, b.output_dtype),\n )\n else: # LinearOperator + Operator -> Operator\n return Operator(\n input_shape=a.input_shape,\n output_shape=a.output_shape,\n eval_fn=lambda x: op(a(x), b(x)),\n input_dtype=a.input_dtype,\n output_dtype=result_type(a.output_dtype, b.output_dtype),\n )\n else:\n raise ValueError(f\"shapes {a.shape} and {b.shape} do not match\")\n else:\n raise TypeError(\n f\"Operation {func.__name__} not defined between {type(a)} and {type(b)}\"\n )\n\n return wrapper\n\n\nclass LinearOperator(Operator):\n 
\"\"\"Generic Linear Operator base class\"\"\"\n\n def __init__(\n self,\n input_shape: Union[Shape, BlockShape],\n output_shape: Optional[Union[Shape, BlockShape]] = None,\n eval_fn: Optional[Callable] = None,\n adj_fn: Optional[Callable] = None,\n input_dtype: DType = np.float32,\n output_dtype: Optional[DType] = None,\n jit: bool = False,\n ):\n r\"\"\"LinearOperator init method.\n\n Args:\n input_shape: Shape of input array.\n output_shape: Shape of output array.\n Defaults to None. If None, ``output_shape`` is determined by evaluating\n ``self.__call__`` on an input array of zeros.\n eval_fn: Function used in evaluating this LinearOperator.\n Defaults to None. If None, then ``self.__call__`` must be defined in any\n derived classes.\n adj_fn: Function used to evaluate the adjoint of this LinearOperator.\n Defaults to None. If None, the adjoint\n is not set, and the :meth:`._set_adjoint`\n will be called silently at the first :meth:`.adj` call or\n can be called manually.\n input_dtype: `dtype` for input argument.\n Defaults to `float32`. If ``LinearOperator`` implements complex-valued operations,\n this must be `complex64` for proper adjoint and gradient calculation.\n output_dtype: `dtype` for output argument.\n Defaults to None. If None, ``output_shape`` is determined by evaluating\n ``self.__call__`` on an input array of zeros.\n jit: If ``True``, call :meth:`.jit()` on this LinearOperator to jit the forward,\n adjoint, and gram functions. Same as calling :meth:`.jit` after\n the LinearOperator is created.\n \"\"\"\n\n super().__init__(\n input_shape=input_shape,\n output_shape=output_shape,\n eval_fn=eval_fn,\n input_dtype=input_dtype,\n output_dtype=output_dtype,\n jit=False,\n is_smooth=True,\n )\n\n if not hasattr(self, \"_adj\"):\n self._adj = None\n if not hasattr(self, \"_gram\"):\n self._gram = None\n if callable(adj_fn):\n self._adj = adj_fn\n self._gram = lambda x: self.adj(self(x))\n elif adj_fn is not None:\n raise TypeError(f\"Parameter adj_fn must be either a Callable or None; got {adj_fn}\")\n\n if jit:\n self.jit()\n\n def _set_adjoint(self):\n adj_fun = linear_adjoint(self.__call__, snp.zeros(self.input_shape, dtype=self.input_dtype))\n self._adj = lambda x: adj_fun(x)[0]\n self._gram = lambda x: self.adj(self(x))\n\n def jit(self):\n \"\"\"Replaces the private functions :meth:`._eval`, :meth:`_adj`, :meth:`._gram`\n with jitted versions.\n \"\"\"\n if (self._adj is None) or (self._gram is None):\n self._set_adjoint()\n\n self._eval = jax.jit(self._eval)\n self._adj = jax.jit(self._adj)\n self._gram = jax.jit(self._gram)\n\n @partial(_wrap_add_sub, op=operator.add)\n def __add__(self, other):\n return LinearOperator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(x) + other(x),\n adj_fn=lambda x: self.adj(x) + other.adj(x),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other.output_dtype),\n )\n\n @partial(_wrap_add_sub, op=operator.sub)\n def __sub__(self, other):\n return LinearOperator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(x) - other(x),\n adj_fn=lambda x: self.adj(x) - other.adj(x),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other.output_dtype),\n )\n\n @_wrap_mul_div_scalar\n def __mul__(self, other):\n return LinearOperator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: other * self(x),\n adj_fn=lambda x: snp.conj(other) * self.adj(x),\n 
input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other),\n )\n\n @_wrap_mul_div_scalar\n def __rmul__(self, other):\n return LinearOperator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: other * self(x),\n adj_fn=lambda x: snp.conj(other) * self.adj(x),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other),\n )\n\n @_wrap_mul_div_scalar\n def __truediv__(self, other):\n return LinearOperator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(x) / other,\n adj_fn=lambda x: self.adj(x) / snp.conj(other),\n input_dtype=self.input_dtype,\n output_dtype=result_type(self.output_dtype, other),\n )\n\n def __matmul__(self, other):\n # self @ other\n return self(other)\n\n def __rmatmul__(self, other):\n # other @ self\n if isinstance(other, LinearOperator):\n return other(self)\n elif isinstance(other, (np.ndarray, DeviceArray)):\n # for real valued inputs: y @ self == (self.T @ y.T).T\n # for complex: y @ self == (self.conj().T @ y.conj().T).conj().T\n # self.conj().T == self.adj\n return self.adj(other.conj().T).conj().T\n else:\n raise NotImplementedError(\n f\"Operation __rmatmul__ not defined between {type(self)} and {type(other)}\"\n )\n\n def __call__(\n self, x: Union[LinearOperator, JaxArray, BlockArray]\n ) -> Union[LinearOperator, JaxArray, BlockArray]:\n r\"\"\"Evaluates this LinearOperator at the point :math:`\\mb{x}`.\n\n Args:\n x: Point at which to evaluate this ``LinearOperator``.\n If ``x`` is a :class:`DeviceArray` or :class:`.BlockArray`, must have\n ``shape == self.input_shape``. If ``x`` is a :class:`.LinearOperator`, must have\n ``x.output_shape == self.input_shape``.\n \"\"\"\n if isinstance(x, LinearOperator):\n return ComposedLinearOperator(self, x)\n else:\n # Use Operator __call__ for LinearOperator @ array or LinearOperator @ Operator\n return super().__call__(x)\n\n def adj(\n self, y: Union[LinearOperator, JaxArray, BlockArray]\n ) -> Union[LinearOperator, JaxArray, BlockArray]:\n \"\"\"Computes the adjoint of this :class:`.LinearOperator` applied to input ``y``\n\n Args:\n y: Point at which to compute adjoint.\n If `y` is :class:`DeviceArray` or :class:`.BlockArray`, must have\n ``shape == self.output_shape``. If `y` is a :class:`.LinearOperator`, must have\n ``y.output_shape == self.output_shape``.\n\n Returns:\n Result of adjoint evaluated at ``y``.\n\n \"\"\"\n if self._adj is None:\n self._set_adjoint()\n\n if isinstance(y, LinearOperator):\n return ComposedLinearOperator(self.H, y)\n elif self.output_dtype != y.dtype:\n raise ValueError(f\"dtype error: expected {self.output_dtype}, got {y.dtype}\")\n elif self.output_shape != y.shape:\n raise ValueError(\n f\"\"\"Shapes do not conform: input array with shape {y.shape} does not match\n LinearOperator output_shape {self.output_shape}\"\"\"\n )\n else:\n return self._adj(y)\n\n @property\n def T(self) -> LinearOperator:\n \"\"\"Returns a new :class:`LinearOperator` that implements the transpose of this :class:`LinearOperator`.\n\n For a real-valued LinearOperator ``A`` (``A.input_dtype=np.float32` or ``np.float64``), the\n LinearOperator ``A.T`` implements the adjoint: ``A.T(y) == A.adj(y)``.\n\n For a complex-valued LinearOperator ``A`` (``A.input_dtype``=`np.complex64` or ``np.complex128``), the\n LinearOperator ``A.T`` is not the adjoint. 
For the conjugate transpose, use ``.conj().T``\n or :meth:`.H`.\n\n \"\"\"\n if is_complex_dtype(self.input_dtype):\n return LinearOperator(\n input_shape=self.output_shape,\n output_shape=self.input_shape,\n eval_fn=lambda x: self.adj(x.conj()).conj(),\n adj_fn=lambda x: self(x),\n input_dtype=self.input_dtype,\n output_dtype=self.output_dtype,\n )\n else:\n return LinearOperator(\n input_shape=self.output_shape,\n output_shape=self.input_shape,\n eval_fn=lambda x: self.adj(x),\n adj_fn=lambda x: self(x),\n input_dtype=self.output_dtype,\n output_dtype=self.input_dtype,\n )\n\n @property\n def H(self) -> LinearOperator:\n \"\"\"Returns a new :class:`LinearOperator` that is the Hermitian transpose of this :class:`LinearOperator`.\n\n For a real-valued LinearOperator ``A`` (``A.input_dtype=np.float32`` or ``np.float64``), the\n LinearOperator ``A.H`` is equivalent to ``A.T``.\n\n For a complex-valued LinearOperator ``A`` (``A.input_dtype = np.complex64`` or ``np.complex128``), the\n LinearOperator ``A.H`` implements the adjoint of ``A : A.H @ y == A.adj(y) == A.conj().T @ y)``.\n\n For the non-conjugate transpose, see :meth:`.T`.\n \"\"\"\n return LinearOperator(\n input_shape=self.output_shape,\n output_shape=self.input_shape,\n eval_fn=lambda x: self.adj(x),\n adj_fn=lambda x: self(x),\n input_dtype=self.output_dtype,\n output_dtype=self.input_dtype,\n )\n\n def conj(self) -> LinearOperator:\n \"\"\"Returns a new :class:`.LinearOperator` ``Ac`` such that ``Ac(x) = conj(A)(x)``\"\"\"\n # A.conj() x == (A @ x.conj()).conj()\n return LinearOperator(\n input_shape=self.input_shape,\n output_shape=self.output_shape,\n eval_fn=lambda x: self(x.conj()).conj(),\n adj_fn=lambda x: self.adj(x.conj()).conj(),\n input_dtype=self.input_dtype,\n output_dtype=self.output_dtype,\n )\n\n @property\n def gram_op(self) -> LinearOperator:\n \"\"\"Returns a new :class:`.LinearOperator` ``G`` such that ``G(x) = A.adj(A(x)))``\"\"\"\n\n if self._gram is None:\n self._set_adjoint()\n\n return LinearOperator(\n input_shape=self.input_shape,\n output_shape=self.input_shape,\n eval_fn=lambda x: self.gram(x),\n adj_fn=lambda x: self.gram(x),\n input_dtype=self.input_dtype,\n output_dtype=self.output_dtype,\n )\n\n def gram(\n self, x: Union[LinearOperator, JaxArray, BlockArray]\n ) -> Union[LinearOperator, JaxArray, BlockArray]:\n \"\"\"Computes ``A.adj(A(x))``\n\n Args:\n x: Point at which to evaluate the gram operator.\n If ``x`` is a :class:`DeviceArray` or :class:`.BlockArray`, must have\n ``shape == self.input_shape``. If ``x`` is a :class:`.LinearOperator`, must have\n ``x.output_shape == self.input_shape``.\n\n Returns:\n Result of ``A.adj(A(x))``.\n \"\"\"\n if self._gram is None:\n self._set_adjoint()\n return self._gram(x)\n\n\nclass ComposedLinearOperator(LinearOperator):\n \"\"\"A LinearOperator formed by the composition of two LinearOperators.\"\"\"\n\n def __init__(self, A: LinearOperator, B: LinearOperator, jit: bool = False):\n r\"\"\"ComposedLinearOperator init method.\n\n A ComposedLinearOperator ``AB`` implements ``AB @ x == A @ B @ x``.\n The LinearOperators ``A`` and ``B`` are stored as attributes of the ComposedLinearOperator.\n\n The LinearOperators ``A`` and ``B`` must have compatible shapes and dtypes:\n ``A.input_shape == B.output_shape`` and ``A.input_dtype == B.input_dtype``.\n\n\n Args:\n A: First (left) LinearOperator.\n B: Second (right) LinearOperator\n jit: If ``True``, call :meth:`.jit()` on this LinearOperator to jit the forward,\n adjoint, and gram functions. 
Same as calling :meth:`.jit` after\n the LinearOperator is created.\n \"\"\"\n if not isinstance(A, LinearOperator):\n raise TypeError(\n f\"The first argument to ComposedLinearOpeator must be a LinearOperator; got {type(A)}\"\n )\n if not isinstance(B, LinearOperator):\n raise TypeError(\n f\"The second argument to ComposedLinearOpeator must be a LinearOperator; got {type(B)}\"\n )\n if A.input_shape != B.output_shape:\n raise ValueError(f\"Incompatable LinearOperator shapes {A.shape}, {B.shape}\")\n if A.input_dtype != B.output_dtype:\n raise ValueError(\n f\"Incompatable LinearOperator dtypes {A.input_dtype}, {B.output_dtype}\"\n )\n\n self.A = A\n self.B = B\n\n super().__init__(\n input_shape=self.B.input_shape,\n output_shape=self.A.output_shape,\n input_dtype=self.B.input_dtype,\n output_dtype=self.A.output_dtype,\n eval_fn=lambda x: self.A(self.B(x)),\n adj_fn=lambda z: self.B.adj(self.A.adj(z)),\n jit=jit,\n )\n" ]
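For orientation, a minimal sketch (not part of the stored record above) of how the LinearOperator adjoint and Gram machinery defined in scico/_generic_operators.py is typically exercised. It reuses only names that appear in the excerpted sources (scico.random.randn, MatrixOperator, .adj, .gram); the shapes and tolerances are illustrative assumptions.

import jax
import numpy as np

import scico.numpy as snp
from scico.linop import MatrixOperator
from scico.random import randn

key = jax.random.PRNGKey(0)
A, key = randn((4, 6), key=key)   # scico.random.randn returns (array, key)
x, key = randn((6,), key=key)
y, key = randn((4,), key=key)

Ao = MatrixOperator(A)

Ax = Ao @ x        # forward evaluation, equivalent to Ao(x)
Aty = Ao.adj(y)    # adjoint evaluation, equivalent to Ao.H @ y for real dtypes

# Defining property of the adjoint: <A x, y> == <x, A^* y>.
np.testing.assert_allclose(snp.sum(Ax * y), snp.sum(x * Aty), rtol=1e-5)

# The Gram operator is the adjoint composed with the forward map.
np.testing.assert_allclose(Ao.gram(x), Ao.adj(Ao @ x), rtol=1e-5)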
[ [ "numpy.random.randn", "numpy.testing.assert_allclose" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.stem", "matplotlib.pyplot.title", "scipy.linalg.dft", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.xlabel" ], [ "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
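As a companion to the pgm_stepsize_poisson.py example stored in this record, a condensed, hypothetical sketch of the same solver pattern: a Poisson negative log-likelihood data term plus a non-negativity constraint solved with AcceleratedPGM. The problem sizes and the BBStepSize choice are assumptions made for brevity; every class and keyword used (MatrixOperator, PoissonLoss, NonNegativeIndicator, AcceleratedPGM, BBStepSize, L0, x0, maxiter) appears in the example source above.

import jax
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss
from scico.pgm import AcceleratedPGM, BBStepSize

# Small synthetic problem; the sizes are arbitrary illustrative choices.
key = jax.random.PRNGKey(0)
D, key = scico.random.randn((32, 8), key=key)
D = D * D                                  # elementwise square keeps the dictionary non-negative
x_true, key = scico.random.uniform((8,), key=key)
y, key = scico.random.poisson(D @ x_true, shape=(32,), key=key)

A = linop.MatrixOperator(D)
f = loss.PoissonLoss(y=y, A=A)             # data fidelity term
f.is_smooth = True
g = functional.NonNegativeIndicator()      # indicator of the non-negative orthant

x0, key = scico.random.uniform((8,), key=key)
solver = AcceleratedPGM(
    f=f,
    g=g,
    L0=snp.linalg.norm(D, 2) ** 2,         # initial reciprocal step size, as in the example
    x0=x0,
    maxiter=100,
    step_size=BBStepSize(),
)
x_hat = solver.solve()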
praneethgb/rasa
[ "5bf227f165d0b041a367d2c0bbf712ebb6a54792", "5bf227f165d0b041a367d2c0bbf712ebb6a54792" ]
[ "rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py", "rasa/nlu/classifiers/_diet_classifier.py" ]
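Before the stored sources for these two rasa modules, a brief hypothetical sketch of driving the RegexFeaturizer excerpted below in isolation. The pattern dict format ({"name": ..., "pattern": ...}) and the process/get_sparse_features calls match the code in this record; the WhitespaceTokenizer usage and the Message construction are assumptions based on the rasa 2.x API and may need adjustment.

from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.shared.nlu.constants import TEXT
from rasa.shared.nlu.training_data.message import Message

# A single hand-written pattern; in normal pipelines these are extracted from training data.
patterns = [{"name": "zipcode", "pattern": "[0-9]{5}"}]

featurizer = RegexFeaturizer({"case_sensitive": False}, known_patterns=patterns)

message = Message(data={TEXT: "deliver to 87544 please"})
WhitespaceTokenizer().process(message)   # the featurizer expects tokens to be present
featurizer.process(message)

sequence_features, sentence_features = message.get_sparse_features(TEXT)
print(sequence_features.features.shape)  # (num_tokens, num_patterns)
print(sentence_features.features.shape)  # (1, num_patterns)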
[ "import logging\nimport re\nfrom typing import Any, Dict, List, Optional, Text, Type, Tuple\nfrom pathlib import Path\nimport numpy as np\nimport scipy.sparse\n\nimport rasa.shared.utils.io\nimport rasa.utils.io\nimport rasa.nlu.utils.pattern_utils as pattern_utils\nfrom rasa.nlu import utils\nfrom rasa.nlu.components import Component\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.nlu.constants import (\n TOKENS_NAMES,\n FEATURIZER_CLASS_ALIAS,\n)\nfrom rasa.shared.nlu.constants import (\n TEXT,\n RESPONSE,\n FEATURE_TYPE_SENTENCE,\n FEATURE_TYPE_SEQUENCE,\n ACTION_TEXT,\n)\nfrom rasa.nlu.featurizers.featurizer import SparseFeaturizer\nfrom rasa.shared.nlu.training_data.features import Features\nfrom rasa.nlu.model import Metadata\nfrom rasa.nlu.tokenizers.tokenizer import Tokenizer\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\n\nlogger = logging.getLogger(__name__)\n\n\nclass RegexFeaturizer(SparseFeaturizer):\n @classmethod\n def required_components(cls) -> List[Type[Component]]:\n return [Tokenizer]\n\n defaults = {\n # text will be processed with case sensitive as default\n \"case_sensitive\": True,\n # use lookup tables to generate features\n \"use_lookup_tables\": True,\n # use regexes to generate features\n \"use_regexes\": True,\n # use match word boundaries for lookup table\n \"use_word_boundaries\": True,\n # Additional number of patterns to consider\n # for incremental training\n \"number_additional_patterns\": None,\n }\n\n def __init__(\n self,\n component_config: Optional[Dict[Text, Any]] = None,\n known_patterns: Optional[List[Dict[Text, Text]]] = None,\n finetune_mode: bool = False,\n ) -> None:\n \"\"\"Constructs new features for regexes and lookup table using regex expressions.\n\n Args:\n component_config: Configuration for the component\n known_patterns: Regex Patterns the component should pre-load itself with.\n finetune_mode: Load component in finetune mode.\n \"\"\"\n super().__init__(component_config)\n\n self.known_patterns = known_patterns if known_patterns else []\n self.case_sensitive = self.component_config[\"case_sensitive\"]\n self.finetune_mode = finetune_mode\n if self.component_config[\"number_additional_patterns\"]:\n rasa.shared.utils.io.raise_deprecation_warning(\n \"The parameter `number_additional_patterns` has been deprecated \"\n \"since the pipeline does not create an extra buffer for new vocabulary \"\n \"anymore. Any value assigned to this parameter will be ignored. \"\n \"You can omit specifying `number_additional_patterns` in future runs.\"\n )\n\n def _merge_new_patterns(self, new_patterns: List[Dict[Text, Text]]) -> None:\n \"\"\"Updates already known patterns with new patterns extracted from data.\n\n New patterns should always be added to the end of the existing\n patterns and the order of the existing patterns should not be disturbed.\n\n Args:\n new_patterns: Patterns extracted from training data and to be merged with\n known patterns.\n \"\"\"\n pattern_name_index_map = {\n pattern[\"name\"]: index for index, pattern in enumerate(self.known_patterns)\n }\n for extra_pattern in new_patterns:\n new_pattern_name = extra_pattern[\"name\"]\n\n # Some patterns may have just new examples added\n # to them. 
These do not count as additional pattern.\n if new_pattern_name in pattern_name_index_map:\n self.known_patterns[pattern_name_index_map[new_pattern_name]][\n \"pattern\"\n ] = extra_pattern[\"pattern\"]\n else:\n self.known_patterns.append(extra_pattern)\n\n def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Trains the component with all patterns extracted from training data.\n\n Args:\n training_data: Training data consisting of training examples and patterns\n available.\n config: NLU Pipeline config\n **kwargs: Any other arguments\n \"\"\"\n patterns_from_data = pattern_utils.extract_patterns(\n training_data,\n use_lookup_tables=self.component_config[\"use_lookup_tables\"],\n use_regexes=self.component_config[\"use_regexes\"],\n use_word_boundaries=self.component_config[\"use_word_boundaries\"],\n )\n if self.finetune_mode:\n # Merge patterns extracted from data with known patterns\n self._merge_new_patterns(patterns_from_data)\n else:\n self.known_patterns = patterns_from_data\n\n for example in training_data.training_examples:\n for attribute in [TEXT, RESPONSE, ACTION_TEXT]:\n self._text_features_with_regex(example, attribute)\n\n def process(self, message: Message, **kwargs: Any) -> None:\n self._text_features_with_regex(message, TEXT)\n\n def _text_features_with_regex(self, message: Message, attribute: Text) -> None:\n \"\"\"Helper method to extract features and set them appropriately in the message.\n\n Args:\n message: Message to be featurized.\n attribute: Attribute of message to be featurized.\n \"\"\"\n if self.known_patterns:\n sequence_features, sentence_features = self._features_for_patterns(\n message, attribute\n )\n\n if sequence_features is not None:\n final_sequence_features = Features(\n sequence_features,\n FEATURE_TYPE_SEQUENCE,\n attribute,\n self.component_config[FEATURIZER_CLASS_ALIAS],\n )\n message.add_features(final_sequence_features)\n\n if sentence_features is not None:\n final_sentence_features = Features(\n sentence_features,\n FEATURE_TYPE_SENTENCE,\n attribute,\n self.component_config[FEATURIZER_CLASS_ALIAS],\n )\n message.add_features(final_sentence_features)\n\n def _features_for_patterns(\n self, message: Message, attribute: Text\n ) -> Tuple[Optional[scipy.sparse.coo_matrix], Optional[scipy.sparse.coo_matrix]]:\n \"\"\"Checks which known patterns match the message.\n\n Given a sentence, returns a vector of {1,0} values indicating which\n regexes did match. Furthermore, if the\n message is tokenized, the function will mark all tokens with a dict\n relating the name of the regex to whether it was matched.\n\n Args:\n message: Message to be featurized.\n attribute: Attribute of message to be featurized.\n\n Returns:\n Token and sentence level features of message attribute.\n \"\"\"\n # Attribute not set (e.g. 
response not present)\n if not message.get(attribute):\n return None, None\n\n tokens = message.get(TOKENS_NAMES[attribute], [])\n\n if not tokens:\n # nothing to featurize\n return None, None\n\n flags = 0 # default flag\n if not self.case_sensitive:\n flags = re.IGNORECASE\n\n sequence_length = len(tokens)\n\n num_patterns = len(self.known_patterns)\n\n sequence_features = np.zeros([sequence_length, num_patterns])\n sentence_features = np.zeros([1, num_patterns])\n\n for pattern_index, pattern in enumerate(self.known_patterns):\n matches = re.finditer(\n pattern[\"pattern\"], message.get(attribute), flags=flags\n )\n matches = list(matches)\n\n for token_index, t in enumerate(tokens):\n patterns = t.get(\"pattern\", default={})\n patterns[pattern[\"name\"]] = False\n\n for match in matches:\n if t.start < match.end() and t.end > match.start():\n patterns[pattern[\"name\"]] = True\n sequence_features[token_index][pattern_index] = 1.0\n if attribute in [RESPONSE, TEXT, ACTION_TEXT]:\n # sentence vector should contain all patterns\n sentence_features[0][pattern_index] = 1.0\n\n t.set(\"pattern\", patterns)\n return (\n scipy.sparse.coo_matrix(sequence_features),\n scipy.sparse.coo_matrix(sentence_features),\n )\n\n @classmethod\n def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Text,\n model_metadata: Optional[Metadata] = None,\n cached_component: Optional[\"RegexFeaturizer\"] = None,\n should_finetune: bool = False,\n **kwargs: Any,\n ) -> \"RegexFeaturizer\":\n \"\"\"Loads a previously trained component.\n\n Args:\n meta: Configuration of trained component.\n model_dir: Path where trained pipeline is stored.\n model_metadata: Metadata for the trained pipeline.\n cached_component: Previously cached component(if any).\n should_finetune: Indicates whether to load the component for further\n finetuning.\n **kwargs: Any other arguments.\n \"\"\"\n file_name = meta[\"file\"]\n\n patterns_file_name = Path(model_dir) / (file_name + \".patterns.pkl\")\n\n known_patterns = None\n if patterns_file_name.exists():\n known_patterns = rasa.shared.utils.io.read_json_file(patterns_file_name)\n\n return RegexFeaturizer(\n meta, known_patterns=known_patterns, finetune_mode=should_finetune,\n )\n\n def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:\n \"\"\"Persist this model into the passed directory.\n\n Args:\n file_name: Prefix to add to all files stored as part of this component.\n model_dir: Path where files should be stored.\n\n Returns:\n Metadata necessary to load the model again.\n \"\"\"\n patterns_file_name = file_name + \".patterns.pkl\"\n regex_file = Path(model_dir) / patterns_file_name\n utils.write_json_to_file(regex_file, self.known_patterns, indent=4)\n\n return {\"file\": file_name}\n", "# flake8: noqa\n# WARNING: This module will be dropped before Rasa Open Source 3.0 is released.\n# Please don't do any changes in this module and rather adapt DIETClassifier2\n# from the regular `rasa.nlu.classifiers.diet_classifier` module.\n# This module is a workaround to defer breaking changes due to the architecture\n# revamp in 3.0.\nimport copy\nimport logging\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport os\nimport scipy.sparse\nimport tensorflow as tf\n\nfrom typing import Any, Dict, List, Optional, Text, Tuple, Union, Type\n\nimport rasa.shared.utils.io\nimport rasa.utils.io as io_utils\nimport rasa.nlu.utils.bilou_utils as bilou_utils\nfrom rasa.shared.constants import DIAGNOSTIC_DATA\nfrom 
rasa.nlu.featurizers.featurizer import Featurizer\nfrom rasa.nlu.components import Component\nfrom rasa.nlu.classifiers.classifier import IntentClassifier\nfrom rasa.nlu.extractors.extractor import EntityExtractor, EntityTagSpec\nfrom rasa.nlu.classifiers import LABEL_RANKING_LENGTH\nfrom rasa.utils import train_utils\nfrom rasa.utils.tensorflow import rasa_layers\nfrom rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel\nfrom rasa.utils.tensorflow.model_data import (\n RasaModelData,\n FeatureSignature,\n FeatureArray,\n)\nfrom rasa.nlu.constants import TOKENS_NAMES\nfrom rasa.shared.nlu.constants import (\n TEXT,\n INTENT,\n INTENT_RESPONSE_KEY,\n ENTITIES,\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_ATTRIBUTE_GROUP,\n ENTITY_ATTRIBUTE_ROLE,\n NO_ENTITY_TAG,\n SPLIT_ENTITIES_BY_COMMA,\n)\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.shared.exceptions import InvalidConfigException\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.nlu.model import Metadata\nfrom rasa.utils.tensorflow.constants import (\n LABEL,\n IDS,\n HIDDEN_LAYERS_SIZES,\n SHARE_HIDDEN_LAYERS,\n TRANSFORMER_SIZE,\n NUM_TRANSFORMER_LAYERS,\n NUM_HEADS,\n BATCH_SIZES,\n BATCH_STRATEGY,\n EPOCHS,\n RANDOM_SEED,\n LEARNING_RATE,\n RANKING_LENGTH,\n LOSS_TYPE,\n SIMILARITY_TYPE,\n NUM_NEG,\n SPARSE_INPUT_DROPOUT,\n DENSE_INPUT_DROPOUT,\n MASKED_LM,\n ENTITY_RECOGNITION,\n TENSORBOARD_LOG_DIR,\n INTENT_CLASSIFICATION,\n EVAL_NUM_EXAMPLES,\n EVAL_NUM_EPOCHS,\n UNIDIRECTIONAL_ENCODER,\n DROP_RATE,\n DROP_RATE_ATTENTION,\n CONNECTION_DENSITY,\n NEGATIVE_MARGIN_SCALE,\n REGULARIZATION_CONSTANT,\n SCALE_LOSS,\n USE_MAX_NEG_SIM,\n MAX_NEG_SIM,\n MAX_POS_SIM,\n EMBEDDING_DIMENSION,\n BILOU_FLAG,\n KEY_RELATIVE_ATTENTION,\n VALUE_RELATIVE_ATTENTION,\n MAX_RELATIVE_POSITION,\n AUTO,\n BALANCED,\n CROSS_ENTROPY,\n TENSORBOARD_LOG_LEVEL,\n CONCAT_DIMENSION,\n FEATURIZERS,\n CHECKPOINT_MODEL,\n SEQUENCE,\n SENTENCE,\n SEQUENCE_LENGTH,\n DENSE_DIMENSION,\n MASK,\n CONSTRAIN_SIMILARITIES,\n MODEL_CONFIDENCE,\n SOFTMAX,\n)\n\nlogger = logging.getLogger(__name__)\n\nSPARSE = \"sparse\"\nDENSE = \"dense\"\nLABEL_KEY = LABEL\nLABEL_SUB_KEY = IDS\n\nPOSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]\n\n\nclass DIETClassifier(IntentClassifier, EntityExtractor):\n \"\"\"A multi-task model for intent classification and entity extraction.\n\n DIET is Dual Intent and Entity Transformer.\n The architecture is based on a transformer which is shared for both tasks.\n A sequence of entity labels is predicted through a Conditional Random Field (CRF)\n tagging layer on top of the transformer output sequence corresponding to the\n input sequence of tokens. The transformer output for the ``__CLS__`` token and\n intent labels are embedded into a single semantic vector space. 
We use the\n dot-product loss to maximize the similarity with the target label and minimize\n similarities with negative samples.\n \"\"\"\n\n @classmethod\n def required_components(cls) -> List[Type[Component]]:\n return [Featurizer]\n\n # please make sure to update the docs when changing a default parameter\n defaults = {\n # ## Architecture of the used neural network\n # Hidden layer sizes for layers before the embedding layers for user message\n # and labels.\n # The number of hidden layers is equal to the length of the corresponding list.\n HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},\n # Whether to share the hidden layer weights between user message and labels.\n SHARE_HIDDEN_LAYERS: False,\n # Number of units in transformer\n TRANSFORMER_SIZE: 256,\n # Number of transformer layers\n NUM_TRANSFORMER_LAYERS: 2,\n # Number of attention heads in transformer\n NUM_HEADS: 4,\n # If 'True' use key relative embeddings in attention\n KEY_RELATIVE_ATTENTION: False,\n # If 'True' use value relative embeddings in attention\n VALUE_RELATIVE_ATTENTION: False,\n # Max position for relative embeddings. Only in effect if key- or value relative\n # attention are turned on\n MAX_RELATIVE_POSITION: 5,\n # Use a unidirectional or bidirectional encoder.\n UNIDIRECTIONAL_ENCODER: False,\n # ## Training parameters\n # Initial and final batch sizes:\n # Batch size will be linearly increased for each epoch.\n BATCH_SIZES: [64, 256],\n # Strategy used when creating batches.\n # Can be either 'sequence' or 'balanced'.\n BATCH_STRATEGY: BALANCED,\n # Number of epochs to train\n EPOCHS: 300,\n # Set random seed to any 'int' to get reproducible results\n RANDOM_SEED: None,\n # Initial learning rate for the optimizer\n LEARNING_RATE: 0.001,\n # ## Parameters for embeddings\n # Dimension size of embedding vectors\n EMBEDDING_DIMENSION: 20,\n # Dense dimension to use for sparse features.\n DENSE_DIMENSION: {TEXT: 128, LABEL: 20},\n # Default dimension to use for concatenating sequence and sentence features.\n CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},\n # The number of incorrect labels. The algorithm will minimize\n # their similarity to the user input during training.\n NUM_NEG: 20,\n # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.\n SIMILARITY_TYPE: AUTO,\n # The type of the loss function, either 'cross_entropy' or 'margin'.\n LOSS_TYPE: CROSS_ENTROPY,\n # Number of top intents to normalize scores for. Applicable with\n # loss type 'cross_entropy' and 'softmax' confidences. Set to 0\n # to turn off normalization.\n RANKING_LENGTH: 10,\n # Indicates how similar the algorithm should try to make embedding vectors\n # for correct labels.\n # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.\n MAX_POS_SIM: 0.8,\n # Maximum negative similarity for incorrect labels.\n # Should be -1.0 < ... 
< 1.0 for 'cosine' similarity type.\n MAX_NEG_SIM: -0.4,\n # If 'True' the algorithm only minimizes maximum similarity over\n # incorrect intent labels, used only if 'loss_type' is set to 'margin'.\n USE_MAX_NEG_SIM: True,\n # If 'True' scale loss inverse proportionally to the confidence\n # of the correct prediction\n SCALE_LOSS: False,\n # ## Regularization parameters\n # The scale of regularization\n REGULARIZATION_CONSTANT: 0.002,\n # The scale of how important is to minimize the maximum similarity\n # between embeddings of different labels,\n # used only if 'loss_type' is set to 'margin'.\n NEGATIVE_MARGIN_SCALE: 0.8,\n # Dropout rate for encoder\n DROP_RATE: 0.2,\n # Dropout rate for attention\n DROP_RATE_ATTENTION: 0,\n # Fraction of trainable weights in internal layers.\n CONNECTION_DENSITY: 0.2,\n # If 'True' apply dropout to sparse input tensors\n SPARSE_INPUT_DROPOUT: True,\n # If 'True' apply dropout to dense input tensors\n DENSE_INPUT_DROPOUT: True,\n # ## Evaluation parameters\n # How often calculate validation accuracy.\n # Small values may hurt performance.\n EVAL_NUM_EPOCHS: 20,\n # How many examples to use for hold out validation set\n # Large values may hurt performance, e.g. model accuracy.\n # Set to 0 for no validation.\n EVAL_NUM_EXAMPLES: 0,\n # ## Model config\n # If 'True' intent classification is trained and intent predicted.\n INTENT_CLASSIFICATION: True,\n # If 'True' named entity recognition is trained and entities predicted.\n ENTITY_RECOGNITION: True,\n # If 'True' random tokens of the input message will be masked and the model\n # should predict those tokens.\n MASKED_LM: False,\n # 'BILOU_flag' determines whether to use BILOU tagging or not.\n # If set to 'True' labelling is more rigorous, however more\n # examples per entity are required.\n # Rule of thumb: you should have more than 100 examples per entity.\n BILOU_FLAG: True,\n # If you want to use tensorboard to visualize training and validation metrics,\n # set this option to a valid output directory.\n TENSORBOARD_LOG_DIR: None,\n # Define when training metrics for tensorboard should be logged.\n # Either after every epoch or for every training step.\n # Valid values: 'epoch' and 'batch'\n TENSORBOARD_LOG_LEVEL: \"epoch\",\n # Perform model checkpointing\n CHECKPOINT_MODEL: False,\n # Specify what features to use as sequence and sentence features\n # By default all features in the pipeline are used.\n FEATURIZERS: [],\n # Split entities by comma, this makes sense e.g. for a list of ingredients\n # in a recipie, but it doesn't make sense for the parts of an address\n SPLIT_ENTITIES_BY_COMMA: True,\n # If 'True' applies sigmoid on all similarity terms and adds\n # it to the loss function to ensure that similarity values are\n # approximately bounded. Used inside softmax loss only.\n CONSTRAIN_SIMILARITIES: False,\n # Model confidence to be returned during inference. 
Possible values -\n # 'softmax' and 'linear_norm'.\n MODEL_CONFIDENCE: SOFTMAX,\n }\n\n # init helpers\n def _check_masked_lm(self) -> None:\n if (\n self.component_config[MASKED_LM]\n and self.component_config[NUM_TRANSFORMER_LAYERS] == 0\n ):\n raise ValueError(\n f\"If number of transformer layers is 0, \"\n f\"'{MASKED_LM}' option should be 'False'.\"\n )\n\n def _check_share_hidden_layers_sizes(self) -> None:\n if self.component_config.get(SHARE_HIDDEN_LAYERS):\n first_hidden_layer_sizes = next(\n iter(self.component_config[HIDDEN_LAYERS_SIZES].values())\n )\n # check that all hidden layer sizes are the same\n identical_hidden_layer_sizes = all(\n current_hidden_layer_sizes == first_hidden_layer_sizes\n for current_hidden_layer_sizes in self.component_config[\n HIDDEN_LAYERS_SIZES\n ].values()\n )\n if not identical_hidden_layer_sizes:\n raise ValueError(\n f\"If hidden layer weights are shared, \"\n f\"{HIDDEN_LAYERS_SIZES} must coincide.\"\n )\n\n def _check_config_parameters(self) -> None:\n self.component_config = train_utils.check_deprecated_options(\n self.component_config\n )\n\n self._check_masked_lm()\n self._check_share_hidden_layers_sizes()\n\n self.component_config = train_utils.update_confidence_type(\n self.component_config\n )\n\n train_utils.validate_configuration_settings(self.component_config)\n\n self.component_config = train_utils.update_deprecated_loss_type(\n self.component_config\n )\n\n self.component_config = train_utils.update_deprecated_sparsity_to_density(\n self.component_config\n )\n\n self.component_config = train_utils.update_similarity_type(\n self.component_config\n )\n self.component_config = train_utils.update_evaluation_parameters(\n self.component_config\n )\n\n # package safety checks\n @classmethod\n def required_packages(cls) -> List[Text]:\n return [\"tensorflow\"]\n\n def __init__(\n self,\n component_config: Optional[Dict[Text, Any]] = None,\n index_label_id_mapping: Optional[Dict[int, Text]] = None,\n entity_tag_specs: Optional[List[EntityTagSpec]] = None,\n model: Optional[RasaModel] = None,\n finetune_mode: bool = False,\n sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,\n ) -> None:\n \"\"\"Declare instance variables with default values.\"\"\"\n if component_config is not None and EPOCHS not in component_config:\n rasa.shared.utils.io.raise_warning(\n f\"Please configure the number of '{EPOCHS}' in your configuration file.\"\n f\" We will change the default value of '{EPOCHS}' in the future to 1. \"\n )\n\n super().__init__(component_config)\n\n self._check_config_parameters()\n\n # transform numbers to labels\n self.index_label_id_mapping = index_label_id_mapping or {}\n\n self._entity_tag_specs = entity_tag_specs\n\n self.model = model\n\n self.tmp_checkpoint_dir = None\n if self.component_config[CHECKPOINT_MODEL]:\n self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())\n\n self._label_data: Optional[RasaModelData] = None\n self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None\n\n self.split_entities_config = self.init_split_entities()\n\n self.finetune_mode = finetune_mode\n self._sparse_feature_sizes = sparse_feature_sizes\n\n if not self.model and self.finetune_mode:\n raise rasa.shared.exceptions.InvalidParameterException(\n f\"{self.__class__.__name__} was instantiated \"\n f\"with `model=None` and `finetune_mode=True`. 
\"\n f\"This is not a valid combination as the component \"\n f\"needs an already instantiated and trained model \"\n f\"to continue training in finetune mode.\"\n )\n\n @property\n def label_key(self) -> Optional[Text]:\n \"\"\"Return key if intent classification is activated.\"\"\"\n return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None\n\n @property\n def label_sub_key(self) -> Optional[Text]:\n \"\"\"Return sub key if intent classification is activated.\"\"\"\n return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None\n\n @staticmethod\n def model_class() -> Type[RasaModel]:\n return DIET\n\n # training data helpers:\n @staticmethod\n def _label_id_index_mapping(\n training_data: TrainingData, attribute: Text\n ) -> Dict[Text, int]:\n \"\"\"Create label_id dictionary.\"\"\"\n\n distinct_label_ids = {\n example.get(attribute) for example in training_data.intent_examples\n } - {None}\n return {\n label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))\n }\n\n @staticmethod\n def _invert_mapping(mapping: Dict) -> Dict:\n return {value: key for key, value in mapping.items()}\n\n def _create_entity_tag_specs(\n self, training_data: TrainingData\n ) -> List[EntityTagSpec]:\n \"\"\"Create entity tag specifications with their respective tag id mappings.\"\"\"\n\n _tag_specs = []\n\n for tag_name in POSSIBLE_TAGS:\n if self.component_config[BILOU_FLAG]:\n tag_id_index_mapping = bilou_utils.build_tag_id_dict(\n training_data, tag_name\n )\n else:\n tag_id_index_mapping = self._tag_id_index_mapping_for(\n tag_name, training_data\n )\n\n if tag_id_index_mapping:\n _tag_specs.append(\n EntityTagSpec(\n tag_name=tag_name,\n tags_to_ids=tag_id_index_mapping,\n ids_to_tags=self._invert_mapping(tag_id_index_mapping),\n num_tags=len(tag_id_index_mapping),\n )\n )\n\n return _tag_specs\n\n @staticmethod\n def _tag_id_index_mapping_for(\n tag_name: Text, training_data: TrainingData\n ) -> Optional[Dict[Text, int]]:\n \"\"\"Create mapping from tag name to id.\"\"\"\n if tag_name == ENTITY_ATTRIBUTE_ROLE:\n distinct_tags = training_data.entity_roles\n elif tag_name == ENTITY_ATTRIBUTE_GROUP:\n distinct_tags = training_data.entity_groups\n else:\n distinct_tags = training_data.entities\n\n distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}\n\n if not distinct_tags:\n return None\n\n tag_id_dict = {\n tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)\n }\n # NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index\n # needed for correct prediction for padding\n tag_id_dict[NO_ENTITY_TAG] = 0\n\n return tag_id_dict\n\n @staticmethod\n def _find_example_for_label(\n label: Text, examples: List[Message], attribute: Text\n ) -> Optional[Message]:\n for ex in examples:\n if ex.get(attribute) == label:\n return ex\n return None\n\n def _check_labels_features_exist(\n self, labels_example: List[Message], attribute: Text\n ) -> bool:\n \"\"\"Checks if all labels have features set.\"\"\"\n\n return all(\n label_example.features_present(\n attribute, self.component_config[FEATURIZERS]\n )\n for label_example in labels_example\n )\n\n def _extract_features(\n self, message: Message, attribute: Text\n ) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:\n\n (\n sparse_sequence_features,\n sparse_sentence_features,\n ) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])\n dense_sequence_features, dense_sentence_features = message.get_dense_features(\n attribute, 
self.component_config[FEATURIZERS]\n )\n\n if dense_sequence_features is not None and sparse_sequence_features is not None:\n if (\n dense_sequence_features.features.shape[0]\n != sparse_sequence_features.features.shape[0]\n ):\n raise ValueError(\n f\"Sequence dimensions for sparse and dense sequence features \"\n f\"don't coincide in '{message.get(TEXT)}'\"\n f\"for attribute '{attribute}'.\"\n )\n if dense_sentence_features is not None and sparse_sentence_features is not None:\n if (\n dense_sentence_features.features.shape[0]\n != sparse_sentence_features.features.shape[0]\n ):\n raise ValueError(\n f\"Sequence dimensions for sparse and dense sentence features \"\n f\"don't coincide in '{message.get(TEXT)}'\"\n f\"for attribute '{attribute}'.\"\n )\n\n # If we don't use the transformer and we don't want to do entity recognition,\n # to speed up training take only the sentence features as feature vector.\n # We would not make use of the sequence anyway in this setup. Carrying over\n # those features to the actual training process takes quite some time.\n if (\n self.component_config[NUM_TRANSFORMER_LAYERS] == 0\n and not self.component_config[ENTITY_RECOGNITION]\n and attribute not in [INTENT, INTENT_RESPONSE_KEY]\n ):\n sparse_sequence_features = None\n dense_sequence_features = None\n\n out = {}\n\n if sparse_sentence_features is not None:\n out[f\"{SPARSE}_{SENTENCE}\"] = sparse_sentence_features.features\n if sparse_sequence_features is not None:\n out[f\"{SPARSE}_{SEQUENCE}\"] = sparse_sequence_features.features\n if dense_sentence_features is not None:\n out[f\"{DENSE}_{SENTENCE}\"] = dense_sentence_features.features\n if dense_sequence_features is not None:\n out[f\"{DENSE}_{SEQUENCE}\"] = dense_sequence_features.features\n\n return out\n\n def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:\n \"\"\"Checks if features have same dimensionality if hidden layers are shared.\"\"\"\n if self.component_config.get(SHARE_HIDDEN_LAYERS):\n num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)\n num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)\n num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)\n num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)\n\n if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (\n 0 < num_text_sequence_features != num_label_sequence_features > 0\n ):\n raise ValueError(\n \"If embeddings are shared text features and label features \"\n \"must coincide. 
Check the output dimensions of previous components.\"\n )\n\n def _extract_labels_precomputed_features(\n self, label_examples: List[Message], attribute: Text = INTENT\n ) -> Tuple[List[FeatureArray], List[FeatureArray]]:\n \"\"\"Collects precomputed encodings.\"\"\"\n features = defaultdict(list)\n\n for e in label_examples:\n label_features = self._extract_features(e, attribute)\n for feature_key, feature_value in label_features.items():\n features[feature_key].append(feature_value)\n sequence_features = []\n sentence_features = []\n for feature_name, feature_value in features.items():\n if SEQUENCE in feature_name:\n sequence_features.append(\n FeatureArray(np.array(feature_value), number_of_dimensions=3)\n )\n else:\n sentence_features.append(\n FeatureArray(np.array(feature_value), number_of_dimensions=3)\n )\n return sequence_features, sentence_features\n\n @staticmethod\n def _compute_default_label_features(\n labels_example: List[Message],\n ) -> List[FeatureArray]:\n \"\"\"Computes one-hot representation for the labels.\"\"\"\n logger.debug(\"No label features found. Computing default label features.\")\n\n eye_matrix = np.eye(len(labels_example), dtype=np.float32)\n # add sequence dimension to one-hot labels\n return [\n FeatureArray(\n np.array([np.expand_dims(a, 0) for a in eye_matrix]),\n number_of_dimensions=3,\n )\n ]\n\n def _create_label_data(\n self,\n training_data: TrainingData,\n label_id_dict: Dict[Text, int],\n attribute: Text,\n ) -> RasaModelData:\n \"\"\"Create matrix with label_ids encoded in rows as bag of words.\n\n Find a training example for each label and get the encoded features\n from the corresponding Message object.\n If the features are already computed, fetch them from the message object\n else compute a one hot encoding for the label as the feature vector.\n \"\"\"\n # Collect one example for each label\n labels_idx_examples = []\n for label_name, idx in label_id_dict.items():\n label_example = self._find_example_for_label(\n label_name, training_data.intent_examples, attribute\n )\n labels_idx_examples.append((idx, label_example))\n\n # Sort the list of tuples based on label_idx\n labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])\n labels_example = [example for (_, example) in labels_idx_examples]\n # Collect features, precomputed if they exist, else compute on the fly\n if self._check_labels_features_exist(labels_example, attribute):\n (\n sequence_features,\n sentence_features,\n ) = self._extract_labels_precomputed_features(labels_example, attribute)\n else:\n sequence_features = None\n sentence_features = self._compute_default_label_features(labels_example)\n\n label_data = RasaModelData()\n label_data.add_features(LABEL, SEQUENCE, sequence_features)\n label_data.add_features(LABEL, SENTENCE, sentence_features)\n if label_data.does_feature_not_exist(\n LABEL, SENTENCE\n ) and label_data.does_feature_not_exist(LABEL, SEQUENCE):\n raise ValueError(\n \"No label features are present. 
Please check your configuration file.\"\n )\n\n label_ids = np.array([idx for (idx, _) in labels_idx_examples])\n # explicitly add last dimension to label_ids\n # to track correctly dynamic sequences\n label_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],\n )\n\n label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)\n\n return label_data\n\n def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:\n feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)\n all_label_features = feature_arrays[0]\n return [\n FeatureArray(\n np.array([all_label_features[label_id] for label_id in label_ids]),\n number_of_dimensions=all_label_features.number_of_dimensions,\n )\n ]\n\n def _create_model_data(\n self,\n training_data: List[Message],\n label_id_dict: Optional[Dict[Text, int]] = None,\n label_attribute: Optional[Text] = None,\n training: bool = True,\n ) -> RasaModelData:\n \"\"\"Prepare data for training and create a RasaModelData object.\"\"\"\n from rasa.utils.tensorflow import model_data_utils\n\n attributes_to_consider = [TEXT]\n if training and self.component_config[INTENT_CLASSIFICATION]:\n # we don't have any intent labels during prediction, just add them during\n # training\n attributes_to_consider.append(label_attribute)\n if (\n training\n and self.component_config[ENTITY_RECOGNITION]\n and self._entity_tag_specs\n ):\n # Add entities as labels only during training and only if there was\n # training data added for entities with DIET configured to predict entities.\n attributes_to_consider.append(ENTITIES)\n\n if training and label_attribute is not None:\n # only use those training examples that have the label_attribute set\n # during training\n training_data = [\n example for example in training_data if label_attribute in example.data\n ]\n\n if not training_data:\n # no training data are present to train\n return RasaModelData()\n\n (\n features_for_examples,\n sparse_feature_sizes,\n ) = model_data_utils.featurize_training_examples(\n training_data,\n attributes_to_consider,\n entity_tag_specs=self._entity_tag_specs,\n featurizers=self.component_config[FEATURIZERS],\n bilou_tagging=self.component_config[BILOU_FLAG],\n )\n attribute_data, _ = model_data_utils.convert_to_data_format(\n features_for_examples, consider_dialogue_dimension=False\n )\n\n model_data = RasaModelData(\n label_key=self.label_key, label_sub_key=self.label_sub_key\n )\n model_data.add_data(attribute_data)\n model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)\n # Current implementation doesn't yet account for updating sparse\n # feature sizes of label attributes. 
That's why we remove them.\n sparse_feature_sizes = self._remove_label_sparse_feature_sizes(\n sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute\n )\n model_data.add_sparse_feature_sizes(sparse_feature_sizes)\n\n self._add_label_features(\n model_data, training_data, label_attribute, label_id_dict, training\n )\n\n # make sure all keys are in the same order during training and prediction\n # as we rely on the order of key and sub-key when constructing the actual\n # tensors from the model data\n model_data.sort()\n\n return model_data\n\n @staticmethod\n def _remove_label_sparse_feature_sizes(\n sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],\n label_attribute: Optional[Text] = None,\n ) -> Dict[Text, Dict[Text, List[int]]]:\n\n if label_attribute in sparse_feature_sizes:\n del sparse_feature_sizes[label_attribute]\n return sparse_feature_sizes\n\n def _add_label_features(\n self,\n model_data: RasaModelData,\n training_data: List[Message],\n label_attribute: Text,\n label_id_dict: Dict[Text, int],\n training: bool = True,\n ) -> None:\n label_ids = []\n if training and self.component_config[INTENT_CLASSIFICATION]:\n for example in training_data:\n if example.get(label_attribute):\n label_ids.append(label_id_dict[example.get(label_attribute)])\n # explicitly add last dimension to label_ids\n # to track correctly dynamic sequences\n model_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],\n )\n\n if (\n label_attribute\n and model_data.does_feature_not_exist(label_attribute, SENTENCE)\n and model_data.does_feature_not_exist(label_attribute, SEQUENCE)\n ):\n # no label features are present, get default features from _label_data\n model_data.add_features(\n LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))\n )\n\n # as label_attribute can have different values, e.g. 
INTENT or RESPONSE,\n # copy over the features to the LABEL key to make\n # it easier to access the label features inside the model itself\n model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)\n model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)\n model_data.update_key(label_attribute, MASK, LABEL, MASK)\n\n model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)\n\n # train helpers\n def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:\n \"\"\"Prepares data for training.\n\n Performs sanity checks on training data, extracts encodings for labels.\n \"\"\"\n if self.component_config[BILOU_FLAG]:\n bilou_utils.apply_bilou_schema(training_data)\n\n label_id_index_mapping = self._label_id_index_mapping(\n training_data, attribute=INTENT\n )\n\n if not label_id_index_mapping:\n # no labels are present to train\n return RasaModelData()\n\n self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)\n\n self._label_data = self._create_label_data(\n training_data, label_id_index_mapping, attribute=INTENT\n )\n\n self._entity_tag_specs = self._create_entity_tag_specs(training_data)\n\n label_attribute = (\n INTENT if self.component_config[INTENT_CLASSIFICATION] else None\n )\n model_data = self._create_model_data(\n training_data.nlu_examples,\n label_id_index_mapping,\n label_attribute=label_attribute,\n )\n\n self._check_input_dimension_consistency(model_data)\n\n return model_data\n\n @staticmethod\n def _check_enough_labels(model_data: RasaModelData) -> bool:\n return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2\n\n def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Train the embedding intent classifier on a data set.\"\"\"\n model_data = self.preprocess_train_data(training_data)\n if model_data.is_empty():\n logger.debug(\n f\"Cannot train '{self.__class__.__name__}'. No data was provided. \"\n f\"Skipping training of the classifier.\"\n )\n return\n if self.component_config.get(INTENT_CLASSIFICATION):\n if not self._check_enough_labels(model_data):\n logger.error(\n f\"Cannot train '{self.__class__.__name__}'. \"\n f\"Need at least 2 different intent classes. \"\n f\"Skipping training of classifier.\"\n )\n return\n if self.component_config.get(ENTITY_RECOGNITION):\n self.check_correct_entity_annotations(training_data)\n\n # keep one example for persisting and loading\n self._data_example = model_data.first_data_example()\n\n if not self.finetune_mode:\n # No pre-trained model to load from. 
Create a new instance of the model.\n self.model = self._instantiate_model_class(model_data)\n self.model.compile(\n optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])\n )\n else:\n self.model.adjust_for_incremental_training(\n data_example=self._data_example,\n new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),\n old_sparse_feature_sizes=self._sparse_feature_sizes,\n )\n self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()\n\n data_generator, validation_data_generator = train_utils.create_data_generators(\n model_data,\n self.component_config[BATCH_SIZES],\n self.component_config[EPOCHS],\n self.component_config[BATCH_STRATEGY],\n self.component_config[EVAL_NUM_EXAMPLES],\n self.component_config[RANDOM_SEED],\n )\n callbacks = train_utils.create_common_callbacks(\n self.component_config[EPOCHS],\n self.component_config[TENSORBOARD_LOG_DIR],\n self.component_config[TENSORBOARD_LOG_LEVEL],\n self.tmp_checkpoint_dir,\n )\n\n self.model.fit(\n data_generator,\n epochs=self.component_config[EPOCHS],\n validation_data=validation_data_generator,\n validation_freq=self.component_config[EVAL_NUM_EPOCHS],\n callbacks=callbacks,\n verbose=False,\n shuffle=False, # we use custom shuffle inside data generator\n )\n\n # process helpers\n def _predict(\n self, message: Message\n ) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:\n if self.model is None:\n logger.debug(\n f\"There is no trained model for '{self.__class__.__name__}': The \"\n f\"component is either not trained or didn't receive enough training \"\n f\"data.\"\n )\n return None\n\n # create session data from message and convert it into a batch of 1\n model_data = self._create_model_data([message], training=False)\n return self.model.run_inference(model_data)\n\n def _predict_label(\n self, predict_out: Optional[Dict[Text, tf.Tensor]]\n ) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:\n \"\"\"Predicts the intent of the provided message.\"\"\"\n label: Dict[Text, Any] = {\"name\": None, \"id\": None, \"confidence\": 0.0}\n label_ranking = []\n\n if predict_out is None:\n return label, label_ranking\n\n message_sim = predict_out[\"i_scores\"]\n\n message_sim = message_sim.flatten() # sim is a matrix\n\n label_ids = message_sim.argsort()[::-1]\n\n if (\n self.component_config[RANKING_LENGTH] > 0\n and self.component_config[MODEL_CONFIDENCE] == SOFTMAX\n ):\n # TODO: This should be removed in 3.0 when softmax as\n # model confidence and normalization is completely deprecated.\n message_sim = train_utils.normalize(\n message_sim, self.component_config[RANKING_LENGTH]\n )\n message_sim[::-1].sort()\n message_sim = message_sim.tolist()\n\n # if X contains all zeros do not predict some label\n if label_ids.size > 0:\n label = {\n \"id\": hash(self.index_label_id_mapping[label_ids[0]]),\n \"name\": self.index_label_id_mapping[label_ids[0]],\n \"confidence\": message_sim[0],\n }\n\n if (\n self.component_config[RANKING_LENGTH]\n and 0 < self.component_config[RANKING_LENGTH] < LABEL_RANKING_LENGTH\n ):\n output_length = self.component_config[RANKING_LENGTH]\n else:\n output_length = LABEL_RANKING_LENGTH\n\n ranking = list(zip(list(label_ids), message_sim))\n ranking = ranking[:output_length]\n label_ranking = [\n {\n \"id\": hash(self.index_label_id_mapping[label_idx]),\n \"name\": self.index_label_id_mapping[label_idx],\n \"confidence\": score,\n }\n for label_idx, score in ranking\n ]\n\n return label, label_ranking\n\n def _predict_entities(\n self, predict_out: 
Optional[Dict[Text, tf.Tensor]], message: Message\n ) -> List[Dict]:\n if predict_out is None:\n return []\n\n predicted_tags, confidence_values = train_utils.entity_label_to_tags(\n predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]\n )\n\n entities = self.convert_predictions_into_entities(\n message.get(TEXT),\n message.get(TOKENS_NAMES[TEXT], []),\n predicted_tags,\n self.split_entities_config,\n confidence_values,\n )\n\n entities = self.add_extractor_name(entities)\n entities = message.get(ENTITIES, []) + entities\n\n return entities\n\n def process(self, message: Message, **kwargs: Any) -> None:\n \"\"\"Augments the message with intents, entities, and diagnostic data.\"\"\"\n out = self._predict(message)\n\n if self.component_config[INTENT_CLASSIFICATION]:\n label, label_ranking = self._predict_label(out)\n\n message.set(INTENT, label, add_to_output=True)\n message.set(\"intent_ranking\", label_ranking, add_to_output=True)\n\n if self.component_config[ENTITY_RECOGNITION]:\n entities = self._predict_entities(out, message)\n\n message.set(ENTITIES, entities, add_to_output=True)\n\n if out and DIAGNOSTIC_DATA in out:\n message.add_diagnostic_data(self.unique_name, out.get(DIAGNOSTIC_DATA))\n\n def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]:\n \"\"\"Persist this model into the passed directory.\n\n Return the metadata necessary to load the model again.\n \"\"\"\n import shutil\n\n if self.model is None:\n return {\"file\": None}\n\n model_dir_path = Path(model_dir)\n tf_model_file = model_dir_path / f\"{file_name}.tf_model\"\n\n rasa.shared.utils.io.create_directory_for_file(tf_model_file)\n\n if self.component_config[CHECKPOINT_MODEL]:\n shutil.move(self.tmp_checkpoint_dir, model_dir_path / \"checkpoints\")\n self.model.save(str(tf_model_file))\n\n io_utils.pickle_dump(\n model_dir_path / f\"{file_name}.data_example.pkl\", self._data_example\n )\n io_utils.pickle_dump(\n model_dir_path / f\"{file_name}.sparse_feature_sizes.pkl\",\n self._sparse_feature_sizes,\n )\n io_utils.pickle_dump(\n model_dir_path / f\"{file_name}.label_data.pkl\", dict(self._label_data.data)\n )\n io_utils.json_pickle(\n model_dir_path / f\"{file_name}.index_label_id_mapping.json\",\n self.index_label_id_mapping,\n )\n\n entity_tag_specs = (\n [tag_spec._asdict() for tag_spec in self._entity_tag_specs]\n if self._entity_tag_specs\n else []\n )\n rasa.shared.utils.io.dump_obj_as_json_to_file(\n model_dir_path / f\"{file_name}.entity_tag_specs.json\", entity_tag_specs\n )\n\n return {\"file\": file_name}\n\n @classmethod\n def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Text,\n model_metadata: Metadata = None,\n cached_component: Optional[\"DIETClassifier\"] = None,\n should_finetune: bool = False,\n **kwargs: Any,\n ) -> \"DIETClassifier\":\n \"\"\"Loads the trained model from the provided directory.\"\"\"\n if not meta.get(\"file\"):\n logger.debug(\n f\"Failed to load model for '{cls.__name__}'. 
\"\n f\"Maybe you did not provide enough training data and no model was \"\n f\"trained or the path '{os.path.abspath(model_dir)}' doesn't exist?\"\n )\n return cls(component_config=meta)\n\n (\n index_label_id_mapping,\n entity_tag_specs,\n label_data,\n meta,\n data_example,\n sparse_feature_sizes,\n ) = cls._load_from_files(meta, model_dir)\n\n meta = train_utils.override_defaults(cls.defaults, meta)\n meta = train_utils.update_confidence_type(meta)\n meta = train_utils.update_similarity_type(meta)\n meta = train_utils.update_deprecated_loss_type(meta)\n\n model = cls._load_model(\n entity_tag_specs,\n label_data,\n meta,\n data_example,\n model_dir,\n finetune_mode=should_finetune,\n )\n\n return cls(\n component_config=meta,\n index_label_id_mapping=index_label_id_mapping,\n entity_tag_specs=entity_tag_specs,\n model=model,\n finetune_mode=should_finetune,\n sparse_feature_sizes=sparse_feature_sizes,\n )\n\n @classmethod\n def _load_from_files(\n cls, meta: Dict[Text, Any], model_dir: Text\n ) -> Tuple[\n Dict[int, Text],\n List[EntityTagSpec],\n RasaModelData,\n Dict[Text, Any],\n Dict[Text, Dict[Text, List[FeatureArray]]],\n Dict[Text, Dict[Text, List[int]]],\n ]:\n file_name = meta[\"file\"]\n\n model_dir_path = Path(model_dir)\n\n data_example = io_utils.pickle_load(\n model_dir_path / f\"{file_name}.data_example.pkl\"\n )\n label_data = io_utils.pickle_load(\n model_dir_path / f\"{file_name}.label_data.pkl\"\n )\n label_data = RasaModelData(data=label_data)\n sparse_feature_sizes = io_utils.pickle_load(\n model_dir_path / f\"{file_name}.sparse_feature_sizes.pkl\"\n )\n index_label_id_mapping = io_utils.json_unpickle(\n model_dir_path / f\"{file_name}.index_label_id_mapping.json\"\n )\n entity_tag_specs = rasa.shared.utils.io.read_json_file(\n model_dir_path / f\"{file_name}.entity_tag_specs.json\"\n )\n entity_tag_specs = [\n EntityTagSpec(\n tag_name=tag_spec[\"tag_name\"],\n ids_to_tags={\n int(key): value for key, value in tag_spec[\"ids_to_tags\"].items()\n },\n tags_to_ids={\n key: int(value) for key, value in tag_spec[\"tags_to_ids\"].items()\n },\n num_tags=tag_spec[\"num_tags\"],\n )\n for tag_spec in entity_tag_specs\n ]\n\n # jsonpickle converts dictionary keys to strings\n index_label_id_mapping = {\n int(key): value for key, value in index_label_id_mapping.items()\n }\n\n return (\n index_label_id_mapping,\n entity_tag_specs,\n label_data,\n meta,\n data_example,\n sparse_feature_sizes,\n )\n\n @classmethod\n def _load_model(\n cls,\n entity_tag_specs: List[EntityTagSpec],\n label_data: RasaModelData,\n meta: Dict[Text, Any],\n data_example: Dict[Text, Dict[Text, List[FeatureArray]]],\n model_dir: Text,\n finetune_mode: bool = False,\n ) -> \"RasaModel\":\n file_name = meta[\"file\"]\n tf_model_file = os.path.join(model_dir, file_name + \".tf_model\")\n\n label_key = LABEL_KEY if meta[INTENT_CLASSIFICATION] else None\n label_sub_key = LABEL_SUB_KEY if meta[INTENT_CLASSIFICATION] else None\n\n model_data_example = RasaModelData(\n label_key=label_key, label_sub_key=label_sub_key, data=data_example\n )\n\n model = cls._load_model_class(\n tf_model_file,\n model_data_example,\n label_data,\n entity_tag_specs,\n meta,\n finetune_mode=finetune_mode,\n )\n\n return model\n\n @classmethod\n def _load_model_class(\n cls,\n tf_model_file: Text,\n model_data_example: RasaModelData,\n label_data: RasaModelData,\n entity_tag_specs: List[EntityTagSpec],\n meta: Dict[Text, Any],\n finetune_mode: bool,\n ) -> \"RasaModel\":\n\n predict_data_example = RasaModelData(\n 
label_key=model_data_example.label_key,\n data={\n feature_name: features\n for feature_name, features in model_data_example.items()\n if TEXT in feature_name\n },\n )\n\n return cls.model_class().load(\n tf_model_file,\n model_data_example,\n predict_data_example,\n data_signature=model_data_example.get_signature(),\n label_data=label_data,\n entity_tag_specs=entity_tag_specs,\n config=copy.deepcopy(meta),\n finetune_mode=finetune_mode,\n )\n\n def _instantiate_model_class(self, model_data: RasaModelData) -> \"RasaModel\":\n return self.model_class()(\n data_signature=model_data.get_signature(),\n label_data=self._label_data,\n entity_tag_specs=self._entity_tag_specs,\n config=self.component_config,\n )\n\n\nclass DIET(TransformerRasaModel):\n def __init__(\n self,\n data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],\n label_data: RasaModelData,\n entity_tag_specs: Optional[List[EntityTagSpec]],\n config: Dict[Text, Any],\n ) -> None:\n # create entity tag spec before calling super otherwise building the model\n # will fail\n super().__init__(\"DIET\", config, data_signature, label_data)\n self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)\n\n self.predict_data_signature = {\n feature_name: features\n for feature_name, features in data_signature.items()\n if TEXT in feature_name\n }\n\n # tf training\n self._create_metrics()\n self._update_metrics_to_log()\n\n # needed for efficient prediction\n self.all_labels_embed: Optional[tf.Tensor] = None\n\n self._prepare_layers()\n\n @staticmethod\n def _ordered_tag_specs(\n entity_tag_specs: Optional[List[EntityTagSpec]],\n ) -> List[EntityTagSpec]:\n \"\"\"Ensure that order of entity tag specs matches CRF layer order.\"\"\"\n if entity_tag_specs is None:\n return []\n\n crf_order = [\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_ATTRIBUTE_ROLE,\n ENTITY_ATTRIBUTE_GROUP,\n ]\n\n ordered_tag_spec = []\n\n for tag_name in crf_order:\n for tag_spec in entity_tag_specs:\n if tag_name == tag_spec.tag_name:\n ordered_tag_spec.append(tag_spec)\n\n return ordered_tag_spec\n\n def _check_data(self) -> None:\n if TEXT not in self.data_signature:\n raise InvalidConfigException(\n f\"No text features specified. \"\n f\"Cannot train '{self.__class__.__name__}' model.\"\n )\n if self.config[INTENT_CLASSIFICATION]:\n if LABEL not in self.data_signature:\n raise InvalidConfigException(\n f\"No label features specified. \"\n f\"Cannot train '{self.__class__.__name__}' model.\"\n )\n\n if self.config[SHARE_HIDDEN_LAYERS]:\n different_sentence_signatures = False\n different_sequence_signatures = False\n if (\n SENTENCE in self.data_signature[TEXT]\n and SENTENCE in self.data_signature[LABEL]\n ):\n different_sentence_signatures = (\n self.data_signature[TEXT][SENTENCE]\n != self.data_signature[LABEL][SENTENCE]\n )\n if (\n SEQUENCE in self.data_signature[TEXT]\n and SEQUENCE in self.data_signature[LABEL]\n ):\n different_sequence_signatures = (\n self.data_signature[TEXT][SEQUENCE]\n != self.data_signature[LABEL][SEQUENCE]\n )\n\n if different_sentence_signatures or different_sequence_signatures:\n raise ValueError(\n \"If hidden layer weights are shared, data signatures \"\n \"for text_features and label_features must coincide.\"\n )\n\n if self.config[ENTITY_RECOGNITION] and (\n ENTITIES not in self.data_signature\n or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]\n ):\n logger.debug(\n f\"You specified '{self.__class__.__name__}' to train entities, but \"\n f\"no entities are present in the training data. 
Skipping training of \"\n f\"entities.\"\n )\n self.config[ENTITY_RECOGNITION] = False\n\n def _create_metrics(self) -> None:\n # self.metrics will have the same order as they are created\n # so create loss metrics first to output losses first\n self.mask_loss = tf.keras.metrics.Mean(name=\"m_loss\")\n self.intent_loss = tf.keras.metrics.Mean(name=\"i_loss\")\n self.entity_loss = tf.keras.metrics.Mean(name=\"e_loss\")\n self.entity_group_loss = tf.keras.metrics.Mean(name=\"g_loss\")\n self.entity_role_loss = tf.keras.metrics.Mean(name=\"r_loss\")\n # create accuracy metrics second to output accuracies second\n self.mask_acc = tf.keras.metrics.Mean(name=\"m_acc\")\n self.intent_acc = tf.keras.metrics.Mean(name=\"i_acc\")\n self.entity_f1 = tf.keras.metrics.Mean(name=\"e_f1\")\n self.entity_group_f1 = tf.keras.metrics.Mean(name=\"g_f1\")\n self.entity_role_f1 = tf.keras.metrics.Mean(name=\"r_f1\")\n\n def _update_metrics_to_log(self) -> None:\n debug_log_level = logging.getLogger(\"rasa\").level == logging.DEBUG\n\n if self.config[MASKED_LM]:\n self.metrics_to_log.append(\"m_acc\")\n if debug_log_level:\n self.metrics_to_log.append(\"m_loss\")\n if self.config[INTENT_CLASSIFICATION]:\n self.metrics_to_log.append(\"i_acc\")\n if debug_log_level:\n self.metrics_to_log.append(\"i_loss\")\n if self.config[ENTITY_RECOGNITION]:\n for tag_spec in self._entity_tag_specs:\n if tag_spec.num_tags != 0:\n name = tag_spec.tag_name\n self.metrics_to_log.append(f\"{name[0]}_f1\")\n if debug_log_level:\n self.metrics_to_log.append(f\"{name[0]}_loss\")\n\n self._log_metric_info()\n\n def _log_metric_info(self) -> None:\n metric_name = {\n \"t\": \"total\",\n \"i\": \"intent\",\n \"e\": \"entity\",\n \"m\": \"mask\",\n \"r\": \"role\",\n \"g\": \"group\",\n }\n logger.debug(\"Following metrics will be logged during training: \")\n for metric in self.metrics_to_log:\n parts = metric.split(\"_\")\n name = f\"{metric_name[parts[0]]} {parts[1]}\"\n logger.debug(f\" {metric} ({name})\")\n\n def _prepare_layers(self) -> None:\n # For user text, prepare layers that combine different feature types, embed\n # everything using a transformer and optionally also do masked language\n # modeling.\n self.text_name = TEXT\n self._tf_layers[\n f\"sequence_layer.{self.text_name}\"\n ] = rasa_layers.RasaSequenceLayer(\n self.text_name, self.data_signature[self.text_name], self.config\n )\n if self.config[MASKED_LM]:\n self._prepare_mask_lm_loss(self.text_name)\n\n # Intent labels are treated similarly to user text but without the transformer,\n # without masked language modelling, and with no dropout applied to the\n # individual features, only to the overall label embedding after all label\n # features have been combined.\n if self.config[INTENT_CLASSIFICATION]:\n self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL\n\n # disable input dropout applied to sparse and dense label features\n label_config = self.config.copy()\n label_config.update(\n {SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}\n )\n\n self._tf_layers[\n f\"feature_combining_layer.{self.label_name}\"\n ] = rasa_layers.RasaFeatureCombiningLayer(\n self.label_name, self.label_signature[self.label_name], label_config\n )\n\n self._prepare_ffnn_layer(\n self.label_name,\n self.config[HIDDEN_LAYERS_SIZES][self.label_name],\n self.config[DROP_RATE],\n )\n\n self._prepare_label_classification_layers(predictor_attribute=TEXT)\n\n if self.config[ENTITY_RECOGNITION]:\n self._prepare_entity_recognition_layers()\n\n def _prepare_mask_lm_loss(self, 
name: Text) -> None:\n # for embedding predicted tokens at masked positions\n self._prepare_embed_layers(f\"{name}_lm_mask\")\n\n # for embedding the true tokens that got masked\n self._prepare_embed_layers(f\"{name}_golden_token\")\n\n # mask loss is additional loss\n # set scaling to False, so that it doesn't overpower other losses\n self._prepare_dot_product_loss(f\"{name}_mask\", scale_loss=False)\n\n def _create_bow(\n self,\n sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],\n sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],\n sequence_feature_lengths: tf.Tensor,\n name: Text,\n ) -> tf.Tensor:\n\n x, _ = self._tf_layers[f\"feature_combining_layer.{name}\"](\n (sequence_features, sentence_features, sequence_feature_lengths),\n training=self._training,\n )\n\n # convert to bag-of-words by summing along the sequence dimension\n x = tf.reduce_sum(x, axis=1)\n\n return self._tf_layers[f\"ffnn.{name}\"](x, self._training)\n\n def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:\n all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]\n\n sequence_feature_lengths = self._get_sequence_feature_lengths(\n self.tf_label_data, LABEL\n )\n\n x = self._create_bow(\n self.tf_label_data[LABEL][SEQUENCE],\n self.tf_label_data[LABEL][SENTENCE],\n sequence_feature_lengths,\n self.label_name,\n )\n all_labels_embed = self._tf_layers[f\"embed.{LABEL}\"](x)\n\n return all_label_ids, all_labels_embed\n\n def _mask_loss(\n self,\n outputs: tf.Tensor,\n inputs: tf.Tensor,\n seq_ids: tf.Tensor,\n mlm_mask_boolean: tf.Tensor,\n name: Text,\n ) -> tf.Tensor:\n # make sure there is at least one element in the mask\n mlm_mask_boolean = tf.cond(\n tf.reduce_any(mlm_mask_boolean),\n lambda: mlm_mask_boolean,\n lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),\n )\n\n mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)\n\n # Pick elements that were masked, throwing away the batch & sequence dimension\n # and effectively switching from shape (batch_size, sequence_length, units) to\n # (num_masked_elements, units).\n outputs = tf.boolean_mask(outputs, mlm_mask_boolean)\n inputs = tf.boolean_mask(inputs, mlm_mask_boolean)\n ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)\n\n tokens_predicted_embed = self._tf_layers[f\"embed.{name}_lm_mask\"](outputs)\n tokens_true_embed = self._tf_layers[f\"embed.{name}_golden_token\"](inputs)\n\n # To limit the otherwise computationally expensive loss calculation, we\n # constrain the label space in MLM (i.e. token space) to only those tokens that\n # were masked in this batch. Hence the reduced list of token embeddings\n # (tokens_true_embed) and the reduced list of labels (ids) are passed as\n # all_labels_embed and all_labels, respectively. 
In the future, we could be less\n # restrictive and construct a slightly bigger label space which could include\n # tokens not masked in the current batch too.\n return self._tf_layers[f\"loss.{name}_mask\"](\n inputs_embed=tokens_predicted_embed,\n labels_embed=tokens_true_embed,\n labels=ids,\n all_labels_embed=tokens_true_embed,\n all_labels=ids,\n )\n\n def _calculate_label_loss(\n self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor\n ) -> tf.Tensor:\n all_label_ids, all_labels_embed = self._create_all_labels()\n\n text_embed = self._tf_layers[f\"embed.{TEXT}\"](text_features)\n label_embed = self._tf_layers[f\"embed.{LABEL}\"](label_features)\n\n return self._tf_layers[f\"loss.{LABEL}\"](\n text_embed, label_embed, label_ids, all_labels_embed, all_label_ids\n )\n\n def batch_loss(\n self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]\n ) -> tf.Tensor:\n \"\"\"Calculates the loss for the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The loss of the given batch.\n \"\"\"\n tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)\n\n sequence_feature_lengths = self._get_sequence_feature_lengths(\n tf_batch_data, TEXT\n )\n\n (\n text_transformed,\n text_in,\n mask_combined_sequence_sentence,\n text_seq_ids,\n mlm_mask_boolean_text,\n _,\n ) = self._tf_layers[f\"sequence_layer.{self.text_name}\"](\n (\n tf_batch_data[TEXT][SEQUENCE],\n tf_batch_data[TEXT][SENTENCE],\n sequence_feature_lengths,\n ),\n training=self._training,\n )\n\n losses = []\n\n # Lengths of sequences in case of sentence-level features are always 1, but they\n # can effectively be 0 if sentence-level features aren't present.\n sentence_feature_lengths = self._get_sentence_feature_lengths(\n tf_batch_data, TEXT\n )\n\n combined_sequence_sentence_feature_lengths = (\n sequence_feature_lengths + sentence_feature_lengths\n )\n\n if self.config[MASKED_LM]:\n loss, acc = self._mask_loss(\n text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT\n )\n self.mask_loss.update_state(loss)\n self.mask_acc.update_state(acc)\n losses.append(loss)\n\n if self.config[INTENT_CLASSIFICATION]:\n loss = self._batch_loss_intent(\n combined_sequence_sentence_feature_lengths,\n text_transformed,\n tf_batch_data,\n )\n losses.append(loss)\n\n if self.config[ENTITY_RECOGNITION]:\n losses += self._batch_loss_entities(\n mask_combined_sequence_sentence,\n sequence_feature_lengths,\n text_transformed,\n tf_batch_data,\n )\n\n return tf.math.add_n(losses)\n\n def _batch_loss_intent(\n self,\n combined_sequence_sentence_feature_lengths_text: tf.Tensor,\n text_transformed: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n ) -> tf.Tensor:\n # get sentence features vector for intent classification\n sentence_vector = self._last_token(\n text_transformed, combined_sequence_sentence_feature_lengths_text\n )\n\n sequence_feature_lengths_label = self._get_sequence_feature_lengths(\n tf_batch_data, LABEL\n )\n\n label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]\n label = self._create_bow(\n tf_batch_data[LABEL][SEQUENCE],\n tf_batch_data[LABEL][SENTENCE],\n sequence_feature_lengths_label,\n self.label_name,\n )\n loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)\n\n self._update_label_metrics(loss, acc)\n\n return loss\n\n def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:\n\n self.intent_loss.update_state(loss)\n self.intent_acc.update_state(acc)\n\n def _batch_loss_entities(\n self,\n 
mask_combined_sequence_sentence: tf.Tensor,\n sequence_feature_lengths: tf.Tensor,\n text_transformed: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n ) -> List[tf.Tensor]:\n losses = []\n\n entity_tags = None\n\n for tag_spec in self._entity_tag_specs:\n if tag_spec.num_tags == 0:\n continue\n\n tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]\n # add a zero (no entity) for the sentence features to match the shape of\n # inputs\n tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])\n\n loss, f1, _logits = self._calculate_entity_loss(\n text_transformed,\n tag_ids,\n mask_combined_sequence_sentence,\n sequence_feature_lengths,\n tag_spec.tag_name,\n entity_tags,\n )\n\n if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:\n # use the entity tags as additional input for the role\n # and group CRF\n entity_tags = tf.one_hot(\n tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags\n )\n\n self._update_entity_metrics(loss, f1, tag_spec.tag_name)\n\n losses.append(loss)\n\n return losses\n\n def _update_entity_metrics(\n self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text\n ) -> None:\n if tag_name == ENTITY_ATTRIBUTE_TYPE:\n self.entity_loss.update_state(loss)\n self.entity_f1.update_state(f1)\n elif tag_name == ENTITY_ATTRIBUTE_GROUP:\n self.entity_group_loss.update_state(loss)\n self.entity_group_f1.update_state(f1)\n elif tag_name == ENTITY_ATTRIBUTE_ROLE:\n self.entity_role_loss.update_state(loss)\n self.entity_role_f1.update_state(f1)\n\n def prepare_for_predict(self) -> None:\n \"\"\"Prepares the model for prediction.\"\"\"\n if self.config[INTENT_CLASSIFICATION]:\n _, self.all_labels_embed = self._create_all_labels()\n\n def batch_predict(\n self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]\n ) -> Dict[Text, tf.Tensor]:\n \"\"\"Predicts the output of the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The output to predict.\n \"\"\"\n tf_batch_data = self.batch_to_model_data_format(\n batch_in, self.predict_data_signature\n )\n\n sequence_feature_lengths = self._get_sequence_feature_lengths(\n tf_batch_data, TEXT\n )\n sentence_feature_lengths = self._get_sentence_feature_lengths(\n tf_batch_data, TEXT,\n )\n\n text_transformed, _, _, _, _, attention_weights = self._tf_layers[\n f\"sequence_layer.{self.text_name}\"\n ](\n (\n tf_batch_data[TEXT][SEQUENCE],\n tf_batch_data[TEXT][SENTENCE],\n sequence_feature_lengths,\n ),\n training=self._training,\n )\n predictions = {\n DIAGNOSTIC_DATA: {\n \"attention_weights\": attention_weights,\n \"text_transformed\": text_transformed,\n }\n }\n\n if self.config[INTENT_CLASSIFICATION]:\n predictions.update(\n self._batch_predict_intents(\n sequence_feature_lengths + sentence_feature_lengths,\n text_transformed,\n )\n )\n\n if self.config[ENTITY_RECOGNITION]:\n predictions.update(\n self._batch_predict_entities(sequence_feature_lengths, text_transformed)\n )\n\n return predictions\n\n def _batch_predict_entities(\n self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor\n ) -> Dict[Text, tf.Tensor]:\n predictions: Dict[Text, tf.Tensor] = {}\n\n entity_tags = None\n\n for tag_spec in self._entity_tag_specs:\n # skip crf layer if it was not trained\n if tag_spec.num_tags == 0:\n continue\n\n name = tag_spec.tag_name\n _input = text_transformed\n\n if entity_tags is not None:\n _tags = self._tf_layers[f\"embed.{name}.tags\"](entity_tags)\n _input = tf.concat([_input, _tags], axis=-1)\n\n _logits = self._tf_layers[f\"embed.{name}.logits\"](_input)\n pred_ids, confidences = 
self._tf_layers[f\"crf.{name}\"](\n _logits, sequence_feature_lengths\n )\n\n predictions[f\"e_{name}_ids\"] = pred_ids\n predictions[f\"e_{name}_scores\"] = confidences\n\n if name == ENTITY_ATTRIBUTE_TYPE:\n # use the entity tags as additional input for the role\n # and group CRF\n entity_tags = tf.one_hot(\n tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags\n )\n\n return predictions\n\n def _batch_predict_intents(\n self,\n combined_sequence_sentence_feature_lengths: tf.Tensor,\n text_transformed: tf.Tensor,\n ) -> Dict[Text, tf.Tensor]:\n\n if self.all_labels_embed is None:\n raise ValueError(\n \"The model was not prepared for prediction. \"\n \"Call `prepare_for_predict` first.\"\n )\n\n # get sentence feature vector for intent classification\n sentence_vector = self._last_token(\n text_transformed, combined_sequence_sentence_feature_lengths\n )\n sentence_vector_embed = self._tf_layers[f\"embed.{TEXT}\"](sentence_vector)\n\n _, scores = self._tf_layers[\n f\"loss.{LABEL}\"\n ].get_similarities_and_confidences_from_embeddings(\n sentence_vector_embed[:, tf.newaxis, :],\n self.all_labels_embed[tf.newaxis, :, :],\n )\n\n return {\"i_scores\": scores}\n" ]
[ [ "numpy.zeros" ], [ "tensorflow.boolean_mask", "numpy.expand_dims", "tensorflow.concat", "tensorflow.shape", "tensorflow.reduce_any", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.squeeze", "tensorflow.math.add_n", "tensorflow.keras.optimizers.Adam", "tensorflow.pad", "numpy.array", "tensorflow.keras.metrics.Mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pavpanchekha/bitrate-lab
[ "f9f804ad08bb544a90d5191d3db3f78398e1f51a" ]
[ "plots/bar.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 4\nsamplerate1 = (16.474, 13.585, 5.42, 16.138, 7.455)\nminstrel1 = (12.653, 10.208, 7.587, 10.867, 8.430)\nminproved1 = (17.037, 14.879, 11.107, 15.846, 12.162)\n\nsamplerate2 = (13.107, 9.688, 7.982, 13.894)\nminstrel2 = (11.575, 10.837, 8.320, 11.729)\nminproved2 = (16.869, 15.156, 12.570, 16.292)\n\n\n\nind = np.arange(N) # the x locations for the groups\nwidth = 0.25 # the width of the bars\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nsr1 = ax.bar(ind, samplerate2, width, color='r')\nmn1 = ax.bar(ind+width, minstrel2, width, color='b')\nmp1 = ax.bar(ind+2*width, minproved2, width, color='y')\n\n# add axis limits, labels, tick labels and the legend\nax.set_ylim([0,20])\nax.set_xlim([-0.5, 6])\nax.set_ylabel('Throughput in Mbps')\nax.set_xticks(ind+width+width)\nax.set_xticklabels( ('clear', 'moving', 'corner', 'interference') )\n\nax.legend( (mn1[0], sr1[0], mp1[0]), ('Minstrel', 'SampleRate', 'Minproved') )\n\ndef autolabel(rects):\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%r' % (round(height, 1)),\n ha='center', va='bottom', rotation=60)\n\nautolabel(mn1)\nautolabel(sr1)\nautolabel(mp1)\n\nplt.show()\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kzbnb/numerical_bugs
[ "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b", "bc22e72bcc06df6ce7889a25e0aeed027bde910b" ]
[ "scripts/study_case/ID_4/benchmark/runtime/gat.py", "scripts/study_case/ID_36/ch10_04_03_Pic_10_05_exp_grist.py", "scripts/study_case/ID_13/torch_geometric/nn/prop/agnn_prop.py", "scripts/study_case/ID_21/mnist_softmax_grist.py", "scripts/study_case/ID_4/examples/qm9_nn_conv.py", "scripts/study_case/ID_12/skorch/dataset.py", "scripts/study_case/ID_12/skorch/classifier.py", "scripts/study_case/ID_5/matchzoo/dataloader/dataset.py", "scripts/study_case/ID_4/torch_geometric/data/dataloader.py", "scripts/study_case/ID_4/torch_geometric/datasets/coma.py", "scripts/study_case/ID_4/benchmark/kernel/global_attention.py", "scripts/study_case/ID_7/soips2.py", "scripts/study_case/ID_4/test/utils/test_softmax.py", "scripts/study_case/ID_48/day01_2.py", "scripts/study_case/ID_5/matchzoo/models/bert.py", "scripts/study_case/ID_22/My_pytorch1.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom scripts.study_case.ID_4.torch_geometric.nn import GATConv\n\n\nclass GAT(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super(GAT, self).__init__()\n self.conv1 = GATConv(in_channels, 8, heads=8, dropout=0.6)\n self.conv2 = GATConv(8 * 8, out_channels, dropout=0.6)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x = F.dropout(x, p=0.6, training=self.training)\n x = F.elu(self.conv1(x, edge_index))\n x = F.dropout(x, p=0.6, training=self.training)\n x = self.conv2(x, edge_index)\n return F.log_softmax(x, dim=1)\n", "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport sys\nsys.path.append(\"/data\")\n\nimage_size = 28 * 28\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nn_samples = mnist.train.num_examples\n\n\ndef xavier_init(fan_in, fan_out, constant=1):\n low = -constant * np.sqrt(6.0 / (fan_in + fan_out))\n high = constant * np.sqrt(6.0 / (fan_in + fan_out))\n return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)\n\n\nw, n_input, n_z = {}, image_size, 2 # 20\nn_hidden_recog_1, n_hidden_recog_2 = 500, 500\nn_hidden_gener_1, n_hidden_gener_2 = 500, 500\nw['w_recog'] = {\n 'h1': tf.Variable(xavier_init(n_input, n_hidden_recog_1)),\n 'h2': tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2)),\n 'out_mean': tf.Variable(xavier_init(n_hidden_recog_2, n_z)),\n 'out_log_sigma': tf.Variable(xavier_init(n_hidden_recog_2, n_z))}\nw['b_recog'] = {\n 'b1': tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32)),\n 'b2': tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32)),\n 'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32)),\n 'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32))}\nw['w_gener'] = {\n 'h1': tf.Variable(xavier_init(n_z, n_hidden_gener_1)),\n 'h2': tf.Variable(xavier_init(n_hidden_gener_1, n_hidden_gener_2)),\n 'out_mean': tf.Variable(xavier_init(n_hidden_gener_2, n_input)),\n 'out_log_sigma': tf.Variable(xavier_init(n_hidden_gener_2, n_input))}\nw['b_gener'] = {\n 'b1': tf.Variable(tf.zeros([n_hidden_gener_1], dtype=tf.float32)),\n 'b2': tf.Variable(tf.zeros([n_hidden_gener_2], dtype=tf.float32)),\n 'out_mean': tf.Variable(tf.zeros([n_input], dtype=tf.float32)),\n 'out_log_sigma': tf.Variable(tf.zeros([n_input], dtype=tf.float32))}\n\nl_rate = 0.001\nbatch_size = 100\n\nx = tf.placeholder(tf.float32, [None, n_input])\nenc_layer_1 = tf.nn.softplus(tf.add(tf.matmul(x, w[\"w_recog\"]['h1']), w[\"b_recog\"]['b1']))\nenc_layer_2 = tf.nn.softplus(tf.add(tf.matmul(enc_layer_1, w[\"w_recog\"]['h2']), w[\"b_recog\"]['b2']))\nz_mean = tf.add(tf.matmul(enc_layer_2, w[\"w_recog\"]['out_mean']), w[\"b_recog\"]['out_mean'])\nz_log_sigma_sq = tf.add(tf.matmul(enc_layer_2, w[\"w_recog\"]['out_log_sigma']), w[\"b_recog\"]['out_log_sigma'])\n\n# eps = tf.random_normal((batch_size, n_z), 0, 1, dtype=tf.float32)\neps = tf.placeholder(tf.float32, [None, n_z])\n\nsuspect_func = z_log_sigma_sq\nz = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), eps))\n\ndec_layer_1 = tf.nn.softplus(tf.add(tf.matmul(z, w[\"w_gener\"]['h1']), w[\"b_gener\"]['b1']))\ndec_layer_2 = tf.nn.softplus(tf.add(tf.matmul(dec_layer_1, w[\"w_gener\"]['h2']), w[\"b_gener\"]['b2']))\nx_reconstr_mean = tf.nn.sigmoid(tf.add(tf.matmul(dec_layer_2, w[\"w_gener\"]['out_mean']), w[\"b_gener\"]['out_mean']))\n\n#MUTATION#\nreconstr_loss = -tf.reduce_sum(x * tf.log(x_reconstr_mean + 
1e-8) + (1 - x) * tf.log(1 - x_reconstr_mean + 1e-8), 1)\n\nlatent_loss = -0.5 * tf.reduce_sum(1 + z_log_sigma_sq - tf.square(z_mean) - tf.exp(z_log_sigma_sq), 1)\ncost = tf.reduce_mean(reconstr_loss + latent_loss)\nreconstr_loss_mean = tf.reduce_mean(reconstr_loss)\nlatent_loss_mean = tf.reduce_mean(latent_loss)\noptimizer = tf.train.AdamOptimizer(learning_rate=l_rate).minimize(cost)\n\nshow_step = [20]\nxs_show, _ = mnist.test.next_batch(5000)\n\n\ndef train(sess, batch_size=100, training_epochs=10):\n \"\"\"insert code\"\"\"\n from scripts.utils.tf_utils import GradientSearcher\n gradient_search = GradientSearcher(name=\"ch10_04_03_Pic_10_05_exp_grist\")\n obj_function = -1 * tf.reduce_max(suspect_func)\n obj_grads = tf.gradients(obj_function, x)[0]\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n max_val, min_val = np.max(batch_xs), np.min(batch_xs)\n gradient_search.build(batch_size=batch_size, min_val=min_val, max_val=max_val)\n \"\"\"insert code\"\"\"\n while True:\n avg_cost = 0.\n total_batch = int(n_samples / batch_size)\n for i in range(total_batch):\n\n \"\"\"inserted code\"\"\"\n monitor_vars = {'loss': cost, 'obj_function': obj_function, 'obj_grad': obj_grads}\n feed_dict = {x: batch_xs, eps: np.random.normal(loc=0.0, scale=1.0, size=(batch_size, n_z))}\n batch_xs, scores_rank = gradient_search.update_batch_data(session=sess, monitor_var=monitor_vars,\n feed_dict=feed_dict, input_data=batch_xs, )\n \"\"\"inserted code\"\"\"\n\n _, loss_val, r_loss, l_loss = sess.run((optimizer, cost, reconstr_loss_mean, latent_loss_mean),\n feed_dict=feed_dict)\n\n \"\"\"inserted code\"\"\"\n new_batch_xs, new_batch_ys = mnist.train.next_batch(batch_size)\n new_data_dict = {'x': new_batch_xs, 'y': new_batch_ys}\n old_data_dict = {'x': batch_xs, 'y': batch_ys}\n batch_xs, batch_ys = gradient_search.switch_new_data(new_data_dict=new_data_dict,\n old_data_dict=old_data_dict,\n scores_rank=scores_rank)\n gradient_search.check_time()\n \"\"\"inserted code\"\"\"\n\n\ninit = tf.global_variables_initializer()\nsess = tf.InteractiveSession()\nsess.run(init)\ntrain(sess, training_epochs=200, batch_size=batch_size)\n\nsess.close()\n", "import torch\nfrom torch.nn import Parameter\nfrom torch_sparse import spmm\nfrom scripts.study_case.ID_13.torch_geometric.utils import remove_self_loops, add_self_loops, softmax\n\n\nclass AGNNProp(torch.nn.Module):\n \"\"\"Graph Attentional Propagation Layer from the\n `\"Attention-based Graph Neural Network for Semi-Supervised Learning (AGNN)\"\n <https://arxiv.org/abs/1803.03735>`_ paper.\n\n Args:\n requires_grad (bool, optional): If set to :obj:`False`, the propagation\n layer will not be trainable. 
(default: :obj:`True`)\n \"\"\"\n\n def __init__(self, requires_grad=True):\n super(AGNNProp, self).__init__()\n\n if requires_grad:\n self.beta = Parameter(torch.Tensor(1))\n else:\n self.register_buffer('beta', torch.ones(1))\n\n self.requires_grad = requires_grad\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.requires_grad:\n self.beta.data.uniform_(0, 1)\n\n def forward(self, x, edge_index):\n num_nodes = x.size(0)\n\n x = x.unsqueeze(-1) if x.dim() == 1 else x\n beta = self.beta if self.requires_grad else self._buffers['beta']\n\n # Add self-loops to adjacency matrix.\n edge_index, edge_attr = remove_self_loops(edge_index)\n edge_index = add_self_loops(edge_index, num_nodes=x.size(0))\n row, col = edge_index\n\n # Compute attention coefficients.\n norm = torch.norm(x, p=2, dim=1)\n alpha = (x[row] * x[col]).sum(dim=1) / (norm[row] * norm[col])\n alpha = softmax(alpha * beta, row, num_nodes=x.size(0))\n\n # Perform the propagation.\n out = spmm(edge_index, alpha, num_nodes, x)\n\n return out\n\n def __repr__(self):\n return '{}()'.format(self.__class__.__name__)\n", "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A very simple MNIST classifier.\nSee extensive documentation at\nhttp://tensorflow.org/tutorials/mnist/beginners/index.md\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Import data\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom datetime import datetime\nimport numpy as np\nimport sys\n\nsys.path.append(\"/data\")\nimport tensorflow as tf\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nsess = tf.InteractiveSession()\n\n# Create the model\nx = tf.placeholder(tf.float32, [None, 784])\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, [None, 10])\n# MUTATION#\nobj_y = y\ncross_entropy = -tf.reduce_sum(y_ * tf.log(y))\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n\n# Train\ntf.initialize_all_variables().run()\n\nbatch_size = 100\n\n'''inserted code'''\nfrom scripts.utils.tf_utils import GradientSearcher\ngradient_search = GradientSearcher(name=\"tensorflow-examples-tutorials-mnist_grist\")\nobj_function = tf.reduce_min(tf.abs(obj_y))\nobj_grads = tf.gradients(obj_function, x)[0]\nbatch_xs, batch_ys = mnist.train.next_batch(batch_size)\nmax_val, min_val = np.max(batch_xs), np.min(batch_xs)\ngradient_search.build(batch_size=batch_size, min_val=min_val, max_val=max_val)\n'''inserted code'''\n\nstep = 0\nwhile True:\n '''inserted code'''\n monitor_vars = {'loss': cross_entropy, 'obj_function': obj_function, 'obj_grad': obj_grads}\n feed_dict = {x: batch_xs, y_: batch_ys}\n batch_xs, scores_rank = gradient_search.update_batch_data(session=sess, 
monitor_var=monitor_vars,\n feed_dict=feed_dict, input_data=batch_xs)\n '''inserted code'''\n\n train_step.run(feed_dict)\n obj_function_val, obj_grads_val, loss_val = sess.run([obj_function, obj_grads, cross_entropy], feed_dict=feed_dict)\n # if step % 100 == 0:\n # print(\"obj func val:\", obj_function_val, \"loss:\", loss_val, \"obj grads val:\", obj_grads_val)\n # print(f\"x_val: {batch_xs}, y_val: {batch_ys}\")\n\n '''inserted code'''\n new_batch_xs, new_batch_ys = mnist.train.next_batch(batch_size)\n new_data_dict = {'x': new_batch_xs, 'y': new_batch_ys}\n old_data_dict = {'x': batch_xs, 'y': batch_ys}\n batch_xs, batch_ys = gradient_search.switch_new_data(new_data_dict=new_data_dict, old_data_dict=old_data_dict,\n scores_rank=scores_rank)\n gradient_search.check_time()\n '''inserted code'''\n step += 1\n", "import os.path as osp\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Sequential, Linear, ReLU, GRU\n\nimport scripts.study_case.ID_4.torch_geometric.transforms as T\nfrom scripts.study_case.ID_4.torch_geometric.datasets import QM9\nfrom scripts.study_case.ID_4.torch_geometric.nn import NNConv, Set2Set\nfrom scripts.study_case.ID_4.torch_geometric.data import DataLoader\nfrom scripts.study_case.ID_4.torch_geometric.utils import remove_self_loops\n\ntarget = 0\ndim = 64\n\n\nclass MyTransform(object):\n def __call__(self, data):\n # Specify target.\n data.y = data.y[:, target]\n return data\n\n\nclass Complete(object):\n def __call__(self, data):\n device = data.edge_index.device\n\n row = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n col = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n\n row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)\n col = col.repeat(data.num_nodes)\n edge_index = torch.stack([row, col], dim=0)\n\n edge_attr = None\n if data.edge_attr is not None:\n idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]\n size = list(data.edge_attr.size())\n size[0] = data.num_nodes * data.num_nodes\n edge_attr = data.edge_attr.new_zeros(size)\n edge_attr[idx] = data.edge_attr\n\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n data.edge_attr = edge_attr\n data.edge_index = edge_index\n\n return data\n\n\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'QM9')\ntransform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\ndataset = QM9(path, transform=transform).shuffle()\n\n# Normalize targets to mean = 0 and std = 1.\nmean = dataset.data.y[:, target].mean().item()\nstd = dataset.data.y[:, target].std().item()\ndataset.data.y[:, target] = (dataset.data.y[:, target] - mean) / std\n\n# Split datasets.\ntest_dataset = dataset[:10000]\nval_dataset = dataset[10000:20000]\ntrain_dataset = dataset[20000:]\ntest_loader = DataLoader(test_dataset, batch_size=64)\nval_loader = DataLoader(val_dataset, batch_size=64)\ntrain_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.lin0 = torch.nn.Linear(dataset.num_features, dim)\n\n nn = Sequential(Linear(5, 128), ReLU(), Linear(128, dim * dim))\n self.conv = NNConv(dim, dim, nn, aggr='mean', root_weight=False)\n self.gru = GRU(dim, dim)\n\n self.set2set = Set2Set(dim, processing_steps=3)\n self.lin1 = torch.nn.Linear(2 * dim, dim)\n self.lin2 = torch.nn.Linear(dim, 1)\n\n def forward(self, data):\n out = F.relu(self.lin0(data.x))\n h = out.unsqueeze(0)\n\n for i in range(3):\n m = F.relu(self.conv(out, 
data.edge_index, data.edge_attr))\n out, h = self.gru(m.unsqueeze(0), h)\n out = out.squeeze(0)\n\n out = self.set2set(out, data.batch)\n out = F.relu(self.lin1(out))\n out = self.lin2(out)\n return out.view(-1)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode='min', factor=0.7, patience=5, min_lr=0.00001)\n\n\ndef train(epoch):\n model.train()\n loss_all = 0\n\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad()\n loss = F.mse_loss(model(data), data.y)\n loss.backward()\n loss_all += loss.item() * data.num_graphs\n optimizer.step()\n return loss_all / len(train_loader.dataset)\n\n\ndef test(loader):\n model.eval()\n error = 0\n\n for data in loader:\n data = data.to(device)\n error += (model(data) * std - data.y * std).abs().sum().item() # MAE\n return error / len(loader.dataset)\n\n\nbest_val_error = None\nfor epoch in range(1, 301):\n lr = scheduler.optimizer.param_groups[0]['lr']\n loss = train(epoch)\n val_error = test(val_loader)\n scheduler.step(val_error)\n\n if best_val_error is None or val_error <= best_val_error:\n test_error = test(test_loader)\n best_val_error = val_error\n\n print('Epoch: {:03d}, LR: {:7f}, Loss: {:.7f}, Validation MAE: {:.7f}, '\n 'Test MAE: {:.7f}'.format(epoch, lr, loss, val_error, test_error))\n", "\"\"\"Contains custom skorch Dataset and CVSplit.\"\"\"\n\nfrom functools import partial\nfrom numbers import Number\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import check_cv\nimport torch\nimport torch.utils.data\n\nfrom scripts.study_case.ID_12.skorch.utils import flatten\nfrom scripts.study_case.ID_12.skorch.utils import is_pandas_ndframe\nfrom scripts.study_case.ID_12.skorch.utils import check_indexing\nfrom scripts.study_case.ID_12.skorch.utils import multi_indexing\nfrom scripts.study_case.ID_12.skorch.utils import to_numpy\n\n\nERROR_MSG_1_ITEM = (\n \"You are using a non-skorch dataset that returns 1 value. \"\n \"Remember that for skorch, Dataset.__getitem__ must return exactly \"\n \"2 values, X and y (more info: \"\n \"https://skorch.readthedocs.io/en/stable/user/dataset.html).\")\n\n\nERROR_MSG_MORE_THAN_2_ITEMS = (\n \"You are using a non-skorch dataset that returns {} values. 
\"\n \"Remember that for skorch, Dataset.__getitem__ must return exactly \"\n \"2 values, X and y (more info: \"\n \"https://skorch.readthedocs.io/en/stable/user/dataset.html).\")\n\n\ndef _apply_to_data(data, func, unpack_dict=False):\n \"\"\"Apply a function to data, trying to unpack different data\n types.\n\n \"\"\"\n apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)\n\n if isinstance(data, dict):\n if unpack_dict:\n return [apply_(v) for v in data.values()]\n return {k: apply_(v) for k, v in data.items()}\n\n if isinstance(data, (list, tuple)):\n try:\n # e.g.list/tuple of arrays\n return [apply_(x) for x in data]\n except TypeError:\n return func(data)\n\n return func(data)\n\n\ndef _is_sparse(x):\n try:\n return sparse.issparse(x) or x.is_sparse\n except AttributeError:\n return False\n\n\ndef _len(x):\n if _is_sparse(x):\n return x.shape[0]\n return len(x)\n\n\ndef get_len(data):\n lens = [_apply_to_data(data, _len, unpack_dict=True)]\n lens = list(flatten(lens))\n len_set = set(lens)\n if len(len_set) != 1:\n raise ValueError(\"Dataset does not have consistent lengths.\")\n return list(len_set)[0]\n\n\ndef uses_placeholder_y(ds):\n \"\"\"If ``ds`` is a ``skorch.dataset.Dataset`` or a\n ``skorch.dataset.Dataset`` nested inside a\n ``torch.utils.data.Subset`` and uses\n y as a placeholder, return ``True``.\"\"\"\n\n if isinstance(ds, torch.utils.data.Subset):\n return uses_placeholder_y(ds.dataset)\n return isinstance(ds, Dataset) and hasattr(ds, \"y\") and ds.y is None\n\n\ndef unpack_data(data):\n \"\"\"Unpack data returned by the net's iterator into a 2-tuple.\n\n If the wrong number of items is returned, raise a helpful error\n message.\n\n \"\"\"\n # Note: This function cannot detect it when a user only returns 1\n # item that is exactly of length 2 (e.g. because the batch size is\n # 2). In that case, the item will be erroneously split into X and\n # y.\n try:\n X, y = data\n return X, y\n except ValueError:\n # if a 1-tuple/list or something else like a torch tensor\n if not isinstance(data, (tuple, list)) or len(data) < 2:\n raise ValueError(ERROR_MSG_1_ITEM)\n raise ValueError(ERROR_MSG_MORE_THAN_2_ITEMS.format(len(data)))\n\n\nclass Dataset(torch.utils.data.Dataset):\n # pylint: disable=anomalous-backslash-in-string\n \"\"\"General dataset wrapper that can be used in conjunction with\n PyTorch :class:`~torch.utils.data.DataLoader`.\n\n The dataset will always yield a tuple of two values, the first\n from the data (``X``) and the second from the target (``y``).\n However, the target is allowed to be ``None``. 
In that case,\n :class:`.Dataset` will currently return a dummy tensor, since\n :class:`~torch.utils.data.DataLoader` does not work with\n ``None``\\s.\n\n :class:`.Dataset` currently works with the following data types:\n\n * numpy ``array``\\s\n * PyTorch :class:`~torch.Tensor`\\s\n * scipy sparse CSR matrices\n * pandas NDFrame\n * a dictionary of the former three\n * a list/tuple of the former three\n\n Parameters\n ----------\n X : see above\n Everything pertaining to the input data.\n\n y : see above or None (default=None)\n Everything pertaining to the target, if there is anything.\n\n length : int or None (default=None)\n If not ``None``, determines the length (``len``) of the data.\n Should usually be left at ``None``, in which case the length is\n determined by the data itself.\n\n \"\"\"\n def __init__(\n self,\n X,\n y=None,\n length=None,\n ):\n self.X = X\n self.y = y\n\n self.X_indexing = check_indexing(X)\n self.y_indexing = check_indexing(y)\n self.X_is_ndframe = is_pandas_ndframe(X)\n\n if length is not None:\n self._len = length\n return\n\n # pylint: disable=invalid-name\n len_X = get_len(X)\n if y is not None:\n len_y = get_len(y)\n if len_y != len_X:\n raise ValueError(\"X and y have inconsistent lengths.\")\n self._len = len_X\n\n def __len__(self):\n return self._len\n\n def transform(self, X, y):\n # pylint: disable=anomalous-backslash-in-string\n \"\"\"Additional transformations on ``X`` and ``y``.\n\n By default, they are cast to PyTorch :class:`~torch.Tensor`\\s.\n Override this if you want a different behavior.\n\n Note: If you use this in conjuction with PyTorch\n :class:`~torch.utils.data.DataLoader`, the latter will call\n the dataset for each row separately, which means that the\n incoming ``X`` and ``y`` each are single rows.\n\n \"\"\"\n # pytorch DataLoader cannot deal with None so we use 0 as a\n # placeholder value. We only return a Tensor with one value\n # (as opposed to ``batchsz`` values) since the pytorch\n # DataLoader calls __getitem__ for each row in the batch\n # anyway, which results in a dummy ``y`` value for each row in\n # the batch.\n y = torch.Tensor([0]) if y is None else y\n\n # pytorch cannot convert sparse matrices, for now just make it\n # dense; squeeze because X[i].shape is (1, n) for csr matrices\n if sparse.issparse(X):\n X = X.toarray().squeeze(0)\n return X, y\n\n def __getitem__(self, i):\n X, y = self.X, self.y\n if self.X_is_ndframe:\n X = {k: X[k].values.reshape(-1, 1) for k in X}\n\n Xi = multi_indexing(X, i, self.X_indexing)\n yi = multi_indexing(y, i, self.y_indexing)\n return self.transform(Xi, yi)\n\n\nclass CVSplit:\n \"\"\"Class that performs the internal train/valid split on a dataset.\n\n The ``cv`` argument here works similarly to the regular sklearn ``cv``\n parameter in, e.g., ``GridSearchCV``. However, instead of cycling\n through all splits, only one fixed split (the first one) is\n used. To get a full cycle through the splits, don't use\n ``NeuralNet``'s internal validation but instead the corresponding\n sklearn functions (e.g. 
``cross_val_score``).\n\n We additionally support a float, similar to sklearn's\n ``train_test_split``.\n\n Parameters\n ----------\n cv : int, float, cross-validation generator or an iterable, optional\n (Refer sklearn's User Guide for cross_validation for the various\n cross-validation strategies that can be used here.)\n\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a ``(Stratified)KFold``,\n - float, to represent the proportion of the dataset to include\n in the validation split.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train, validation splits.\n\n stratified : bool (default=False)\n Whether the split should be stratified. Only works if ``y`` is\n either binary or multiclass classification.\n\n random_state : int, RandomState instance, or None (default=None)\n Control the random state in case that ``(Stratified)ShuffleSplit``\n is used (which is when a float is passed to ``cv``). For more\n information, look at the sklearn documentation of\n ``(Stratified)ShuffleSplit``.\n\n \"\"\"\n def __init__(\n self,\n cv=5,\n stratified=False,\n random_state=None,\n ):\n self.stratified = stratified\n self.random_state = random_state\n\n if isinstance(cv, Number) and (cv <= 0):\n raise ValueError(\"Numbers less than 0 are not allowed for cv \"\n \"but CVSplit got {}\".format(cv))\n self.cv = cv\n\n def _is_stratified(self, cv):\n return isinstance(cv, (StratifiedKFold, StratifiedShuffleSplit))\n\n def _is_float(self, x):\n if not isinstance(x, Number):\n return False\n return not float(x).is_integer()\n\n def _check_cv_float(self):\n cv_cls = StratifiedShuffleSplit if self.stratified else ShuffleSplit\n return cv_cls(test_size=self.cv, random_state=self.random_state)\n\n def _check_cv_non_float(self, y):\n return check_cv(\n self.cv,\n y=y,\n classifier=self.stratified,\n )\n\n def check_cv(self, y):\n \"\"\"Resolve which cross validation strategy is used.\"\"\"\n y_arr = None\n if self.stratified:\n # Try to convert y to numpy for sklearn's check_cv; if conversion\n # doesn't work, still try.\n try:\n y_arr = to_numpy(y)\n except (AttributeError, TypeError):\n y_arr = y\n\n if self._is_float(self.cv):\n return self._check_cv_float()\n return self._check_cv_non_float(y_arr)\n\n def _is_regular(self, x):\n return (x is None) or isinstance(x, np.ndarray) or is_pandas_ndframe(x)\n\n def __call__(self, dataset, y=None, groups=None):\n bad_y_error = ValueError(\n \"Stratified CV requires explicitely passing a suitable y.\")\n if (y is None) and self.stratified:\n raise bad_y_error\n\n cv = self.check_cv(y)\n if self.stratified and not self._is_stratified(cv):\n raise bad_y_error\n\n # pylint: disable=invalid-name\n len_dataset = get_len(dataset)\n if y is not None:\n len_y = get_len(y)\n if len_dataset != len_y:\n raise ValueError(\"Cannot perform a CV split if dataset and y \"\n \"have different lengths.\")\n\n args = (np.arange(len_dataset),)\n if self._is_stratified(cv):\n args = args + (to_numpy(y),)\n\n idx_train, idx_valid = next(iter(cv.split(*args, groups=groups)))\n dataset_train = torch.utils.data.Subset(dataset, idx_train)\n dataset_valid = torch.utils.data.Subset(dataset, idx_valid)\n return dataset_train, dataset_valid\n\n def __repr__(self):\n # pylint: disable=useless-super-delegation\n return super(CVSplit, self).__repr__()\n", "\"\"\"NeuralNet subclasses for classification tasks.\"\"\"\n\nimport 
re\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom scripts.study_case.ID_12.skorch.net import NeuralNet\nfrom scripts.study_case.ID_12.skorch.callbacks import EpochTimer\nfrom scripts.study_case.ID_12.skorch.callbacks import PrintLog\nfrom scripts.study_case.ID_12.skorch.callbacks import EpochScoring\nfrom scripts.study_case.ID_12.skorch.callbacks import BatchScoring\nfrom scripts.study_case.ID_12.skorch.dataset import CVSplit\nfrom scripts.study_case.ID_12.skorch.utils import get_dim\nfrom scripts.study_case.ID_12.skorch.utils import is_dataset\nfrom scripts.study_case.ID_12.skorch.utils import noop\nfrom scripts.study_case.ID_12.skorch.utils import to_numpy\nfrom scripts.study_case.ID_12.skorch.utils import train_loss_score\nfrom scripts.study_case.ID_12.skorch.utils import valid_loss_score\nfrom sklearn.base import ClassifierMixin\n\n\nneural_net_clf_doc_start = \"\"\"NeuralNet for classification tasks\n\n Use this specifically if you have a standard classification task,\n with input data X and target y.\n\n\"\"\"\n\nneural_net_clf_criterion_text = \"\"\"\n\n criterion : torch criterion (class, default=torch.nn.NLLLoss)\n Negative log likelihood loss. Note that the module should return\n probabilities, the log is applied during ``get_loss``.\"\"\"\n\n\ndef get_neural_net_clf_doc(doc):\n doc = neural_net_clf_doc_start + \" \" + doc.split(\"\\n \", 4)[-1]\n pattern = re.compile(r'(\\n\\s+)(criterion .*\\n)(\\s.+){1,99}')\n start, end = pattern.search(doc).span()\n doc = doc[:start] + neural_net_clf_criterion_text + doc[end:]\n return doc\n\n\n# pylint: disable=missing-docstring\nclass NeuralNetClassifier(NeuralNet, ClassifierMixin):\n __doc__ = get_neural_net_clf_doc(NeuralNet.__doc__)\n\n def __init__(\n self,\n module,\n *args,\n criterion=torch.nn.NLLLoss,\n train_split=CVSplit(5, stratified=True),\n **kwargs\n ):\n super(NeuralNetClassifier, self).__init__(\n module,\n *args,\n criterion=criterion,\n train_split=train_split,\n **kwargs\n )\n\n @property\n def _default_callbacks(self):\n return [\n ('epoch_timer', EpochTimer()),\n ('train_loss', BatchScoring(\n train_loss_score,\n name='train_loss',\n on_train=True,\n target_extractor=noop,\n )),\n ('valid_loss', BatchScoring(\n valid_loss_score,\n name='valid_loss',\n target_extractor=noop,\n )),\n ('valid_acc', EpochScoring(\n 'accuracy',\n name='valid_acc',\n lower_is_better=False,\n )),\n ('print_log', PrintLog()),\n ]\n\n # pylint: disable=signature-differs\n def check_data(self, X, y):\n if (\n (y is None) and\n (not is_dataset(X)) and\n (self.iterator_train is DataLoader)\n ):\n msg = (\"No y-values are given (y=None). You must either supply a \"\n \"Dataset as X or implement your own DataLoader for \"\n \"training (and your validation) and supply it using the \"\n \"``iterator_train`` and ``iterator_valid`` parameters \"\n \"respectively.\")\n raise ValueError(msg)\n\n # pylint: disable=arguments-differ\n def get_loss(self, y_pred, y_true, *args, **kwargs):\n if isinstance(self.criterion_, torch.nn.NLLLoss):\n y_pred = torch.log(y_pred)\n return super().get_loss(y_pred, y_true, *args, **kwargs)\n\n # pylint: disable=signature-differs\n def fit(self, X, y, **fit_params):\n \"\"\"See ``NeuralNet.fit``.\n\n In contrast to ``NeuralNet.fit``, ``y`` is non-optional to\n avoid mistakenly forgetting about ``y``. 
However, ``y`` can be\n set to ``None`` in case it is derived dynamically from\n ``X``.\n\n \"\"\"\n # pylint: disable=useless-super-delegation\n # this is actually a pylint bug:\n # https://github.com/PyCQA/pylint/issues/1085\n return super(NeuralNetClassifier, self).fit(X, y, **fit_params)\n\n def predict_proba(self, X):\n \"\"\"Where applicable, return probability estimates for\n samples.\n\n If the module's forward method returns multiple outputs as a\n tuple, it is assumed that the first output contains the\n relevant information and the other values are ignored. If all\n values are relevant, consider using\n :func:`~skorch.NeuralNet.forward` instead.\n\n Parameters\n ----------\n X : input data, compatible with skorch.dataset.Dataset\n By default, you should be able to pass:\n\n * numpy arrays\n * torch tensors\n * pandas DataFrame or Series\n * scipy sparse CSR matrices\n * a dictionary of the former three\n * a list/tuple of the former three\n * a Dataset\n\n If this doesn't work with your data, you have to pass a\n ``Dataset`` that can deal with the data.\n\n Returns\n -------\n y_proba : numpy ndarray\n\n \"\"\"\n # Only the docstring changed from parent.\n # pylint: disable=useless-super-delegation\n return super().predict_proba(X)\n\n def predict(self, X):\n \"\"\"Where applicable, return class labels for samples in X.\n\n If the module's forward method returns multiple outputs as a\n tuple, it is assumed that the first output contains the\n relevant information and the other values are ignored. If all\n values are relevant, consider using\n :func:`~skorch.NeuralNet.forward` instead.\n\n Parameters\n ----------\n X : input data, compatible with skorch.dataset.Dataset\n By default, you should be able to pass:\n\n * numpy arrays\n * torch tensors\n * pandas DataFrame or Series\n * scipy sparse CSR matrices\n * a dictionary of the former three\n * a list/tuple of the former three\n * a Dataset\n\n If this doesn't work with your data, you have to pass a\n ``Dataset`` that can deal with the data.\n\n Returns\n -------\n y_pred : numpy ndarray\n\n \"\"\"\n y_preds = []\n for yp in self.forward_iter(X, training=False):\n yp = yp[0] if isinstance(yp, tuple) else yp\n y_preds.append(to_numpy(yp.max(-1)[-1]))\n y_pred = np.concatenate(y_preds, 0)\n return y_pred\n\n\nneural_net_binary_clf_doc_start = \"\"\"NeuralNet for binary classification tasks\n\n Use this specifically if you have a binary classification task,\n with input data X and target y. y must be 1d.\n\n\"\"\"\n\nneural_net_binary_clf_criterion_text = \"\"\"\n\n criterion : torch criterion (class, default=torch.nn.BCEWithLogitsLoss)\n Binary cross entropy loss with logits. Note that the module should return\n the logit of probabilities with shape (batch_size, ).\n\n threshold : float (default=0.5)\n Probabilities above this threshold is classified as 1. 
``threshold``\n is used by ``predict`` and ``predict_proba`` for classification.\"\"\"\n\n\ndef get_neural_net_binary_clf_doc(doc):\n doc = neural_net_binary_clf_doc_start + \" \" + doc.split(\"\\n \", 4)[-1]\n pattern = re.compile(r'(\\n\\s+)(criterion .*\\n)(\\s.+){1,99}')\n start, end = pattern.search(doc).span()\n doc = doc[:start] + neural_net_binary_clf_criterion_text + doc[end:]\n return doc\n\n\nclass NeuralNetBinaryClassifier(NeuralNet, ClassifierMixin):\n # pylint: disable=missing-docstring\n __doc__ = get_neural_net_binary_clf_doc(NeuralNet.__doc__)\n\n def __init__(\n self,\n module,\n *args,\n criterion=torch.nn.BCEWithLogitsLoss,\n train_split=CVSplit(5, stratified=True),\n threshold=0.5,\n **kwargs\n ):\n super().__init__(\n module,\n criterion=criterion,\n train_split=train_split,\n *args,\n **kwargs\n )\n self.threshold = threshold\n\n @property\n def _default_callbacks(self):\n return [\n ('epoch_timer', EpochTimer()),\n ('train_loss', BatchScoring(\n train_loss_score,\n name='train_loss',\n on_train=True,\n target_extractor=noop,\n )),\n ('valid_loss', BatchScoring(\n valid_loss_score,\n name='valid_loss',\n target_extractor=noop,\n )),\n ('valid_acc', EpochScoring(\n 'accuracy',\n name='valid_acc',\n lower_is_better=False,\n )),\n ('print_log', PrintLog()),\n ]\n\n # pylint: disable=signature-differs\n def check_data(self, X, y):\n super().check_data(X, y)\n if get_dim(y) != 1:\n raise ValueError(\"The target data should be 1-dimensional.\")\n\n # pylint: disable=signature-differs\n def fit(self, X, y, **fit_params):\n \"\"\"See ``NeuralNet.fit``.\n\n In contrast to ``NeuralNet.fit``, ``y`` is non-optional to\n avoid mistakenly forgetting about ``y``. However, ``y`` can be\n set to ``None`` in case it is derived dynamically from\n ``X``.\n\n \"\"\"\n # pylint: disable=useless-super-delegation\n # this is actually a pylint bug:\n # https://github.com/PyCQA/pylint/issues/1085\n return super().fit(X, y, **fit_params)\n\n def predict(self, X):\n \"\"\"Where applicable, return class labels for samples in X.\n\n If the module's forward method returns multiple outputs as a\n tuple, it is assumed that the first output contains the\n relevant information and the other values are ignored. If all\n values are relevant, consider using\n :func:`~skorch.NeuralNet.forward` instead.\n\n Parameters\n ----------\n X : input data, compatible with skorch.dataset.Dataset\n By default, you should be able to pass:\n\n * numpy arrays\n * torch tensors\n * pandas DataFrame or Series\n * scipy sparse CSR matrices\n * a dictionary of the former three\n * a list/tuple of the former three\n * a Dataset\n\n If this doesn't work with your data, you have to pass a\n ``Dataset`` that can deal with the data.\n\n Returns\n -------\n y_pred : numpy ndarray\n\n \"\"\"\n return (self.predict_proba(X) > self.threshold).astype('uint8')\n\n # pylint: disable=missing-docstring\n def predict_proba(self, X):\n \"\"\"Where applicable, return probability estimates for\n samples.\n\n If the module's forward method returns multiple outputs as a\n tuple, it is assumed that the first output contains the\n relevant information and the other values are ignored. 
If all\n values are relevant, consider using\n :func:`~skorch.NeuralNet.forward` instead.\n\n Parameters\n ----------\n X : input data, compatible with skorch.dataset.Dataset\n By default, you should be able to pass:\n\n * numpy arrays\n * torch tensors\n * pandas DataFrame or Series\n * scipy sparse CSR matrices\n * a dictionary of the former three\n * a list/tuple of the former three\n * a Dataset\n\n If this doesn't work with your data, you have to pass a\n ``Dataset`` that can deal with the data.\n\n Returns\n -------\n y_proba : numpy ndarray\n\n \"\"\"\n y_probas = []\n bce_logits_loss = isinstance(\n self.criterion_, torch.nn.BCEWithLogitsLoss)\n\n for yp in self.forward_iter(X, training=False):\n yp = yp[0] if isinstance(yp, tuple) else yp\n if bce_logits_loss:\n yp = torch.sigmoid(yp)\n y_probas.append(to_numpy(yp))\n y_proba = np.concatenate(y_probas, 0)\n return y_proba\n", "\"\"\"A basic class representing a Dataset.\"\"\"\nimport typing\n\nimport functools\nimport numpy as np\nimport pandas as pd\nfrom torch.utils import data\n\nimport scripts.study_case.ID_5.matchzoo as mz\nfrom scripts.study_case.ID_5.matchzoo.engine.base_callback import BaseCallback\n\n\nclass Dataset(data.Dataset):\n \"\"\"\n Dataset that is built from a data pack.\n\n :param data_pack: DataPack to build the dataset.\n :param mode: One of \"point\", \"pair\", and \"list\". (default: \"point\")\n :param num_dup: Number of duplications per instance, only effective when\n `mode` is \"pair\". (default: 1)\n :param num_neg: Number of negative samples per instance, only effective\n when `mode` is \"pair\". (default: 1)\n :param callbacks: Callbacks. See `matchzoo.data_generator.callbacks` for\n more details.\n\n Examples:\n >>> import scripts.study_case.ID_5.matchzoo as mz\n >>> data_pack = mz.datasets.toy.load_data(stage='train')\n >>> preprocessor = mz.preprocessors.BasicPreprocessor()\n >>> data_processed = preprocessor.fit_transform(data_pack)\n >>> dataset_point = mz.dataloader.Dataset(data_processed, mode='point')\n >>> len(dataset_point)\n 100\n >>> dataset_pair = mz.dataloader.Dataset(\n ... 
data_processed, mode='pair', num_neg=2)\n >>> len(dataset_pair)\n 5\n\n \"\"\"\n\n def __init__(\n self,\n data_pack: mz.DataPack,\n mode='point',\n num_dup: int = 1,\n num_neg: int = 1,\n callbacks: typing.List[BaseCallback] = None\n ):\n \"\"\"Init.\"\"\"\n if callbacks is None:\n callbacks = []\n\n if mode not in ('point', 'pair', 'list'):\n raise ValueError(f\"{mode} is not a valid mode type.\"\n f\"Must be one of `point`, `pair` or `list`.\")\n\n self._mode = mode\n self._num_dup = num_dup\n self._num_neg = num_neg\n self._orig_relation = data_pack.relation\n self._callbacks = callbacks\n self._data_pack = data_pack\n self._index_pool = None\n self.sample()\n\n def __len__(self) -> int:\n \"\"\"Get the total number of instances.\"\"\"\n return len(self._index_pool)\n\n def __getitem__(self, item: int) -> typing.Tuple[dict, np.ndarray]:\n \"\"\"Get a set of instances from index idx.\n\n :param item: the index of the instance.\n \"\"\"\n item_data_pack = self._data_pack[item]\n self._handle_callbacks_on_batch_data_pack(item_data_pack)\n x, y = item_data_pack.unpack()\n self._handle_callbacks_on_batch_unpacked(x, y)\n return x, y\n\n def _handle_callbacks_on_batch_data_pack(self, batch_data_pack):\n for callback in self._callbacks:\n callback.on_batch_data_pack(batch_data_pack)\n\n def _handle_callbacks_on_batch_unpacked(self, x, y):\n for callback in self._callbacks:\n callback.on_batch_unpacked(x, y)\n\n def get_index_pool(self):\n \"\"\"\n Set the:attr:`_index_pool`.\n\n Here the :attr:`_index_pool` records the index of all the instances.\n \"\"\"\n if self._mode == 'point':\n num_instances = len(self._data_pack)\n index_pool = np.expand_dims(range(num_instances), axis=1).tolist()\n return index_pool\n elif self._mode == 'pair':\n index_pool = []\n step_size = self._num_neg + 1\n num_instances = int(len(self._data_pack) / step_size)\n for i in range(num_instances):\n lower = i * step_size\n upper = (i + 1) * step_size\n indices = list(range(lower, upper))\n if indices:\n index_pool.append(indices)\n return index_pool\n elif self._mode == 'list':\n raise NotImplementedError(\n f'{self._mode} data generator not implemented.')\n else:\n raise ValueError(f\"{self._mode} is not a valid mode type\"\n f\"Must be one of `point`, `pair` or `list`.\")\n\n def sample(self):\n \"\"\"Resample the instances from data pack.\"\"\"\n if self._mode == 'pair':\n self._data_pack.relation = self._reorganize_pair_wise(\n relation=self._orig_relation,\n num_dup=self._num_dup,\n num_neg=self._num_neg\n )\n self._index_pool = self.get_index_pool()\n\n def shuffle(self):\n \"\"\"Shuffle the instances.\"\"\"\n np.random.shuffle(self._index_pool)\n\n def sort(self):\n \"\"\"Sort the instances by length_right.\"\"\"\n old_index_pool = self._index_pool\n max_instance_right_length = []\n for row in range(len(old_index_pool)):\n instance = self._data_pack[old_index_pool[row]].unpack()[0]\n max_instance_right_length.append(max(instance['length_right']))\n sort_index = np.argsort(max_instance_right_length)\n\n self._index_pool = [old_index_pool[index] for index in sort_index]\n\n @property\n def data_pack(self):\n \"\"\"`data_pack` getter.\"\"\"\n return self._data_pack\n\n @data_pack.setter\n def data_pack(self, value):\n \"\"\"`data_pack` setter.\"\"\"\n self._data_pack = value\n\n @property\n def callbacks(self):\n \"\"\"`callbacks` getter.\"\"\"\n return self._callbacks\n\n @callbacks.setter\n def callbacks(self, value):\n \"\"\"`callbacks` setter.\"\"\"\n self._callbacks = value\n\n @property\n def 
num_neg(self):\n \"\"\"`num_neg` getter.\"\"\"\n return self._num_neg\n\n @num_neg.setter\n def num_neg(self, value):\n \"\"\"`num_neg` setter.\"\"\"\n self._num_neg = value\n\n @property\n def num_dup(self):\n \"\"\"`num_dup` getter.\"\"\"\n return self._num_dup\n\n @num_dup.setter\n def num_dup(self, value):\n \"\"\"`num_dup` setter.\"\"\"\n self._num_dup = value\n\n @property\n def mode(self):\n \"\"\"`mode` getter.\"\"\"\n return self._mode\n\n @mode.setter\n def mode(self, value):\n \"\"\"`mode` setter.\"\"\"\n self._mode = value\n\n @property\n def index_pool(self):\n \"\"\"`index_pool` getter.\"\"\"\n return self._index_pool\n\n @classmethod\n def _reorganize_pair_wise(\n cls,\n relation: pd.DataFrame,\n num_dup: int = 1,\n num_neg: int = 1\n ):\n \"\"\"Re-organize the data pack as pair-wise format.\"\"\"\n pairs = []\n groups = relation.sort_values(\n 'label', ascending=False).groupby('id_left')\n for _, group in groups:\n labels = group.label.unique()\n for label in labels[:-1]:\n pos_samples = group[group.label == label]\n pos_samples = pd.concat([pos_samples] * num_dup)\n neg_samples = group[group.label < label]\n for _, pos_sample in pos_samples.iterrows():\n pos_sample = pd.DataFrame([pos_sample])\n neg_sample = neg_samples.sample(num_neg, replace=True)\n pairs.extend((pos_sample, neg_sample))\n new_relation = pd.concat(pairs, ignore_index=True)\n return new_relation\n", "import torch.utils.data\nfrom torch.utils.data.dataloader import default_collate\n\nfrom scripts.study_case.ID_4.torch_geometric.data import Batch, GraphLevelBatch\n\n\nclass GraphLevelDataLoader(torch.utils.data.DataLoader):\n r\"\"\"Data loader which merges data objects from a\n :class:`torch_geometric.data.dataset` to a mini-batch.\n\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How may samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch (default: :obj:`False`)\n follow_batch (list or tuple, optional): Creates assignment batch\n vectors for each key in the list. (default: :obj:`[]`)\n \"\"\"\n\n def __init__(self,\n dataset,\n batch_size=1,\n shuffle=False,\n follow_batch=[],\n **kwargs):\n super(GraphLevelDataLoader, self).__init__(\n dataset,\n batch_size,\n shuffle,\n collate_fn=lambda data_list: GraphLevelBatch.from_graph_data_list(\n data_list, follow_batch),\n **kwargs)\n\n\n\nclass DataLoader(torch.utils.data.DataLoader):\n r\"\"\"Data loader which merges data objects from a\n :class:`torch_geometric.data.dataset` to a mini-batch.\n\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How many samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch. (default: :obj:`False`)\n follow_batch (list or tuple, optional): Creates assignment batch\n vectors for each key in the list. (default: :obj:`[]`)\n \"\"\"\n\n def __init__(self,\n dataset,\n batch_size=1,\n shuffle=False,\n follow_batch=[],\n **kwargs):\n super(DataLoader, self).__init__(\n dataset,\n batch_size,\n shuffle,\n collate_fn=lambda data_list: Batch.from_data_list(\n data_list, follow_batch),\n **kwargs)\n\n\nclass DataListLoader(torch.utils.data.DataLoader):\n r\"\"\"Data loader which merges data objects from a\n :class:`torch_geometric.data.dataset` to a python list.\n\n .. 
note::\n\n This data loader should be used for multi-gpu support via\n :class:`torch_geometric.nn.DataParallel`.\n\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How many samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch (default: :obj:`False`)\n \"\"\"\n\n def __init__(self, dataset, batch_size=1, shuffle=False, **kwargs):\n super(DataListLoader, self).__init__(\n dataset,\n batch_size,\n shuffle,\n collate_fn=lambda data_list: data_list,\n **kwargs)\n\n\nclass DenseDataLoader(torch.utils.data.DataLoader):\n r\"\"\"Data loader which merges data objects from a\n :class:`torch_geometric.data.dataset` to a mini-batch.\n\n .. note::\n\n To make use of this data loader, all graphs in the dataset needs to\n have the same shape for each its attributes.\n Therefore, this data loader should only be used when working with\n *dense* adjacency matrices.\n\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How many samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch (default: :obj:`False`)\n \"\"\"\n\n def __init__(self, dataset, batch_size=1, shuffle=False, **kwargs):\n def dense_collate(data_list):\n batch = Batch()\n for key in data_list[0].keys:\n batch[key] = default_collate([d[key] for d in data_list])\n return batch\n\n super(DenseDataLoader, self).__init__(\n dataset, batch_size, shuffle, collate_fn=dense_collate, **kwargs)\n", "import os.path as osp\nfrom glob import glob\n\nimport torch\nfrom scripts.study_case.ID_4.torch_geometric.data import InMemoryDataset, extract_zip\nfrom scripts.study_case.ID_4.torch_geometric.read import read_ply\n\n\nclass CoMA(InMemoryDataset):\n r\"\"\"The CoMA 3D faces dataset from the `\"Generating 3D faces using\n Convolutional Mesh Autoencoders\" <https://arxiv.org/abs/1807.10267>`_\n paper, containing 20,466 meshes of extreme expressions captured over 12\n different subjects.\n\n .. note::\n\n Data objects hold mesh faces instead of edge indices.\n To convert the mesh to a graph, use the\n :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.\n To convert the mesh to a point cloud, use the\n :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to\n sample a fixed number of points on the mesh faces according to their\n face area.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n train (bool, optional): If :obj:`True`, loads the training dataset,\n otherwise the test dataset. (default: :obj:`True`)\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. 
(default: :obj:`None`)\n \"\"\"\n\n url = 'https://coma.is.tue.mpg.de/'\n\n categories = [\n 'bareteeth',\n 'cheeks_in',\n 'eyebrow',\n 'high_smile',\n 'lips_back',\n 'lips_up',\n 'mouth_down',\n 'mouth_extreme',\n 'mouth_middle',\n 'mouth_open',\n 'mouth_side',\n 'mouth_up',\n ]\n\n def __init__(self,\n root,\n train=True,\n transform=None,\n pre_transform=None,\n pre_filter=None):\n super(CoMA, self).__init__(root, transform, pre_transform, pre_filter)\n path = self.processed_paths[0] if train else self.processed_paths[1]\n self.data, self.slices = torch.load(path)\n\n @property\n def raw_file_names(self):\n return 'COMA_data.zip'\n\n @property\n def processed_file_names(self):\n return ['training.pt', 'test.pt']\n\n def download(self):\n raise RuntimeError(\n 'Dataset not found. Please download COMA_data.zip from {} and '\n 'move it to {}'.format(self.url, self.raw_dir))\n\n def process(self):\n folders = sorted(glob(osp.join(self.raw_dir, 'FaceTalk_*')))\n if len(folders) == 0:\n extract_zip(self.raw_paths[0], self.raw_dir, log=False)\n folders = sorted(glob(osp.join(self.raw_dir, 'FaceTalk_*')))\n\n train_data_list, test_data_list = [], []\n for folder in folders:\n for i, category in enumerate(self.categories):\n files = sorted(glob(osp.join(folder, category, '*.ply')))\n for j, f in enumerate(files):\n data = read_ply(f)\n data.y = torch.tensor([i], dtype=torch.long)\n if self.pre_filter is not None and\\\n not self.pre_filter(data):\n continue\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n\n if (j % 100) < 90:\n train_data_list.append(data)\n else:\n test_data_list.append(data)\n\n torch.save(self.collate(train_data_list), self.processed_paths[0])\n torch.save(self.collate(test_data_list), self.processed_paths[1])\n", "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom scripts.study_case.ID_4.torch_geometric.nn import SAGEConv, GlobalAttention\n\n\nclass GlobalAttentionNet(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden):\n super(GlobalAttentionNet, self).__init__()\n self.conv1 = SAGEConv(dataset.num_features, hidden)\n self.convs = torch.nn.ModuleList()\n for i in range(num_layers - 1):\n self.convs.append(SAGEConv(hidden, hidden))\n self.att = GlobalAttention(Linear(hidden, 1))\n self.lin1 = Linear(hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n self.att.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n x = F.relu(self.conv1(x, edge_index))\n for conv in self.convs:\n x = F.relu(conv(x, edge_index))\n x = self.att(x, batch)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n", "from tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport sys\nsys.path.append(\"/data\")\nfrom datetime import datetime\n# assert tf.__version__ == \"1.8.0\"\ntf.set_random_seed(20180130)\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef 
conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\nsess = tf.InteractiveSession()\n\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny_ = tf.placeholder(tf.float32, shape=[None, 10])\n\nW_conv1 = weight_variable([5, 5, 1, 32])\nb_conv1 = bias_variable([32])\n\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1)\n\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)\n\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\nb_fc1 = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\n\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n#MUTATION#\ncross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))\nobj_var = tf.reduce_min(tf.abs(y_conv))\n\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntf.train.write_graph(sess.graph_def, '/data/scripts/study_case/pbtxt_files', 'SOIPS2.pbtxt')\nsess.run(tf.initialize_all_variables())\n\nbatch_size = 50\n'''inserted code'''\nfrom scripts.utils.tf_utils import TensorFlowScheduler\nscheduler = TensorFlowScheduler(name=\"soips2\")\n'''inserted code'''\n\nwhile True:\n batch = mnist.train.next_batch(batch_size)\n loss, train_accuracy,obj_var_val = sess.run([cross_entropy, accuracy,obj_var], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n\n '''inserted code'''\n scheduler.loss_checker(loss)\n '''inserted code'''\n\n train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\n '''inserted code'''\n scheduler.check_time()\n '''inserted code'''", "import torch\nfrom scripts.study_case.ID_4.torch_geometric.utils import softmax\n\n\ndef test_softmax():\n src = torch.Tensor([1, 1, 1, 1])\n index = torch.tensor([0, 0, 1, 2])\n\n out = softmax(src, index)\n assert out.tolist() == [0.5, 0.5, 1, 1]\n", "from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport numpy as np\nimport sys\n\nsys.path.append(\"/data\")\n\n\ndef easy_NN():\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n sess = tf.InteractiveSession()\n x = tf.placeholder(tf.float32, [None, 784])\n w = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n # tf 自动的实现 FP BP\n # softmax 返回的是每个类别的预测的概率\n y = tf.nn.softmax(tf.matmul(x, w) + b)\n y_true = tf.placeholder(tf.float32, [None, 10])\n # loss\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y), reduction_indices=[1]))\n # optimizor\n opt = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n tf.train.write_graph(sess.graph_def, '/data/scripts/study_case/pbtxt_files', 'Dive_python2.pbtxt')\n\n # init\n tf.global_variables_initializer().run()\n # train\n\n '''inserted code'''\n from scripts.utils.tf_utils import TensorFlowScheduler\n scheduler = TensorFlowScheduler(name=\"Dive_python\")\n '''inserted code'''\n\n while True:\n batch_xs, batch_ys = mnist.train.next_batch(100)\n # opt.run({x: batch_xs, y_true: 
batch_ys, keep_prob: 0.75})\n _, loss = sess.run([opt, cross_entropy], feed_dict={x: batch_xs, y_true: batch_ys})\n\n '''inserted code'''\n scheduler.loss_checker(loss)\n scheduler.check_time()\n '''inserted code'''\n\n # correct_prediction\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_true, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(accuracy.eval({x: mnist.test.images, y_true: mnist.test.labels}))\n\n\n'''\nf多层神经网络\n'''\n\n\ndef deep_NN():\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n # batch_xs, batch_ys = mnist.train.next_batch(100)\n # test_data = mnist.test.images\n sess = tf.InteractiveSession()\n in_units = 784\n h1_units = 300\n # Outputs random values from a truncated normal distribution\n # 产生正太分布的权重\n w1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))\n b1 = tf.Variable(tf.zeros([h1_units]))\n w2 = tf.Variable(tf.zeros([h1_units, 10]))\n b2 = tf.Variable(tf.zeros([10]))\n x = tf.placeholder(tf.float32, [None, in_units])\n # dropout 的比率\n keep_prob = tf.placeholder(tf.float32)\n # tf 自动的实现 FP BP\n # softmax 返回的是每个类别的预测的概率\n # 构建一个隐层, 加入高斯噪声\n hidden1 = tf.nn.relu(tf.matmul(x + 0.01 * tf.random_normal((in_units,)), w1) + b1)\n # 训练时,dropput 保存的神经元个数小于100%, 预测时,要等于100%\n hidden1_drop = tf.nn.dropout(hidden1, keep_prob=keep_prob)\n # 输出层\n y = tf.nn.softmax(tf.matmul(hidden1_drop, w2) + b2)\n\n y_true = tf.placeholder(tf.float32, [None, 10])\n # loss\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y), reduction_indices=[1]))\n # optimizor\n opt = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)\n # init\n tf.global_variables_initializer().run()\n # TRAINNING\n\n '''inserted code'''\n from scripts.utils.tf_utils import TensorFlowScheduler\n scheduler = TensorFlowScheduler(name=\"Dive_python\")\n '''inserted code'''\n\n while True:\n batch_xs, batch_ys = mnist.train.next_batch(100)\n # opt.run({x: batch_xs, y_true: batch_ys, keep_prob: 0.75})\n _, loss = sess.run([opt, cross_entropy], feed_dict={x: batch_xs, y_true: batch_ys, keep_prob: 0.75})\n\n '''inserted code'''\n scheduler.loss_checker(loss)\n scheduler.check_time()\n '''inserted code'''\n\n # PREDICTING\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_true, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(accuracy.eval({x: mnist.test.images, y_true: mnist.test.labels, keep_prob: 1}))\n\n\nif __name__ == '__main__':\n easy_NN()\n # deep_NN()\n", "\"\"\"An implementation of Bert Model.\"\"\"\nimport typing\n\nimport torch\nimport torch.nn as nn\nfrom pytorch_transformers import BertModel\n\nfrom scripts.study_case.ID_5.matchzoo import preprocessors\nfrom scripts.study_case.ID_5.matchzoo.engine.param_table import ParamTable\nfrom scripts.study_case.ID_5.matchzoo.engine.param import Param\nfrom scripts.study_case.ID_5.matchzoo.engine.base_model import BaseModel\nfrom scripts.study_case.ID_5.matchzoo.engine.base_preprocessor import BasePreprocessor\nfrom scripts.study_case.ID_5.matchzoo.engine import hyper_spaces\nfrom scripts.study_case.ID_5.matchzoo.dataloader import callbacks\nfrom scripts.study_case.ID_5.matchzoo.modules import BertModule\n\n\nclass Bert(BaseModel):\n \"\"\"Bert Model.\"\"\"\n\n @classmethod\n def get_default_params(cls) -> ParamTable:\n \"\"\":return: model default parameters.\"\"\"\n params = super().get_default_params()\n params.add(Param(name='mode', value='bert-base-uncased',\n desc=\"Pretrained Bert model.\"))\n params.add(Param(\n 'dropout_rate', 
0.0,\n hyper_space=hyper_spaces.quniform(\n low=0.0, high=0.8, q=0.01),\n desc=\"The dropout rate.\"\n ))\n return params\n\n @classmethod\n def get_default_preprocessor(\n cls,\n mode: str = 'bert-base-uncased'\n ) -> BasePreprocessor:\n \"\"\":return: Default preprocessor.\"\"\"\n return preprocessors.BertPreprocessor(mode=mode)\n\n @classmethod\n def get_default_padding_callback(\n cls,\n fixed_length_left: int = None,\n fixed_length_right: int = None,\n pad_value: typing.Union[int, str] = 0,\n pad_mode: str = 'pre'\n ):\n \"\"\":return: Default padding callback.\"\"\"\n return callbacks.BertPadding(\n fixed_length_left=fixed_length_left,\n fixed_length_right=fixed_length_right,\n pad_value=pad_value,\n pad_mode=pad_mode)\n\n def build(self):\n \"\"\"Build model structure.\"\"\"\n self.bert = BertModule(mode=self._params['mode'])\n self.dropout = nn.Dropout(p=self._params['dropout_rate'])\n if 'base' in self._params['mode']:\n dim = 768\n elif 'large' in self._params['mode']:\n dim = 1024\n self.out = self._make_output_layer(dim)\n\n def forward(self, inputs):\n \"\"\"Forward.\"\"\"\n\n input_left, input_right = inputs['text_left'], inputs['text_right']\n\n bert_output = self.bert(input_left, input_right)[1]\n\n out = self.out(self.dropout(bert_output))\n\n return out\n", "import torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchvision.utils import make_grid , save_image\nimport sys\n\nsys.path.append(\"/data\")\nbatch_size = 64\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('My_RBM', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor()])), batch_size=batch_size)\n\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('My_RBM', train=False, transform=transforms.Compose([\n transforms.ToTensor()])), batch_size=batch_size)\n\nclass RBM(nn.Module):\n def __init__(self, n_vis, n_hid, k):\n super(RBM, self).__init__()\n self.W = nn.Parameter(torch.randn(n_hid, n_vis) * 1e-2)\n self.v_bias = nn.Parameter(torch.zeros(n_vis))\n self.h_bias = nn.Parameter(torch.zeros(n_hid))\n self.k = k\n\n def vis_hid(self, v):\n p_h = F.sigmoid(F.linear(v, self.W, self.h_bias))\n sample_h = F.relu(torch.sign(p_h - Variable(torch.rand(p_h.size()))))\n return p_h, sample_h\n\n def hid_vis(self, h):\n p_v = F.sigmoid(F.linear(h, self.W.t(), self.v_bias))\n sample_v = F.relu(torch.sign(p_v - Variable(torch.rand(p_v.size()))))\n return p_v, sample_v\n\n def forward(self, v):\n h0, h_ = self.vis_hid(v)\n for _ in range(self.k):\n v0_, v_ = self.hid_vis(h_)\n h0_, h_ = self.vis_hid(v_)\n return v, v_\n\n def free_energy(self, v):\n wx_b = F.linear(v, self.W, self.h_bias)\n vbias_term = v.mv(self.v_bias)\n hidden_term = wx_b.exp().add(1).log().sum(1)\n return (-hidden_term - vbias_term).mean()\n\n\nrbm = RBM(n_vis=784, n_hid=500, k=1)\ntrain_op = optim.SGD(rbm.parameters(), 0.1)\n\n'''inserted code'''\nfrom scripts.utils.torch_utils import TorchScheduler\nscheduler = TorchScheduler(name=\"git1_rbm\")\n'''inserted code'''\n\nwhile True:\n loss_ = []\n for _, (data, target) in enumerate(train_loader):\n sample_data = Variable(data.view(-1, 784)).bernoulli()\n v, v1 = rbm(sample_data)\n loss = rbm.free_energy(v) - rbm.free_energy(v1)\n\n '''inserted code'''\n scheduler.loss_checker(loss)\n '''inserted code'''\n\n loss_.append(loss.item())\n train_op.zero_grad()\n loss.backward()\n 
train_op.step()\n\n '''inserted code'''\n scheduler.check_time()\n '''inserted code'''\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.functional.dropout" ], [ "tensorflow.matmul", "tensorflow.reduce_max", "tensorflow.InteractiveSession", "numpy.sqrt", "tensorflow.reduce_mean", "tensorflow.zeros", "numpy.min", "tensorflow.gradients", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.global_variables_initializer", "numpy.max", "numpy.random.normal", "tensorflow.log", "tensorflow.train.AdamOptimizer", "tensorflow.square", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.random_uniform" ], [ "torch.norm", "torch.ones", "torch.Tensor" ], [ "tensorflow.matmul", "tensorflow.InteractiveSession", "tensorflow.zeros", "numpy.min", "tensorflow.gradients", "tensorflow.placeholder", "numpy.max", "tensorflow.initialize_all_variables", "tensorflow.train.GradientDescentOptimizer", "tensorflow.log", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.abs" ], [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.nn.GRU", "torch.nn.Linear", "torch.cuda.is_available", "torch.arange", "torch.stack", "torch.nn.ReLU" ], [ "scipy.sparse.issparse", "torch.Tensor", "numpy.arange", "sklearn.model_selection.check_cv", "torch.utils.data.Subset" ], [ "numpy.concatenate", "torch.sigmoid", "torch.log" ], [ "numpy.argsort", "pandas.concat", "numpy.random.shuffle", "pandas.DataFrame" ], [ "torch.utils.data.dataloader.default_collate" ], [ "torch.tensor", "torch.load" ], [ "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.functional.log_softmax", "torch.nn.functional.dropout" ], [ "tensorflow.nn.max_pool", "tensorflow.cast", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "tensorflow.Variable", "tensorflow.initialize_all_variables", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.train.write_graph", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.InteractiveSession", "tensorflow.placeholder", "tensorflow.set_random_seed", "tensorflow.constant", "tensorflow.reshape", "tensorflow.log", "tensorflow.abs" ], [ "torch.Tensor", "torch.tensor" ], [ "tensorflow.matmul", "tensorflow.train.AdagradOptimizer", "tensorflow.truncated_normal", "tensorflow.InteractiveSession", "tensorflow.zeros", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.random_normal", "tensorflow.log", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.train.write_graph", "tensorflow.nn.dropout" ], [ "torch.nn.Dropout" ], [ "torch.randn", "torch.nn.functional.linear", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gicheonkang/sglkt-visdial
[ "b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88", "b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88", "b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88" ]
[ "visdialch/decoders/gen.py", "visdialch/utils/initialization.py", "visdialch/encoders/sparse.py" ]
[ "\"\"\"\nThis code is from batra-mlp-lab's repository.\nhttps://github.com/batra-mlp-lab/visdial-challenge-starter-pytorch\n\"\"\"\n\nimport torch\nfrom torch import nn\n\n\nclass GenerativeDecoder(nn.Module):\n def __init__(self, config, vocabulary):\n super().__init__()\n self.config = config\n\n self.word_embed = nn.Embedding(\n len(vocabulary),\n config[\"word_embedding_size\"],\n padding_idx=vocabulary.PAD_INDEX,\n )\n self.answer_rnn = nn.LSTM(\n config[\"word_embedding_size\"],\n config[\"lstm_hidden_size\"],\n config[\"lstm_num_layers\"],\n batch_first=True,\n dropout=config[\"lstm_dropout\"],\n )\n\n self.lstm_to_words = nn.Linear(\n self.config[\"lstm_hidden_size\"], len(vocabulary)\n )\n\n self.dropout = nn.Dropout(p=config[\"lstm_dropout\"])\n self.logsoftmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, encoder_output, batch):\n \"\"\"Given `encoder_output`, learn to autoregressively predict\n ground-truth answer word-by-word during training.\n\n During evaluation, assign log-likelihood scores to all answer options.\n\n Parameters\n ----------\n encoder_output: torch.Tensor\n Output from the encoder through its forward pass.\n (batch_size, num_rounds, lstm_hidden_size)\n \"\"\"\n\n if self.training:\n\n ans_in = batch[\"ans_in\"]\n batch_size, num_rounds, max_sequence_length = ans_in.size()\n\n ans_in = ans_in.view(batch_size * num_rounds, max_sequence_length)\n\n # shape: (batch_size * num_rounds, max_sequence_length,\n # word_embedding_size)\n ans_in_embed = self.word_embed(ans_in)\n\n # reshape encoder output to be set as initial hidden state of LSTM.\n # shape: (lstm_num_layers, batch_size * num_rounds,\n # lstm_hidden_size)\n init_hidden = encoder_output.view(1, batch_size * num_rounds, -1)\n init_hidden = init_hidden.repeat(\n self.config[\"lstm_num_layers\"], 1, 1\n )\n init_cell = torch.zeros_like(init_hidden)\n\n # shape: (batch_size * num_rounds, max_sequence_length,\n # lstm_hidden_size)\n ans_out, (hidden, cell) = self.answer_rnn(\n ans_in_embed, (init_hidden, init_cell)\n )\n ans_out = self.dropout(ans_out)\n\n # shape: (batch_size * num_rounds, max_sequence_length,\n # vocabulary_size)\n ans_word_scores = self.lstm_to_words(ans_out)\n return ans_word_scores\n\n else:\n\n ans_in = batch[\"opt_in\"]\n batch_size, num_rounds, num_options, max_sequence_length = (\n ans_in.size()\n )\n\n ans_in = ans_in.view(\n batch_size * num_rounds * num_options, max_sequence_length\n )\n\n # shape: (batch_size * num_rounds * num_options, max_sequence_length\n # word_embedding_size)\n ans_in_embed = self.word_embed(ans_in)\n\n # reshape encoder output to be set as initial hidden state of LSTM.\n # shape: (lstm_num_layers, batch_size * num_rounds * num_options,\n # lstm_hidden_size)\n init_hidden = encoder_output.view(batch_size, num_rounds, 1, -1)\n init_hidden = init_hidden.repeat(1, 1, num_options, 1)\n init_hidden = init_hidden.view(\n 1, batch_size * num_rounds * num_options, -1\n )\n init_hidden = init_hidden.repeat(\n self.config[\"lstm_num_layers\"], 1, 1\n )\n init_cell = torch.zeros_like(init_hidden)\n\n # shape: (batch_size * num_rounds * num_options,\n # max_sequence_length, lstm_hidden_size)\n ans_out, (hidden, cell) = self.answer_rnn(\n ans_in_embed, (init_hidden, init_cell)\n )\n\n # shape: (batch_size * num_rounds * num_options,\n # max_sequence_length, vocabulary_size)\n ans_word_scores = self.logsoftmax(self.lstm_to_words(ans_out))\n\n # shape: (batch_size * num_rounds * num_options,\n # max_sequence_length)\n target_ans_out = batch[\"opt_out\"].view(\n 
batch_size * num_rounds * num_options, -1\n )\n\n # shape: (batch_size * num_rounds * num_options,\n # max_sequence_length)\n ans_word_scores = torch.gather(\n ans_word_scores, -1, target_ans_out.unsqueeze(-1)\n ).squeeze()\n ans_word_scores = (\n ans_word_scores * (target_ans_out > 0).float().cuda()\n ) # ugly\n\n ans_scores = torch.sum(ans_word_scores, -1)\n ans_scores = ans_scores.view(batch_size, num_rounds, num_options)\n\n return ans_scores\n", "import torch\n\ndef initialize_model_weights(model, initialization=\"he\", lstm_initialization=\"he\"):\n if initialization == \"he\":\n print(\"kaiming normal initialization.\")\n elif initialization == \"xavier\":\n print(\"xavier normal initialization.\")\n else:\n print(\"default initialization, no changes made.\")\n if(initialization):\n for name, param in model.named_parameters():\n # Bias params\n if(\"bias\" in name.split(\".\")[-1]):\n param.data.zero_()\n\n # Batchnorm weight params\n elif(\"weight\" in name.split(\".\")[-1] and len(param.size())==1):\n continue\n # LSTM weight params\n elif(\"weight\" in name.split(\".\")[-1] and \"lstm\" in name):\n if \"xavier\" in lstm_initialization:\n torch.nn.init.xavier_normal_(param)\n elif \"he\" in lstm_initialization:\n torch.nn.init.kaiming_normal_(param)\n # Other weight params\n elif(\"weight\" in name.split(\".\")[-1] and \"lstm\" not in name):\n if \"xavier\" in initialization:\n torch.nn.init.xavier_normal_(param)\n elif \"he\" in initialization:\n torch.nn.init.kaiming_normal_(param)", "\"\"\"\nReasoning Visual Dialog with Sparse Graph Learning and Knowledge Transfer\nGi-Cheon Kang, Junseok Park, Hwaran Lee, Byoung-Tak Zhang, Jin-Hwa Kim\nhttps://arxiv.org/abs/2004.06698\n\"\"\"\nimport numpy as np\nimport torch, math\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .net_utils import MLP\nfrom torch.autograd import Variable\nfrom torch.nn.utils.weight_norm import weight_norm\n\nclass SANet(nn.Module):\n def __init__(self, __C):\n super(SANet, self).__init__()\n self.n_head = 8\n self.d_hid = __C['hidden_size']\n self.d_hid_head = __C['hidden_size'] // 8\n self.gs = GumbelSoftmax(d_in=self.d_hid_head, num_cls=2, dropout=__C['model_dropout'])\n\n self.linear_q = nn.Linear(__C['hidden_size'], __C['hidden_size'])\n self.linear_k = nn.Linear(__C['hidden_size'], __C['hidden_size'])\n self.fc = nn.Linear(__C['hidden_size'], __C['hidden_size'])\n\n def attention(self, q, k):\n logit = q.unsqueeze(2) * k.unsqueeze(3)\n logit = logit.transpose(2, 3)\n attn = logit.sum(-1) / math.sqrt(self.d_hid_head)\n\n binary = self.gs(logit)\n attn = attn * binary\n attn = F.normalize(attn, p=2, dim=-1)**2\n return binary, attn\n\n def forward(self, q, k):\n n_batch = q.size(0)\n q = self.linear_q(q).view(\n n_batch,\n -1, \n self.n_head, \n self.d_hid_head\n ).transpose(1, 2)\n\n k = self.linear_k(k).view(\n n_batch, \n -1, \n self.n_head, \n self.d_hid_head\n ).transpose(1, 2)\n\n binary, attn = self.attention(q, k)\n binary = binary.mean(dim=1)\n attn = attn.mean(dim=1)\n return binary, attn\n\nclass GumbelSoftmax(nn.Module):\n '''\n Softmax Relaxation for Gumbel Max Trick \n '''\n def __init__(self, d_in, num_cls, dropout):\n super().__init__()\n self.linear_g = MLP(\n in_size=d_in,\n mid_size=d_in//2,\n out_size=num_cls,\n dropout_r=dropout,\n use_relu=True\n ) \n\n self.logsoftmax = nn.LogSoftmax(dim=-1)\n\n def st_gumbel_softmax(self, x, temperature=0.5):\n '''\n Straight Throught Gumbel Softmax\n '''\n eps = 1e-20\n noise = Variable(torch.rand(x.size()).cuda())\n\n 
noise.data.add_(eps).log_().neg_()\n noise.data.add_(eps).log_().neg_()\n\n y = (x + noise) / temperature\n y = F.softmax(y, dim=-1)\n\n shape = y.size()\n _, ind = y.max(dim=-1)\n y_hard = torch.zeros_like(y).view(-1, shape[-1])\n y_hard.scatter_(1, ind.view(-1, 1), 1)\n y_hard = y_hard.view(*shape)\n y_hard = (y_hard - y).detach() + y\n return y_hard\n\n def forward(self, rel):\n x = self.linear_g(rel)\n x = self.logsoftmax(x)\n \n if self.training:\n mask = self.st_gumbel_softmax(x)\n else:\n _, ind = x.detach().max(4, keepdim=True) \n mask = x.detach().clone().zero_().scatter_(4, ind, 1) \n mask = mask[:, :, :, :, -1]\n return mask\n" ]
[ [ "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.nn.LSTM", "torch.sum", "torch.zeros_like" ], [ "torch.nn.init.xavier_normal_", "torch.nn.init.kaiming_normal_" ], [ "torch.nn.functional.normalize", "torch.nn.functional.softmax", "torch.nn.LogSoftmax", "torch.zeros_like", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dballesteros7/master-thesis-2015
[ "8c0bf9a6eef172fc8167a30780ae0666f8ea2d88" ]
[ "src/utils/file.py" ]
[ "import numpy as np\n\n\ndef load_csv_test_data(filename):\n with open(filename, 'r') as input_file:\n return [[item for item in line.strip().split(',')]\n for line in input_file]\n\n\ndef load_csv_data(filename:str) -> np.ndarray:\n if not filename.endswith('.csv'):\n filename += '.csv'\n with open(filename, 'r') as input_file:\n return [[int(item) for item in line.strip().split(',')]\n for line in input_file]\n\n\ndef load_set_data(filename: str) -> np.ndarray:\n with open(filename, 'r') as input_file:\n loaded_set = []\n for line in input_file:\n if line.strip() != '':\n tokens = line.strip().split(',')\n set_items = np.array(list(map(int, tokens)))\n else:\n set_items = []\n loaded_set.append(set_items)\n return np.array(loaded_set)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
itaigat/MEMM_POS_Tagger
[ "9a095720f50dea2a9dba444ebdb0b325a51ea130" ]
[ "tests/test_clf.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_array_almost_equal\n\n\ndef test_first_loss(clf, feature_matrix, X, y, sentences):\n \"\"\"\n test first loss evaluation on train_dev.wtag (unigram features)\n\n NOTICE: no regularization (lambda = 0)\n\n first test: clf produces feature matrix of size (29,45)\n\n second test: loss = 0 - 29*np.log(45) = -110.39321220333923\n \"\"\"\n assert (feature_matrix.shape == (29, 45))\n\n m = feature_matrix.shape[1]\n v_init = np.zeros(m)\n # reg\n clf.reg = 0\n loss = clf.loss(v_init, feature_matrix, X, y, sentences)\n\n assert_array_almost_equal(loss, 110.39321220333923)\n\n\ndef test_first_loss_(clf, feature_matrix, X, y, sentences):\n # assumes train_dev_testclf and unigram features only\n assert (feature_matrix.shape == (2, 2))\n m = feature_matrix.shape[1]\n v_init = np.zeros(m)\n # reg\n clf.reg = 0\n loss = clf.loss(v_init, feature_matrix, X, y, sentences)\n\n assert_array_almost_equal(loss, 1.3862943611198906)\n\n\ndef test_first_grad_(clf, feature_matrix, X, y, sentences):\n # assumes train_dev_testclf and unigram features only\n m = feature_matrix.shape[1]\n v_init = np.zeros(m)\n clf.reg = 0\n out = clf.grad(v_init, feature_matrix, X, y, sentences)\n first_grad = [0, 0] # (1-2*0.5, 1-2*0.5) , all get same probability when v == 0\n assert_array_almost_equal(out, first_grad)\n\n\ndef test_first_loss_grad_nonzero_v(clf, feature_matrix, X, y, sentences):\n # v_init = [0.5,1]\n m = feature_matrix.shape[1]\n v_init = np.ones(m)\n v_init[0] = 0.5\n # loss\n first_term = 0.5 + 1\n second_term = 2 * np.log(np.exp(0.5) + np.exp(1))\n first_loss = -(first_term - second_term)\n out = clf.loss(v_init, feature_matrix, X, y, sentences)\n assert_array_almost_equal(out, first_loss)\n # grad\n p0 = np.exp(0.5) / (np.exp(0.5) + np.exp(1))\n p1 = 1 - p0\n first_grad = [-(1 - 2 * p0), -(1 - 2 * p1)]\n out = clf.grad(v_init, feature_matrix, X, y, sentences)\n assert_array_almost_equal(out, first_grad)\n\n\ndef test_first_grad(clf, feature_matrix, X, y, sentences):\n \"\"\"\n test first grad evaluation on train_dev.wtag (unigram features)\n\n basically the grad is composed of two terms, grad = A - B , vector of shape (m,)\n where A is the number of times the specific feature was enabled (on all samples), what's called empirical counts\n and B is the expected counts (according to the current v) for that feature, over all samples\n\n for example, the first tag (CC) wasn't seen on training data, then its A=0\n its B=sum_i (1/45) = 29/45 , => grad_CC = 0 - 29/45\n \"\"\"\n m = feature_matrix.shape[1]\n v_init = np.zeros(m)\n # reg\n clf.reg = 0\n out = clf.grad(v_init, feature_matrix, X, y, sentences)\n first_grad = [-0.64444444, 1.35555556, 3.35555556, - 0.64444444, - 0.64444444, 1.35555556,\n 2.35555556, - 0.64444444, 0.35555556, - 0.64444444, - 0.64444444, 3.35555556,\n 1.35555556, 1.35555556, - 0.64444444, - 0.64444444, - 0.64444444, - 0.64444444,\n - 0.64444444, - 0.64444444, 0.35555556, - 0.64444444, - 0.64444444, - 0.64444444,\n 0.35555556, - 0.64444444, - 0.64444444, 1.35555556, - 0.64444444, 0.35555556,\n - 0.64444444, - 0.64444444, - 0.64444444, - 0.64444444, - 0.64444444, - 0.64444444,\n - 0.64444444, - 0.64444444, - 0.64444444, - 0.64444444, - 0.64444444, - 0.64444444,\n 1.35555556, 1.35555556, - 0.64444444]\n first_grad = np.array(first_grad)\n first_grad = -1 * first_grad\n\n # up to 6 decimal dots\n assert_array_almost_equal(out, first_grad)\n\n\ndef run_clf_tests(clf, feature_matrix, X, y, sentences):\n \"\"\"\n run all classifier tests\n 
\"\"\"\n # test_first_loss_(clf, feature_matrix, X, y, sentences)\n # test_first_grad_(clf, feature_matrix, X, y, sentences)\n # test_first_loss_grad_nonzero_v(clf, feature_matrix, X, y, sentences) # breaks with numeric stability fix\n" ]
[ [ "numpy.ones", "numpy.array", "numpy.exp", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jskhu/probdet-1
[ "b8bda3bd7cdd573aa9f70a62453d147664211af6" ]
[ "src/core/visualization_tools/results_processing_tools.py" ]
[ "import glob\nimport itertools\nimport numpy as np\nimport os\nimport pickle\nimport torch\n\nfrom collections import defaultdict\n\n# Project imports\nfrom core.setup import setup_config, setup_arg_parser\nfrom probabilistic_inference.inference_utils import get_inference_output_dir\n\n\ndef get_clean_results_dict(config_names,\n configs_list,\n inference_configs_list):\n\n # Level 0 is coco validation set with no corruption, level 10 is open\n # images, level 11 is open images ood\n image_corruption_levels = [0, 1, 3, 5, 10, 11]\n\n test_dataset_coco = \"coco_2017_custom_val\"\n test_dataset_open_images = \"openimages_val\"\n test_dataset_open_images_odd = \"openimages_odd_val\"\n\n arg_parser = setup_arg_parser()\n args = arg_parser.parse_args()\n\n # Initiate dataframe dict\n res_dict_clean = defaultdict(lambda: defaultdict(list))\n\n for config_name, config, inference_config_name in zip(\n config_names, configs_list, inference_configs_list):\n # Setup config\n args.config_file = config\n args.inference_config = inference_config_name\n args.test_dataset = test_dataset_coco\n cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)\n cfg.defrost()\n\n # Read coco dataset results\n cfg.ACTUAL_TEST_DATASET = args.test_dataset\n\n for image_corruption_level in image_corruption_levels:\n # Build path to gt instances and inference output\n args.image_corruption_level = image_corruption_level\n\n if image_corruption_level == 0:\n image_corruption_level = 'Val'\n elif image_corruption_level == 10:\n image_corruption_level = 'OpenIm'\n elif image_corruption_level == 11:\n image_corruption_level = 'OpenIm OOD'\n else:\n image_corruption_level = 'C' + str(image_corruption_level)\n if 'OpenIm' not in image_corruption_level:\n inference_output_dir = get_inference_output_dir(\n cfg['OUTPUT_DIR'],\n args.test_dataset,\n args.inference_config,\n args.image_corruption_level)\n\n dictionary_file_name = glob.glob(\n os.path.join(\n inference_output_dir,\n 'probabilistic_scoring_res_averaged_*.pkl'))[0]\n else:\n args.image_corruption_level = 0\n args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd\n inference_output_dir = get_inference_output_dir(\n cfg['OUTPUT_DIR'],\n args.test_dataset,\n args.inference_config,\n args.image_corruption_level)\n prob_dict_name = 'probabilistic_scoring_res_averaged_*.pkl' if image_corruption_level == 'OpenIm' else 'probabilistic_scoring_res_odd_*.pkl'\n dictionary_file_name = glob.glob(\n os.path.join(\n inference_output_dir,\n prob_dict_name))[0]\n\n with open(dictionary_file_name, \"rb\") as pickle_file:\n res_dict = pickle.load(pickle_file)\n\n if image_corruption_level != 'OpenIm OOD':\n # True Positives Results\n res_dict_clean['True Positives']['Negative Log Likelihood (Classification)'].extend(\n res_dict['true_positives_cls_analysis']['ignorance_score_mean'])\n res_dict_clean['True Positives']['Brier Score'].extend(\n res_dict['true_positives_cls_analysis']['brier_score_mean'])\n res_dict_clean['True Positives']['Negative Log Likelihood (Regression)'].extend(\n res_dict['true_positives_reg_analysis']['ignorance_score_mean'])\n res_dict_clean['True Positives']['Mean Squared Error'].extend(\n res_dict['true_positives_reg_analysis']['mean_squared_error'])\n res_dict_clean['True Positives']['Energy Score'].extend(\n res_dict['true_positives_reg_analysis']['energy_score_mean'])\n res_dict_clean['True Positives']['Image Corruption Level'].extend(\n [image_corruption_level] *\n 
res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])\n res_dict_clean['True Positives']['Method Name'].extend(\n [config_name] * res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])\n\n # Duplicates Results\n res_dict_clean['Duplicates']['Negative Log Likelihood (Classification)'].extend(\n res_dict['duplicates_cls_analysis']['ignorance_score_mean'])\n res_dict_clean['Duplicates']['Brier Score'].extend(\n res_dict['duplicates_cls_analysis']['brier_score_mean'])\n res_dict_clean['Duplicates']['Negative Log Likelihood (Regression)'].extend(\n res_dict['duplicates_reg_analysis']['ignorance_score_mean'])\n res_dict_clean['Duplicates']['Mean Squared Error'].extend(\n res_dict['duplicates_reg_analysis']['mean_squared_error'])\n res_dict_clean['Duplicates']['Energy Score'].extend(\n res_dict['duplicates_reg_analysis']['energy_score_mean'])\n res_dict_clean['Duplicates']['Image Corruption Level'].extend(\n [image_corruption_level] *\n res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])\n res_dict_clean['Duplicates']['Method Name'].extend(\n [config_name] * res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])\n\n # Localization Error Results\n res_dict_clean['Localization Errors']['Negative Log Likelihood (Classification)'].extend(\n res_dict['localization_errors_cls_analysis']['ignorance_score_mean'])\n res_dict_clean['Localization Errors']['Brier Score'].extend(\n res_dict['localization_errors_cls_analysis']['brier_score_mean'])\n res_dict_clean['Localization Errors']['Negative Log Likelihood (Regression)'].extend(\n res_dict['localization_errors_reg_analysis']['ignorance_score_mean'])\n res_dict_clean['Localization Errors']['Mean Squared Error'].extend(\n res_dict['localization_errors_reg_analysis']['mean_squared_error'])\n res_dict_clean['Localization Errors']['Energy Score'].extend(\n res_dict['localization_errors_reg_analysis']['energy_score_mean'])\n res_dict_clean['Localization Errors']['Image Corruption Level'].extend(\n [image_corruption_level] *\n res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])\n res_dict_clean['Localization Errors']['Method Name'].extend(\n [config_name] *\n res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])\n\n # False Positives Results\n res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].extend(\n res_dict['false_positives_cls_analysis']['ignorance_score_mean'])\n res_dict_clean['False Positives']['Brier Score'].extend(\n res_dict['false_positives_cls_analysis']['brier_score_mean'])\n res_dict_clean['False Positives']['Entropy'].extend(\n res_dict['false_positives_reg_analysis']['total_entropy_mean'])\n res_dict_clean['False Positives']['Image Corruption Level'].extend(\n [image_corruption_level] *\n res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])\n res_dict_clean['False Positives']['Method Name'].extend(\n [config_name] *\n res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])\n else:\n # False Positives Results\n res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].append(\n res_dict['ignorance_score_mean'])\n res_dict_clean['False Positives']['Brier Score'].append(\n res_dict['brier_score_mean'])\n res_dict_clean['False Positives']['Entropy'].append(\n res_dict['total_entropy_mean'])\n res_dict_clean['False Positives']['Image Corruption Level'].append(\n image_corruption_level)\n res_dict_clean['False Positives']['Method Name'].append(\n config_name)\n 
return res_dict_clean\n\n\ndef get_mAP_results(config_names,\n configs_list,\n inference_configs_list):\n # Level 0 is coco validation set with no corruption, level 10 is open\n # images, level 11 is open images ood\n image_corruption_levels = [0, 1, 2, 3, 4, 5, 10]\n\n test_dataset_coco = \"coco_2017_custom_val\"\n test_dataset_open_images = \"openimages_val\"\n\n arg_parser = setup_arg_parser()\n args = arg_parser.parse_args()\n\n # Initiate dataframe dict\n mAP_results = defaultdict(list)\n\n for config_name, config, inference_config_name in zip(\n config_names, configs_list, inference_configs_list):\n # Setup config\n args.config_file = config\n args.inference_config = inference_config_name\n args.test_dataset = test_dataset_coco\n cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)\n cfg.defrost()\n\n # Read coco dataset results\n cfg.ACTUAL_TEST_DATASET = args.test_dataset\n\n for image_corruption_level in image_corruption_levels:\n # Build path to gt instances and inference output\n args.image_corruption_level = image_corruption_level\n if image_corruption_level == 0:\n image_corruption_level = 'Val'\n elif image_corruption_level == 10:\n image_corruption_level = 'OpenIm'\n else:\n image_corruption_level = 'C' + str(image_corruption_level)\n\n if 'OpenIm' not in image_corruption_level:\n inference_output_dir = get_inference_output_dir(\n cfg['OUTPUT_DIR'],\n args.test_dataset,\n args.inference_config,\n args.image_corruption_level)\n else:\n args.image_corruption_level = 0\n args.test_dataset = test_dataset_open_images\n inference_output_dir = get_inference_output_dir(\n cfg['OUTPUT_DIR'],\n args.test_dataset,\n args.inference_config,\n args.image_corruption_level)\n\n text_file_name = glob.glob(\n os.path.join(\n inference_output_dir,\n 'mAP_res.txt'))[0]\n with open(text_file_name, \"r\") as f:\n mAP = f.read().strip('][\\n').split(', ')[0]\n mAP = float(mAP) * 100\n\n mAP_results['Method Name'].append(config_name)\n mAP_results['Image Corruption Level'].append(\n image_corruption_level)\n mAP_results['mAP'].append(mAP)\n\n return mAP_results\n\n\ndef get_matched_results_dicts(config_names,\n configs_list,\n inference_configs_list,\n iou_min=0.1,\n iou_correct=0.5):\n\n # Level 0 is coco validation set with no corruption, level 10 is open\n # images, level 11 is open images ood\n image_corruption_levels = [0, 10, 11]\n\n test_dataset_coco = \"coco_2017_custom_val\"\n test_dataset_open_images = \"openimages_val\"\n test_dataset_open_images_odd = \"openimages_odd_val\"\n\n arg_parser = setup_arg_parser()\n args = arg_parser.parse_args()\n\n # Initiate dataframe dict\n res_dict_clean = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n\n for config_name, config, inference_config_name in zip(\n config_names, configs_list, inference_configs_list):\n # Setup config\n args.config_file = config\n args.inference_config = inference_config_name\n args.test_dataset = test_dataset_coco\n cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)\n cfg.defrost()\n\n # Read coco dataset results\n cfg.ACTUAL_TEST_DATASET = args.test_dataset\n\n for image_corruption_level in image_corruption_levels:\n # Build path to gt instances and inference output\n args.image_corruption_level = image_corruption_level\n\n if image_corruption_level == 0:\n image_corruption_level = 'Val'\n elif image_corruption_level == 10:\n image_corruption_level = 'OpenIm'\n elif image_corruption_level == 11:\n image_corruption_level = 'OpenIm OOD'\n else:\n image_corruption_level = 
'C' + str(image_corruption_level)\n if 'OpenIm' not in image_corruption_level:\n inference_output_dir = get_inference_output_dir(\n cfg['OUTPUT_DIR'],\n args.test_dataset,\n args.inference_config,\n args.image_corruption_level)\n\n # Get matched results by either generating them or loading from\n # file.\n\n dictionary_file_name = glob.glob(\n os.path.join(\n inference_output_dir,\n \"matched_results_{}_{}_*.pth\".format(\n iou_min,\n iou_correct)))[0]\n\n matched_results = torch.load(\n dictionary_file_name, map_location='cuda')\n elif image_corruption_level == 'OpenIm':\n args.image_corruption_level = 0\n args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd\n inference_output_dir = get_inference_output_dir(\n cfg['OUTPUT_DIR'],\n args.test_dataset,\n args.inference_config,\n args.image_corruption_level)\n dictionary_file_name = glob.glob(\n os.path.join(\n inference_output_dir,\n \"matched_results_{}_{}_*.pth\".format(\n iou_min,\n iou_correct)))[0]\n matched_results = torch.load(\n dictionary_file_name, map_location='cuda')\n else:\n args.image_corruption_level = 0\n args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd\n inference_output_dir = get_inference_output_dir(\n cfg['OUTPUT_DIR'],\n args.test_dataset,\n args.inference_config,\n args.image_corruption_level)\n dictionary_file_name = glob.glob(\n os.path.join(\n inference_output_dir,\n \"preprocessed_predicted_instances_odd_*.pth\"))[0]\n preprocessed_predicted_instances = torch.load(\n dictionary_file_name, map_location='cuda')\n\n predicted_boxes = preprocessed_predicted_instances['predicted_boxes']\n predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']\n predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']\n\n predicted_boxes = list(itertools.chain.from_iterable(\n [predicted_boxes[key] for key in predicted_boxes.keys()]))\n predicted_cov_mats = list(itertools.chain.from_iterable(\n [predicted_cov_mats[key] for key in predicted_cov_mats.keys()]))\n predicted_cls_probs = list(itertools.chain.from_iterable(\n [predicted_cls_probs[key] for key in predicted_cls_probs.keys()]))\n\n predicted_boxes = torch.stack(\n predicted_boxes, 1).transpose(\n 0, 1)\n predicted_cov_mats = torch.stack(\n predicted_cov_mats, 1).transpose(0, 1)\n predicted_cls_probs = torch.stack(\n predicted_cls_probs,\n 1).transpose(\n 0,\n 1)\n matched_results = {\n 'predicted_box_means': predicted_boxes,\n 'predicted_box_covariances': predicted_cov_mats,\n 'predicted_cls_probs': predicted_cls_probs}\n\n if image_corruption_level != 'OpenIm OOD':\n all_results_means = torch.cat(\n (matched_results['true_positives']['predicted_box_means'],\n matched_results['localization_errors']['predicted_box_means'],\n matched_results['duplicates']['predicted_box_means'],\n matched_results['false_positives']['predicted_box_means']))\n\n all_results_covs = torch.cat(\n (matched_results['true_positives']['predicted_box_covariances'],\n matched_results['localization_errors']['predicted_box_covariances'],\n matched_results['duplicates']['predicted_box_covariances'],\n matched_results['false_positives']['predicted_box_covariances']))\n\n all_gt_means = torch.cat(\n (matched_results['true_positives']['gt_box_means'],\n matched_results['localization_errors']['gt_box_means'],\n matched_results['duplicates']['gt_box_means'],\n matched_results['false_positives']['predicted_box_means']*np.NaN))\n\n 
predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(\n all_results_means.to('cpu'),\n all_results_covs.to('cpu') +\n 1e-2 *\n torch.eye(all_results_covs.shape[2]).to('cpu'))\n predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(\n 'cuda')\n predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(\n 'cuda')\n predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(\n 'cuda')\n predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(\n 'cuda')\n predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(\n 'cuda')\n all_entropy = predicted_multivariate_normal_dists.entropy()\n\n all_log_prob = -predicted_multivariate_normal_dists.log_prob(all_gt_means)\n # Energy Score.\n sample_set = predicted_multivariate_normal_dists.sample((3,)).to('cuda')\n sample_set_1 = sample_set[:-1]\n sample_set_2 = sample_set[1:]\n\n energy_score = torch.norm(\n (sample_set_1 - all_gt_means),\n dim=2).mean(0) - 0.5 * torch.norm(\n (sample_set_1 - sample_set_2),\n dim=2).mean(0)\n\n mse_loss = torch.nn.MSELoss(reduction='none')\n mse = mse_loss(all_gt_means, all_results_means).mean(1)\n\n res_dict_clean[config_name][image_corruption_level]['Entropy'].extend(\n all_entropy.cpu().numpy())\n\n res_dict_clean[config_name][image_corruption_level]['MSE'].extend(\n mse.cpu().numpy())\n res_dict_clean[config_name][image_corruption_level]['NLL'].extend(\n all_log_prob.cpu().numpy())\n res_dict_clean[config_name][image_corruption_level]['ED'].extend(\n energy_score.cpu().numpy())\n\n res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.cat(\n (matched_results['true_positives']['iou_with_ground_truth'],\n matched_results['localization_errors']['iou_with_ground_truth'][:, 0],\n matched_results['duplicates']['iou_with_ground_truth'],\n torch.zeros(\n matched_results['false_positives']['predicted_box_means'].shape[0]).to('cuda')*np.NaN)).cpu().numpy())\n\n predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(\n matched_results['false_positives']['predicted_box_means'].to('cpu'),\n matched_results['false_positives']['predicted_box_covariances'].to('cpu') +\n 1e-2 *\n torch.eye(matched_results['false_positives']['predicted_box_covariances'].shape[2]).to('cpu'))\n predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(\n 'cuda')\n predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(\n 'cuda')\n predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(\n 'cuda')\n predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(\n 'cuda')\n predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(\n 'cuda')\n FP_Entropy = predicted_multivariate_normal_dists.entropy()\n res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(\n FP_Entropy.cpu().numpy())\n\n predicted_cat_dists_fp = matched_results['false_positives']['predicted_cls_probs']\n\n if predicted_cat_dists_fp.shape[1] == 80:\n predicted_cat_dists_fp, _ = predicted_cat_dists_fp.max(dim=1)\n predicted_cat_dists_fp = 1-predicted_cat_dists_fp\n 
predicted_categorical_dists = torch.distributions.Bernoulli(\n probs=predicted_cat_dists_fp)\n else:\n predicted_categorical_dists = torch.distributions.Categorical(\n probs=matched_results['false_positives']['predicted_cls_probs'])\n\n all_pred_ent = predicted_categorical_dists.entropy()\n res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(\n all_pred_ent.cpu().numpy())\n\n if image_corruption_level == 'OpenIm':\n res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(\n torch.cat(\n (matched_results['true_positives']['is_truncated'],\n matched_results['localization_errors']['is_truncated'],\n matched_results['duplicates']['is_truncated'],\n torch.full((\n matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())\n res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(\n torch.cat(\n (matched_results['true_positives']['is_occluded'],\n matched_results['localization_errors']['is_occluded'],\n matched_results['duplicates']['is_occluded'],\n torch.full((\n matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())\n else:\n res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(\n torch.cat(\n (torch.full((\n matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,\n torch.full((\n matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,\n dtype=torch.float32).to('cuda'),\n torch.full((\n matched_results['duplicates']['predicted_box_means'].shape[0],), -1,\n dtype=torch.float32).to('cuda'),\n torch.full((\n matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())\n res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(\n torch.cat(\n (torch.full((\n matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,\n torch.full((\n matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,\n dtype=torch.float32).to('cuda')*np.NaN,\n torch.full((\n matched_results['duplicates']['predicted_box_means'].shape[0],), -1,\n dtype=torch.float32).to('cuda')*np.NaN,\n torch.full((\n matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())\n else:\n predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(\n matched_results['predicted_box_means'].to('cpu'),\n matched_results['predicted_box_covariances'].to('cpu') +\n 1e-2 *\n torch.eye(matched_results['predicted_box_covariances'].shape[2]).to('cpu'))\n predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(\n 'cuda')\n predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(\n 'cuda')\n predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(\n 'cuda')\n predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(\n 'cuda')\n predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(\n 'cuda')\n all_entropy = predicted_multivariate_normal_dists.entropy()\n res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(\n 
all_entropy.cpu().numpy())\n res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.zeros(\n matched_results['predicted_box_means'].shape[0]).cpu().numpy())\n res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(torch.full((\n matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)\n res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(torch.full((\n matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)\n\n all_results_cat = matched_results['predicted_cls_probs']\n if all_results_cat.shape[1] == 80:\n predicted_cat_dists_fp, _ = all_results_cat.max(dim=1)\n predicted_cat_dists_fp = 1-predicted_cat_dists_fp\n predicted_categorical_dists = torch.distributions.Bernoulli(\n probs=predicted_cat_dists_fp)\n else:\n predicted_categorical_dists = torch.distributions.Categorical(\n probs=all_results_cat)\n\n all_pred_ent = predicted_categorical_dists.entropy()\n res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(\n all_pred_ent.cpu().numpy())\n\n return res_dict_clean\n\n\ndef mean_reject_outliers(x, outlierConstant=1.5):\n a = np.array(x)\n upper_quartile = np.percentile(a, 75)\n lower_quartile = np.percentile(a, 25)\n IQR = (upper_quartile - lower_quartile) * outlierConstant\n quartileSet = (lower_quartile - IQR, upper_quartile + IQR)\n\n result = a[np.where((a >= quartileSet[0]) & (a <= quartileSet[1]))]\n return np.nanmean(result)\n" ]
[ [ "torch.norm", "torch.full", "torch.load", "torch.cat", "torch.zeros", "torch.distributions.Bernoulli", "torch.eye", "numpy.percentile", "torch.distributions.Categorical", "numpy.nanmean", "torch.stack", "numpy.array", "numpy.where", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pengyang486868/PY-read-Document
[ "8d1d145cc6c384a64b3a28781dbcce1733b77513", "8d1d145cc6c384a64b3a28781dbcce1733b77513", "8d1d145cc6c384a64b3a28781dbcce1733b77513" ]
[ "analysis_pdf.py", "testrun.py", "docDAL/mysql.py" ]
[ "import pandas as pd\nfrom cloudservice import get_documenttask, download_doc, get_new_doc_task_db\nfrom cloudservice import get_doctag, create_doctag, delete_doctag\nfrom cloudservice import create_doctagrel, delete_doctagrel\nfrom cloudservice import change_step\nfrom cloudservice import get_docs_byid, fill_docinfo\nfrom cloudservice import get_all_projs, get_file_projs\nfrom cloudservice import add_attachment\nimport time, os\nimport config\nimport core\nimport utils\nfrom datetime import datetime\n\n\ndef analysis_log(info, info_obj):\n print(info, info_obj)\n\n\ndef on_loop(project_id):\n # docresponse = get_documenttask(projid=project_id)\n # docdata = pd.DataFrame(docresponse)\n docdata = get_new_doc_task_db(project_id, 'pdf')\n if len(docdata) == 0:\n return\n\n # docdata = docdata[(docdata['step'] == 1) & (docdata['fileType'] == 'dwg')]\n docdata = docdata.tail(config.n_for_project_in_loop)\n docdata.columns = [s[0].lower() + s[1:] for s in docdata.columns]\n\n docdata = (docdata.dropna(subset=['fileUrl', 'step'])\n .reset_index()\n )\n\n # docdata = (docdata.sort_values('name')\n # .dropna(subset=['fileUrl', 'step'])\n # .reset_index()\n # )\n\n # basepath = os.path.join(config.root_dir, str(project_id))\n basepath = config.root_dir\n imgdir = os.path.join(config.root_dir, 'images')\n for indx, dt in docdata.iterrows():\n dt['createTime'] = str(dt['createTime'].asm8)\n print(datetime.now())\n info_log_obj = {'id': dt['fileId'], 'name': dt['name']}\n # analysis_log('开始', info_log_obj)\n if not dt['fileUrl'].startswith('http'):\n dt['step'] = 6\n change_step(dt['id'], dt.to_dict(), projid=project_id)\n analysis_log('无文件', info_log_obj)\n continue\n\n # 不分析一些类型\n no_analysis = False\n for tp in config.skip_file_types:\n if not dt['fileType'] or tp in dt['fileType']:\n dt['step'] = 5\n change_step(dt['id'], dt.to_dict(), projid=project_id)\n info_log_obj['type'] = dt['fileType']\n analysis_log('跳过类型', info_log_obj)\n no_analysis = True\n break\n if no_analysis:\n continue\n\n try:\n # 下载文件到本地文件夹\n curpath = os.path.join(basepath, dt['name'])\n download_doc(dt['fileUrl'], curpath)\n except:\n analysis_log('下载文件', info_log_obj)\n continue\n\n # 转换文件\n try:\n # 很大的\n if os.path.getsize(curpath) > 300 * 1000 * 1000:\n analysis_log('文件过大', info_log_obj)\n dt['step'] = 4\n change_step(dt['id'], dt.to_dict(), projid=project_id)\n analysis_log('完成', info_log_obj)\n continue\n\n ext_tuple = os.path.splitext(dt['name'])\n fname = ext_tuple[0]\n extname = ext_tuple[1]\n transformed = core.transform(curpath, basepath, extname)\n except:\n analysis_log('转换文件', info_log_obj)\n continue\n\n # 分析成字段\n try:\n kwords, kwfreq, pharr, nwarr, sumarr, attaimges, *drawing_none = core.analysis(\n curpath, extname, imgdir=imgdir, do_drawings=True)\n\n kwords_arr = kwords.split(',')\n real_kwords = []\n for kw in kwords_arr:\n if is_real_kw(kw):\n real_kwords.append(kw)\n if len(real_kwords) > 5:\n low_kw = real_kwords[5:]\n else:\n low_kw = []\n except Exception as e:\n dt['step'] = 7\n change_step(dt['id'], dt.to_dict(), projid=project_id)\n analysis_log('分析成字段', info_log_obj)\n print(e)\n continue\n\n # 图片附件\n try:\n # 上传oss\n upload_result = core.upload_images(attaimges)\n\n # 写入附件表\n for atta in upload_result:\n atta_obj = {\n \"name\": atta['name'],\n \"remark\": \"\",\n \"keyword\": \"\",\n \"abstract\": utils.remove_blank(atta['abstract']),\n \"url\": atta['url'],\n \"fileSize\": atta['fileSize'],\n \"fileType\": atta['fileType'],\n \"newWords\": \"\",\n \"wordFrequency\": \"\",\n \"phrases\": 
\"\",\n \"linkType\": \"文件关联图片\",\n \"fileId\": dt['fileId']\n }\n add_attachment(atta_obj, projid=project_id)\n except Exception as e:\n print(e)\n analysis_log('图片附件', info_log_obj)\n continue\n\n # 文件表写入字段\n file_table_write_success = False\n try:\n doc_record = get_docs_byid(dt['fileId'], projid=project_id)\n\n # choose summary\n real_summary = []\n for su in sumarr:\n if is_real_summary(su):\n real_summary.append(su)\n summarylimit = 3\n if len(real_summary) > summarylimit:\n real_summary = sorted(real_summary, key=lambda x: len(x), reverse=True)[:summarylimit]\n\n nwlimit = 900\n nwarr = utils.remove_blank(nwarr)\n if len(nwarr) > nwlimit:\n nwarr = nwarr[:nwlimit]\n updated = {\n # \"keyWord\": kwords,\n \"keyWord\": ','.join(low_kw),\n \"abstract\": ','.join(real_summary),\n \"newWords\": nwarr,\n \"wordFrequency\": kwfreq,\n \"phrases\": pharr\n }\n\n doc_record.update(updated)\n # print(doc_record)\n fill_docinfo(doc_record['id'], doc_record, projid=project_id)\n file_table_write_success = True\n except Exception as e:\n analysis_log('文件表填入', info_log_obj)\n print(e)\n continue\n\n # 创建新标签并关联\n try:\n if not real_kwords:\n analysis_log('无内容', info_log_obj)\n else:\n alltags = get_doctag(projid=project_id)\n if len(real_kwords) >= config.web_keywords_num:\n curtags = real_kwords[:config.web_keywords_num]\n else:\n curtags = real_kwords\n dtrels = []\n for curtag in curtags:\n existq = False\n for t in alltags:\n if str(t['name']).upper() == str(curtag).upper():\n dtrels.append((dt['fileId'], t['id']))\n existq = True\n break\n if not existq:\n tagid = create_doctag(curtag, projid=project_id)\n dtrels.append((dt['fileId'], tagid))\n # 写入关联文件和标签\n create_doctagrel(dtrels, projid=project_id)\n except Exception as e:\n analysis_log('标签', info_log_obj)\n print(e)\n continue\n\n # 更改task的阶段为已完成\n if file_table_write_success:\n dt['step'] = 2\n change_step(dt['id'], dt.to_dict(), projid=project_id)\n\n # 删除本地下载文件\n pass\n analysis_log('完成', info_log_obj)\n\n # delete_doctagrel(13, projid=project_id)\n print('end proj')\n\n\ndef is_real_kw(kw: str) -> bool:\n if len(kw) < 2:\n return False\n\n undercount = 0\n for c in kw:\n if c == '_':\n undercount += 1\n if undercount / len(kw) > 0.499:\n return False\n return True\n\n\ndef is_real_summary(su) -> bool:\n if len(su) < 6:\n return False\n return True\n\n\ndef find_needed_project_ids():\n # docresponse = get_documenttask(projid=0)\n allproj = get_all_projs()\n if len(allproj) == 0:\n return []\n projs = pd.DataFrame(allproj)['id'].tolist()\n\n if len(projs) == 0:\n return []\n\n return sorted(set([p for p in projs if p not in config.exclude_projects]), reverse=True)\n\n\ndef exitq() -> bool:\n with open('stop.cms') as sf:\n sign = sf.readline()\n sign = int(sign)\n # print(sign)\n if sign > 0:\n return True\n return False\n\n\nif __name__ == '__main__':\n # servicetest()\n # projects = find_needed_project_ids() # with exclude\n projects = [687]\n # projects = [26, 193, 406, 53]\n have_file_projects = projects\n # have_file_projects = get_file_projs()\n\n loop_id = 0\n while True:\n if exitq():\n print('exit')\n print(datetime.now())\n break\n loop_id += 1\n print('loop: ' + str(loop_id))\n for pid in projects:\n try:\n print('loop: ' + str(loop_id) + ' / proj: ' + str(pid))\n if pid not in have_file_projects:\n continue\n time.sleep(0.1)\n on_loop(project_id=pid)\n print()\n except Exception as e:\n print(e)\n\n time.sleep(2)\n", "import os\nfrom docDAL import mysql as conn\nimport config\nimport pandas as pd\nimport core\nimport 
shutil\n\nusername = config.test_username\nrawfiledir = os.path.join(config.root_dir, username, 'raw')\nfiledir = os.path.join(config.root_dir, username, 'f')\nimgdir = os.path.join(config.root_dir, username, 'image')\nfname_arr = os.listdir(rawfiledir)\ntransname_arr = os.listdir(filedir)\ntransname_arr_noext = list(map(lambda x: os.path.splitext(x)[0], transname_arr))\n\nretransform = False\nif retransform:\n print('transform')\n for fullname in fname_arr:\n print(fullname)\n ext_tuple = os.path.splitext(fullname)\n fname = ext_tuple[0]\n extname = ext_tuple[1]\n if fname not in transname_arr_noext:\n fpath = os.path.join(rawfiledir, fullname)\n transformed = core.transform(fpath, filedir, extname)\n if not transformed:\n shutil.copy(fpath, filedir)\n\nreanalysis = False\nif reanalysis:\n print('analysis')\n result = []\n imgresult = []\n drawingresult = []\n for indx, fullname in enumerate(fname_arr):\n print(fullname)\n ext_tuple = os.path.splitext(fullname)\n fname = ext_tuple[0]\n extname = ext_tuple[1]\n fpath = os.path.join(filedir, fullname)\n kwords, kwfreq, pharr, nwarr, sumarr, curimg, curdrawing = core.analysis(fpath, extname, imgdir)\n fid = indx + 100\n result.append({'id': fid, 'fname': fname, 'extname': extname, 'username': username,\n 'keywords': kwords, 'kwfreq': kwfreq,\n 'phrase': pharr, 'newwords': nwarr, 'summary': sumarr})\n imgresult += curimg\n for d in curdrawing:\n d['drawing_id'] = fid\n d['title'] = ''\n drawingresult += curdrawing\n\n resultdf = pd.DataFrame(result)\n imgresultdf = pd.DataFrame(imgresult)[['fname', 'keywords', 'newwords', 'relatedtxt', 'docname']]\n drawingresultdf = pd.DataFrame(drawingresult)\n\n cnt = conn.clear_file_info()\n conn.write_file_info(resultdf)\n conn.write_img_info(imgresultdf)\n conn.write_drawingsplit_info(drawingresultdf)\n print('del', str(cnt), 'write', str(len(resultdf)))\n\naiq = True\nif aiq:\n # load from db\n fobjs = conn.get_file_info(returnobj=True)\n\n # print('cluster')\n # cluster_result = core.file_cluster(fobjs)\n\n # print('classify')\n # core.file_classify_demo(fobjs)\n\n print('KG')\n core.knowledge_graph_demo(fobjs)\n", "import os\nfrom sqlalchemy import create_engine\nimport pandas as pd\nimport config\nfrom model import FileInfo, ImageInfo\n\n\ndef get_dw_constr():\n conStr = os.environ.get(\"DBCONSTR\")\n return conStr if conStr is not None else config.default_constr\n\n\ndef get_dw_engine():\n return create_engine(get_dw_constr()).connect()\n\n\ndef get_file_info(returnobj=False):\n dw = get_dw_engine()\n query = '''\n SELECT * FROM file_info\n '''\n data = pd.read_sql_query(query, dw)\n\n if not returnobj:\n return data\n\n objs = []\n for indx, row in data.iterrows():\n curobj = FileInfo()\n curobj.id = row['id']\n curobj.fname = row['fname']\n curobj.extname = row['extname']\n curobj.username = row['username']\n curobj.keywords = row['keywords'].split(',')\n if row['kwfreq']:\n curobj.kwfreq = list(map(int, row['kwfreq'].split(',')))\n else:\n curobj.kwfreq = []\n curobj.phrase = row['phrase'].split(',')\n curobj.newwords = row['newwords'].split(',')\n curobj.label = row['label']\n if row['istest'] == 1:\n curobj.istest = True\n objs.append(curobj)\n return objs\n\n\ndef clear_file_info():\n dw = get_dw_engine()\n res = dw.execute('DELETE FROM file_info WHERE id>0')\n return res.rowcount\n\n\ndef write_file_info(data: pd.DataFrame):\n dw = get_dw_engine()\n data.to_sql('file_info', dw, if_exists='append', index=False, method=\"multi\")\n\n\ndef get_img_info(returnobj=False):\n dw = get_dw_engine()\n 
query = '''\n SELECT * FROM file_image\n '''\n data = pd.read_sql_query(query, dw)\n\n if not returnobj:\n return data\n\n objs = []\n for indx, row in data.iterrows():\n curobj = ImageInfo()\n curobj.id = row['id']\n curobj.fname = row['fname']\n curobj.keywords = row['keywords'].split(',')\n curobj.newwords = row['newwords'].split(',')\n objs.append(curobj)\n return objs\n\n\ndef clear_img_info():\n dw = get_dw_engine()\n res = dw.execute('DELETE FROM file_image WHERE id>0')\n return res.rowcount\n\n\ndef write_img_info(data: pd.DataFrame):\n dw = get_dw_engine()\n data.to_sql('file_image', dw, if_exists='append', index=False, method=\"multi\")\n\n\ndef write_drawingsplit_info(data: pd.DataFrame):\n dw = get_dw_engine()\n data.to_sql('file_drawingsplit', dw, if_exists='append', index=False, method=\"multi\")\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.read_sql_query" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mansueto-institute/National_GDPpc_Urbanization
[ "c83d33b2db0c3c9eae2b77013deb9bc1367e440b" ]
[ "Figure_1A.py" ]
[ "import csv\nimport scipy.optimize\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\nimport matplotlib.colors as colors\nfrom colorsys import hsv_to_rgb\nimport datetime as dt\n\n\ndef lin_fit(x, y):\n '''Fits a linear fit of the form mx+b to the data'''\n fitfunc = lambda params, x: params[0] * x #create fitting function of form mx+b\n errfunc = lambda p, x, y: fitfunc(p, x) - y #create error function for least squares fit\n\n init_a = 0.5 #find initial value for a (gradient)\n init_p = np.array((init_a)) #bundle initial values in initial parameters\n\n #calculate best fitting parameters (i.e. m and b) using the error function\n p1, success = scipy.optimize.leastsq(errfunc, init_p.copy(), args = (x, y))\n f = fitfunc(p1, x) #create a fit with those parameters\n return p1, f\n\ndef linreg(X, Y):\n \"\"\"\n Summary\n Linear regression of y = ax + b\n Usage\n real, real, real = linreg(list, list)\n Returns coefficients to the regression line \"y=ax+b\" from x[] and y[], and R^2 Value\n \"\"\"\n if len(X) != len(Y): raise ValueError(\"unequal length\")\n N = len(X)\n Sx = Sy = Sxx = Syy = Sxy = 0.0\n for x, y in zip(X, Y):\n Sx = Sx + x\n Sy = Sy + y\n Sxx = Sxx + x*x\n Syy = Syy + y*y\n Sxy = Sxy + x*y\n det = Sxx * N - Sx * Sx\n a, b = (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det\n meanerror = residual = 0.0\n for x, y in zip(X, Y):\n meanerror = meanerror + (y - Sy/N)**2\n residual = residual + (y - a * x - b)**2\n RR = 1 - residual/meanerror\n ss = residual / (N-2)\n Var_a, Var_b = ss * N / det, ss * Sxx / det\n return a, b, RR, Var_a, Var_b\n\n\nyall=[]\niall=[]\ngall=[]\npoptot=[]\navx=[]\navy=[]\n\nxx_tot=[]\nyy_tot=[]\n\n\nf=open('WB_data_clean.csv', 'r',encoding='latin-1')\nreader=csv.reader(f,delimiter=',')\n\ncount=0\nc2=0\nc3=0\nc4=0\nnation=[]\nGDP=[]\npop=[]\nurban=[]\nurbanr=[]\nurban1m=[]\nimports=[]\nexports=[]\n\n\n# possible countries:\ncountries = ['Afghanistan', 'Albania', 'Algeria','Angola', 'Argentina', 'Armenia', 'Australia', 'Austria', 'Azerbaijan', 'Bangladesh', 'Belarus', 'Belgium', 'Belize', 'Benin', 'Bhutan', 'Bolivia', 'Bosnia and Herzegovina', 'Botswana', 'Brazil', 'Bulgaria', 'Burkina Faso', 'Burundi', 'Cabo Verde', 'Cambodia', 'Cameroon', 'Canada', 'Central African Republic', 'Chad', 'Chile', 'China', 'Colombia', 'Congo, Dem. 
Rep.', 'Congo, Rep.', 'Costa Rica', \"Cote d'Ivoire\", 'Croatia', 'Cuba', 'Cyprus', 'Czech Republic', 'Denmark', 'Ecuador', 'Egypt, Arab Rep.', 'El Salvador', 'Equatorial Guinea', 'Eritrea', 'Estonia', 'Ethiopia', 'Finland','France', 'Gabon', 'Gambia, The', 'Georgia', 'Germany', 'Ghana','Greece','Grenada','Guatemala', 'Guinea', 'Guinea-Bissau', 'Guyana', 'Haiti', 'Honduras', 'Hungary', 'Iceland', 'India', 'Indonesia', 'Iran, Islamic Rep.', 'Iraq', 'Ireland', 'Israel', 'Italy', 'Jamaica', 'Japan', 'Jordan', 'Kazakhstan', 'Kenya', 'Korea, Rep.', 'Kyrgyz Republic', 'Lao PDR', 'Latvia', 'Lebanon', 'Lesotho', 'Liberia', 'Libya', 'Lithuania', 'Luxembourg', 'Macedonia, FYR', 'Madagascar', 'Malawi', 'Malaysia', 'Maldives', 'Mali', 'Malta', 'Mauritania', 'Mauritius', 'Mexico', 'Moldova', 'Mongolia', 'Montenegro', 'Morocco', 'Mozambique', 'Myanmar', 'Namibia', 'Nepal', 'Netherlands', 'New Zealand', 'Nicaragua', 'Niger', 'Nigeria', 'Norway', 'Pakistan', 'Panama', 'Papua New Guinea', 'Paraguay', 'Peru', 'Philippines', 'Poland', 'Portugal', 'Puerto Rico', 'Romania', 'Russian Federation', 'Rwanda', 'Saudi Arabia', 'Senegal', 'Serbia', 'Seychelles', 'Sierra Leone', 'Slovak Republic', 'Slovenia', 'South Africa', 'South Sudan', 'Spain', 'Sri Lanka', 'Sudan', 'Suriname', 'Sweden', 'Switzerland', 'Tajikistan', 'Tanzania', 'Thailand', 'Timor-Leste', 'Togo', 'Tunisia', 'Turkey', 'Turkmenistan', 'Uganda', 'Ukraine', 'United Kingdom', 'United States', 'Uruguay', 'Uzbekistan', 'Vanuatu', 'Venezuela, RB', 'Vietnam', 'Yemen, Rep.', 'Zambia', 'Zimbabwe']\n\n\n#countries=['United Kingdom', 'United States','Brazil','China','India','Argentina','Indonesia']\n\n\nnorm = colors.Normalize(vmin=1, vmax=2*len(countries))\nsm = cm.ScalarMappable(norm, cmap=cm.Paired)\ncnt = 1\n\nnames=[]\ngradients=[]\nintercepts=[]\nxtot=[]\nytot=[]\nymin=1960\nymax=2017\nymin=ymin-1960\nymax=ymax-1960\n\nfor country in countries:\n print(country)\n f=open('WB_data_clean.csv', 'r',encoding='latin-1')\n reader=csv.reader(f,delimiter=',')\n \n count=0\n c2=0\n c3=0\n c4=0\n \n nation=[]\n GDP=[] # GDP in constant 2010 $\n pop=[]\n urban=[] # urban population\n urbanr=[] # urban population (fraction)\n imports=[]\n exports=[]\n \n for row in reader:\n \n if (country==row[0] and row[2]=='Urban population'):\n for i in range(4,len(row)):\n urban.append( (row[i].replace(\",\",\"\")) )\n count+=1\n\n if (country==row[0] and row[2]=='Urban population (% of total)'): \n # variables = city, nation, population, GDP, Urbanized area,employment,patents,\n for i in range(4,len(row)):\n urbanr.append( (row[i].replace(\",\",\"\")) )\n\n if (country==row[0] and row[2]=='GDP (constant 2010 US$)'):\n for i in range(4,len(row)):\n GDP.append((row[i].replace(\",\",\"\")))\n c3+=1\n\n x=[]\n y=[]\n\n for i in range(len(urban)):\n if (urban[i] !='..' and GDP[i] !='..'): # if not empty.\n #print(urban[i],GDP[i])\n aux=float(urbanr[i]) # fraction of population in urban\n aux2 = float(urban[i])/float(urbanr[i])*100. 
# this is the total population.\n aux3=float(urban[i]) # total urban population\n #aux4=float(urban1m[i])\n pop.append(aux)\n# what is plotted:\n x.append(aux)\n y.append( float(GDP[i])/float(aux2)) # GDP per capita\n xlog=x #np.log10(x)\n ylog=np.log10(y)\n xx=xlog #[ymin:ymax]\n yy=ylog #-ylog[0] #[ymin:ymax] #-ylog[0] $ need to be careful with what years this is available for\n for i in range(len(xx)):\n xtot.append(xx[i])\n ytot.append(yy[i])\n\n print(\"There are,\",count,\"years of data\")\n if (len(xx)<70):\n edge_color, color = sm.to_rgba(cnt), sm.to_rgba(cnt+1)\n edge_color=color\n cnt += 2\n plt.plot(xx,yy,marker='o',ms=3,ls='None',c=color,markeredgecolor=edge_color,markeredgewidth=1,alpha=0.6,label=country)\n\n #print('Totals', len(xx), len(yy))\n gradient, intercept, r_value, var_gr, var_it = linreg(xx,yy)\n\n #print(country)\n #print(\"Gradient=\", gradient, \", 95 % CI = [\",gradient- 2.*np.sqrt(var_gr),\",\",gradient+2.*np.sqrt(var_gr),\"]\")\n #print(\"intercept=\", intercept, \", 95 % CI = [\",intercept- 2.*np.sqrt(var_it),\",\",intercept+2.*np.sqrt(var_it),\"]\")\n #print(\"R-squared\", r_value**2)\n names.append(country)\n gradients.append(gradient)\n intercepts.append(intercept)\n \n# show models and best fit\n tt=xx\n tt.sort()\n fitx=np.arange(float(tt[0])-0.25,float(tt[-1])+0.5,0.1,dtype=float)\n fity=intercept + gradient*fitx\n\n #plt.plot(fitx,fity,'r-', linewidth=2, alpha=0.5)\n f.close()\ngradient, intercept, r_value, var_gr, var_it = linreg(xtot,ytot)\nprint(\"Global Fit\")\nprint(\"Gradient=\", gradient, \", 95 % CI = [\",gradient- 2.*np.sqrt(var_gr),\",\",gradient+2.*np.sqrt(var_gr),\"]\")\nprint(\"intercept=\", intercept, \", 95 % CI = [\",intercept- 2.*np.sqrt(var_it),\",\",intercept+2.*np.sqrt(var_it),\"]\")\nprint(\"R-squared\", r_value**2)\ntt=xtot\ntt.sort()\nfitx=np.arange(float(tt[0])-0.25,float(tt[-1])+0.5,0.1,dtype=float)\nfity=intercept + gradient*fitx\nplt.plot(fitx,fity,'k-', linewidth=5, alpha=0.6)\n\nxdim = np.sum(plt.xlim())/2\nydim = np.min(plt.ylim())*1.01\n#plt.text(xdim, ydim, dt.datetime.now(), ha='right', va='baseline')\nplt.ylim(2,5.5)\n\nplt.ylabel('$\\log_{10} \\ g \\ $(2010)',fontsize=20)\nplt.xlabel('Percent Urban',fontsize=20)\n#plt.legend()\n#plt.show()\nplt.savefig('WB_Trajectories_Global_Fit.pdf', format='pdf')\n\n\n\n\n\n\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "numpy.log10", "matplotlib.cm.ScalarMappable", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fractalsproject/solaris
[ "ac1facb1daa661ddf6ab1ff13dba36ff88ef1c0f" ]
[ "solaris/nets/infer.py" ]
[ "import os\nimport skimage\nimport torch\nfrom warnings import warn\nfrom .model_io import get_model\nfrom .transform import process_aug_dict\nfrom .datagen import InferenceTiler\nfrom ..raster.image import stitch_images\nfrom ..utils.core import get_data_paths\n\n\nclass Inferer(object):\n \"\"\"Object for training `solaris` models using PyTorch or Keras.\"\"\"\n\n def __init__(self, config, custom_model_dict=None):\n self.config = config\n self.batch_size = self.config['batch_size']\n self.framework = self.config['nn_framework']\n self.model_name = self.config['model_name']\n # check if the model was trained as part of the same pipeline; if so,\n # use the output from that. If not, use the pre-trained model directly.\n print(\"Inferer config\", self.config)\n if self.config['train']:\n warn('Because the configuration specifies both training and '\n 'inference, solaris is switching the model weights path '\n 'to the training output path.')\n self.model_path = self.config['training']['model_dest_path']\n if custom_model_dict is not None:\n custom_model_dict['weight_path'] = self.config[\n 'training']['model_dest_path']\n else:\n self.model_path = self.config.get('model_path', None)\n self.model = get_model(self.model_name, self.framework,\n self.model_path, pretrained=True,\n custom_model_dict=custom_model_dict)\n self.window_step_x = self.config['inference'].get('window_step_size_x',\n None)\n self.window_step_y = self.config['inference'].get('window_step_size_y',\n None)\n if self.window_step_x is None:\n self.window_step_x = self.config['data_specs']['width']\n if self.window_step_y is None:\n self.window_step_y = self.config['data_specs']['height']\n self.stitching_method = self.config['inference'].get(\n 'stitching_method', 'average')\n self.output_dir = self.config['inference']['output_dir']\n if not os.path.isdir(self.output_dir):\n os.makedirs(self.output_dir)\n\n def __call__(self, infer_df=None):\n \"\"\"Run inference.\n\n Arguments\n ---------\n infer_df : :class:`pandas.DataFrame` or `str`\n A :class:`pandas.DataFrame` with a column, ``'image'``, specifying\n paths to images for inference. Alternatively, `infer_df` can be a\n path to a CSV file containing the same information. Defaults to\n ``None``, in which case the file path specified in the Inferer's\n configuration dict is used.\n\n \"\"\"\n\n if infer_df is None:\n infer_df = get_infer_df(self.config)\n\n inf_tiler = InferenceTiler(\n self.framework,\n width=self.config['data_specs']['width'],\n height=self.config['data_specs']['height'],\n x_step=self.window_step_x,\n y_step=self.window_step_y,\n augmentations=process_aug_dict(\n self.config['inference_augmentation'])\n )\n \n # check if final image was already processed...if so, assume the whole batch finished\n fin = len(infer_df['image'])\n im_path = infer_df['image'][fin-1] \n outpath = os.path.join(self.output_dir, os.path.split(im_path)[1])\n print(\"Checking for last %s\" % outpath )\n if os.path.exists(outpath):\n print(\"file exists %s. 
assuming entire batch finished.\" % outpath )\n return\n \n for idx, im_path in enumerate(infer_df['image']):\n print(\"processing %d/%d, %s\" % (idx,len(infer_df['image']), im_path ) )\n outpath = os.path.join(self.output_dir, os.path.split(im_path)[1])\n if os.path.exists(outpath):\n print(\"file exists %s\" % outpath )\n continue\n inf_input, idx_refs, (\n src_im_height, src_im_width) = inf_tiler(im_path)\n\n if self.framework == 'keras':\n subarr_preds = self.model.predict(inf_input,\n batch_size=self.batch_size)\n\n elif self.framework in ['torch', 'pytorch']:\n with torch.no_grad():\n self.model.eval()\n if torch.cuda.is_available():\n device = torch.device('cuda')\n self.model = self.model.cuda()\n else:\n device = torch.device('cpu')\n inf_input = torch.from_numpy(inf_input).float().to(device)\n # add additional input data, if applicable\n if self.config['data_specs'].get('additional_inputs',\n None) is not None:\n inf_input = [inf_input]\n for i in self.config['data_specs']['additional_inputs']:\n inf_input.append(\n infer_df[i].iloc[idx].to(device))\n\n subarr_preds = self.model(inf_input)\n subarr_preds = subarr_preds.cpu().data.numpy()\n stitched_result = stitch_images(subarr_preds,\n idx_refs=idx_refs,\n out_width=src_im_width,\n out_height=src_im_height,\n method=self.stitching_method)\n skimage.io.imsave(os.path.join(self.output_dir,\n os.path.split(im_path)[1]),\n stitched_result)\n\n\ndef get_infer_df(config):\n \"\"\"Get the inference df based on the contents of ``config`` .\n\n This function uses the logic described in the documentation for the config\n file to determine where to find images to be used for inference.\n See the docs and the comments in solaris/data/config_skeleton.yml for\n details.\n\n Arguments\n ---------\n config : dict\n The loaded configuration dict for model training and/or inference.\n\n Returns\n -------\n infer_df : :class:`dict`\n :class:`dict` containing at least one column: ``'image'`` . The values\n in this column correspond to the path to filenames to perform inference\n on.\n \"\"\"\n\n infer_df = get_data_paths(config['inference_data_csv'], infer=True)\n return infer_df\n" ]
[ [ "torch.device", "torch.no_grad", "torch.from_numpy", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sarahmish/Cardea
[ "85c4246c12178e6d1b9cc12eb39c264f3c20f3e9", "85c4246c12178e6d1b9cc12eb39c264f3c20f3e9", "85c4246c12178e6d1b9cc12eb39c264f3c20f3e9" ]
[ "tests/cardea/fhir/test_fhirbase.py", "tests/cardea/problem_definition/test_readmission.py", "tests/cardea/modeling/test_modeler.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport pytest\n\nfrom cardea.fhir import Patient\n\n\[email protected]()\ndef patient_df():\n return pd.DataFrame({\"identifier\": [0, 1, 2, 3],\n \"gender\": ['female', 'female', 'male', 'female'],\n \"birthDate\": ['10/21/2000', '7/2/2000', '1/10/2000', '9/16/2000'],\n \"active\": ['True', 'True', 'False', 'False']})\n\n\[email protected]()\ndef patient_object(patient_df):\n object_values = patient_df.to_dict('list')\n return Patient(object_values)\n\n\[email protected]()\ndef patient_object_df(patient_object):\n return patient_object.get_dataframe()\n\n\ndef test_object_number_of_attributes(patient_object_df, patient_df):\n assert len(patient_object_df.columns) == len(patient_df.columns)\n\n\ndef test_object_number_of_tuples(patient_object_df, patient_df):\n assert len(patient_object_df) == len(patient_df)\n\n\ndef test_get_id(patient_object):\n assert patient_object.get_id() == 'identifier'\n\n\ndef test_get_relationships(patient_object):\n relationships = patient_object.get_relationships()\n assert len(relationships) == 12\n\n\ndef test_get_eligible_relationships(patient_object):\n elig_relationships = patient_object.get_eligible_relationships()\n assert len(elig_relationships) == 1\n\n\ndef test_get_id_lookup_error(patient_df):\n df = patient_df[['gender', 'birthDate']]\n object_values = df.to_dict('list')\n object = Patient(object_values)\n with pytest.raises(LookupError):\n object.get_id()\n\n\ndef test_assert_type_enum():\n df = pd.DataFrame({\"identifier\": [0, 1], \"gender\": ['female', 'F']}) # F should be female\n object_values = df.to_dict('list')\n with pytest.raises(ValueError):\n Patient(object_values)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport featuretools as ft\nimport pandas as pd\nimport pytest\nfrom numpy import nan\n\nfrom cardea.data_loader import EntitySetLoader\nfrom cardea.problem_definition import Readmission\n\n\[email protected]()\ndef readmission():\n return Readmission()\n\n\[email protected]()\ndef es_loader():\n return EntitySetLoader()\n\n\[email protected]()\ndef cutoff_times():\n temp = pd.DataFrame({\"instance_id\": [10, 11, 12, 13, 14, 15],\n \"time\": ['9/22/2018', '9/21/2018', '10/4/2018',\n '9/28/2018', '10/30/2018', '11/18/2018'],\n \"label\": [False, False, False, True, False, True]\n })\n temp['time'] = pd.to_datetime(temp['time'])\n return temp\n\n\[email protected]()\ndef objects(es_loader):\n\n encounter_df = pd.DataFrame({\"identifier\": [10, 11, 12, 13, 14, 15],\n \"subject\": [0, 1, 2, 0, 0, 0],\n \"period\": [120, 121, 122, 125, 123, 124],\n \"length\": [2, 1, 7, 0, 0, 0]})\n\n period_df = pd.DataFrame({\"object_id\": [120, 121, 122, 125, 123, 124],\n \"start\": ['9/20/2018', '9/20/2018', '9/27/2018',\n '9/28/2018', '10/30/2018', '11/18/2018'],\n \"end\": ['9/22/2018', '9/21/2018', '10/4/2018',\n '9/28/2018', '10/30/2018', '11/18/2018']\n })\n\n duration_df = pd.DataFrame({\"object_id\": [0, 2, 1, 7]})\n\n patient_df = pd.DataFrame({\"identifier\": [0, 1, 2],\n \"gender\": ['female', 'female', 'male'],\n \"birthDate\": ['10/21/2000', '7/2/2000', '1/10/2000'],\n \"active\": ['True', 'True', 'nan']})\n\n encounter = es_loader.create_object(encounter_df, 'Encounter')\n period = es_loader.create_object(period_df, 'Period')\n patient = es_loader.create_object(patient_df, 'Patient')\n duration = es_loader.create_object(duration_df, 'Duration')\n\n return [encounter, period, patient, duration]\n\n\[email protected]()\ndef objects_fail(es_loader):\n\n 
encounter_df = pd.DataFrame({\"identifier\": [10, 11, 12],\n \"subject\": [0, 1, 2],\n \"period\": [120, 121, 122]})\n\n period_df = pd.DataFrame({\"object_id\": [120, 121, 122],\n \"start\": ['9/18/2018', '9/19/2018', '9/20/2018'],\n \"end\": ['9/20/2018', '9/20/2018', '9/27/2018']})\n\n patient_df = pd.DataFrame({\"identifier\": [0, 1, 2],\n \"gender\": ['female', 'female', 'male'],\n \"birthDate\": ['10/21/2000', '7/2/2000', '1/10/2000'],\n \"active\": ['True', 'True', 'nan']})\n\n encounter = es_loader.create_object(encounter_df, 'Encounter')\n period = es_loader.create_object(period_df, 'Period')\n patient = es_loader.create_object(patient_df, 'Patient')\n\n return [encounter, period, patient]\n\n\[email protected]()\ndef objects_missing_generation_table(es_loader):\n\n encounter_df = pd.DataFrame({\"identifier\": [10, 11, 12, 13, 14, 15],\n \"subject\": [0, 1, 2, 0, 0, 0],\n \"length\": [2, 1, 7, 0, 0, 0]})\n\n duration_df = pd.DataFrame({\"object_id\": [0, 2, 1, 7]})\n\n patient_df = pd.DataFrame({\"identifier\": [0, 1, 2],\n \"gender\": ['female', 'female', 'male'],\n \"birthDate\": ['10/21/2000', '7/2/2000', '1/10/2000'],\n \"active\": ['True', 'True', 'nan']})\n\n encounter = es_loader.create_object(encounter_df, 'Encounter')\n patient = es_loader.create_object(patient_df, 'Patient')\n duration = es_loader.create_object(duration_df, 'Duration')\n\n return [encounter, patient, duration]\n\n\[email protected]()\ndef objects_missing_generation_value(es_loader):\n\n encounter_df = pd.DataFrame({\"identifier\": [10, 11, 12],\n \"subject\": [0, 1, 2],\n \"period\": [120, 121, 122]})\n\n period_df = pd.DataFrame({\"object_id\": [120, 121, 122],\n \"start\": ['9/18/2018', '9/19/2018', '9/20/2018'],\n \"end\": ['9/18/2018', '9/19/2018', nan]})\n\n patient_df = pd.DataFrame({\"identifier\": [0, 1, 2],\n \"gender\": ['female', 'female', 'male'],\n \"birthDate\": ['10/21/2000', '7/2/2000', '1/10/2000'],\n \"active\": ['True', 'True', 'nan']})\n\n encounter = es_loader.create_object(encounter_df, 'Encounter')\n patient = es_loader.create_object(patient_df, 'Patient')\n period = es_loader.create_object(period_df, 'Period')\n\n return [encounter, patient, period]\n\n\[email protected]()\ndef relationships():\n return[('Encounter', 'period', 'Period', 'object_id'),\n ('Encounter', 'subject', 'Patient', 'identifier'),\n ('Encounter', 'length', 'Duration', 'object_id')]\n\n\[email protected]()\ndef entityset_success(objects, es_loader):\n es = ft.EntitySet(id=\"test\")\n\n identifiers = es_loader.get_object_ids(objects)\n\n fhir_dict = es_loader.get_dataframes(objects)\n es_loader.create_entity(fhir_dict, identifiers, entity_set=es)\n\n relationships = es_loader.get_relationships(objects, list(fhir_dict.keys()))\n es_loader.create_relationships(relationships, entity_set=es)\n\n return es\n\n\[email protected]()\ndef entityset_fail_missing_generation_value(objects_missing_generation_value, es_loader):\n es = ft.EntitySet(id=\"test\")\n\n identifiers = es_loader.get_object_ids(objects_missing_generation_value)\n\n fhir_dict = es_loader.get_dataframes(objects_missing_generation_value)\n es_loader.create_entity(fhir_dict, identifiers, entity_set=es)\n\n relationships = es_loader.get_relationships(\n objects_missing_generation_value, list(fhir_dict.keys()))\n es_loader.create_relationships(relationships, entity_set=es)\n\n return es\n\n\[email protected]()\ndef entityset_fail_missing_generation_table(objects_missing_generation_table, es_loader):\n es = ft.EntitySet(id=\"test\")\n\n identifiers = 
es_loader.get_object_ids(objects_missing_generation_table)\n\n fhir_dict = es_loader.get_dataframes(objects_missing_generation_table)\n es_loader.create_entity(fhir_dict, identifiers, entity_set=es)\n\n relationships = es_loader.get_relationships(\n objects_missing_generation_table, list(fhir_dict.keys()))\n es_loader.create_relationships(relationships, entity_set=es)\n return es\n\n\[email protected]()\ndef entityset_fail(objects_fail, es_loader):\n es = ft.EntitySet(id=\"test\")\n\n identifiers = es_loader.get_object_ids(objects_fail)\n\n fhir_dict = es_loader.get_dataframes(objects_fail)\n es_loader.create_entity(fhir_dict, identifiers, entity_set=es)\n\n relationships = es_loader.get_relationships(objects_fail, list(fhir_dict.keys()))\n es_loader.create_relationships(relationships, entity_set=es)\n return es\n\n\ndef test_generate_cutoff_times_success(entityset_success, readmission, cutoff_times):\n _, _, generated_df = readmission.generate_cutoff_times(\n entityset_success)\n generated_df.index = cutoff_times.index # both should have the same index\n generated_df = generated_df[cutoff_times.columns] # same columns order\n assert generated_df.equals(cutoff_times)\n\n\ndef test_generate_labels_success(entityset_success, readmission, cutoff_times):\n es, _, generated_df = readmission.generate_cutoff_times(\n entityset_success)\n generated_df.index = cutoff_times.index # both should have the same index\n\n labels = list(generated_df['label'])\n\n assert labels == [False, False, False, True, False, True]\n\n\ndef test_generate_labels_success_threshold(entityset_success, cutoff_times):\n\n es, _, generated_df = Readmission(6).generate_cutoff_times(\n entityset_success)\n generated_df.index = cutoff_times.index # both should have the same index\n\n labels = list(generated_df['label'])\n assert labels == [False, False, False, True, False, False]\n\n\ndef test_generate_cutoff_times_missing_generation_label(entityset_success, readmission):\n entityset_success['Period'].delete_variables(['end'])\n with pytest.raises(ValueError):\n readmission.generate_cutoff_times(\n entityset_success)\n\n\ndef test_generate_label_with_missing_values(entityset_fail_missing_generation_value, readmission):\n with pytest.raises(ValueError):\n readmission.generate_cutoff_times(entityset_fail_missing_generation_value)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pytest\nfrom mlblocks import MLPipeline\nfrom sklearn.datasets import load_iris\n\nfrom cardea.modeling.modeler import Modeler\n\n\[email protected]()\ndef get_pipeline():\n pipeline = MLPipeline(['sklearn.preprocessing.MinMaxScaler',\n 'sklearn.ensemble.RandomForestClassifier'])\n return pipeline\n\n\[email protected]()\ndef get_model(get_pipeline):\n model = Modeler(get_pipeline, problem_type='classification')\n return model\n\n\[email protected]()\ndef get_data():\n return load_iris(return_X_y=True)\n\n\ndef test_k_fold_validation(get_model, get_data):\n X, y = get_data\n score = get_model.k_fold_validation(hyperparameters=None, X=X, y=y)\n assert 1 >= score > 0\n\n\ndef test_tune(get_model, get_data):\n X, y = get_data\n get_model.tune(X, y, max_evals=10, scoring='F1 Macro', verbose=False)\n\n\ndef test_evaluate_without_tuning(get_model, get_data):\n X, y = get_data\n scores = get_model.evaluate(X, y, tune=False)\n assert isinstance(scores, dict) and len(scores) > 0\n\n\ndef test_evaluate_with_tuning(get_model, get_data):\n X, y = get_data\n scores = get_model.evaluate(X, y, tune=True)\n assert isinstance(scores, dict) and len(scores) > 
0\n\n\ndef test_fit_predict(get_model, get_data):\n X, y = get_data\n X_train, X_test, y_train, y_test = Modeler.train_test_split(X, y)\n model = get_model\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n assert y_pred.shape == y_test.shape\n\n\ndef test_regression_metrics_property(get_model):\n regression_metrics = get_model.regression_metrics\n assert isinstance(regression_metrics, dict) and len(regression_metrics) > 0\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.to_datetime", "pandas.DataFrame" ], [ "sklearn.datasets.load_iris" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yoavfreund/DAPPER
[ "c2fa5cc446a2b22a1efc174afc7e091363c9375d", "c2fa5cc446a2b22a1efc174afc7e091363c9375d", "c2fa5cc446a2b22a1efc174afc7e091363c9375d" ]
[ "examples/param_estim.py", "dapper/mods/LA/evensen2009.py", "dapper/mods/LorenzUV/wilks05.py" ]
[ "# # Illustrate usage of DAPPER to do parameter estimation.\n#\n# In DA terminology, \"parameters\" (in contrast to \"state\") are\n# (unknown) variables that (generally) do not change in time.\n# Our approach (to parameter estimation) is the typical one in DA of augmenting\n# (concatenating) the state vector with the parameter (vector).\n#\n# This particular experiment is a reproduction of section 3.3 of\n# `bib.bocquet2013joint`. Briefly: the unknown parameter is the forcing used in\n# Lorenz-96; only the state is observed; both parameter and state get estimated.\n#\n# This example builds mostly on `examples/basic_2.py`. For brevity, it does not\n# contain some of the facilities of `examples/basic_3.py` to run and analyse a\n# larger numbers of experiments.\n#\n\n# #### Imports\n\n# %matplotlib notebook\nimport numpy as np\nfrom mpl_tools import is_notebook_or_qt as nb\n\nimport dapper as dpr\nimport dapper.da_methods as da\nimport dapper.mods as modelling\nimport dapper.mods.Lorenz96 as core\nimport dapper.tools.liveplotting as LP\n\n# #### Augmented dynamics\n\n# Dims of state and parameter\nNx = 40\nNp = 1\n\n\n# Wrap core.dxdt so as to process an \"augmented\" state vector\n# that also contains parameter values.\[email protected]_compatible\ndef dxdt_augmented(x):\n d = np.full_like(x, np.nan) # Allocate. Use nan's to detect bugs.\n core.Force = x[Nx:].T # Set param\n d[:Nx] = core.dxdt(x[:Nx].T).T # Dynamics\n d[Nx:] = 0 # No dynamics (\"persistence\") for param.\n return d\n\n\n# Note that the Force is a module-level variable,\n# that gets set repeatedly during the course of the assimilation.\n# This seems like it could easily lead to bugs.\n# A more robust approach would be if the Force was part of the\n# input arguments of `core.dxdt`. This could be achieved, for example, by\n# - re-writing core.dxdt so to process the augmented vector\n# `[x, Force]` as the state variable.\n# - enclosing `dxdt` in an outer function which binds the value of the Force\n# - creating `dxdt` in an outer \"factory\" function which determines the\n# shape and allocation of the input vector to dxdt.\n# - Do the latter using OOP. This would probably be more verbose,\n# but even more flexible. In particular, OOP excels at working with multiple\n# realisations of models at the same time. 
However, when using ensemble methods,\n# the number of realisations, and the required level of automation\n# (input vector/ensemble --> output vector/ensemble) is very high.\n# It is not immediately clear if OOP is then more convenient.\n# - There are also some intriguing possibilities relating to namedtuples.\n# TODO 4: revise the above text.\n\n\n# Turn dxdt into `step` such that `x_{k+1} = step(x_k, t, dt)`\nstep = modelling.with_rk4(dxdt_augmented, autonom=True)\n\n\n# #### HMM\n\n# Define the sequence of the experiment\n# See `modelling.Chronology` for more details.\ntseq = modelling.Chronology(\n dt=0.05, # Integrational time step\n dko=1, # Steps of duration dt between obs\n Ko=10**3, # Total number of obs in experiment\n BurnIn=5, # Omit from averages the period t=0 --> BurnIn\n Tplot=7) # Default plot length\n\n# Define dynamical model\nDyn = {\n 'M': Nx+Np, # Length of (total/augmented) state vector\n 'model': step, # Actual model\n 'noise': 0, # Additive noise (variance)\n # 'noise': GaussRV(C=.1*np.eye(Nx+Np)),\n}\n\n# Define observation model using convenience function partial_Id_Obs\njj = np.arange(Nx) # obs indices (y = x[jj])\nObs = modelling.partial_Id_Obs(Nx+Np, jj)\nObs['noise'] = 1\n\n# Specify liveplotting (and replay) functionality.\nLP = [\n (1, LP.spatial1d(jj)),\n (1, LP.sliding_marginals(\n jj, zoomy=0.8, dims=[0, Nx], labels=[\"$x_0$\", \"Force\"]),\n ),\n]\n\n# Labels for sectors of state vector.\n# DAPPER will compute diagnostic statistics for the full state vector,\n# but also for each sector of it (averaged in space according to the\n# methods specified in your .dpr_config.yaml:field_summaries key).\n# The name \"sector\" comes from its typical usage to distinguish\n# \"ocean\" and \"land\" parts of the state vector.\n# Here we use it to get individual statistics of the parameter and state.\nparts = dict(state=np.arange(Nx),\n param=np.arange(Np)+Nx)\n\n# Wrap-up model specification\nHMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, sectors=parts, LP=LP)\n\n\n# #### Treat truth and DA methods differently\n\n# Bocquet et al. do not sample the true parameter value from the\n# Bayesian (random) prior / initial cond's (ICs), given to the DA methods.\n# Instead it is simply set to 8.\n\nTRUTH = 8\nGUESS = 7\n\n# Seeing how far off the intial guess (and its uncertainty, defined below)\n# is from the truth, this constitutes a kind of model error.\n# It is not a feature required to make this experiment interesting.\n# However, our goal here is to reproduce the results of Bocquet et al.,\n# so we will follow suit.\n#\n# PS: It often doesn't matter (for the time-averaged statistics)\n# what exact ICs are in play as long as the BurnIn is sufficiently large.\n# However, the setup defined here does make for pretty plots\n# at the beginning of the experiment.\n\n\n# Let's define the prior/ICs as a Gaussian with diagonal covariance,\n# where the last part of the diagonal (corresponding to the parm.\n# component of the state vector) may be specified as an argument...\ndef X0(param_mean, param_var):\n # State\n x0 = np.zeros(Nx)\n C0 = .01*np.ones(Nx)\n # Append param params\n x0 = np.hstack([x0, param_mean*np.ones(Np)])\n C0 = np.hstack([C0, param_var*np.ones(Np)])\n return modelling.GaussRV(x0, C0)\n\n\n# ... 
Thus we can distinguish between the behaviour of the truth simulation,\n# and the dynamics for the DA methods.\ndef set_X0_and_simulate(hmm, xp):\n dpr.set_seed(3000)\n hmm.X0 = X0(TRUTH, 0)\n xx, yy = hmm.simulate()\n hmm.X0 = X0(GUESS, 0.1**2)\n return hmm, xx, yy\n\n# Note: An alternative approach might be to simply\n# write our own `simulate()` which merely sets the `Force` parameter,\n# rather than sampling it.\n\n\n# #### DA methods configurations\n\nxps = dpr.xpList()\n# xps += da.PartFilt(N=1000, reg=1) # works OK with Nx=6\nfor N in [20, 50]:\n # xps += da.EnKF('Sqrt', N, infl=1.04)\n xps += da.EnKF_N(N, xN=2)\n for Lag in [1, 4, 20]:\n xps += da.iEnKS(\"Sqrt\", N, Lag=Lag, xN=2, wtol=1e-5)\n# TODO 4: Add 3D- and 4D-Var\n\n\n# #### Launch experiments\n\nscriptname = \"basic_3\" if nb else __file__\nsave_as = xps.launch(\n HMM, scriptname, setup=set_X0_and_simulate,\n mp=False, # Multiprocessing\n fail_gently=False, # Facilitate debugging\n liveplots=False, # NB: Turn off if running iEnKS\n free=False, # Don't delete time series (enables replay)\n)\n\n\n# #### Show results\n\n# Load data\nxps = dpr.xpList(dpr.load_xps(save_as))\n\n\n# These scores may be validated by cross-checking with those\n# reported by bib.bocquet2013joint in their ...\nprint(xps.tabulate_avrgs([\n \"rmse.state.a\", \"rmv.state.a\", # ... figure 6, and\n \"rmse.param.a\", \"rmv.param.a\", # ... figure 7.\n]))\n\n# Note that only the data points at `Lag` (data assimilation window length) 0 and\n# 1 are reproduced by DAPPER, because the IEnKS in DAPPER does not have MDA\n# (in the same sense as bib.bocquet2013joint), which applies for `Lag>=2`.\n# Nevertheless, DAPPER's IEnKS accuracy also improves (both for state and param)\n# with increasing Lag. However, `Lag=100` is too much (causes divergence)\n# without the MDA component of by Bocquet et al.\n\n# Plots\nxps[-1].stats.replay(\n # t1=0,\n # t2=np.inf,\n)\n\n# #### Excercise:\n# Change this script so as to use Np == Nx parameters\n# (one for each state variable).\n", "\"\"\"A mix of `bib.evensen2009ensemble` and `bib.sakov2008implications`.\n\n.. 
note::\n Since there is no noise, and the system is stable,\n the rmse's from this HMM go to zero as `T` goes to infinity.\n Thus, benchmarks largely depend on the initial error,\n and so these absolute rmse values are not so useful\n for quantatative evaluation of DA methods.\n For that purpose, see `dapper.mods.LA.raanes2015` instead.\n\"\"\"\n\nimport numpy as np\n\nimport dapper.mods as modelling\nfrom dapper.mods.LA import Fmat, sinusoidal_sample\nfrom dapper.mods.Lorenz96 import LPs\n\nNx = 1000\nNy = 4\njj = modelling.linspace_int(Nx, Ny)\n\ntseq = modelling.Chronology(dt=1, dko=5, T=300, BurnIn=-1, Tplot=100)\n\n# WITHOUT explicit matrix (assumes dt == dx/c):\n# step = lambda x,t,dt: np.roll(x,1,axis=x.ndim-1)\n# WITH:\nFm = Fmat(Nx, c=-1, dx=1, dt=tseq.dt)\n\n\ndef step(x, t, dt):\n assert dt == tseq.dt\n return x @ Fm.T\n\n\nDyn = {\n 'M': Nx,\n 'model': step,\n 'linear': lambda x, t, dt: Fm,\n 'noise': 0,\n}\n\n# In the animation, it can sometimes/somewhat occur\n# that the truth is outside 3*sigma !!!\n# Yet this is not so implausible because sinusoidal_sample()\n# yields (multivariate) uniform (random numbers) -- not Gaussian.\nwnum = 25\na = np.sqrt(5)/10\nX0 = modelling.RV(M=Nx, func = lambda N: a*sinusoidal_sample(Nx, wnum, N))\n\nObs = modelling.partial_Id_Obs(Nx, jj)\nObs['noise'] = 0.01\n\nHMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0, LP=LPs(jj))\n\n\n####################\n# Suggested tuning\n####################\n# xp = EnKF('PertObs',N=100,infl=1.02)\n", "\"\"\"Uses `nU`, `J`, `F` as in `dapper.mods.LorenzUV` ie. from `bib.wilks2005effects`.\n\nObs settings taken from different places (=> quasi-linear regime).\n\"\"\"\n\nimport numpy as np\n\nimport dapper.mods as modelling\nfrom dapper.mods.LorenzUV import model_instance\n\nfrom ..utils import rel2mods\n\nLUV = model_instance()\nnU = LUV.nU\n\n# Wilks2005 uses dt=1e-4 with RK4 for the full model,\n# and dt=5e-3 with RK2 for the forecast/truncated model.\n# As berry2014linear notes, this is possible coz\n# \"numerical stiffness disappears when fast processes are removed\".\n\n################\n# Full\n################\n\n# tseq = modelling.Chronology(dt=0.001,dto=0.05,T=4**3,BurnIn=6) # allows using rk2\ntseq = modelling.Chronology(dt=0.005, dto=0.05, T=4**3, BurnIn=6) # requires rk4\n\n\nDyn = {\n 'M': LUV.M,\n 'model': modelling.with_rk4(LUV.dxdt, autonom=True),\n 'noise': 0,\n 'linear': LUV.dstep_dx,\n}\n\nX0 = modelling.GaussRV(mu=LUV.x0, C=0.01)\n\nR = 0.1\njj = np.arange(nU)\nObs = modelling.partial_Id_Obs(LUV.M, jj)\nObs['noise'] = R\n\nother = {'name': rel2mods(__file__)+'_full'}\nHMM_full = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0, LP=LUV.LPs(jj), **other)\n\n\n################\n# Truncated\n################\n\n# Just change dt from 005 to 05\ntseq = modelling.Chronology(dt=0.05, dto=0.05, T=4**3, BurnIn=6)\n\nDyn = {\n 'M': nU,\n 'model': modelling.with_rk4(LUV.dxdt_parameterized),\n 'noise': 0,\n}\n\nX0 = modelling.GaussRV(mu=LUV.x0[:nU], C=0.01)\n\njj = np.arange(nU)\nObs = modelling.partial_Id_Obs(nU, jj)\nObs['noise'] = R\n\nother = {'name': rel2mods(__file__)+'_trunc'}\nHMM_trunc = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0, LP=LUV.LPs(jj), **other)\n\nLUV.prmzt = lambda x, t: polynom_prmzt(x, t, 1)\n\n\ndef polynom_prmzt(x, t, order):\n \"\"\"\n Polynomial (deterministic) parameterization of fast variables (Y).\n\n NB: Only valid for system settings of Wilks'2005.\n\n Note: In order to observe an improvement in DA performance w\n higher orders, the EnKF must be reasonably tuned with\n 
There is very little improvement gained above order=1.\n \"\"\"\n if order == 4:\n # From Wilks\n d = 0.262 + 1.45*x - 0.0121*x**2 - 0.00713*x**3 + 0.000296*x**4\n elif order == 3:\n # From Arnold\n d = 0.341 + 1.30*x - 0.0136*x**2 - 0.00235*x**3\n elif order == 1:\n # From me -- see AdInf/illust_parameterizations.py\n d = 0.74 + 0.82*x\n elif order == 0:\n # From me -- see AdInf/illust_parameterizations.py\n d = 3.82\n elif order == -1:\n # Leave as dxdt_trunc\n d = 0\n else:\n raise NotImplementedError\n return d\n\n\n####################\n# Suggested tuning\n####################\n# Using HMM_full # Expected rmse.a:\n# xps += Climatology() # 0.93\n# xps += Var3D(xB=2.0) # 0.39\n# xps += EnKF_N(N=20) # 0.27\n" ]
[ [ "numpy.full_like", "numpy.arange", "numpy.zeros", "numpy.ones" ], [ "numpy.sqrt" ], [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yifu-yang/ShiningLight
[ "9bd756807c537a2fc0ef2bc348215d14c4499880" ]
[ "shininglight/face_alignment.py" ]
[ "#coding=utf-8\nimport face_comm\nimport face_detect\nimport cv2\nimport numpy as np\nimport os\nimport time\nimport random\n\nclass Alignment:\n def align_face(self,opic,faceKeyPoint):\n img = cv2.imread(opic)\n faceKeyPoint = faceKeyPoint[0]\n\n #根据两个鼻子和眼睛进行3点对齐\n eye1 = faceKeyPoint[0]\n eye2 = faceKeyPoint[1]\n noise = faceKeyPoint[2]\n source_point = np.array(\n [eye1, eye2, noise], dtype=np.float32\n )\n\n eye1_noraml= [int(x) for x in face_comm.get_conf('alignment','left_eye').split(',')]\n eye2_noraml=[int(x) for x in face_comm.get_conf('alignment','right_eye').split(',')]\n noise_normal=[int(x) for x in face_comm.get_conf('alignment','noise').split(',')]\n #设置的人脸标准模型\n\n dst_point = np.array(\n [eye1_noraml,\n eye2_noraml,\n noise_normal],\n dtype=np.float32)\n\n tranform = cv2.getAffineTransform(source_point, dst_point)\n\n imagesize=tuple([int(x) for x in face_comm.get_conf('alignment','imgsize').split(',')])\n img_new = cv2.warpAffine(img, tranform, imagesize)\n new_image= os.path.abspath(face_comm.get_conf('alignment','aligment_face_dir'))\n new_image= new_image+'/'+'%d_%d.png'%(time.time(),random.randint(0,100))\n if cv2.imwrite(new_image, img_new):\n return new_image\n return None\n\nif __name__=='__main__':\n pic='/Users/chenlinzhong/Downloads/laji.png'\n detect = face_detect.Detect()\n result = detect.detect_face(pic)\n if len(result['boxes']):\n align = Alignment()\n print ('align face: '+ align.align_face(pic,result['face_key_point']))\n else:\n print ('not found face')" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
automl/nas-bench-x11
[ "ebf64ce3c30cc2ad0909508b5e25652011179956" ]
[ "naslib/optimizers/discrete/ls_svr/optimizer.py" ]
[ "import collections\nimport logging\nimport torch\nimport copy\nimport random\nimport numpy as np\n\nfrom sklearn.svm import NuSVR\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.ensemble import RandomForestRegressor\nimport time\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom scipy import stats\n\nfrom naslib.optimizers.core.metaclasses import MetaOptimizer\n\nfrom naslib.search_spaces.core.query_metrics import Metric\nfrom naslib.search_spaces.nasbench201.graph import NasBench201SearchSpace\n\nfrom naslib.utils.utils import AttrDict, count_parameters_in_MB\nfrom naslib.utils.logging import log_every_n_seconds\n\nlogger = logging.getLogger(__name__)\n\ndef loguniform(low=0, high=1, size=None):\n return np.exp(np.random.uniform(np.log(low), np.log(high), size))\n\nclass LS_SVR(MetaOptimizer):\n \n # training the models is not implemented\n using_step_function = False\n def __init__(self, config,\n metric=Metric.VAL_ACCURACY,\n all_curve=True,\n model_name='svr',\n best_hyper=None,\n n_hypers=1000):\n\n super().__init__()\n self.n_hypers = n_hypers\n self.all_curve = all_curve\n self.model_name = model_name\n self.best_hyper = best_hyper\n self.name = 'ls-svr'\n self.metric=metric\n self.info = []\n self.y_train = []\n self.fidelity = config.search.single_fidelity\n if config.search_space == 'nasbench101':\n self.extrapolation = config.search.fidelity\n self.top_n_percent = 0.2\n elif config.search_space in ['nasbench201', 'nasbench211']:\n self.extrapolation = config.search.fidelity // 2\n self.top_n_percent = 0.5\n elif config.search_space == 'darts':\n self.extrapolation = config.search.fidelity // 2\n self.top_n_percent = 0.2\n elif config.search_space == 'nlp':\n self.extrapolation = config.search.fidelity\n self.top_n_percent = 0.2\n else:\n raise NotImplementedError('{} is not yet implemented yet'.format(config.search_space))\n\n self.train_svr = True\n\n self.config = config\n self.epochs = config.search.epochs\n \n self.performance_metric = metric\n self.dataset = config.dataset\n \n self.num_init = config.search.num_init\n self.nbhd = []\n self.chosen = None\n self.best_arch = None\n \n self.history = torch.nn.ModuleList()\n\n\n def adapt_search_space(self, search_space, scope=None, dataset_api=None):\n assert search_space.QUERYABLE, \"Local search is currently only implemented for benchmarks.\"\n self.search_space = search_space.clone()\n self.scope = scope if scope else search_space.OPTIMIZER_SCOPE\n self.dataset_api = dataset_api\n\n\n def collate_inputs(self, VC_all_archs_list, AP_all_archs_list):\n \"\"\"\n Args:\n VC_all_archs_list: a list of validation accuracy curves for all archs\n AP_all_archs_list: a list of architecture features for all archs\n\n Returns:\n X: an collated array of all input information used for extrapolation model\n\n \"\"\"\n VC = np.vstack(VC_all_archs_list) # dimension: n_archs x n_epochs\n DVC = np.diff(VC, n=1, axis=1)\n DDVC = np.diff(DVC, n=1, axis=1)\n\n mVC = np.mean(VC, axis=1)[:, None]\n stdVC = np.std(VC, axis=1)[:, None]\n mDVC = np.mean(DVC, axis=1)[:, None]\n stdDVC = np.std(DVC, axis=1)[:, None]\n mDDVC = np.mean(DDVC, axis=1)[:, None]\n stdDDVC = np.std(DDVC, axis=1)[:, None]\n\n if self.all_curve:\n TS_list = [VC, DVC, DDVC, mVC, stdVC]\n else:\n TS_list = [mVC, stdVC, mDVC, stdDVC, mDDVC, stdDDVC]\n\n if self.metric == Metric.TRAIN_LOSS:\n sumVC = np.sum(VC, axis=1)[:, None]\n TS_list += [sumVC]\n\n TS = np.hstack(TS_list)\n\n if len(AP_all_archs_list) != 0:\n AP = 
np.vstack(AP_all_archs_list)\n X = np.hstack([AP, TS])\n else:\n X = TS\n\n return X\n\n def get_data_reqs(self):\n \"\"\"\n Returns a dictionary with info about whether the predictor needs\n extra info to train/query.\n \"\"\"\n reqs = {'requires_partial_lc':True,\n 'metric':self.metric,\n 'requires_hyperparameters':True,\n 'hyperparams':['flops', 'latency', 'params']\n }\n return reqs\n\n def prepare_data(self, info):\n # todo: this can be added at the top of collate_inputs\n val_acc_curve = []\n arch_params = []\n\n for i in range(len(info)):\n acc_metric = info[i]\n val_acc_curve.append(acc_metric)\n return self.collate_inputs(val_acc_curve, arch_params)\n\n def fit(self, ytrain, info, learn_hyper=True):\n\n # prepare training data\n xtrain_data = self.prepare_data(info) # dimension: n_archs x n_epochs\n y_train = np.array(ytrain)\n\n # learn hyperparameters of the extrapolator by cross validation\n if self.best_hyper is None or learn_hyper:\n # specify model hyper-parameters\n if self.model_name == 'svr':\n C = loguniform(1e-5, 10, self.n_hypers)\n nu = np.random.uniform(0, 1, self.n_hypers)\n gamma = loguniform(1e-5, 10, self.n_hypers)\n hyper = np.vstack([C, nu, gamma]).T\n elif self.model_name == 'blr':\n alpha_1 = np.random.uniform(1e-7, 1e-5, self.n_hypers)\n alpha_2 = np.random.uniform(1e-7, 1e-5, self.n_hypers)\n lambda_1 = np.random.uniform(1e-7, 1e-5, self.n_hypers)\n lambda_2 = np.random.uniform(1e-7, 1e-5, self.n_hypers)\n hyper = np.vstack([alpha_1, alpha_2, lambda_1, lambda_2]).T\n elif self.model_name == 'rf':\n n_trees = np.random.randint(10, 800, self.n_hypers)\n frac_feature = np.random.uniform(0.1, 0.5, self.n_hypers)\n hyper = np.vstack([n_trees, frac_feature]).T\n\n print(f'start CV on {self.model_name}')\n mean_score_list = []\n t_start = time.time()\n for i in range(self.n_hypers):\n # define model\n if self.model_name == 'svr':\n model = NuSVR(C=hyper[i, 0], nu=hyper[i, 1], gamma=hyper[i, 2], kernel='rbf')\n # model = SVR(C=hyper[i, 0], nu=hyper[i, 1], gamma= ,kernel='linear')\n elif self.model_name == 'blr':\n model = BayesianRidge(alpha_1=hyper[i, 0], alpha_2=hyper[i, 1],\n lambda_1=hyper[i, 2], lambda_2=hyper[i, 3])\n elif self.model_name == 'rf':\n model = RandomForestRegressor(n_estimators=int(hyper[i, 0]), max_features=hyper[i, 1])\n # perform cross validation to learn the best hyper value\n scores = cross_val_score(model, xtrain_data, y_train, cv=3)\n mean_scores = np.mean(scores)\n mean_score_list.append(mean_scores)\n # print(f'hper={hyper[i]}, score={mean_scores}')\n t_end = time.time()\n best_hyper_idx = np.argmax(mean_score_list)\n best_hyper = hyper[best_hyper_idx]\n max_score = np.max(mean_score_list)\n time_taken = t_end - t_start\n print(f'{self.model_name}'\n f'best_hyper={best_hyper}, score={max_score}, time={time_taken}')\n self.best_hyper = best_hyper\n\n # fit the extrapolator with the best hyperparameters to the training data\n if self.model_name == 'svr':\n best_model = NuSVR(C=self.best_hyper[0], nu=self.best_hyper[1], gamma=self.best_hyper[2], kernel='rbf')\n # model = SVR(C=hyper[i, 0], nu=hyper[i, 1], gamma= ,kernel='linear')\n elif self.model_name == 'blr':\n best_model = BayesianRidge(alpha_1=self.best_hyper[0], alpha_2=self.best_hyper[1],\n lambda_1=self.best_hyper[2], lambda_2=self.best_hyper[3])\n elif self.model_name == 'rf':\n best_model = RandomForestRegressor(n_estimators=int(self.best_hyper[0]), max_features=self.best_hyper[1])\n\n best_model.fit(xtrain_data, y_train)\n self.best_model = best_model\n\n def query(self, 
info):\n data = self.prepare_data(info)\n pred_on_test_set = self.best_model.predict(data)\n return pred_on_test_set\n\n def new_epoch(self, epoch):\n\n if epoch < self.num_init:\n # randomly sample initial architectures \n model = torch.nn.Module() # hacky way to get arch and accuracy checkpointable\n model.arch = self.search_space.clone()\n model.arch.sample_random_architecture(dataset_api=self.dataset_api)\n model.epoch = model.arch.get_max_epochs()\n model.full_lc = model.arch.query(self.performance_metric,\n self.dataset,\n epoch=model.epoch+1,\n dataset_api=self.dataset_api,\n full_lc=True)\n model.accuracy = model.full_lc[-1]\n self.info.append(model.full_lc[:self.fidelity])\n self.y_train.append(model.full_lc[self.extrapolation])\n\n if not self.best_arch or model.accuracy > self.best_arch.accuracy:\n self.best_arch = model\n self._update_history(model)\n\n else:\n if len(self.nbhd) == 0 and self.chosen and self.best_arch.accuracy <= self.chosen.accuracy:\n logger.info('Reached local minimum. Starting from new random architecture.')\n model = torch.nn.Module() # hacky way to get arch and accuracy checkpointable\n model.arch = self.search_space.clone()\n model.arch.sample_random_architecture(dataset_api=self.dataset_api)\n model.epoch = model.arch.get_max_epochs()\n model.full_lc = model.arch.query(self.performance_metric,\n self.dataset,\n epoch=model.epoch + 1,\n dataset_api=self.dataset_api,\n full_lc=True)\n model.accuracy = model.full_lc[-1]\n self.info.append(model.full_lc[:self.fidelity])\n self.y_train.append(model.full_lc[self.extrapolation])\n self.train_svr = True\n\n self.chosen = model\n self.best_arch = model\n self.nbhd = self.chosen.arch.get_nbhd(dataset_api=self.dataset_api)\n\n else:\n if len(self.nbhd) == 0:\n logger.info('Start a new iteration. 
Pick the best architecture and evaluate its neighbors.')\n if self.train_svr:\n self.fit(self.y_train, self.info)\n self.train_svr = False\n self.chosen = self.best_arch\n self.nbhd = self.chosen.arch.get_nbhd(dataset_api=self.dataset_api)\n\n model = self.nbhd.pop()\n model.epoch = self.fidelity\n model.partial_lc = model.arch.query(self.performance_metric,\n self.dataset,\n epoch=model.epoch,\n dataset_api=self.dataset_api,\n full_lc=True)\n model.accuracy = model.partial_lc[-1]\n prediction = self.query(np.array(model.partial_lc).reshape(1, -1))\n topk = np.sort(np.array(self.y_train))[-int(len(self.y_train) * self.top_n_percent):]\n if prediction > min(topk):\n model.epoch = model.arch.get_max_epochs()\n model.full_lc = model.arch.query(self.performance_metric,\n self.dataset,\n epoch=model.epoch+1,\n dataset_api=self.dataset_api,\n full_lc=True)\n self.info.append(model.full_lc[:self.fidelity])\n self.y_train.append(model.full_lc[self.extrapolation])\n self.train_svr = True\n model.accuracy = model.full_lc[-1]\n if model.accuracy > self.best_arch.accuracy:\n self.best_arch = model\n logger.info('Found new best architecture.')\n self._update_history(model) \n \n def _update_history(self, child):\n self.history.append(child)\n\n\n def train_statistics(self):\n best_arch, best_arch_epoch = self.get_final_architecture()\n latest_arch, latest_arch_epoch = self.get_latest_architecture()\n return (\n best_arch.query(Metric.TRAIN_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch-1),\n best_arch.query(Metric.VAL_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch),\n best_arch.query(Metric.TEST_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch),\n latest_arch.query(Metric.TRAIN_TIME, self.dataset, dataset_api=self.dataset_api, epoch=latest_arch_epoch),\n )\n\n\n def test_statistics(self):\n best_arch, epoch = self.get_final_architecture()\n return best_arch.query(Metric.RAW, self.dataset, dataset_api=self.dataset_api, epoch=epoch)\n\n\n def get_final_architecture(self):\n\n # Returns the sampled architecture with the lowest validation error.\n best_arch = max(self.history, key=lambda x: x.accuracy)\n return best_arch.arch, best_arch.epoch\n\n\n def get_latest_architecture(self):\n\n # Returns the architecture from the most recent epoch\n latest_arch = self.history[-1]\n return latest_arch.arch, latest_arch.epoch\n\n def get_op_optimizer(self):\n raise NotImplementedError()\n\n \n def get_checkpointables(self):\n return {'model': self.history}\n \n\n def get_model_size(self):\n return count_parameters_in_MB(self.history)" ]
[ [ "numpy.hstack", "numpy.log", "sklearn.model_selection.cross_val_score", "torch.nn.ModuleList", "sklearn.svm.NuSVR", "torch.nn.Module", "numpy.max", "numpy.std", "numpy.argmax", "numpy.diff", "numpy.mean", "sklearn.linear_model.BayesianRidge", "numpy.random.uniform", "numpy.array", "numpy.sum", "numpy.vstack", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KelvinKan/gluon-ts
[ "72e99c7f4c2538bf1fefaa78ee548139cfa5907a", "72e99c7f4c2538bf1fefaa78ee548139cfa5907a" ]
[ "src/gluonts/nursery/SCott/pts/dataset/utils.py", "src/gluonts/nursery/SCott/pts/dataset/process.py" ]
[ "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\nimport shutil\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport rapidjson as json\n\nfrom .common import TrainDatasets, MetaData\nfrom .file_dataset import FileDataset\n\n\ndef frequency_add(ts: pd.Timestamp, amount: int) -> pd.Timestamp:\n return ts + ts.freq * amount\n\n\ndef forecast_start(entry):\n return frequency_add(entry[\"start\"], len(entry[\"target\"]))\n\n\ndef to_pandas(instance: dict, freq: str = None) -> pd.Series:\n \"\"\"\n Transform a dictionary into a pandas.Series object, using its\n \"start\" and \"target\" fields.\n\n Parameters\n ----------\n instance\n Dictionary containing the time series data.\n freq\n Frequency to use in the pandas.Series index.\n\n Returns\n -------\n pandas.Series\n Pandas time series object.\n \"\"\"\n target = instance[\"target\"]\n start = instance[\"start\"]\n if not freq:\n freq = start.freqstr\n index = pd.date_range(start=start, periods=len(target), freq=freq)\n return pd.Series(target, index=index)\n\n\ndef load_datasets(metadata, train, test, shuffle: bool = False) -> TrainDatasets:\n \"\"\"\n Loads a dataset given metadata, train and test path.\n Parameters\n ----------\n metadata\n Path to the metadata file\n train\n Path to the training dataset files.\n test\n Path to the test dataset files.\n shuffle\n Return shuffled train data.\n Returns\n -------\n TrainDatasets\n An object collecting metadata, training data, test data.\n \"\"\"\n meta = MetaData.parse_file(metadata)\n train_ds = FileDataset(train, meta.freq, shuffle=shuffle)\n test_ds = FileDataset(test, meta.freq) if test else None\n\n return TrainDatasets(metadata=meta, train=train_ds, test=test_ds)\n\n\ndef save_datasets(dataset: TrainDatasets, path_str: str, overwrite=True) -> None:\n \"\"\"\n Saves an TrainDatasets object to a JSON Lines file.\n\n Parameters\n ----------\n dataset\n The training datasets.\n path_str\n Where to save the dataset.\n overwrite\n Whether to delete previous version in this folder.\n \"\"\"\n path = Path(path_str)\n\n if overwrite:\n shutil.rmtree(path, ignore_errors=True)\n\n def dump_line(f, line):\n f.write(json.dumps(line).encode(\"utf-8\"))\n f.write(\"\\n\".encode(\"utf-8\"))\n\n (path / \"metadata\").mkdir(parents=True)\n with open(path / \"metadata/metadata.json\", \"wb\") as f:\n dump_line(f, dataset.metadata.dict())\n\n (path / \"train\").mkdir(parents=True)\n with open(path / \"train/data.json\", \"wb\") as f:\n for entry in dataset.train:\n dump_line(f, serialize_data_entry(entry))\n\n if dataset.test is not None:\n (path / \"test\").mkdir(parents=True)\n with open(path / \"test/data.json\", \"wb\") as f:\n for entry in dataset.test:\n dump_line(f, serialize_data_entry(entry))\n\n\ndef serialize_data_entry(data):\n \"\"\"\n Encode the numpy values in the a DataEntry dictionary into lists so the\n dictionary can be JSON serialized.\n\n Parameters\n ----------\n data\n The dictionary to be transformed.\n\n Returns\n -------\n 
Dict\n The transformed dictionary, where all fields where transformed into\n strings.\n \"\"\"\n\n def serialize_field(field):\n if isinstance(field, np.ndarray):\n # circumvent https://github.com/micropython/micropython/issues/3511\n nan_ix = np.isnan(field)\n field = field.astype(np.object_)\n field[nan_ix] = \"NaN\"\n return field.tolist()\n return str(field)\n\n return {k: serialize_field(v) for k, v in data.items() if v is not None}\n", "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\nfrom functools import lru_cache\nfrom typing import Callable, List, cast\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries.offsets import Tick\n\nfrom .common import DataEntry\n\n\nclass ProcessStartField:\n def __init__(self, name: str, freq: str) -> None:\n self.name = name\n self.freq = freq\n\n def __call__(self, data: DataEntry) -> DataEntry:\n try:\n value = ProcessStartField.process(data[self.name], self.freq)\n except (TypeError, ValueError) as e:\n raise Exception(f'Error \"{e}\" occurred when reading field \"{self.name}\"')\n\n data[self.name] = value\n\n return data\n\n @staticmethod\n @lru_cache(maxsize=10000)\n def process(string: str, freq: str) -> pd.Timestamp:\n timestamp = pd.Timestamp(string, freq=freq)\n\n # operate on time information (days, hours, minute, second)\n if isinstance(timestamp.freq, Tick):\n return pd.Timestamp(timestamp.floor(timestamp.freq), timestamp.freq)\n\n # since we are only interested in the data piece, we normalize the\n # time information\n timestamp = timestamp.replace(\n hour=0, minute=0, second=0, microsecond=0, nanosecond=0\n )\n\n return timestamp.freq.rollforward(timestamp)\n\n\nclass ProcessTimeSeriesField:\n def __init__(self, name, is_required: bool, is_static: bool, is_cat: bool) -> None:\n self.name = name\n self.is_required = is_required\n self.req_ndim = 1 if is_static else 2\n self.dtype = np.int64 if is_cat else np.float32\n\n def __call__(self, data: DataEntry) -> DataEntry:\n value = data.get(self.name, None)\n\n if value is not None:\n value = np.asarray(value, dtype=self.dtype)\n dim_diff = self.req_ndim - value.ndim\n if dim_diff == 1:\n value = np.expand_dims(a=value, axis=0)\n elif dim_diff != 0:\n raise Exception(\n f\"JSON array has bad shape - expected {self.req_ndim} dimensions got {dim_diff}\"\n )\n\n data[self.name] = value\n return data\n elif not self.is_required:\n return data\n else:\n raise Exception(f\"JSON object is missing a required field `{self.name}`\")\n\n\nclass ProcessDataEntry:\n def __init__(self, freq: str, one_dim_target: bool = True) -> None:\n self.trans = cast(\n List[Callable[[DataEntry], DataEntry]],\n [\n ProcessStartField(\"start\", freq=freq),\n ProcessTimeSeriesField(\n \"target\", is_required=True, is_cat=False, is_static=one_dim_target\n ),\n ProcessTimeSeriesField(\n \"feat_dynamic_cat\", is_required=False, is_cat=True, is_static=False\n ),\n ProcessTimeSeriesField(\n \"feat_dynamic_real\",\n is_required=False,\n is_cat=False,\n is_static=False,\n ),\n 
ProcessTimeSeriesField(\n \"feat_static_cat\", is_required=False, is_cat=True, is_static=True\n ),\n ProcessTimeSeriesField(\n \"feat_static_real\", is_required=False, is_cat=False, is_static=True\n ),\n ],\n )\n\n def __call__(self, data: DataEntry) -> DataEntry:\n for t in self.trans:\n data = t(data)\n return data\n" ]
[ [ "numpy.isnan", "pandas.Series" ], [ "numpy.asarray", "pandas.Timestamp", "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zaman13/Brownian-dynamics-in-a-time-varying-force-field
[ "1dce268fcc4f27e066be0ec0b511178cbc1437c5", "1dce268fcc4f27e066be0ec0b511178cbc1437c5" ]
[ "Codes/Version 1.9.1/force_DEP.py", "Codes/Version 1.6/forceDEP.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 23 17:44:46 2021\n\n@author: Mohammad Asif Zaman\n\n- April 10, 2021\n - Added active electrode text\n- April 11, 2021\n - Changed axis limit units to microns\n- May 28, 2021\n - Functionalized fluid velocity \n\"\"\"\n\nimport numpy as np\nimport pylab as py\nfrom parameters import *\n\n\n# Setting fluid flow velocity\ndef fluid_vel(r_in, t):\n \n Np = r_in[0,:].size\n # xin = r_in[0,:]\n # yin = r_in[1,:]\n # zin = r_in[2,:]\n \n v_fluid = np.zeros((3,Np))\n \n return v_fluid\n\n\n\n\n# Parameters:\n# =============================================================================\n\n# Electrode array geometry parameters (units in um)\ndef geo_force_params():\n elec_width = 15\n elec_spacing = 50\n \n return elec_width, elec_spacing\n# =============================================================================\n\n\n\n\n# # Read force data from data file\n# Mdata = np.genfromtxt('xsweep_ro=10u,zo=25u.csv',delimiter=',',skip_header=5)\n# xdata = Mdata[:,0]*1e-6\n# Fxdata = Mdata[:,3]*1e-12\n# Fydata = 6*Mdata[:,2]*1e-12\n# Fzdata = Mdata[:,4]*1e-12\n# # Note that the axis has been changed. The axis from the csv data file is different.\n\n\n\n\n# Interpolation function when using force value from data file \ndef force_interp(ri,ri_active,Np):\n # Interpolate function for the imported force data(csv file)\n fi = np.zeros((3,Np))\n fi[0,:] = np.interp((ri[1,:]-ri_active),xdata,Fxdata) \n fi[1,:] = np.interp((ri[1,:]-ri_active),xdata,Fydata)\n fi[2,:] = np.interp((ri[1,:]-ri_active),xdata,Fzdata)\n return fi\n\n\n\n\n\n# Simplified spring force model\ndef force_model(rm,r_active,Np):\n k = .3e-6 # spring constant\n r_mag = 0 # magnitude of the random force component\n fm = np.zeros((3,Np))\n \n fm[0,:] = 0\n fm[1,:] = -k*(rm[1,:]-r_active)\n fm[2,:] = -k*(rm[2,:]) \n return fm\n\n\n\n# Random force component\ndef force_random(rr,r_active,Np,t):\n # This force component comes from random origins with random magnitude\n k = .3e-6 # spring constant\n r_mag = 1/50 # magnitude of the random force component\n fr = np.zeros((3,Np))\n if t> 0.1 and 200*t%1 == 0:\n fr[0,:] = -5*r_mag*k*(rr[0,:]-np.random.normal(0,1,Np)*x_lim[1])*np.random.normal(0,1,Np)\n fr[1,:] = -r_mag*k*(rr[1,:]-np.random.normal(0,1,Np)*x_lim[1])*np.random.normal(0,1,Np) # random y force 1/20th the magnitude of the x component\n fr[2,:] = 0 # setting the z component of the force to be 0\n else:\n fr[0,:] = 0\n fr[1,:] = 0\n fr[2,:] = 0\n \n \n return fr\n\n\n\n\n# Force profile centered around the origin \ndef force_origin(r,r_active,t,Np):\n # return force_interp(r,r_active,Np) + force_random(r,r_active,Np,t)\n return force_model(r,r_active,Np) + force_random(r,r_active,Np,t)\n\n\n\n\ndef active_electrode(t):\n \n elec_width, elec_spacing = geo_force_params()\n \n x_e5 = elec_spacing*2e-6\n x_e4 = elec_spacing*1e-6\n x_e3 = elec_spacing*0\n x_e2 = -elec_spacing*1e-6\n x_e1 = -elec_spacing*2e-6\n \n \n r_active = x_e5\n ts = np.linspace(0,38,20) \n strn = 'Active electrode = 5'\n # Assigning active electrode location based on time. 
\n # Note the a if statement can be overridden by the subsequent if statement \n\n \n if t < ts[0]:\n r_active = x_e5\n strn = 'Active electrode = 5'\n if t >= ts[1]:\n r_active = x_e4\n strn = 'Active electrode = 4'\n if t >= ts[2]:\n r_active = x_e3\n strn = 'Active electrode = 3'\n if t >= ts[3]:\n r_active = x_e2\n strn = 'Active electrode = 2'\n if t >= ts[4]:\n r_active = x_e1\n strn = 'Active electrode = 1'\n if t >= ts[5]:\n r_active = x_e2\n strn = 'Active electrode = 2'\n if t >= ts[6]:\n r_active = x_e3\n strn = 'Active electrode = 3'\n if t >= ts[7]:\n r_active = x_e4\n strn = 'Active electrode = 4'\n if t >= ts[8]:\n r_active = x_e5\n strn = 'Active electrode = 5'\n if t >= ts[9]:\n r_active = x_e4\n strn = 'Active electrode = 4'\n if t >= ts[10]:\n r_active = x_e3\n strn = 'Active electrode = 3'\n if t >= ts[11]:\n r_active = x_e2\n strn = 'Active electrode = 2'\n if t >= ts[12]:\n r_active = x_e1\n strn = 'Active electrode = 1'\n if t >= ts[13]:\n r_active = x_e2\n strn = 'Active electrode = 2'\n if t >= ts[14]:\n r_active = x_e3\n strn = 'Active electrode = 3'\n if t >= ts[15]:\n r_active = x_e4\n strn = 'Active electrode = 4'\n if t >= ts[16]:\n r_active = x_e5\n strn = 'Active electrode = 5'\n if t >= ts[17]:\n r_active = x_e4\n strn = 'Active electrode = 4'\n if t >= ts[18]:\n r_active = x_e3\n strn = 'Active electrode = 3'\n if t >= ts[19]:\n r_active = x_e2\n strn = 'Active electrode = 2'\n \n return r_active, strn\n\n\n# This is function that is called from the main program\n# Time dependent force field. Implementation of the switching sequence\ndef force_profile(r,t):\n \n # define switching instances\n r_active, str1 = active_electrode(t)\n Np = r[0,:].size\n \n return force_origin(r,r_active,t,Np)", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 23 17:44:46 2021\n\n@author: asif\n\"\"\"\n\nimport numpy as np\nimport pylab as py\n\n\n\nro = 10e-6\ntfinal = 38\n\n\nxrange_limit = 250e-6 # Max and min of x axis range for plotting animation\nzlow_limit = -20e-6\nzhigh_limit = 150e-6\n\n# Electrode array geometry (units in um)\n# =============================================================================\nelec_width = 15\nelec_spacing = 50\n# =============================================================================\n\n\n\n# Draw electrodes for the animation\ndef draw_geo(tm, ax_xy, ax_yz, ax_xz):\n # tm is a dummy argument for this case (to make it consistent with other force functions)\n \n # The flag_source_state variable is used to draw/erase the source geometry only once\n # This is necessary to speed up the animation.\n \n if 'flag_source_state' not in globals():\n global flag_source_state # Make this variable global so that the assigned value remains saved globally as t changes\n flag_source_state = 0 # initialize with OFF state\n print('Defining global flag for source geometry \\n')\n \n \n \n \n if flag_source_state == 0:\n py.sca(ax_xy)\n substrate_xy = py.Rectangle((-xrange_limit*1e6, -xrange_limit*1e6),2*xrange_limit*1e6,2*xrange_limit*1e6,fc='#f9f9f9')\n py.gca().add_patch(substrate_xy)\n for kk in range(-2,3):\n rectangle = py.Rectangle((-xrange_limit*1e6/2, -elec_width/2+kk*elec_spacing),xrange_limit*1e6,elec_width,fc='#002366')\n py.gca().add_patch(rectangle)\n # ax.add_patch(rectangle)\n \n py.sca(ax_yz)\n substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')\n py.gca().add_patch(substrate_yz)\n \n py.sca(ax_xz)\n substrate_xz = 
py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')\n py.gca().add_patch(substrate_xz)\n\n \n \n print('Drawing source\\n')\n flag_source_state = 1\n\n\n\n\n\n\n# # Read force data from data file\n# Mdata = np.genfromtxt('xsweep_ro=10u,zo=25u.csv',delimiter=',',skip_header=5)\n# xdata = Mdata[:,0]*1e-6\n# Fxdata = Mdata[:,3]*1e-12\n# Fydata = 6*Mdata[:,2]*1e-12\n# Fzdata = Mdata[:,4]*1e-12\n# # Note that the axis has been changed. The axis from the csv data file is different.\n\n\n\n\n# # Interpolation function when using force value from data file \n# def force_interp(ri,ri_active,Np):\n# # Interpolate function for the imported force data(csv file)\n# fi = np.zeros((3,Np))\n# fi[0,:] = np.interp((ri[1,:]-ri_active),xdata,Fxdata) \n# fi[1,:] = np.interp((ri[1,:]-ri_active),xdata,Fydata)\n# fi[2,:] = np.interp((ri[1,:]-ri_active),xdata,Fzdata)\n# return fi\n\n\n\n\n\n\n\n# Simplified spring force model\ndef force_model(rm,r_active,Np):\n k = .3e-6 # spring constant\n r_mag = 0 # magnitude of the random force component\n fm = np.zeros((3,Np))\n \n fm[0,:] = 0\n fm[1,:] = -k*(rm[1,:]-r_active)\n fm[2,:] = -k*(rm[2,:]) \n return fm\n\n# Random force component\ndef force_random(rr,r_active,Np,t):\n # This force component comes from random origins with random magnitude\n k = .3e-6 # spring constant\n r_mag = 1/50 # magnitude of the random force component\n fr = np.zeros((3,Np))\n if t> 0.1 and 200*t%1 == 0:\n fr[0,:] = -5*r_mag*k*(rr[0,:]-np.random.normal(0,1,Np)*xrange_limit)*np.random.normal(0,1,Np)\n fr[1,:] = -r_mag*k*(rr[1,:]-np.random.normal(0,1,Np)*xrange_limit)*np.random.normal(0,1,Np) # random y force 1/20th the magnitude of the x component\n fr[2,:] = 0 # setting the z component of the force to be 0\n else:\n fr[0,:] = 0\n fr[1,:] = 0\n fr[2,:] = 0\n \n \n return fr\n\n\n\n\n# Force profile centered around the origin \ndef force_origin(r,r_active,t,Np):\n # return force_interp(r,r_active,Np) + force_random(r,r_active,Np,t)\n return force_model(r,r_active,Np) + force_random(r,r_active,Np,t)\n\n\n\n\n# This is function that is called from the main program\n# Time dependent force field. Implementation of the switching sequence\ndef force_profile(r,t):\n \n # define switching instances\n \n Np = r[0,:].size\n \n ts = np.linspace(0,38,20) \n \n # define active electrode position at switching\n x_e5 = elec_spacing*2e-6\n x_e4 = elec_spacing*1e-6\n x_e3 = elec_spacing*0\n x_e2 = -elec_spacing*1e-6\n x_e1 = -elec_spacing*2e-6\n \n r_active = x_e5 \n\n # Assigning active electrode location based on time. \n # Note the a if statement can be overridden by the subsequent if statement \n if t < ts[0]:\n r_active = x_e5\n if t >= ts[1]:\n r_active = x_e4\n if t >= ts[2]:\n r_active = x_e3\n if t >= ts[3]:\n r_active = x_e2\n if t >= ts[4]:\n r_active = x_e1\n if t >= ts[5]:\n r_active = x_e2\n if t >= ts[6]:\n r_active = x_e3\n if t >= ts[7]:\n r_active = x_e4\n if t >= ts[8]:\n r_active = x_e5\n if t >= ts[9]:\n r_active = x_e4\n if t >= ts[10]:\n r_active = x_e3\n if t >= ts[11]:\n r_active = x_e2\n if t >= ts[12]:\n r_active = x_e1\n if t >= ts[13]:\n r_active = x_e2\n if t >= ts[14]:\n r_active = x_e3\n if t >= ts[15]:\n r_active = x_e4\n if t >= ts[16]:\n r_active = x_e5\n if t >= ts[17]:\n r_active = x_e4\n if t >= ts[18]:\n r_active = x_e3\n if t >= ts[19]:\n r_active = x_e2\n\n \n \n \n return force_origin(r,r_active,t,Np)" ]
[ [ "numpy.random.normal", "numpy.zeros", "numpy.interp", "numpy.linspace" ], [ "numpy.random.normal", "numpy.zeros", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CBIIT/NCI-DOE-Collab-Pilot2-Autoencoder_MD_Simulation_Data
[ "2b1213f944cf5f2c60799099a469989a1f0a6d3a", "2b1213f944cf5f2c60799099a469989a1f0a6d3a" ]
[ "common/viz_utils.py", "common/darts/modules/network.py" ]
[ "from pathlib import Path\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\nfrom scipy import interpolate\n\n\ndef plot_history(out, history, metric='loss', val=True, title=None, width=8, height=6):\n title = title or 'model {}'.format(metric)\n val_metric = 'val_{}'.format(metric)\n plt.figure(figsize=(width, height))\n plt.plot(history.history[metric], marker='o')\n if val:\n plt.plot(history.history[val_metric], marker='d')\n plt.title(title)\n plt.ylabel(metric)\n plt.xlabel('epoch')\n if val:\n plt.legend(['train_{}'.format(metric), 'val_{}'.format(metric)], loc='upper center')\n else:\n plt.legend(['train_{}'.format(metric)], loc='upper center')\n png = '{}.plot.{}.png'.format(out, metric)\n plt.savefig(png, bbox_inches='tight')\n plt.close()\n\n\ndef plot_scatter(data, classes, out, width=10, height=8):\n cmap = plt.cm.get_cmap('gist_rainbow')\n plt.figure(figsize=(width, height))\n plt.scatter(data[:, 0], data[:, 1], c=classes, cmap=cmap, lw=0.5, edgecolor='black', alpha=0.7)\n plt.colorbar()\n png = '{}.png'.format(out)\n plt.savefig(png, bbox_inches='tight')\n plt.close()\n\n\ndef plot_error(y_true, y_pred, batch, file_ext, file_pre='output_dir', subsample=1000):\n if batch % 10:\n return\n\n total = len(y_true)\n if subsample and subsample < total:\n usecols = np.random.choice(total, size=subsample, replace=False)\n y_true = y_true[usecols]\n y_pred = y_pred[usecols]\n\n y_true = y_true * 100\n y_pred = y_pred * 100\n diffs = y_pred - y_true\n\n bins = np.linspace(-200, 200, 100)\n if batch == 0:\n y_shuf = np.random.permutation(y_true)\n plt.hist(y_shuf - y_true, bins, alpha=0.5, label='Random')\n\n plt.hist(diffs, bins, alpha=0.3, label='Epoch {}'.format(batch + 1))\n plt.title(\"Histogram of errors in percentage growth\")\n plt.legend(loc='upper right')\n plt.savefig(file_pre + '.histogram' + file_ext + '.b' + str(batch) + '.png')\n plt.close()\n\n # Plot measured vs. predicted values\n fig, ax = plt.subplots()\n plt.grid('on')\n ax.scatter(y_true, y_pred, color='red', s=10)\n ax.plot([y_true.min(), y_true.max()],\n [y_true.min(), y_true.max()], 'k--', lw=4)\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n plt.savefig(file_pre + '.diff' + file_ext + '.b' + str(batch) + '.png')\n plt.close()\n\n\ndef plot_array(nparray, xlabel, ylabel, title, fname):\n\n plt.figure()\n plt.plot(nparray, lw=3.)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.savefig(fname, bbox_inches='tight')\n plt.close()\n\n\n# UTILS for UQ / CALIBRATION VISUALIZATION\n\n\nfrom matplotlib.colors import LogNorm\n\n\ndef plot_density_observed_vs_predicted(Ytest, Ypred, pred_name=None, figprefix=None):\n \"\"\"Functionality to plot a 2D histogram of the distribution of observed (ground truth)\n values vs. predicted values. The plot generated is stored in a png file.\n\n Parameters\n ----------\n Ytest : numpy array\n Array with (true) observed values\n Ypred : numpy array\n Array with predicted values.\n pred_name : string\n Name of data colum or quantity predicted (e.g. 
growth, AUC, etc.)\n figprefix : string\n String to prefix the filename to store the figure generated.\n A '_density_predictions.png' string will be appended to the\n figprefix given.\n \"\"\"\n\n xbins = 51\n\n fig = plt.figure(figsize=(24, 18)) # (30, 16)\n ax = plt.gca()\n plt.rc('xtick', labelsize=16) # fontsize of the tick labels\n ax.plot([Ytest.min(), Ytest.max()], [Ytest.min(), Ytest.max()], 'r--', lw=4.)\n plt.hist2d(Ytest, Ypred, bins=xbins, norm=LogNorm())\n cb = plt.colorbar()\n ax.set_xlabel('Observed ' + pred_name, fontsize=38, labelpad=15.)\n ax.set_ylabel('Mean ' + pred_name + ' Predicted', fontsize=38, labelpad=15.)\n ax.axis([Ytest.min() * 0.98, Ytest.max() * 1.02, Ytest.min() * 0.98, Ytest.max() * 1.02])\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=28)\n plt.grid(True)\n plt.savefig(figprefix + '_density_predictions.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_density_predictions.png')\n\n\ndef plot_2d_density_sigma_vs_error(sigma, yerror, method=None, figprefix=None):\n \"\"\"Functionality to plot a 2D histogram of the distribution of\n the standard deviations computed for the predictions vs. the\n computed errors (i.e. values of observed - predicted).\n The plot generated is stored in a png file.\n\n Parameters\n ----------\n sigma : numpy array\n Array with standard deviations computed.\n yerror : numpy array\n Array with errors computed (observed - predicted).\n method : string\n Method used to comput the standard deviations (i.e. dropout,\n heteroscedastic, etc.).\n figprefix : string\n String to prefix the filename to store the figure generated.\n A '_density_sigma_error.png' string will be appended to the\n figprefix given.\n \"\"\"\n\n xbins = 51\n ybins = 31\n\n fig = plt.figure(figsize=(24, 18)) # (30, 16)\n ax = plt.gca()\n plt.rc('xtick', labelsize=16) # fontsize of the tick labels\n plt.hist2d(sigma, yerror, bins=[xbins, ybins], norm=LogNorm())\n cb = plt.colorbar()\n ax.set_xlabel('Standard Deviation (' + method + ')', fontsize=38, labelpad=15.)\n ax.set_ylabel('Error: Observed - Mean Predicted', fontsize=38, labelpad=15.)\n ax.axis([sigma.min() * 0.98, sigma.max() * 1.02, -yerror.max(), yerror.max()])\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=28)\n plt.grid(True)\n plt.savefig(figprefix + '_density_std_error.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_density_std_error.png')\n\n\ndef plot_histogram_error_per_sigma(sigma, yerror, method=None, figprefix=None):\n \"\"\"Functionality to plot a 1D histogram of the distribution of\n computed errors (i.e. values of observed - predicted) observed\n for specific values of standard deviations computed. The range of\n standard deviations computed is split in xbins values and the\n 1D histograms of error distributions for the smallest six\n standard deviations are plotted.\n The plot generated is stored in a png file.\n\n Parameters\n ----------\n sigma : numpy array\n Array with standard deviations computed.\n yerror : numpy array\n Array with errors computed (observed - predicted).\n method : string\n Method used to comput the standard deviations (i.e. 
dropout,\n heteroscedastic, etc.).\n figprefix : string\n String to prefix the filename to store the figure generated.\n A '_histogram_error_per_sigma.png' string will be appended to\n the figprefix given.\n \"\"\"\n\n xbins = 21\n ybins = 31\n\n H, xedges, yedges, img = plt.hist2d(sigma, yerror, # normed=True,\n bins=[xbins, ybins])\n\n fig = plt.figure(figsize=(18, 24))\n legend = []\n for ii in range(4): # (H.shape[0]):\n if ii != 1:\n plt.plot(yedges[0:H.shape[1]], H[ii, :] / np.sum(H[ii, :]),\n marker='o', markersize=12, lw=6.)\n legend.append(str((xedges[ii] + xedges[ii + 1]) / 2))\n plt.legend(legend, fontsize=28)\n ax = plt.gca()\n plt.title('Error Dist. per Standard Deviation for ' + method, fontsize=40)\n ax.set_xlabel('Error: Observed - Mean Predicted', fontsize=38, labelpad=15.)\n ax.set_ylabel('Density', fontsize=38, labelpad=15.)\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n plt.grid(True)\n plt.savefig(figprefix + '_histogram_error_per_std.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_histogram_error_per_std.png')\n\n\ndef plot_decile_predictions(Ypred, Ypred_Lp, Ypred_Hp, decile_list, pred_name=None, figprefix=None):\n \"\"\"Functionality to plot the mean of the deciles predicted.\n The plot generated is stored in a png file.\n\n Parameters\n ----------\n Ypred : numpy array\n Array with median predicted values.\n Ypred_Lp : numpy array\n Array with low decile predicted values.\n Ypred_Hp : numpy array\n Array with high decile predicted values.\n decile_list : string list\n List of deciles predicted (e.g. '1st', '9th', etc.)\n pred_name : string\n Name of data colum or quantity predicted (e.g. growth, AUC, etc.)\n figprefix : string\n String to prefix the filename to store the figure generated.\n A '_decile_predictions.png' string will be appended to the\n figprefix given.\n \"\"\"\n\n index_ = np.argsort(Ypred)\n fig = plt.figure(figsize=(24, 18))\n plt.scatter(range(index_.shape[0]), Ypred[index_])\n plt.scatter(range(index_.shape[0]), Ypred_Lp[index_])\n plt.scatter(range(index_.shape[0]), Ypred_Hp[index_])\n plt.legend(decile_list, fontsize=28)\n plt.xlabel('Index', fontsize=38.)\n plt.ylabel(pred_name, fontsize=38.)\n plt.title('Predicted ' + pred_name + ' Deciles', fontsize=40)\n plt.grid()\n ax = plt.gca()\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n plt.savefig(figprefix + '_decile_predictions.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_decile_predictions.png')\n\n\ndef plot_calibration_interpolation(mean_sigma, error, splineobj1, splineobj2, method='', figprefix=None, steps=False):\n \"\"\"Functionality to plot empirical calibration curves\n estimated by interpolation of the computed\n standard deviations and errors. Since the estimations\n are very noisy, two levels of smoothing are used. Both\n can be plotted independently, if requested.\n The plot(s) generated is(are) stored in png file(s).\n\n Parameters\n ----------\n mean_sigma : numpy array\n Array with the mean standard deviations computed in inference.\n error : numpy array\n Array with the errors computed from the means predicted in inference.\n splineobj1 : scipy.interpolate python object\n A python object from scipy.interpolate that computes a\n cubic Hermite spline (PchipInterpolator) to express\n the interpolation after the first smoothing. 
This\n spline is a partial result generated during the empirical\n calibration procedure.\n splineobj2 : scipy.interpolate python object\n A python object from scipy.interpolate that computes a\n cubic Hermite spline (PchipInterpolator) to express\n the mapping from standard deviation to error. This\n spline is generated for interpolating the predictions\n after a process of smoothing-interpolation-smoothing\n computed during the empirical calibration procedure.\n method : string\n Method used to comput the standard deviations (i.e. dropout,\n heteroscedastic, etc.).\n figprefix : string\n String to prefix the filename to store the figure generated.\n A '_empirical_calibration_interpolation.png' string will be appended to\n the figprefix given.\n steps : boolean\n Besides the complete empirical calibration (including the interpolating\n spline), also generates partial plots with only the spline of\n the interpolating spline after the first smoothing level (smooth1).\n \"\"\"\n\n xmax = np.max(mean_sigma)\n xmin = np.min(mean_sigma)\n xp23 = np.linspace(xmin, xmax, 200)\n yp23 = splineobj2(xp23)\n\n if steps:\n # Plot first smoothing\n yp23_1 = splineobj1(xp23)\n fig = plt.figure(figsize=(24, 18))\n ax = plt.gca()\n ax.plot(mean_sigma, error, 'kx')\n ax.plot(xp23, yp23_1, 'gx', ms=20)\n plt.legend(['True', 'Cubic Spline'], fontsize=28)\n plt.xlabel('Standard Deviation Predicted (' + method + ')', fontsize=38.)\n plt.ylabel('Error: ABS Observed - Mean Predicted', fontsize=38.)\n plt.title('Calibration (by Interpolation)', fontsize=40)\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n plt.grid()\n fig.tight_layout()\n plt.savefig(figprefix + '_empirical_calibration_interp_smooth1.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_empirical_calibration_interp_smooth1.png')\n\n fig = plt.figure(figsize=(24, 18))\n ax = plt.gca()\n ax.plot(mean_sigma, error, 'kx')\n ax.plot(xp23, yp23, 'rx', ms=20)\n plt.legend(['True', 'Cubic Spline'], fontsize=28)\n plt.xlabel('Standard Deviation Predicted (' + method + ')', fontsize=38.)\n plt.ylabel('Error: ABS Observed - Mean Predicted', fontsize=38.)\n plt.title('Calibration (by Interpolation)', fontsize=40)\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n plt.grid()\n fig.tight_layout()\n plt.savefig(figprefix + '_empirical_calibration_interpolation.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_empirical_calibration_interpolation.png')\n\n\ndef plot_calibrated_std(y_test, y_pred, std_calibrated, thresC, pred_name=None, figprefix=None):\n \"\"\"Functionality to plot values in testing set after calibration. An estimation of the lower-confidence samples is made. The plot generated is stored in a png file.\n\n Parameters\n ----------\n y_test : numpy array\n Array with (true) observed values.\n y_pred : numpy array\n Array with predicted values.\n std_calibrated : numpy array\n Array with standard deviation values after calibration.\n thresC : float\n Threshold to label low confidence predictions (low\n confidence predictions are the ones with std > thresC).\n pred_name : string\n Name of data colum or quantity predicted (e.g. 
growth, AUC, etc.).\n figprefix : string\n String to prefix the filename to store the figure generated.\n A '_calibrated.png' string will be appended to the\n figprefix given.\n \"\"\"\n\n N = y_test.shape[0]\n index = np.argsort(y_pred)\n x = np.array(range(N))\n\n indexC = std_calibrated > thresC\n alphafill = 0.5\n if N > 2000:\n alphafill = 0.7\n\n scale = 120\n fig = plt.figure(figsize=(24, 18))\n ax = plt.gca()\n ax.scatter(x, y_test[index], color='red', s=scale, alpha=0.5)\n plt.fill_between(x, y_pred[index] - 1.28 * std_calibrated[index],\n y_pred[index] + 1.28 * std_calibrated[index],\n color='gray', alpha=alphafill)\n plt.scatter(x, y_pred[index], color='orange', s=scale)\n plt.scatter(x[indexC], y_test[indexC], color='green', s=scale, alpha=0.5)\n plt.legend(['True', '1.28 Std', 'Pred', 'Low conf'], fontsize=28)\n plt.xlabel('Index', fontsize=38.)\n plt.ylabel(pred_name + ' Predicted', fontsize=38.)\n plt.title('Calibrated Standard Deviation', fontsize=40)\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n plt.grid()\n fig.tight_layout()\n plt.savefig(figprefix + '_calibrated.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_calibrated.png')\n\n\ndef plot_contamination(y_true, y_pred, sigma, T=None, thresC=0.1, pred_name=None, figprefix=None):\n \"\"\"Functionality to plot results for the contamination model.\n This includes the latent variables T if they are given (i.e.\n if the results provided correspond to training results). Global\n parameters for the normal distribution are used for shading 80%\n confidence interval.\n If results for training (i.e. T available), samples determined to\n be outliers (i.e. samples whose probability of membership to the\n heavy tailed distribution (Cauchy) is greater than the threshold\n given) are highlighted.\n The plot(s) generated is(are) stored in a png file.\n\n Parameters\n ----------\n y_true : numpy array\n Array with observed values.\n y_pred : numpy array\n Array with predicted values.\n sigma : float\n Standard deviation of the normal distribution.\n T : numpy array\n Array with latent variables (i.e. membership to normal and heavy-tailed\n distributions). If in testing T is not available (i.e. None)\n thresC : float\n Threshold to label outliers (outliers are the ones\n with probability of membership to heavy-tailed distribution,\n i.e. T[:,1] > thresC).\n pred_name : string\n Name of data colum or quantity predicted (e.g. growth, AUC, etc.).\n figprefix : string\n String to prefix the filename to store the figures generated.\n A '_contamination.png' string will be appended to the\n figprefix given.\n \"\"\"\n\n N = y_true.shape[0]\n index = np.argsort(y_pred)\n x = np.array(range(N))\n\n if T is not None:\n indexG = T[:, 0] > (1. 
- thresC)\n indexC = T[:, 1] > thresC\n ss = sigma * indexG\n prefig = '_outTrain'\n else:\n ss = sigma\n prefig = '_outTest'\n auxGh = y_pred + 1.28 * ss\n auxGl = y_pred - 1.28 * ss\n\n # Plotting Outliers\n scale = 120\n fig = plt.figure(figsize=(24, 18))\n ax = plt.gca()\n ax.scatter(x, y_true[index], color='red', s=scale)\n if T is not None:\n plt.scatter(x[indexC], y_true[indexC], color='green', s=scale) # , alpha=0.8)\n plt.scatter(x, y_pred[index], color='orange', s=scale)\n plt.fill_between(x, auxGl[index], auxGh[index], color='gray', alpha=0.5)\n if T is not None:\n plt.legend(['True', 'Outlier', 'Pred', '1.28 Std'], fontsize=28)\n else:\n plt.legend(['True', 'Pred', '1.28 Std'], fontsize=28)\n plt.xlabel('Index', fontsize=38.)\n plt.ylabel(pred_name + ' Predicted', fontsize=38.)\n plt.title('Contamination Results', fontsize=40)\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n plt.grid()\n fig.tight_layout()\n plt.savefig(figprefix + prefig + '_contamination.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + prefig + '_contamination.png')\n\n if T is not None:\n # Plotting Latent Variables vs error\n error = np.abs(y_true - y_pred)\n fig = plt.figure(figsize=(24, 18))\n ax = plt.gca()\n ax.scatter(error, T[:, 0], color='blue', s=scale)\n ax.scatter(error, T[:, 1], color='orange', s=scale)\n plt.legend(['Normal', 'Heavy-Tailed'], fontsize=28)\n plt.xlabel('ABS Error', fontsize=38.)\n plt.ylabel('Membership Probability', fontsize=38.)\n plt.title('Contamination: Latent Variables', fontsize=40)\n plt.setp(ax.get_xticklabels(), fontsize=32)\n plt.setp(ax.get_yticklabels(), fontsize=32)\n plt.grid()\n fig.tight_layout()\n plt.savefig(figprefix + '_T_contamination.png', bbox_inches='tight')\n plt.close()\n print('Generated plot: ', figprefix + '_T_contamination.png')\n\n\n# plot training and validation metrics together and generate one chart per metrics\ndef plot_metrics(history, title=None, skip_ep=0, outdir='.', add_lr=False):\n \"\"\" Plots keras training curves history.\n Args:\n skip_ep: number of epochs to skip when plotting metrics\n add_lr: add curve of learning rate progression over epochs\n \"\"\"\n\n def capitalize_metric(met):\n return ' '.join(s.capitalize() for s in met.split('_'))\n\n all_metrics = list(history.history.keys())\n pr_metrics = ['_'.join(m.split('_')[1:]) for m in all_metrics if 'val' in m]\n\n epochs = np.asarray(history.epoch) + 1\n if len(epochs) <= skip_ep:\n skip_ep = 0\n eps = epochs[skip_ep:]\n hh = history.history\n\n for p, m in enumerate(pr_metrics):\n metric_name = m\n metric_name_val = 'val_' + m\n\n y_tr = hh[metric_name][skip_ep:]\n y_vl = hh[metric_name_val][skip_ep:]\n\n ymin = min(set(y_tr).union(y_vl))\n ymax = max(set(y_tr).union(y_vl))\n lim = (ymax - ymin) * 0.1\n ymin, ymax = ymin - lim, ymax + lim\n\n # Start figure\n fig, ax1 = plt.subplots()\n\n # Plot metrics\n ax1.plot(eps, y_tr, color='b', marker='.', linestyle='-', linewidth=1, alpha=0.6, label=capitalize_metric(metric_name))\n ax1.plot(eps, y_vl, color='r', marker='.', linestyle='--', linewidth=1, alpha=0.6, label=capitalize_metric(metric_name_val))\n ax1.set_xlabel('Epoch')\n ax1.set_ylabel(capitalize_metric(metric_name))\n ax1.set_xlim([min(eps) - 1, max(eps) + 1])\n ax1.set_ylim([ymin, ymax])\n ax1.tick_params('y', colors='k')\n\n # Add learning rate\n if (add_lr is True) and ('lr' in hh):\n ax2 = ax1.twinx()\n ax2.plot(eps, hh['lr'][skip_ep:], color='g', marker='.', linestyle=':', linewidth=1,\n 
alpha=0.6, markersize=5, label='LR')\n ax2.set_ylabel('Learning rate', color='g', fontsize=12)\n\n ax2.set_yscale('log')\n ax2.tick_params('y', colors='g')\n\n ax1.grid(True)\n legend = ax1.legend(loc='best', prop={'size': 10})\n frame = legend.get_frame()\n frame.set_facecolor('0.95')\n if title is not None:\n plt.title(title)\n\n figpath = Path(outdir) / (metric_name + '.png')\n plt.savefig(figpath, bbox_inches='tight')\n plt.close()\n", "from typing import Dict, List, Callable\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom darts.api import Model\nfrom darts.modules import Cell\nfrom darts.modules.classifier import MultitaskClassifier\nfrom darts.genotypes import Genotype\n\n\nclass Hyperparameters:\n c = 1\n num_nodes = 2\n num_cells = 3\n channel_multiplier = 1\n\n\nclass Network(Model):\n \"\"\" Collection of cells\n\n Args:\n stem: nn.Module that takes the input data\n and outputs `cell_dim` number of features\n\n classifier_dim: number of features from\n Darts.modules.mixed_layer.MixedLayer. This\n depends upon the choice of primitives specified\n by `ops`.\n\n ops: Constructor for all of the primitive nn.Modules. This\n should be a dictionary of lambda function used to construct\n your nn.Modules. The parameters of the lamdas must be `c`, the\n number of input channels of each primitive, `stride`, the stride for\n convolution blocks, and `affine`, whether to use `affine` in\n batch norm.\n\n tasks: a dictionary whose keys are the names of the classification\n tasks, and whose keys are the number of classes in each task.\n\n criterion: Pytorch loss criterion\n\n device: Either \"cpu\" or \"gpu\n\n hyperparams: instance of Hyperparameters. This hyperparamters for DARTS.\n \"\"\"\n\n def __init__(self,\n stem: nn.Module,\n cell_dim: int,\n classifier_dim: int,\n ops: Dict[str, Callable[[int, int, bool], nn.Module]],\n tasks: Dict[str, int],\n criterion,\n device=\"cpu\",\n hyperparams=Hyperparameters()):\n super(Network, self).__init__()\n self.ops = ops\n self.cell_dim = cell_dim\n self.tasks = tasks\n self.criterion = criterion\n self.device = device\n self.num_cells = hyperparams.num_cells\n self.num_nodes = hyperparams.num_nodes\n self.primitives = list(ops.keys())\n self.stem = stem\n self.channel_multiplier = hyperparams.channel_multiplier\n self.c = hyperparams.c\n\n # c_curr means a factor of the output channels of current cell\n c_curr = cell_dim * self.channel_multiplier * hyperparams.c\n cpp, cp, c_curr = c_curr, c_curr, hyperparams.c\n self.cells = nn.ModuleList()\n for i in range(hyperparams.num_cells):\n\n cell = Cell(\n hyperparams.num_nodes,\n hyperparams.channel_multiplier,\n cpp,\n cp,\n c_curr,\n self.primitives,\n self.ops\n ).to(self.device)\n\n self.cells += [cell]\n\n self.classifier = MultitaskClassifier(classifier_dim, tasks)\n\n # k is the total number of edges inside single cell, 14\n k = sum(1 for i in range(self.num_nodes) for j in range(2 + i))\n num_ops = len(self.primitives)\n\n self.alpha_normal = nn.Parameter(torch.randn(k, num_ops))\n\n with torch.no_grad():\n # initialize to smaller value\n self.alpha_normal.mul_(1e-3)\n\n self._arch_parameters = [\n self.alpha_normal,\n ]\n\n def new(self):\n \"\"\" Create a new model initialzed with current alpha parameters.\n\n Weights are left untouched.\n\n Returns\n -------\n model : Network\n New model initialized with current alpha.\n \"\"\"\n model = Network(\n self.stem,\n self.cell_dim,\n self.ops,\n self.tasks,\n self.criterion\n ).to(self.device)\n\n for x, y in 
zip(model.arch_parameters(), self.arch_parameters()):\n x.data.copy_(y.data)\n\n return model\n\n def forward(self, x):\n # s0 & s1 means the last cells' output\n s0 = s1 = self.stem(x) # [b, 3, 32, 32] => [b, 48, 32, 32]\n\n for i, cell in enumerate(self.cells):\n weights = F.softmax(self.alpha_normal, dim=-1) # [14, 8]\n # execute cell() firstly and then assign s0=s1, s1=result\n s0, out = s1, cell(s0, s1, weights) # [40, 64, 32, 32]\n\n logits = self.classifier(out.view(out.size(0), -1))\n\n return logits\n\n def loss_value(self, x_data, y_true, y_pred, reduce='mean'):\n \"\"\" Calculate a value of loss function \"\"\"\n y_pred = self(x_data)\n\n losses = {}\n for key, value in y_true.items():\n losses[key] = F.nll_loss(F.log_softmax(y_pred[key], dim=1), y_true[key])\n\n if reduce:\n total = 0\n for _, value in losses.items():\n total += value\n\n if reduce == \"mean\":\n losses = total / len(losses)\n elif reduce == \"sum\":\n losses = total\n\n return losses\n\n def arch_parameters(self):\n return self._arch_parameters\n\n def genotype(self):\n \"\"\"\n :return:\n \"\"\"\n def _parse(weights):\n gene = []\n n = 2\n start = 0\n for i in range(self.num_nodes): # for each node\n end = start + n\n W = weights[start:end].copy()\n edges = sorted(range(i + 2), # i+2 is the number of connection for node i\n key=lambda x: -max(W[x][k] # by descending order\n for k in range(len(W[x])) # get strongest ops\n if k != self.primitives.index('none'))\n )[:2] # only has two inputs\n for j in edges: # for every input nodes j of current node i\n k_best = None\n for k in range(len(W[j])): # get strongest ops for current input j->i\n if k != self.primitives.index('none'):\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((self.primitives[k_best], j)) # save ops and input node\n start = end\n n += 1\n return gene\n\n gene_normal = _parse(F.softmax(self.alpha_normal, dim=-1).data.cpu().numpy())\n concat = range(2 + self.num_nodes - self.channel_multiplier, self.num_nodes + 2)\n\n genotype = Genotype(\n normal=gene_normal, normal_concat=concat,\n reduce=gene_normal, reduce_concat=concat\n )\n\n return genotype\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "numpy.asarray", "matplotlib.pyplot.rc", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.gca", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.cm.get_cmap", "numpy.min", "numpy.random.choice", "matplotlib.pyplot.savefig", "matplotlib.pyplot.hist2d", "matplotlib.pyplot.fill_between", "numpy.argsort", "matplotlib.pyplot.hist", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.colors.LogNorm", "numpy.abs", "matplotlib.pyplot.scatter", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "numpy.random.permutation", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel" ], [ "torch.nn.functional.softmax", "torch.nn.functional.log_softmax", "torch.randn", "torch.nn.ModuleList", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexcere/syrup_dash_visualizer
[ "0559d3ee0fb3cda5b344c806b9a9cfaa222d14dc" ]
[ "plots.py" ]
[ "import itertools\n\nimport numpy as np\nimport plotly.graph_objects as go\nimport pandas as pd\nimport pathlib\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\n\nPATH = pathlib.Path(__file__).parent\nDATA_PATH = (PATH.joinpath(\"data\")).resolve()\nencoding_names = {'initial_configuration': \"O'<sub>SFS</sub> + C<sub>L</sub><br>(Initial encoding)\",\n \"no_output_before_pop\": \"O'<sub>SFS</sub> + C<sub>L</sub> + C<sub>R</sub>\",\n 'at_most': \"O'<sub>SFS</sub> + C<sub>L</sub> + C<sub>U</sub>\",\n 'pushed_once': \"O'<sub>SFS</sub> + C<sub>L</sub> + C<sub>N</sub>\",\n \"alternative_gas_model\": \"O<sub>SFS</sub> + C<sub>L</sub>\",\n \"final_encoding\": \"Final encoding\",\n \"60s\": \"60s\", \"30s\": \"30s\", \"15s\": \"15s\", \"10s\": \"10s\", \"1s\": \"1s\",\n \"no_output_before_pop_at_most\": \"O'<sub>SFS</sub> + C<sub>L</sub> + C<sub>R</sub> + C<sub>U</sub>\",\n \"no_output_before_pop_pushed_once\": \"O'<sub>SFS</sub> + C<sub>L</sub> + C<sub>R</sub> + C<sub>N</sub>\",\n \"no_output_before_pop_at_most_pushed_once\": \"O'<sub>SFS</sub> + C<sub>L</sub> \"\n \"+ C<sub>R</sub> + C<sub>U</sub> + C<sub>N</sub>\",\n \"final_setup\": \"Final setup\"}\n\noptimality_names = {'already_optimal': \"Already optimal\", 'discovered_optimal': \"Discovered optimal\",\n 'non_optimal_with_less_gas': \"Non optimal<br> with less gas\",\n 'non_optimal_with_same_gas': \"Non optimal<br> with same gas\",\n 'no_solution_found': \"No solution found\"}\n\nencoding_names_abbreviated = {'initial_configuration': \"initial\", \"no_output_before_pop\": \"restrict POP\",\n 'at_most': \"at most\", 'pushed_once': \"values pushed\",\n \"alternative_gas_model\": \"alt. gas model\", \"60s\": \"60s\", \"30s\": \"30s\", \"15s\": \"15s\",\n \"10s\": \"10s\", \"1s\": \"1s\", \"final_encoding\": \"final\"}\n\nanalyzed_parameters_names = {'saved_gas': \"Gas saved\", 'time': \"Time in min\"}\n\nsolver_name_abbreviated = {\"combined\": \"port\", \"z3\": \"z3\", \"oms\": \"oms\", \"barcelogic\": \"bclg\"}\n\nsolver_name = {\"combined\": \"portfolio\", \"z3\": \"z3\", \"oms\": \"oms\", \"barcelogic\": \"barcelogic\"}\n\ndef plot_time(folder_name, encodings):\n fig = go.Figure()\n for encoding in encodings:\n times = np.empty(0, dtype=float)\n labels = []\n for name in folder_name:\n csv_name = str(DATA_PATH) + \"/\" + encoding + \"_\" + name + \".csv\"\n df = pd.read_csv(csv_name)\n arr = df['time'].to_numpy() / 60\n times = np.append(times, arr)\n labels.extend([solver_name[name]] * len(arr))\n fig.add_trace(go.Box(y=times, x=labels, name=encoding_names[encoding]))\n fig.update_layout(\n yaxis_title='Times per contract (minutes)',\n boxmode='group' # group together boxes of the different traces for each value of x\n )\n return fig\n\n\ndef plot_gas(folder_name, encodings):\n fig = go.Figure()\n for encoding in encodings:\n times = np.empty(0, dtype=float)\n labels = []\n for name in folder_name:\n csv_name = str(DATA_PATH) + \"/\" + encoding + \"_\" + name + \".csv\"\n df = pd.read_csv(csv_name)\n arr = df['saved_gas'].to_numpy()\n times = np.append(times, arr)\n labels.extend([name] * len(arr))\n fig.add_trace(go.Box(y=times, x=labels, name=encoding_names[encoding]))\n fig.update_layout(\n yaxis_title='Saved gas per contract',\n boxmode='group' # group together boxes of the different traces for each value of x\n )\n return fig\n\n\ndef plot_statistics(folder_name, encodings):\n fig = go.Figure()\n statistics = ['already_optimal', 'discovered_optimal', 'non_optimal_with_less_gas', 
'non_optimal_with_same_gas',\n 'no_solution_found']\n for statistic in statistics:\n labels_x = []\n labels_y = []\n results = []\n for name in folder_name:\n for encoding in encodings:\n csv_name = str(DATA_PATH) + \"/\" + encoding + \"_\" + name + \".csv\"\n df = pd.read_csv(csv_name)\n total_sum = 0\n for other_statistics in statistics:\n total_sum += df[other_statistics].sum()\n labels_x.append(solver_name_abbreviated[name])\n labels_y.append(encoding_names_abbreviated[encoding])\n results.append((df[statistic].sum() * 100) / total_sum)\n fig.add_trace(go.Bar(y=results, x=[labels_x, labels_y], name=optimality_names[statistic]))\n fig.update_layout(\n yaxis_title='Comparison in outputs',\n barmode='stack',\n )\n return fig\n\n\ndef select_comparison(df, comparison_category):\n if comparison_category == \"init_progr_len\":\n return df[comparison_category].to_numpy()\n elif comparison_category == \"initial_size_relation\":\n return df[comparison_category].to_numpy()\n elif comparison_category == \"number_of_necessary_push\":\n return df[comparison_category].to_numpy()\n elif comparison_category == \"number_of_necessary_uninterpreted_instructions\":\n return df[comparison_category].to_numpy()\n elif comparison_category == \"push_per_initial\":\n return df[\"number_of_necessary_push\"].to_numpy() / df[\"init_progr_len\"].to_numpy()\n elif comparison_category == \"uninterpreted_per_initial\":\n return df[\"number_of_necessary_uninterpreted_instructions\"].to_numpy() / df[\"init_progr_len\"].to_numpy()\n elif comparison_category == \"push_per_expected\":\n expected_size = np.array(list(map(lambda x: 1 if x == 0 else x, df['inferred_size'])))\n return df[\"number_of_necessary_push\"].to_numpy() / expected_size\n elif comparison_category == \"uninterpreted_per_expected\":\n expected_size = np.array(list(map(lambda x: 1 if x == 0 else x, df['inferred_size'])))\n return df[\"number_of_necessary_uninterpreted_instructions\"].to_numpy() / expected_size\n\n\ndef plot_comparison(cat1, cat2, relation):\n csv_name1 = str(DATA_PATH) + \"/\" + \"comparison_\" + cat1 + \"_\" + cat2 + \".csv\"\n csv_name2 = str(DATA_PATH) + \"/\" + \"comparison_\" + cat2 + \"_\" + cat1 + \".csv\"\n y1 = select_comparison(pd.read_csv(csv_name1), relation)\n y2 = select_comparison(pd.read_csv(csv_name2), relation)\n fig = go.Figure()\n fig.add_trace(go.Box(y=y1, name=\"Default encoding<br>works better\", boxpoints='all', marker_size=3))\n fig.add_trace(go.Box(y=y2, name=\"Selected encoding<br>works better\", boxpoints='all', marker_size=3))\n fig.update_layout(yaxis_title=\"Comparison between encodings\")\n return fig\n\n\ndef plot_configuration_comparison(category_comparison):\n csv_name = str(DATA_PATH) + \"/\" + category_comparison + \"_parameter_comparison.csv\"\n df = pd.read_csv(csv_name)\n x = [encoding_names[encoding] for encoding in df['name'].to_list()]\n y = df['time'].to_list()\n fig = go.Figure(data=[go.Bar(x=x, y=y)])\n fig.update_layout(yaxis_title=\"Total time in minutes\")\n return fig\n\n\ndef plot_statistics_pie_chart(solver):\n syrup_csv_name = str(DATA_PATH) + \"/final_setup_\" + solver + \".csv\"\n cav_csv_name = str(DATA_PATH) + \"/CAV_\" + solver + \".csv\"\n labels = ['already_optimal', 'discovered_optimal', 'non_optimal_with_less_gas',\n 'non_optimal_with_same_gas', 'no_solution_found']\n labels_to_desplay = [optimality_names[name] for name in labels]\n cav_df = pd.read_csv(cav_csv_name).sum()\n syrup_df = pd.read_csv(syrup_csv_name).sum()\n\n cav_values = [cav_df[label] for label in labels]\n 
syrup_values = [syrup_df[label] for label in labels]\n\n fig = make_subplots(rows=1, cols=2, specs=[[{'type': 'domain'}, {'type': 'domain'}]])\n fig.add_trace(go.Pie(labels=labels_to_desplay, values=cav_values, name=\"CAV'20 Setup\"),\n 1, 1)\n fig.add_trace(go.Pie(labels=labels_to_desplay, values=syrup_values, name=\"New setup\"),\n 1, 2)\n\n # Use `hole` to create a donut-like pie chart\n fig.update_traces(hole=.4, hoverinfo=\"label+percent+name\")\n\n fig.update_layout(\n title_text=\"Comparison between Syrup 1.0 setup (15 min) vs Syrup 2.0 setup (10 s)\",\n # Add annotations in the center of the donut pies.\n annotations=[dict(text='syrup 1.0', x=0.14, y=0.5, font_size=15, showarrow=False),\n dict(text='syrup 2.0', x=0.85, y=0.5, font_size=15, showarrow=False)])\n return fig\n\n\ndef plot_bar_comparison(solver, category_name):\n syrup_csv_name = str(DATA_PATH) + \"/final_setup_\" + solver + \".csv\"\n cav_csv_name = str(DATA_PATH) + \"/CAV_\" + solver + \".csv\"\n cav_rows = pd.read_csv(cav_csv_name).to_dict('records')\n syrup_rows = pd.read_csv(syrup_csv_name).to_dict('records')\n labels = list(range(len(syrup_rows)))\n cav_values = []\n syrup_values = []\n for i, cav_row in enumerate(cav_rows):\n block_name = cav_row['name']\n syrup_row = list(filter(lambda row: row['name'] == block_name, syrup_rows))[0]\n if category_name == \"time\":\n cav_values.append(cav_row[category_name] / 60)\n syrup_values.append(syrup_row[category_name] / 60)\n else:\n cav_values.append(cav_row[category_name])\n syrup_values.append(syrup_row[category_name])\n fig = go.Figure(data=[go.Bar(name=\"syrup 1.0\", x=labels, y=cav_values),\n go.Bar(name='syrup 2.0', x=labels, y=syrup_values)])\n fig.update_layout(barmode='group', yaxis_title=analyzed_parameters_names[category_name] + ' per contract',)\n if category_name == \"time\":\n fig.update_layout(yaxis_type=\"log\")\n return fig\n" ]
[ [ "numpy.append", "pandas.read_csv", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
shikhar2707/datasets
[ "c034a193967d6d72152196708a5638e546e320f4", "c034a193967d6d72152196708a5638e546e320f4", "c034a193967d6d72152196708a5638e546e320f4" ]
[ "tensorflow_datasets/image_classification/imagenet2012_subset.py", "tensorflow_datasets/structured/rock_you.py", "tensorflow_datasets/image_classification/colorectal_histology.py" ]
[ "# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Imagenet subset datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets.image_classification.imagenet import Imagenet2012\nimport tensorflow_datasets.public_api as tfds\n\n\n_DESCRIPTION = '''\\\nImagenet2012Subset is a subset of original ImageNet ILSVRC 2012 dataset.\nThe dataset share the *same* validation set as the original ImageNet ILSVRC 2012\ndataset. However, the training set is subsampled in a label balanced fashion.\nIn `1pct` configuration, 1%, or 12811, images are sampled, most classes have\nthe same number of images (average 12.8), some classes randomly have 1 more\nexample than others; and in `10pct` configuration, ~10%, or 128116, most classes\nhave the same number of images (average 128), and some classes randomly have 1\nmore example than others.\n\nThis is supposed to be used as a benchmark for semi-supervised learning, and\nhas been originally used in SimCLR paper (https://arxiv.org/abs/2002.05709).\n'''\n\n_CITATION = '''\\\n@article{chen2020simple,\n title={A Simple Framework for Contrastive Learning of Visual Representations},\n author={Chen, Ting and Kornblith, Simon and Norouzi, Mohammad and Hinton, Geoffrey},\n journal={arXiv preprint arXiv:2002.05709},\n year={2020}\n}\n@article{ILSVRC15,\n Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. 
Berg and Li Fei-Fei},\n Title = {{ImageNet Large Scale Visual Recognition Challenge}},\n Year = {2015},\n journal = {International Journal of Computer Vision (IJCV)},\n doi = {10.1007/s11263-015-0816-y},\n volume={115},\n number={3},\n pages={211-252}\n}\n'''\n\n# pylint: disable=line-too-long\n_LABELS_FNAME = 'image_classification/imagenet2012_labels.txt'\nSUBSET2FILES = {\n '1pct': 'https://raw.githubusercontent.com/google-research/simclr/master/imagenet_subsets/1percent.txt',\n '10pct': 'https://raw.githubusercontent.com/google-research/simclr/master/imagenet_subsets/10percent.txt'\n}\n\n\nclass Imagenet2012Subset(Imagenet2012):\n \"\"\"Class balanced subset of Imagenet 2012 dataset.\"\"\"\n\n BUILDER_CONFIGS = [\n tfds.core.BuilderConfig( # pylint: disable=g-complex-comprehension\n name=subset_size,\n description='{} of total ImageNet training set.'.format(subset_size),\n version=tfds.core.Version(\n '5.0.0', ''),\n ) for subset_size in SUBSET2FILES\n ]\n\n def _info(self):\n names_file = tfds.core.get_tfds_path(_LABELS_FNAME)\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(),\n 'label': tfds.features.ClassLabel(names_file=names_file),\n 'file_name': tfds.features.Text(), # Eg: 'n15075141_54.JPEG'\n }),\n supervised_keys=('image', 'label'),\n homepage='http://image-net.org/',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n train_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_train.tar')\n val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')\n\n # We don't import the original test split, as it doesn't include labels.\n # These were never publicly released.\n if not tf.io.gfile.exists(train_path) or not tf.io.gfile.exists(val_path):\n raise AssertionError(\n 'ImageNet requires manual download of the data. Please download '\n 'the train and val set and place them into: {}, {}'.format(\n train_path, val_path))\n\n # Download and load subset file.\n subset_file = dl_manager.download(SUBSET2FILES[self.builder_config.name])\n if isinstance(subset_file, list): # it will only be a list during testing,\n subset_file = subset_file[0] # where the first entry is 1percent.txt.\n with tf.io.gfile.GFile(subset_file) as fp:\n subset = set(fp.read().split('\\n'))\n\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n 'archive': dl_manager.iter_archive(train_path),\n 'subset': subset,\n },\n ),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs={\n 'archive': dl_manager.iter_archive(val_path),\n 'validation_labels': self._get_validation_labels(val_path),\n },\n ),\n ]\n\n def _generate_examples(self, archive, subset=None, validation_labels=None):\n \"\"\"Yields examples.\"\"\"\n if validation_labels: # Validation split\n for key, example in self._generate_examples_validation(archive,\n validation_labels):\n yield key, example\n # Training split. Main archive contains archives names after a synset noun.\n # Each sub-archive contains pictures associated to that synset.\n for fname, fobj in archive:\n label = fname[:-4] # fname is something like 'n01632458.tar'\n # TODO(b/117643231): in py3, the following lines trigger tarfile module\n # to call `fobj.seekable()`, which Gfile doesn't have. 
We should find an\n # alternative, as this loads ~150MB in RAM.\n fobj_mem = io.BytesIO(fobj.read())\n for image_fname, image in tfds.download.iter_archive(\n fobj_mem, tfds.download.ExtractMethod.TAR_STREAM):\n image = self._fix_image(image_fname, image)\n if subset is None or image_fname in subset: # filtering using subset.\n record = {\n 'file_name': image_fname,\n 'image': image,\n 'label': label,\n }\n yield image_fname, record\n\n", "# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"The rockyou dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\\\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nThis dataset contains 14,344,391 passwords that were leaked or stolen from from various sites. The author of this dataset states that \"I'm hosting them because it seems like nobody else does (hopefully it isn't because hosting them is illegal :)). Naturally, I'm not the one who stole these; I simply found them online, removed any names/email addresses/etc.\". This dataset is used to train Machine Learning models for password guessing and cracking.\n\"\"\"\n\n_DOWNLOAD_URL = \"https://github.com/brannondorsey/naive-hashcat/releases/download/data/rockyou.txt\"\n\n\nclass RockYou(tfds.core.GeneratorBasedBuilder):\n \"\"\"This dataset contains passwords that were leaked or stolen from from various sites.\"\"\"\n\n VERSION = tfds.core.Version(\"0.1.0\")\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"password\":\n tfds.features.Text(encoder=tfds.features.text.ByteTextEncoder()\n ),\n }),\n supervised_keys=None,\n homepage=\"https://wiki.skullsecurity.org/Passwords\",\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(_DOWNLOAD_URL)\n return [\n tfds.core.SplitGenerator(\n name=\"train\",\n gen_kwargs={\n \"path\": dl_path,\n },\n )\n ]\n\n def _generate_examples(self, path):\n\n with tf.io.gfile.GFile(path, \"rb\") as f:\n blines = f.readlines()\n\n for i, bline in enumerate(blines):\n yield i, {\n \"password\": bline.strip(),\n }\n", "# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Texture tiles from 
colorectal cancer histology.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nimport tensorflow_datasets.public_api as tfds\n\n\n_URL = \"https://zenodo.org/record/53169#.XGZemKwzbmG\"\n_TILES_DL_URL = \"https://zenodo.org/record/53169/files/Kather_texture_2016_image_tiles_5000.zip\"\n_LARGE_DL_URL = \"https://zenodo.org/record/53169/files/Kather_texture_2016_larger_images_10.zip\"\n\n_TILES_SUBDIR = \"Kather_texture_2016_image_tiles_5000\"\n_LARGE_SUBDIR = \"Kather_texture_2016_larger_images_10\"\n\n_CLASS_NAMES = (\n \"tumor\",\n \"stroma\",\n \"complex\",\n \"lympho\",\n \"debris\",\n \"mucosa\",\n \"adipose\",\n \"empty\",\n)\n_TILES_SIZE = 150\n_LARGE_SIZE = 5000\n\n_CITATION = \"\"\"\\\n@article{kather2016multi,\n title={Multi-class texture analysis in colorectal cancer histology},\n author={Kather, Jakob Nikolas and Weis, Cleo-Aron and Bianconi, Francesco and Melchers, Susanne M and Schad, Lothar R and Gaiser, Timo and Marx, Alexander and Z{\\\"o}llner, Frank Gerrit},\n journal={Scientific reports},\n volume={6},\n pages={27988},\n year={2016},\n publisher={Nature Publishing Group}\n}\n\"\"\"\n\n\ndef _class_subdir(class_index, class_name):\n return \"%02d_%s\" % (class_index + 1, class_name.upper())\n\n\ndef _load_tif(path):\n with tf.io.gfile.GFile(path, \"rb\") as fp:\n image = tfds.core.lazy_imports.PIL_Image.open(fp)\n return np.array(image)\n\n\nclass ColorectalHistology(tfds.core.GeneratorBasedBuilder):\n \"\"\"Biological 8-class classification problem.\"\"\"\n VERSION = tfds.core.Version(\n \"2.0.0\", \"New split API (https://tensorflow.org/datasets/splits)\")\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=(\n \"Classification of textures in colorectal cancer histology. 
\"\n \"Each example is a 150 x 150 x 3 RGB image of one of 8 classes.\"),\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(shape=(_TILES_SIZE,) * 2 + (3,)),\n \"label\": tfds.features.ClassLabel(\n names=_CLASS_NAMES),\n \"filename\": tfds.features.Text(),\n }),\n homepage=_URL,\n citation=_CITATION,\n supervised_keys=(\"image\", \"label\"),\n )\n\n def _split_generators(self, dl_manager):\n folder = dl_manager.download_and_extract(_TILES_DL_URL)\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs=dict(root_dir=folder),\n ),\n ]\n\n def _generate_examples(self, root_dir):\n root_dir = os.path.join(root_dir, _TILES_SUBDIR)\n for i, class_name in enumerate(_CLASS_NAMES):\n class_dir = os.path.join(root_dir, _class_subdir(i, class_name))\n fns = tf.io.gfile.listdir(class_dir)\n\n for fn in sorted(fns):\n image = _load_tif(os.path.join(class_dir, fn))\n record = {\n \"image\": image,\n \"label\": class_name,\n \"filename\": fn,\n }\n yield \"%s/%s\" % (class_name, fn), record\n\n\nclass ColorectalHistologyLarge(tfds.core.GeneratorBasedBuilder):\n \"\"\"10 Large 5000 x 5000 colorectal histology images without labels.\"\"\"\n VERSION = tfds.core.Version(\n \"2.0.0\", \"New split API (https://tensorflow.org/datasets/splits)\")\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=(\n \"10 large 5000 x 5000 textured colorectal cancer histology images\"),\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(shape=(_LARGE_SIZE,)*2 + (3,)),\n \"filename\": tfds.features.Text(),\n }),\n homepage=_URL,\n citation=_CITATION\n )\n\n def _split_generators(self, dl_manager):\n folder = dl_manager.download_and_extract(_LARGE_DL_URL)\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs=dict(folder=folder)\n )\n ]\n\n def _generate_examples(self, folder):\n folder = os.path.join(folder, _LARGE_SUBDIR)\n for fn in tf.io.gfile.listdir(folder):\n image = _load_tif(os.path.join(folder, fn))\n record = dict(image=image, filename=fn)\n yield fn, record\n" ]
[ [ "tensorflow.compat.v2.io.gfile.GFile", "tensorflow.compat.v2.io.gfile.exists" ], [ "tensorflow.compat.v2.io.gfile.GFile" ], [ "tensorflow.compat.v2.io.gfile.GFile", "numpy.array", "tensorflow.compat.v2.io.gfile.listdir" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]