repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
MosesDastmard/ADM2019_HW04 | [
"5deba2d26b41f8b9a1433c81a1f1aa5bcef949e1"
] | [
"theoretical_lib.py"
] | [
"### The MILP problems can be solved using pulp packages that powered by B&B algorithm\r\n### provides the ability to get the global optimum for not only LP but also MILP\r\nimport pandas as pd\r\nfrom numpy.linalg import norm\r\nimport numpy as np\r\nimport pulp as plp\r\n#%%\r\ndef dist(df):\r\n dist_mat = np.zeros((df.shape[0], df.shape[0]))\r\n for i in range(df.shape[0]):\r\n for j in range(df.shape[0]):\r\n dist_mat[i,j] = norm(df.iloc[i,:].values - df.iloc[j,:].values)**2\r\n return dist_mat\r\ndef k_median(df, K):\r\n dist_mat = dist(df)\r\n n = df.shape[0]\r\n k = K\r\n set_I = range(n)\r\n set_J = range(n)\r\n opt_model = plp.LpProblem(name=\"MILP_k-median_Model\")\r\n # if x is Binary\r\n x_vars = {\"x_{0}_{1}\".format(i,j): plp.LpVariable(cat = plp.LpBinary, name=\"x_{0}_{1}\".format(i,j)) for i in set_I for j in set_J}\r\n y_vars = {\"y_{0}\".format(j): plp.LpVariable(cat = plp.LpBinary, name=\"y_{0}\".format(j)) for j in set_J}\r\n vars_ = {**x_vars, **y_vars}\r\n # == constraints\r\n con1 = {\"con1_{0}\".format(i) :\r\n opt_model.addConstraint(plp.LpConstraint(\r\n e = plp.lpSum(vars_[\"x_{0}_{1}\".format(i,j)] \r\n for j in set_J), \r\n sense = plp.LpConstraintEQ,\r\n rhs = 1,\r\n name=\"con1_{0}\".format(i))) \r\n for i in set_I}\r\n \r\n con2 = {\"con2\" : opt_model.addConstraint(plp.LpConstraint(e = plp.lpSum(vars_[\"y_{0}\".format(j)] for j in set_J), sense = plp.LpConstraintEQ , rhs = k, name=\"con2\"))}\r\n \r\n \r\n con3 = {\"con3_{0}_{1}\".format(i,j) : opt_model.addConstraint(plp.LpConstraint(e = plp.lpSum([vars_[\"x_{0}_{1}\".format(i,j)], -vars_[\"y_{0}\".format(j)]]), sense = plp.LpConstraintLE, rhs = 0,name=\"con3_{0}_{1}\".format(i,j))) for i in set_I for j in set_J}\r\n con = {**con1, **con2, **con3}\r\n objective = plp.lpSum(vars_[\"x_{0}_{1}\".format(i,j)] * dist_mat[i,j] \r\n for i in set_I \r\n for j in set_J)\r\n # for minimization\r\n opt_model.sense = plp.LpMinimize\r\n opt_model.setObjective(objective)\r\n \r\n # solving with local cplex\r\n opt_model.solve()\r\n \r\n opt_model.numConstraints()\r\n \r\n opt_df = pd.DataFrame.from_dict(x_vars, orient=\"index\", \r\n columns = [\"variable_object\"])\r\n \r\n opt_df[\"solution_value\"] = opt_df[\"variable_object\"].apply(lambda item: item.varValue)\r\n \r\n opt_df['i'] = opt_df['variable_object'].apply(lambda x: int(x.name.split(\"_\")[1]))\r\n opt_df['j'] = opt_df['variable_object'].apply(lambda x: int(x.name.split(\"_\")[2]))\r\n \r\n \r\n return (pd.pivot_table(data = opt_df, values='solution_value', index ='i', columns='j').values*dist_mat).sum()\r\n \r\n#%%\r\ndef plot_res():\r\n p1 = 40.41\r\n p2 = -816.4\r\n p3 = 6109\r\n p4 = -2.054e+04\r\n p5 = 2.95e+04\r\n p6 = -1.069e+04\r\n \r\n def f(t):\r\n return p1*t**5 + p2*t**4 + p3*t**3 + p4*t**2 + p5*t**1 + p6\r\n \r\n import matplotlib.pyplot as plt\r\n x = np.arange(2,7,.1)\r\n y = [f(t) for t in x]\r\n #print(x)\r\n #print(y)\r\n plt.figure(figsize=(10,6))\r\n plt.ylim(1200,3250)\r\n plt.plot(x,y)\r\n plt.scatter(x[10],y[10], marker=\"d\", lw = 5, color = 'blue')\r\n plt.scatter(x[12],y[12], marker=\"*\", lw = 5, color = 'r')\r\n plt.scatter(x[43],y[43], marker=\"o\", lw = 5, color = 'green')\r\n plt.grid(color='r', linestyle='-', linewidth=.1, axis='x')\r\n plt.text(x=2.1, y=1556, s='k-median', color = 'blue')\r\n plt.text(x=3.1, y=1356, s='Global minimum', color = 'r')\r\n plt.text(x=6.2, y=2276, s='k-means', color = 'green')\r\n plt.ylabel('Cost')\r\n plt.xticks([1,2,3,4,5,6,7],[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"])\r\n 
#print(np.array(y).argmin())\r\n \r\n"
] | [
[
"matplotlib.pyplot.scatter",
"numpy.arange",
"matplotlib.pyplot.ylim",
"numpy.linalg.norm",
"matplotlib.pyplot.plot",
"pandas.pivot_table",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.text",
"numpy.zeros",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
] |
sarrrrry/M2Det | [
"ab3b87f207953b2c128ef74b1f787525426b9105"
] | [
"logger.py"
] | [
"# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc \ntry:\n from StringIO import StringIO # Python 2.7\nexcept ImportError:\n from io import BytesIO # Python 3.x\n\n\nclass Logger(object):\n \n def __init__(self, log_dir):\n \"\"\"Create a summary writer logging to log_dir.\"\"\"\n self.writer = tf.summary.FileWriter(log_dir)\n\n def scalar_summary(self, tag, value, step):\n \"\"\"Log a scalar variable.\"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n\n def image_summary(self, tag, images, step):\n \"\"\"Log a list of images.\"\"\"\n\n img_summaries = []\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n height=img.shape[0],\n width=img.shape[1])\n # Create a Summary value\n img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=img_summaries)\n self.writer.add_summary(summary, step)\n \n def histo_summary(self, tag, values, step, bins=1000):\n \"\"\"Log a histogram of the tensor of values.\"\"\"\n\n # Create a histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill the fields of the histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values**2))\n\n # Drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n self.writer.add_summary(summary, step)\n self.writer.flush()\n"
] | [
[
"tensorflow.summary.FileWriter",
"numpy.min",
"numpy.max",
"tensorflow.Summary.Value",
"numpy.prod",
"tensorflow.HistogramProto",
"tensorflow.Summary",
"numpy.histogram",
"numpy.sum"
]
] |
zwy19/easydl | [
"0da972f25db53b63e5ab7109ad7baf79a8bdb12c"
] | [
"easydl/common/scheduler.py"
] | [
"import numpy as np\n\ndef inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=1000):\n '''\n change as initial_lr * (1 + gamma * min(1.0, iter / max_iter) ) ** (- power)\n as known as inv learning rate sheduler in caffe,\n see https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto\n\n the default gamma and power come from <Domain-Adversarial Training of Neural Networks>\n\n code to see how it changes(decays to %20 at %10 * max_iter under default arg)::\n\n from matplotlib import pyplot as plt\n\n ys = [inverseDecaySheduler(x, 1e-3) for x in range(10000)]\n xs = [x for x in range(10000)]\n\n plt.plot(xs, ys)\n plt.show()\n\n '''\n return initial_lr * ((1 + gamma * min(1.0, step / float(max_iter))) ** (- power))\n\n\ndef aToBSheduler(step, A, B, gamma=10, max_iter=10000):\n '''\n change gradually from A to B, according to the formula (from <Importance Weighted Adversarial Nets for Partial Domain Adaptation>)\n A + (2.0 / (1 + exp(- gamma * step * 1.0 / max_iter)) - 1.0) * (B - A)\n\n =code to see how it changes(almost reaches B at %40 * max_iter under default arg)::\n\n from matplotlib import pyplot as plt\n\n ys = [aToBSheduler(x, 1, 3) for x in range(10000)]\n xs = [x for x in range(10000)]\n\n plt.plot(xs, ys)\n plt.show()\n\n '''\n ans = A + (2.0 / (1 + np.exp(- gamma * step * 1.0 / max_iter)) - 1.0) * (B - A)\n return float(ans)"
] | [
[
"numpy.exp"
]
] |
liang-tool/pytext | [
"5ee6ac4c1b935849ec5d775fb7aa29917fb43096"
] | [
"pytext/trainers/trainer.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport itertools\nimport time\nfrom contextlib import ExitStack as contextlib_ExitStack\nfrom typing import Any, Iterable, List, Optional, Tuple\n\nimport torch\nfrom pytext.common.constants import BatchContext, Stage\nfrom pytext.config import PyTextConfig\nfrom pytext.config.component import (\n Component,\n ComponentType,\n create_optimizer,\n create_privacy_engine,\n create_scheduler,\n create_sparsifier,\n)\nfrom pytext.config.pytext_config import ConfigBase\nfrom pytext.data.data_handler import BatchIterator\nfrom pytext.metric_reporters import MetricReporter\nfrom pytext.models.distributed_model import DistributedModel\nfrom pytext.models.model import Model\nfrom pytext.optimizer import Adam, Optimizer, PrivacyEngine, learning_rates\nfrom pytext.optimizer.fp16_optimizer import FP16Optimizer, FP16OptimizerFairseq\nfrom pytext.optimizer.scheduler import Scheduler\nfrom pytext.optimizer.sparsifiers.sparsifier import Sparsifier\nfrom pytext.task.serialize import save\nfrom pytext.trainers.training_state import TrainingState\nfrom pytext.utils import cuda, distributed, precision, timing\n\n\nclass TrainerBase(Component):\n __COMPONENT_TYPE__ = ComponentType.TRAINER\n\n\ndef cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n \"\"\"Like itertools.cycle, but will call iter on the original iterable instead.\n This limits it to not be able to run on say raw generators, but also doesn't\n store a copy of the iterable in memory for repetition.\"\"\"\n while True:\n yield from iterator\n\n\ndef maybe_accumulate_gradients(exit_stack, model, index, sample_size):\n # index == sample_size - 1 represents the last backward pass\n if (\n cuda.DISTRIBUTED_WORLD_SIZE > 1\n and hasattr(model, \"no_sync\")\n and index < sample_size - 1\n ):\n \"\"\"\n Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),\n we want to accumulate gradients locally and only call all-reduce in the\n last backwards pass.\n \"\"\"\n exit_stack.enter_context(model.no_sync())\n\n if precision.FP16_ENABLED and index < sample_size - 1:\n \"\"\"\n Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),\n we want to accumulate gradients in FP16 parameters (e.g delay unscale)\n and only unscale to FP32 parameters after the last backward pass.\n \"\"\"\n exit_stack.enter_context(precision.delay_unscale())\n\n\nclass Trainer(TrainerBase):\n \"\"\"\n Base Trainer class that provide ways to\n 1 Train model, compute metrics against eval set and use the metrics for\n model selection.\n 2 Test trained model, compute and publish metrics against a blind test set.\n\n Attributes:\n epochs (int): Training epochs\n early_stop_after (int): Stop after how many epochs when the eval metric\n is not improving\n max_clip_norm (Optional[float]): Clip gradient norm if set\n report_train_metrics (bool): Whether metrics on training data should be\n computed and reported.\n target_time_limit_seconds (float): Target time limit for training in seconds. 
If\n the expected time to train another epoch exceeds this limit, stop training.\n \"\"\"\n\n class Config(ConfigBase):\n #: Training epochs\n epochs: int = 10\n #: Stop after how many epochs when the eval metric is not improving\n early_stop_after: int = 0\n #: Clip gradient norm if set\n max_clip_norm: Optional[float] = None\n #: Whether metrics on training data should be computed and reported.\n report_train_metrics: bool = True\n #: Target time limit for training, default (None) to no time limit.\n target_time_limit_seconds: Optional[int] = None\n #: Whether to do evaluation and model selection based on it.\n do_eval: bool = True\n #: if do_eval, do we load the best model state dict after training or just\n # use the latest model state\n load_best_model_after_train: bool = True\n #: Number of samples for logging training progress.\n num_samples_to_log_progress: int = 1000\n #: Number of forward & backward per batch before update gradients, the\n #: actual_batch_size = batch_size x num_accumulated_batches\n num_accumulated_batches: int = 1\n #: Define epoch as a fixed number of batches. Subsequent epochs will continue\n #: to iterate through the data, cycling through it when they reach the end.\n #: If not set, use exactly one pass through the dataset as one epoch.\n #: This configuration only affects the train epochs, test and eval\n #: will always test their entire datasets.\n num_batches_per_epoch: Optional[int] = None\n #: config for optimizer, used in parameter update\n optimizer: Optimizer.Config = Adam.Config()\n scheduler: Optional[Scheduler.Config] = None\n sparsifier: Optional[Sparsifier.Config] = None\n #: Define arguments for fp16 training. A fp16_optimizer will be created\n #: and wraps the original optimizer, which will scale loss during\n #: backward and master weight will be maintained on original optimizer.\n #: https://arxiv.org/abs/1710.03740\n fp16_args: FP16Optimizer.Config = FP16OptimizerFairseq.Config()\n # PrivacyEngine related args\n privacy_engine: Optional[PrivacyEngine.Config] = None\n use_tensorboard: bool = False\n\n def __init__(self, config: Config, model: torch.nn.Module):\n if config.early_stop_after > 0:\n assert config.do_eval, \"can't do early stopping when not running evalution\"\n\n if precision.FP16_ENABLED:\n self.optimizer: torch.optim.Optimizer = create_optimizer(\n config.fp16_args,\n model,\n config.optimizer,\n config.num_accumulated_batches,\n )\n else:\n self.optimizer: torch.optim.Optimizer = create_optimizer(\n config.optimizer, model\n )\n self.privacy_engine: PrivacyEngine = (\n create_privacy_engine(config.privacy_engine, model, self.optimizer)\n if config.privacy_engine\n else None\n )\n\n self.scheduler: torch.optim.lr_scheduler = (\n create_scheduler(config.scheduler, self.optimizer)\n if config.scheduler\n else Scheduler()\n )\n self.sparsifier: Sparsifier = (\n create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()\n )\n self.config = config\n\n @classmethod\n def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):\n return cls(config, model)\n\n @timing.time(\"Trainer.test\")\n def test(self, test_iter, model, metric_reporter: MetricReporter):\n state = TrainingState(stage=Stage.TEST, model=model, epoch=1)\n if cuda.CUDA_ENABLED:\n state.model.cuda()\n state.model.eval()\n with torch.no_grad():\n return self.run_epoch(state, test_iter, metric_reporter)\n\n @timing.time(\"pre-training\")\n def set_up_training(self, state: TrainingState, training_data: BatchIterator):\n if 
cuda.CUDA_ENABLED:\n state.model.cuda()\n state.scheduler.prepare(training_data, self.config.epochs)\n\n if cuda.DISTRIBUTED_WORLD_SIZE > 1:\n device_id = torch.cuda.current_device()\n state.model = DistributedModel(\n module=state.model,\n device_ids=[device_id],\n output_device=device_id,\n broadcast_buffers=False,\n find_unused_parameters=state.model.find_unused_parameters,\n process_group=distributed._round_robin_process_group,\n )\n state.start_time = time.time()\n\n if self.config.num_batches_per_epoch:\n # Set the training_data iterator to cycle, so it will never run out,\n # but rather after reaching the end will loop back to the beginning.\n training_data = cycle(training_data)\n return training_data\n\n @timing.time(\"zero gradients\")\n def zero_grads(self, state):\n if state.stage != Stage.TRAIN:\n return\n state.optimizer.zero_grad()\n\n @timing.time(\"backprop\")\n def backprop(self, state, loss):\n if state.stage != Stage.TRAIN:\n return\n\n with timing.time(\"loss.backward\"):\n state.optimizer.backward(loss)\n\n @timing.time(\"optimizer\")\n def optimizer_step(self, state):\n if state.stage != Stage.TRAIN:\n return\n\n try:\n grad_norm = state.optimizer.clip_grad_norm(\n self.config.max_clip_norm, state.model\n )\n except OverflowError as e:\n print(f\"Gradient overflow. Skipping step, {e}\")\n return None\n\n state.scheduler.step_batch()\n with timing.time(\"optimizer.step\"):\n state.optimizer.step()\n\n state.step_counter += 1\n # grad_norm could be used to check grads sync in distributed training\n return grad_norm\n\n @timing.time(\"sparsifier\")\n def sparsification_step(self, state):\n # sparsification only if sparifier is used\n if not self.config.sparsifier:\n return\n\n self.sparsifier.sparsify(state)\n\n def continue_training(self, state: TrainingState) -> bool:\n # Are we done?\n if state.epoch >= self.config.epochs:\n return False\n\n # Check whether the model has improved recently enough\n # Only do this if we're bothering to evaluate the model\n if self.config.do_eval and state.epochs_since_last_improvement >= (\n self.config.early_stop_after or float(\"inf\")\n ):\n print(\n f\"Worker {state.rank}: Eval metric hasn't changed for \"\n + f\"{state.epochs_since_last_improvement} epochs. 
Stopping now.\"\n )\n return False\n\n # Check whether we think the next epoch will put us over the configured\n # time limit.\n epochs_run = state.epoch + 1\n time_elapsed = time.time() - state.start_time\n mean_epoch_time = time_elapsed / epochs_run\n expected_next_epoch_time = time_elapsed + mean_epoch_time\n target_time_limit = (\n float(\"inf\")\n if self.config.target_time_limit_seconds is None\n else self.config.target_time_limit_seconds\n )\n if expected_next_epoch_time > target_time_limit:\n print(\n f\"Worker {state.rank}: Stopping training after {epochs_run} epochs \"\n f\"and {int(time_elapsed)} seconds, due to the target max training \"\n f\"time of {self.config.target_time_limit_seconds} seconds.\"\n )\n return False\n\n return True\n\n def move_state_dict_to_cpu(self, state_dict):\n for key, maybe_parameter in state_dict.items():\n if isinstance(maybe_parameter, torch.Tensor):\n state_dict[key] = maybe_parameter.cpu()\n else:\n self.move_state_dict_to_cpu(maybe_parameter)\n return state_dict\n\n def move_state_dict_to_gpu(self, state_dict):\n for key, maybe_parameter in state_dict.items():\n if isinstance(maybe_parameter, torch.Tensor):\n state_dict[key] = maybe_parameter.cuda()\n else:\n self.move_state_dict_to_gpu(maybe_parameter)\n return state_dict\n\n def update_best_model(\n self, state: TrainingState, train_config: PyTextConfig, eval_metric\n ):\n # This should be updated by all workers so they agree on when to stop training\n # when `early_stop_after` is specified.\n state.epochs_since_last_improvement = 0\n state.best_model_metric = eval_metric\n print(f\"Found a better model!\")\n\n # Only one worker should save checkpoints\n # unless doing iterative pruning\n if state.rank != 0 and not self.sparsifier.save_model_state_for_all_rank():\n return\n\n model_state = state.model.state_dict()\n # save to cpu to avoid multiple model copies in gpu memory\n if cuda.CUDA_ENABLED:\n self.move_state_dict_to_cpu(model_state)\n state.best_model_state = model_state\n\n @timing.time(\"save checkpoint\")\n def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:\n # Only one worker should save checkpoints\n if state.rank != 0:\n return\n\n if train_config.save_module_checkpoints or train_config.save_all_checkpoints:\n # saves per-epoch sub-modules when save_all_checkpoints or\n # save_module_checkpoints is enabled\n state.model.save_modules(\n base_path=train_config.modules_save_dir, suffix=f\"-ep{state.epoch}\"\n )\n if state.epochs_since_last_improvement == 0:\n # state.epochs_since_last_improvement == 0 means found a better\n # model in current epoch, thus update best model's sub-modules\n state.model.save_modules(base_path=train_config.modules_save_dir)\n\n # next to add new config and implementation of frequency on checkpointing\n if train_config.save_all_checkpoints:\n return save(\n config=train_config,\n model=state.model,\n meta=None,\n tensorizers=None,\n training_state=state,\n identifier=str(state.epoch),\n )\n\n def load_best_model(self, state: TrainingState):\n if cuda.CUDA_ENABLED:\n # Move current model to CPU to avoid multiple models in GPU memory\n state.model.cpu()\n state.model.load_state_dict(state.best_model_state)\n # Move model back to GPU\n state.model.cuda()\n else:\n state.model.load_state_dict(state.best_model_state)\n\n def train(\n self,\n training_data: BatchIterator,\n eval_data: BatchIterator,\n model: Model,\n metric_reporter: MetricReporter,\n train_config: PyTextConfig,\n rank: int = 0,\n ) -> Tuple[torch.nn.Module, 
Any]:\n \"\"\"\n Train and eval a model, the model states will be modified.\n Args:\n train_iter (BatchIterator): batch iterator of training data\n eval_iter (BatchIterator): batch iterator of evaluation data\n model (Model): model to be trained\n metric_reporter (MetricReporter): compute metric based on training\n output and report results to console, file.. etc\n train_config (PyTextConfig): training config\n training_result (Optional): only meaningful for Hogwild training. default\n is None\n rank (int): only used in distributed training, the rank of the current\n training thread, evaluation will only be done in rank 0\n\n Returns:\n model, best_metric: the trained model together with the best metric\n \"\"\"\n state = TrainingState(\n model=model,\n optimizer=self.optimizer,\n scheduler=self.scheduler,\n sparsifier=self.sparsifier,\n privacy_engine=self.privacy_engine,\n rank=rank,\n )\n return self.train_from_state(\n state, training_data, eval_data, metric_reporter, train_config\n )\n\n @timing.time(\"Trainer.train_from_state\")\n def train_from_state(\n self,\n state: TrainingState,\n training_data: BatchIterator,\n eval_data: BatchIterator,\n metric_reporter: MetricReporter,\n train_config: PyTextConfig,\n ) -> Tuple[torch.nn.Module, Any]:\n \"\"\"\n Train and eval a model from a given training state will be modified.\n This function iterates epochs specified in config, and for each epoch do:\n\n 1. Train model using training data, aggregate and report training results\n 2. Adjust learning rate if scheduler is specified\n 3. Evaluate model using evaluation data\n 4. Calculate metrics based on evaluation results and select best model\n\n Args:\n training_state (TrainingState): contrains stateful information to be\n able to restore a training job\n train_iter (BatchIterator): batch iterator of training data\n eval_iter (BatchIterator): batch iterator of evaluation data\n model (Model): model to be trained\n metric_reporter (MetricReporter): compute metric based on training\n output and report results to console, file.. 
etc\n train_config (PyTextConfig): training config\n\n Returns:\n model, best_metric: the trained model together with the best metric\n \"\"\"\n training_data = self.set_up_training(state, training_data)\n model = state.model\n rank = state.rank\n trainable_params = sum(\n p.numel() for p in state.model.parameters() if p.requires_grad\n )\n print(f\"Model :{model}\")\n print(f\"Num trainable parameters: {trainable_params}\")\n\n self.sparsifier.initialize(\n self, state, eval_data, metric_reporter, train_config\n )\n\n while self.continue_training(state):\n self.sparsifier.op_pre_epoch(self, state)\n state.epoch += 1\n state.epochs_since_last_improvement += 1\n lrs = learning_rates(state.optimizer)\n print(f\"\\nWorker {state.rank} starting epoch {state.epoch}\")\n print(f\"Learning rate(s): {', '.join(map(str, lrs))}\")\n\n with timing.time(\"train epoch\"):\n state.stage = Stage.TRAIN\n state.model.train()\n print(f\"start training epoch {state.epoch}\")\n epoch_data = training_data\n if self.config.num_batches_per_epoch:\n # We want to limit the number of batches in the epoch;\n # equivalent to epoch_data[:num_batches_per_epoch] for iterators.\n # In this case we set the training data iterator to cycle earlier\n # in the training process, so when it reaches the end it will\n # loop back to the beginning.\n epoch_data = itertools.islice(\n epoch_data, self.config.num_batches_per_epoch\n )\n self.run_epoch(state, epoch_data, metric_reporter)\n\n if not self.config.do_eval:\n continue\n\n with timing.time(\"eval epoch\"):\n state.stage = Stage.EVAL\n model.eval(Stage.EVAL)\n print(f\"start evaluating epoch {state.epoch}\")\n with torch.no_grad():\n eval_metric = self.run_epoch(state, eval_data, metric_reporter)\n\n # Step the learning rate scheduler(s)\n assert eval_metric is not None\n state.scheduler.step_epoch(\n metrics=metric_reporter.get_model_select_metric(eval_metric),\n epoch=state.epoch,\n )\n\n # Did we train a better model?\n better_model = metric_reporter.compare_metric(\n eval_metric, state.best_model_metric\n )\n if better_model:\n self.update_best_model(state, train_config, eval_metric)\n if better_model or train_config.save_all_checkpoints:\n self.save_checkpoint(state, train_config)\n\n if self.optimizer.finalize():\n should_update_model = True\n eval_metric = None\n if self.config.do_eval:\n state.stage = Stage.EVAL\n model.eval(Stage.EVAL)\n print(f\"start evaluating finalized state\")\n with torch.no_grad():\n eval_metric = self.run_epoch(state, eval_data, metric_reporter)\n should_update_model = metric_reporter.compare_metric(\n eval_metric, state.best_model_metric\n )\n if should_update_model:\n self.update_best_model(state, train_config, eval_metric)\n if should_update_model or train_config.save_all_checkpoints:\n self.save_checkpoint(state, train_config)\n # Only bother loading the best model for master worker\n if (\n rank == 0\n and state.best_model_state is not None\n and self.config.load_best_model_after_train\n ):\n self.load_best_model(state)\n\n return state.model, state.best_model_metric\n\n @timing.report_snapshot\n def run_epoch(\n self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter\n ):\n # This method is due for some refactoring, pushing it off because it interacts\n # with the metric reporter too much. 
Much of the logic here either changes in\n # the NewTaskTrainer or should change with a better metric reporter design.\n report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics\n model = state.model\n samples = []\n is_data_empty = True\n\n \"\"\"\n Sometimes, a batch of inputs is too large to fit into GPU, which has to\n be split into several micro-batches. However, to improve efficiency,\n it would be helpful to only apply params/gradients sync at original batch\n boundaries instead of micro-batch boundaries.\n num_accumulated_batches specified the number of accumulating gradients\n locally before sync gradients, total training_batch_size =\n train_batch_size x num_accumulated_batches and it will improve the system\n performance by reduce the total network transfer bytes.\n \"\"\"\n for sample in enumerate(data):\n is_data_empty = False\n samples.append(sample)\n if (\n state.stage != Stage.TRAIN\n or len(samples) == self.config.num_accumulated_batches\n ):\n self.run_step(samples, state, metric_reporter, report_metric)\n samples = []\n if samples:\n self.run_step(samples, state, metric_reporter, report_metric)\n samples = []\n\n metrics = None\n if report_metric:\n if is_data_empty:\n error_msg = (\n f\"Trying to report metric for stage {state.stage}, but no data was \"\n \"found. Either disable metric reporting for this stage, pass in \"\n \"non-empty data, or see if data fields are misnamed (warnings \"\n \"would appear in preceding stdout logs).\"\n )\n raise ValueError(error_msg)\n\n with timing.time(\"report metrics\"):\n metrics = metric_reporter.report_metric(\n model,\n state.stage,\n state.epoch,\n print_to_channels=(state.rank == 0),\n optimizer=getattr(\n state, \"optimizer\", None\n ), # optimizer is not present during test\n )\n else:\n metric_reporter._reset()\n\n if state.rank == 0 and self.config.sparsifier:\n current_sparsity = self.sparsifier.get_current_sparsity(state.model)\n print(f\"sparsity in the model: {current_sparsity}\")\n\n return metrics\n\n @timing.time(\"run_step\")\n def run_step(\n self,\n samples: List[Any],\n state: TrainingState,\n metric_reporter: MetricReporter,\n report_metric: bool,\n ):\n sample_size = len(samples)\n assert sample_size <= self.config.num_accumulated_batches\n\n model = state.model\n self.zero_grads(state)\n for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):\n with contextlib_ExitStack() as exit_stack:\n maybe_accumulate_gradients(exit_stack, model, idx, sample_size)\n # pass context to model to use in forward call if needed\n model.contextualize(context)\n with timing.time(\"model.forward\"):\n logits = model(*inputs)\n\n with timing.time(\"compute loss\"):\n loss = precision.maybe_float(\n model.get_loss(logits, targets, context)\n )\n if BatchContext.IGNORE_LOSS in context:\n loss *= 0\n elif sample_size > 1:\n # gradients averaged per batch and accumulated across samples.\n # divide sample_size to let gradients averaged per example\n loss = loss / sample_size\n\n self.backprop(state, loss)\n\n if report_metric:\n with timing.time(\"get pred\"):\n preds, scores = model.get_pred(\n logits, targets, context, state.stage, *inputs\n )\n\n with timing.time(\"add metrics\"):\n metric_reporter.add_batch_stats(\n batch_id, preds, targets, scores, loss.item(), inputs, **context\n )\n\n if batch_id % self.config.num_samples_to_log_progress == 0:\n print(\n f\"Running batch {batch_id} for epoch {state.epoch} \\\n in {state.stage} stage\",\n flush=True,\n )\n # update gradients after len(samples) 
forward & backward\n self.optimizer_step(state)\n with timing.time(\"add gradients\"):\n if report_metric and state.stage == Stage.TRAIN:\n metric_reporter.add_gradients(state.model)\n self.sparsification_step(state)\n\n\nclass TaskTrainer(Trainer):\n __EXPANSIBLE__ = True\n\n class Config(Trainer.Config):\n \"\"\"Make mypy happy\"\"\"\n\n @timing.time(\"run_step\")\n def run_step(\n self,\n samples: List[Any],\n state: TrainingState,\n metric_reporter: MetricReporter,\n report_metric: bool,\n ):\n \"\"\"Our run_step is a bit different, because we're wrapping the model forward\n call with model.train_batch, which arranges tensors and gets loss, etc.\n\n Whenever \"samples\" contains more than one mini-batch (sample_size > 1),\n we want to accumulate gradients locally and only call all-reduce in the\n last backwards pass.\n \"\"\"\n sample_size = len(samples)\n assert sample_size <= self.config.num_accumulated_batches\n\n model = state.model\n self.zero_grads(state)\n for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):\n with contextlib_ExitStack() as exit_stack:\n # enter ddp no_sync context and fp16 delay_scale context if needed\n maybe_accumulate_gradients(exit_stack, model, idx, sample_size)\n with timing.time(\"model.train_batch\"):\n loss, metric_data = model.train_batch(model, batch, state)\n if sample_size > 1:\n # gradients averaged per batch and accumulated across samples.\n # divide sample_size to let gradients averaged per example\n loss = loss / sample_size\n self.backprop(state, loss)\n\n if report_metric:\n with timing.time(\"add metrics\"):\n metric_reporter.add_batch_stats(\n batch_id,\n *metric_data,\n # TODO merge this step into add_batch_stats once all data\n # migration is done\n **metric_reporter.batch_context(raw_batch, batch),\n )\n if batch_id % self.config.num_samples_to_log_progress == 0:\n metric_reporter.report_realtime_metric(state.stage)\n # update gradients after #len(samples) forward & backward\n self.optimizer_step(state)\n with timing.time(\"add gradients\"):\n if report_metric and state.stage == Stage.TRAIN:\n metric_reporter.add_gradients(state.model)\n self.sparsification_step(state)\n\n def _prepare_scheduler(self, training_batches, scheduler=None):\n \"\"\"Batch based schedulers require knowing the number of batches in\n the data. We're not supporting that yet with the Data api, need to figure out\n how to expose this info or restructure batch-based schedulers to not need it.\"\"\"\n if scheduler.batch_based_schedulers:\n raise Exception(\"New tasks don't yet support batch-based scheduling\")\n return scheduler\n"
] | [
[
"torch.no_grad",
"torch.cuda.current_device"
]
] |
yakouyang/Multilevel_Wavelet_Decomposition_Network_Pytorch | [
"62b28433abeea2e773991197341a2d907ea478f1"
] | [
"model.py"
] | [
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F \nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport numpy as np \nimport h5py\nfrom utils import ToVariable\n\nclass Wavelet_LSTM(nn.Module):\n def __init__(self,seq_len, hidden_size,output_size):\n super(Wavelet_LSTM,self).__init__()\n self.seq_len = seq_len\n self.hidden_size = hidden_size\n self.output_size = output_size\n\n self.mWDN1_H = nn.Linear(seq_len,seq_len)\n self.mWDN1_L = nn.Linear(seq_len,seq_len)\n self.mWDN2_H = nn.Linear(int(seq_len/2),int(seq_len/2))\n self.mWDN2_L = nn.Linear(int(seq_len/2),int(seq_len/2))\n self.a_to_x = nn.AvgPool1d(2)\n self.sigmoid = nn.Sigmoid()\n self.lstm_xh1 = nn.LSTM(1,hidden_size,batch_first=True)\n self.lstm_xh2 = nn.LSTM(1,hidden_size,batch_first=True)\n self.lstm_xl2 = nn.LSTM(1,hidden_size,batch_first=True)\n self.output = nn.Linear(hidden_size,output_size)\n\n self.l_filter = [-0.0106,0.0329,0.0308,-0.187,-0.028,0.6309,0.7148,0.2304]\n self.h_filter = [-0.2304,0.7148,-0.6309,-0.028,0.187,0.0308,-0.0329,-0.0106]\n\n self.cmp_mWDN1_H = ToVariable(self.create_W(seq_len,False,is_comp=True))\n self.cmp_mWDN1_L = ToVariable(self.create_W(seq_len,True,is_comp=True))\n self.cmp_mWDN2_H = ToVariable(self.create_W(int(seq_len/2),False,is_comp=True))\n self.cmp_mWDN2_L = ToVariable(self.create_W(int(seq_len/2),True,is_comp=True))\n\n self.mWDN1_H.weight = torch.nn.Parameter(ToVariable(self.create_W(seq_len,False)))\n self.mWDN1_L.weight = torch.nn.Parameter(ToVariable(self.create_W(seq_len,True)))\n self.mWDN2_H.weight = torch.nn.Parameter(ToVariable(self.create_W(int(seq_len/2),False)))\n self.mWDN2_L.weight = torch.nn.Parameter(ToVariable(self.create_W(int(seq_len/2),True)))\n\n def forward(self,input,h1,c1,h2,c2,h3,c3):\n input = input.view(input.shape[0],input.shape[1])\n ah_1 = self.sigmoid(self.mWDN1_H(input))\n al_1 = self.sigmoid(self.mWDN1_L(input))\n xh_1 = self.a_to_x(ah_1.view(ah_1.shape[0],1,-1))\n xl_1 = self.a_to_x(al_1.view(al_1.shape[0],1,-1))\n \n ah_2 = self.sigmoid(self.mWDN2_H(xl_1))\n al_2 = self.sigmoid(self.mWDN2_L(xl_1))\n \n xh_2 = self.a_to_x(ah_2)\n xl_2 = self.a_to_x(al_2)\n\n xh_1 = xh_1.transpose(1,2)\n xh_2 = xh_2.transpose(1,2)\n xl_2 = xl_2.transpose(1,2)\n\n level1_lstm,(h1,c1) = self.lstm_xh1(xh_1,(h1,c1))\n level2_lstm_h,(h2,c2) = self.lstm_xh2(xh_2,(h2,c2))\n level2_lstm_l,(h3,c3) = self.lstm_xl2(xl_2,(h3,c3))\n\n output = self.output(torch.cat((level1_lstm,level2_lstm_h,level2_lstm_l), 1))\n #output = output.view(-1,1)\n return output,h1,c1,h2,c2,h3,c3\n\n def init_state(self,batch_size):\n h1 = Variable(torch.zeros(1,batch_size,self.hidden_size)).double()\n c1 = Variable(torch.zeros(1,batch_size,self.hidden_size)).double()\n\n h2 = Variable(torch.zeros(1,batch_size,self.hidden_size)).double()\n c2 = Variable(torch.zeros(1,batch_size,self.hidden_size)).double()\n\n h3 = Variable(torch.zeros(1,batch_size,self.hidden_size)).double()\n c3 = Variable(torch.zeros(1,batch_size,self.hidden_size)).double()\n return h1,c1,h2,c2,h3,c3\n\n def create_W(self,P,is_l,is_comp=False):\n if is_l : \n filter_list = self.l_filter\n else:\n filter_list = self.h_filter\n\n list_len = len(filter_list)\n\n max_epsilon = np.min(np.abs(filter_list))\n if is_comp:\n weight_np = np.zeros((P,P))\n else:\n weight_np = np.random.randn(P,P)*0.1*max_epsilon\n\n for i in range(0,P):\n filter_index = 0\n for j in range(i,P):\n if filter_index < len(filter_list):\n 
weight_np[i][j] = filter_list[filter_index]\n filter_index += 1\n return weight_np"
] | [
[
"numpy.abs",
"torch.nn.LSTM",
"torch.cat",
"torch.zeros",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"numpy.random.randn",
"numpy.zeros",
"torch.nn.AvgPool1d"
]
] |
continental/hybrid_learning | [
"37b9fc83d7b14902dfe92e0c45071c150bcf3779"
] | [
"test/test_datasets/test_dataset_base.py"
] | [
"\"\"\"Tests for the basic dataset manipulation methods.\"\"\"\n# Copyright (c) 2020 Continental Automotive GmbH\n\n# Pylint seems to think the code libraries are external:\n# pylint: disable=wrong-import-order\n\nimport pytest\nimport torch\nfrom PIL import Image\n\nfrom hybrid_learning.datasets import cross_validation_splits\nfrom hybrid_learning.datasets import data_visualization as datavis\n\n\ndef test_cross_validation_splits():\n \"\"\"Test whether cross_validation_splits are calculated correctly.\"\"\"\n # Sample datasets and settings to split;\n # Format: List of tuples of\n # ({args for cross_validation_splits}, [tuples len(train), len(test)])\n samples = [\n ({\"train_val_data\": list(range(50)), \"num_splits\": 2},\n [(25, 25), (25, 25)]),\n ({\"train_val_data\": list(range(50)), \"num_splits\": 3},\n [(33, 17), (33, 17), (34, 16)]),\n ({\"train_val_data\": list(range(50)), \"num_splits\": 3},\n [(33, 17), (33, 17), (34, 16)]),\n ]\n\n for args, lens in samples:\n splits = cross_validation_splits(**args)\n result_lens = [(len(train), len(test)) for train, test in splits]\n assert list(lens) == result_lens\n\n for invalid_num_splits in (-1, 0, 1):\n with pytest.raises(ValueError):\n cross_validation_splits(list(range(50)),\n num_splits=invalid_num_splits)\n\n\ndef test_apply_mask():\n \"\"\"Does mask application fulfill basic properties?\"\"\"\n size = (400, 300)\n\n # If mask and image size do not match, error is raised\n img = Image.new(mode='RGB', size=size)\n mask = Image.new(mode='L', size=(size[0], size[1] + 100))\n with pytest.raises(ValueError):\n datavis.apply_mask(img, mask)\n\n # Original image and mask are not overwritten\n img = Image.new(mode='RGB', size=size, color='red')\n mask = Image.new(mode='L', size=size, color=100)\n masked_img = datavis.apply_mask(img, mask)\n assert id(masked_img) != id(img) and id(masked_img) != id(mask)\n\n # If black mask is applied, nothing happens.\n img = Image.new(mode='RGB', size=size, color='red')\n mask = Image.new(mode='L', size=size)\n masked_img = datavis.apply_mask(img, mask)\n assert masked_img == img\n\n # If alpha == 0, nothing happens\n img = Image.new(mode='RGB', size=size, color='red')\n mask = Image.new(mode='L', size=size, color=100)\n masked_img = datavis.apply_mask(img, mask, alpha=0)\n assert masked_img == img\n\n # If alpha == 1, image sections are overwritten\n img = Image.new(mode='RGB', size=size,\n color='red') # color different from green\n mask = Image.new(mode='L', size=size, color='white') # mask everything\n masked_img = datavis.apply_mask(img, mask, alpha=1)\n assert masked_img == Image.new(mode='RGB', size=size,\n color='green') # all masked 100%\n\n\ndef test_tensor_from_img():\n \"\"\"Test basic properties of tensor to monochrome image conversion.\"\"\"\n width: int = 400\n height: int = 300\n # pylint: disable=no-member\n img_t: torch.Tensor = torch.ones((height, width))\n # pylint: enable=no-member\n img: Image.Image = datavis.to_monochrome_img(img_t)\n\n # Correct size\n assert img.size == (width, height)\n\n # Correct mode\n assert img.mode == 'L'\n\n # Correct color\n assert img == Image.new(mode='L', size=(width, height), color='white')\n\n# Model hashing\n# -------------\n# from torchvision.models.alexnet import alexnet\n# from code.datasets.base import model_parameter_hash\n# def test_model_parameter_hash(tmpdir):\n# \"\"\"Test basic properties of model hashing.\"\"\"\n# model = alexnet(pretrained=True)\n# model_description = '0xa4d71c890adc03bed2ab88af5c8d9afa'\n# m_fname = 
os.path.join(tmpdir, 'm.pkl')\n#\n# # Test some exemplary full length hash\n# assert model_parameter_hash(model) == model_description\n#\n# # Test truncation option\n# hash_len = 8\n# assert (model_parameter_hash(model, hash_len=hash_len) ==\n# model_description[:hash_len + 2])\n#\n# # Two identical models loaded from different files should yield same hash\n# hashes = []\n# for _ in range(2):\n# if os.path.isfile(m_fname):\n# os.remove(m_fname)\n# torch.save(model, m_fname)\n# mod = torch.load(m_fname)\n# hashes.append(model_parameter_hash(mod))\n# assert len(hashes) == 2\n# assert hashes[0] == hashes[1], \\\n# (\"Identical models did not yield the same hash: \"\n# \"Got {}, {}\").format(*hashes)\n#\n# # Two models loaded from the same file should yield the same result\n# hashes = []\n# if os.path.isfile(m_fname):\n# os.remove(m_fname)\n# torch.save(model, m_fname)\n# for _ in range(2):\n# hashes.append(model_parameter_hash(torch.load(m_fname)))\n# assert len(hashes) == 2\n# assert hashes[0] == hashes[1], \\\n# (\"Models loaded from the same file did not yield the same hash: \"\n# \"Got {}, {}\").format(*hashes)\n#\n# # The hash does not change if saved and loaded again\n# assert hashes[0] == model_description, \\\n# (\"Hash changed when saved and reloaded: \"\n# \"From {} to {}\").format(model_description, hashes[0])\n"
] | [
[
"torch.ones"
]
] |
RobRomijnders/bayes_nn | [
"f0052fd6610fb9bb00344b52745ca47bcc0cd453"
] | [
"bayes_nn/training_lang.py"
] | [
"import torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom bayes_nn.data_loader import Dataloader\nfrom torch.autograd import Variable\nfrom bayes_nn.util.util import to_tensor\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom os.path import join\nimport os\n\nfrom bayes_nn.model.model_definition import Net\nfrom math import sqrt\n\nfrom bayes_nn import conf\nimport time\n\ndataloader = Dataloader('data/raw')\nconf.num_samples = dataloader.data['X_train'].shape[0]\n\n\ndef langevin(model, epsilon):\n \"\"\"\n Implements both weight decay and the Langevin dynamics\n\n In Langevin dynamics, we add noise to the gradient. After the burn in phase,\n this makes steps from SGD actually samples from the posterior\n :param model:\n :param epsilon:\n :return:\n \"\"\"\n\n for name, param in dict(model.named_parameters()).items():\n # Add weight decay\n param.grad.data.add_(conf.weight_decay*param.data)\n\n # Inject the noise on the gradient\n param.grad.data.add_((torch.randn(param.grad.data.size()).cuda() * sqrt(epsilon)))\n return\n\n\ndef test(step, model):\n # model.eval()\n test_loss = 0\n correct = 0\n num_batches = 100\n num_samples = dataloader.data['X_test'].shape[0]\n for batch_idx in range(num_batches):\n # Get a new output\n data, target = to_tensor(*dataloader.sample_NCHW(dataset='test', batch_size=conf.batch_size))\n data, target = Variable(data, volatile=True), Variable(target)\n output = model(data)\n\n # Evaluate performance on the output\n test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= num_samples\n acc = correct/(num_batches*conf.batch_size)\n print('\\n TEST step %i: average loss %5.3f and accuracy %5.3f \\n' % (step, test_loss, acc))\n\n\ndef training_lang(save_path):\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n if os.path.exists(join(save_path, 'model0.pyt')):\n # Only train if there is no saved model in the save_path\n return\n\n # Make a new model and <maybe> put it to GPU\n t1 = time.time()\n model = Net()\n if conf.CUDA:\n model.cuda()\n\n # Set up optimizer with correct learning rate decay for the Langevin sampling\n optimizer = optim.SGD(model.parameters(), lr=conf.lr)\n\n def lambda_decay(step):\n if step < 1000:\n return 1.0\n else:\n return (1 + step/1000)**(-0.99)\n\n scheduler = LambdaLR(optimizer, lr_lambda=lambda_decay)\n steps_per_epoch = conf.sample_every\n max_steps = max((conf.burn_in + conf.num_runs*steps_per_epoch, conf.max_steps))\n\n num_saved = 0\n\n with open(join(save_path, 'weights.csv'), 'w') as f:\n # For the MC evaluation, the output must be weighted by the epsilons, so we save them in the loop\n for step in range(max_steps):\n data, target = to_tensor(*dataloader.sample_NCHW())\n data, target = Variable(data), Variable(target)\n\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n\n # This is where the Langevin method is implemented\n epsilon = 2 * conf.batch_size * optimizer.param_groups[0]['lr'] / conf.num_samples\n langevin(model, epsilon)\n optimizer.step()\n scheduler.step()\n\n if step % conf.log_interval == 0:\n print('At step %5i/%5i loss %5.3f, current epsilon %5.3e' % (step, max_steps, loss.data[0], epsilon))\n\n if step % steps_per_epoch == steps_per_epoch - 1:\n test(step, model)\n if step > conf.burn_in:\n current_save_path = join(save_path, 
'model%i.pyt' % num_saved)\n torch.save(model.state_dict(), current_save_path)\n f.write('%.10f\\n' % epsilon)\n print('saved model at %s' % current_save_path)\n num_saved += 1\n print(time.time()-t1)\n"
] | [
[
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.functional.nll_loss",
"torch.autograd.Variable"
]
] |
papkov/pytorch-toolbelt | [
"71d03d907f93fa73fbfba5eb89d26ad801e47e03"
] | [
"tests/test_tta.py"
] | [
"from collections import defaultdict\n\nimport cv2\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import nn\n\nfrom pytorch_toolbelt.inference import tta\nfrom pytorch_toolbelt.utils.torch_utils import to_numpy\nfrom pytorch_toolbelt.zoo import resnet34_unet32\n\nskip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is not available\")\n\n\nclass NoOp(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, input):\n return input\n\n\nclass SumAll(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, input):\n return input.sum(dim=[1, 2, 3])\n\n\ndef test_d4_image2mask():\n x = torch.rand((4, 3, 224, 224))\n model = NoOp()\n\n output = tta.d4_image2mask(model, x)\n np.testing.assert_allclose(to_numpy(output), to_numpy(x), atol=1e-6, rtol=1e-6)\n\n\ndef test_d4_image2mask_v2():\n x = torch.rand((4, 3, 224, 224))\n x_a = tta.d4_image_augment(x)\n y = tta.d4_image_deaugment(x_a)\n\n np.testing.assert_allclose(to_numpy(y), to_numpy(x), atol=1e-6, rtol=1e-6)\n\n\[email protected]_grad()\n@skip_if_no_cuda()\ndef test_d4_speed():\n df = defaultdict(list)\n n = 100\n\n model = resnet34_unet32().cuda().eval()\n x = torch.rand((4, 3, 224, 224)).float().cuda()\n y1 = tta.d4_image2mask(model, x)\n y2 = tta.d4_image_deaugment(model(tta.d4_image_augment(x)))\n np.testing.assert_allclose(to_numpy(y1), to_numpy(y2), atol=1e-6, rtol=1e-6)\n\n for deterministic in [False, True]:\n for benchmark in [False, True]:\n for dtype in [torch.float16, torch.float32]:\n torch.cuda.empty_cache()\n torch.backends.cuda.deterministic = deterministic\n torch.backends.cuda.benchmark = benchmark\n\n model = resnet34_unet32().to(dtype).cuda().eval()\n\n speed_v1 = 0\n speed_v2 = 0\n for i in range(n):\n x = torch.rand((4, 3, 224, 224)).to(dtype).cuda(non_blocking=False)\n start = cv2.getTickCount()\n y = tta.d4_image2mask(model, x)\n v = y.sum().item()\n finish = cv2.getTickCount()\n speed_v1 += finish - start\n np.testing.assert_allclose(v, v, atol=1e-6, rtol=1e-6)\n\n for i in range(n):\n x = torch.rand((4, 3, 224, 224)).to(dtype).cuda(non_blocking=False)\n start = cv2.getTickCount()\n x_a = tta.d4_image_augment(x)\n x_a = model(x_a)\n y = tta.d4_image_deaugment(x_a)\n v = y.sum().item()\n finish = cv2.getTickCount()\n speed_v2 += finish - start\n np.testing.assert_allclose(v, v, atol=1e-6, rtol=1e-6)\n\n df[\"mode\"].append(\"fp16\" if dtype == torch.float16 else \"fp32\")\n df[\"deterministic\"].append(deterministic)\n df[\"benchmark\"].append(benchmark)\n df[\"d4_image2mask (ms)\"].append(1000.0 * speed_v1 / (cv2.getTickFrequency() * n))\n df[\"d4_augment (ms)\"].append(1000.0 * speed_v2 / (cv2.getTickFrequency() * n))\n\n import pandas as pd\n\n df = pd.DataFrame.from_dict(df)\n pd.set_option(\"display.max_columns\", None)\n pd.set_option(\"display.max_rows\", None)\n print(df)\n df.to_csv(\"tta_eval.csv\", index=False)\n\n\ndef test_fliplr_image2mask():\n x = torch.rand((4, 3, 224, 224))\n model = NoOp()\n\n output = tta.fliplr_image2mask(model, x)\n np.testing.assert_allclose(to_numpy(output), to_numpy(x), atol=1e-6, rtol=1e-6)\n\n\ndef test_d4_image2label():\n x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()\n model = SumAll()\n\n output = tta.d4_image2label(model, x)\n expected = int(x.sum())\n\n assert int(output) == expected\n\n\ndef test_fliplr_image2label():\n x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()\n 
model = SumAll()\n\n output = tta.fliplr_image2label(model, x)\n expected = int(x.sum())\n\n assert int(output) == expected\n\n\ndef test_fivecrop_image2label():\n x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()\n model = SumAll()\n\n output = tta.fivecrop_image2label(model, x, (2, 2))\n expected = ((1 + 2 + 5 + 6) + (3 + 4 + 7 + 8) + (9 + 0 + 3 + 4) + (1 + 2 + 5 + 6) + (6 + 7 + 0 + 1)) / 5\n\n assert int(output) == expected\n\n\ndef test_tencrop_image2label():\n x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2], [3, 4, 5, 6]]).unsqueeze(0).unsqueeze(0).float()\n model = SumAll()\n\n output = tta.tencrop_image2label(model, x, (2, 2))\n expected = (2 * ((1 + 2 + 5 + 6) + (3 + 4 + 7 + 8) + (9 + 0 + 3 + 4) + (1 + 2 + 5 + 6) + (6 + 7 + 0 + 1))) / 10\n\n assert int(output) == expected\n"
] | [
[
"torch.cuda.empty_cache",
"torch.tensor",
"torch.no_grad",
"torch.rand",
"torch.cuda.is_available",
"pandas.DataFrame.from_dict",
"pandas.set_option",
"numpy.testing.assert_allclose"
]
] |
facebookresearch/OTTER | [
"605d317f989c057c7141e727baf430663fe9e44f"
] | [
"loss/kl_div_loss.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\n\n\nclass KLDivLoss(nn.Module):\n \"\"\"\n Wrapped KLDivergence Loss with a learnable temperature.\n \"\"\"\n def __init__(self):\n super().__init__()\n self.logsoftmax = nn.LogSoftmax(dim=1)\n self.loss = nn.KLDivLoss(reduction='batchmean')\n self.T_s = nn.Parameter(torch.tensor(3.9, requires_grad=True))\n\n def forward(self, pred, target_prob):\n \"\"\"\n Pred is logits and target is probabilities.\n \"\"\"\n T_s = torch.clamp(torch.exp(self.T_s), min=1.0, max=100.0)\n pred_logprob = self.logsoftmax(pred * T_s)\n\n return self.loss(input=pred_logprob, target=target_prob)\n"
] | [
[
"torch.exp",
"torch.tensor",
"torch.nn.LogSoftmax",
"torch.nn.KLDivLoss"
]
] |
K-now978/pytorch-3dunet | [
"db9bddd1e924fa87faa7a04c4f9716a083c4c350"
] | [
"pytorch3dunet/datasets/dicom.py"
] | [
"import glob\nimport os\nfrom itertools import chain\nfrom multiprocessing import Lock\n\nimport dicom2nifti\nimport nibabel as nib\nimport numpy as np\n\nimport pytorch3dunet.augment.transforms as transforms\nfrom pytorch3dunet.datasets.utils import get_slice_builder, ConfigDataset, calculate_stats, sample_instances\nfrom pytorch3dunet.unet3d.utils import get_logger\n\nlogger = get_logger('HDF5Dataset')\nlock = Lock()\n\n\nclass StandardDicomDataset(ConfigDataset):\n\n\n def __init__(self, file_path,\n phase,\n slice_builder_config,\n transformer_config,\n mirror_padding=(16, 32, 32),\n raw_internal_path='raw',\n label_internal_path='label',\n weight_internal_path=None,\n instance_ratio=None,\n random_seed=0):\n \"\"\"\n :param file_path: path to nifti file from dicom file\n :param phase: 'train' for training, 'val' for validation, 'test' for testing; data augmentation is performed\n only during the 'train' phase\n :para'/home/adrian/workspace/ilastik-datasets/VolkerDeconv/train'm slice_builder_config: configuration of the SliceBuilder\n :param transformer_config: data augmentation configuration\n :param mirror_padding (int or tuple): number of voxels padded to each axis\n :param raw_internal_path (str or list): H5 internal path to the raw dataset\n :param label_internal_path (str or list): H5 internal path to the label dataset\n :param weight_internal_path (str or list): H5 internal path to the per pixel weights\n :param a number between (0, 1]: specifies a fraction of ground truth instances to be sampled from the dense ground truth labels\n \"\"\"\n assert phase in ['train', 'val', 'test']\n if phase in ['train', 'val']:\n raise \n mirror_padding = None\n\n if mirror_padding is not None:\n if isinstance(mirror_padding, int):\n mirror_padding = (mirror_padding,) * 3\n else:\n assert len(mirror_padding) == 3, f\"Invalid mirror_padding: {mirror_padding}\"\n\n self.mirror_padding = mirror_padding\n self.phase = phase\n self.file_path = file_path\n\n self.instance_ratio = instance_ratio\n\n # convert raw_internal_path, label_internal_path and weight_internal_path to list for ease of computation\n if isinstance(weight_internal_path, str):\n weight_internal_path = [weight_internal_path]\n\n nifti = nib.load(file_path)\n arr = nifti.get_fdata()\n self.raws = [np.fliplr(arr).transpose((2,1,0))]\n self.affine = [nifti.affine]\n self.header = [nifti.header]\n\n min_value, max_value, mean, std = None, None, None, None #self.ds_stats()\n\n self.transformer = transforms.get_transformer(transformer_config, min_value=min_value, max_value=max_value,\n mean=mean, std=std)\n self.raw_transform = self.transformer.raw_transform()\n\n if phase != 'test':\n raise\n # not yet\n\n # create label/weight transform only in train/val phase\n self.label_transform = self.transformer.label_transform()\n self.labels = self.fetch_and_check(input_file, label_internal_path)\n\n if self.instance_ratio is not None:\n assert 0 < self.instance_ratio <= 1\n rs = np.random.RandomState(random_seed)\n self.labels = [sample_instances(m, self.instance_ratio, rs) for m in self.labels]\n\n if weight_internal_path is not None:\n # look for the weight map in the raw file\n self.weight_maps = self.fetch_and_check(input_file, weight_internal_path)\n self.weight_transform = self.transformer.weight_transform()\n else:\n self.weight_maps = None\n\n self._check_dimensionality(self.raws, self.labels)\n else:\n # 'test' phase used only for predictions so ignore the label dataset\n self.labels = None\n self.weight_maps = None\n\n # add mirror 
padding if needed\n if self.mirror_padding is not None:\n z, y, x = self.mirror_padding\n pad_width = ((z, z), (y, y), (x, x))\n padded_volumes = []\n for raw in self.raws:\n if raw.ndim == 4:\n channels = [np.pad(r, pad_width=pad_width, mode='reflect') for r in raw]\n padded_volume = np.stack(channels)\n else:\n padded_volume = np.pad(raw, pad_width=pad_width, mode='reflect')\n\n padded_volumes.append(padded_volume)\n\n self.raws = padded_volumes\n\n # build slice indices for raw and label data sets\n assert 'name' in slice_builder_config\n if slice_builder_config['name'] == \"AroundCenterSliceBuilder\":\n assert 'centers_internal_path' in slice_builder_config\n centers_internal_path = slice_builder_config.get('centers_internal_path', 'centers')\n self.centers = self.fetch_and_check(input_file, [centers_internal_path])\n slice_builder = get_slice_builder(self.raws, self.labels, self.weight_maps, slice_builder_config, centers=self.centers)\n else:\n slice_builder = get_slice_builder(self.raws, self.labels, self.weight_maps, slice_builder_config) \n self.raw_slices = slice_builder.raw_slices\n self.label_slices = slice_builder.label_slices\n self.weight_slices = slice_builder.weight_slices\n\n self.patch_count = len(self.raw_slices)\n logger.info(f'Number of patches: {self.patch_count}')\n\n def __getitem__(self, idx):\n if idx >= len(self):\n raise StopIteration\n\n # get the slice for a given index 'idx'\n raw_idx = self.raw_slices[idx]\n # get the raw data patch for a given slice\n raw_patch_transformed = self._transform_patches(self.raws, raw_idx, self.raw_transform)\n\n if self.phase == 'test':\n # discard the channel dimension in the slices: predictor requires only the spatial dimensions of the volume\n if len(raw_idx) == 4:\n raw_idx = raw_idx[1:]\n return raw_patch_transformed, raw_idx\n else:\n raise # not yet\n # get the slice for a given index 'idx'\n label_idx = self.label_slices[idx]\n label_patch_transformed = self._transform_patches(self.labels, label_idx, self.label_transform)\n if self.weight_maps is not None:\n weight_idx = self.weight_slices[idx]\n # return the transformed weight map for a given patch together with raw and label data\n weight_patch_transformed = self._transform_patches(self.weight_maps, weight_idx, self.weight_transform)\n return raw_patch_transformed, label_patch_transformed, weight_patch_transformed\n # return the transformed raw and label patches\n return raw_patch_transformed, label_patch_transformed\n\n\n @classmethod\n def create_datasets(cls, dataset_config, phase):\n phase_config = dataset_config[phase]\n\n # load data augmentation configuration\n transformer_config = phase_config['transformer']\n # load slice builder config\n slice_builder_config = phase_config['slice_builder']\n # load files to process\n dicom_dir = phase_config['file_paths'][0]\n # file_paths must be directories of dicom files; \n file_path = cls.make_nifti(dicom_dir)\n\n # load instance sampling configuration\n instance_ratio = phase_config.get('instance_ratio', None)\n random_seed = phase_config.get('random_seed', 0)\n\n datasets = []\n try:\n logger.info(f'Loading {phase} set from: {dicom_dir}...')\n dataset = cls(file_path=file_path,\n phase=phase,\n slice_builder_config=slice_builder_config,\n transformer_config=transformer_config,\n mirror_padding=dataset_config.get('mirror_padding', None),\n weight_internal_path=dataset_config.get('weight_internal_path', None),\n instance_ratio=instance_ratio, random_seed=random_seed)\n datasets.append(dataset)\n except Exception:\n 
logger.error(f'Skipping {phase} set: {file_path}', exc_info=True)\n return datasets\n\n @staticmethod\n def make_nifti(dicom_dir):\n if not os.path.isdir('./tmp'):\n os.makedirs('./tmp')\n try:\n dicom2nifti.dicom_series_to_nifti(dicom_dir, './tmp/tmp.nii.gz') \n except Exception:\n logger.error(f\"Can't convert dicom dir '{dicom_dir}' to nifti\")\n return './tmp/tmp.nii.gz'\n\n @staticmethod\n def _transform_patches(datasets, label_idx, transformer):\n transformed_patches = []\n for dataset in datasets:\n # get the label data and apply the label transformer\n transformed_patch = transformer(dataset[label_idx])\n transformed_patches.append(transformed_patch)\n\n # if transformed_patches is a singleton list return the first element only\n if len(transformed_patches) == 1:\n return transformed_patches[0]\n else:\n return transformed_patches\n\n def __len__(self):\n return self.patch_count\n\n\n\n"
] | [
[
"numpy.fliplr",
"numpy.random.RandomState",
"numpy.pad",
"numpy.stack"
]
] |
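The row above exercises a small set of NumPy calls (see its API column: fliplr, pad, stack, RandomState). A minimal, self-contained sketch of the reorientation and reflect-padding steps that StandardDicomDataset applies to a loaded volume; the toy shape and padding amounts are illustrative assumptions, not values taken from the dataset:

import numpy as np

vol = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5)  # stand-in for nifti.get_fdata()
vol = np.fliplr(vol).transpose((2, 1, 0))                      # flip axis 1, reorder axes as in the row

z, y, x = (1, 2, 2)                                            # assumed mirror_padding per axis
padded = np.pad(vol, pad_width=((z, z), (y, y), (x, x)), mode='reflect')
print(vol.shape, '->', padded.shape)                           # (5, 4, 3) -> (7, 8, 7)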
d-v-b/fish | [
"1fc9ea6c37e308ba02bd3307443365941a60196a"
] | [
"fish/ephys/ephys.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Process electrophysiological recordings of fish behavior and trial structure\n#\n# Davis Bennett\n# [email protected]\n#\n# License: MIT\n#\n\n\ndef match_cam_time(events, frame_times):\n \"\"\"\n Helper function for mapping ephys events to camera times. For each event in events, we return the nearest\n camera frame before the event.\n\n\n Parameters\n ----------\n\n events : 1D numpy array\n Events of interest. Sampled at a higher rate than frame_times.\n\n frame_times : 1D numpy array\n Timepoints of camera frames to be assigned to events. Sampled at a lower rate than events.\n\n \"\"\"\n from numpy import array\n\n output = []\n for a in events:\n lags = array(a - frame_times)\n before = len(lags[lags > 0]) - 1\n\n if before >= 0:\n output.append(before)\n\n return array(output)\n\n\ndef chop_trials(signal, thr=2000):\n \"\"\"\n For each unique value in the input signal, return the start and stop of each epoch corresponding to that value.\n\n\n Parameters\n ----------\n signal : 1D numpy array\n Vector of categorical signals, e.g. [0,0,1,1,2,2,1,1]\n\n thr: Integer\n Minimum length of an epoch. Defaults to 2000 samples.\n\n \"\"\"\n from numpy import unique, where, concatenate, diff\n\n conditions = unique(signal)\n chopped = {}\n\n # todo: make this depend on estimate_onset\n for c in conditions:\n tmp = where(signal == c)[0]\n offs = where(diff(tmp) > 1)[0]\n offs = concatenate((offs, [tmp.size - 1]))\n ons = concatenate(([0], offs[0:-1] + 1))\n trLens = offs - ons\n keep_trials = where(trLens > thr)\n offs = offs[keep_trials]\n ons = ons[keep_trials]\n chopped[c] = (tmp[ons], tmp[offs])\n\n return chopped\n\n\ndef estimate_onset(signal, threshold, duration):\n \"\"\"\n Find indices in a vector when the values first cross a threshold. Useful when e.g. finding onset times for\n a ttl signal.\n\n\n Parameters\n ----------\n signal : numpy array, 1-dimensional\n Vector of values to be processed.\n\n threshold : instance of numeric data type contained in signal\n Onsets are counted as indices where the signal first crosses this value.\n\n duration : instance of numeric data type contained in signal\n Minimum distance between consecutive onsets.\n\n \"\"\"\n from numpy import where, diff, concatenate\n\n inits = 1 + where((signal[:-1] < threshold) * (signal[1:] > threshold))[0]\n valid = concatenate([[0], 1 + where(diff(inits) > duration)[0]])\n return inits[valid]\n\n\ndef estimate_swims(signal, fs=6000):\n \"\"\" Estimate swim timing from ephys recording of motor neurons\n\n Parameters\n __________\n\n signal : numpy array, 1 dimensional. Windowed variance of ephys signal.\n\n fs : int\n sampling rate of the data, in Hz\n\n \"\"\"\n\n from numpy import zeros, where, diff, concatenate\n\n # set dead time between peaks, in seconds. 
This prevents duplicate swims.\n dead_time = 0.010 * fs\n\n # set minimum duration between swim bursts in seconds\n inter_swim_min = 0.12 * fs\n\n # estimate swim threshold\n thr = estimate_threshold(signal, fs * 60)\n\n peaksT, peaksIndT = estimate_peaks(signal, dead_time)\n\n burstIndT = peaksIndT[where(signal[peaksIndT] > thr[peaksIndT])]\n burstT = zeros(signal.shape)\n burstT[burstIndT] = 1\n\n interSwims = diff(burstIndT)\n swimEndIndB = where(interSwims > inter_swim_min)[0]\n swimEndIndB = concatenate((swimEndIndB, [burstIndT.size - 1]))\n\n swimStartIndB = swimEndIndB[:-1] + 1\n swimStartIndB = concatenate(([0], swimStartIndB))\n nonShort = where(swimEndIndB != swimStartIndB)[0]\n swimStartIndB = swimStartIndB[nonShort]\n swimEndIndB = swimEndIndB[nonShort]\n\n bursts = zeros(signal.size)\n starts = zeros(signal.size)\n stops = zeros(signal.size)\n bursts[burstIndT] = 1\n starts[burstIndT[swimStartIndB]] = 1\n stops[burstIndT[swimEndIndB]] = 1\n\n return starts, stops, thr\n\n\ndef windowed_variance(signal, kern_mean=None, kern_var=None, fs=6000):\n \"\"\"\n Estimate smoothed sliding variance of the input signal\n\n signal : numpy array\n\n kern_mean : numpy array\n kernel to use for estimating baseline\n\n kern_var : numpy array\n kernel to use for estimating variance\n\n fs : int\n sampling rate of the data\n \"\"\"\n from scipy.signal import gaussian, fftconvolve\n\n # set the width of the kernels to use for smoothing\n kw = int(0.04 * fs)\n\n if kern_mean is None:\n kern_mean = gaussian(kw, kw // 10)\n kern_mean /= kern_mean.sum()\n\n if kern_var is None:\n kern_var = gaussian(kw, kw // 10)\n kern_var /= kern_var.sum()\n\n mean_estimate = fftconvolve(signal, kern_mean, \"same\")\n var_estimate = (signal - mean_estimate) ** 2\n fltch = fftconvolve(var_estimate, kern_var, \"same\")\n\n return fltch, var_estimate, mean_estimate\n\n\ndef estimate_peaks(signal, dead_time):\n \"\"\"\n Estimate peak times in a signal, with a minimum distance between estimated peaks.\n\n Parameters\n __________\n\n signal : numpy array, 1-dimensional\n\n dead_time : int\n minimum number of sample between estimated peaks\n\n \"\"\"\n\n from numpy import diff, where, zeros\n\n aa = diff(signal)\n peaks = (aa[:-1] > 0) * (aa[1:] < 0)\n inds = where(peaks)[0]\n\n # take the difference between consecutive indices\n d_inds = diff(inds)\n\n # find differences greater than deadtime\n to_keep = d_inds > dead_time\n\n # only keep the indices corresponding to differences greater than deadT\n inds[1:] = inds[1:] * to_keep\n inds = inds[inds.nonzero()]\n\n peaks = zeros(signal.shape[0])\n peaks[inds] = 1\n\n return peaks, inds\n\n\ndef load(in_file, num_channels=10, memmap=False):\n \"\"\"Load multichannel binary data from disk, return as a [channels,samples] sized numpy array\n \"\"\"\n from numpy import fromfile, float32\n\n if memmap:\n from numpy import memmap\n\n data = memmap(in_file, dtype=float32)\n else:\n with open(in_file, \"rb\") as fd:\n data = fromfile(file=fd, dtype=float32)\n trim = data.size % num_channels\n # transpose to make dimensions [channels, time]\n data = data[: (data.size - trim)].reshape(data.size // num_channels, num_channels).T\n if trim > 0:\n print(\"Data needed to be truncated!\")\n\n return data\n\n\ndef estimate_threshold(signal, window=180000, scaling=1.6, lower_percentile=0.01):\n \"\"\"\n Return non-sliding windowed threshold of input ndarray vec.\n\n Parameters\n ----------\n\n signal : ndarray, 1-dimensional\n Input array from which to estimate a threshold\n\n window : int\n 
Step size / window length for the resulting threshold.\n\n scaling : float or int\n scaling factor applied to estimated spread of the noise distribution of vec. Sets magnitude of threshold\n relative to the estimated upper bound of the noise distribution.\n\n lower_percentile : float\n Percentile of signal to use when estimating the lower bound of the noise distribution.\n \"\"\"\n from numpy import zeros, percentile, arange, median\n\n th = zeros(signal.shape)\n for t in arange(0, signal.size - window, window):\n plr = arange(t, min(t + window, signal.size))\n sig = signal[plr]\n med = median(sig)\n bottom = percentile(sig, lower_percentile)\n th[t:] = med + scaling * (med - bottom)\n\n return th\n"
] | [
[
"numpy.fromfile",
"scipy.signal.fftconvolve",
"numpy.unique",
"numpy.arange",
"numpy.memmap",
"numpy.median",
"numpy.percentile",
"numpy.concatenate",
"numpy.diff",
"numpy.array",
"numpy.zeros",
"numpy.where",
"scipy.signal.gaussian"
]
] |
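The fish/ephys row centres on windowed_variance; below is a runnable condensation of that idea using only calls from its API column. The toy signal is an assumption, and note that scipy.signal.gaussian has moved to scipy.signal.windows.gaussian in newer SciPy releases:

import numpy as np
from scipy.signal import gaussian, fftconvolve  # scipy.signal.windows.gaussian in newer SciPy

fs = 6000                                     # sampling rate in Hz, as in estimate_swims
t = np.arange(fs) / fs
sig = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(fs)   # toy stand-in for an ephys trace

kw = int(0.04 * fs)                           # 40 ms kernel width, matching windowed_variance
kern = gaussian(kw, kw // 10)
kern /= kern.sum()

mean_est = fftconvolve(sig, kern, 'same')     # smoothed baseline
var_est = fftconvolve((sig - mean_est) ** 2, kern, 'same')    # smoothed sliding variance
print(var_est.shape)                          # (6000,)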
fnardmann/pointnet2 | [
"787d39d8d1099298fb3970d0e845515ebd0939bf"
] | [
"scannet/testload.py"
] | [
"import numpy as np\r\nimport laspy\r\nimport os\r\nfrom scipy.spatial import KDTree\r\nfrom sklearn.preprocessing import normalize\r\nimport logging\r\nfrom pathlib import Path\r\n\r\n\r\nclass Dataset():\r\n ATTR_EXLUSION_LIST = ['X', 'Y', 'Z', 'raw_classification', 'Classification',\r\n 'flag_byte', 'scan_angle_rank', 'user_data',\r\n 'pt_src_id', 'gps_time']\r\n ATTR_EXTRA_LIST = ['num_returns', 'return_num']\r\n\r\n def __init__(self, file, load=True, multiclass=True, normalize=False):\r\n self.file = file\r\n self._features = self._xyz = self._classes = self._names = None\r\n self.xmax = self.xmin = self.ymax = self.ymin = None\r\n self._header = None\r\n self.multiclass = multiclass\r\n self.normalize = normalize\r\n if load:\r\n self.load_data()\r\n\r\n def load_data(self):\r\n file_h = laspy.file.File(self.file, mode='r')\r\n self._xyz = np.vstack([file_h.x, file_h.y, file_h.z]).transpose()\r\n self._classes = file_h.classification\r\n\r\n self.xmin = file_h.header.min[0]\r\n self.ymin = file_h.header.min[1]\r\n self.xmax = file_h.header.max[0]\r\n self.ymax = file_h.header.max[1]\r\n self._header = file_h.header\r\n file_h.close()\r\n\r\n def statistics(self):\r\n stats = {'absolute': {},\r\n 'relative': {}}\r\n for i in range(np.max(self.labels)):\r\n count = np.count_nonzero(self.labels == i)\r\n stats['absolute'][i] = count\r\n stats['relative'][i] = count/len(self)\r\n\r\n return stats\r\n\r\n @property\r\n def labels(self):\r\n if self._xyz is None:\r\n self.load_data()\r\n ret_val = self._classes if self.multiclass else (self._classes != 2).astype('int8') + 2\r\n return ret_val\r\n\r\n @property\r\n def names(self):\r\n return self._names\r\n\r\n @property\r\n def points_and_features(self):\r\n if self._xyz is None:\r\n self.load_data()\r\n ret_val = self._xyz if self._features is None else np.hstack((self._xyz, self._features))\r\n if self.normalize:\r\n normalize(ret_val)\r\n return ret_val\r\n\r\n @property\r\n def filename(self):\r\n return os.path.basename(self.file)\r\n\r\n def points_and_features_f(self):\r\n return self.points_and_features\r\n\r\n def labels_f(self):\r\n return self.labels\r\n\r\n def unload(self):\r\n self._features = self._xyz = self._classes = self._names = None\r\n self.xmax = self.xmin = self.ymax = self.ymin = None\r\n self._header = None\r\n\r\n def get_label_unique_count(self):\r\n return len(np.unique(self._classes))\r\n\r\n def get_feature_count(self):\r\n return self._features.shape[1]\r\n\r\n\r\n def __len__(self):\r\n return self.labels.shape[0]\r\n\r\n def getBatch(self, start_idx, batch_size, idx_randomizer=None):\r\n if idx_randomizer is not None:\r\n idx_range = idx_randomizer[start_idx:start_idx + batch_size]\r\n else:\r\n idx_range = range(start_idx, start_idx + batch_size)\r\n data = self.points_and_features[idx_range]\r\n labels = self.labels[idx_range]\r\n\r\n def save_with_new_classes(self, outFile, new_classes):\r\n inFile = laspy.file.File(self.file)\r\n outFile = laspy.file.File(outFile, mode='w', header=inFile.header)\r\n outFile.points = inFile.points\r\n outFile.Classification = new_classes[0]\r\n outFile.close()\r\n\r\n @staticmethod\r\n def Save(path, points_and_features, names=None, labels=None, new_classes=None, probs=None):\r\n hdr = laspy.header.Header()\r\n outfile = laspy.file.File(path, mode=\"w\", header=hdr)\r\n if new_classes is not None:\r\n outfile.define_new_dimension(name=\"estim_class\", data_type=5, description=\"estimated class\")\r\n if labels is not None and new_classes is not None:\r\n 
outfile.define_new_dimension(name=\"class_correct\", data_type=5, description=\"correctness of estimated class\")\r\n if probs is not None:\r\n for classid in range(probs.shape[1]):\r\n outfile.define_new_dimension(name=\"prob_class%02d\" % classid, data_type=9, description=\"p of estimated class %02d\"%classid)\r\n\r\n allx = points_and_features[:, 0]\r\n ally = points_and_features[:, 1]\r\n allz = points_and_features[:, 2]\r\n\r\n xmin = np.floor(np.min(allx))\r\n ymin = np.floor(np.min(ally))\r\n zmin = np.floor(np.min(allz))\r\n\r\n outfile.header.offset = [xmin, ymin, zmin]\r\n outfile.header.scale = [0.001, 0.001, 0.001]\r\n\r\n outfile.x = allx\r\n outfile.y = ally\r\n outfile.z = allz\r\n\r\n for featid in range(points_and_features.shape[1]-3):\r\n try:\r\n data = points_and_features[:, 3+featid]\r\n if names[featid] in ['num_returns', 'return_num']: # hack to treat int-values\r\n data = data.astype('int8')\r\n setattr(outfile, names[featid], data)\r\n except Exception as e:\r\n logging.warning(\"Could not save attribute %s to file %s: \\n%s\" % (names[featid], path, e))\r\n #raise\r\n\r\n if probs is not None:\r\n for classid in range(probs.shape[1]):\r\n setattr(outfile, \"prob_class%02d\" % classid, probs[:, classid])\r\n\r\n if labels is not None:\r\n outfile.classification = labels\r\n if new_classes is not None:\r\n outfile.estim_class = new_classes\r\n if labels is not None and new_classes is not None:\r\n outfile.class_correct = np.equal(labels, new_classes)*-1 + 6 # so that equal =5 --> green (veg)\r\n # and not equal =6 --> red (building)\r\n\r\n outfile.close()\r\n\r\n\r\ndef getFileList(dir, extension):\r\n \"\"\" Get all files in this directory with given extension (recursively).\r\n Input:\r\n String dir, directory\r\n String extension, valid extensions with leading point -> \".laz\"\r\n Return:\r\n List, with all path of files with given extension\r\n \"\"\"\r\n # get string from WindowsPath and return list\r\n return [p.__str__() for p in sorted(list(Path(dir).glob('**/*' + extension)))]\r\n\r\n\r\npath = '/home/felix/Desktop/MA/data/4529/debug/train/xyz'\r\n\r\ndatasets = getFileList(path, '.laz')\r\n\r\nlen(datasets)\r\n\r\nnp.random.shuffle(datasets)\r\ndatasets_th = []\r\nfor idx, dataset in enumerate(datasets):\r\n print(\"Loading dataset %s of %s (%s)\" % (idx+1, len(datasets), os.path.basename(dataset)))\r\n ds = Dataset(dataset, load=True, normalize=True)\r\n datasets_th.append(ds)\r\n break\r\nprint(\"%s datasets loaded.\" % len(datasets_th))\r\n\r\ndatasets_th[0].points_and_features.shape\r\ndatasets_th[0].labels.shape\r\n\r\ndatasets_th[0].points_and_features"
] | [
[
"numpy.hstack",
"numpy.unique",
"numpy.min",
"numpy.random.shuffle",
"numpy.max",
"sklearn.preprocessing.normalize",
"numpy.equal",
"numpy.count_nonzero",
"numpy.vstack"
]
] |
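One detail worth calling out in the pointnet2 row is the class-frequency loop in Dataset.statistics. A standalone version over a plain label array (the labels themselves are made up), including the `+ 1` bound the original loop is missing:

import numpy as np

labels = np.array([2, 2, 6, 2, 9, 6, 2], dtype='int8')   # assumed toy labels
stats = {'absolute': {}, 'relative': {}}
for i in range(np.max(labels) + 1):                      # + 1 so class 9 is counted too
    count = np.count_nonzero(labels == i)
    stats['absolute'][i] = count
    stats['relative'][i] = count / len(labels)
print(stats['absolute'][2], round(stats['relative'][2], 3))  # 4 0.571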
vishalbelsare/regreg | [
"d1b62cc43cdd83331f2b0817b0ae099d5ef97966"
] | [
"regreg/smooth/__init__.py"
] | [
"import numpy as np\nfrom scipy import sparse\nimport warnings\nimport inspect\n\nfrom ..problems.composite import smooth as smooth_composite\nfrom ..affine import affine_transform, linear_transform, astransform\nfrom ..identity_quadratic import identity_quadratic\n\n#TODO: create proximal methods for known smooth things\nclass smooth_atom(smooth_composite):\n\n \"\"\"\n A class for representing a smooth function and its gradient\n\n Parameters\n ----------\n\n shape : tuple\n Shape of argument to `smooth_objective`\n\n coef : float (optional)\n Scalar multiple to be applied (must be nonnegative)\n\n offset : ndarray (optional)\n Vector to be subtracted before evaluating `smooth_objective`. \n\n quadratic : `identity_quadratic` (optional)\n Instance of `identity_quadratic` to be added to overall\n objective.\n\n initial : ndarray (optional)\n Initial value for coefficients.\n\n \"\"\"\n\n objective_template = r'''f(%(var)s)'''\n objective_vars = {'var':r'\\beta',\n 'shape':'p',\n 'coef':'C',\n 'offset':r'\\alpha+'}\n\n def __init__(self, shape, coef=1, offset=None,\n quadratic=None, initial=None):\n smooth_composite.__init__(self, shape,\n offset=offset,\n quadratic=quadratic,\n initial=initial)\n self.coef = coef\n if coef < 0:\n raise ValueError('coefs must be nonnegative to ensure convexity (assuming all atoms are indeed convex)')\n self.coefs = np.zeros(self.shape)\n\n def smooth_objective(self, arg, mode='both', check_feasibility=False):\n \"\"\"\n\n Parameters\n ----------\n\n arg : ndarray\n The current parameter values.\n\n mode : str\n One of ['func', 'grad', 'both']. \n\n check_feasibility : bool\n If True, return `np.inf` when\n point is not feasible, i.e. when `beta` is not\n in the domain.\n\n Returns\n -------\n\n If `mode` is 'func' returns just the objective value \n at `beta`, else if `mode` is 'grad' returns the gradient\n else returns both.\n \"\"\"\n raise NotImplementedError(\"Abstract method.\")\n \n @classmethod\n def affine(cls, linear_operator, offset, coef=1, diag=False,\n quadratic=None, **kws):\n \"\"\"\n Keywords given in kws are passed to cls constructor along with other arguments\n \"\"\"\n if not isinstance(linear_operator, affine_transform):\n l = linear_transform(linear_operator, diag=diag)\n else:\n l = linear_operator\n if not acceptable_init_args(cls, kws):\n raise ValueError(\"Invalid arguments being passed to initialize \" + cls.__name__)\n \n # the minus signs below for offset is there until affine transforms SUBTRACT \n # their offset until add. 
\n # for atoms, the offset is really the \"center\"\n\n atom = cls(l.output_shape, coef=coef, offset=-offset, quadratic=quadratic, **kws)\n \n return affine_smooth(atom, l)\n\n @classmethod\n def linear(cls, linear_operator, coef=1, diag=False,\n offset=None, \n quadratic=None, **kws):\n \"\"\"\n Keywords given in kws are passed to cls constructor along with other arguments\n \"\"\"\n if not acceptable_init_args(cls, kws):\n raise ValueError(\"Invalid arguments being passed to initialize \" + cls.__name__)\n\n atransform = affine_transform(linear_operator, None, diag=diag)\n atom = cls(atransform.output_shape, coef=coef, quadratic=quadratic, offset=offset, **kws)\n \n return affine_smooth(atom, atransform)\n\n @classmethod\n def shift(cls, offset, coef=1, quadratic=None, **kws):\n \"\"\"\n Keywords given in kws are passed to cls constructor along with other arguments\n \"\"\"\n if not acceptable_init_args(cls, kws):\n raise ValueError(\"Invalid arguments being passed to initialize \" + cls.__name__)\n \n atom = cls(offset.shape, coef=coef, quadratic=quadratic, \n offset=offset, **kws)\n return atom\n\n def scale(self, obj, copy=False):\n if self.coef != 1:\n return obj * self.coef\n if copy:\n return obj.copy()\n return obj\n\n def get_conjugate(self):\n raise NotImplementedError('each smooth loss should implement its own get_conjugate')\n\n @property\n def conjugate(self):\n return self.get_conjugate()\n \n\ndef acceptable_init_args(cls, proposed_keywords):\n \"\"\"\n Check that the keywords in the dictionary proposed_keywords are arguments to __init__ of class cls\n\n Returns True/False\n \"\"\"\n args = inspect.getargspec(cls.__init__).args\n forbidden = ['self', 'shape', 'coef', 'quadratic', 'initial', 'offset']\n for kw in proposed_keywords.keys():\n if not kw in args:\n return False\n if kw in forbidden:\n return False\n return True\n\nclass affine_smooth(smooth_atom):\n\n \"\"\"\n\n Composition of a smooth objective with an affine transform.\n\n \"\"\"\n\n force_reshape = True\n\n objective_vars = {'linear':'X'}\n\n def __init__(self, smooth_atom, atransform, store_grad=True, diag=False):\n \"\"\"\n\n Parameters\n ----------\n\n smooth_atom : `regreg.smooth.smooth_atom`\n A smooth atom.\n\n atransform : `regreg.affine.affine_transform`\n An affine transformation, or cast to one\n using `regreg.affine.linear_transform`\n\n store_grad : bool\n If True, when computing the gradient,\n store a reference to the gradient of `smooth_atom`\n in the attribute `grad`.\n\n diag : bool\n Indicates if `atransform` acts diagonally,\n i.e. a rescaling.\n Passed to `regreg.affine.linear_transform`.\n\n \"\"\"\n self.store_grad = store_grad\n self.atom = smooth_atom\n if not isinstance(atransform, affine_transform):\n atransform = linear_transform(atransform, diag=diag)\n self.affine_transform = atransform\n self.shape = atransform.input_shape\n self.coefs = np.zeros(self.shape)\n\n def _get_coef(self):\n return self.atom.coef\n\n def _set_coef(self, coef):\n self.atom.coef = coef\n coef = property(_get_coef, _set_coef)\n\n def smooth_objective(self, arg, mode='both', check_feasibility=False):\n \"\"\"\n Compute the smooth objective at the point `self.transform.affine_map(arg)`.\n\n Parameters\n ----------\n\n arg : ndarray\n The current parameter values.\n\n mode : str\n One of ['func', 'grad', 'both']. \n\n check_feasibility : bool\n If True, return `np.inf` when\n point is not feasible, i.e. 
when `beta` is not\n in the domain.\n\n Returns\n -------\n\n If `mode` is 'func' returns just the objective value \n at `self.transform(arg)`, else if `mode` is 'grad' returns the gradient\n else returns both.\n \"\"\"\n\n eta = self.affine_transform.affine_map(arg)\n if mode == 'both':\n v, g = self.atom.smooth_objective(eta, mode='both')\n if self.store_grad:\n self.grad = g\n g = self.affine_transform.adjoint_map(g)\n if self.force_reshape:\n g = g.reshape(self.shape)\n return v, g\n elif mode == 'grad':\n g = self.atom.smooth_objective(eta, mode='grad')\n if self.store_grad:\n self.grad = g\n g = self.affine_transform.adjoint_map(g)\n if self.force_reshape:\n g = g.reshape(self.shape)\n return g \n elif mode == 'func':\n v = self.atom.smooth_objective(eta, mode='func')\n return v \n\n @property\n def dual(self):\n try: \n conj = self.atom.conjugate\n return self.affine_transform, conj\n except:\n return None\n\n def __repr__(self):\n return (\"affine_smooth(%s, %s, store_grad=%s)\" % \n (str(self.atom),\n str(self.affine_transform),\n self.store_grad))\n\n def latexify(self, var=None, idx=''):\n template_dict = self.atom.objective_vars.copy()\n template_dict['linear'] = self.objective_vars['linear']\n if var is not None:\n template_dict['var'] = var\n template_dict['idx'] = idx\n\n obj_latex = self.atom.latexify(var='%(linear)s_{%(idx)s}%(var)s' % template_dict, idx=idx)\n if not self.quadratic.iszero:\n return ' + '.join([obj_latex, self.quadratic.latexify(var=template_dict['var'], idx=idx)])\n else:\n return obj_latex\n\nclass zero(smooth_atom):\n\n \"\"\"\n The zero function.\n \"\"\"\n\n def smooth_objective(self, x, mode='both', check_feasibility=False):\n if mode == 'both':\n return 0., np.zeros(x.shape)\n elif mode == 'func':\n return 0.\n elif mode == 'grad':\n return np.zeros(x.shape)\n raise ValueError(\"Mode not specified correctly\")\n\nclass sum(smooth_atom):\n \"\"\"\n A simple way to combine smooth objectives\n \"\"\"\n def __init__(self, atoms, weights=None):\n \"\"\"\n Parameters\n ----------\n\n atoms : sequence\n A sequence of `regreg.smooth.smooth_atom` that will be summed\n to make a new atom.\n\n weights : ndarray (optional)\n If provided, these weights will appear as coefficients\n in front of each atom.\n\n \"\"\"\n self.offset = None\n self.atoms = atoms\n if weights is None:\n weights = np.ones(len(self.atoms))\n self.weights = np.asarray(weights).reshape(-1)\n if self.weights.shape[0] != len(atoms):\n raise ValueError('weights and atoms have different lengths')\n if np.any(self.weights < 0):\n raise ValueError('weights should be non-negative to maintain convexity')\n self.coefs = self.atoms[0].coefs\n self.shape = self.coefs.shape\n\n def smooth_objective(self, x, mode='both', check_feasibility=False):\n\n \"\"\"\n Compute the smooth objective at the point `self.transform.affine_map(arg)`,\n which is the sum of each `atom`'s objective with its respective weight.\n\n Parameters\n ----------\n\n arg : ndarray\n The current parameter values.\n\n mode : str\n One of ['func', 'grad', 'both']. \n\n check_feasibility : bool\n If True, return `np.inf` when\n point is not feasible, i.e. 
when `beta` is not\n in the domain.\n\n Returns\n -------\n\n If `mode` is 'func' returns just the objective value \n at `self.transform(arg)`, else if `mode` is 'grad' returns the gradient\n else returns both.\n \"\"\"\n\n x = self.apply_offset(x)\n f, g = 0, 0\n for w, atom in zip(self.weights, self.atoms):\n if mode == 'func':\n f += w * atom.smooth_objective(x, 'func')\n elif mode == 'grad':\n g += w * atom.smooth_objective(x, 'grad')\n elif mode == 'both':\n fa, ga = atom.smooth_objective(x, 'both')\n f += w * fa; g += w * ga\n\n if mode == 'func':\n return f\n elif mode == 'grad':\n return g\n elif mode == 'both':\n return f, g\n else:\n raise ValueError(\"mode incorrectly specified\")\n\n"
] | [
[
"numpy.asarray",
"numpy.zeros",
"numpy.any"
]
] |
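The regreg row documents a 'func'/'grad'/'both' calling convention for smooth_objective. A minimal sketch of that convention on the simplest smooth function, f(x) = 0.5 * ||x||^2; this is an illustration of the interface only, not regreg's own atom:

import numpy as np

def smooth_objective(x, mode='both'):
    # f(x) = 0.5 * ||x||^2 with gradient x
    f, g = 0.5 * np.sum(x ** 2), x
    if mode == 'func':
        return f
    elif mode == 'grad':
        return g
    elif mode == 'both':
        return f, g
    raise ValueError("mode incorrectly specified")

x = np.ones(3)
print(smooth_objective(x, 'func'), smooth_objective(x, 'grad'))  # 1.5 [1. 1. 1.]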
DirkyJerky/Uni | [
"73ec1a84cdd59af9fc82a7bbb8af931305ec2ad3"
] | [
"514/hwk1/p2.py"
] | [
"import numpy as np\nfrom matplotlib import pyplot as plt\n\nf = lambda x: 1/2 * (x**2 + 0.3)\n\nplt.figure()\n\nfor x0 in [0, 1.83, -1]:\n idx = np.arange(21)\n iters = np.empty(21);\n\n iters[0] = x0\n for k in idx[1:]:\n iters[k] = f(iters[k-1])\n \n plt.plot(idx, iters, label = f'From {x0}')\n\nplt.xticks(np.arange(0, 21, 1.0))\nplt.yticks(np.arange(-1.0, 2, 0.5))\nplt.legend()\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] |
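The p2.py row iterates f(x) = (x^2 + 0.3)/2; the behaviour it plots follows from the two fixed points of f, which solve x^2 - 2x + 0.3 = 0. A short check (np.roots and the |f'(x)| = |x| stability test are standard; the interpretation is mine, not the original author's):

import numpy as np

roots = np.roots([1.0, -2.0, 0.3])            # fixed points: 1 +/- sqrt(0.7)
for r in sorted(roots):
    kind = 'attracting' if abs(r) < 1 else 'repelling'  # f'(x) = x
    print(f'{r:.4f}: {kind}')

Starting at 1.83, just inside the repelling fixed point near 1.8367, still converges (slowly) toward the attracting point near 0.1633; anything above 1.8367 diverges.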
abhilash1910/sonnet | [
"0e25f47fac469c0c2180abba0b985aca46f529ce"
] | [
"sonnet/src/utils.py"
] | [
"# Copyright 2019 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utils for Sonnet.\"\"\"\n\nimport collections.abc\nimport functools\nimport inspect\nimport re\nfrom typing import Any, Callable, Dict, Generic, Optional, Sequence, Tuple, TypeVar, Union\n\nfrom absl import logging\nfrom sonnet.src import initializers\nimport tabulate\nimport tensorflow as tf\n\nT = TypeVar(\"T\")\n\n\ndef replicate(\n element: Union[T, Sequence[T]],\n num_times: int,\n name: str,\n) -> Tuple[T]:\n \"\"\"Replicates entry in `element` `num_times` if needed.\"\"\"\n if not isinstance(element, collections.abc.Sequence):\n return (element,) * num_times\n elif len(element) == 1:\n return tuple(element * num_times)\n elif len(element) == num_times:\n return tuple(element)\n raise TypeError(\n \"{} must be a scalar or sequence of length 1 or sequence of length {}.\"\n .format(name, num_times))\n\n\ndef _is_object(f: Any) -> bool:\n return not inspect.isfunction(f) and not inspect.ismethod(f)\n\n\n# TODO(b/123870292) Remove this and use wrapt.decorator when supported by TF.\ndef decorator(\n decorator_fn: Callable[[T, Any, Sequence[Any], Dict[str, Any]], Any],) -> T:\n \"\"\"Returns a wrapt style decorator.\"\"\"\n\n @functools.wraps(decorator_fn)\n def _decorator(f):\n \"\"\"Wraps f such that it returns the result of applying decorator_fn.\"\"\"\n if _is_object(f):\n\n @functools.wraps(f.__call__)\n def _decorate_object(*args, **kwargs):\n return decorator_fn(f.__call__, f, args, kwargs)\n\n return _decorate_object\n\n if inspect.ismethod(f):\n\n @functools.wraps(f)\n def _decorate_bound_method(*args, **kwargs):\n return decorator_fn(f, f.__self__, args, kwargs)\n\n return _decorate_bound_method\n\n argspec = inspect.getfullargspec(f)\n if argspec.args and argspec.args[0] == \"self\":\n\n @functools.wraps(f)\n def _decorate_unbound_method(self, *args, **kwargs):\n bound_method = f.__get__(self, self.__class__) # pytype: disable=attribute-error\n return decorator_fn(bound_method, self, args, kwargs)\n\n return _decorate_unbound_method\n\n @functools.wraps(f)\n def _decorate_fn(*args, **kwargs):\n return decorator_fn(f, None, args, kwargs)\n\n return _decorate_fn\n\n return _decorator\n\n\n_SPATIAL_CHANNELS_FIRST = re.compile(\"^NC[^C]*$\")\n_SPATIAL_CHANNELS_LAST = re.compile(\"^N[^C]*C$\")\n_SEQUENTIAL = re.compile(\"^((BT)|(TB))[^D]*D$\")\n\n\ndef get_channel_index(data_format: str) -> int:\n \"\"\"Returns the channel index when given a valid data format.\n\n Args:\n data_format: String, the data format to get the channel index from. Valid\n data formats are spatial (e.g.`NCHW`), sequential (e.g. 
`BTHWD`),\n `channels_first` and `channels_last`).\n\n Returns:\n The channel index as an int - either 1 or -1.\n\n Raises:\n ValueError: If the data format is unrecognised.\n \"\"\"\n if data_format == \"channels_first\":\n return 1\n if data_format == \"channels_last\":\n return -1\n if _SPATIAL_CHANNELS_FIRST.match(data_format):\n return 1\n if _SPATIAL_CHANNELS_LAST.match(data_format):\n return -1\n if _SEQUENTIAL.match(data_format):\n return -1\n raise ValueError(\n \"Unable to extract channel information from '{}'. Valid data formats are \"\n \"spatial (e.g.`NCHW`), sequential (e.g. `BTHWD`), `channels_first` and \"\n \"`channels_last`).\".format(data_format))\n\n\ndef assert_rank(inputs, rank: int):\n \"\"\"Asserts the rank of the input is `rank`.\"\"\"\n shape = tuple(inputs.shape)\n actual_rank = len(shape)\n if rank != actual_rank:\n raise ValueError(\"Shape %r must have rank %d\" % (shape, rank))\n\n\ndef assert_minimum_rank(inputs, rank: int):\n \"\"\"Asserts the rank of the input is at least `rank`.\"\"\"\n shape = tuple(inputs.shape)\n actual_rank = len(shape)\n if actual_rank < rank:\n raise ValueError(\"Shape %r must have rank >= %d\" % (shape, rank))\n\n\ndef smart_autograph(f: T) -> T:\n \"\"\"Wraps `f` such that in graph mode it uses autograph but not in eager.\n\n Whilst wrapping `f` in autograph is (intended to be) semantics preserving,\n some things (e.g. breakpoints) are not preserved. Using `smart_autograph`\n users can write code with eager syntax, add breakpoints and debug it as you\n might expect and still be compatible with code that uses\n `@tf.function(autograph=False)`.\n\n >>> @smart_autograph\n ... def f(x):\n ... if x > 0:\n ... y = x * x\n ... else:\n ... y = -x\n ... return y\n\n >>> f = tf.function(f, autograph=False)\n >>> f(tf.constant(2))\n <tf.Tensor: ... 
numpy=4>\n\n Args:\n f: A function to wrap conditionally in `tf.autograph`.\n\n Returns:\n A wrapper for `f` that dispatches to the original or autograph version of f.\n \"\"\"\n f_autograph = tf.autograph.to_graph(f)\n\n @functools.wraps(f)\n def smart_autograph_wrapper(*args, **kwargs):\n if tf.executing_eagerly():\n return f(*args, **kwargs)\n else:\n return f_autograph(*args, **kwargs)\n\n return smart_autograph_wrapper\n\n\ndef variable_like(inputs: Union[tf.Tensor, tf.Variable],\n initializer: initializers.Initializer = initializers.Zeros(),\n trainable: Optional[bool] = None,\n name: Optional[str] = None) -> tf.Variable:\n \"\"\"Creates a new variable with the same shape/dtype/device as the input.\"\"\"\n if trainable is None:\n trainable = getattr(inputs, \"trainable\", None)\n if name is None:\n name = getattr(inputs, \"name\", \"Variable\").split(\":\")[0]\n with tf.device(inputs.device):\n initial_value = initializer(inputs.shape, inputs.dtype)\n return tf.Variable(initial_value, trainable=trainable, name=name)\n\n\ndef _render_spec(shape: tf.TensorShape, dtype: tf.DType) -> str:\n \"\"\"Renders the given shape/dtype as a short specification.\"\"\"\n\n format_map = {\n tf.float16: \"f16\",\n tf.float32: \"f32\",\n tf.float64: \"f64\",\n tf.bfloat16: \"bf16\",\n tf.complex64: \"c64\",\n tf.complex128: \"c128\",\n tf.uint8: \"u8\",\n tf.uint16: \"u16\",\n tf.uint32: \"u32\",\n tf.uint64: \"u64\",\n tf.int8: \"i8\",\n tf.int16: \"i16\",\n tf.int32: \"i32\",\n tf.int64: \"i64\",\n tf.qint8: \"qi8\",\n tf.qint16: \"qi16\",\n tf.qint32: \"qi32\",\n tf.quint8: \"qu8\",\n tf.quint16: \"qu16\",\n }\n\n return \"{dtype}[{shape}]\".format(\n dtype=format_map.get(dtype, dtype.name),\n shape=\",\".join(str(d) for d in shape))\n\n\ndef _simple_device(var: tf.Variable) -> str:\n device = tf.DeviceSpec.from_string(var.device)\n if device.job == \"localhost\" and device.replica == 0 and device.task == 0:\n if device.device_index == 0:\n return device.device_type\n else:\n return \"{} {}\".format(device.device_type, device.device_index)\n return device\n\n\ndef _name_scope_then_rank(var: tf.Variable):\n name_scope = \"/\".join(var.name.split(\"/\")[:-1])\n rank = len(var.shape)\n return (name_scope, -rank, var.name)\n\n\ndef format_variables(variables: Sequence[tf.Variable],\n tablefmt: str = \"orgtbl\") -> str:\n \"\"\"Takes a collection of variables and formats it as a table.\"\"\"\n rows = []\n for var in sorted(variables, key=_name_scope_then_rank):\n name = var.name.split(\":\")[0] # Remove the \":0\" suffix.\n spec = _render_spec(var.shape, var.dtype)\n trainable = str(var.trainable)\n device = _simple_device(var)\n rows.append((name, spec, trainable, device))\n return tabulate.tabulate(\n rows,\n headers=(\"Variable\", \"Spec\", \"Trainable\", \"Device\"),\n tablefmt=tablefmt)\n\n\ndef log_variables(variables: Sequence[tf.Variable]):\n \"\"\"Logs variable information.\n\n This function logs the name, shape, type, trainability, and device for a\n given iterable of variables.\n\n Args:\n variables: iterable of variables (e.g., `module.variables`, if `module` is a\n `snt.Module` instance).\n \"\"\"\n for line in format_variables(variables).split(\"\\n\"):\n logging.info(line)\n\n\[email protected]_ordering\nclass CompareById(Generic[T]):\n \"\"\"Container providing hash/eq based on object id.\"\"\"\n\n def __init__(self, wrapped: T):\n self.wrapped = wrapped\n\n def __hash__(self):\n # NOTE: `dict` has special casing to allow for hash values that are\n # sequential ints (since `hash(i: int) 
-> i`) so the using `id` as a hash\n # code (at least with `dict` and `set`) does not have a big performance\n # penalty.\n # https://github.com/python/cpython/blob/master/Objects/dictobject.c#L135\n return id(self.wrapped)\n\n def __eq__(self, other):\n if other is None:\n return False\n return self.wrapped is getattr(other, \"wrapped\", None)\n\n def __lt__(self, other):\n return id(self.wrapped) < id(getattr(other, \"wrapped\", None))\n"
] | [
[
"tensorflow.device",
"tensorflow.executing_eagerly",
"tensorflow.Variable",
"tensorflow.DeviceSpec.from_string",
"tensorflow.autograph.to_graph"
]
] |
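sonnet/src/utils.py is largely self-contained; replicate in particular needs no TensorFlow, so it can be tried standalone. The logic below is copied from the row (type annotations trimmed), with example calls added:

import collections.abc

def replicate(element, num_times, name):
    # scalars and length-1 sequences are broadcast to num_times-tuples
    if not isinstance(element, collections.abc.Sequence):
        return (element,) * num_times
    elif len(element) == 1:
        return tuple(element * num_times)
    elif len(element) == num_times:
        return tuple(element)
    raise TypeError(
        "{} must be a scalar or sequence of length 1 or sequence of length {}."
        .format(name, num_times))

print(replicate(3, 2, "stride"))       # (3, 3)
print(replicate([3], 2, "stride"))     # (3, 3)
print(replicate((1, 2), 2, "stride"))  # (1, 2)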
MaxRobinson/DistributedQMemory | [
"9714f46c367423d22da22958ea37f1b6c6d6a1a2"
] | [
"DistQL/results/resultsScript.py"
] | [
"import numpy as np\n\nimport matplotlib.pyplot as plt\n\n\n# one of 4 agents\n# y = [-308, -875, -236, -371, -596, -722, -245, -371, -812, -866, -371, -551, -569, -236, -992, -254, -245, -506, -299, -542, -308, -281, -641, -344, -218, -317, -794, -488, -245, -236, -227, -290, -236, -450, -254, -641, -389, -1010, -452, -317, -245, -281, -1253, -290, -281, -389, -281, -236, -263, -245, -317, -254, -272, -254, -857, -272, -290, -695, -299, -929, -218, -1208, -272, -299, -299, -1019, -389, -353, -15, -281, -398, -272, -263, -245, -299, -299, -254, -569, -245, -326, -209, -272, -416, -353, -326, -245, -335, -254, -263, -668, -281, -245, -183, -470, -263, -308, -281, -263, -254, -272, -245, -263, -659, -398, -470, -112, -326, -245, -263, -281, -227, -299, -650, -245, -281, -254, -245, -254, -398, -236, -272, -263, -281, -461, -254, -254, -254, -227, -308, -33, -245, -227, -236, -272, -281, -245, -218, -245, -254, -236, -227, -389, -326, -227, -245, -218, -245, -569, -227, -195, -245, -236, -176, -254, -290, -299, -136, -272, -236, -902, -434, -206, -236, -99, -99, -24, -15, -254, -263, -139, -51, -263, -261, -254, -290, -24, -23, -69, -236, -245, -122, -172, -44, -245, -26, -71, -39, -8, -110, -509, -126, -53, -281, -176, -227, 9, -533, -236, -31, -20, -272, -263, -272, -236, -95, -245, -290, -49, -29, -234, -902, -713, -236, -27, 9, -37, -8, -281, -236, -35, -13, -13, -1, -146, -227, -245, -434, -263, -211, -4, -16, -299, 1, -245, -245, -272, -76, -18, -299, -236, -236, -130, 3, -39, -63, -7, -227, -272, -245, -299, -12, -12, -218, -33, -245, -36, -29, -133, -254, -173, -127, -227, -47, -245, -69, -29, -254, -1, -254, -20, -209, -31, -15, -236, -29, 6, -9, -122, -40, -254, -33, -6, -227, -46, 6, -212, -1, -23, -19, -3, -35, -169, 1, -2, -254, -19, -69, -29, -218, 6, -218, 0, -236, -227, 2, -25, -38, 8, -218, -236, -263, -85, -15, -66, -5, -14, -17, -21, 10, -272, -254, -209, -10, -54, -51, -6, -93, -20, -2, 4, 4, -254, -227, -13, -245, -61, -200, 3, 1, -136, -28, -47, 1, -48, -7, 9, -7, 9, -41, -82, -153, -65, -17, -118, -40, 12, -2, 5, -23, -227, 13, -8, 8, 5, -236, -72, -18, -12, 4, -24, 11, -8, -227, -25, -4, -5, 13, -263, -4, -8, -33, 4, -254, 2, -245, -45, -2, -213, -13, -149, 13, -22, 7, -43, 8, 6, -80, 14, -7, -10, -14, -15, -263, 5, -29, -263, -6, 8, -73, -3, -83, 8, 8, -32, 2, -26, 4, -40, 12, -56, 6, 0, -34, -22, -18, -2, -6, -1, 10, -12, 8, -3, -12, 7, -25, -140, -41, -54, 4, -16, 1, 2, -49, 7, 7, -1, -38, -90, 7, -11, 10, -56, -33, -46, -76, 4, -69, 11, 7, -54, -64, 7, -8, -17, 9, -10, 1, -2, 10, 4, -5, -14, -26, 1, 10, 15, -58, -22, -3, -11, -21, -3, 2, -7, -11, -50, 8, -74, 0, -14, 10, -5, -16, -16, 11, -38, 4, -14, 8, 13, -45, -281, -10, -16, -38, -20, 11, 9, 7, 10, -23, -20, 7, 5, 10, 10, 6, -33, 7, -4, -2, -9, -77, 10, 2, -23, 10, 2, -78, 9, 8, -10, 3, 8, -16, 12, 5, 8, 5, -4, -59, -133, -2, -19, 8, -25, -6, -2, 7, 10, 11, -3, 10, 4, 8, -17, -12, -67, -4, -51, 9, 5, -6, -5, -74, 5, 0, -30, -109, 3, 7, -9, 7, 5, -8, -45, 4, 3, 0, 8, -10, -4, 5, -11, 6, 8, -1, 0, -4, 11, -11, 11, 11, 2, 7, 7, -25, 10, -105, 8, 9, -1, 4, 4, -56, -16, 9, 9, 6, -21, 12, 7, 5, 9, -7, -12, 9, -7, 1, -4, -13, 14, 12, -10, 9, -5, 1, 7, -43, -10, -14, -76, 5, 9, -12, 8, -40, 8, 6, -12, 9, 3, 0, 9, 9, -34, 6, 2, 10, -4, -58, 0, -18, -49, -5, 8, -4, -8, 6, -9, 11, -30, 5, -47, -13, 3, -93, 4, 2, -8, -8, 7, -1, 7, 4, -9, -10, 8, 2, 11, -2, 3, 5, 7, 7, 5, 5, 2, 4, -9, -3, 7, 8, -30, -12, 10, 15, -6, 5, -3, 7, 7, 9, 6, 10, -6, -2, -2, 7, -41, 3, 7, -2, 3, 10, 13, 8, 6, -10, -7, 0, 6, -5, -46, -15, 11, 6, 1, 
9, 11, 4, -46, 2, 11, -16, 12, -3, 11, -14, -31, 4, -9, -6, -9, 9, -24, -9, 4, -2, -40, -6, 2, 7, 6, 0, 6, -104, 7, -1, 13, 10, -2, 4, -21, 9, 0, 5, -7, 2, 9, 11, -11, 12, -7, -5, -1, 5, 2, -9, 8, 10, 8, -6, 11, 5, 15, 2, 5, 5, 7, -6, -10, 4, 0, 0, 5, -3, 10, -20, 0, 12, -2, 8, 2, 2, -2, -1, 6, -12, 3, 4, 5, 4, -4, 10, 8, -3, 4, 8, 1, 8, -21, -2, 10, 7, 15, 10, -1, 6, 9, 9, 9, 4, 3, 11, 10, -2, 2, -29, 1, 1, 7, -6, -3, 6, 8, 5, 0, -8, 10, 13, 1, -13, -43, 1, 8, 12, 9, 8, 8, 9, 4, 11, 9, 6, 9, -93, 4, -3, 12, -26, 2, 3, 0, 12, 10, 9, 3, 6, 0, 7, -12, 7, 11, -7, -16, -19, 9, 0, 10, 5, 9, 8, 8, -63, 5, 8, -5, 9, -11, -3, 10, 11, 4, 1, -10, -5, -3, -7, 6, 14, 6, 2, 0, 8, 10, 1, 13, 6, -2, -40, 10, -1, 10, 5, 7, 3, 5, 7, 11, 9, 8, 11, 10, 8, 2, 6, 11, 5, -8, 14, -4, 11, 6, 4, 5, 5, 0, 8, 9, 10, 4, 6, 4, 3, 3, 1, 10, 12, 0, -16, 9, 14, 9, -1, 4, -7, 6, -28, 13, 6, -3, 7, 5, 11, 4, 6, 7, 3, 2, 1, 10, -13, 12, 10, 2, -2, 4, 0, 4, -5, -9, -12, 7, 2, 6, 10, -20, 4, -7, 10, 1, 9, -9, 0, -8, 9, 7, 9, 1, 5, 12, 7, 14, -6, -1, -11, 1, 9, 4, 7, 8, -24, 2, 9, 2, 9, 9, -8, -6, 5, 11, 13, 9, -4, 6, 4, 4, -19, -10, 7, 0, 5, 3, 6, 3, -8, 0, 6, 9, 4, 0, -9, 12, -3, 0, -2, 3, 4, -8, 3, 8, 7, 6, -1, 7, 4, 14, 1, 5, 1, 0, 0, 7, 4, 8, -5, 9, -14, -1, -6, 11, -18, 4, 13, 11, 10, 9, 9, 5, 6, 3, 7, 3, 4, -11, 7, 3, 10, 6, 2, 5, 7, 9, -9, 6, 4, 11, -3, 8, 8, -5, 8, -7, 5, 1, 12, 8, 9, -53, 6, 12, -7, 7, 10, 11, -5, 3, 5, 7, -6, 9, 4, 8, 11, 14, 1, 5, 9, 8, 10, 5, 10, 5, 7, 5, 9, 10, 9, 12, -5, 11, 10, 9, 14, -28, 5, 1, 11, -1, -15, 10, 12, -12, 15, -1, 7, 12, 9, 10, 1, 10, 8, 11, 12, -5, 3, 10, -9, 12, 10, 2, 4, 9, 10, 12, -11, -5, -7, 12, 12, 9, 5, -8, 6, 10, 1, 11, 7, 5, 11, 3, 9, 2, 0, -14, -5, 7, -3, 12, 13, 8, 5, 8, 6, 6, -4, -1, 3, 10, 0, 0, 1, -7, -5, 5, 7, 8, 10, 3, 4, 5, 0, 7, 12, -1, 10, 4, -11, 0, 12, 5, 7, 6, 0, 6, 3, 1, 10, -2, 14, 5, 6, 10, 11, 2, 9, 15, 8, 11, 8, 2, 6, 0, 10, 6, -2, 7, 4, 7, 5, 4, 11, 7, 9, 0, -1, 5, 2, 4, 9, 4, 0, 8, 6, 1, 6, 11, 9, 5, 8, 4, 4, 2, 12, 8, 9, -3, 3, -1, 8, 6, 8, -1, 6, 7, 13, 12, -22, 1, 9, 10, 4, 10, 7, 7, 3, 7, 4, 4, 7, 6, 7, 4, 7, 7, 7, 14, 0, 7, 10, 2, 13, 8, -5, -13, -6, 9, 3, -1, 0, 14, 11, 6, 7, 10, 11, 1, -1, 7, 10, 3, 8, 6, 8, -5, 7, 10, -2, 12, -2, 7, 8, 2, -1, 1, 6, -8, 8, 8, 9, 6, 9, 7, 1, 10, 5, 6, 5, 10, -1, 7, 2, 3, 4, 0, 2, 4, 4, 2, -3, 7, 2, 2, 2, 6, 6, 10, 8, 11, 15, 3, -3, 6, 8, 8, 12, 0, 1, 8, 3, 13, 10, 8, 9, 10, 5, 7, 4, 7, 11, 8, 7, 3, -1, -9, 9, -8, -4, 2, 13, 4, 7, 8, -3, 8, 9, 13, 9, 4, 7, 8, 2, 10, -4, 5, 8, 2, 4, 8, 11, 2, 8, 2, 1, 13, 2, 6, 9, 4, 5, -3, 5, 6, 10, -5, 3, 9, 10, -15, 11, 9, 8, 0, 8, 11, 8, 2, 3, 9, 11, 6, 10, 3, 2, 7, 10, 6, 7, 8, -6, 5, 3, 7, 7, 5, 8, 3, 8, 9, 11, 2, -6, 3, 10, -2, 7, 3, 11, 6, 5, 7, 8, 10, 5, 6, 14, 7, -5, 14, 10, 1, 6, -8, 10, 10, 8, 4, 7, 8, 5, 6, 10, 8, 6, 9, 2, 8, 6, 9, 12, 10, 3, 8, 0, -1, 13, 7, -6, 12, -15, 10, 9, 8, 8, 3, 5, -1, 5, 1, 6, -19, 11, 7, 4, 12, 6, 10, 7, 5, -1, 9, 6, 0, 8, 7, 0, 1, 5, -3, 1, 4, 7, 3, 11, 10, 4, 4, 9, 12, 11, 4, 3, 7, 9, 10, 6, 7, 8, 7, -1, 8, 7, 5, 4, 4, 10, 1, 1, 11, -2, 10, 10, 4, 7, 1, 10, 6, 1, 7, 12, 13, 10, 2, 13, 11, 5, -7, -21, 10, -1, 12, 7, 6, 2, 8, 7, 12, 9, 7, 7, 6, 6, 7, 4, 12, 9, 1, 3, 5, 1, 8, 10, 5, 2, 7, 6, 5, 8, 3, 8, -1, 8, 7, 1, 10, 4, 11, 0, 10, 7, 10, 10, 6, 7, 13, 9, 6, 7, 11, 7, 5, 4, 7, 6, 8, 6, 10, 8, 6, 11, 8, 12, 9, 9, 7, 7, 7, 7, 8, 4, 10, 4, 5, 14, 7, 0, 7, 9, 6, 7, 5, 5, 9, 7, 8, 5, 8, 5, 8, 15, 7, 9, 4, 4, 10, 8, 9, 8, 10, 6, 7, 5, 4, 0, 3, -3, 12, 8, 10, 11, 6, 1, 8, 7, 9, 3, 6, 11, 3, 4, 9, 3, 5, 6, 9, 5, 10, 7, 9, 3, 12, 11, 0, 6, 5, 5, 
5, 11, 4, 10, 2, -27, 9, 7, 2, 6, 7, 11, 9, 6, 7, 10, 6, 12, 10, 14, 8, 5, 2, -7, 10, 8, 8, -3, 8, 6, 12, 12, -2, 3, 4, 7, 8, 9, 11, 8, 4, 8, 8, 9, 3, 10, -5, 12, 10, 3, 9, 9, 7, 5, 8, 10, 14, 11, 10, 6, 11, 6, -1, 6, 11, 13, 6, 13, 10, 5, 6, 10, -3, 5, 5, 7, 7, 2, -3, 8, 10, -2, 12, 7, 7, 4, 9, 9, 7, 9, 4, -1, 7, 14, 10, 5, 11, 6, 9, 9, 8, 4, 9, 4, 11, 8, 9, 5, 7, 4, 4, 10, 6, 2, 6, 5, 12, 7, 9, -1, 11, 3, 8, 10, 7, 4, 6, 10, 7, 7, 13, 6, 9, 7, 3, -3, 7, 2, 5, 2, 8, 7, 8, 4, 14, 7, 3, 13, 3, 3, 7, 9, 1, 1, -4, 4, -18, 7, 5, 7, 12, 3, 8, 9, 8, 11, 5, 12, 9, 11, 5, 7, 9, -1, 8, 8, 1, 7, 6, 5, 6, 7, 1, 5, 2, 7, 7, 10, 1, 6, 5, 10, 8, 8, 7, 9, 2, -1, 5, 0, 6, 9, 6, -2, 7, 7, 4, -3, 8, 7, 5, 8, 8, 10, 6, -10, 7, -3, 7, 9, 10, 10, 15, 6, 11, 5, 5, 7, -3, 10, 9, 8, 12, 10]\n\n# 1 agent\n# y = [-956, -668, -659, -533, -317, -650, -821, -965, -749, -740, -407, -281, -578, -920, -461, -578, -713, -263, -434, -299, -425, -452, -632, -438, -416, -830, -515, -227, -308, -245, -344, -461, -587, -344, -641, -362, -731, -461, -488, -272, -227, -668, -272, -254, -263, -245, -362, -236, -218, -281, -434, -254, -461, -272, -623, -272, -236, -245, -227, -236, -245, -497, -236, -254, -299, -263, -254, -236, -236, -416, -263, -245, -281, -443, -72, -272, -245, -23, -245, -254, -326, -272, -227, -236, -776, -389, -569, -236, -272, -362, -452, -236, -263, -281, -497, -263, -14, -263, -245, -227, -650, -308, -254, -281, -281, -227, -254, -254, -254, -15, -290, -236, -263, -263, -281, 13, 13, -31, -254, -245, -254, -245, -299, -425, -263, -254, -245, -281, -254, -97, -632, -272, -281, -13, -48, -27, -245, -245, -281, -290, -407, -254, -207, -61, -344, -254, -3, -18, -263, -133, -326, -227, -49, -236, -254, -695, -263, -290, -362, -272, -19, -5, -2, -299, 13, -254, -209, -353, -245, -299, -263, -245, -272, -272, -13, -254, -87, -236, -254, -20, -218, -63, -272, -272, -263, -236, -263, -290, -263, -9, -506, -245, -281, -767, -245, -263, -1127, -263, -254, -281, -227, -290, -227, -60, -44, -236, -236, -290, -758, -245, -245, -245, -209, -218, -245, -263, -127, -281, -236, -236, -227, -227, -245, -96, -290, -44, -218, -218, -272, 4, -26, -398, -272, -68, 10, -7, -299, -236, -236, -245, -263, -57, -236, -254, -290, -227, -31, -272, -245, -236, -263, -218, -32, -245, -254, -86, -91, -227, -114, -254, -71, -254, -245, -35, -236, -32, -227, -247, -108, -227, -263, -88, -254, -281, -218, -272, -245, -272, -227, -209, -209, -31, 5, -80, -272, -236, -272, -272, -20, -227, -281, -218, 6, -470, 1, -263, -245, 9, -10, -218, 8, -245, -254, -30, -272, 11, -227, -281, -290, -245, -272, -272, -245, -12, 3, -227, -218, -254, -245, -236, -254, -227, -227, -245, -245, -26, -263, -263, -245, 14, -69, 0, -64, -66, -254, -263, -272, -227, -218, -272, -272, -263, -254, -236, -184, -68, 5, -209, 3, -245, 2, -56, -254, -29, -227, -254, -245, -281, -11, -245, -3, -299, -12, -227, -218, -9, -16, -245, -272, -47, -227, -227, -263, -245, -254, -58, -236, -26, -218, -209, -28, -227, -95, -236, -209, -254, -236, -254, 8, -263, -236, -236, -245, -254, -263, -254, -218, -236, -263, -209, 1, -245, 4, -254, -67, -227, 0, -254, -272, -209, 6, 9, -263, -121, -13, -227, -227, 6, -19, -218, -272, 6, -218, -245, -5, -263, -263, -254, -254, -236, -263, -245, -9, -2, -218, -33, -59, -236, 11, -2, 8, -35, -227, -14, -2, -272, 9, -15, -263, -272, -227, -2, -236, -218, -245, -245, -25, -236, -263, 11, -272, 4, -218, -227, 1, -227, 8, -26, -15, -227, -227, -236, -263, -254, -227, -236, -218, -263, -9, -4, 9, -12, 10, -236, -218, -245, -254, -6, 13, -236, -263, 
-227, -3, -227, -227, -236, -245, -245, -254, -209, -254, -227, -263, -254, -236, 1, 7, -254, -31, -227, 9, -1, -227, -254, -42, 7, 3, -5, -236, -4, -227, 11, -209, -227, -10, -263, -227, -3, -209, -227, -2, -227, -19, 4, -227, -10, -30, 8, -15, -218, -116, -227, -227, -254, -227, 9, 7, -2, 0, -209, -11, -1, -218, -254, -236, -263, -236, 2, -10, -227, -245, -272, 4, 7, -245, 3, -245, -236, 13, 3, -236, -1, -227, -236, -1, -245, -236, -263, -236, -227, -218, -227, -227, -218, -218, -245, -227, -263, 13, -245, -16, -236, -245, 5, 2, -227, -6, -3, -236, -3, -227, -5, -281, 4, 7, 3, -209, -227, -218, -218, -2, -227, -2, -272, -11, -236, -272, -245, 10, 0, -209, -272, -236, 3, -218, 10, -254, -209, -218, -218, 10, -15, -236, 11, -236, -8, -218, 2, -236, -254, -13, -281, -12, -209, -227, -209, -236, -227, -236, -227, -227, -218, -7, -263, -218, -227, -254, 2, -236, 8, 0, -209, -245, 0, -18, 1, -227, -227, -227, 3, -263, -218, 6, -8, -245, -272, -180, -2, -227, -227, -254, -8, 1, -236, -166, -2, -227, -227, 6, -171, 10, -245, 4, -227, -227, 7, -1, -183, 5, 7, -4, -218, -11, 12, 5, -227, -2, -245, -236, -135, 11, -3, 8, 7, 10, -15, -254, -118, -263, -12, 1, 10, 8, -227, -212, -37, -8, 12, 2, -227, -1, -227, -218, -227, -209, -209, -209, -6, 5, 5, 7, 1, 1, -200, -218, -254, 4, -2, 3, 6, -4, 6, 7, -34, -173, 10, -227, 1, -16, -218, 11, -4, 3, 13, -4, -1, -227, 5, 11, -245, -128, -209, -227, 6, 7, -227, -263, -236, -123, -236, 8, -218, -209, -8, -3, -81, -209, -6, -227, -2, -227, -8, -90, -218, -236, -7, 8, -227, -209, -209, 4, -227, -200, -3, -152, 4, -200, 10, -1, -209, -245, -7, -113, -218, 8, -39, -254, 5, -236, -28, 4, 5, -3, -2, -209, -161, -227, -218, 1, 9, -28, -23, -33, -227, -86, -236, -2, 1, -28, -227, -227, -218, -15, -209, -227, 3, 8, -245, -7, -245, -245, -218, -73, 11, -32, -227, -6, 2, -97, 9, 6, -12, -245, -4, -47, 8, -44, 5, -80, -57, -218, -218, -105, -53, -209, -14, -18, -1, -102, 6, 7, -227, -236, -5, 2, -101, -13, -218, -2, -13, 7, -41, -99, 6, -35, 9, -120, 11, 6, -227, 15, -22, -120, -28, -106, 4, -161, -1, -17, 4, -115, -42, -38, 3, 9, 10, -218, -29, -7, -7, 14, 6, -93, 4, -136, 1, -209, 1, -159, 0, 3, 10, 12, -5, 9, 5, -17, 14, 4, -87, -99, 12, 8, -53, 0, -15, 13, 6, 6, -165, -113, -140, 11, 14, 8, 9, 11, 7, -19, -170, 5, -117, 6, 6, -19, -27, 6, -33, -13, 3, -190, -3, -57, -178, -97, -4, 10, 10, -1, 7, -12, -124, 9, 5, 11, -10, -14, 10, 5, -2, -71, -14, 7, -8, 13, -5, 7, -254, -17, -130, 6, -188, 3, -149, -16, -51, 9, 11, -45, -95, 9, 3, 12, -3, -227, -91, -5, 8, 10, 5, -2, -5, 4, -55, 11, 9, 6, -13, 4, -65, 0, 3, -15, -93, -62, 8, 3, -16, 7, -5, -64, 8, 11, -2, 11, 6, -56, -19, -1, 9, -3, -72, -13, 10, -107, 10, -9, 11, 11, -79, 6, -12, 7, 5, 6, -92, -17, 0, -131, -153, 9, 14, 6, 15, -73, 2, -1, 4, 9, 0, -14, 6, -6, -34, 11, 8, -12, 8, 9, -13, 11, -1, 1, 4, -76, -9, 5, 6, 11, -116, 5, -10, 8, 12, -30, 13, 10, 7, 12, -38, -4, -5, 13, 9, 7, -25, 0, -2, 10, 7, 7, -83, 11, -23, 11, 3, -6, -53, -47, -169, -2, 6, 4, 8, -3, 10, -31, 9, -6, -54, 3, 4, 9, -150, -10, 3, -44, 1, 6, 10, 10, -49, 6, 11, 3, 7, -23, -4, 8, 11, -46, -25, 11, 7, 8, -5, 1, 8, 8, 5, 10, -95, 12, -6, -10, -4, -6, 7, 8, 12, -57, -3, 11, 5, 11, 9, -4, -34, 6, 4, 5, 6, -2, 9, -3, 4, 5, 10, 9, -6, 11, 11, 9, 2, 6, 6, 8, 6, 9, -4, -22, 5, 8, 5, 7, 6, -6, -37, 9, 5, -8, -4, 3, -5, 5, 5, 0, -6, -9, -19, 2, 8, 13, -16, -15, -46, 11, -3, -30, 4, -57, 9, -37, 11, 11, 0, -6, 6, 12, 6, 4, -89, -19, 4, 12, -43, 7, -4, 4, 10, -62, 3, 6, -22, 9, -3, -13, 1, 11, 6, 9, -39, 0, 9, 9, -36, 11, -9, 8, 5, 3, 11, 4, 3, 0, -57, 5, 
-8, -11, 8, 7, 5, -6, 7, 9, -3, 1, 12, -35, 7, 6, 7, 5, 3, -3, 6, 8, 10, 8, 5, -7, 6, 6, 0, -1, 0, 7, -4, 10, 7, 8, 3, 9, 3, 6, 5, -10, 7, -8, -62, 4, -12, 4, 7, 14, 9, -4, 11, -4, -21, 4, 12, 9, 8, -4, 12, -6, 11, -1, -16, 12, 1, 14, -25, -5, 5, -17, 6, 9, -1, 10, 11, 11, 11, -17, 10, -9, -3, 10, 3, 6, -7, -20, 10, -9, 13, 7, 14, 4, 0, 5, 7, -21, 2, 0, 11, 7, 11, 9, 10, 7, 4, -1, 11, 10, 1, 11, 8, 9, 11, 3, 7, -3, -22, 8, -14, 1, 6, 9, 5, 7, 6, -18, -40, 7, 3, 5, 1, 1, 3, 11, -3, -6, 6, 14, 7, 13, 11, 15, 0, -6, -23, 2, 4, 6, 5, 4, 5, -20, 7, -41, 10, 7, -16, 3, -1, 6, 13, 4, 7, -21, -26, 6, -43, 7, -1, 7, 12, 7, 5, 9, 7, 9, -24, 7, -13, 3, 5, 4, 10, -10, 8, 5, 9, 10, 7, -24, 6, 9, 1, 5, -14, -6, 8, 6, 11, 8, -8, 3, 11, 6, 6, 11, 1, 12, 5, 7, 9, 5, 3, 7, 6, 8, -25, 11, 4, 5, -2, 8, 7, -3, 0, -22, 4, -1, 6, 5, 11, 11, 6, -6, 7, -6, 7, 11, 10, 6, 8, 15, -4, -11, 10, 8, 3, -5, 8, 9, 5, 1, 6, 9, -12, 10, 12, 3, 9, 14, 4, 3, -23, -5, 6, -3, -28, 1, 7, 9, 1, 6, 5, 2, 5, 6, 6, 7, 10, 12, 7, 9, 10, 5, 6, -1, 8, 13, 9, 7, 2, 7, 8, 8, 8, -3, 13, 9, 8, 13, 10, -1, 10, 5, -16, 10, -21, 5, 6, 3, 6, -1, 11, 8, 5, -8, 11, 8, -11, -1, 3, -5, 7, 9, -16, 11, 5, 5, 10, 8, 7, 6, 6, 2, 11, 7, 11, 6, 8, 7, 12, 8, 12, 9, -4, -15, 9, 5, 10, -39, 8, 3, -12, 4, -3, 6, 8, 11, 3, 3, 11, 11, 10, 8, 2, 6, 6, 14, 9, 6, 0, 12, 7, 7, 11, 11, 9, -1, 9, 9, 9, 15, 2, 9, 11, 2, 1, 5, 10, 5, 8, -5, -8, 13, 4, 5, 8, 6, 11, 7, -5, 11, 9, 7, 7, 10, -7, 14, 3, -4, 3, 5, 2, 12, 11, 7, 3, 9, 9, 15, -1, 10, 7, 13, 8, 12, 6, 6, 8, 7, 11, 6, -4, -5, 9, 7, 9, 4, 15, 5, 8, 6, 9, 9, 14, 6, 9, 12, 12, 5, 14, 6, 11, -1, 6, 6, 5, 4, 3, 8, -4, -2, 3, -5, -3, 0, 6, 9, 5, 4, 3, 12, 9, -12, 4, 5, 8, 11, 4, 7, 7, 8, 7, 3, 10, 5, 11, -1, 10, 3, 4, 11, 7, 5, -1, 11, 9, 9, 5, 4, 0, -4, 7, 5, 8, 6, -2, 13, 6, 6, 8, 5, 3, 5, -1, 1, 5, 11, 6, 5, 6, 7, -3, 3, 4, 0, 12, 9, 8, -1, 5, 9, 10, 13, 11, 5, 11, 4, 4, 4, 10, 9, 7, 9, 9, 14, 3, 5, 6, 3, 7, -3, 9, 11, 7, 9, 4, 5, 7, 4, -3, 14, 7, 13, 4, 8, 14, 15, 6, 12, 8, 10, 9, 10, 11, 10, 11, 11, 10, 2, 8, 7, 9, 6, 12, 3, 14, 5, 5, 9, 5, 7, -14, 9, 9, 4, 7, 11, 9, 9, -1, 7, 2, 10, 8, 4, 3, 8, 8, 9, 11, 7, 9, 6, 11, 1, 3, 9, 5, 10, 6, 12, -2, 6, 6, 8, 5, 6, 7, 4, 7, 5, 0, 3, 4, 9, 7, 5, 5, 4, 10, -1, 8, 15, 3, 7, 12, 13, 11, 10, 10, 5, 6, 3, -3, 7, 4, 9, 6, 5, 6, 6, 6, 9, -6, 9, 4, -13, 9, 11, 5, 8, 6, 4, -6, 8, 10, 6, 4, 8, 6, 11, 3, 9, 13, 8, -4, 5, 6, 8, 13, 14, 8, 8, 9, 15, 11, 10, 8, 2, 11, -4, 6, 7, 7, 8, 9, 15, 10, 11, 6, 7, 7, 11]\n\n# ref for 8 agents\n# y = [-236, -281, -245, -236, -281, -254, -245, -290, -272, -290, 1, -263, -218, -245, -272, -254, -272, -254, -272, -191, -254, -236, -245, -23, 8, -2, -3, 11, -13, 12, -234, -39, 11, 7, -3, -12, 7, -184, 14, -53, -4, 4, -4, -227, -75, 9, 8, -8, -217, -112, 11, 9, -3, -3, 11, 12, 0, 11, -78, -3, 8, -5, 10, -84, 7, 7, -9, 0, 1, 7, 9, 8, 9, 10, 10, 4, 3, 12, -15, -1, 6, -3, 11, 6, 5, 4, -19, 5, 7, -2, -3, 9, 9, -58, 11, -10, -3, -3, 9, 9, 5, 13, 8, 5, 11, 7, -3, 6, -4, 7, -25, 4, 1, 11, 0, -2, 9, 6, 4, 7, 9, 8, 8, -4, 11, 7, -1, 10, -2, 10, 9, 8, -23, 5, -20, 13, -4, -2, 3, -3, 6, 14, 9, 5, 2, 14, 10, 5, -4, 5, 12, 10, 8, -3, 12, 1, 10, -13, 12, 10, 7, 10, -3, 10, 6, 12, 9, 7, 8, -8, -4, -6, 14, 8, 7, -3, 10, 8, 6, 14, 5, 11, 12, -3, -4, 9, -3, -14, -13, 6, 7, 1, 7, 11, -17, 7, 3, 8, 6, 8, 7, 12]\n\n# one of 8 agents\ny = [-398, -326, -272, -317, -740, -443, -911, -596, -245, -272, -236, -740, -254, -740, -1226, -983, -317, -317, -794, -326, -479, -362, -560, -569, -263, -218, -425, -866, -650, -263, -308, -335, -344, -263, -263, -203, -362, -263, -353, 
-254, -272, -479, -263, -245, -632, -227, -245, -380, -551, -263, -290, -299, -830, -317, -497, -317, -226, -245, -371, -704, -254, -272, -362, -245, -236, -218, -479, -236, -272, -308, -425, -254, -623, -272, 13, -281, -236, -479, -22, -62, -254, -263, -299, -236, -19, -245, -91, 1, -299, -236, -245, -20, -335, -254, -263, -443, -1109, -218, -245, -308, -272, -263, -353, -677, -299, -227, -67, -218, 3, -236, -866, -254, -281, -206, -236, -127, -72, -194, -227, 5, -151, -290, -127, 6, -8, -43, -281, -70, -245, -45, -37, -398, -171, -154, -497, -236, -290, -198, -25, -12, -245, -281, -317, -236, -254, -65, -44, -173, -39, -164, 0, -113, -11, 7, 11, -8, 7, -245, -281, -104, -150, -120, -235, -174, -39, -22, 14, -272, -293, -263, -142, -6, -14, -93, -9, -272, -4, -18, -142, -740, -254, -150, -44, -245, -380, -44, -18, -245, -60, 7, -56, -209, -10, -398, -245, -79, 11, -254, -110, 7, -103, -281, -3, 3, -254, -16, -18, -2, -281, -245, -21, -174, -30, -272, -263, -68, -62, -124, 9, 2, 10, -15, -192, -132, -48, 10, 7, 3, -146, -16, -272, -47, -3, -272, -2, -10, -47, -4, -11, 6, -26, -4, -29, 5, -73, 3, -3, -7, -81, -63, -308, 0, -236, -272, 1, -19, -25, -1, -8, 1, -263, 12, -17, -24, -254, 8, -272, -209, -236, -41, -34, -88, 13, -47, -18, -3, -12, 7, -8, -263, -1, -12, -21, -9, -67, 7, -76, -1, -12, 9, -2, -5, 13, 6, -623, 7, -6, 3, 4, -19, -3, -236, -50, -57, -1, 9, 10, 1, 6, -22, -32, -15, 5, -371, -91, -14, 1, -227, -208, -22, -48, -62, -27, -106, -14, 11, -20, -4, -97, 8, -4, 3, 3, 5, -11, 14, -34, -70, 9, 9, -69, 5, -4, -2, -83, -7, -18, -65, 10, -41, -144, -102, 12, -6, -10, -28, 13, -94, -54, 2, -11, -23, 10, -14, -30, -134, -3, 11, -2, -2, -55, 3, -31, -36, -6, -3, 11, 8, -10, 1, -4, -52, 7, 8, 10, -47, 10, 9, 8, -7, 4, 7, -9, 9, 7, -5, 6, -9, 5, -14, -20, -12, -35, 12, -59, 2, -152, -8, -5, -2, 7, -3, 9, -8, -59, -1, 6, 1, -8, 8, -5, -7, 2, -6, -69, 6, -19, -17, -21, 8, -14, -46, 5, -3, -3, -3, 11, -9, -16, 7, -123, 5, -6, 0, -7, -23, 1, 8, 3, -1, 10, -6, 6, 3, -2, -3, -15, 7, 7, 10, -3, 8, 6, 5, 8, -9, -6, 9, -4, 8, 12, 8, -3, -5, 9, 8, 7, 2, 8, 7, 1, 12, -93, 5, 10, 3, 5, 7, -63, -9, 3, 9, -10, -378, -1, -10, 5, 5, 4, -94, -4, -8, 0, 8, -17, 7, -5, 8, -3, 4, 10, 0, 7, 6, -65, 15, -6, 1, 2, -3, 12, 8, 11, 7, -4, 2, 10, -8, -7, -2, 1, 0, -3, 5, 8, 4, -9, 10, 10, -9, -13, 2, -3, 11, -42, 10, 7, -4, 3, 13, 4, -11, 9, 3, 11, 7, 7, 4, 7, 0, -5, 5, -28, -7, 10, -16, 3, 10, -14, 6, -6, 4, -1, 4, 11, 3, 4, 11, -6, 10, 11, -13, -2, -3, 11, -9, 5, -2, 1, 11, -2, -9, 8, 11, 5, -22, 2, -1, 2, -8, 5, 1, 4, -48, -8, -25, -9, 1, -6, -8, -5, 10, 2, -2, 12, 4, 10, 7, 4, 6, 5, 1, 2, 12, 3, 4, -1, -2, 8, -9, 6, 2, -2, 4, 2, 6, -69, 5, -1, -13, 4, 3, 11, 3, -14, -19, -11, 1, 11, 6, 3, -21, 11, -2, 8, -43, 7, 4, -4, -3, 6, 4, -17, 3, 5, 5, 4, -18, 8, 6, -142, -8, 7, -371, 12, 7, -2, 4, 6, 5, 2, -1, -27, 4, 7, 4, 6, 8, 2, 2, 13, 8, 3, 10, 9, 7, -7, -9, -3, 4, 5, 5, 11, 3, 0, -227, 2, 5, 10, -10, 6, 1, 5, 2, -2, 6, 5, 8, -2, 7, 1, 8, 10, 5, 0, 5, 4, 10, 7, 1, -8, 0, 13, 3, 1, 8, 11, 4, 7, -12, 7, 9, 7, 6, 8, 11, 1, 0, -7, 10, 3, 6, 0, -1, -8, 9, 10, 6, -14, 1, 10, 5, -5, 4, 5, 9, 6, 7, 4, 4, 5, 8, -12, 8, 1, 13, 9, 3, 4, 5, 9, -6, 4, 7, 10, 6, 6, 3, 0, 8, 6, 8, 8, 9, 3, -1, 8, 12, 14, 7, 8, 7, 6, -14, 12, 5, 0, -1, -7, 10, 4, -12, 3, 2, 7, 3, 6, -1, 15, 3, -200, -7, 8, -5, 1, -6, -6, 6, -38, 9, -5, 7, -1, -16, 3, 11, 13, 9, -17, 9, 5, -209, -7, 0, 9, 7, -10, -4, -3, 3, 11, 2, -6, 4, 7, 1, 9, -8, -6, 5, -14, 7, 6, -2, -7, -7, 2, 9, -6, 8, -1, 10, 12, 2, 9, -8, 7, 5, 7, 4, 10, 5, 9, 4, 11, 5, -2, 7, 8, 3, 9, 4, 
-6, 9, 0, 12, 5, 7, 6, 3, 4, 8, 9, 7, 9, 7, 12, 8, 0, 0, 8, 11, 10, 6, 8, -5, 0, -2, 7, 8, -7, 2, -4, -2, -5, -11, 4, 5, 6, -36, 8, 7, 8, 9, 6, 11, -33, 2, 8, 2, 0, 6, -6, 7, 13, 8, -8, -9, 5, 9, -3, 4, 3, 6, 7, 0, 1, 3, 8, 3, 13, 12, -14, 5, 12, -94, 9, 3, 2, -12, 12, 2, 5, 3, 6, 3, 1, 11, 6, -6, -14, 11, 1, 10, -170, -6, 8, 12, -2, 3, 9, 5, 10, 4, 7, -6, 11, -5, 9, 7, 7, -9, 7, 10, 4, 10, 8, -4, 6, 12, -5, 11, 8, 4, 5, 6, 7, -1, 4, 8, 5, -4, 1, 9, 11, 3, 13, -8, 7, 5, -5, 8, -5, 8, -2, 5, -2, 7, 8, 8, 9, 0, 6, 3, 7, 7, 10, 10, 10, 3, 2, 9, 11, 8, 10, -6, 3, 3, 7, 7, -5, 4, 9, 11, 5, 11, 3, 2, 8, 7, -7, 3, 9, 5, 8, 5, 13, 6, 5, 9, 8, 7, 0, 7, -2, 8, 4, 7, 6, 9, 3, 7, 4, 3, 8, 0, 1, -5, 13, 8, 5, 9, 10, 10, 8, 9, 11, 8, 3, 8, 7, 5, 4, 8, 6, 7, 3, 1, 11, 9, 1, 5, 9, 5, 8, 2, 10, 0, 9, 3, 5, 7, 6, 5, 9, 9, -3, 14, 6, -1, 5, 10, 3, 6, 9, 8, 9, 7, 5, 6, 7, 8, -7, 5, 6, 11, -6, 9, -8, 10, 4, -5, 8, 12, 8, 6, -7, 8, 6, 8, 12, 5, 10, -3, 7, 6, 6, 5, 2, 6, 5, 4, 10, 10, 12, 5, 3, 6, -16, 3, 6, 8, 6, 7, 6, 2, 7, 6, 1, 9, -9, 7, 2, -1, 4, 7, 0, 8, 13, 5, 3, 4, 6, 2, 6, 7, 5, -5, 5, 11, 5, 12, 6, 5, 6, 8, 8, 6, 12, 10, 12, 13, 0, 3, 3, 6, 2, 9, 8, 7, 4, 0, 7, -8, 6, 12, 5, 2, -6, 4, 12, 10, 8, 8, 0, -1, 5, 13, 15, 4, 11, 11, 8, 6, 2, 11, -21, 7, 12, 9, 3, 7, -3, 7, -6, 10, 7, -1, 10, 7, 5, 5, -5, 3, 10, 6, 4, 12, 12, 8, 4, 12, 10, 10, 0, 5, -2, -1, 12, 0, 9, -3, -8, 12, -2, 3, 6, 10, 10, -2, 5, 2, 7, -13, 5, 2, 6, 8, 9, 6, -4, 5, 10, 7, 6, 11, -7, 8, 8, -3, 8, 9, 11, -3, 5, 3, 7, 11, 9, 2, 6, 8, 3, 10, 10, 3, 8, 6, 8, 11, 7, -7, 7, 10, 7, -5, 12, 4, 9, 5, -4, 7, 7, 8, 8, 5, 9, 4, 11, 8, 5, 1, 5, 11, -5, 8, 6, 8, 9, 5, 7, 12, 5, -1, 6, 11, 8, 5, -7, 4, 10, 3, 3, 5, 7, 7, 6, 6, -11, 5, 4, 7, 3, 5, 8, 9, 10, 7, 7, 8, 8, 7, -9, 3, 8, -1, 7, -4, 11, 7, 11, 0, 6, 4, 0, 6, -13, 6, 12, 7, -9, 9, 6, 2, -4, 8, -12, 7, 10, 9, 6, -3, 3, 13, 13, 8, -1, 3, -5, 7, 7, 2, 7, 11, 10, 6, 1, -6, 5, -19, 5, 7, 4, 2, 7, 7, 6, 12, 5, 9, 5, 0, 0, 15, 12, -3, 6, 4, 7, 6, 4, 6, 10, 4, 2, 3, 2, 8, 0, 7, 5, 11, -5, 11, 5, 6, 6, 2, 9, 9, 10, -8, 2, -6, 5, 3, 12, -8, 7, 10, 5, 0, 4, -3, 4, 5, 11, 8, 3, 13, 8, 12, -1, 5, 6, 6, 5, 4, 8, 7, 9, 10, 8, -8, 8, 9, -7, 7, 4, 9, 8, 3, 10, 5, 5, 8, 13, 9, 10, 13, 6, 7, 6, 7, 7, 3, 9, 12, 6, 9, 3, 6, -9, 8, 8, -27, -4, 0, 4, 3, 8, -2, 4, 9, 9, 12, 9, -2, 2, 9, 10, 6, 6, 9, 8, 5, 7, 13, 13, 7, 7, 13, 8, -6, 7, 3, 2, 4, 5, 7, 6, 2, 5, -6, 3, 6, -6, 7, -1, -4, 8, 10, 8, 9, 8, 5, 2, 9, 7, 11, 13, 8, -7, 7, 4, 6, 10, 7, 6, 5, 6, 12, 4, 6, 9, 10, 3, 12, 8, 8, 2, 8, 6, 5, 9, 5, 6, -6, 7, 8, 5, 5, 7, 8, 10, -7, 9, 4, 4, 2, 5, 6, -2, 3, 8, 12, 11, 9, 13, 10, -6, 3, 5, 8, 6, 5, 12, -8, 8, 7, 7, 6, 2, 5, 8, 12, 11, 8, 5, 0, -2, -6, 6, 11, 8, 1, 12, 7, 11, 5, 4, 12, 6, 10, 6, 12, 6, 1, 10, 8, 8, -14, 5, 5, 6, 7, 7, 7, 4, 10, 7, 5, -7, -6, -6, 3, 6, -5, 10, -4, 5, 1, 3, 10, 4, 10, 13, 15, 11, 10, 4, 6, -1, 3, 7, 0, 5, 10, 6, 7, 5, 8, 6, 11, 5, 6, 7, 13, 6, 7, 6, 5, 3, 11, 1, 9, -5, 6, 6, 6, -4, 7, 6, 9, 6, -4, 11, 6, 3, 9, -6, 5, 3, 9, 2, 7, 6, 12, 9, 11, 6, 1, 7, 5, 8, 8, 3, 12, 5, 0, -2, 5, 7, 10, 8, -6, 4, 4, 6, 11, 4, -1, 6, 10, 6, 6, 8, 2, 4, 9, -2, 4, 12, 7, 5, 4, 7, 5, 4, 7, 7, 8, 4, 5, 5, 8, 8, 3, 6, 10, 9, 10, 6, 9, -20, 6, 7, 8, 8, 4, 1, -6, 11, 5, 7, 5, 4, 2, 9, 8, 6, 8, 8, -6, 8, 10, 7, 4, 8, 9, 7, 6, 7, 4, 7, 13, 6, -4, 6, 3, 5, 6, 9, 5, 11, 7, -4, 7, -11, 6, 4, 5, 9, 7, 3, 11, 6, 3, 8, 4, 5, 12, 11, 7, 7, 6, 5, 11, 2, 7, 5, 7, 6, 5, 9, 7, 9, 5, 0, 10, 12, 4, 2, 8, 5, 8, 8, 9, 10, 5, 6, -2, 10, 7, 3, -2, 4, 8, 5, -6, 9, 6, 10, 10, 9, 6, 10, 11, 2, 6, 10, 4, 8, 4, 4, -1, 7, 7, 5, 9, 12, 10, 
4, 10, 8, 8, 5, 5, -3, 6, 6, 1, 7, 6, 7, 7, -5, 0, 5, -7, 14, 2, 2, 3, 11, -4, 5, 8, 13, 6, -2, 5, 8, 12, 5, 2, 10, 5, 2, 3, 10]\n\n\nx = np.arange(len(y))\n\nplt.plot(x, y)\nplt.show()\nplt.close()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close"
]
] |
gony0/buffalo | [
"f93dc4f95a526ec711cd605cc39c6ff347d976ed"
] | [
"tests/parallel/test_algo.py"
] | [
"# -*- coding: utf-8 -*-\nimport os\nos.environ['OMP_NUM_THREADS'] = '1'\nimport time\nimport unittest\nfrom itertools import combinations\n\nimport numpy as np\n\nfrom buffalo.algo.als import ALS\nfrom buffalo.algo.w2v import W2V\nfrom buffalo.algo.bpr import BPRMF\nfrom buffalo.misc.log import set_log_level\nfrom buffalo.data.stream import StreamOptions\nfrom buffalo.data.mm import MatrixMarketOptions\nfrom buffalo.parallel.base import ParALS, ParBPRMF, ParW2V\nfrom buffalo.algo.options import ALSOption, BPRMFOption, W2VOption\n\nfrom .base import TestBase\n\n\nclass TestAlgo(TestBase):\n def load_text8_model(self):\n if os.path.isfile('text8.w2v.bin'):\n w2v = W2V()\n w2v.load('text8.w2v.bin')\n return w2v\n set_log_level(3)\n opt = W2VOption().get_default_option()\n opt.num_workers = 12\n opt.d = 40\n opt.min_count = 4\n opt.num_iters = 10\n opt.model_path = 'text8.w2v.bin'\n data_opt = StreamOptions().get_default_option()\n data_opt.input.main = self.text8 + 'main'\n data_opt.data.path = './text8.h5py'\n data_opt.data.use_cache = True\n data_opt.data.validation = {}\n\n c = W2V(opt, data_opt=data_opt)\n c.initialize()\n c.train()\n c.save()\n return c\n\n def get_ml100k_mm_opt(self):\n data_opt = MatrixMarketOptions().get_default_option()\n data_opt.input.main = self.ml_100k + 'main'\n data_opt.input.uid = self.ml_100k + 'uid'\n data_opt.input.iid = self.ml_100k + 'iid'\n data_opt.data.use_cache = True\n data_opt.data.path = './ml100k.h5py'\n return data_opt\n\n def test01_most_similar(self):\n set_log_level(2)\n data_opt = self.get_ml100k_mm_opt()\n opt = ALSOption().get_default_option()\n opt.d = 20\n opt.num_workers = 1\n als = ALS(opt, data_opt=data_opt)\n als.initialize()\n als.train()\n pals = ParALS(als)\n random_keys = [k for k, _ in als.most_similar('49.Star_Wars_(1977)', topk=128)]\n random_indexes = als.get_index_pool(random_keys)\n naive = [als.most_similar(k, topk=10) for k in random_keys]\n topks0 = [[k for k, _ in result] for result in naive]\n scores0 = np.array([[v for _, v in result] for result in naive])\n self.assertEqual(scores0.shape, (128, 10,), msg='check even size')\n scores0 = scores0.reshape(len(naive), 10)\n pals.num_workers = 1\n topks1, scores1 = pals.most_similar(random_keys, topk=10, repr=True)\n topks2, scores2 = pals.most_similar(random_indexes, topk=10, repr=True)\n\n for a, b in combinations([topks0, topks1, topks2], 2):\n self.assertEqual(a, b)\n for a, b in combinations([scores0, scores1, scores2], 2):\n self.assertTrue(np.allclose(a, b, atol=1e-07))\n\n def test02_most_similar(self):\n set_log_level(1)\n data_opt = self.get_ml100k_mm_opt()\n opt = ALSOption().get_default_option()\n opt.d = 20\n opt.num_workers = 1\n als = ALS(opt, data_opt=data_opt)\n als.initialize()\n als.train()\n als.build_itemid_map()\n pals = ParALS(als)\n\n all_keys = als._idmanager.itemids[::]\n start_t = time.time()\n [als.most_similar(k, topk=10) for k in all_keys]\n naive_elapsed = time.time() - start_t\n\n pals.num_workers = 4\n start_t = time.time()\n pals.most_similar(all_keys, topk=10, repr=True)\n parals_elapsed = time.time() - start_t\n\n self.assertTrue(naive_elapsed > parals_elapsed * 3.0)\n\n def test03_most_similar(self):\n set_log_level(1)\n data_opt = self.get_ml100k_mm_opt()\n opt = BPRMFOption().get_default_option()\n opt.d = 20\n opt.num_workers = 1\n bpr = BPRMF(opt, data_opt=data_opt)\n bpr.initialize()\n bpr.train()\n bpr.build_itemid_map()\n parbpr = ParBPRMF(bpr)\n\n all_keys = bpr._idmanager.itemids[::]\n start_t = time.time()\n 
[bpr.most_similar(k, topk=10) for k in all_keys]\n naive_elapsed = time.time() - start_t\n\n parbpr.num_workers = 4\n start_t = time.time()\n parbpr.most_similar(all_keys, topk=10, repr=True)\n parbpr_elapsed = time.time() - start_t\n\n self.assertTrue(naive_elapsed > parbpr_elapsed * 3.0)\n\n def test04_text8_most_similar(self):\n set_log_level(1)\n model = self.load_text8_model()\n par = ParW2V(model)\n\n model.opt.num_workers = 1\n all_keys = model._idmanager.itemids[::][:10000]\n start_t = time.time()\n [model.most_similar(k, topk=10) for k in all_keys]\n naive_elapsed = time.time() - start_t\n\n par.num_workers = 4\n start_t = time.time()\n par.most_similar(all_keys, topk=10, repr=True)\n par_elapsed = time.time() - start_t\n\n self.assertTrue(naive_elapsed > par_elapsed * 3.0)\n\n def test05_topk_MT(self):\n set_log_level(2)\n data_opt = self.get_ml100k_mm_opt()\n opt = ALSOption().get_default_option()\n opt.d = 20\n opt.num_workers = 1\n als = ALS(opt, data_opt=data_opt)\n als.initialize()\n als.train()\n\n als.build_userid_map()\n all_keys = als._idmanager.userids\n start_t = time.time()\n naive = als.topk_recommendation(all_keys, topk=5)\n naive_elapsed = time.time() - start_t\n\n pals = ParALS(als)\n pals.num_workers = 4\n start_t = time.time()\n qkeys1, topks1, scores1 = pals.topk_recommendation(all_keys, topk=5, repr=True)\n par_elapsed = time.time() - start_t\n self.assertEqual(len(qkeys1), len(naive))\n for q, t in zip(qkeys1, topks1):\n self.assertEqual(naive[q], t)\n self.assertTrue(naive_elapsed > par_elapsed * 1.5)\n\n def test06_topk_pool(self):\n set_log_level(2)\n data_opt = self.get_ml100k_mm_opt()\n opt = ALSOption().get_default_option()\n opt.d = 20\n opt.num_workers = 1\n als = ALS(opt, data_opt=data_opt)\n als.initialize()\n als.train()\n pals = ParALS(als)\n\n pool = np.array([i for i in range(5)], dtype=np.int32)\n als.build_userid_map()\n all_keys = als._idmanager.userids[::][:10]\n naive = als.topk_recommendation(all_keys, topk=10, pool=pool)\n qkeys1, topks1, scores1 = pals.topk_recommendation(all_keys, topk=10, pool=pool, repr=True)\n for q, t in zip(qkeys1, topks1):\n self.assertEqual(naive[q], t)\n\n def test07_topk_pool(self):\n set_log_level(2)\n data_opt = self.get_ml100k_mm_opt()\n opt = BPRMFOption().get_default_option()\n opt.d = 20\n opt.num_workers = 1\n model = BPRMF(opt, data_opt=data_opt)\n model.initialize()\n model.train()\n par = ParBPRMF(model)\n\n pool = np.array([i for i in range(5)], dtype=np.int32)\n model.build_userid_map()\n all_keys = model._idmanager.userids[::][:10]\n naive = model.topk_recommendation(all_keys, topk=10, pool=pool)\n qkeys1, topks1, scores1 = par.topk_recommendation(all_keys, topk=10, pool=pool, repr=True)\n for q, t in zip(qkeys1, topks1):\n self.assertEqual(naive[q], t)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.allclose"
]
] |
villawang/Continual_Learning_CV | [
"6715fa9c741df920e56aede11cbb85a4be41871e"
] | [
"applications/Gesture/action_recognition/R3D/dataset/spatial_transforms.py"
] | [
"import collections\nimport numbers\nimport random\n\nimport numpy as np\nimport scipy\nimport torch\nfrom PIL import Image\n\ntry:\n import accimage\nexcept ImportError:\n accimage = None\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def randomize_parameters(self):\n for t in self.transforms:\n t.randomize_parameters()\n\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.\n Converts a PIL.Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n \"\"\"\n\n def __init__(self, norm_value=255):\n self.norm_value = norm_value\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.\n Returns:\n Tensor: Converted image.\n \"\"\"\n if isinstance(pic, np.ndarray):\n # handle numpy array\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n # backward compatibility\n return img.float().div(self.norm_value)\n\n if accimage is not None and isinstance(pic, accimage.Image):\n nppic = np.zeros(\n [pic.channels, pic.height, pic.width], dtype=np.float32)\n pic.copyto(nppic)\n return torch.from_numpy(nppic)\n\n # handle PIL Image\n if pic.mode == 'I':\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == 'I;16':\n img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n else:\n img = torch.ByteTensor(\n torch.ByteStorage.from_buffer(pic.tobytes()))\n # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == 'YCbCr':\n nchannel = 3\n elif pic.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n return img.float().div(self.norm_value)\n else:\n return img\n\n def randomize_parameters(self):\n pass\n\n\nclass Normalize(object):\n \"\"\"Normalize an tensor image with mean and standard deviation.\n Given mean: (R, G, B) and std: (R, G, B),\n will normalize each channel of the torch.*Tensor, i.e.\n channel = (channel - mean) / std\n Args:\n mean (sequence): Sequence of means for R, G, B channels respecitvely.\n std (sequence): Sequence of standard deviations for R, G, B channels\n respecitvely.\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n # TODO: make efficient\n for t, m, s in zip(tensor, self.mean, self.std):\n t.sub_(m).div_(s)\n return tensor\n\n def randomize_parameters(self):\n pass\n\n\nclass Scale(object):\n \"\"\"Rescale the input PIL.Image to the given size.\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (w, h), output size will be matched to this. 
If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size)\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n assert isinstance(size,\n int) or (isinstance(size, collections.Iterable) and\n len(size) == 2)\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be scaled.\n Returns:\n PIL.Image: Rescaled image.\n \"\"\"\n if isinstance(self.size, int):\n w, h = img.size\n if (w <= h and w == self.size) or (h <= w and h == self.size):\n return img\n if w < h:\n ow = self.size\n oh = int(self.size * h / w)\n return img.resize((ow, oh), self.interpolation)\n else:\n oh = self.size\n ow = int(self.size * w / h)\n return img.resize((ow, oh), self.interpolation)\n else:\n return img.resize(self.size, self.interpolation)\n\n def randomize_parameters(self):\n pass\n\n\nclass CenterCrop(object):\n \"\"\"Crops the given PIL.Image at the center.\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be cropped.\n Returns:\n PIL.Image: Cropped image.\n \"\"\"\n w, h = img.size\n th, tw = self.size\n x1 = int(round((w - tw) / 2.))\n y1 = int(round((h - th) / 2.))\n return img.crop((x1, y1, x1 + tw, y1 + th))\n\n def randomize_parameters(self):\n pass\n\n\nclass CornerCrop(object):\n\n def __init__(self, size, crop_position=None):\n self.size = size\n if crop_position is None:\n self.randomize = True\n else:\n self.randomize = False\n self.crop_position = crop_position\n self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br']\n\n def __call__(self, img):\n image_width = img.size[0]\n image_height = img.size[1]\n\n if self.crop_position == 'c':\n th, tw = (self.size, self.size)\n x1 = int(round((image_width - tw) / 2.))\n y1 = int(round((image_height - th) / 2.))\n x2 = x1 + tw\n y2 = y1 + th\n elif self.crop_position == 'tl':\n x1 = 0\n y1 = 0\n x2 = self.size\n y2 = self.size\n elif self.crop_position == 'tr':\n x1 = image_width - self.size\n y1 = 0\n x2 = image_width\n y2 = self.size\n elif self.crop_position == 'bl':\n x1 = 0\n y1 = image_height - self.size\n x2 = self.size\n y2 = image_height\n elif self.crop_position == 'br':\n x1 = image_width - self.size\n y1 = image_height - self.size\n x2 = image_width\n y2 = image_height\n\n img = img.crop((x1, y1, x2, y2))\n\n return img\n\n def randomize_parameters(self):\n if self.randomize:\n self.crop_position = self.crop_positions[random.randint(\n 0,\n len(self.crop_positions) - 1)]\n\n\nclass RandomHorizontalFlip(object):\n \"\"\"Horizontally flip the given PIL.Image randomly with a probability of 0.5.\"\"\"\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be flipped.\n Returns:\n PIL.Image: Randomly flipped image.\n \"\"\"\n self.p = self.randomize_parameters()\n if self.p < 0.5:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\n\n def randomize_parameters(self):\n return random.random()\n\n\nclass MultiScaleCornerCrop(object):\n \"\"\"Crop the given PIL.Image to randomly selected size.\n A crop of size is selected 
from scales of the original size.\n A position of cropping is randomly selected from 4 corners and 1 center.\n This crop is finally resized to given size.\n Args:\n scales: cropping scales of the original size\n size: size of the smaller edge\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n\n def __init__(self,\n scales,\n size,\n interpolation=Image.BILINEAR,\n crop_positions=['c', 'tl', 'tr', 'bl', 'br']):\n self.scales = scales\n self.size = size\n self.interpolation = interpolation\n\n self.crop_positions = crop_positions\n\n def __call__(self, img):\n min_length = min(img.size[0], img.size[1])\n crop_size = int(min_length * self.scale)\n\n image_width = img.size[0]\n image_height = img.size[1]\n\n if self.crop_position == 'c':\n center_x = image_width // 2\n center_y = image_height // 2\n box_half = crop_size // 2\n x1 = center_x - box_half\n y1 = center_y - box_half\n x2 = center_x + box_half\n y2 = center_y + box_half\n elif self.crop_position == 'tl':\n x1 = 0\n y1 = 0\n x2 = crop_size\n y2 = crop_size\n elif self.crop_position == 'tr':\n x1 = image_width - crop_size\n y1 = 0\n x2 = image_width\n y2 = crop_size\n elif self.crop_position == 'bl':\n x1 = 0\n y1 = image_height - crop_size\n x2 = crop_size\n y2 = image_height\n elif self.crop_position == 'br':\n x1 = image_width - crop_size\n y1 = image_height - crop_size\n x2 = image_width\n y2 = image_height\n\n img = img.crop((x1, y1, x2, y2))\n\n return img.resize((self.size, self.size), self.interpolation)\n\n def randomize_parameters(self):\n self.scale = self.scales[random.randint(0, len(self.scales) - 1)]\n self.crop_position = self.crop_positions[random.randint(\n 0,\n len(self.crop_positions) - 1)]\n\n\nclass MultiScaleRandomCrop(object):\n\n def __init__(self, scales, size, interpolation=Image.BILINEAR):\n self.scales = scales\n self.size = size\n self.interpolation = interpolation\n self.randomize_parameters()\n\n def __call__(self, img):\n min_length = min(img.size[0], img.size[1])\n crop_size = int(min_length * self.scale)\n\n image_width = img.size[0]\n image_height = img.size[1]\n\n x1 = self.tl_x * (image_width - crop_size)\n y1 = self.tl_y * (image_height - crop_size)\n x2 = x1 + crop_size\n y2 = y1 + crop_size\n\n img = img.crop((x1, y1, x2, y2))\n\n return img.resize((self.size[0], self.size[1]), self.interpolation)\n\n def randomize_parameters(self):\n self.scale = self.scales[random.randint(0, len(self.scales) - 1)]\n self.tl_x = random.random()\n self.tl_y = random.random()\n\n\nclass SpatialElasticDisplacement(object):\n\n def __init__(self, sigma=2.0, alpha=1.0, order=0, cval=0, mode=\"constant\"):\n self.alpha = alpha\n self.sigma = sigma\n self.order = order\n self.cval = cval\n self.mode = mode\n self.randomize_parameters()\n\n def __call__(self, img):\n if self.p < 0.50:\n is_L = False\n is_PIL = isinstance(img, Image.Image)\n\n if is_PIL:\n img = np.asarray(img, dtype=np.uint8)\n if len(img.shape) == 2:\n is_L = True\n img = np.reshape(img, img.shape + (1,))\n\n image = img\n image_first_channel = np.squeeze(image[..., 0])\n indices_x, indices_y = self._generate_indices(image_first_channel.shape, alpha=self.alpha, sigma=self.sigma)\n ret_image = (self._map_coordinates(\n image,\n indices_x,\n indices_y,\n order=self.order,\n cval=self.cval,\n mode=self.mode))\n\n if is_PIL:\n if is_L:\n return Image.fromarray(ret_image.reshape(ret_image.shape[:2]), mode='L')\n else:\n return Image.fromarray(ret_image)\n else:\n return ret_image\n else:\n return img\n\n def _generate_indices(self, shape, alpha, 
sigma):\n assert (len(shape) == 2), \"shape: Should be of size 2!\"\n dx = scipy.ndimage.gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n dy = scipy.ndimage.gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n\n x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')\n return np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))\n\n def _map_coordinates(self, image, indices_x, indices_y, order=1, cval=0, mode=\"constant\"):\n assert (len(image.shape) == 3), \"image.shape: Should be of size 3!\"\n result = np.copy(image)\n height, width = image.shape[0:2]\n for c in range(image.shape[2]):\n remapped_flat = scipy.ndimage.interpolation.map_coordinates(\n image[..., c],\n (indices_x, indices_y),\n order=order,\n cval=cval,\n mode=mode\n )\n remapped = remapped_flat.reshape((height, width))\n result[..., c] = remapped\n return result\n\n def randomize_parameters(self):\n self.p = random.random()\n"
] | [
[
"numpy.reshape",
"numpy.arange",
"numpy.squeeze",
"numpy.asarray",
"torch.from_numpy",
"scipy.ndimage.interpolation.map_coordinates",
"numpy.copy",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] |
PaulKGrimes/bolo-calc | [
"c2882d9b180dffe406db29253beb8307476b8c64"
] | [
"python/bolo/sky.py"
] | [
"\"\"\" Sky model \"\"\"\n\nfrom collections import OrderedDict as odict\n\nimport numpy as np\n\nimport h5py as hp\n\nfrom cfgmdl import Model, Property, cached\n\nfrom .utils import is_not_none, cfg_path\nfrom . import physics\n#from .unit import Unit\nfrom .cfg import Variable\n\nGHz_to_Hz = 1.e+09\nm_to_mm = 1.e+03\nmm_to_um = 1.e+03\n\n\ndef interp_spectra(freqs, freq_grid, vals):\n \"\"\" Interpolate a spectrum \"\"\"\n freq_grid = freq_grid * GHz_to_Hz\n return np.interp(freqs, freq_grid, vals)\n\n\nclass AtmModel:\n \"\"\" Atmospheric model using tabulated values\"\"\"\n\n def __init__(self, fname, site):\n \"\"\" Constructor \"\"\"\n self._file = hp.File(fname, 'r')\n self._data = self._file[site]\n\n @staticmethod\n def get_keys(pwv, elev):\n \"\"\" Get the keys in the tabulated data \"\"\"\n return [\"%d,%d\" % (int(round(pwv_ * m_to_mm, 1) * mm_to_um), int(round(elev_, 0))) for pwv_, elev_ in np.broadcast(pwv, elev)]\n\n def temp(self, keys, freqs):\n \"\"\" Get interpolated temperatures \"\"\"\n return np.array([interp_spectra(freqs, self._data[key_][0], self._data[key_][2]) for key_ in keys])\n\n def trans(self, keys, freqs):\n \"\"\" Get interpolated transmission coefs \"\"\"\n return np.array([interp_spectra(freqs, self._data[key_][0], self._data[key_][3]) for key_ in keys])\n\n\nclass CustomAtm:\n \"\"\" Atmospheric model using custom value from a txt file \"\"\"\n\n def __init__(self, fname):\n \"\"\" Constructor \"\"\"\n self._freqs, self._temps, self._trans = np.loadtxt(fname, unpack=True, usecols=[0, 2, 3], dtype=np.float)\n\n def temp(self, freqs):\n \"\"\" Get interpolated temperatures \"\"\"\n return interp_spectra(freqs, self._freqs, self._temps)\n\n def trans(self, freqs):\n \"\"\" Get interpolated transmission coefs \"\"\"\n return interp_spectra(freqs, self._freqs, self._trans)\n\n\nclass Atmosphere(Model):\n \"\"\" Atmosphere model \"\"\"\n atm_model_file = Property(dtype=str)\n\n def __init__(self, **kwargs):\n \"\"\" Constructor \"\"\"\n self._atm_model = None\n self._telescope = None\n self._sampled_keys = None\n self._nsamples = 1\n super(Atmosphere, self).__init__(**kwargs)\n\n def set_telescope(self, value):\n \"\"\" Set the telescope\n\n This is needed to sample elevation and PWV values\n \"\"\"\n self._telescope = value\n\n @cached(uses=[atm_model_file])\n def cached_model(self):\n \"\"\" Cache the Atmosphere model \"\"\"\n if is_not_none(self._telescope.custom_atm_file):\n return CustomAtm(cfg_path(self._telescope.custom_atm_file))\n if is_not_none(self.atm_model_file):\n return AtmModel(cfg_path(self.atm_model_file), self._telescope.site)\n return None\n\n def sample(self, nsamples):\n \"\"\" Sample the atmosphere \"\"\"\n model = self.cached_model\n if isinstance(model, CustomAtm):\n self._sampled_keys = None\n self._nsamples = nsamples\n return\n self._telescope.pwv.sample(nsamples)\n self._telescope.elevation.sample(nsamples)\n self._sampled_keys = model.get_keys(1e-6*np.atleast_1d(self._telescope.pwv()), np.atleast_1d(self._telescope.elevation())) #pylint: disable=no-member\n self._nsamples = max(nsamples, 1)\n\n def temp(self, freqs):\n \"\"\" Get sampled temperatures \"\"\"\n model = self.cached_model\n nfreqs = len(freqs)\n out_shape = (max(self._nsamples, 1), 1, nfreqs)\n if self._sampled_keys is None:\n ones = np.ones((max(self._nsamples, 1), 1, 1))\n return (ones*model.temp(freqs)).reshape(out_shape) #pylint: disable=no-member\n #out_shape = (1, 1, nfreqs)\n #return model.temp(freqs).reshape(out_shape) #pylint: disable=no-member\n return 
model.temp(self._sampled_keys, freqs).reshape(out_shape) #pylint: disable=no-member\n\n def trans(self, freqs):\n \"\"\" Get sampled transmission coefs \"\"\"\n model = self.cached_model\n nfreqs = len(freqs)\n out_shape = (max(self._nsamples, 1), 1, nfreqs)\n if self._sampled_keys is None:\n ones = np.ones((max(self._nsamples, 1), 1, 1))\n return (ones*model.trans(freqs)).reshape(out_shape) #pylint: disable=no-member\n #out_shape = (1, 1, nfreqs)\n #return model.trans(freqs).reshape(out_shape) #pylint: disable=no-member\n return model.trans(self._sampled_keys, freqs).reshape(out_shape) #pylint: disable=no-member\n\n\nclass Foreground(Model):\n \"\"\"\n Foreground model base class\n \"\"\"\n spectral_index = Variable(required=True, help=\"Powerlaw index\")\n scale_frequency = Variable(required=True, help=\"Frequency\", unit=\"GHz\")\n emiss = Variable(default=1., help=\"Emissivity\")\n\n\nclass Dust(Foreground):\n \"\"\"\n Dust emission model\n \"\"\"\n amplitude = Variable(required=True, help=\"Dust amplitude\", unit=\"MJy\")\n scale_temperature = Variable(required=True, help=\"Temperature\", unit='K')\n\n def __init__(self, **kwargs):\n \"\"\" Constructor \"\"\"\n self._nsamples = 1\n self.amp = None\n self.scale_temp = None\n super(Dust, self).__init__(**kwargs)\n\n def sample(self, nsamples):\n \"\"\" Sample this component \"\"\"\n self.amplitude.sample(nsamples)\n self.scale_temperature.sample(nsamples)\n\n self.amp = np.expand_dims(np.expand_dims(self.amplitude.SI, -1), -1)\n self.scale_temp = np.expand_dims(np.expand_dims(self.scale_temperature.SI, -1), -1)\n self._nsamples = max(self.amp.size, self.scale_temp.size)\n\n def temp(self, freqs):\n \"\"\" Get sampled temperatures \"\"\"\n out_shape = (self._nsamples, 1, len(freqs))\n return self.__temp(freqs, self.emiss.SI, self.amp, self.scale_frequency.SI, self.spectral_index.SI, self.scale_temp).reshape(out_shape)\n #self.__temp(freqs).reshape(out_shape)\n\n #@Function\n @staticmethod\n def __temp(freqs, emiss, amp, scale_frequency, spectral_index, scale_temp): #pylint: disable=too-many-arguments\n \"\"\"\n Return the galactic effective physical temperature\n \"\"\"\n # Passed amplitude [W/(m^2 sr Hz)] converted from [MJy]\n amp = emiss * amp\n # Frequency scaling\n # (freq / scale_freq)**dust_ind\n if np.isfinite(scale_frequency).all() and np.isfinite(spectral_index).all():\n freq_scale = (freqs / scale_frequency)**(spectral_index)\n else:\n freq_scale = 1.\n # Effective blackbody scaling\n # BB(freq, dust_temp) / BB(dust_freq, dust_temp)\n if np.isfinite(scale_temp).all() and np.isfinite(scale_frequency).all():\n spec_scale = physics.bb_spec_rad(freqs, scale_temp) / physics.bb_spec_rad(scale_frequency, scale_temp)\n else:\n spec_scale = 1.\n # Convert [W/(m^2 sr Hz)] to brightness temperature [K_RJ]\n pow_spec_rad = amp * freq_scale * spec_scale\n return physics.Tb_from_spec_rad(freqs, pow_spec_rad)\n\n\nclass Synchrotron(Foreground):\n \"\"\"\n Synchrotron emission model\n \"\"\"\n amplitude = Variable(required=True, help=\"Dust amplitude\", unit=\"K_RJ\")\n\n def __init__(self, **kwargs):\n \"\"\" Constructor \"\"\"\n self.amp = None\n self._nsamples = 1\n super(Synchrotron, self).__init__(**kwargs)\n\n def sample(self, nsamples):\n \"\"\" Sample this component \"\"\"\n self.amplitude.sample(nsamples)\n self.amp = np.expand_dims(np.expand_dims(self.amplitude.SI, -1), -1)\n self._nsamples = self.amp.size\n\n def temp(self, freqs):\n \"\"\" Get sampled temperatures \"\"\"\n out_shape = (self._nsamples, 1, len(freqs))\n return 
self.__temp(freqs, self.emiss.SI, self.amp, self.scale_frequency.SI, self.spectral_index.SI).reshape(out_shape)\n #self.__temp(freqs).reshape(out_shape)\n\n @staticmethod\n def __temp(freqs, emiss, amp, scale_frequency, spectral_index):\n \"\"\"\n Return the effective physical temperature\n \"\"\"\n bright_temp = emiss * amp\n # Frequency scaling (freq / sync_freq)**sync_ind\n freq_scale = (freqs / scale_frequency)**spectral_index\n scaled_bright_temp = bright_temp * freq_scale\n # Convert brightness temperature [K_RJ] to physical temperature [K]\n return physics.Tb_from_Trj(freqs, scaled_bright_temp)\n\n\nclass Universe(Model):\n \"\"\"\n Collection of emission models\n \"\"\"\n dust = Property(dtype=Dust, help='Dust model')\n synchrotron = Property(dtype=Synchrotron, help='Synchrotron model')\n atmosphere = Property(dtype=Atmosphere, help='Atmospheric model')\n\n sources = ['cmb', 'dust', 'synchrotron', 'atmosphere']\n\n def sample(self, nsamples):\n \"\"\" Sample the sky component \"\"\"\n self.dust.sample(nsamples)\n self.synchrotron.sample(nsamples)\n self.atmosphere.sample(nsamples)\n\n def temp(self, freqs):\n \"\"\" Get sampled temperatures \"\"\"\n ret = odict()\n ret['cmb'] = physics.Tcmb\n ret['dust'] = self.dust.temp(freqs)\n ret['synchrotron'] = self.synchrotron.temp(freqs)\n ret['atmosphere'] = self.atmosphere.temp(freqs)\n return ret\n\n def trans(self, freqs):\n \"\"\" Get sampled transmission coefs \"\"\"\n ret = odict()\n ret['atmosphere'] = self.atmosphere.trans(freqs)\n return ret\n"
] | [
[
"numpy.expand_dims",
"numpy.isfinite",
"numpy.broadcast",
"numpy.interp",
"numpy.loadtxt"
]
] |
FrankFlitton/autoyeai.com | [
"8fbc0b7b4db97ac1930af32f15e06cf844aeab0f"
] | [
"train-python/__main__.py"
] | [
"import tensorflow as tf\nfrom tensorflow.keras.layers.experimental import preprocessing\n\nimport numpy as np\nimport os\nimport time\n\npath_to_file = r\"../data/cleanData/masterSpaces.txt\"\n\ntext = open(path_to_file, 'rb').read().decode(encoding='utf-8')\n\nprint('Length of text: {} characters'.format(len(text)))\nprint(text[:250])\n\nvocab = sorted(set(text))\nprint('{} unique characters'.format(len(vocab)))\n\n# chars = tf.strings.unicode_split(example_texts, input_encoding='UTF-8')\n# chars\nchars = tf.strings.unicode_split(text, input_encoding='UTF-8')\n\nids_from_chars = preprocessing.StringLookup(\n vocabulary=list(vocab))\n\nids = ids_from_chars(chars)\n\nchars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(\n vocabulary=ids_from_chars.get_vocabulary(), invert=True)\n\nchars = chars_from_ids(ids)\n# tf.strings.reduce_join(chars, axis=-1).numpy()\n\n\ndef text_from_ids(ids):\n return tf.strings.reduce_join(chars_from_ids(ids), axis=-1)\n\n\nall_ids = ids_from_chars(tf.strings.unicode_split(text, 'UTF-8'))\nids_dataset = tf.data.Dataset.from_tensor_slices(all_ids)\nfor ids in ids_dataset.take(10):\n print(chars_from_ids(ids).numpy().decode('utf-8'))\n\nseq_length = 100\nexamples_per_epoch = len(text)//(seq_length+1)\n\nsequences = ids_dataset.batch(seq_length+1, drop_remainder=True)\n\nfor seq in sequences.take(5):\n print(text_from_ids(seq).numpy())\n\n\ndef split_input_target(sequence):\n input_text = sequence[:-1]\n target_text = sequence[1:]\n return input_text, target_text\n\n\ndataset = sequences.map(split_input_target)\n\n\nfor input_example, target_example in dataset.take(1):\n print(\"Input :\", text_from_ids(input_example).numpy())\n print(\"Target:\", text_from_ids(target_example).numpy())\n\n\n# Batch size\nBATCH_SIZE = 64\n\n# Buffer size to shuffle the dataset\n# (TF data is designed to work with possibly infinite sequences,\n# so it doesn't attempt to shuffle the entire sequence in memory. 
Instead,\n# it maintains a buffer in which it shuffles elements).\nBUFFER_SIZE = 10000\n\ndataset = (\n dataset\n .shuffle(BUFFER_SIZE)\n .batch(BATCH_SIZE, drop_remainder=True)\n .prefetch(tf.data.experimental.AUTOTUNE))\n\nprint(dataset)\n\n\n# Length of the vocabulary in chars\nvocab_size = len(vocab)\n\n# The embedding dimension\nembedding_dim = 256\n\n# Number of RNN units\nrnn_units = 1024\n\n\nclass MyModel(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, rnn_units):\n super().__init__(self)\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = tf.keras.layers.GRU(rnn_units,\n return_sequences=True,\n return_state=True)\n self.dense = tf.keras.layers.Dense(vocab_size)\n\n def call(self, inputs, states=None, return_state=False, training=False):\n x = inputs\n x = self.embedding(x, training=training)\n if states is None:\n states = self.gru.get_initial_state(x)\n x, states = self.gru(x, initial_state=states, training=training)\n x = self.dense(x, training=training)\n\n if return_state:\n return x, states\n else:\n return x\n\n\nmodel = MyModel(\n # Be sure the vocabulary size matches the `StringLookup` layers.\n vocab_size=len(ids_from_chars.get_vocabulary()),\n embedding_dim=embedding_dim,\n rnn_units=rnn_units)\n\nfor input_example_batch, target_example_batch in dataset.take(1):\n example_batch_predictions = model(input_example_batch)\n print(example_batch_predictions.shape,\n \"# (batch_size, sequence_length, vocab_size)\")\n\nmodel.summary()\n\nsampled_indices = tf.random.categorical(\n example_batch_predictions[0], num_samples=1)\nsampled_indices = tf.squeeze(sampled_indices, axis=-1).numpy()\n\nprint(\"Input:\\n\", text_from_ids(input_example_batch[0]).numpy())\nprint()\nprint(\"Next Char Predictions:\\n\", text_from_ids(sampled_indices).numpy())\n\n\nloss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)\nexample_batch_loss = loss(target_example_batch, example_batch_predictions)\nmean_loss = example_batch_loss.numpy().mean()\nprint(\"Prediction shape: \", example_batch_predictions.shape,\n \" # (batch_size, sequence_length, vocab_size)\")\nprint(\"Mean loss: \", mean_loss)\n\ntf.exp(mean_loss).numpy()\n\nmodel.compile(optimizer='adam', loss=loss)\n\n# Directory where the checkpoints will be saved\ncheckpoint_dir = './training_checkpoints'\n# Name of the checkpoint files\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\ncheckpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_prefix)\n\nEPOCHS = 1\n\nhistory = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])\n\n\n# Text Generation\nclass OneStep(tf.keras.Model):\n def __init__(self, model, chars_from_ids, ids_from_chars, temperature=0.34):\n super().__init__()\n self.temperature = temperature\n self.model = model\n self.chars_from_ids = chars_from_ids\n self.ids_from_chars = ids_from_chars\n\n # Create a mask to prevent \"\" or \"[UNK]\" from being generated.\n skip_ids = self.ids_from_chars(['', '[UNK]'])[:, None]\n sparse_mask = tf.SparseTensor(\n # Put a -inf at each bad index.\n values=[-float('inf')]*len(skip_ids),\n indices=skip_ids,\n # Match the shape to the vocabulary\n dense_shape=[len(ids_from_chars.get_vocabulary())])\n self.prediction_mask = tf.sparse.to_dense(sparse_mask)\n\n @tf.function\n def predict(self, inputs, states=None):\n # Convert strings to token IDs.\n input_chars = tf.strings.unicode_split(inputs, 'UTF-8')\n input_ids = self.ids_from_chars(input_chars).to_tensor()\n\n # Run the 
model.\n # predicted_logits.shape is [batch, char, next_char_logits]\n predicted_logits, states = self.model(inputs=input_ids, states=states,\n return_state=True)\n # Only use the last prediction.\n predicted_logits = predicted_logits[:, -1, :]\n predicted_logits = predicted_logits/self.temperature\n # Apply the prediction mask: prevent \"\" or \"[UNK]\" from being generated.\n predicted_logits = predicted_logits + self.prediction_mask\n\n # Sample the output logits to generate token IDs.\n predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)\n predicted_ids = tf.squeeze(predicted_ids, axis=-1)\n\n # Convert from token ids to characters\n predicted_chars = self.chars_from_ids(predicted_ids)\n\n # Return the characters and model state.\n return predicted_chars, states\n\n\none_step_model = OneStep(model, chars_from_ids, ids_from_chars)\n\nstart = time.time()\nstates = None\nnext_char = tf.constant(['Gotta touch the sky'])\nresult = [next_char]\n\nfor n in range(1000):\n next_char, states = one_step_model.predict(\n next_char, states=states)\n result.append(next_char)\n\nresult = tf.strings.join(result)\nend = time.time()\n\nprint(result[0].numpy().decode('utf-8'), '\\n\\n' + '_'*80)\n\nprint(f\"\\nRun time: {end - start}\")\n\n# tf.saved_model.save(one_step_model, 'one_step.')\none_step_model.save('one_step', save_traces=True, include_optimizer=True)\none_step_reloaded = tf.saved_model.load('one_step')\n"
] | [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.sparse.to_dense",
"tensorflow.strings.unicode_split",
"tensorflow.constant",
"tensorflow.saved_model.load",
"tensorflow.keras.layers.Embedding",
"tensorflow.random.categorical",
"tensorflow.losses.SparseCategoricalCrossentropy",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.layers.Dense",
"tensorflow.squeeze",
"tensorflow.exp",
"tensorflow.keras.layers.GRU",
"tensorflow.strings.join"
]
] |
YONGHAN-KIM/shap | [
"4c76cdb0a5ba2e5769a1d35c22eef117dd65e924"
] | [
"shap/plots/force_matplotlib.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import lines\nfrom matplotlib.font_manager import FontProperties\nfrom matplotlib.path import Path\nfrom matplotlib.patches import PathPatch\nimport matplotlib\n\n\ndef draw_bars(out_value, features, feature_type, width_separators, width_bar):\n \"\"\"Draw the bars and separators.\"\"\"\n rectangle_list = []\n separator_list = []\n \n pre_val = out_value\n for index, features in zip(range(len(features)), features):\n if feature_type == 'positive':\n left_bound = float(features[0])\n right_bound = pre_val\n pre_val = left_bound\n \n separator_indent = np.abs(width_separators)\n separator_pos = left_bound\n colors = ['#FF0D57', '#FFC3D5']\n else:\n left_bound = pre_val\n right_bound = float(features[0])\n pre_val = right_bound\n \n separator_indent = - np.abs(width_separators)\n separator_pos = right_bound\n colors = ['#1E88E5', '#D1E6FA']\n \n # Create rectangle\n if index == 0:\n if feature_type == 'positive':\n points_rectangle = [[left_bound, 0],\n [right_bound, 0],\n [right_bound, width_bar],\n [left_bound, width_bar],\n [left_bound + separator_indent, (width_bar / 2)]\n ]\n else:\n points_rectangle = [[right_bound, 0],\n [left_bound, 0],\n [left_bound, width_bar],\n [right_bound, width_bar],\n [right_bound + separator_indent, (width_bar / 2)]\n ]\n \n else:\n points_rectangle = [[left_bound, 0],\n [right_bound, 0],\n [right_bound + separator_indent * 0.90, (width_bar / 2)],\n [right_bound, width_bar],\n [left_bound, width_bar],\n [left_bound + separator_indent * 0.90, (width_bar / 2)]]\n\n line = plt.Polygon(points_rectangle, closed=True, fill=True,\n facecolor=colors[0], linewidth=0)\n rectangle_list += [line]\n\n # Create seperator\n points_separator = [[separator_pos, 0],\n [separator_pos + separator_indent, (width_bar / 2)],\n [separator_pos, width_bar]]\n \n line = plt.Polygon(points_separator, closed=None, fill=None,\n edgecolor=colors[1], lw=3)\n separator_list += [line]\n\n return rectangle_list, separator_list\n\n\ndef draw_labels(fig, ax, out_value, features, feature_type, offset_text, total_effect=0, min_perc=0.05):\n start_text = out_value\n pre_val = out_value\n \n # Define variables specific to positive and negative effect features\n if feature_type == 'positive':\n colors = ['#FF0D57', '#FFC3D5']\n alignement = 'right'\n sign = 1\n else:\n colors = ['#1E88E5', '#D1E6FA']\n alignement = 'left'\n sign = -1\n \n # Draw initial line\n if feature_type == 'positive':\n x, y = np.array([[pre_val, pre_val], [0, -0.18]])\n line = lines.Line2D(x, y, lw=1., alpha=0.5, color=colors[0])\n line.set_clip_on(False)\n ax.add_line(line)\n start_text = pre_val\n \n box_end = out_value\n val = out_value\n for feature in features:\n # Exclude all labels that do not contribute at least 10% to the total\n feature_contribution = np.abs(float(feature[0]) - pre_val) / np.abs(total_effect)\n if feature_contribution < min_perc:\n break\n \n # Compute value for current feature\n val = float(feature[0])\n \n # Draw labels\n text = feature[2] + ' = ' + feature[1]\n text_out_val = plt.text(start_text - sign * offset_text,\n -0.15, text,\n fontsize=12, color=colors[0],\n horizontalalignment=alignement)\n text_out_val.set_bbox(dict(facecolor='none', edgecolor='none'))\n \n # We need to draw the plot to be able to get the size of the\n # text box\n fig.canvas.draw()\n box_size = text_out_val.get_bbox_patch().get_extents()\\\n .transformed(ax.transData.inverted())\n if feature_type == 'positive':\n box_end_ = 
box_size.get_points()[0][0]\n else:\n box_end_ = box_size.get_points()[1][0]\n \n # If the feature goes over the side of the plot, we remove that label\n # and stop drawing labels\n if box_end_ > ax.get_xlim()[1]:\n text_out_val.remove()\n break\n \n # Create end line\n if (sign * box_end_) > (sign * val):\n x, y = np.array([[val, val], [0, -0.18]])\n line = lines.Line2D(x, y, lw=1., alpha=0.5, color=colors[0])\n line.set_clip_on(False)\n ax.add_line(line)\n start_text = val\n box_end = val\n\n else:\n box_end = box_end_ - sign * offset_text\n x, y = np.array([[val, box_end, box_end],\n [0, -0.08, -0.18]])\n line = lines.Line2D(x, y, lw=1., alpha=0.5, color=colors[0])\n line.set_clip_on(False)\n ax.add_line(line)\n start_text = box_end\n \n # Update previous value\n pre_val = float(feature[0])\n \n \n # Create line for labels\n extent_shading = [out_value, box_end, 0, -0.31]\n path = [[out_value, 0], [pre_val, 0], [box_end, -0.08],\n [box_end, -0.2], [out_value, -0.2],\n [out_value, 0]]\n \n path = Path(path)\n patch = PathPatch(path, facecolor='none', edgecolor='none')\n ax.add_patch(patch) \n \n # Extend axis if needed\n lower_lim, upper_lim = ax.get_xlim()\n if (box_end < lower_lim):\n ax.set_xlim(box_end, upper_lim)\n \n if (box_end > upper_lim):\n ax.set_xlim(lower_lim, box_end)\n \n # Create shading\n if feature_type == 'positive':\n colors = np.array([(255, 13, 87), (255, 255, 255)]) / 255.\n else:\n colors = np.array([(30, 136, 229), (255, 255, 255)]) / 255.\n \n cm = matplotlib.colors.LinearSegmentedColormap.from_list('cm', colors)\n \n Z, Z2 = np.meshgrid(np.linspace(0, 10), np.linspace(-10, 10))\n im = plt.imshow(Z2, interpolation='quadric', cmap=cm,\n vmax=0.01, alpha=0.3,\n origin='lower', extent=extent_shading,\n clip_path=patch, clip_on=True, aspect='auto')\n im.set_clip_path(patch)\n \n return fig, ax\n\n\ndef format_data(data):\n \"\"\"Format data.\"\"\"\n # Format negative features\n neg_features = np.array([[data['features'][x]['effect'],\n data['features'][x]['value'],\n data['featureNames'][x]]\n for x in data['features'].keys() if data['features'][x]['effect'] < 0])\n \n neg_features = np.array(sorted(neg_features, key=lambda x: float(x[0]), reverse=False))\n \n # Format postive features\n pos_features = np.array([[data['features'][x]['effect'],\n data['features'][x]['value'],\n data['featureNames'][x]]\n for x in data['features'].keys() if data['features'][x]['effect'] >= 0])\n pos_features = np.array(sorted(pos_features, key=lambda x: float(x[0]), reverse=True))\n \n # Define link function\n if data['link'] == 'identity':\n convert_func = lambda x: x\n elif data['link'] == 'logit':\n convert_func = lambda x: 1 / (1 + np.exp(-x))\n else:\n assert False, \"ERROR: Unrecognized link function: \" + str(data['link'])\n \n # Convert negative feature values to plot values\n neg_val = data['outValue']\n for i in neg_features:\n val = float(i[0])\n neg_val = neg_val + np.abs(val)\n i[0] = convert_func(neg_val)\n if len(neg_features) > 0:\n total_neg = np.max(neg_features[:, 0].astype(float)) - \\\n np.min(neg_features[:, 0].astype(float))\n else:\n total_neg = 0\n \n # Convert positive feature values to plot values\n pos_val = data['outValue']\n for i in pos_features:\n val = float(i[0])\n pos_val = pos_val - np.abs(val)\n i[0] = convert_func(pos_val)\n \n if len(pos_features) > 0:\n total_pos = np.max(pos_features[:, 0].astype(float)) - \\\n np.min(pos_features[:, 0].astype(float))\n else:\n total_pos = 0\n \n # Convert output value and base value\n data['outValue'] = 
convert_func(data['outValue'])\n data['baseValue'] = convert_func(data['baseValue'])\n \n return neg_features, total_neg, pos_features, total_pos\n\n\ndef draw_output_element(out_name, out_value, ax):\n # Add output value\n x, y = np.array([[out_value, out_value], [0, 0.24]])\n line = lines.Line2D(x, y, lw=2., color='#F2F2F2')\n line.set_clip_on(False)\n ax.add_line(line)\n \n font0 = FontProperties()\n font = font0.copy()\n font.set_weight('bold')\n text_out_val = plt.text(out_value, 0.25, '{0:.2f}'.format(out_value),\n fontproperties=font,\n fontsize=14,\n horizontalalignment='center')\n text_out_val.set_bbox(dict(facecolor='white', edgecolor='white'))\n \n text_out_val = plt.text(out_value, 0.33, out_name,\n fontsize=12, alpha=0.5,\n horizontalalignment='center')\n text_out_val.set_bbox(dict(facecolor='white', edgecolor='white'))\n\n\ndef draw_base_element(base_value, ax):\n x, y = np.array([[base_value, base_value], [0.13, 0.25]])\n line = lines.Line2D(x, y, lw=2., color='#F2F2F2')\n line.set_clip_on(False)\n ax.add_line(line)\n \n text_out_val = plt.text(base_value, 0.33, 'base value',\n fontsize=12, alpha=0.5,\n horizontalalignment='center')\n text_out_val.set_bbox(dict(facecolor='white', edgecolor='white'))\n\n\ndef draw_higher_lower_element(out_value, offset_text):\n plt.text(out_value - offset_text, 0.405, 'higher',\n fontsize=13, color='#FF0D57',\n horizontalalignment='right')\n\n plt.text(out_value + offset_text, 0.405, 'lower',\n fontsize=13, color='#1E88E5',\n horizontalalignment='left')\n \n plt.text(out_value, 0.4, r'$\\leftarrow$',\n fontsize=13, color='#1E88E5',\n horizontalalignment='center')\n \n plt.text(out_value, 0.425, r'$\\rightarrow$',\n fontsize=13, color='#FF0D57',\n horizontalalignment='center')\n\n\ndef update_axis_limits(ax, total_pos, pos_features, total_neg,\n neg_features, base_value):\n ax.set_ylim(-0.5, 0.15)\n padding = np.max([np.abs(total_pos) * 0.2,\n np.abs(total_neg) * 0.2])\n \n if len(pos_features) > 0:\n min_x = min(np.min(pos_features[:, 0].astype(float)), base_value) - padding\n else:\n min_x = 0\n if len(neg_features) > 0:\n max_x = max(np.max(neg_features[:, 0].astype(float)), base_value) + padding\n else:\n max_x = 0\n ax.set_xlim(min_x, max_x)\n\n plt.tick_params(top=True, bottom=False, left=False, right=False, labelleft=False,\n labeltop=True, labelbottom=False)\n plt.locator_params(axis='x', nbins=12)\n\n for key, spine in zip(plt.gca().spines.keys(), plt.gca().spines.values()):\n if key != 'top':\n spine.set_visible(False)\n\n\ndef draw_additive_plot(data, figsize, show):\n \"\"\"Draw additive plot.\"\"\"\n # Turn off interactive plot\n if show == False:\n plt.ioff()\n \n # Format data\n neg_features, total_neg, pos_features, total_pos = format_data(data)\n \n # Compute overall metrics\n base_value = data['baseValue']\n out_value = data['outValue']\n offset_text = (np.abs(total_neg) + np.abs(total_pos)) * 0.04\n \n # Define plots\n fig, ax = plt.subplots(figsize=figsize)\n \n # Compute axis limit\n update_axis_limits(ax, total_pos, pos_features, total_neg,\n neg_features, base_value)\n \n # Define width of bar\n width_bar = 0.1\n width_separators = (ax.get_xlim()[1] - ax.get_xlim()[0]) / 200\n \n # Create bar for negative shap values\n rectangle_list, separator_list = draw_bars(out_value, neg_features, 'negative',\n width_separators, width_bar)\n for i in rectangle_list:\n ax.add_patch(i)\n \n for i in separator_list:\n ax.add_patch(i)\n \n # Create bar for positive shap values\n rectangle_list, separator_list = draw_bars(out_value, 
pos_features, 'positive',\n width_separators, width_bar)\n for i in rectangle_list:\n ax.add_patch(i)\n \n for i in separator_list:\n ax.add_patch(i)\n\n # Add labels\n total_effect = np.abs(total_neg) + total_pos\n fig, ax = draw_labels(fig, ax, out_value, neg_features, 'negative',\n offset_text, total_effect, min_perc=0.05)\n \n fig, ax = draw_labels(fig, ax, out_value, pos_features, 'positive',\n offset_text, total_effect, min_perc=0.05)\n \n # higher lower legend\n draw_higher_lower_element(out_value, offset_text)\n \n # Add label for base value\n draw_base_element(base_value, ax)\n \n # Add output label\n out_names = data['outNames'][0]\n draw_output_element(out_names, out_value, ax)\n \n if show:\n plt.show()\n else:\n return plt.gcf()\n \n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.linspace",
"numpy.exp",
"matplotlib.patches.PathPatch",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.text",
"matplotlib.pyplot.Polygon",
"matplotlib.pyplot.locator_params",
"matplotlib.path.Path",
"matplotlib.font_manager.FontProperties",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.abs",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.tick_params"
]
] |
bxtkezhan/BAR4Py | [
"d8f26e155693db1c9268c4b1244aec50054a294b"
] | [
"bar4py/debugtools.py"
] | [
"import cv2\nimport numpy as np\n\n# Preview Functions\n\ndef drawCorners(points, frame):\n cv2.circle(frame, tuple(points[0]), 5, (0,0,255), 2)\n cv2.circle(frame, tuple(points[1]), 5, (0,255,0), 2)\n cv2.circle(frame, tuple(points[2]), 5, (0,255,255), 2)\n cv2.circle(frame, tuple(points[3]), 5, (255,0,0), 2)\n\ndef drawMarkersCorners(markers, frame):\n for marker in markers:\n points = marker.points\n drawCorners(points, frame)\n\ndef drawMarkers(markers, frame):\n for marker in markers:\n drawCorners(marker.points, frame)\n\n center = marker.calculateCenter()\n cv2.circle(frame, center, 3, (0,0,255), 2)\n cv2.circle(frame, center, 5, (0,255,0), 2)\n cv2.circle(frame, center, 8, (255,0,0), 2)\n marker_id = marker.marker_id\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, str(marker_id), center, font, 0.8, (0,0,255), 2, cv2.LINE_AA)\n\ndef drawMarkersArea(area, frame):\n l, t, r, b = area\n cv2.rectangle(frame, (l, t), (r, b), (255, 64, 128), 2)\n\ndef drawAxis(camera_parameters, markers, frame):\n axis = np.float32([[1,0,0], [0,1,0], [0,0,1]]).reshape(-1,3)\n mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff\n\n for marker in markers:\n rvec, tvec = marker.rvec, marker.tvec\n imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)\n corners = marker.corners\n corner = tuple(corners[0].ravel())\n cv2.line(frame, corner, tuple(imgpts[0].ravel()), (0,0,255), 2)\n cv2.line(frame, corner, tuple(imgpts[1].ravel()), (0,255,0), 2)\n cv2.line(frame, corner, tuple(imgpts[2].ravel()), (255,0,0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, 'X', tuple(imgpts[0].ravel()), font, 0.5, (0,0,255), 2, cv2.LINE_AA)\n cv2.putText(frame, 'Y', tuple(imgpts[1].ravel()), font, 0.5, (0,255,0), 2, cv2.LINE_AA)\n cv2.putText(frame, 'Z', tuple(imgpts[2].ravel()), font, 0.5, (255,0,0), 2, cv2.LINE_AA)\n\ndef drawBox(camera_parameters, markers, frame):\n objpts = np.float32([[0,0,0], [1,0,0], [1,1,0], [0,1,0],\n [0,0,1], [1,0,1], [1,1,1], [0,1,1]]).reshape(-1,3)\n mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff\n\n for marker in markers:\n rvec, tvec = marker.rvec, marker.tvec\n imgpts, jac = cv2.projectPoints(objpts, rvec, tvec, mtx, dist)\n\n cv2.line(frame, tuple(imgpts[0].ravel()), tuple(imgpts[1].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[1].ravel()), tuple(imgpts[2].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[2].ravel()), tuple(imgpts[3].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[3].ravel()), tuple(imgpts[0].ravel()), (0,0,255), 2)\n\n cv2.line(frame, tuple(imgpts[0].ravel()), tuple(imgpts[0+4].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[1].ravel()), tuple(imgpts[1+4].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[2].ravel()), tuple(imgpts[2+4].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[3].ravel()), tuple(imgpts[3+4].ravel()), (0,0,255), 2)\n\n cv2.line(frame, tuple(imgpts[0+4].ravel()), tuple(imgpts[1+4].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[1+4].ravel()), tuple(imgpts[2+4].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[2+4].ravel()), tuple(imgpts[3+4].ravel()), (0,0,255), 2)\n cv2.line(frame, tuple(imgpts[3+4].ravel()), tuple(imgpts[0+4].ravel()), (0,0,255), 2)\n"
] | [
[
"numpy.float32"
]
] |
timvink/whatlies | [
"4a17bc2d4e014069e7ff3626f2fccbd66431054f"
] | [
"tests/test_embeddingset.py"
] | [
"from operator import add, rshift, sub, or_\n\nimport pytest\nimport numpy as np\nfrom spacy.vocab import Vocab\nfrom spacy.language import Language\n\nfrom whatlies import Embedding, EmbeddingSet\nfrom whatlies.language import SpacyLanguage\n\n\[email protected]()\ndef lang():\n vector_data = {\n k: np.random.normal(0, 1, (2,))\n for k in [\"red\", \"blue\", \"cat\", \"dog\", \"green\", \"purple\"]\n }\n vector_data[\"cat\"] += 21.0\n vector_data[\"dog\"] += 20.0\n vocab = Vocab(strings=vector_data.keys())\n for word, vector in vector_data.items():\n vocab.set_vector(word, vector)\n nlp = Language(vocab=vocab)\n return SpacyLanguage(nlp)\n\n\ndef test_embeddingset_creation():\n foo = Embedding(\"foo\", [0, 1])\n bar = Embedding(\"bar\", [1, 1])\n\n emb = EmbeddingSet(foo)\n assert len(emb) == 1\n assert \"foo\" in emb\n emb = EmbeddingSet(foo, bar)\n assert len(emb) == 2\n assert \"foo\" in emb\n assert \"bar\" in emb\n emb = EmbeddingSet({\"foo\": foo})\n assert len(emb) == 1\n assert \"foo\" in emb\n\n\ndef test_embset_creation_error():\n foo = Embedding(\"foo\", [0, 1])\n # This vector has a different dimension. No bueno.\n bar = Embedding(\"bar\", [1, 1, 2])\n with pytest.raises(ValueError):\n EmbeddingSet(foo, bar)\n\n\ndef test_embset_creation_warning():\n foo = Embedding(\"foo\", [0, 1])\n # This vector has the same name dimension. Dangerzone.\n bar = Embedding(\"foo\", [1, 2])\n with pytest.raises(Warning):\n EmbeddingSet(foo, bar)\n\n\[email protected](\"operator\", [add, rshift, sub, or_])\ndef test_artificial_embset(lang, operator):\n emb = lang[[\"red\", \"blue\", \"orange\"]]\n v1 = operator(emb[\"red\"], emb[\"blue\"])\n v2 = operator(lang[\"red\"], lang[\"blue\"])\n assert np.array_equal(v1.vector, v2.vector)\n\n\ndef test_merge_basic(lang):\n emb1 = lang[[\"red\", \"blue\", \"orange\"]]\n emb2 = lang[[\"pink\", \"purple\", \"brown\"]]\n assert len(emb1.merge(emb2)) == 6\n\n\ndef test_average(lang):\n emb = lang[[\"red\", \"blue\", \"orange\"]]\n av = emb.average()\n assert av.name == \"EmbSet.average()\"\n v1 = av.vector\n v2 = (lang[\"red\"].vector + lang[\"blue\"].vector + lang[\"orange\"].vector) / 3\n assert np.array_equal(v1, v2)\n\n\ndef test_to_x_y():\n foo = Embedding(\"foo\", [0.1, 0.3])\n bar = Embedding(\"bar\", [0.7, 0.2])\n buz = Embedding(\"buz\", [0.1, 0.9])\n bla = Embedding(\"bla\", [0.2, 0.8])\n\n emb1 = EmbeddingSet(foo, bar).add_property(\"label\", lambda d: \"group-one\")\n emb2 = EmbeddingSet(buz, bla).add_property(\"label\", lambda d: \"group-two\")\n emb = emb1.merge(emb2)\n\n X, y = emb.to_X_y(y_label=\"label\")\n assert X.shape == emb.to_X().shape == (4, 2)\n assert list(y) == [\"group-one\", \"group-one\", \"group-two\", \"group-two\"]\n\n\ndef test_embset_similar_simple_len(lang):\n emb = lang[[\"red\", \"blue\", \"orange\"]]\n assert len(emb.embset_similar(\"red\", 1)) == 1\n assert len(emb.embset_similar(\"red\", 2)) == 2\n\n\ndef test_embset_similar_simple_contains(lang):\n emb = lang[[\"red\", \"blue\", \"orange\", \"cat\", \"dog\"]]\n subset_cat = emb.embset_similar(\"cat\", 2, metric=\"euclidean\").embeddings.keys()\n assert \"cat\" in subset_cat\n assert \"dog\" in subset_cat\n\n\ndef test_embset_similar_simple_distance(lang):\n emb = lang[[\"red\", \"blue\", \"orange\", \"cat\", \"dog\"]]\n emb_red, score_red = emb.score_similar(\"red\", 5)[0]\n assert np.isclose(score_red, 0.0, atol=0.0001)\n\n\ndef test_embset_raise_value_error_n(lang):\n emb = lang[[\"red\", \"blue\", \"orange\", \"cat\", \"dog\"]]\n with pytest.raises(ValueError):\n 
emb.score_similar(\"red\", 10)\n\n\ndef test_embset_raise_value_error_emb(lang):\n emb = lang[[\"red\", \"blue\", \"orange\", \"cat\", \"dog\"]]\n with pytest.raises(ValueError):\n emb.score_similar(\"dinosaurhead\", 1)\n\n\ndef test_corrplot_raise_error(lang):\n with pytest.raises(ValueError):\n emb = lang[[\"red\", \"blue\", \"orange\", \"pink\", \"purple\", \"brown\"]]\n emb.plot_correlation(metric=\"dinosaurhead\")\n\n\ndef test_filter(lang):\n emb = lang[[\"red\", \"blue\", \"orange\", \"pink\", \"purple\", \"brown\"]]\n assert len(emb) == 6\n assert len(emb.filter(lambda e: \"pink\" not in e.name)) == 5\n assert len(emb.filter(lambda e: \"pink\" in e.name)) == 1\n\n\ndef test_pipe(lang):\n embset = lang[[\"red\", \"blue\", \"orange\", \"pink\", \"purple\", \"brown\"]]\n assert embset.pipe(len) == 6\n\n\ndef test_to_names_X(lang):\n words = [\"red\", \"blue\", \"dog\"]\n embset = lang[words]\n names, X = embset.to_names_X()\n assert names == words\n assert np.array_equal(X, embset.to_X())\n\n\ndef test_from_names_X():\n names = [\"foo\", \"bar\", \"buz\"]\n X = [\n [1.0, 2],\n [3, 4.0],\n [0.5, 0.6],\n ]\n embset = EmbeddingSet.from_names_X(names, X)\n assert \"foo\" in embset\n assert len(embset) == 3\n assert np.array_equal(embset.to_X(), np.array(X))\n\n names = names[:2]\n with pytest.raises(ValueError, match=\"The number of given names\"):\n EmbeddingSet.from_names_X(names, X)\n\n\ndef test_ndim(lang):\n embset = lang[[\"red\", \"blue\", \"dog\"]]\n assert embset.ndim == 2\n\n\ndef test_compare_against(lang):\n embset = lang[[\"red\", \"blue\", \"cat\"]]\n compared = embset.compare_against(lang[\"green\"])\n true_values = np.array(\n [\n embset[\"red\"] > lang[\"green\"],\n embset[\"blue\"] > lang[\"green\"],\n embset[\"cat\"] > lang[\"green\"],\n ]\n )\n assert np.array_equal(compared, true_values)\n\n # Test with custom mapping function\n compared = embset.compare_against(\"cat\", mapping=np.dot)\n true_values = np.array(\n [\n np.dot(embset[\"red\"].vector, lang[\"cat\"].vector),\n np.dot(embset[\"blue\"].vector, lang[\"cat\"].vector),\n np.dot(embset[\"cat\"].vector, lang[\"cat\"].vector),\n ]\n )\n assert np.array_equal(compared, true_values)\n\n # Test with non-existent name or invalid mapping\n with pytest.raises(KeyError):\n embset.compare_against(\"purple\")\n with pytest.raises(ValueError, match=\"Unrecognized mapping value/type.\"):\n embset.compare_against(lang[\"green\"], mapping=\"cosine\")\n\n\ndef test_add_property():\n foo = Embedding(\"foo\", [0.1, 0.3, 0.10])\n bar = Embedding(\"bar\", [0.7, 0.2, 0.11])\n emb = EmbeddingSet(foo, bar)\n emb_with_property = emb.add_property(\"prop_a\", lambda d: \"prop-one\")\n assert all([e.prop_a == \"prop-one\" for e in emb_with_property])\n\n\ndef test_assign():\n foo = Embedding(\"foo\", [0.1, 0.3, 0.10])\n bar = Embedding(\"bar\", [0.7, 0.2, 0.11])\n emb = EmbeddingSet(foo, bar)\n emb_with_property = emb.assign(\n prop_a=lambda d: \"prop-one\", prop_b=lambda d: \"prop-two\"\n )\n assert all([e.prop_a == \"prop-one\" for e in emb_with_property])\n assert all([e.prop_b == \"prop-two\" for e in emb_with_property])\n"
] | [
[
"numpy.dot",
"numpy.array_equal",
"numpy.random.normal",
"numpy.array",
"numpy.isclose"
]
] |
jixiaojie/CarND-Behavioral-Cloning-P3 | [
"b245c73a2c68be2a9fa0a5daefb55eaafb14bd6e"
] | [
"model.py"
] | [
"import csv\n\nimport cv2\nimport numpy as np\nimport sklearn\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom keras.models import Sequential\nfrom keras.layers import *\nfrom keras.initializers import *\n\nfrom keras import backend as K\nK.clear_session()\n\n\nsamples = []\nwith open('./driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n #if steering value large than 0.98 , do not use to train set \n if abs(float(line[3])) <= 0.98:\n line[0] = line[0].replace('\\\\','/')\n line[1] = line[1].replace('\\\\','/')\n line[2] = line[2].replace('\\\\','/')\n samples.append(line)\n \n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\n\n# Data generator \n# Because the generator is slower than load all data in the momery, so not to use here\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n name = './IMG/'+batch_sample[0].split('/')[-1]\n center_image = cv2.imread(name)\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield (X_train, y_train)\n\n\n# Getdata by driving_log.csv\ndef getdata(samples):\n sampleslen = len(samples) * 3\n correction = 0.2\n images = []\n angles = []\n num = 0\n for batch_sample in samples:\n #center\n name = './IMG/'+batch_sample[0].split('/')[-1]\n center_image = cv2.imread(name)\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n num += 1\n\n if (num % 1000 == 0):\n print('Loaded ', num , '\t', num * 1.0 / sampleslen)\n\n\n for batch_sample in samples:\n #left\n name = './IMG/'+batch_sample[1].split('/')[-1]\n left_image = cv2.imread(name)\n left_angle = float(batch_sample[3]) + correction\n images.append(left_image)\n angles.append(left_angle)\n num += 1\n\n if (num % 1000 == 0):\n print('Loaded ', num , '\t', num * 1.0 / sampleslen)\n\n\n for batch_sample in samples:\n #right\n name = './IMG/'+batch_sample[2].split('/')[-1]\n right_image = cv2.imread(name)\n right_angle = float(batch_sample[3]) - correction\n images.append(right_image)\n angles.append(right_angle)\n num += 1\n\n if (num % 1000 == 0):\n print('Loaded ', num , '\t', num * 1.0 / sampleslen)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n\n return X_train, y_train\n\n\n\n#Get train and vlidation set\nprint('Loading train data , total ', len(train_samples) * 3)\nX_train, y_train = getdata(train_samples)\nprint()\nprint('Loading vlidation data , total ', len(validation_samples) * 3)\nX_valid, y_valid = getdata(validation_samples)\n\n\n#Trimmed image format\nch, row, col = 3, 160, 320 \n\n\n#Use NVIDIA's model\nmodel = Sequential()\n\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: x/127.5 - 1.,\n input_shape=(row, col, ch),\n output_shape=(row, col, ch)))\n\n#Crop image\nmodel.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(row, col, ch)))\n\n# Add Convolution layers\nmodel.add(Conv2D(24, (5, 5)))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding = 
'same'))\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(36, (5, 5)))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(48, (5, 5)))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))\nmodel.add(Activation('relu'))\n\n# Add flatten layer \nmodel.add(Flatten())\n\n# Add fully connection layer\nmodel.add(Dense(1164, kernel_initializer = TruncatedNormal(mean=0.0, stddev=0.1, seed=None), bias_initializer='zeros'))\nmodel.add(Activation('relu'))\n\nmodel.add(Dense(100))\nmodel.add(Activation('relu'))\n\nmodel.add(Dense(50))\nmodel.add(Activation('relu'))\n\nmodel.add(Dense(10))\nmodel.add(Activation('relu'))\n\n# Output layer\nmodel.add(Dense(1))\n\n# Print model summary\nmodel.summary()\n\n# Compile model\nmodel.compile(loss='mse', optimizer='adam')\n\n# Fit model\nmodel.fit(X_train, y_train, batch_size=64, epochs=2, validation_data=(X_valid, y_valid), shuffle=True) \n\n# Save model\nmodel.save('model.h5')\n\n\n\n\n\n"
] | [
[
"sklearn.utils.shuffle",
"numpy.array",
"sklearn.model_selection.train_test_split"
]
] |
TBC-TJU/brainda | [
"b46a33bcf3cb3625d09571fdeac8c8bc0480db69"
] | [
"demos/FBMsCCA.py"
] | [
"import sys\nimport numpy as np\nfrom brainda.datasets import Wang2016\nfrom brainda.paradigms import SSVEP\nfrom brainda.algorithms.utils.model_selection import (\n set_random_seeds,\n generate_kfold_indices, match_kfold_indices)\nfrom brainda.algorithms.decomposition import FBMsCCA\nfrom brainda.algorithms.decomposition.base import generate_filterbank, generate_cca_references\n\ndef generate_mscca_references(freqs, srate, T,\n phases,\n n_harmonics: int = 1):\n if isinstance(freqs, int) or isinstance(freqs, float):\n freqs = [freqs]\n freqs = np.array(freqs)[:, np.newaxis]\n if phases is None:\n phases = 0\n if isinstance(phases, int) or isinstance(phases, float):\n phases = [phases]\n phases = np.array(phases)[:, np.newaxis]\n t = np.linspace(0, T, int(T*srate))\n\n Yf = []\n for i in range(n_harmonics):\n Yf.append(np.stack([\n np.sin(2*np.pi*(i+1)*freqs*t + (i+1)*np.pi*phases), # different phases pre-defined\n np.cos(2*np.pi*(i+1)*freqs*t + (i+1)*np.pi*phases)], axis=1))\n Yf = np.concatenate(Yf, axis=1)\n return Yf\n\nwp=[(5,90),(14,90),(22,90),(30,90),(38,90)]\nws=[(3,92),(12,92),(20,92),(28,92),(36,92)]\n\nfilterbank = generate_filterbank(wp,ws,srate=250,order=15,rp=0.5)\n\ndataset = Wang2016()\n\nevents = dataset.events.keys()\nfreq_list = [dataset.get_freq(event) for event in events]\nphase_list = [dataset.get_phase(event) for event in events]\n\nYf = generate_mscca_references(freq_list, srate=250, T=0.5,phases=phase_list,n_harmonics = 5)\n\nparadigm = SSVEP(\n channels=['POZ', 'PZ', 'PO3', 'PO5', 'PO4', 'PO6', 'O1', 'OZ', 'O2'],\n intervals=[(0.14, 0.64)],\n srate=250\n)\n\n# add 5-90Hz bandpass filter in raw hook\ndef raw_hook(raw, caches):\n # do something with raw object\n raw.filter(5, 90, l_trans_bandwidth=2,h_trans_bandwidth=5,\n phase='zero-double')\n caches['raw_stage'] = caches.get('raw_stage', -1) + 1\n return raw, caches\n\ndef epochs_hook(epochs, caches):\n # do something with epochs object\n # print(epochs.event_id)\n caches['epoch_stage'] = caches.get('epoch_stage', -1) + 1\n return epochs, caches\n\ndef data_hook(X, y, meta, caches):\n # retrive caches from the last stage\n # print(\"Raw stage:{},Epochs stage:{}\".format(caches['raw_stage'], caches['epoch_stage']))\n # do something with X, y, and meta\n caches['data_stage'] = caches.get('data_stage', -1) + 1\n return X, y, meta, caches\n\nparadigm.register_raw_hook(raw_hook)\nparadigm.register_epochs_hook(epochs_hook)\nparadigm.register_data_hook(data_hook)\n\nX, y, meta = paradigm.get_data(\n dataset,\n subjects=[1],\n return_concat=True,\n n_jobs=None,\n verbose=False)\n\n# 6-fold cross validation\nset_random_seeds(38)\nkfold = 6\nindices = generate_kfold_indices(meta, kfold=kfold)\n\n# classifier\nfilterweights = [(idx_filter+1) ** (-1.25) + 0.25 for idx_filter in range(5)]\nestimator=FBMsCCA(filterbank=filterbank, n_components=1, filterweights=np.array(filterweights), n_jobs=-1)\n\naccs = []\nfor k in range(kfold):\n train_ind, validate_ind, test_ind = match_kfold_indices(k, meta, indices)\n # merge train and validate set\n train_ind = np.concatenate((train_ind, validate_ind))\n p_labels = estimator.fit(X=X[train_ind],y=y[train_ind], Yf=Yf).predict(X[test_ind])\n accs.append(np.mean(p_labels==y[test_ind]))\nprint(np.mean(accs))\n# If everything is fine, you will get the accuracy about 0.929.\n\n"
] | [
[
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.mean",
"numpy.array"
]
] |
yqhu/torchrec | [
"3dc5db08eb8a3962ebc16f844ede628a128456f3"
] | [
"torchrec/sparse/jagged_tensor.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport abc\nfrom typing import Optional, List, Dict, Tuple\n\nimport torch\nimport torch.fx\nfrom torchrec.streamable import Pipelineable\n\ntry:\n torch.ops.load_library(\"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops\")\n torch.ops.load_library(\"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu\")\nexcept OSError:\n pass\n\n# OSS\ntry:\n import fbgemm_gpu # @manual # noqa\nexcept ImportError:\n pass\n\n\ndef _cumsum(o: List[int]) -> List[int]:\n ret = [0] * (len(o) + 1)\n for i in range(len(o)):\n ret[i + 1] = ret[i] + o[i]\n return ret\n\n\ndef _to_offsets(lengths: torch.Tensor) -> torch.Tensor:\n return torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)\n\n\ndef _to_lengths(offsets: torch.Tensor) -> torch.Tensor:\n return offsets[1:] - offsets[:-1]\n\n\ndef _maybe_compute_lengths(\n lengths: Optional[torch.Tensor], offsets: Optional[torch.Tensor]\n) -> torch.Tensor:\n if lengths is None:\n assert offsets is not None\n lengths = _to_lengths(offsets)\n return lengths\n\n\ndef _maybe_compute_offsets(\n lengths: Optional[torch.Tensor], offsets: Optional[torch.Tensor]\n) -> torch.Tensor:\n if offsets is None:\n assert lengths is not None\n offsets = _to_offsets(lengths)\n return offsets\n\n\ndef _get_weights_or_throw(weights: Optional[torch.Tensor]) -> torch.Tensor:\n assert weights is not None, \"This (Keyed)JaggedTensor doesn't have weights.\"\n return weights\n\n\ndef _assert_offsets_or_lengths_is_provided(\n offsets: Optional[torch.Tensor], lengths: Optional[torch.Tensor]\n) -> None:\n assert offsets is not None or lengths is not None, \"Must provide lengths or offsets\"\n\n\ndef _regroup_keyed_tensors(\n keyed_tensors: List[\"KeyedTensor\"], groups: List[List[str]]\n) -> List[torch.Tensor]:\n # Shortcut for no re-grouping\n if len(keyed_tensors) == len(groups):\n match = True\n for kt, group in zip(keyed_tensors, groups):\n if kt.keys() != group:\n match = False\n break\n if match:\n return [kt.values() for kt in keyed_tensors]\n\n embedding_dicts = [keyed_tensor.to_dict() for keyed_tensor in keyed_tensors]\n lengths = [keyed_tensor.length_per_key() for keyed_tensor in keyed_tensors]\n indices = [keyed_tensor._key_indices() for keyed_tensor in keyed_tensors]\n key_dim = keyed_tensors[0].key_dim()\n\n key_to_idx: dict[str, int] = {}\n for (i, keyed_tensor) in enumerate(keyed_tensors):\n for key in keyed_tensor.keys():\n key_to_idx[key] = i\n\n # Rearrange values based on groups with a single torch.cat operation.\n cat_input: List[torch.Tensor] = []\n for group in groups:\n for name in group:\n cat_input.append(embedding_dicts[key_to_idx[name]][name])\n rearranged_values = torch.cat(cat_input, key_dim)\n\n # Provide views over the rearranged values with a single torch.split operation.\n split_lengths: List[int] = []\n for group in groups:\n group_length = 0\n for name in group:\n group_length += lengths[key_to_idx[name]][indices[key_to_idx[name]][name]]\n split_lengths.append(group_length)\n\n return list(rearranged_values.split(split_lengths, dim=key_dim))\n\n\ntorch.fx.wrap(\"_regroup_keyed_tensors\")\n\n\ndef _values_string(values: torch.Tensor, start: int, end: int) -> str:\n return \"[\" + \", \".join([str(value.item()) for value in values[start:end]]) + \"]\"\n\n\ndef _jagged_values_string(\n values: torch.Tensor,\n offsets: torch.Tensor,\n 
offset_start: int,\n offset_end: int,\n) -> str:\n return (\n \"[\"\n + \", \".join(\n [\n _values_string(values, offsets[index], offsets[index + 1])\n for index in range(offset_start, offset_end)\n ]\n )\n + \"]\"\n )\n\n\nclass JaggedTensorMeta(abc.ABCMeta, torch.fx.ProxyableClassMeta):\n pass\n\n\nclass JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):\n \"\"\"Represents an (optionally weighted) jagged tensor.\n\n A `JaggedTensor` is a tensor with a *jagged dimension*, which is a dimension whose\n slices may be of different lengths. See KeyedJaggedTensor for full example.\n\n Implementation is torch.jit.script-able\n \"\"\"\n\n def __init__(\n self,\n values: torch.Tensor,\n weights: Optional[torch.Tensor] = None,\n lengths: Optional[torch.Tensor] = None,\n offsets: Optional[torch.Tensor] = None,\n ) -> None:\n self._values: torch.Tensor = values\n self._weights: Optional[torch.Tensor] = weights\n _assert_offsets_or_lengths_is_provided(offsets, lengths)\n if offsets is not None:\n _assert_tensor_has_no_elements_or_has_integers(offsets, \"offsets\")\n if lengths is not None:\n _assert_tensor_has_no_elements_or_has_integers(lengths, \"lengths\")\n self._lengths: Optional[torch.Tensor] = lengths\n self._offsets: Optional[torch.Tensor] = offsets\n\n @staticmethod\n def empty(is_weighted: bool = False) -> \"JaggedTensor\":\n weights = torch.tensor([]) if is_weighted else None\n return JaggedTensor(\n values=torch.tensor([]),\n offsets=torch.tensor([]),\n lengths=torch.tensor([]),\n weights=weights,\n )\n\n @staticmethod\n def from_dense_lengths(\n values: torch.Tensor,\n lengths: torch.Tensor,\n weights: Optional[torch.Tensor] = None,\n ) -> \"JaggedTensor\":\n \"\"\"\n Constructs `JaggedTensor` from dense values/weights of shape (B, N,).\n\n Note that `lengths` is still of shape (B,).\n \"\"\"\n mask2d = torch.arange(values.size(1), device=values.device).expand(\n values.size(0), -1\n ) < lengths.unsqueeze(-1)\n return JaggedTensor(\n values=values[mask2d],\n weights=weights[mask2d] if weights is not None else None,\n lengths=lengths,\n )\n\n def lengths(self) -> torch.Tensor:\n _lengths = _maybe_compute_lengths(self._lengths, self._offsets)\n self._lengths = _lengths\n return _lengths\n\n def offsets(self) -> torch.Tensor:\n _offsets = _maybe_compute_offsets(self._lengths, self._offsets)\n self._offsets = _offsets\n return _offsets\n\n def values(self) -> torch.Tensor:\n return self._values\n\n def weights(self) -> torch.Tensor:\n return _get_weights_or_throw(self._weights)\n\n def weights_or_none(self) -> Optional[torch.Tensor]:\n return self._weights\n\n def to(self, device: torch.device, non_blocking: bool = False) -> \"JaggedTensor\":\n weights = self._weights\n lengths = self._lengths\n offsets = self._offsets\n return JaggedTensor(\n values=self._values.to(device, non_blocking=non_blocking),\n weights=weights.to(device, non_blocking=non_blocking)\n if weights is not None\n else None,\n lengths=lengths.to(device, non_blocking=non_blocking)\n if lengths is not None\n else None,\n offsets=offsets.to(device, non_blocking=non_blocking)\n if offsets is not None\n else None,\n )\n\n # pyre-ignore [56]\n @torch.jit.unused\n def record_stream(self, stream: torch.cuda.streams.Stream) -> None:\n self._values.record_stream(stream)\n weights = self._weights\n lengths = self._lengths\n offsets = self._offsets\n if weights is not None:\n weights.record_stream(stream)\n if lengths is not None:\n lengths.record_stream(stream)\n if offsets is not None:\n offsets.record_stream(stream)\n\n def 
__str__(self) -> str:\n offsets = self.offsets()\n\n if self._weights is None:\n return (\n \"JaggedTensor({\\n \"\n + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)\n + \"\\n})\\n\"\n )\n\n return (\n \"JaggedTensor({\\n\"\n + ' \"values\": '\n + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)\n + ',\\n \"weights\": '\n + _jagged_values_string(\n _get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1\n )\n + \"\\n})\\n\"\n )\n\n\ndef _assert_tensor_has_no_elements_or_has_integers(\n tensor: torch.Tensor, tensor_name: str\n) -> None:\n assert tensor.numel() == 0 or tensor.dtype in [\n torch.long,\n torch.int,\n torch.short,\n torch.int8,\n torch.uint8,\n ], \"{} must be of integer type, but got {}\".format(tensor_name, tensor.dtype)\n\n\ndef _maybe_compute_index_per_key(\n keys: List[str],\n index_per_key: Optional[Dict[str, int]],\n) -> Dict[str, int]:\n if index_per_key is None:\n index_per_key = {key: i for i, key in enumerate(keys)}\n return index_per_key\n\n\ndef _maybe_compute_stride_kjt(\n keys: List[str],\n stride: Optional[int],\n lengths: Optional[torch.Tensor],\n offsets: Optional[torch.Tensor],\n) -> int:\n if stride is None:\n if len(keys) == 0:\n stride = 0\n elif offsets is not None:\n stride = (offsets.numel() - 1) // len(keys)\n elif lengths is not None:\n stride = lengths.numel() // len(keys)\n else:\n stride = 1\n return stride\n\n\ndef _maybe_compute_length_per_key(\n keys: List[str],\n stride: int,\n length_per_key: Optional[List[int]],\n lengths: Optional[torch.Tensor],\n offsets: Optional[torch.Tensor],\n) -> List[int]:\n if length_per_key is None:\n if len(keys) and offsets is not None:\n _length: List[int] = (\n torch.sum((offsets[1:] - offsets[:-1]).view(-1, stride), dim=1)\n .cpu()\n .tolist()\n )\n elif len(keys) and lengths is not None:\n _length: List[int] = (\n torch.sum(lengths.view(-1, stride), dim=1).cpu().tolist()\n )\n else:\n _length: List[int] = []\n length_per_key = _length\n return length_per_key\n\n\ndef _maybe_compute_offset_per_key(\n keys: List[str],\n stride: int,\n length_per_key: Optional[List[int]],\n offset_per_key: Optional[List[int]],\n lengths: Optional[torch.Tensor],\n offsets: Optional[torch.Tensor],\n) -> Tuple[List[int], List[int]]:\n if length_per_key is None:\n _length_per_key: List[int] = _maybe_compute_length_per_key(\n keys, stride, length_per_key, lengths, offsets\n )\n return _length_per_key, _cumsum(_length_per_key)\n elif offset_per_key is None:\n return length_per_key, _cumsum(length_per_key)\n else:\n return length_per_key, offset_per_key\n\n\ndef _jagged_tensor_string(\n key: str,\n values: torch.Tensor,\n weights: Optional[torch.Tensor],\n offsets: torch.Tensor,\n offset_start: int,\n offset_end: int,\n) -> str:\n if weights is None:\n return '\"{}\": '.format(key) + _jagged_values_string(\n values, offsets, offset_start, offset_end\n )\n\n return (\n '\"{}\"'.format(key)\n + ': {\\n \"values\": '\n + _jagged_values_string(values, offsets, offset_start, offset_end)\n + ',\\n \"weights\": '\n + _jagged_values_string(\n _get_weights_or_throw(weights), offsets, offset_start, offset_end\n )\n + \"\\n }\"\n )\n\n\ndef _maybe_compute_kjt_to_jt_dict(\n stride: int,\n keys: List[str],\n length_per_key: List[int],\n values: torch.Tensor,\n lengths: torch.Tensor,\n offsets: torch.Tensor,\n weights: Optional[torch.Tensor],\n jt_dict: Optional[Dict[str, JaggedTensor]],\n) -> Dict[str, JaggedTensor]:\n if jt_dict is None:\n _jt_dict: Dict[str, JaggedTensor] = {}\n values_list = 
torch.split(values, length_per_key)\n lengths_tuple = torch.unbind(lengths.view(-1, stride), dim=0)\n if weights is not None:\n weights_list = torch.split(weights, length_per_key)\n for idx, key in enumerate(keys):\n length = lengths_tuple[idx]\n offset = _to_offsets(length)\n _jt_dict[key] = JaggedTensor(\n lengths=length,\n offsets=offset,\n values=values_list[idx],\n weights=weights_list[idx],\n )\n else:\n for idx, key in enumerate(keys):\n length = lengths_tuple[idx]\n offset = _to_offsets(length)\n _jt_dict[key] = JaggedTensor(\n lengths=length,\n offsets=offset,\n values=values_list[idx],\n )\n jt_dict = _jt_dict\n return jt_dict\n\n\ndef _merge_weights_or_none(\n a_weights: Optional[torch.Tensor],\n b_weights: Optional[torch.Tensor],\n) -> Optional[torch.Tensor]:\n assert not (\n (a_weights is None) ^ (b_weights is None)\n ), \"Can only merge weighted or unweighted KJTs.\"\n if a_weights is None:\n return None\n # pyre-ignore[6]\n return torch.cat([a_weights, b_weights], dim=0)\n\n\ntorch.fx.wrap(\"_merge_weights_or_none\")\n\n\nclass KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):\n \"\"\"Represents an (optionally weighted) keyed jagged tensor.\n\n A `JaggedTensor` is a tensor with a *jagged dimension*, which is a dimension whose\n slices may be of different lengths. It is keyed on the first dimension and jagged on\n the last dimension.\n\n For example:\n 0 1 2 <-- dim_1\n \"Feature0\" [V0,V1] None [V2]\n \"Feature1\" [V3] [V4] [V5,V6,V7]\n ^\n dim_0\n\n dim_0: keyed dimension (i.e. `Feature0`, `Feature1`)\n dim_1: optional second dimension (i.e. batch size)\n dim_2: The jagged dimension which has slice lengths between 0-3 in the above example\n\n We represent this data with the following inputs:\n\n values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7], V == any tensor datatype\n weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7], W == any tensor datatype\n lengths: torch.Tensor = [2, 0, 1, 1, 1, 3], representing the jagged slice\n offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8], offsets from 0 for each jagged slice\n keys: List[str] = [\"Feature0\", \"Feature1\"], which corresponds to each value of dim_0\n index_per_key: Dict[str, int] = {\"Feature0\": 0, \"Feature1\": 1}, index for each key\n offset_per_key: List[int] = [0, 3, 8], start offset for each key and final offset\n\n\n Implementation is torch.jit.script-able\n \"\"\"\n\n def __init__(\n self,\n keys: List[str],\n values: torch.Tensor,\n weights: Optional[torch.Tensor] = None,\n lengths: Optional[torch.Tensor] = None,\n offsets: Optional[torch.Tensor] = None,\n stride: Optional[int] = None,\n # Below exposed to ensure torch.script-able\n length_per_key: Optional[List[int]] = None,\n offset_per_key: Optional[List[int]] = None,\n index_per_key: Optional[Dict[str, int]] = None,\n jt_dict: Optional[Dict[str, JaggedTensor]] = None,\n ) -> None:\n self._keys: List[str] = keys\n self._values: torch.Tensor = values\n self._weights: Optional[torch.Tensor] = weights\n if offsets is not None:\n _assert_tensor_has_no_elements_or_has_integers(offsets, \"offsets\")\n if lengths is not None:\n _assert_tensor_has_no_elements_or_has_integers(lengths, \"lengths\")\n self._lengths: Optional[torch.Tensor] = lengths\n self._offsets: Optional[torch.Tensor] = offsets\n stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)\n self._stride: int = stride\n\n # lazy fields\n self._length_per_key: Optional[List[int]] = length_per_key\n self._offset_per_key: Optional[List[int]] = offset_per_key\n self._index_per_key: Optional[Dict[str, 
int]] = index_per_key\n self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict\n\n @staticmethod\n def from_offsets_sync(\n keys: List[str],\n values: torch.Tensor,\n offsets: torch.Tensor,\n weights: Optional[torch.Tensor] = None,\n stride: Optional[int] = None,\n ) -> \"KeyedJaggedTensor\":\n kjt = KeyedJaggedTensor(\n keys=keys,\n values=values,\n weights=weights,\n offsets=offsets,\n stride=stride,\n )\n return kjt.sync()\n\n @staticmethod\n def from_lengths_sync(\n keys: List[str],\n values: torch.Tensor,\n lengths: torch.Tensor,\n weights: Optional[torch.Tensor] = None,\n stride: Optional[int] = None,\n ) -> \"KeyedJaggedTensor\":\n kjt = KeyedJaggedTensor(\n keys=keys,\n values=values,\n weights=weights,\n lengths=lengths,\n stride=stride,\n )\n return kjt.sync()\n\n @staticmethod\n def concat(\n a: \"KeyedJaggedTensor\",\n b: \"KeyedJaggedTensor\",\n ) -> \"KeyedJaggedTensor\":\n if a.stride() != b.stride():\n raise ValueError(\n f\"Can only merge KJTs of the same stride ({a.stride()}, {b.stride()}).\"\n )\n length_per_key = (\n a._length_per_key + b._length_per_key\n if a._length_per_key is not None and b._length_per_key is not None\n else None\n )\n\n return KeyedJaggedTensor(\n keys=a.keys() + b.keys(),\n values=torch.cat([a.values(), b.values()], dim=0),\n weights=_merge_weights_or_none(a.weights_or_none(), b.weights_or_none()),\n lengths=torch.cat([a.lengths(), b.lengths()], dim=0),\n stride=a.stride(),\n length_per_key=length_per_key,\n )\n\n @staticmethod\n def empty(\n is_weighted: bool = False, device: Optional[torch.device] = None\n ) -> \"KeyedJaggedTensor\":\n weights = None\n if is_weighted is True:\n weights = torch.tensor([], device=device) if device else torch.tensor([])\n\n return KeyedJaggedTensor(\n keys=[],\n values=torch.tensor([], device=device) if device else torch.tensor([]),\n weights=weights,\n stride=0,\n )\n\n @staticmethod\n def empty_like(kjt: \"KeyedJaggedTensor\") -> \"KeyedJaggedTensor\":\n return KeyedJaggedTensor(\n keys=[],\n values=torch.tensor([], device=kjt.device(), dtype=kjt.values().dtype),\n weights=None\n if kjt.weights_or_none() is None\n else torch.tensor([], device=kjt.device(), dtype=kjt.weights().dtype),\n lengths=None,\n offsets=None,\n stride=kjt.stride(),\n )\n\n def sync(self) -> \"KeyedJaggedTensor\":\n self.length_per_key()\n self.offset_per_key()\n return self\n\n def device(self) -> torch.device:\n return self._values.device\n\n def lengths(self) -> torch.Tensor:\n _lengths = _maybe_compute_lengths(self._lengths, self._offsets)\n self._lengths = _lengths\n return _lengths\n\n def offsets(self) -> torch.Tensor:\n _offsets = _maybe_compute_offsets(self._lengths, self._offsets)\n self._offsets = _offsets\n return _offsets\n\n def keys(self) -> List[str]:\n return self._keys\n\n def values(self) -> torch.Tensor:\n return self._values\n\n def weights(self) -> torch.Tensor:\n return _get_weights_or_throw(self._weights)\n\n def weights_or_none(self) -> Optional[torch.Tensor]:\n return self._weights\n\n def stride(self) -> int:\n return self._stride\n\n def _key_indices(self) -> Dict[str, int]:\n _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(\n self._keys,\n self._index_per_key,\n )\n self._index_per_key = _index_per_key\n return _index_per_key\n\n def length_per_key(self) -> List[int]:\n _length_per_key = _maybe_compute_length_per_key(\n self._keys,\n self.stride(),\n self._length_per_key,\n self._lengths,\n self._offsets,\n )\n self._length_per_key = _length_per_key\n return _length_per_key\n\n def 
offset_per_key(self) -> List[int]:\n _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(\n self._keys,\n self.stride(),\n self._length_per_key,\n self._offset_per_key,\n self._lengths,\n self._offsets,\n )\n self._length_per_key = _length_per_key\n self._offset_per_key = _offset_per_key\n return _offset_per_key\n\n def split(self, segments: List[int]) -> List[\"KeyedJaggedTensor\"]:\n split_list: List[KeyedJaggedTensor] = []\n start = 0\n start_offset = 0\n _length_per_key = self.length_per_key()\n _offset_per_key = self.offset_per_key()\n for segment in segments:\n end = start + segment\n end_offset = _offset_per_key[end]\n keys: List[str] = self._keys[start:end]\n if segment == len(self._keys):\n # no torch slicing required\n split_list.append(\n KeyedJaggedTensor(\n keys=self._keys,\n values=self._values,\n weights=self.weights_or_none(),\n lengths=self._lengths,\n offsets=self._offsets,\n stride=self._stride,\n length_per_key=self._length_per_key,\n offset_per_key=self._offset_per_key,\n index_per_key=self._index_per_key,\n jt_dict=self._jt_dict,\n )\n )\n elif segment == 0:\n split_list.append(\n KeyedJaggedTensor(\n keys=keys,\n values=torch.tensor(\n [], device=self.device(), dtype=self._values.dtype\n ),\n weights=None\n if self.weights_or_none() is None\n else torch.tensor(\n [],\n device=self.device(),\n dtype=self.weights().dtype,\n ),\n lengths=torch.tensor([], device=self.device(), dtype=torch.int),\n offsets=torch.tensor([], device=self.device(), dtype=torch.int),\n stride=self._stride,\n length_per_key=None,\n offset_per_key=None,\n index_per_key=None,\n jt_dict=None,\n )\n )\n else:\n split_length_per_key = _length_per_key[start:end]\n split_list.append(\n KeyedJaggedTensor(\n keys=keys,\n values=self._values[start_offset:end_offset],\n weights=None\n if self.weights_or_none() is None\n else self.weights()[start_offset:end_offset],\n lengths=self.lengths()[\n start * self._stride : end * self._stride\n ],\n offsets=None,\n stride=self._stride,\n length_per_key=split_length_per_key,\n offset_per_key=None,\n index_per_key=None,\n jt_dict=None,\n )\n )\n start = end\n start_offset = end_offset\n return split_list\n\n def permute(\n self, indices: List[int], indices_tensor: Optional[torch.Tensor] = None\n ) -> \"KeyedJaggedTensor\":\n\n if indices_tensor is None:\n indices_tensor = torch.tensor(\n indices, dtype=torch.int, device=self.device()\n )\n\n length_per_key = self.length_per_key()\n permuted_keys: List[str] = []\n permuted_length_per_key: List[int] = []\n permuted_lengths_sum = 0\n seen: Dict[str, int] = {}\n for index in indices:\n key = self._keys[index]\n count = seen.get(key, 0)\n permuted_keys.append(key)\n permuted_lengths_sum += length_per_key[index]\n permuted_length_per_key.append(length_per_key[index])\n seen[key] = count + 1\n\n (\n permuted_lengths,\n permuted_values,\n permuted_weights,\n ) = torch.ops.fbgemm.permute_sparse_data(\n indices_tensor,\n self.lengths().view(len(self._keys), -1),\n self.values(),\n self.weights_or_none(),\n permuted_lengths_sum,\n )\n\n kjt = KeyedJaggedTensor(\n keys=permuted_keys,\n values=permuted_values,\n weights=permuted_weights,\n lengths=permuted_lengths.view(-1),\n offsets=None,\n stride=self._stride,\n length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,\n offset_per_key=None,\n index_per_key=None,\n jt_dict=None,\n )\n return kjt\n\n def __getitem__(self, key: str) -> JaggedTensor:\n offset_per_key = self.offset_per_key()\n index = self._key_indices()[key]\n start_offset = 
offset_per_key[index]\n end_offset = offset_per_key[index + 1]\n return JaggedTensor(\n values=self._values[start_offset:end_offset],\n weights=None\n if self.weights_or_none() is None\n else self.weights()[start_offset:end_offset],\n lengths=self.lengths()[index * self._stride : (index + 1) * self._stride],\n offsets=None,\n )\n\n def to_dict(self) -> Dict[str, JaggedTensor]:\n _jt_dict = _maybe_compute_kjt_to_jt_dict(\n self.stride(),\n self.keys(),\n self.length_per_key(),\n self.values(),\n self.lengths(),\n self.offsets(),\n self.weights_or_none(),\n self._jt_dict,\n )\n self._jt_dict = _jt_dict\n return _jt_dict\n\n # pyre-ignore [56]\n @torch.jit.unused\n def record_stream(self, stream: torch.cuda.streams.Stream) -> None:\n self._values.record_stream(stream)\n weights = self._weights\n lengths = self._lengths\n offsets = self._offsets\n if weights is not None:\n weights.record_stream(stream)\n if lengths is not None:\n lengths.record_stream(stream)\n if offsets is not None:\n offsets.record_stream(stream)\n\n def to(\n self, device: torch.device, non_blocking: bool = False\n ) -> \"KeyedJaggedTensor\":\n weights = self._weights\n lengths = self._lengths\n offsets = self._offsets\n length_per_key = self._length_per_key\n offset_per_key = self._offset_per_key\n index_per_key = self._index_per_key\n jt_dict = self._jt_dict\n\n return KeyedJaggedTensor(\n keys=self._keys,\n values=self._values.to(device, non_blocking=non_blocking),\n weights=weights.to(device, non_blocking=non_blocking)\n if weights is not None\n else None,\n lengths=lengths.to(device, non_blocking=non_blocking)\n if lengths is not None\n else None,\n offsets=offsets.to(device, non_blocking=non_blocking)\n if offsets is not None\n else None,\n stride=self._stride,\n length_per_key=length_per_key,\n offset_per_key=offset_per_key,\n index_per_key=index_per_key,\n jt_dict=jt_dict,\n )\n\n def __str__(self) -> str:\n if self._offsets is None and self._lengths is None:\n return \"KeyedJaggedTensor()\\n\"\n offsets = self.offsets()\n\n step = (len(offsets) - 1) // len(self._keys)\n return (\n \"KeyedJaggedTensor({\\n\"\n + \",\\n\".join(\n [\n \" \"\n + _jagged_tensor_string(\n self._keys[index],\n self._values,\n self._weights,\n offsets,\n index * step,\n (index + 1) * step,\n )\n for index in range(len(self._keys))\n ]\n )\n + \"\\n})\\n\"\n )\n\n def pin_memory(self) -> \"KeyedJaggedTensor\":\n weights = self._weights\n lengths = self._lengths\n offsets = self._offsets\n\n return KeyedJaggedTensor(\n keys=self._keys,\n values=self._values.pin_memory(),\n weights=weights.pin_memory() if weights is not None else None,\n lengths=lengths.pin_memory() if lengths is not None else None,\n offsets=offsets.pin_memory() if offsets is not None else None,\n stride=self._stride,\n length_per_key=self._length_per_key,\n offset_per_key=self._offset_per_key,\n index_per_key=self._index_per_key,\n jt_dict=None,\n )\n\n\ndef _maybe_compute_offset_per_key_kt(\n length_per_key: List[int],\n offset_per_key: Optional[List[int]],\n) -> List[int]:\n if offset_per_key is None:\n offset_per_key = _cumsum(length_per_key)\n return offset_per_key\n\n\ndef _keyed_values_string(values: torch.Tensor) -> str:\n return (\n \"[\"\n + \", \".join([_values_string(value, 0, len(value)) for value in values])\n + \"]\"\n )\n\n\nclass KeyedTensor(Pipelineable, metaclass=JaggedTensorMeta):\n \"\"\"\n KeyedTensor holds a concatenated list of dense tensors\n each of which can be accessed by a key.\n Keyed dimension can be variable length (length_per_key).\n Common 
use cases include storage of pooled embeddings of different dimensions.\n\n Constructor Args:\n keys (List[str]): list of keys\n length_per_key (List[int]): length of each key along key dimension\n values (torch.Tensor): dense tensor, concatenated typically along key dimension\n key_dim (int): key dimension, zero indexed - defaults to 1 (typically B is the 0-dimension)\n\n Implementation is torch.jit.script-able\n\n\n Example:\n kt is a KeyedTensor holding\n\n 0 1 2\n \"Embedding A\" [1,1] [1,1] [1,1]\n \"Embedding B\" [2,1,2] [2,1,2] [2,1,2]\n \"Embedding C\" [3,1,2,3] [3,1,2,3] [3,1,2,3]\n >>> tensor_list = [\n torch.tensor([[1,1]] * 3),\n torch.tensor([[2,1,2]] * 3),\n torch.tensor([[3,1,2,3]] * 3),\n ]\n >>> keys = [\"Embedding A\", \"Embedding B\", \"Embedding C\"]\n >>> kt = KeyedTensor.from_tensor_list(keys, tensor_list)\n >>> kt.values()\n tensor([[1, 1, 2, 1, 2, 3, 1, 2, 3],\n [1, 1, 2, 1, 2, 3, 1, 2, 3],\n [1, 1, 2, 1, 2, 3, 1, 2, 3]])\n >>> kt[\"Embedding B\"]\n tensor([[2, 1, 2],\n [2, 1, 2],\n [2, 1, 2]])\n \"\"\"\n\n def __init__(\n self,\n keys: List[str],\n length_per_key: List[int],\n values: torch.Tensor,\n key_dim: int = 1,\n # Below exposed to ensure torch.script-able\n offset_per_key: Optional[List[int]] = None,\n index_per_key: Optional[Dict[str, int]] = None,\n ) -> None:\n self._keys = keys\n self._length_per_key = length_per_key\n self._values = values\n self._key_dim = key_dim\n\n self._offset_per_key: Optional[List[int]] = offset_per_key\n self._index_per_key: Optional[Dict[str, int]] = index_per_key\n\n @staticmethod\n def from_tensor_list(\n keys: List[str], tensors: List[torch.Tensor], key_dim: int = 1, cat_dim: int = 1\n ) -> \"KeyedTensor\":\n length_per_key = [tensor.shape[key_dim] for tensor in tensors]\n return KeyedTensor(\n keys=keys,\n length_per_key=length_per_key,\n values=torch.cat(tensors, dim=cat_dim),\n key_dim=key_dim,\n )\n\n def keys(self) -> List[str]:\n return self._keys\n\n def values(self) -> torch.Tensor:\n return self._values\n\n def key_dim(self) -> int:\n return self._key_dim\n\n def offset_per_key(self) -> List[int]:\n _offset_per_key = _maybe_compute_offset_per_key_kt(\n self._length_per_key,\n self._offset_per_key,\n )\n self._offset_per_key = _offset_per_key\n return _offset_per_key\n\n def length_per_key(self) -> List[int]:\n return self._length_per_key\n\n def _key_indices(self) -> Dict[str, int]:\n _index_per_key = _maybe_compute_index_per_key(\n self._keys,\n self._index_per_key,\n )\n self._index_per_key = _index_per_key\n return _index_per_key\n\n def __getitem__(self, key: str) -> torch.Tensor:\n index = self._key_indices()[key]\n start = self.offset_per_key()[index]\n length = self._length_per_key[index]\n # pyre-ignore [16]: Undefined attribute `torch.Tensor` has no attribute `narrow`\n return self._values.narrow(dim=self._key_dim, start=start, length=length)\n\n def to_dict(self) -> Dict[str, torch.Tensor]:\n indices = self._key_indices()\n lengths = self._length_per_key\n split_values = self._values.split(lengths, dim=self._key_dim)\n return {key: split_values[index] for (key, index) in indices.items()}\n\n @staticmethod\n def regroup(\n keyed_tensors: List[\"KeyedTensor\"], groups: List[List[str]]\n ) -> List[torch.Tensor]:\n return _regroup_keyed_tensors(keyed_tensors, groups)\n\n # pyre-ignore [56]\n @torch.jit.unused\n def record_stream(self, stream: torch.cuda.streams.Stream) -> None:\n self._values.record_stream(stream)\n\n def to(self, device: torch.device, non_blocking: bool = False) -> \"KeyedTensor\":\n return 
KeyedTensor(\n keys=self._keys,\n length_per_key=self._length_per_key,\n values=self._values.to(device, non_blocking=non_blocking),\n key_dim=self._key_dim,\n offset_per_key=self._offset_per_key,\n index_per_key=self._index_per_key,\n )\n\n def __str__(self) -> str:\n if len(self._keys) == 0:\n return \"KeyedTensor()\\n\"\n\n return (\n \"KeyedTensor({\\n\"\n + \",\\n\".join(\n [\n ' \"{}\": '.format(key) + _keyed_values_string(self[key])\n for key in self._keys\n ]\n )\n + \"\\n})\\n\"\n )\n"
] | [
[
"torch.ops.load_library",
"torch.cat",
"torch.fx.wrap",
"torch.tensor",
"torch.split",
"torch.ops.fbgemm.asynchronous_complete_cumsum"
]
] |
mattboggess/eda-tools | [
"8bfc54a9072a4d789ee36d73f23e04adb2c3080e"
] | [
"examples/plot_univariate_datetime_summary.py"
] | [
"\"\"\"\nUnivariate Datetime Summary\n=======\n\nExample of univariate eda summary for a datetime variable. Here we look at posting times for TidyTuesday tweets.\n\nThe datetime summary computes the following:\n\n- A time seriesplot aggregated according to the `ts_freq` parameter\n- A boxplot and histogram of the time differences between successive observations. `delta_units` controls the units of this.\n- Barplots showing counts by day of week, month, hour of day, day of month\n- A table with summary statistics for the time differences and time series itself\n\"\"\"\nimport warnings\n\nimport pandas as pd\n\nimport intedact\n\nwarnings.filterwarnings(\"ignore\")\n\ndata = pd.read_csv(\n \"https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/tidytuesday_tweets/data.csv\"\n)\ndata[\"created_at\"] = pd.to_datetime(data.created_at)\ntable, fig = intedact.datetime_univariate_summary(data, \"created_at\", fontsize=10)\nfig.show()\ntable\n\n# %%\n# By default, the summary tries to infer reasonable units for the time differences and time series. We can change\n# these by using time unit strings for the `ts_freq` and `delta_units` parameters.\n#\n\ntable, fig = intedact.datetime_univariate_summary(\n data, \"created_at\", ts_freq=\"1 day\", delta_units=\"1 minute\", fontsize=10\n)\nfig.show()\ntable\n\n# %%\n# Example of changing plot type, removing trend line, and removing outliers.\n#\ntable, fig = intedact.datetime_univariate_summary(\n data,\n \"created_at\",\n ts_type=\"point\",\n trend_line=None,\n upper_quantile=0.99,\n fontsize=10,\n)\nfig.show()\ntable\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
egonw/bioregistry | [
"5070e0310a4e9f695d9089b302e0a2421c155d02"
] | [
"src/bioregistry/curation/make_description_curation_sheet.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Make a curation sheet for the bioregistry.\"\"\"\n\nimport pandas as pd\n\nimport bioregistry\nfrom bioregistry.constants import BIOREGISTRY_MODULE\n\n\ndef descriptions():\n \"\"\"Make a curation sheet for descriptions.\"\"\"\n columns = [\n \"prefix\",\n \"name\",\n \"homepage\",\n \"deprecated\",\n \"description\",\n ]\n path = BIOREGISTRY_MODULE.join(\"curation\", name=\"descriptions.tsv\")\n rows = []\n for prefix in bioregistry.read_registry():\n if bioregistry.get_description(prefix):\n continue\n homepage = bioregistry.get_homepage(prefix)\n if homepage is None:\n continue\n deprecated = bioregistry.is_deprecated(prefix)\n rows.append(\n (\n prefix,\n bioregistry.get_name(prefix),\n homepage,\n \"x\" if deprecated else \"\",\n \"\",\n )\n )\n df = pd.DataFrame(rows, columns=columns)\n df.to_csv(path, sep=\"\\t\")\n\n\ndef examples():\n \"\"\"Make a curation sheet for examples.\"\"\"\n columns = [\n \"prefix\",\n \"name\",\n \"homepage\",\n \"deprecated\",\n \"example\",\n ]\n rows = []\n for prefix in bioregistry.read_registry():\n if bioregistry.get_example(prefix):\n continue\n homepage = bioregistry.get_homepage(prefix)\n if homepage is None:\n continue\n deprecated = bioregistry.is_deprecated(prefix)\n rows.append(\n (\n prefix,\n bioregistry.get_name(prefix),\n homepage,\n \"x\" if deprecated else \"\",\n \"\",\n )\n )\n df = pd.DataFrame(rows, columns=columns)\n path = BIOREGISTRY_MODULE.join(\"curation\", name=\"examples.tsv\")\n df.to_csv(path, sep=\"\\t\")\n\n\ndef homepages():\n \"\"\"Make a curation sheet for homepages.\"\"\"\n columns = [\n \"prefix\",\n \"name\",\n \"deprecated\",\n \"homepage\",\n ]\n path = BIOREGISTRY_MODULE.join(\"curation\", name=\"homepages.tsv\")\n rows = []\n for prefix in bioregistry.read_registry():\n homepage = bioregistry.get_homepage(prefix)\n if homepage is not None:\n continue\n deprecated = bioregistry.is_deprecated(prefix)\n rows.append(\n (\n prefix,\n bioregistry.get_name(prefix),\n \"x\" if deprecated else \"\",\n homepage,\n )\n )\n df = pd.DataFrame(rows, columns=columns)\n df.to_csv(path, sep=\"\\t\")\n\n\nif __name__ == \"__main__\":\n descriptions()\n examples()\n homepages()\n"
] | [
[
"pandas.DataFrame"
]
] |
sudharsana-kjl/esmlab | [
"17d9788eb7b8c6a0f7cfd4e547b4057b3d25f3c9"
] | [
"esmlab/statistics.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function\n\nfrom warnings import warn\n\nimport numpy as np\nimport xarray as xr\n\nfrom .utils.common import esmlab_xr_set_options\nfrom .utils.variables import (\n get_original_attrs,\n get_static_variables,\n get_variables,\n save_metadata,\n set_metadata,\n set_static_variables,\n update_attrs,\n)\n\n\ndef _apply_nan_mask(weights, x, y=None):\n # If y is specified, make sure x and y have same shape\n if y is not None and isinstance(y, xr.DataArray):\n assert x.shape == y.shape\n valid = x.notnull() & y.notnull()\n else:\n valid = x.notnull()\n\n # Apply nan mask\n return weights.where(valid)\n\n\ndef _get_weights_and_dims(x, y=None, weights=None, dim=None, apply_nan_mask=True):\n \"\"\" Get weights and dimensions \"\"\"\n\n if dim and isinstance(dim, str):\n dims = [dim]\n\n elif isinstance(dim, list):\n dims = dim\n\n else:\n dims = [k for k in x.dims]\n\n op_over_dims = [k for k in dims if k in x.dims]\n if not op_over_dims:\n raise ValueError('Unexpected dimensions for variable {0}'.format(x.name))\n\n dims_shape = tuple(l for i, l in enumerate(x.shape) if x.dims[i] in op_over_dims)\n if weights is None:\n weights = xr.DataArray(np.ones(dims_shape), dims=op_over_dims)\n weights = _apply_nan_mask(weights, x, y)\n\n else:\n assert weights.shape == dims_shape\n if apply_nan_mask:\n weights = _apply_nan_mask(weights, x, y)\n\n # Make sure weights add up to 1.0\n rtol = 1e-6 if weights.dtype == np.float32 else 1e-7\n np.testing.assert_allclose(\n (weights / weights.sum(op_over_dims)).sum(op_over_dims), 1.0, rtol=rtol\n )\n return weights, op_over_dims\n\n\n@esmlab_xr_set_options(arithmetic_join='exact')\ndef weighted_sum(x, weights=None, dim=None, apply_nan_mask=True):\n \"\"\"Reduce `xarray.DataArray` by applying `weighted sum` along some dimension(s).\n\n Parameters\n ----------\n\n x : `xarray.DataArray`\n xarray object for which to compute `weighted sum`.\n\n weights : array_like, optional\n weights to use. By default, weights=`None`\n\n dim : str or sequence of str, optional\n Dimension(s) over which to apply mean. By default `weighted sum`\n is applied over all dimensions.\n\n apply_nan_mask : bool, default: True\n\n Returns\n -------\n\n Weighted_sum : `xarray.DataArray`\n New DataArray object with `weighted sum` applied to its data\n and the indicated dimension(s) removed. If `weights` is None,\n returns regular sum using equal weights for all data points.\n \"\"\"\n if weights is None:\n warn('Computing sum using equal weights for all data points')\n\n weights, op_over_dims = _get_weights_and_dims(\n x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask\n )\n x_w_sum = (x * weights).sum(op_over_dims)\n\n original_attrs, original_encoding = get_original_attrs(x)\n return update_attrs(x_w_sum, original_attrs, original_encoding)\n\n\n@esmlab_xr_set_options(arithmetic_join='exact')\ndef weighted_mean(x, weights=None, dim=None, apply_nan_mask=True):\n \"\"\"Reduce `xarray.DataArray` by applying weighted mean along some dimension(s).\n\n Parameters\n ----------\n\n x : `xarray.DataArray`\n xarray object for which to compute `weighted mean`.\n\n weights : array_like, optional\n weights to use. By default, weights=`None`\n\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `weighted mean`. 
By default weighted mean\n is applied over all dimensions.\n\n apply_nan_mask : bool, default: True\n\n\n Returns\n -------\n\n weighted_mean : `xarray.DataArray`\n New DataArray object with ` weighted mean` applied to its data\n and the indicated dimension(s) removed. If `weights` is None,\n returns regular mean using equal weights for all data points.\n \"\"\"\n if weights is None:\n warn('Computing mean using equal weights for all data points')\n\n weights, op_over_dims = _get_weights_and_dims(\n x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask\n )\n\n x_w_mean = (x * weights).sum(op_over_dims) / weights.sum(op_over_dims)\n original_attrs, original_encoding = get_original_attrs(x)\n return update_attrs(x_w_mean, original_attrs, original_encoding)\n\n\n@esmlab_xr_set_options(arithmetic_join='exact')\ndef weighted_std(x, weights=None, dim=None, ddof=0, apply_nan_mask=True):\n \"\"\"Reduce `xarray.DataArray` by applying `weighted std` along some dimension(s).\n\n Parameters\n ----------\n\n x : `xarray.DataArray`\n xarray object for which to compute `weighted std`.\n\n weights : array_like, optional\n weights to use. By default, weights=`None`\n\n dim : str or sequence of str, optional\n Dimension(s) over which to apply mean. By default `weighted std`\n is applied over all dimensions.\n\n\n ddof : int, optional\n Means Delta Degrees of Freedom. By default ddof is zero.\n\n apply_nan_mask : bool, default: True\n\n Returns\n -------\n\n weighted_standard_deviation : `xarray.DataArray`\n New DataArray object with `weighted std` applied to its data\n and the indicated dimension(s) removed. If `weights` is None,\n returns regular standard deviation using equal weights for all data points.\n \"\"\"\n if weights is None:\n warn('Computing standard deviation using equal weights for all data points')\n\n weights, op_over_dims = _get_weights_and_dims(\n x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask\n )\n\n # If the mask is applied in previous operation,\n # disable it for subseqent operations\n if apply_nan_mask:\n apply_nan_mask_flag = False\n else:\n apply_nan_mask_flag = True\n\n x_w_mean = weighted_mean(\n x, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag\n )\n\n x_w_std = np.sqrt(\n (weights * (x - x_w_mean) ** 2).sum(op_over_dims) / (weights.sum(op_over_dims) - ddof)\n )\n original_attrs, original_encoding = get_original_attrs(x)\n\n return update_attrs(x_w_std, original_attrs, original_encoding)\n\n\n@esmlab_xr_set_options(arithmetic_join='exact')\ndef weighted_rmsd(x, y, weights=None, dim=None, apply_nan_mask=True):\n \"\"\" Compute weighted root-mean-square-deviation between two `xarray.DataArray` objects.\n\n Parameters\n ----------\n\n x, y : `xarray.DataArray` objects\n xarray objects for which to compute `weighted_rmsd`.\n\n weights : array_like, optional\n weights to use. 
By default, weights=`None`\n\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `weighted rmsd` By default weighted rmsd\n is applied over all dimensions.\n\n apply_nan_mask : bool, default: True\n\n Returns\n -------\n\n weighted_root_mean_square deviation : float\n If `weights` is None, returns root mean square deviation using equal weights for all data points.\n\n \"\"\"\n\n if weights is None:\n warn('Computing root-mean-square-deviation using equal weights for all data points')\n\n weights, op_over_dims = _get_weights_and_dims(\n x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask\n )\n\n # If the mask is applied in previous operation,\n # disable it for subseqent operations to speed up computation\n if apply_nan_mask:\n apply_nan_mask_flag = False\n else:\n apply_nan_mask_flag = True\n\n dev = (x - y) ** 2\n dev_mean = weighted_mean(\n dev, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag\n )\n return np.sqrt(dev_mean)\n\n\n@esmlab_xr_set_options(arithmetic_join='exact')\ndef weighted_cov(x, y, weights=None, dim=None, apply_nan_mask=True):\n \"\"\" Compute weighted covariance between two `xarray.DataArray` objects.\n\n Parameters\n ----------\n\n x, y : `xarray.DataArray` objects\n xarray objects for which to compute `weighted covariance`.\n\n weights : array_like, optional\n weights to use. By default, weights=`None`\n\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `weighted covariance`\n By default weighted covariance is applied over all dimensions.\n\n apply_nan_mask : bool, default: True\n\n Returns\n -------\n\n weighted_covariance : float\n If `weights` is None, returns covariance using equal weights for all data points.\n\n\n \"\"\"\n if weights is None:\n warn('Computing weighted covariance using equal weights for all data points')\n\n weights, op_over_dims = _get_weights_and_dims(\n x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask\n )\n\n # If the mask is applied in previous operation,\n # disable it for subseqent operations to speed up computation\n if apply_nan_mask:\n apply_nan_mask_flag = False\n else:\n apply_nan_mask_flag = True\n\n mean_x = weighted_mean(x, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag)\n mean_y = weighted_mean(y, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag)\n\n dev_x = x - mean_x\n dev_y = y - mean_y\n dev_xy = dev_x * dev_y\n cov_xy = weighted_mean(\n dev_xy, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag\n )\n return cov_xy\n\n\n@esmlab_xr_set_options(arithmetic_join='exact')\ndef weighted_corr(x, y, weights=None, dim=None, apply_nan_mask=True):\n \"\"\" Compute weighted correlation between two `xarray.DataArray` objects.\n\n Parameters\n ----------\n\n x, y : `xarray.DataArray` objects\n xarray objects for which to compute `weighted correlation`.\n\n weights : array_like, optional\n weights to use. 
By default, weights=`None`\n\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `weighted correlation`\n By default weighted correlation is applied over all dimensions.\n\n apply_nan_mask : bool, default: True\n\n Returns\n -------\n\n weighted_correlation : float\n If `weights` is None, returns correlation using equal weights for all data points.\n\n\n \"\"\"\n if weights is None:\n warn('Computing weighted correlation using equal weights for all data points')\n\n weights, op_over_dims = _get_weights_and_dims(\n x, weights=weights, dim=dim, apply_nan_mask=apply_nan_mask\n )\n\n # If the mask is applied in previous operation,\n # disable it for subseqent operations to speed up computation\n if apply_nan_mask:\n apply_nan_mask_flag = False\n else:\n apply_nan_mask_flag = True\n\n numerator = weighted_cov(\n x=x, y=y, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag\n )\n denominator = np.sqrt(\n weighted_cov(x, x, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag)\n * weighted_cov(y, y, weights=weights, dim=op_over_dims, apply_nan_mask=apply_nan_mask_flag)\n )\n corr_xy = numerator / denominator\n return corr_xy\n"
] | [
[
"numpy.sqrt",
"numpy.ones"
]
] |
daletovar/csparse | [
"93cacae13bf8fe1c65c6658779939f585d14dc8f"
] | [
"GCRS2/csr_indexing.py"
] | [
"import numpy as np \r\nimport numba\r\n\r\n\r\n\r\[email protected](nopython=True,nogil=True)\r\ndef csr_row_array_col_array(arr_data,arr_indices,arr_indptr,indptr,row,col):\r\n \"\"\"\r\n This is a very general algorithm to be used when more optimized methods don't apply. \r\n It performs a binary search for each of the requested elements. \r\n Consequently it roughly scales by O(n log nnz per row) where n is the number of requested elements and\r\n nnz per row is the number of nonzero elements in that row.\r\n \"\"\"\r\n indices = []\r\n ind_list = []\r\n for i,r in enumerate(row):\r\n inds = []\r\n current_row = arr_indices[arr_indptr[r]:arr_indptr[r+1]]\r\n if len(current_row) == 0:\r\n indptr[i+1] = indptr[i]\r\n continue\r\n for c in range(len(col)):\r\n s = np.searchsorted(current_row,col[c]) \r\n if not (s >= current_row.size or current_row[s] != col[c]):\r\n s += arr_indptr[r]\r\n inds.append(s)\r\n indices.append(c)\r\n ind_list.extend(inds)\r\n indptr[i+1] = indptr[i] + len(inds)\r\n ind_list = np.array(ind_list,dtype=np.int64)\r\n indices = np.array(indices) \r\n data = arr_data[ind_list]\r\n return (data,indices,indptr)\r\n\r\n\r\[email protected](nopython=True,nogil=True)\r\ndef csr_full_col_slices(arr_data,arr_indices,arr_indptr,indptr,row):\r\n \"\"\"\r\n This algorithm is used for when all column dimensions are full slices with a step of one.\r\n It might be worth it to make two passes over the array and use static arrays instead of lists. \r\n \"\"\"\r\n indices = []\r\n data = []\r\n for i,r in enumerate(row,1):\r\n indices.extend(arr_indices[arr_indptr[r]:arr_indptr[r+1]])\r\n data.extend(arr_data[arr_indptr[r]:arr_indptr[r+1]])\r\n indptr[i] = indptr[i-1] + len(arr_indices[arr_indptr[r]:arr_indptr[r+1]])\r\n data = np.array(data)\r\n indices = np.array(indices)\r\n return (data,indices,indptr)\r\n\r\[email protected](nopython=True,nogil=True)\r\ndef csr_partial_col_slices(arr_data,arr_indices,arr_indptr,indptr,row,col_start,col_stop):\r\n \"\"\"\r\n This algorithm is used for partial column slices with a step of one. It is currently only used for 2d arrays.\r\n \"\"\"\r\n indices = []\r\n data = []\r\n for i,r in enumerate(row,1):\r\n start = np.searchsorted(arr_indices[arr_indptr[r]:arr_indptr[r+1]],col_start) + arr_indptr[r]\r\n stop = np.searchsorted(arr_indices[arr_indptr[r]:arr_indptr[r+1]],col_stop) + arr_indptr[r]\r\n inds = arr_indices[start:stop] - col_stop\r\n indices.extend(inds)\r\n data.extend(arr_data[start:stop])\r\n indptr[i] = indptr[i-1] + inds.size\r\n data = np.array(data)\r\n indices = np.array(indices)\r\n return (data,indices,indptr)\r\n\r\n\r\n"
] | [
[
"numpy.array",
"numpy.searchsorted"
]
] |
zviri/pdftotree | [
"3daca0feb17fc3a0136a4098e5f9a8e72794d527"
] | [
"pdftotree/utils/display_utils.py"
] | [
"import numpy as np\nfrom wand.color import Color\nfrom wand.display import display\nfrom wand.drawing import Drawing\nfrom wand.image import Image\n\n\ndef display_bounding_boxes(img, blocks, alternatecolors=False, color=Color(\"blue\")):\n \"\"\"\n Displays each of the bounding boxes passed in 'boxes' on an image of the pdf\n pointed to by pdf_file\n boxes is a list of 5-tuples (page, top, left, bottom, right)\n \"\"\"\n draw = Drawing()\n draw.fill_color = Color(\"rgba(0, 0, 0, 0)\")\n draw.stroke_color = color\n for block in blocks:\n top, left, bottom, right = block[-4:]\n if alternatecolors:\n draw.stroke_color = Color(\n \"rgba({},{},{}, 1)\".format(\n str(np.random.randint(255)),\n str(np.random.randint(255)),\n str(np.random.randint(255)),\n )\n )\n draw.rectangle(\n left=float(left), top=float(top), right=float(right), bottom=float(bottom)\n )\n draw(img)\n display(img)\n\n\ndef display_bounding_boxes_within_notebook(\n page_num, extractor, blocks, alternatecolors=False, color=Color(\"blue\")\n):\n \"\"\"\n Displays each of the bounding boxes passed in 'boxes' on an image of the pdf\n pointed to by pdf_file\n boxes is a list of 5-tuples (page, top, left, bottom, right)\n \"\"\"\n elems = extractor.elems[page_num]\n page_width, page_height = int(elems.layout.width), int(elems.layout.height)\n img = pdf_to_img(extractor.pdf_file, page_num, page_width, page_height)\n draw = Drawing()\n draw.fill_color = Color(\"rgba(0, 0, 0, 0)\")\n draw.stroke_color = color\n for block in blocks:\n top, left, bottom, right = block[-4:]\n if alternatecolors:\n draw.stroke_color = Color(\n \"rgba({},{},{}, 1)\".format(\n str(np.random.randint(255)),\n str(np.random.randint(255)),\n str(np.random.randint(255)),\n )\n )\n draw.rectangle(\n left=float(left), top=float(top), right=float(right), bottom=float(bottom)\n )\n draw(img)\n return img\n\n\ndef pdf_to_img(pdf_file, page_num, page_width, page_height):\n \"\"\"\n Converts pdf file into image\n :param pdf_file: path to the pdf file\n :param page_num: page number to convert (index starting at 1)\n :return: wand image object\n \"\"\"\n img = Image(filename=\"{}[{}]\".format(pdf_file, page_num - 1))\n img.resize(page_width, page_height)\n return img\n"
] | [
[
"numpy.random.randint"
]
] |
XuyangBai/mmdetection3d | [
"53370467c1b88f163cbe7b7300a1f588a6761e35"
] | [
"mmdet3d/core/visualizer/show_result.py"
] | [
"import mmcv\nimport numpy as np\nimport trimesh\nfrom os import path as osp\n\n\ndef _write_ply(points, out_filename):\n \"\"\"Write points into ``ply`` format for meshlab visualization.\n\n Args:\n points (np.ndarray): Points in shape (N, dim).\n out_filename (str): Filename to be saved.\n \"\"\"\n N = points.shape[0]\n fout = open(out_filename, 'w')\n for i in range(N):\n if points.shape[1] == 6:\n c = points[i, 3:].astype(int)\n fout.write(\n 'v %f %f %f %d %d %d\\n' %\n (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))\n\n else:\n fout.write('v %f %f %f\\n' %\n (points[i, 0], points[i, 1], points[i, 2]))\n fout.close()\n\n\ndef _write_oriented_bbox(scene_bbox, out_filename):\n \"\"\"Export oriented (around Z axis) scene bbox to meshes.\n\n Args:\n scene_bbox(list[ndarray] or ndarray): xyz pos of center and\n 3 lengths (dx,dy,dz) and heading angle around Z axis.\n Y forward, X right, Z upward. heading angle of positive X is 0,\n heading angle of positive Y is 90 degrees.\n out_filename(str): Filename.\n \"\"\"\n\n def heading2rotmat(heading_angle):\n rotmat = np.zeros((3, 3))\n rotmat[2, 2] = 1\n cosval = np.cos(heading_angle)\n sinval = np.sin(heading_angle)\n rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])\n return rotmat\n\n def convert_oriented_box_to_trimesh_fmt(box):\n ctr = box[:3]\n lengths = box[3:6]\n trns = np.eye(4)\n trns[0:3, 3] = ctr\n trns[3, 3] = 1.0\n trns[0:3, 0:3] = heading2rotmat(box[6])\n box_trimesh_fmt = trimesh.creation.box(lengths, trns)\n return box_trimesh_fmt\n\n if len(scene_bbox) == 0:\n scene_bbox = np.zeros((1, 7))\n scene = trimesh.scene.Scene()\n for box in scene_bbox:\n scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))\n\n mesh_list = trimesh.util.concatenate(scene.dump())\n # save to ply file\n trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')\n\n return\n\n\ndef show_result(points, gt_bboxes, pred_bboxes, out_dir, filename, show=True):\n \"\"\"Convert results into format that is directly readable for meshlab.\n\n Args:\n points (np.ndarray): Points.\n gt_bboxes (np.ndarray): Ground truth boxes.\n pred_bboxes (np.ndarray): Predicted boxes.\n out_dir (str): Path of output directory\n filename (str): Filename of the current frame.\n show (bool): Visualize the results online.\n \"\"\"\n if show:\n from .open3d_vis import Visualizer\n\n vis = Visualizer(points)\n if pred_bboxes is not None:\n vis.add_bboxes(bbox3d=pred_bboxes)\n if gt_bboxes is not None:\n vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))\n vis.show()\n\n result_path = osp.join(out_dir, filename)\n mmcv.mkdir_or_exist(result_path)\n\n if points is not None:\n _write_ply(points, osp.join(result_path, f'{filename}_points.obj'))\n\n if gt_bboxes is not None:\n # bottom center to gravity center\n gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2\n # the positive direction for yaw in meshlab is clockwise\n gt_bboxes[:, 6] *= -1\n _write_oriented_bbox(gt_bboxes,\n osp.join(result_path, f'{filename}_gt.ply'))\n\n if pred_bboxes is not None:\n # bottom center to gravity center\n pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2\n # the positive direction for yaw in meshlab is clockwise\n pred_bboxes[:, 6] *= -1\n _write_oriented_bbox(pred_bboxes,\n osp.join(result_path, f'{filename}_pred.ply'))\n"
] | [
[
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.array",
"numpy.zeros"
]
] |
agesb/TransQuest | [
"84fb49b2e8d3dfae6caacc378e9764e610452aad"
] | [
"transquest/algo/sentence_level/multitransquest/utils.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport csv\nimport json\nimport linecache\nimport os\nfrom collections import Counter\nfrom io import open\nfrom multiprocessing import Pool, cpu_count\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom tqdm.auto import tqdm\n\ntry:\n import torchvision\n import torchvision.transforms as transforms\n\n torchvision_available = True\n from PIL import Image\nexcept ImportError:\n torchvision_available = False\n\ncsv.field_size_limit(2147483647)\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None, x0=None, y0=None, x1=None, y1=None):\n \"\"\"\n Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n if x0 is None:\n self.bboxes = None\n else:\n self.bboxes = [[a, b, c, d] for a, b, c, d in zip(x0, y0, x1, y1)]\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id, bboxes=None):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n if bboxes:\n self.bboxes = bboxes\n\n\ndef convert_example_to_feature(\n example_row,\n pad_token=0,\n sequence_a_segment_id=0,\n sequence_b_segment_id=1,\n cls_token_segment_id=1,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n sep_token_extra=False,\n):\n (\n example,\n max_seq_length,\n tokenizer,\n output_mode,\n cls_token_at_end,\n cls_token,\n sep_token,\n cls_token_segment_id,\n pad_on_left,\n pad_token_segment_id,\n sep_token_extra,\n multi_label,\n stride,\n pad_token,\n add_prefix_space,\n pad_to_max_length,\n ) = example_row\n\n bboxes = []\n if example.bboxes:\n tokens_a = []\n for word, bbox in zip(example.text_a.split(), example.bboxes):\n word_tokens = tokenizer.tokenize(word)\n tokens_a.extend(word_tokens)\n bboxes.extend([bbox] * len(word_tokens))\n\n cls_token_box = [0, 0, 0, 0]\n sep_token_box = [1000, 1000, 1000, 1000]\n pad_token_box = [0, 0, 0, 0]\n\n else:\n if add_prefix_space and not example.text_a.startswith(\" \"):\n tokens_a = tokenizer.tokenize(\" \" + example.text_a)\n else:\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n if add_prefix_space and not example.text_b.startswith(\" \"):\n tokens_b = tokenizer.tokenize(\" \" + example.text_b)\n else:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\". \" -4\" for RoBERTa.\n special_tokens_count = 4 if sep_token_extra else 3\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)\n else:\n # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n special_tokens_count = 3 if sep_token_extra else 2\n if len(tokens_a) > max_seq_length - special_tokens_count:\n tokens_a = tokens_a[: (max_seq_length - special_tokens_count)]\n if example.bboxes:\n bboxes = bboxes[: (max_seq_length - special_tokens_count)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". 
Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = tokens_a + [sep_token]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if bboxes:\n bboxes += [sep_token_box]\n\n if tokens_b:\n if sep_token_extra:\n tokens += [sep_token]\n segment_ids += [sequence_b_segment_id]\n\n tokens += tokens_b + [sep_token]\n\n segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)\n\n if cls_token_at_end:\n tokens = tokens + [cls_token]\n segment_ids = segment_ids + [cls_token_segment_id]\n else:\n tokens = [cls_token] + tokens\n segment_ids = [cls_token_segment_id] + segment_ids\n if bboxes:\n bboxes = [cls_token_box] + bboxes\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n if pad_to_max_length:\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)\n if bboxes:\n bboxes += [pad_token_box] * padding_length\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n if bboxes:\n assert len(bboxes) == max_seq_length\n # if output_mode == \"classification\":\n # label_id = label_map[example.label]\n # elif output_mode == \"regression\":\n # label_id = float(example.label)\n # else:\n # raise KeyError(output_mode)\n\n # if output_mode == \"regression\":\n # label_id = float(example.label)\n\n if bboxes:\n return InputFeatures(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=example.label, bboxes=bboxes\n )\n else:\n return InputFeatures(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=example.label,\n )\n\n\ndef convert_example_to_feature_sliding_window(\n example_row,\n pad_token=0,\n sequence_a_segment_id=0,\n sequence_b_segment_id=1,\n cls_token_segment_id=1,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n sep_token_extra=False,\n):\n (\n example,\n max_seq_length,\n tokenizer,\n output_mode,\n cls_token_at_end,\n cls_token,\n sep_token,\n cls_token_segment_id,\n pad_on_left,\n pad_token_segment_id,\n sep_token_extra,\n multi_label,\n stride,\n pad_token,\n add_prefix_space,\n pad_to_max_length,\n ) = example_row\n\n if stride < 1:\n stride = int(max_seq_length * stride)\n\n bucket_size = max_seq_length - (3 if sep_token_extra else 2)\n token_sets = []\n\n if add_prefix_space and not example.text_a.startswith(\" \"):\n tokens_a = tokenizer.tokenize(\" \" + example.text_a)\n else:\n tokens_a = tokenizer.tokenize(example.text_a)\n\n if len(tokens_a) > bucket_size:\n token_sets = [tokens_a[i: i + bucket_size] for i in range(0, len(tokens_a), stride)]\n else:\n token_sets.append(tokens_a)\n\n if example.text_b:\n raise ValueError(\"Sequence pair tasks not implemented for sliding window tokenization.\")\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . 
[SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n\n input_features = []\n for tokens_a in token_sets:\n tokens = tokens_a + [sep_token]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if cls_token_at_end:\n tokens = tokens + [cls_token]\n segment_ids = segment_ids + [cls_token_segment_id]\n else:\n tokens = [cls_token] + tokens\n segment_ids = [cls_token_segment_id] + segment_ids\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # if output_mode == \"classification\":\n # label_id = label_map[example.label]\n # elif output_mode == \"regression\":\n # label_id = float(example.label)\n # else:\n # raise KeyError(output_mode)\n\n input_features.append(\n InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=example.label, )\n )\n\n return input_features\n\n\ndef convert_examples_to_features(\n examples,\n max_seq_length,\n tokenizer,\n output_mode,\n cls_token_at_end=False,\n sep_token_extra=False,\n pad_on_left=False,\n cls_token=\"[CLS]\",\n sep_token=\"[SEP]\",\n pad_token=0,\n sequence_a_segment_id=0,\n sequence_b_segment_id=1,\n cls_token_segment_id=1,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n process_count=cpu_count() - 2,\n multi_label=False,\n silent=False,\n use_multiprocessing=True,\n sliding_window=False,\n flatten=False,\n stride=None,\n add_prefix_space=False,\n pad_to_max_length=True,\n args=None,\n):\n \"\"\" Loads a data file into a list of `InputBatch`s\n `cls_token_at_end` define the location of the CLS token:\n - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]\n - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]\n `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)\n \"\"\"\n\n examples = [\n (\n example,\n max_seq_length,\n tokenizer,\n output_mode,\n cls_token_at_end,\n cls_token,\n sep_token,\n cls_token_segment_id,\n pad_on_left,\n pad_token_segment_id,\n 
sep_token_extra,\n multi_label,\n stride,\n pad_token,\n add_prefix_space,\n pad_to_max_length,\n )\n for example in examples\n ]\n\n if use_multiprocessing:\n if sliding_window:\n with Pool(process_count) as p:\n features = list(\n tqdm(\n p.imap(\n convert_example_to_feature_sliding_window,\n examples,\n chunksize=args.multiprocessing_chunksize,\n ),\n total=len(examples),\n disable=silent,\n )\n )\n if flatten:\n features = [feature for feature_set in features for feature in feature_set]\n else:\n with Pool(process_count) as p:\n features = list(\n tqdm(\n p.imap(convert_example_to_feature, examples, chunksize=args.multiprocessing_chunksize),\n total=len(examples),\n disable=silent,\n )\n )\n else:\n if sliding_window:\n features = [\n convert_example_to_feature_sliding_window(example) for example in tqdm(examples, disable=silent)\n ]\n if flatten:\n features = [feature for feature_set in features for feature in feature_set]\n else:\n features = [convert_example_to_feature(example) for example in tqdm(examples, disable=silent)]\n\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\nPOOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}\n\n\nclass ImageEncoder(nn.Module):\n def __init__(self, args):\n super().__init__()\n model = torchvision.models.resnet152(pretrained=True)\n modules = list(model.children())[:-2]\n self.model = nn.Sequential(*modules)\n self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])\n\n def forward(self, x):\n # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048\n out = self.pool(self.model(x))\n out = torch.flatten(out, start_dim=2)\n out = out.transpose(1, 2).contiguous()\n return out # BxNx2048\n\n\nclass JsonlDataset(Dataset):\n def __init__(\n self,\n data_path,\n tokenizer,\n transforms,\n labels,\n max_seq_length,\n files_list=None,\n image_path=None,\n text_label=None,\n labels_label=None,\n images_label=None,\n image_type_extension=None,\n data_type_extension=None,\n multi_label=False,\n ):\n\n self.text_label = text_label if text_label else \"text\"\n self.labels_label = labels_label if labels_label else \"labels\"\n self.images_label = images_label if images_label else \"images\"\n self.image_type_extension = image_type_extension if image_type_extension else \"\"\n self.data_type_extension = data_type_extension if data_type_extension else \"\"\n self.multi_label = multi_label\n\n if isinstance(files_list, str):\n files_list = json.load(open(files_list))\n if isinstance(data_path, str):\n if not files_list:\n files_list = [f for f in os.listdir(data_path) if f.endswith(self.data_type_extension)]\n self.data = [\n dict(\n json.load(open(os.path.join(data_path, l + self.data_type_extension))),\n **{\"images\": l + image_type_extension}\n )\n for l in files_list\n ]\n self.data_dir = os.path.dirname(data_path)\n else:\n data_path[self.images_label] = 
data_path[self.images_label].apply(lambda x: x + self.image_type_extension)\n self.data = data_path.to_dict(\"records\")\n self.data_dir = image_path\n self.tokenizer = tokenizer\n self.labels = labels\n self.n_classes = len(labels)\n self.max_seq_length = max_seq_length\n\n self.transforms = transforms\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n sentence = torch.LongTensor(self.tokenizer.encode(self.data[index][self.text_label], add_special_tokens=True))\n start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]\n sentence = sentence[: self.max_seq_length]\n\n if self.multi_label:\n label = torch.zeros(self.n_classes)\n label[[self.labels.index(tgt) for tgt in self.data[index][self.labels_label]]] = 1\n else:\n label = torch.tensor(self.labels.index(self.data[index][self.labels_label]))\n\n image = Image.open(os.path.join(self.data_dir, self.data[index][\"images\"])).convert(\"RGB\")\n image = self.transforms(image)\n\n return {\n \"image_start_token\": start_token,\n \"image_end_token\": end_token,\n \"sentence\": sentence,\n \"image\": image,\n \"label\": label,\n }\n\n def get_label_frequencies(self):\n label_freqs = Counter()\n for row in self.data:\n label_freqs.update(row[self.labels_label])\n return label_freqs\n\n\ndef collate_fn(batch):\n lens = [len(row[\"sentence\"]) for row in batch]\n bsz, max_seq_len = len(batch), max(lens)\n\n mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)\n text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)\n\n for i_batch, (input_row, length) in enumerate(zip(batch, lens)):\n text_tensor[i_batch, :length] = input_row[\"sentence\"]\n mask_tensor[i_batch, :length] = 1\n\n img_tensor = torch.stack([row[\"image\"] for row in batch])\n tgt_tensor = torch.stack([row[\"label\"] for row in batch])\n img_start_token = torch.stack([row[\"image_start_token\"] for row in batch])\n img_end_token = torch.stack([row[\"image_end_token\"] for row in batch])\n\n return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor\n\n\ndef get_image_transforms():\n return transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.46777044, 0.44531429, 0.40661017], std=[0.12221994, 0.12145835, 0.14380469], ),\n ]\n )\n\n\nclass LazyClassificationDataset(Dataset):\n def __init__(self, data_file, tokenizer, args):\n self.data_file = data_file\n self.start_row = args.lazy_loading_start_line\n self.num_entries = self._get_n_lines(self.data_file, self.start_row)\n self.tokenizer = tokenizer\n self.args = args\n self.delimiter = args.lazy_delimiter\n if args.lazy_text_a_column is not None and args.lazy_text_b_column is not None:\n self.text_a_column = args.lazy_text_a_column\n self.text_b_column = args.lazy_text_b_column\n self.text_column = None\n else:\n self.text_column = args.lazy_text_column\n self.text_a_column = None\n self.text_b_column = None\n self.labels_column = args.lazy_labels_column\n\n @staticmethod\n def _get_n_lines(data_file, start_row):\n with open(data_file, encoding=\"utf-8\") as f:\n for line_idx, _ in enumerate(f, 1):\n pass\n\n return line_idx - start_row\n\n def __getitem__(self, idx):\n line = linecache.getline(self.data_file, idx + 1 + self.start_row).rstrip(\"\\n\").split(self.delimiter)\n\n if not self.text_a_column and not self.text_b_column:\n text = line[self.text_column]\n label = line[self.labels_column]\n\n # If labels_map is defined, then labels need to be replaced 
with ints\n if self.args.labels_map:\n label = self.args.labels_map[label]\n if self.args.regression:\n label = torch.tensor(float(label), dtype=torch.float)\n else:\n label = torch.tensor(int(label), dtype=torch.long)\n\n return (\n self.tokenizer.encode_plus(\n text,\n max_length=self.args.max_seq_length,\n pad_to_max_length=self.args.max_seq_length,\n return_tensors=\"pt\",\n ),\n label,\n )\n else:\n text_a = line[self.text_a_column]\n text_b = line[self.text_b_column]\n label = line[self.labels_column]\n if self.args.regression:\n label = torch.tensor(float(label), dtype=torch.float)\n else:\n label = torch.tensor(int(label), dtype=torch.long)\n\n return (\n self.tokenizer.encode_plus(\n text_a,\n text_pair=text_b,\n max_length=self.args.max_seq_length,\n pad_to_max_length=self.args.max_seq_length,\n return_tensors=\"pt\",\n ),\n label,\n )\n\n def __len__(self):\n return self.num_entries\n\n\ndef sweep_config_to_sweep_values(sweep_config):\n \"\"\"\n Converts an instance of wandb.Config to plain values map.\n wandb.Config varies across versions quite significantly,\n so we use the `keys` method that works consistently.\n \"\"\"\n\n return {key: sweep_config[key] for key in sweep_config.keys()}"
] | [
[
"torch.nn.Sequential",
"torch.zeros",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten",
"torch.stack"
]
] |
Maddonix/data_store | [
"0e48e9b25ce8bf0628af714af7888fa47c82f303"
] | [
"src/utils/audio_utils.py"
] | [
"import torch\nimport torchaudio\n\ndef resample(aud, newsr):\n \"\"\"Resamples the audiofile to given samplerate\n\n Args:\n aud (tuple): tuple containing (audiodata_array, samplerate)\n newsr (int): new samplerate\n\n Returns:\n tuple: (audiodata_array, samplerate)\n \"\"\"\n sig, sr = aud\n\n if sr == newsr:\n # Nothing to do\n return aud\n\n num_channels = sig.shape[0]\n # Resample first channel\n resig = torchaudio.transforms.Resample(sr, newsr)(sig[:1, :])\n if num_channels > 1:\n # Resample the second channel and merge both channels\n retwo = torchaudio.transforms.Resample(sr, newsr)(sig[1:, :])\n resig = torch.cat([resig, retwo])\n\n return (resig, newsr)"
] | [
[
"torch.cat"
]
] |
YeLyuUT/FastVOD | [
"707dcf0d88a901d2db0b7cf24096801fbdd8735c"
] | [
"lib/model/siamese_net/template_target_proposal_layer.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nfrom model.utils.config import cfg\r\nfrom model.siamese_net.template_proposal_layer import _TemplateProposalLayer\r\nfrom model.siamese_net.weight_cropping_layer import weight_crop_layer\r\n\r\nclass _TemplateTargetProposalLayer(nn.Module):\r\n '''\r\n prepare template and target training pairs.\r\n '''\r\n\r\n def __init__(self):\r\n super(_TemplateTargetProposalLayer, self).__init__()\r\n self.template_proposal_layer = _TemplateProposalLayer()\r\n self.weights_extractor = weight_crop_layer() # hyper-parameters are defined by cfg.\r\n\r\n def forward(self, feats1, feats2, rpn_rois_1, gt_boxes_1, gt_boxes_2):\r\n '''\r\n\r\n :param feats1: size (N,C,H,W)\r\n :param feats2: size (N,C,H,W)\r\n :param rpn_rois_1: size (N,n,5) default n==256\r\n :param rpn_rois_2: size (N,n,5)\r\n :param gt_boxes_1: size (N,n,6) default n==128\r\n :param gt_boxes_2: size (N,n,6)\r\n :return:\r\n '''\r\n # feats 1 is template source, feats2 is target source.\r\n batch_size = feats1.size(0)\r\n # template_rois size (N,n,5) N:number of batches. n:number of rois.\r\n # template_labels size (N,n)\r\n # template_track_ids size (N,n)\r\n template_rois_all, template_labels_all, template_track_ids_all = self.template_proposal_layer(\r\n (rpn_rois_1,\r\n gt_boxes_1,\r\n feats1.size(3)/cfg.SIAMESE.WEIGHT_CROPPING_LAYER_SCALE,\r\n feats1.size(2)/cfg.SIAMESE.WEIGHT_CROPPING_LAYER_SCALE))\r\n template_weights_all = self.crop_weights_from_feats(feats1, template_rois_all).view(\r\n batch_size,\r\n template_rois_all.size(1),\r\n feats1.size(1),\r\n cfg.SIAMESE.TEMPLATE_SZ,\r\n cfg.SIAMESE.TEMPLATE_SZ)\r\n #print('template_weights_all:',template_weights_all.shape)\r\n #print('template_weights_all:', template_weights_all[0][0])\r\n # for each item, it is (target_feat, template_weights, gt_boxes for each weight).\r\n # target gt_boxes should be of shape (n, 1, 6).\r\n rtv_training_tuples = []\r\n for idx in range(batch_size):\r\n nonzero_coords = torch.nonzero(template_labels_all[idx] > 0)\r\n fg_obj_inds = None\r\n if nonzero_coords.size(0) > 0:\r\n fg_obj_inds = nonzero_coords[:, 0] # extracting rows.\r\n else:\r\n continue\r\n target_feat = feats2[idx:idx + 1]\r\n template_weights = template_weights_all[idx]\r\n template_track_ids = template_track_ids_all[idx]\r\n target_gt_boxes_all = gt_boxes_2[idx, :, :]\r\n target_gt_boxes = []\r\n\r\n for template_id in range(template_weights.size(0)):\r\n template_track_id = template_track_ids[template_id]\r\n has_gt = False\r\n if template_track_id >= 0:\r\n for gt_box_2 in target_gt_boxes_all:\r\n if gt_box_2[5] == template_track_id:\r\n has_gt = True\r\n target_gt_boxes.append(gt_box_2.view(1, -1))\r\n break\r\n if not has_gt:\r\n target_gt_boxes.append(target_feat.new_zeros(1, 6))\r\n\r\n target_gt_boxes = torch.stack(target_gt_boxes)\r\n\r\n template_weights = template_weights.index_select(0, fg_obj_inds)\r\n target_gt_boxes = target_gt_boxes.index_select(0, fg_obj_inds)\r\n rtv_training_tuples.append((idx, target_feat, template_weights, target_gt_boxes))\r\n\r\n return rtv_training_tuples\r\n\r\n def backward(self, top, propagate_down, bottom):\r\n \"\"\"This layer does not propagate gradients.\"\"\"\r\n pass\r\n\r\n def reshape(self, bottom, top):\r\n \"\"\"Reshaping happens during the call to forward.\"\"\"\r\n pass\r\n\r\n def crop_weights_from_feats(self, feats, rois):\r\n return self.weights_extractor(feats, rois.view(-1, 5))"
] | [
[
"torch.stack",
"torch.nonzero"
]
] |
dani-garcia/multiview_gpu | [
"b592b6bdf5d9bbd4ab08f4e5bf48c0435b1c8d7f",
"b592b6bdf5d9bbd4ab08f4e5bf48c0435b1c8d7f"
] | [
"multiview_gpu/tests/test_util.py",
"multiview_gpu/tests/conftest.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom numpy.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_raises\nimport multiview_gpu.util as util\n\n\ndef test_hbeta(sess):\n data = np.arange(25, dtype=float).reshape((5, 5))\n data = tf.convert_to_tensor(data, dtype=tf.float32)\n\n H, P = sess.run(util.Hbeta(data, 2))\n\n real_H = 100.14586747777895\n real_P = np.array([[-0.15651764, -1.02118236, -1.138202, -1.15403889, -1.15618218],\n [-1.15647224, -1.1565115, -\n 1.15651681, -1.15651753, -1.15651763],\n [-1.15651764, -1.15651764, -\n 1.15651764, -1.15651764, -1.15651764],\n [-1.15651764, -1.15651764, -\n 1.15651764, -1.15651764, -1.15651764],\n [-1.15651764, -1.15651764, -1.15651764, -1.15651764, -1.15651764]])\n\n assert_array_almost_equal(H, real_H, decimal=4)\n assert_array_almost_equal(P, real_P, decimal=4)\n\n\ndef _test_whiten(sess):\n data = np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 1, 3], [1, 3, 2, 4]])\n data = tf.convert_to_tensor(data, dtype=tf.float32)\n\n whitened = sess.run(util.whiten(data, n_comp=4))\n\n real_whitened = np.array([[9.63475981e-01, 1.11961253e+00, 1.49011612e-08,\n 0.00000000e+00],\n [-1.55893688e+00, 6.91958598e-01, 0.00000000e+00,\n 0.00000000e+00],\n [-1.84007539e-01, -1.46559183e+00,\n -1.49011612e-08, 0.00000000e+00],\n [7.79468442e-01, -3.45979299e-01, 0.00000000e+00,\n 0.00000000e+00]])\n\n assert_array_almost_equal(whitened, real_whitened, decimal=0)\n",
"import tensorflow as tf\nimport pytest\n\[email protected](scope=\"function\")\ndef sess():\n with tf.Session() as sess:\n yield sess # provide the fixture value"
] | [
[
"tensorflow.convert_to_tensor",
"numpy.arange",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
],
[
"tensorflow.Session"
]
] |
artemyk/dit | [
"72e3aa1b128c2ccacfe14f6f73043ef772b81788"
] | [
"setup.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nInstallation script for dit.\n\"\"\"\n\nimport ast\nimport re\nimport sys\nimport warnings\n\nfrom setuptools import Extension, find_packages, setup\n\nfrom distutils.command import install_data\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nNAME = \"dit\"\nAUTHOR = \"Humans\"\nEMAIL = \"[email protected]\"\nURL = \"http://dit.io\"\n\nwith open('dit/__init__.py', 'rb') as f:\n VERSION = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nDESCRIPTION = \"Python package for information theory.\"\n\n\nclass my_install_data(install_data.install_data):\n # A custom install_data command, which will install it's files\n # into the standard directories (normally lib/site-packages).\n def finalize_options(self):\n if self.install_dir is None:\n installobj = self.distribution.get_command_obj('install')\n self.install_dir = installobj.install_lib\n print('Installing data files to {0}'.format(self.install_dir))\n install_data.install_data.finalize_options(self)\n\n\ndef has_cython():\n \"\"\"Returns True if Cython is found on the system.\"\"\"\n try:\n import Cython # noqa: F401\n return True\n except ImportError:\n return False\n\n\ndef check_opt(name):\n x = eval('has_{0}()'.format(name.lower())) # noqa: S307\n msg = \"%(name)s not found. %(name)s extensions will not be built.\"\n if not x:\n warnings.warn(msg % {'name': name})\n return x\n\n\ndef hack_distutils(debug=False, fast_link=True):\n # hack distutils.sysconfig to eliminate debug flags\n # stolen from mpi4py\n\n def remove_prefixes(optlist, bad_prefixes):\n for bad_prefix in bad_prefixes:\n for i, flag in enumerate(optlist):\n if flag.startswith(bad_prefix):\n optlist.pop(i)\n break\n return optlist\n\n import sys\n if not sys.platform.lower().startswith(\"win\"):\n from distutils import sysconfig\n\n cvars = sysconfig.get_config_vars()\n cflags = cvars.get('OPT')\n if cflags:\n cflags = remove_prefixes(cflags.split(),\n ['-g', '-O', '-Wstrict-prototypes', '-DNDEBUG'])\n if debug:\n cflags.append(\"-g\")\n else:\n cflags.append(\"-O3\")\n cflags.append(\"-DNDEBUG\")\n cvars['OPT'] = str.join(' ', cflags)\n cvars[\"CFLAGS\"] = cvars[\"BASECFLAGS\"] + \" \" + cvars[\"OPT\"]\n\n if fast_link:\n for varname in [\"LDSHARED\", \"BLDSHARED\"]:\n ldsharedflags = cvars.get(varname)\n if ldsharedflags:\n ldsharedflags = remove_prefixes(ldsharedflags.split(),\n ['-Wl,-O'])\n cvars[varname] = str.join(' ', ldsharedflags)\n\n\ndef main():\n ## Probably, we don't need this anymore?\n hack_distutils()\n\n # Handle optional extensions.\n opt = {}\n for name, option in [('Cython', 'nocython')]:\n lname = name.lower()\n\n # Determine if the Python module exists\n opt[lname] = check_opt(name)\n\n if not opt[lname]:\n continue\n else:\n # Disable installation of extensions, if user requested.\n try:\n idx = sys.argv.index(\"--{0}\".format(option))\n except ValueError:\n pass\n else:\n opt[lname] = False\n del sys.argv[idx]\n\n cmdclass = {'install_data': my_install_data}\n\n cython_modules = []\n if opt['cython']:\n import Cython.Distutils\n try:\n import numpy as np\n except ImportError:\n msg = \"Please install NumPy first.\"\n print(msg)\n raise\n\n cmdclass['build_ext'] = Cython.Distutils.build_ext\n\n close = Extension(\n \"dit.math._close\",\n [\"dit/math/_close.pyx\"]\n )\n\n pycounts = Extension(\n \"dit.inference.pycounts\",\n [\"dit/inference/pycounts.pyx\", \"dit/inference/counts.c\"],\n include_dirs=[np.get_include()],\n libraries=['m'],\n extra_compile_args=['-std=c99'],\n 
)\n\n samplediscrete = Extension(\n \"dit.math._samplediscrete\",\n [\"dit/math/_samplediscrete.pyx\"],\n include_dirs=[np.get_include()]\n )\n\n # Active Cython modules\n cython_modules = [\n close,\n pycounts,\n samplediscrete,\n ]\n\n other_modules = []\n\n ext_modules = cython_modules + \\\n other_modules\n\n data_files = [\n \"dit/inference/counts.h\",\n ]\n\n with open('requirements.txt') as reqs:\n install_requires = reqs.read().splitlines()\n\n python_requires = \"!=3.0.*, !=3.1.*, !=3.2.*, <4\"\n\n packages = find_packages(exclude=['site', 'examples', 'docs', '*tests*'])\n\n # Tests\n # This includes for bdist only. sdist uses MANIFEST.in\n package_data = dict(zip(packages, [['tests/*.py']] * len(packages)))\n\n setup(\n name=NAME,\n version=VERSION,\n url=URL,\n python_requires=python_requires,\n packages=packages,\n package_data=package_data,\n provides=[\"dit\"],\n install_requires=install_requires,\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n data_files=data_files,\n include_package_data=True,\n author=AUTHOR,\n author_email=EMAIL,\n description=DESCRIPTION,\n long_description=open(\"README.rst\").read(),\n license=\"BSD\",\n classifiers=[\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n )\n\n\nif __name__ == '__main__':\n if sys.argv[-1] == 'setup.py':\n print(\"To install, run 'python setup.py install'\\n\")\n\n v = sys.version_info[:2]\n if v < (3, 3):\n msg = \"dit requires Python version >= 3.4\"\n print(msg.format(v))\n sys.exit(-1)\n\n main()\n"
] | [
[
"numpy.get_include"
]
] |
VulRepairTeam/VulRepair | [
"9cf2abd7ca27d84445ddfc7ab323745a5b676cce"
] | [
"M9_CodeBERT_word_level/codebert_wordlevel_main.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nimport argparse\nimport logging\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom codebert_model import Seq2Seq\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler\nfrom transformers import (AdamW, get_linear_schedule_with_warmup, \n RobertaConfig, RobertaModel)\nfrom tqdm import tqdm\nimport pandas as pd\nfrom tokenizers import Tokenizer\n\ncpu_cont = 16\nlogger = logging.getLogger(__name__)\n\nclass InputFeatures(object):\n \"\"\"A single training/test features for a example.\"\"\"\n def __init__(self,\n input_ids,\n label,\n decoder_input_ids):\n self.input_ids = input_ids\n self.label=label\n self.decoder_input_ids = decoder_input_ids\n \n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer, args, file_type=\"train\"):\n if file_type == \"train\":\n file_path = args.train_data_file\n elif file_type == \"eval\":\n file_path = args.eval_data_file\n elif file_type == \"test\":\n file_path = args.test_data_file\n self.examples = []\n df = pd.read_csv(file_path)\n sources = df[\"source\"].tolist()\n labels = df[\"target\"].tolist()\n for i in tqdm(range(len(sources))):\n self.examples.append(convert_examples_to_features(sources[i], labels[i], tokenizer, args))\n if file_type == \"train\":\n for example in self.examples[:3]:\n logger.info(\"*** Example ***\")\n logger.info(\"label: {}\".format(example.label))\n logger.info(\"input_ids: {}\".format(' '.join(map(str, example.input_ids))))\n logger.info(\"decoder_input_ids: {}\".format(' '.join(map(str, example.decoder_input_ids))))\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i): \n return self.examples[i].input_ids, self.examples[i].input_ids.ne(1), self.examples[i].label, self.examples[i].decoder_input_ids, self.examples[i].decoder_input_ids.ne(1)\n\ndef convert_examples_to_features(source, label, tokenizer, args):\n # encode\n source_ids = tokenizer.encode(source)\n source_ids = source_ids.ids\n if len(source_ids) > 510:\n source_ids = source_ids[:510]\n source_ids = [0] + source_ids + [2]\n elif len(source_ids) < 510:\n padding = 510 - len(source_ids)\n source_ids = [0] + source_ids + [2]\n for _ in range(padding):\n source_ids.append(1)\n elif len(source_ids) == 510:\n source_ids = [0] + source_ids + [2]\n source_ids = torch.tensor(source_ids)\n \n decoder_input_ids = tokenizer.encode(label)\n decoder_input_ids = decoder_input_ids.ids\n if len(decoder_input_ids) > 254:\n 
decoder_input_ids = decoder_input_ids[:254]\n decoder_input_ids = [0] + decoder_input_ids + [2]\n elif len(decoder_input_ids) < 254:\n padding = 254 - len(decoder_input_ids)\n decoder_input_ids = [0] + decoder_input_ids + [2]\n for _ in range(padding):\n decoder_input_ids.append(1)\n elif len(decoder_input_ids) == 254:\n decoder_input_ids = [0] + decoder_input_ids + [2]\n \n assert len(decoder_input_ids) == 256 and len(source_ids) == 512\n decoder_input_ids = torch.tensor(decoder_input_ids)\n label = decoder_input_ids\n return InputFeatures(source_ids, label, decoder_input_ids)\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\ndef train(args, train_dataset, model, tokenizer, eval_dataset):\n \"\"\" Train the model \"\"\"\n # build dataloader\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=0)\n \n args.max_steps = args.epochs * len(train_dataloader)\n # evaluate the model per epoch\n args.save_steps = len(train_dataloader) * 1\n \n args.warmup_steps = args.max_steps // 5\n model.to(args.device)\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\n num_training_steps=args.max_steps)\n\n # multi-gpu training\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.train_batch_size//max(args.n_gpu, 1))\n logger.info(\" Total train batch size = %d\",args.train_batch_size*args.gradient_accumulation_steps)\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", args.max_steps)\n \n global_step = 0\n tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0\n best_loss = 100\n\n model.zero_grad()\n\n for idx in range(args.epochs): \n bar = tqdm(train_dataloader, total=len(train_dataloader))\n tr_num = 0\n train_loss = 0\n for step, batch in enumerate(bar):\n (input_ids, attention_mask, labels, decoder_input_ids, target_mask) = [x.squeeze(1).to(args.device) for x in batch]\n model.train()\n # the forward function automatically creates the correct decoder_input_ids\n loss, _, _ = model(source_ids=input_ids, source_mask=attention_mask, target_ids=decoder_input_ids, target_mask=target_mask)\n if args.n_gpu > 1:\n loss = loss.mean()\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n tr_loss += loss.item()\n tr_num += 1\n train_loss += loss.item()\n if avg_loss == 0:\n avg_loss = tr_loss\n avg_loss = round(train_loss/tr_num,5)\n bar.set_description(\"epoch {} loss {}\".format(idx,avg_loss))\n 
\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step() \n global_step += 1\n output_flag = True\n avg_loss = round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)), 4)\n if global_step % args.save_steps == 0:\n # placeholder of evaluation\n result = evaluate(args, model, tokenizer, eval_dataset, eval_when_training=True) \n # Save model checkpoint\n if result < best_loss:\n best_loss = result\n logger.info(\" \"+\"*\"*20) \n logger.info(\" Best Loss:%s\",round(best_loss,4))\n logger.info(\" \"+\"*\"*20) \n checkpoint_prefix = 'checkpoint-best-loss'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n if not os.path.exists(output_dir):\n os.makedirs(output_dir) \n model_to_save = model.module if hasattr(model,'module') else model\n output_dir = os.path.join(output_dir, '{}'.format(args.model_name)) \n torch.save(model_to_save.state_dict(), output_dir)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\ndef clean_tokens(tokens):\n tokens = tokens.replace(\"[PAD]\", \"\")\n tokens = tokens.replace(\"[CLS]\", \"\")\n tokens = tokens.replace(\"[SEP]\", \"\")\n tokens = tokens.strip(\"\\n\")\n tokens = tokens.strip()\n return tokens\n\ndef evaluate(args, model, tokenizer, eval_dataset, eval_when_training=False):\n #build dataloader\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=0)\n # multi-gpu evaluate\n if args.n_gpu > 1 and eval_when_training is False:\n model = torch.nn.DataParallel(model)\n # Eval!\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n model.eval()\n bar = tqdm(eval_dataloader, total=len(eval_dataloader))\n \n eval_loss, tokens_num = 0, 0\n for batch in bar:\n (input_ids, attention_mask, labels, decoder_input_ids, target_mask) = [x.squeeze(1).to(args.device) for x in batch]\n with torch.no_grad():\n _, loss, num = model(source_ids=input_ids,source_mask=attention_mask,\n target_ids=decoder_input_ids,target_mask=target_mask) \n eval_loss += loss.sum().item()\n tokens_num += num.sum().item()\n eval_loss = eval_loss / tokens_num\n # show loss of dev dataset \n model.train()\n logger.info(\"***** Eval results *****\")\n logger.info(f\"Evaluation Loss: {str(eval_loss)}\") \n return eval_loss\n\ndef test(args, model, tokenizer, test_dataset, best_threshold=0.5):\n # build dataloader\n test_sampler = SequentialSampler(test_dataset)\n test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.eval_batch_size, num_workers=0)\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n # Test!\n logger.info(\"***** Running Test *****\")\n logger.info(\" Num examples = %d\", len(test_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n model.eval()\n accuracy = []\n raw_predictions = []\n correct_prediction = \"\"\n bar = tqdm(test_dataloader, total=len(test_dataloader))\n for batch in bar:\n correct_pred = False\n (input_ids, attention_mask, labels, decoder_input_ids, target_mask) = [x.squeeze(1).to(args.device) for x in batch]\n\n print\n\n with torch.no_grad():\n beam_outputs = model(source_ids=input_ids, source_mask=attention_mask)\n beam_outputs = beam_outputs.detach().cpu().tolist()[0]\n decoder_input_ids = decoder_input_ids.detach().cpu().tolist()\n for single_output in beam_outputs:\n # 
pred\n prediction = tokenizer.decode(single_output, skip_special_tokens=False)\n prediction = clean_tokens(prediction)\n # truth\n ground_truth = tokenizer.decode(decoder_input_ids[0], skip_special_tokens=False)\n ground_truth = clean_tokens(ground_truth)\n if prediction == ground_truth:\n correct_prediction = prediction\n correct_pred = True\n break\n if correct_pred:\n raw_predictions.append(correct_prediction)\n accuracy.append(1)\n else:\n # if not correct, use the first output in the beam as the raw prediction\n raw_pred = tokenizer.decode(beam_outputs[0], skip_special_tokens=False)\n raw_pred = clean_tokens(raw_pred)\n raw_predictions.append(raw_pred)\n accuracy.append(0)\n # calculate accuracy\n test_result = round(sum(accuracy) / len(accuracy), 4)\n logger.info(\"***** Test results *****\")\n logger.info(f\"Test Accuracy: {str(test_result)}\")\n\n # write prediction to file\n df = pd.read_csv(args.test_data_file)\n df[\"raw_predictions\"] = raw_predictions\n df[\"correctly_predicted\"] = accuracy\n f_name = args.test_data_file.split(\"/\")[-1].split(\"_\")[:2]\n f_name = \"_\".join(f_name)\n df.to_csv(f\"../data/raw_predictions/Roberta-no-pretraining/{f_name}_raw_preds.csv\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n # Params\n parser.add_argument(\"--train_data_file\", default=None, type=str, required=False,\n help=\"The input training data file (a csv file).\")\n parser.add_argument(\"--output_dir\", default=None, type=str, required=False,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n parser.add_argument(\"--model_type\", default=\"t5\", type=str,\n help=\"The model architecture to be fine-tuned.\")\n parser.add_argument(\"--encoder_block_size\", default=-1, type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\")\n parser.add_argument(\"--decoder_block_size\", default=-1, type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\")\n parser.add_argument(\"--num_beams\", default=50, type=int,\n help=\"Beam size to use when decoding.\") \n parser.add_argument(\"--eval_data_file\", default=None, type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\")\n parser.add_argument(\"--test_data_file\", default=None, type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\")\n parser.add_argument(\"--model_name\", default=\"model.bin\", type=str,\n help=\"Saved model name.\")\n parser.add_argument(\"--checkpoint_model_name\", default=\"non_domain_model.bin\", type=str,\n help=\"Checkpoint model name.\")\n parser.add_argument(\"--model_name_or_path\", default=None, type=str,\n help=\"The model checkpoint for weights initialization.\")\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path\")\n\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run 
training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--load_model_from_checkpoint\", default=False, action='store_true',\n help=\"Whether to load model from checkpoint.\")\n parser.add_argument(\"--evaluate_during_training\", action='store_true',\n help=\"Run evaluation during training at each logging step.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--eval_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--epochs', type=int, default=1,\n help=\"training epochs\")\n args = parser.parse_args()\n # Setup CUDA, GPU\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n args.device = device\n\n # to remove\n args.n_gpu = 1\n args.device = \"cuda:0\"\n ###\n\n # Setup logging\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO)\n logger.warning(\"device: %s, n_gpu: %s\",device, args.n_gpu,)\n # Set seed\n set_seed(args)\n\n config = RobertaConfig.from_pretrained(args.config_name)\n tokenizer = Tokenizer.from_file('./wordlevel_tokenizer/wordlevel.json')\n\n # build models\n encoder = RobertaModel.from_pretrained('microsoft/codebert-base')\n \n encoder.resize_token_embeddings(50265)\n decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)\n decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\n model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,\n beam_size=args.beam_size,max_length=args.decoder_block_size,\n sos_id=0,eos_id=2)\n \n logger.info(\"Training/evaluation parameters %s\", args)\n # Training\n if args.do_train:\n train_dataset = TextDataset(tokenizer, args, file_type='train')\n eval_dataset = TextDataset(tokenizer, args, file_type='eval')\n if args.load_model_from_checkpoint:\n checkpoint_prefix = f'checkpoint-best-loss/{args.checkpoint_model_name}'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n model.load_state_dict(torch.load(output_dir))\n model.to(args.device)\n train(args, train_dataset, model, tokenizer, eval_dataset)\n # Evaluation\n results = {}\n if args.do_eval:\n checkpoint_prefix = f'checkpoint-best-loss/{args.model_name}'\n output_dir = 
os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n model.load_state_dict(torch.load(output_dir))\n model.to(args.device)\n eval_dataset = TextDataset(tokenizer, args, file_type='eval')\n result=evaluate(args, model, tokenizer, eval_dataset) \n if args.do_test:\n checkpoint_prefix = f'checkpoint-best-loss/{args.model_name}'\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) \n model.load_state_dict(torch.load(output_dir, map_location=args.device))\n model.to(args.device)\n test_dataset = TextDataset(tokenizer, args, file_type='test')\n test(args, model, tokenizer, test_dataset, best_threshold=0.5)\n return results\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_csv",
"numpy.random.seed",
"torch.nn.TransformerDecoderLayer",
"torch.load",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"torch.nn.TransformerDecoder",
"torch.tensor",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.cuda.device_count",
"numpy.exp"
]
] |
Chenglin-Yang/PatchAttack | [
"56941df37c6840aca1c98d091e75968ffece42af"
] | [
"PatchAttack/TextureDict_extractor.py"
] | [
"import os\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n# torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as Models\nimport torch.optim as optim\n\nimport PatchAttack.utils as utils\n\n# global variables\ntorch_cuda = 0\n\n\nclass GradCam():\n \n class model_wrapper(nn.Module):\n def __init__(self, features, classifier):\n super().__init__()\n self.features = features\n self.classifier = classifier\n \n def forward(self, x):\n x = self.features(x)\n x = self.classifier(x)\n return x\n\n class ModelOutputs():\n \"\"\" Class for making a forward pass, and getting:\n 1. The network output.\n 2. Activations from intermeddiate targetted layers.\n 3. Gradients from intermeddiate targetted layers. \"\"\"\n def __init__(self, model, target_layers):\n self.model = model\n self.feature_extractor = self.FeatureExtractor(self.model.features, target_layers)\n\n def get_gradients(self):\n return self.feature_extractor.gradients\n\n def __call__(self, x):\n target_activations, output = self.feature_extractor(x)\n # for non-square input\n #if output.size(-1) != 7 or output.size(-2) != 7:, but this only suits VGG\n if output.size(-1) != output.size(-2): # if I append avgpool in features for VGG, this will never be met\n output = self.model.avgpool(output)\n output = output.view(output.size(0), -1)\n output = self.model.classifier(output)\n return target_activations, output\n \n class FeatureExtractor():\n \"\"\" Class for extracting activations and \n registering gradients from targetted intermediate layers \"\"\"\n def __init__(self, model, target_layers):\n self.model = model\n self.target_layers = target_layers\n self.gradients = []\n\n def save_gradient(self, grad):\n self.gradients.append(grad)\n\n def __call__(self, x):\n del self.gradients\n self.gradients = []\n \n outputs = []\n self.gradients = []\n for name, module in self.model._modules.items():\n x = module(x)\n if name in self.target_layers:\n x.register_hook(self.save_gradient)\n outputs += [x]\n x.retain_grad()\n return outputs, x\n \n def __init__(self, model, target_layer_names, use_cuda=True):\n self.model = model\n self.model.eval()\n self.cuda = use_cuda\n if self.cuda:\n self.model = model.cuda(torch_cuda)\n\n self.extractor = self.ModelOutputs(self.model, target_layer_names)\n\n def forward(self, input):\n return self.model(input) \n\n def __call__(self, input, index = None):\n cam_H, cam_W = input.size(-2), input.size(-1)\n \n if self.cuda:\n features, output = self.extractor(input.cuda(torch_cuda))\n else:\n features, output = self.extractor(input)\n\n if index == None:\n index = np.argmax(output.cpu().data.numpy())\n\n one_hot = np.zeros((1, output.size()[-1]), dtype = np.float32)\n one_hot[0][index] = 1\n one_hot = torch.from_numpy(one_hot).requires_grad_()\n if self.cuda:\n one_hot = torch.sum(one_hot.cuda(torch_cuda) * output)\n else:\n one_hot = torch.sum(one_hot * output)\n\n self.model.features.zero_grad()\n self.model.classifier.zero_grad()\n one_hot.backward(retain_graph=True)\n\n grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()\n \n target = features[-1]\n target = target.cpu().data.numpy()[0, :]\n\n weights = np.mean(grads_val, axis = (2, 3))[0, :]\n cam = np.zeros(target.shape[1 : ], dtype = np.float32)\n\n for i, w in enumerate(weights):\n cam += w * target[i, :, :]\n\n cam = np.maximum(cam, 0)\n ori_cam = cam\n cam = cv2.resize(cam, (cam_W, cam_H)) # bilinear interpolation\n cam = cam - np.min(cam)\n cam = cam / 
np.max(cam)\n \n self.current_cam = cam\n self.current_input = input\n \n return cam, ori_cam\n\n def show_current_cam(self, show=True, save_dir=None, dpi=300, tight=True):\n heatmap = cv2.applyColorMap(np.uint8(255*self.current_cam), cv2.COLORMAP_JET)\n heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)/255\n cv2_img = utils.data_agent.inv_normalize(self.current_input.squeeze(0)).permute((1, 2, 0)).cpu().numpy()\n \n show_img = heatmap + cv2_img\n show_img -= show_img.min()\n show_img /= show_img.max()\n \n plt.figure()\n\n if show:\n plt.imshow(show_img)\n\n if save_dir is not None:\n if tight:\n plt.xticks([])\n plt.yticks([])\n plt.subplots_adjust(left=0, right=1, bottom=0, top=1)\n plt.savefig(fname=save_dir,\n dpi=dpi, facecolor='w', edgecolor='w', format='png')\n \n return show_img\n\n\nclass vgg19_extractor(nn.Module):\n\n # default values, and are overwritten in practical use\n style_choice = [1, 6, 11, 20, 29]\n content_choice = [22]\n attention_threshold = 0.7\n normal_spatial_size = [50176, 12544, 3136, 784, 196]\n \n def __init__(self):\n super().__init__()\n self.model = Models.vgg19(pretrained=True).cuda(torch_cuda)\n self.style_layers = self.style_choice\n self.content_layers = self.content_choice\n print('style_layer choice', self.style_layers)\n self.activations = []\n self.contents = []\n \n # change max pool to average pool\n for name, child in self.model.features.named_children():\n if isinstance(child, nn.MaxPool2d):\n self.model.features[int(name)] = nn.AvgPool2d(kernel_size=2, stride=2)\n \n # lock the gradients\n for param in self.model.parameters():\n param.requires_grad = False\n \n def forward(self, x):\n del self.activations\n del self.contents\n self.activations = []\n self.contents = []\n for name, child in self.model.features.named_children():\n x = child(x)\n if int(name) in self.style_layers:\n self.activations.append(x)\n # add content\n if int(name) in self.content_layers:\n self.contents.append(x)\n return x\n \n def get_style(self, x):\n '''\n the standard scale corresponds to the case where the input size is 224 by 224\n '''\n _ = self.forward(x)\n alphas = [i/(j.size(-1)*j.size(-2)) \n for i, j in zip(self.normal_spatial_size, self.activations)]\n return [self.gram_matrix(item)*alpha \n for item, alpha in zip(self.activations, alphas)]\n \n def get_attention_style(self, x, cam):\n _ = self.forward(x)\n attention_style = []\n \n # offset for conv4 or lower style_choice\n offset = int(round(np.log2(self.activations[-1].size()[-1] / cam.shape[-1])))\n\n for i in range(len(self.activations)):\n activation = self.activations[-(i+1)]\n #mask = self.choose_region(self.bilinear_upsample(cam, \n # H_scale=pow(2, i+offset), \n # W_scale=pow(2, i+offset)))\n mask = self.choose_region(self.bilinear_upsample(cam, \n target_H=activation.size(-2), \n target_W=activation.size(-1)))\n mask = torch.from_numpy(mask).expand(activation.size()).contiguous()\n attention_style.append(self.gram_matrix_with_mask(activation, mask))\n # add attention_content\n if i+offset == 1: # conv4-2\n self.attention_contents = self.content_with_mask(self.contents, mask)\n \n r_attention_style = [attention_style[-(i+1)] for i in range(len(attention_style))]\n alphas = [i/(j.size(-1)*j.size(-2)) \n for i, j in zip(self.normal_spatial_size, self.activations)]\n\n return [item*alpha for item, alpha in zip(r_attention_style, alphas)]\n \n def get_mask_style(self, x, mask):\n _ = self.forward(x)\n mask_style = []\n # check mask\n if len(mask.size()) == 3:\n mask = mask.unsqueeze(0)\n assert 
len(mask.size()) == 4,\\\n 'the mask should be 3 or 4 dims'\n assert mask.size(1) == 1,\\\n 'channel number of mask should be 1'\n \n h, w = mask.size()[-2:]\n for i in range(len(self.activations)):\n #temp_mask = F.upsample_bilinear(mask.float(), size=(int(h/pow(2, i)), \n # int(w/pow(2, i))))\n temp_mask = F.upsample_bilinear(mask.float(), size=(self.activations[i].size(-2), \n self.activations[i].size(-1)))\n temp_mask = temp_mask.expand(self.activations[i].size()).contiguous()\n mask_style.append(self.gram_matrix_with_mask(self.activations[i], temp_mask.bool()))\n \n return mask_style\n \n @staticmethod\n def spatial_repeat(x, scale):\n '''x: torch.floattensor with size (bs, c, h, w) or (c, h, w)'''\n temp = torch.cat([x]*scale, dim=-2)\n temp = torch.cat([temp]*scale, dim=-1)\n return temp\n \n @staticmethod\n def generate_image_from_style(x, save_dir, attention=True, iterations=10000, \n label=None, cls_w=0, lr=0.01, scale=1,\n noise_dims=[1, 3, 224, 224], noise_init='randn',\n noise_optimization_mode='normal', pgd_eps=None, \n custom_noise_init=None, mask=None, observer=None):\n '''\n currently, I can limit the optimization space of the pixels\n '''\n \n if os.path.exists(save_dir):\n return torch.load(os.path.join(save_dir, 'iter_{}.pt'.format(iterations-1)))\n else:\n cnn = Models.vgg19(pretrained=True).cuda(torch_cuda).eval()\n # create the folder for the generated images\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n # prepare extractor to extract the styles\n style_extractor = vgg19_extractor()\n # check input: from style image or just style\n if type(x) == torch.Tensor:\n style_image = x\n # get dims\n bs, c, h, w = style_image.size()\n if attention:\n grad_cam = GradCam(model=Models.vgg19(pretrained=True), \n target_layer_names=[\"35\"])\n _, ori_cam = grad_cam(style_image)\n target_style = style_extractor.get_attention_style(style_image, ori_cam)\n else:\n target_style = style_extractor.get_style(style_image)\n else:\n bs, c, h, w = noise_dims\n target_style = x\n # generate starting point: noise\n if noise_init == 'randn':\n noise = torch.randn(bs, c, int(h/scale), int(w/scale)).cuda(torch_cuda)\n elif noise_init == 'zeros':\n noise = torch.zeros(bs, c, int(h/scale), int(w/scale)).cuda(torch_cuda)\n elif noise_init == 'custom':\n noise = custom_noise_init\n vgg19_extractor.normalize(noise)\n noise = noise.requires_grad_()\n # set upper bound and lower bound if necessary\n if noise_optimization_mode == 'pgd':\n anchor_max = noise.detach().clone() + pgd_eps\n anchor_min = noise.detach().clone() - pgd_eps\n # set optimizer\n optimizer = optim.Adam(params=[noise], lr=lr)\n # optimize\n for iteration in range(iterations):\n # zero grad\n optimizer.zero_grad()\n # repeat noise\n if scale != 1:\n noise_image = style_extractor.spatial_repeat(noise, scale)\n else:\n noise_image = noise\n # extract style from noise\n if type(mask) == torch.Tensor:\n noise_style = style_extractor.get_mask_style(noise_image, mask)\n else:\n noise_style = style_extractor.get_style(noise_image)\n # calculate style loss\n style_loss = vgg19_extractor.style_loss(noise_style, \n target_style,\n style_extractor.activations)\n style_loss = 1e6 * style_loss / len(noise_style)\n # classification loss\n if cls_w != 0:\n l_c = F.cross_entropy(input=cnn(noise_image), target=label.cuda(torch_cuda)) * cls_w\n else:\n l_c = torch.Tensor([0]).cuda(torch_cuda)\n \n # overall loss\n loss = style_loss + l_c\n # backward and update params\n loss.backward()\n # applying mask if necessary\n if type(mask) == 
torch.Tensor:\n noise.grad.data[~mask.expand(noise.size())] = 0\n # take a step\n optimizer.step()\n \n if noise_optimization_mode == 'normal':\n # normalize the image\n vgg19_extractor.normalize(noise)\n elif noise_optimization_mode == 'pgd':\n # pgd clamping\n vgg19_extractor.clamp_optimizer(noise, mode='pgd', \n anchor_min=anchor_min, \n anchor_max=anchor_max)\n # show progress\n if iteration % 3000 == 0 or iteration == iterations-1:\n print(\"Iteration: {}, Style Loss: {:.3f}, classification Loss: {:.3f}\"\n .format(iteration, style_loss.item(), l_c.item()))\n # check noise prediction\n with torch.no_grad():\n output = F.softmax(cnn(noise), dim=1)\n print('vgg19 predict: {} | vgg19 confidence: {:.3f}'.format(output.argmax().item(), \n output.max().item()))\n if observer != None:\n output_obs = F.softmax(observer(noise), dim=1)\n print('observer predict: {} | observer confidence: {:.3f}'\n .format(output_obs.argmax().item(), \n output_obs.max().item()))\n \n # generate the image\n if iteration % 3000 == 0 or iteration == iterations-1:\n torch.save(noise.squeeze(0).cpu().detach(), \n save_dir+'/iter_{}.pt'.format(iteration))\n # release memory\n torch.cuda.empty_cache()\n return noise\n \n @staticmethod\n def get_kmeans_style(indices, dataset, save_dir, n_clusters=30):\n '''\n input:\n indices: return of data_agent.get_indices()\n dataset: which dataset to use to extract the styles, should be same as \n that used in data_agent.get_indices()\n save_dir: dir to save the .pt file\n return:\n kmeans: numpy object\n '''\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n file_name = os.path.join(save_dir, 'style_kmeans_{}.pt'\n .format(n_clusters))\n if os.path.exists(file_name):\n kmeans = torch.load(file_name)\n else:\n # get attention styles from all indices\n grad_cam = GradCam(model=Models.vgg19(pretrained=True), target_layer_names=[\"35\"])\n style_extractor = vgg19_extractor()\n indices_style = []\n for index in indices:\n image, _ = dataset.__getitem__(index)\n image = image.cuda(torch_cuda).unsqueeze(0)\n _, ori_cam = grad_cam(image)\n attention_style = style_extractor.get_attention_style(image, ori_cam)\n # move attention_style to cpu in case of memory issues\n attention_style = [item.cpu() for item in attention_style]\n indices_style.append(attention_style)\n \n # clustering\n from sklearn.cluster import KMeans\n # memory issue may arise here if indices sytle is on GPU\n X = style_extractor.flatten_style(indices_style).cpu().numpy()\n print('clustering...')\n kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(X)\n print('finished! 
Saving...')\n torch.save(kmeans, file_name)\n # check the assignment situation of each cluster\n assignment = [(kmeans.labels_ == i).sum() for i in range(n_clusters)]\n print('the assignment of the clusters are: ', assignment)\n print('Remeber to inversely flatten the style clusters,\\\n \\nif you need to process the clusters of the returned numpy kmeans object')\n return kmeans\n \n @staticmethod\n def style_loss(style_1, style_2, activations):\n '''\n input:\n style_1 : list of torch.floattensor with size (bs, c_i, c_i)\n style_2 : list of torch.floattensor with size (bs, c_i, c_i)\n activations: list of activation, torch.floattensor with size (bs, c_i, h_i, w_i) \n return:\n loss\n '''\n assert len(style_1) == len(style_2),\\\n 'inconsistent dims of two styles'\n layer_num = len(style_1)\n \n loss = 0\n for i in range(layer_num):\n bs, c, h, w = activations[i].size()\n loss += (style_1[i]-style_2[i]).pow(2).sum() / (4*(c**2)*((h*w)**2))\n return loss\n\n @staticmethod\n def normalize(noise):\n if len(noise.size()) == 4:\n noise.data[:, 0, :, :].clamp_(min=-2.1179, max=2.2490)\n noise.data[:, 1, :, :].clamp_(min=-2.0357, max=2.4285)\n noise.data[:, 2, :, :].clamp_(min=-1.8044, max=2.6400)\n else:\n noise.data[0, :, :].clamp_(min=-2.1179, max=2.2490)\n noise.data[1, :, :].clamp_(min=-2.0357, max=2.4285)\n noise.data[2, :, :].clamp_(min=-1.8044, max=2.6400)\n \n @staticmethod\n def gram_matrix_with_mask(matrix, mask):\n '''\n input:\n matrix: torch.floattensor with size (bs, c, h, w)\n mask: torch.floattensor with size (bs, c, h, w)\n return:\n gram: torch.tensor tensor with size (bs, c, c)\n '''\n bs, c, h, w = matrix.size()\n mx = matrix.view(bs, c, -1)\n mk = mask.view(bs, c, -1)\n \n alpha = mk.sum(dim=2, keepdims=True).float()\n alpha = float(h*w)/alpha[0, 0]\n alpha = alpha.cuda(torch_cuda)\n \n mx = mx[mk].view(bs, c, -1)\n return torch.bmm(mx, mx.permute(0, 2, 1)) * alpha\n \n @staticmethod\n def content_with_mask(contents, mask):\n '''\n input:\n contents: list with one element, torch.floattensor with size (bs, c, h, w)\n mask: torch.floattensor with size (bs, c, h, w)\n return: attention_content, torch.floattensor with size (bs, c, h', w')\n '''\n return [contents[0][mask].view(contents[0].size(0), contents[0].size(1), -1)]\n \n @staticmethod\n def bilinear_upsample(cam, H_scale=2, W_scale=2, target_H=None, target_W=None):\n if target_H is None or target_W is None:\n h, w = cam.shape\n temp_cam = cv2.resize(cam, (w*W_scale, h*H_scale))\n else:\n temp_cam = cv2.resize(cam, (target_W, target_H))\n temp_cam = temp_cam - np.min(temp_cam)\n temp_cam = temp_cam / np.max(temp_cam)\n return temp_cam\n \n @staticmethod\n def choose_region(cam):\n threshold = vgg19_extractor.attention_threshold\n temp_cam = cam - cam.min()\n temp_cam = temp_cam / temp_cam.max()\n mask = temp_cam >= threshold\n return mask\n \n @staticmethod\n def gram_matrix(matrix):\n '''\n input:\n matrix: torch.floattensor with size (bs, c, h, w)\n return:\n gram: torch.floattensor with size (bs, c, c)\n '''\n bs, c, h, w = matrix.size()\n \n m = matrix.view(bs, c, -1)\n return torch.bmm(m, m.permute(0, 2, 1))\n \n @staticmethod\n def flatten_style(x, inv=False, dims=[64, 128, 256, 512, 512]):\n '''\n input:\n x: [inv=False] torch.floattensor with size (bs, 64**2+...+512**2)\n x': [inv=True] list of size bs, each element is a list of size 5 containing: \n torch.floattensor with size (1, 64, 64),..., (1, 512, 512) \n return:\n x to x'\n x' to x\n CURRENTLY DO NOT SUPPORT BATCH CALCULATION\n '''\n if not inv:\n 
indices_style = x\n return torch.stack([torch.cat([item.view(-1) for item in style], dim=0) \n for style in indices_style], dim=0)\n else:\n periods = [0]\n counter = 0\n for dim in dims:\n counter += dim*dim\n periods.append(counter)\n\n index1 = periods[:-1]\n index2 = periods[1:]\n\n if x.size(0) == 1:\n activations = []\n for dim,i1,i2 in zip(dims, index1, index2):\n activations.append(x[0, i1:i2].view(1, dim, dim))\n return activations\n else:\n multi_activations = []\n for item in x:\n activations = []\n for dim,i1,i2 in zip(dims, index1, index2):\n activations.append(item[i1:i2].view(1, dim, dim))\n multi_activations.append(activations)\n return multi_activations\n \n @staticmethod\n def clamp_optimizer(noise, mode, anchor_min=None, anchor_max=None):\n if mode == 'iter_gs':\n pass\n elif mode == 'pgd':\n temp_index = noise < anchor_min\n noise.data[temp_index] = anchor_min.data[temp_index]\n temp_index = noise > anchor_max\n noise.data[temp_index] = anchor_max.data[temp_index]\n vgg19_extractor.normalize(noise)\n \n"
] | [
[
"matplotlib.pyplot.imshow",
"sklearn.cluster.KMeans",
"torch.cat",
"torch.load",
"torch.sum",
"numpy.max",
"numpy.mean",
"torch.no_grad",
"torch.save",
"numpy.uint8",
"torch.from_numpy",
"matplotlib.pyplot.subplots_adjust",
"numpy.zeros",
"matplotlib.pyplot.figure",
"torch.optim.Adam",
"numpy.min",
"torch.cuda.empty_cache",
"matplotlib.pyplot.savefig",
"torch.nn.AvgPool2d",
"matplotlib.pyplot.xticks",
"numpy.maximum",
"torch.Tensor",
"matplotlib.pyplot.yticks"
]
] |
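The texture extraction in the record above is built on Gram matrices of VGG-19 activations; its gram_matrix plus the normal_spatial_size rescaling in get_style reduce to this minimal sketch (shapes are illustrative, not from the record):

import torch

def gram_matrix(feats):
    # feats: (bs, c, h, w) activation map from one chosen VGG-19 layer
    bs, c, h, w = feats.size()
    f = feats.view(bs, c, h * w)
    return torch.bmm(f, f.transpose(1, 2))  # (bs, c, c) channel co-occurrences

feats = torch.randn(1, 64, 112, 112)
g = gram_matrix(feats)
assert g.shape == (1, 64, 64)
# get_style() additionally multiplies layer i's Gram matrix by
# normal_spatial_size[i] / (h * w), keeping styles extracted from
# differently sized inputs on a comparable scale.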
greglan/python_scripts | [
"f2e98ed3fd975d79b0a6b569b65c850a7f4f3ab3"
] | [
"maths/euler_totient_function.py"
] | [
"# -*- coding: utf-8 -*-\n# !/usr/bin/env python\n\n\"\"\"\n Plot Euler's totient function\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\n\n\ndef primes(a, b):\n r = b % a\n while r != 0:\n b = a\n a = r\n r = b % a\n return a == 1\n\n\ndef phi(n):\n i = 1\n for k in range(2, n):\n if primes(k, n):\n i += 1\n return i\n\n\nN = 1000\nstep = 2\n\nx = [k for k in range(0, N, step)]\ny = [phi(k) for k in x]\n\nplt.xlabel(r'$n$')\nplt.ylabel(r\"$\\varphi(n)$\")\nplt.xlim(0, N)\nplt.scatter(x, y)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
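For reference, the script's phi costs one Euclidean gcd test per candidate k; a sketch of the same totient via Euler's product formula (phi_fast is a hypothetical name, not from the script), cross-checked against the brute-force count:

from math import gcd

def phi_fast(n):
    # Euler's product formula: phi(n) = n * prod over primes p | n of (1 - 1/p)
    result, m, p = n, n, 2
    while p * p <= m:
        if m % p == 0:
            result -= result // p
            while m % p == 0:
                m //= p
        p += 1
    if m > 1:               # leftover prime factor
        result -= result // m
    return result

# same count the script computes: k in [1, n) with gcd(k, n) == 1
assert all(phi_fast(n) == sum(1 for k in range(1, n) if gcd(k, n) == 1)
           for n in range(2, 500))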
KatharinaBuelow/cmip5_cmip6_euro-cordex-plotting-routines | [
"b265f9c77ed5b9fb01a76e6549b5c621d14e96b5"
] | [
"py_plotting_cmip_cordex/cmip5_cmip6_cordex_scatter_plot.py"
] | [
"#! /usr/bin/python\n# coding: utf-8\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport subprocess\nimport os\nfrom matplotlib import markers\nfrom scattertable import scattertable\nimport pandas as pd\nimport seaborn as sns\nfrom dp_cmip_plotting_tools import scatter_plot , scatter_plot_cordex, add_column_for_plot_cmip_cordex, add_column_for_plot_cordex\n\n'''\nThis program makes scatterplots for different ensembles \nfor each scenario and timeslice:\na) CMIP5 and CORDEX\nb) CMIP5 and CMIP6\n'''\n#-------------------\n# Select Variable\n#------------------\n\nvar1_dict = {'Precipitation':[r'$\\Delta$' +' Precipitation', 'pr', '%', (-70,70), 'pro','pro_diff_pr_'],}\n#var1_dict = {'Precipitation':[r'$\\Delta$' +' Precipitation', 'pr', 'mm/day', (-1.5,1.5), 'mm', 'diff_pr_'],}\n\n#var2_dict = {'Temperature':[r'$\\Delta$' +' Temperature', 'tas', 'K', (-2 , 11),('MED','CEU','NEU'),60],}\nvar2_dict = {'Temperature':[r'$\\Delta$' +' Temperature', 'tas', 'K', (-2 , 10),('BI','IP','FR','ME','SC','AL','MD','EA'),80],}\n\n#Deuschland\n#var1_dict = {'Precipitation':[r'$\\Delta$' +' Niederschlag', 'pr', '%', (-60,60), 'pro','pro_diff_pr_'],}\n#var2_dict = {'Temperature':[r'$\\Delta$' +' Temperatur', 'tas', 'K', (-2 , 10),('deutschland',), 60],}\n\nprint(os.getcwd())\nworkdir=os.getcwd()\n#-------------------------------------------\n# Select input data and output directory\n#-------------------------------------------\n#\n# This program requires, that the data files exist\n# which can be calculated with Creat_df_for_plots.py\n#\ndatadir=workdir.replace('py_plotting_cmip_cordex','SCATTER/data')\nprint(' ')\nprint('datafile is read from: ', datadir)\n#\n# Make Outputdir and\n# Select, if you want to plot CMIP5 & CORDEX or CMIP5 & CMIP6\n#\n# cmip5-cordex:\n#\n#plotdir=workdir.replace('py_plotting_cmip_cordex','SCATTER/plots_cordex')\n#ensemble='EURO-CORDEX'\n#\n# cmip5-cmip6:\n#\nplotdir=workdir.replace('py_plotting_cmip_cordex','SCATTER/plots')\nensemble='CMIP6'\n\nif not os.path.exists(plotdir):\n os.makedirs(plotdir)\nprint(' ')\nprint('Output will be stored in : ', plotdir)\n\n#-----------------------------------------------\n# nothing needs to be changed below:\n#----------------------------------------------\n# \n\nrcp = ['rcp26', 'rcp45', 'rcp85']\nssp = ['ssp126','ssp245','ssp585']\n\ntimehist = '1981-01-01_to_2010-12-31'\ntimeslice = ['2036-01-01_to_2065-12-31',\n\t '2070-01-01_to_2099-12-31',\n ]\ntimehistp = '(1981 to 2010)'\ntimeslicep = ['(2036 to 2065)',\n\t '(2070 to 2099)', \n ]\n \nseasons = ('JJA','DJF','ANN', 'MAM', 'SON')\n\n\n# Variable 1 pr\nfor parameter in var1_dict.keys():\n var1 = var1_dict[parameter][1]\n einheit1 = var1_dict[parameter][2]\n version = var1_dict[parameter][4]\n xcoln = var1_dict[parameter][5]\n\n# Variable 2 tas\nfor parameter in var2_dict.keys():\n var2 = var2_dict[parameter][1]\n einheit2 = var2_dict[parameter][2]\n regions=var2_dict[parameter][4]\n print('regions: ',regions)\n\n\nfor i in range(len(regions)):\n print('region: ',regions[i])\n reg=regions[i]\n reg=reg.replace('CEU','WCE')\n for seas in seasons:\n print('season:',seas)\n for r in range(len(rcp)):\n print('scenario:', rcp[r],ssp[r])\n for time in range(len(timeslice)):\n print(timeslice[time])\n timen=timeslice[time].replace('_',' ')\n title= rcp[r].upper()+' '+reg.upper()+' '+seas+' '+timeslicep[time]+' - '+timehistp\n print(title)\n # infile, concat dataframe\n FileName5='df_CMIP5_'+seas+'_'+regions[i]+'_'+rcp[r]+'_'+timeslice[time]+'.csv'\n print(FileName5)\n if 
ensemble == 'EURO-CORDEX':\n FileName6='df_'+ensemble+'_'+seas+'_'+regions[i]+'_'+rcp[r]+'_'+timeslice[time]+'.csv'\n else: \n FileName6='df_'+ensemble+'_'+seas+'_'+regions[i]+'_'+ssp[r]+'_'+timeslice[time]+'.csv'\n print(FileName6)\n InFile5=os.path.join(datadir,FileName5)\n InFile6=os.path.join(datadir,FileName6)\n PlotName='CMIP5_'+ensemble+'_'+var2+'_'+var1+'_'+version+'_'+regions[i]+'_'+seas+'_'+rcp[r]+'_'+ssp[r]+'_'+timeslice[time]+'.png'\n if ensemble == 'EURO-CORDEX':\n PlotName='CMIP5_'+ensemble+'_'+var2+'_'+var1+'_'+version+'_'+regions[i]+'_'+seas+'_'+rcp[r]+'_'+timeslice[time]+'.png'\n OutFile=os.path.join(plotdir,PlotName)\n if ensemble == 'EURO-CORDEX':\n df5 = pd.read_csv(InFile5)\n InData5=add_column_for_plot_cmip_cordex(df5)\n df6 = pd.read_csv(InFile6)\n InData6=add_column_for_plot_cordex(df6)\n df=pd.DataFrame()\n df=pd.concat([InData6,InData5],axis=0)\n InData6=pd.DataFrame()\n InData5=pd.DataFrame()\n else:\n InData5 = pd.read_csv(InFile5)\n InData6 = pd.read_csv(InFile6) \n df=pd.DataFrame()\n df=pd.concat([InData6,InData5],axis=0)\n \n x_column=xcoln+timen\n if xcoln == 'diff_pr_':\n df[x_column]=df[x_column]*86400 # needed to plot mm, data is in kg m-2s-1\n y_column='diff_tas_'+timen\n if ensemble == 'EURO-CORDEX':\n print('EURO-CORDEX')\n print(x_column)\n print(y_column)\n scatter_plot_cordex(df, x_column, y_column, rcp[r], var1_dict, var2_dict, OutFile, title)\n else:\n scatter_plot(df, x_column, y_column, rcp[r], ssp[r], var1_dict, var2_dict, OutFile, title)\n \n\n \n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
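The *86400 factor applied to the diff_pr_ columns above rests on a unit identity: 1 kg m-2 s-1 of precipitation corresponds to 1 mm s-1 of water depth, so multiplying by 86400 seconds per day yields mm/day. A minimal sketch with made-up values (the column name follows the script's xcoln + timen pattern):

import pandas as pd

col = 'diff_pr_2036-01-01 to 2065-12-31'
df = pd.DataFrame({col: [1.2e-5, -3.4e-6]})  # kg m-2 s-1
df[col] *= 86400                             # -> mm/day (approx. 1.04, -0.29)
print(df)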
ahiihu/frustum-pointnets | [
"f638473c950ed7693f5522e61f9eea7d0ca191a7"
] | [
"prepare_data.py"
] | [
"''' Prepare KITTI data for 3D object detection.\n\nAuthor: Charles R. Qi\nDate: September 2017\n\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport cv2\nfrom PIL import Image\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\n\n#sys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'mayavi_1'))\nimport kitti_util as utils\nimport _pickle as pickle\nfrom kitti_object import *\nimport argparse\n#from mayavi_1.viz_util\nprint(sys.path)\nprint(1)\ndef in_hull(p, hull):\n from scipy.spatial import Delaunay\n if not isinstance(hull,Delaunay):\n hull = Delaunay(hull)\n return hull.find_simplex(p)>=0\n\ndef extract_pc_in_box3d(pc, box3d):\n ''' pc: (N,3), box3d: (8,3) '''\n box3d_roi_inds = in_hull(pc[:,0:3], box3d)\n return pc[box3d_roi_inds,:], box3d_roi_inds\n\ndef extract_pc_in_box2d(pc, box2d):\n ''' pc: (N,2), box2d: (xmin,ymin,xmax,ymax) '''\n box2d_corners = np.zeros((4,2))\n box2d_corners[0,:] = [box2d[0],box2d[1]] \n box2d_corners[1,:] = [box2d[2],box2d[1]] \n box2d_corners[2,:] = [box2d[2],box2d[3]] \n box2d_corners[3,:] = [box2d[0],box2d[3]] \n box2d_roi_inds = in_hull(pc[:,0:2], box2d_corners)\n return pc[box2d_roi_inds,:], box2d_roi_inds\n\n\ndef demo():\n import mayavi.mlab as mlab\n from mayavi_1 import viz_util\n # from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))\n data_idx = 4\n\n # Load data from dataset\n objects = dataset.get_label_objects(data_idx) # 返回了object3d类的数据,里面存储txt文件信息\n objects[0].print_object()\n img = dataset.get_image(data_idx)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_height, img_width, img_channel = img.shape\n print(('Image shape: ', img.shape))\n pc_velo = dataset.get_lidar(data_idx)[:,0:3]\n calib = dataset.get_calibration(data_idx)\n\n # Draw lidar in rect camera coord\n #print(' -------- LiDAR points in rect camera coordination --------')\n #pc_rect = calib.project_velo_to_rect(pc_velo)\n #fig = draw_lidar_simple(pc_rect)\n #raw_input()\n\n # Draw 2d and 3d boxes on image\n print(' -------- 2D/3D bounding boxes in images --------')\n show_image_with_boxes(img, objects, calib)\n raw_input()\n\n # Show all LiDAR points. 
Draw 3d box in LiDAR point cloud\n print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')\n #show_lidar_with_boxes(pc_velo, objects, calib)\n #raw_input()\n show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)\n raw_input()\n\n # Visualize LiDAR points on images\n print(' -------- LiDAR points projected to image plane --------')\n show_lidar_on_image(pc_velo, img, calib, img_width, img_height) \n raw_input()\n \n # Show LiDAR points that are in the 3d box\n print(' -------- LiDAR points in a 3D bounding box --------')\n box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P) \n box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)\n box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)\n print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(box3droi_pc_velo, fig=fig)\n draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)\n mlab.show(1)\n raw_input()\n \n # UVDepth Image and its backprojection to point clouds\n print(' -------- LiDAR points in a frustum from a 2D box --------')\n imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(pc_velo,\n calib, 0, 0, img_width, img_height, True)\n imgfov_pts_2d = pts_2d[fov_inds,:]\n imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)\n\n cameraUVDepth = np.zeros_like(imgfov_pc_rect)\n cameraUVDepth[:,0:2] = imgfov_pts_2d\n cameraUVDepth[:,2] = imgfov_pc_rect[:,2]\n\n # Show that the points are exactly the same\n backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)\n print(imgfov_pc_velo[0:20])\n print(backprojected_pc_velo[0:20])\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(backprojected_pc_velo, fig=fig)\n raw_input()\n\n # Only display those points that fall into 2d box\n print(' -------- LiDAR points in a frustum from a 2D box --------')\n xmin,ymin,xmax,ymax = \\\n objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax\n boxfov_pc_velo = \\\n get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)\n print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(boxfov_pc_velo, fig=fig)\n mlab.show(1)\n raw_input()\n\ndef random_shift_box2d(box2d, shift_ratio=0.1):\n ''' Randomly shift box center, randomly scale width and height \n '''\n r = shift_ratio\n xmin,ymin,xmax,ymax = box2d\n h = ymax-ymin\n w = xmax-xmin\n cx = (xmin+xmax)/2.0\n cy = (ymin+ymax)/2.0\n cx2 = cx + w*r*(np.random.random()*2-1)\n cy2 = cy + h*r*(np.random.random()*2-1)\n h2 = h*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n w2 = w*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n return np.array([cx2-w2/2.0, cy2-h2/2.0, cx2+w2/2.0, cy2+h2/2.0])\n \ndef extract_frustum_data(idx_filename, split, output_filename, viz=False,\n perturb_box2d=False, augmentX=1, type_whitelist=['Car']):\n ''' Extract point clouds and corresponding annotations in frustums\n defined generated from 2D bounding boxes\n Lidar points and 3d boxes are in *rect camera* coord system\n (as that in 3d box label files)\n \n Input:\n idx_filename: string, each line of the file is a sample ID\n split: string, either trianing or testing\n output_filename: string, the name for output .pickle file\n viz: bool, whether to visualize extracted data\n perturb_box2d: bool, whether to perturb the box2d\n (used for 
data augmentation in train set)\n augmentX: scalar, how many augmentations to have for each 2D box.\n type_whitelist: a list of strings, object types we are interested in.\n Output:\n None (will write a .pickle file to the disk)\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR,'dataset/KITTI/object'), split)\n data_idx_list = [int(line.rstrip()) for line in open(idx_filename)] # 取出训练数据的索引,rstrip删除字符串后回车符\n\n id_list = [] # int number\n box2d_list = [] # [xmin,ymin,xmax,ymax]\n box3d_list = [] # (8,3) array in rect camera coord\n input_list = [] # channel number = 4, xyz,intensity in rect camera coord\n label_list = [] # 1 for roi object, 0 for clutter\n type_list = [] # string e.g. Car\n heading_list = [] # ry (along y-axis in rect camera coord) radius of\n # (cont.) clockwise angle from positive x axis in velo coord.\n box3d_size_list = [] # array of l,w,h\n frustum_angle_list = [] # angle of 2d box center from pos x-axis\n\n pos_cnt = 0\n all_cnt = 0\n for data_idx in data_idx_list:\n print('------------- ', data_idx) # 根据索引取出相关数据\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n objects = dataset.get_label_objects(data_idx)\n pc_velo = dataset.get_lidar(data_idx)\n pc_rect = np.zeros_like(pc_velo)\n # 将点云投影到0号相机的修正坐标系中, velo->reference->rect\n pc_rect[:,0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])\n pc_rect[:,3] = pc_velo[:, 3]\n\n img = dataset.get_image(data_idx)\n img_height, img_width, img_channel = img.shape\n # 核心部分,过滤并得到图像视角下的点云数据\n # 将点云从velo->rect->imag,其中rect to iamge是核心,相关函数维kitti_util.py/def project_rect_to_image\n _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(pc_velo[:,0:3],\n calib, 0, 0, img_width, img_height, True)\n\n for obj_idx in range(len(objects)):\n if objects[obj_idx].type not in type_whitelist :continue\n\n # 2D BOX: Get pts rect backprojected \n box2d = objects[obj_idx].box2d\n for _ in range(augmentX):\n # Augment data by box2d perturbation\n if perturb_box2d:\n xmin,ymin,xmax,ymax = random_shift_box2d(box2d)\n print(box2d)\n print(xmin,ymin,xmax,ymax)\n else:\n xmin,ymin,xmax,ymax = box2d\n box_fov_inds = (pc_image_coord[:,0]<xmax) & \\\n (pc_image_coord[:,0]>=xmin) & \\\n (pc_image_coord[:,1]<ymax) & \\\n (pc_image_coord[:,1]>=ymin)\n box_fov_inds = box_fov_inds & img_fov_inds\n pc_in_box_fov = pc_rect[box_fov_inds,:]\n # Get frustum angle (according to center pixel in 2D BOX)\n box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0])\n uvdepth = np.zeros((1,3))\n uvdepth[0,0:2] = box2d_center\n uvdepth[0,2] = 20 # some random depth\n box2d_center_rect = calib.project_image_to_rect(uvdepth)\n frustum_angle = -1 * np.arctan2(box2d_center_rect[0,2],\n box2d_center_rect[0,0])\n # 3D BOX: Get pts velo in 3d box\n obj = objects[obj_idx]\n box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P) \n _,inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)\n label = np.zeros((pc_in_box_fov.shape[0]))\n label[inds] = 1\n # Get 3D BOX heading\n heading_angle = obj.ry\n # Get 3D BOX size\n box3d_size = np.array([obj.l, obj.w, obj.h])\n\n # Reject too far away object or object without points\n if ymax-ymin<25 or np.sum(label)==0:\n continue\n\n id_list.append(data_idx)\n box2d_list.append(np.array([xmin,ymin,xmax,ymax]))\n box3d_list.append(box3d_pts_3d)\n input_list.append(pc_in_box_fov)\n label_list.append(label)\n type_list.append(objects[obj_idx].type)\n heading_list.append(heading_angle)\n box3d_size_list.append(box3d_size)\n frustum_angle_list.append(frustum_angle)\n \n # collect statistics\n pos_cnt += np.sum(label)\n 
all_cnt += pc_in_box_fov.shape[0]\n \n print('Average pos ratio: %f' % (pos_cnt/float(all_cnt)))\n print('Average npoints: %f' % (float(all_cnt)/len(id_list)))\n \n with open(output_filename,'wb') as fp:\n pickle.dump(id_list, fp)\n pickle.dump(box2d_list,fp)\n pickle.dump(box3d_list,fp)\n pickle.dump(input_list, fp)\n pickle.dump(label_list, fp)\n pickle.dump(type_list, fp)\n pickle.dump(heading_list, fp)\n pickle.dump(box3d_size_list, fp)\n pickle.dump(frustum_angle_list, fp)\n \n if viz:\n import mayavi.mlab as mlab\n for i in range(10):\n p1 = input_list[i]\n seg = label_list[i] \n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,0], p1[:,1], p1[:,2], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,2], -p1[:,0], -p1[:,1], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n raw_input()\n\ndef get_box3d_dim_statistics(idx_filename):\n ''' Collect and dump 3D bounding box statistics '''\n dataset = kitti_object(os.path.join(ROOT_DIR,'dataset/KITTI/object'))\n dimension_list = []\n type_list = []\n ry_list = []\n data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]\n for data_idx in data_idx_list:\n print('------------- ', data_idx)\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n objects = dataset.get_label_objects(data_idx)\n for obj_idx in range(len(objects)):\n obj = objects[obj_idx]\n if obj.type=='DontCare':continue\n dimension_list.append(np.array([obj.l,obj.w,obj.h])) \n type_list.append(obj.type) \n ry_list.append(obj.ry)\n\n with open('box3d_dimensions.pickle','wb') as fp:\n pickle.dump(type_list, fp)\n pickle.dump(dimension_list, fp)\n pickle.dump(ry_list, fp)\n\ndef read_det_file(det_filename):\n ''' Parse lines in 2D detection output files '''\n det_id2str = {1:'Pedestrian', 2:'Car', 3:'Cyclist'}\n id_list = []\n type_list = []\n prob_list = []\n box2d_list = []\n for line in open(det_filename, 'r'):\n t = line.rstrip().split(\" \")\n id_list.append(int(os.path.basename(t[0]).rstrip('.png')))\n type_list.append(det_id2str[int(t[1])])\n prob_list.append(float(t[2]))\n box2d_list.append(np.array([float(t[i]) for i in range(3,7)]))\n return id_list, type_list, box2d_list, prob_list\n\n \ndef extract_frustum_data_rgb_detection(det_filename, split, output_filename,\n viz=False,\n type_whitelist=['Car'],\n img_height_threshold=25,\n lidar_point_threshold=5):\n ''' Extract point clouds in frustums extruded from 2D detection boxes.\n Update: Lidar points and 3d boxes are in *rect camera* coord system\n (as that in 3d box label files)\n \n Input:\n det_filename: string, each line is\n img_path typeid confidence xmin ymin xmax ymax\n split: string, either trianing or testing\n output_filename: string, the name for output .pickle file\n type_whitelist: a list of strings, object types we are interested in.\n img_height_threshold: int, neglect image with height lower than that.\n lidar_point_threshold: int, neglect frustum with too few points.\n Output:\n None (will write a .pickle file to the disk)\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'), split)\n det_id_list, det_type_list, det_box2d_list, det_prob_list = \\\n read_det_file(det_filename)\n cache_id = -1\n cache = None\n \n id_list = []\n type_list = []\n box2d_list = []\n prob_list = []\n input_list = [] # channel number = 4, xyz,intensity in 
rect camera coord\n frustum_angle_list = [] # angle of 2d box center from pos x-axis\n\n for det_idx in range(len(det_id_list)):\n data_idx = det_id_list[det_idx]\n print('det idx: %d/%d, data idx: %d' % \\\n (det_idx, len(det_id_list), data_idx))\n if cache_id != data_idx:\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n pc_velo = dataset.get_lidar(data_idx)\n pc_rect = np.zeros_like(pc_velo)\n pc_rect[:,0:3] = calib.project_velo_to_rect(pc_velo[:,0:3])\n pc_rect[:,3] = pc_velo[:,3]\n img = dataset.get_image(data_idx)\n img_height, img_width, img_channel = img.shape\n _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(\\\n pc_velo[:,0:3], calib, 0, 0, img_width, img_height, True)\n cache = [calib,pc_rect,pc_image_coord,img_fov_inds]\n cache_id = data_idx\n else:\n calib,pc_rect,pc_image_coord,img_fov_inds = cache\n\n if det_type_list[det_idx] not in type_whitelist: continue\n\n # 2D BOX: Get pts rect backprojected \n xmin,ymin,xmax,ymax = det_box2d_list[det_idx]\n box_fov_inds = (pc_image_coord[:,0]<xmax) & \\\n (pc_image_coord[:,0]>=xmin) & \\\n (pc_image_coord[:,1]<ymax) & \\\n (pc_image_coord[:,1]>=ymin)\n box_fov_inds = box_fov_inds & img_fov_inds\n pc_in_box_fov = pc_rect[box_fov_inds,:]\n # Get frustum angle (according to center pixel in 2D BOX)\n box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0])\n uvdepth = np.zeros((1,3))\n uvdepth[0,0:2] = box2d_center\n uvdepth[0,2] = 20 # some random depth\n box2d_center_rect = calib.project_image_to_rect(uvdepth)\n frustum_angle = -1 * np.arctan2(box2d_center_rect[0,2],\n box2d_center_rect[0,0])\n \n # Pass objects that are too small\n if ymax-ymin<img_height_threshold or \\\n len(pc_in_box_fov)<lidar_point_threshold:\n continue\n \n id_list.append(data_idx)\n type_list.append(det_type_list[det_idx])\n box2d_list.append(det_box2d_list[det_idx])\n prob_list.append(det_prob_list[det_idx])\n input_list.append(pc_in_box_fov)\n frustum_angle_list.append(frustum_angle)\n \n with open(output_filename,'wb') as fp:\n pickle.dump(id_list, fp)\n pickle.dump(box2d_list,fp)\n pickle.dump(input_list, fp)\n pickle.dump(type_list, fp)\n pickle.dump(frustum_angle_list, fp)\n pickle.dump(prob_list, fp)\n \n if viz:\n import mayavi.mlab as mlab\n for i in range(10):\n p1 = input_list[i]\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,0], p1[:,1], p1[:,2], p1[:,1], mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,2], -p1[:,0], -p1[:,1], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n raw_input()\n\ndef write_2d_rgb_detection(det_filename, split, result_dir):\n ''' Write 2D detection results for KITTI evaluation.\n Convert from Wei's format to KITTI format. 
\n \n Input:\n det_filename: string, each line is\n img_path typeid confidence xmin ymin xmax ymax\n split: string, either trianing or testing\n result_dir: string, folder path for results dumping\n Output:\n None (will write <xxx>.txt files to disk)\n\n Usage:\n write_2d_rgb_detection(\"val_det.txt\", \"training\", \"results\")\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'), split)\n det_id_list, det_type_list, det_box2d_list, det_prob_list = \\\n read_det_file(det_filename)\n # map from idx to list of strings, each string is a line without \\n\n results = {} \n for i in range(len(det_id_list)):\n idx = det_id_list[i]\n typename = det_type_list[i]\n box2d = det_box2d_list[i]\n prob = det_prob_list[i]\n output_str = typename + \" -1 -1 -10 \"\n output_str += \"%f %f %f %f \" % (box2d[0],box2d[1],box2d[2],box2d[3])\n output_str += \"-1 -1 -1 -1000 -1000 -1000 -10 %f\" % (prob)\n if idx not in results: results[idx] = []\n results[idx].append(output_str)\n if not os.path.exists(result_dir): os.mkdir(result_dir)\n output_dir = os.path.join(result_dir, 'data')\n if not os.path.exists(output_dir): os.mkdir(output_dir)\n for idx in results:\n pred_filename = os.path.join(output_dir, '%06d.txt'%(idx))\n fout = open(pred_filename, 'w')\n for line in results[idx]:\n fout.write(line+'\\n')\n fout.close() \n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--demo', action='store_true', help='Run demo.')\n parser.add_argument('--gen_train', action='store_true', help='Generate train split frustum data with perturbed GT 2D boxes')\n parser.add_argument('--gen_val', action='store_true', help='Generate val split frustum data with GT 2D boxes')\n parser.add_argument('--gen_val_rgb_detection', action='store_true', help='Generate val split frustum data with RGB detection 2D boxes')\n parser.add_argument('--car_only', action='store_true', help='Only generate cars; otherwise cars, peds and cycs')\n args = parser.parse_args()\n\n if args.demo:\n demo()\n exit()\n\n if args.car_only:\n type_whitelist = ['Car']\n output_prefix = 'frustum_caronly_'\n else:\n type_whitelist = ['Car', 'Pedestrian', 'Cyclist']\n output_prefix = 'frustum_carpedcyc_'\n\n #if args.gen_train:\n if True:\n extract_frustum_data(\\\n os.path.join(BASE_DIR, 'image_sets/train.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'train.pickle'), \n viz=False, perturb_box2d=True, augmentX=5,\n type_whitelist=type_whitelist)\n\n if args.gen_val:\n extract_frustum_data(\\\n os.path.join(BASE_DIR, 'image_sets/val.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'val.pickle'),\n viz=False, perturb_box2d=False, augmentX=1,\n type_whitelist=type_whitelist)\n\n if args.gen_val_rgb_detection:\n extract_frustum_data_rgb_detection(\\\n os.path.join(BASE_DIR, 'rgb_detections/rgb_detection_val.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'val_rgb_detection.pickle'),\n viz=False,\n type_whitelist=type_whitelist) \n"
] | [
[
"numpy.random.random",
"scipy.spatial.Delaunay",
"numpy.arctan2",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
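extract_pc_in_box3d above reduces to a point-in-convex-hull test via scipy.spatial.Delaunay; a self-contained sketch with a unit cube standing in for the (8,3) box corners:

import numpy as np
from scipy.spatial import Delaunay

corners = np.array([[x, y, z] for x in (0, 1) for y in (0, 1) for z in (0, 1)],
                   dtype=float)                    # 8 corners of a unit cube
pts = np.array([[0.5, 0.5, 0.5],                   # inside
                [1.5, 0.0, 0.0]])                  # outside
inside = Delaunay(corners).find_simplex(pts) >= 0  # -1 means "in no simplex"
print(inside)  # [ True False]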
hareeshreddi/Computer-Networks-Lab-Assignments | [
"c86665a4fb673fd53b636f552e02e6d06c94ba22"
] | [
"Networks Lab-04/Scripts-Data-Graphs/CodesToGenerateGraphs/tcp_ack_sta1.py"
] | [
"import matplotlib.pyplot as plt\nfrom pylab import MaxNLocator\n# x axis values\nx = [0,256,512,1000]\n# corresponding y axis values\ny = [18.2365701294,18.398790741,18.4004942322,18.398790741]\n\n# plotting the points\nplt.plot(x, y, color='green',linewidth = 3,\n marker='o', markerfacecolor='blue', markersize=6)\n#plt.xlim(0,24)\n#plt.ylim(26,31)\n# setting x and y axis range\n# naming the x axis\nplt.xlabel('RTS Threshold in bytes',color=\"red\")\n# naming the y axis\nplt.ylabel('Average Bandwidth spent in transmitting TCP ACK in Mbps',color=\"red\")\n# giving a title to my graph\nplt.title('Graph of Average Bandwidth spent vs RTS Threshold',color=\"magenta\")\n# function to show the plot\nplt.show()\n\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
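The script above imports MaxNLocator from pylab but never uses it; presumably it was meant to force clean tick positions on the axes. A sketch of that assumed intent (my guess, not in the original):

import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator  # what pylab's MaxNLocator resolves to

fig, ax = plt.subplots()
ax.plot([0, 256, 512, 1000], [18.237, 18.399, 18.400, 18.399], marker='o')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))  # integer tick positions only
plt.show()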
VMReyes/isthisdaniel | [
"9bb1bd24652c5afd585d1437dcd1ef5c532f10de"
] | [
"src/sample/cv/im_capture.py"
] | [
"import cv2\nimport numpy as np\nfrom PIL import ImageGrab\n\n# set some global variables\nis_clicked = False\ncoors = list()\nloop = True\nfilename = 'tmp'\n\ndef run(filepath='tmp'):\n global img, loop, filename\n\n # set the file path if it was passed\n filename = filepath\n\n try:\n # create a window to hold the feed, and set up the mouse callback\n cv2.namedWindow('Feed', cv2.WINDOW_NORMAL)\n cv2.setMouseCallback('Feed', click_and_crop)\n\n # infinitely monitor the screen (albeit somewhat slowly)\n while loop:\n # grabs a screenshot of the entire screen\n raw_grab = ImageGrab.grab()\n\n # converts that screenshot to a NumPy array (and array of numbers)\n img = np.array(raw_grab)\n\n # show the image on the display window we created earlier\n cv2.imshow('Feed', img)\n\n # if the user quits as with pressing 'q'\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n\n # close any open windows\n cv2.destroyAllWindows()\n\n # some basic error handling\n except KeyboardInterrupt:\n cv2.destroyAllWindows()\n\n except Exception as err:\n cv2.destroyAllWindows()\n print(err)\n exit(1)\n\n# a mouse callback that allows our mouse events to be registered and perform some actions\ndef click_and_crop(event, x, y, flags, param):\n global coors, is_clicked, loop\n\n # if the button is pressed\n if event == cv2.EVENT_LBUTTONDOWN and not is_clicked:\n coors.append((x, y))\n is_clicked = True\n\n # if the button is released\n elif event == cv2.EVENT_LBUTTONUP and is_clicked:\n coors.append((x, y))\n is_clicked = False\n\n # if we have two coordinates\n if len(coors) == 2 and not is_clicked:\n # take the selection and write it to file\n cv2.imwrite(filename + '.png', img[coors[0][1]:coors[1][1], coors[0][0]:coors[1][0]])\n\n # stop looping\n loop = False\n"
] | [
[
"numpy.array"
]
] |
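One caveat in click_and_crop above: img[coors[0][1]:coors[1][1], coors[0][0]:coors[1][0]] assumes the drag went down-and-right; dragging up or left produces an empty slice. A sketch of normalizing the two corners before writing (an assumed fix, not in the original):

# inside click_and_crop, replacing the cv2.imwrite call
(x0, y0), (x1, y1) = coors
top, bottom = sorted((y0, y1))
left, right = sorted((x0, x1))
cv2.imwrite(filename + '.png', img[top:bottom, left:right])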
ioyy900205/MSDNet-PyTorch | [
"1df47bb193f9392a54fec42d2591a337ca997619"
] | [
"adaptive_inference.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport os\nimport math\n\ndef dynamic_evaluate(model, test_loader, val_loader, args):\n tester = Tester(model, args)\n\n #######################################################################################\n ## 注释:\n ## 每个分类器对于每一个样本的一个预测结果置信度\n #######################################################################################\n if os.path.exists(os.path.join(args.save, 'logits_single.pth')): \n val_pred, val_target, test_pred, test_target = \\\n torch.load(os.path.join(args.save, 'logits_single.pth')) \n else: \n val_pred, val_target = tester.calc_logit(val_loader) #val_pred size(5,50000,1000) val_target size(50000)\n test_pred, test_target = tester.calc_logit(test_loader) #test_pred size(5,50000,1000) test_target size(50000)\n torch.save((val_pred, val_target, test_pred, test_target), \n os.path.join(args.save, 'logits_single.pth'))\n\n flops = torch.load(os.path.join(args.save, 'flops.pth'))\n\n with open(os.path.join(args.save, 'dynamic.txt'), 'w') as fout:\n for p in range(1, 40):\n print(\"*********************\")\n _p = torch.FloatTensor(1).fill_(p * 1.0 / 20)\n probs = torch.exp(torch.log(_p) * torch.range(1, args.nBlocks))\n probs /= probs.sum()\n acc_val, _, T = tester.dynamic_eval_find_threshold(\n val_pred, val_target, probs, flops)\n acc_test, exp_flops = tester.dynamic_eval_with_threshold(\n test_pred, test_target, flops, T)\n print('valid acc: {:.3f}, test acc: {:.3f}, test flops: {:.2f}M'.format(acc_val, acc_test, exp_flops / 1e6))\n fout.write('{}\\t{}\\n'.format(acc_test, exp_flops.item()))\n\n\n\nclass Tester(object):\n def __init__(self, model, args=None):\n self.args = args\n self.model = model\n self.softmax = nn.Softmax(dim=1).cuda()\n\n def calc_logit(self, dataloader):\n self.model.eval()\n n_stage = self.args.nBlocks\n logits = [[] for _ in range(n_stage)]\n targets = []\n for i, (input, target) in enumerate(dataloader):\n targets.append(target)\n with torch.no_grad():\n input_var = torch.autograd.Variable(input)\n output = self.model(input_var)\n if not isinstance(output, list):\n output = [output]\n for b in range(n_stage):\n _t = self.softmax(output[b])\n\n logits[b].append(_t) \n\n if i % self.args.print_freq == 0: \n print('Generate Logit: [{0}/{1}]'.format(i, len(dataloader)))\n\n for b in range(n_stage):\n logits[b] = torch.cat(logits[b], dim=0)\n\n size = (n_stage, logits[0].size(0), logits[0].size(1))\n ts_logits = torch.Tensor().resize_(size).zero_()\n for b in range(n_stage):\n ts_logits[b].copy_(logits[b])\n\n targets = torch.cat(targets, dim=0)\n ts_targets = torch.Tensor().resize_(size[1]).copy_(targets)\n\n return ts_logits, ts_targets\n\n def dynamic_eval_find_threshold(self, logits, targets, p, flops):\n #(val_pred, val_target, probs, flops)\n \"\"\"\n logits: m * n * c\n m: Stages\n n: Samples\n c: Classes\n \"\"\"\n n_stage, n_sample, c = logits.size()\n\n max_preds, argmax_preds = logits.max(dim=2, keepdim=False) #[5,50000]\n\n _, sorted_idx = max_preds.sort(dim=1, descending=True)\n\n filtered = torch.zeros(n_sample)\n T = torch.Tensor(n_stage).fill_(1e8)\n\n for k in range(n_stage - 1):\n acc, count = 0.0, 0\n out_n = math.floor(n_sample * p[k])\n for i in range(n_sample):\n ori_idx = sorted_idx[k][i]\n if filtered[ori_idx] == 0:\n count += 1\n if count == out_n:\n T[k] = max_preds[k][ori_idx]\n break\n 
filtered.add_(max_preds[k].ge(T[k]).type_as(filtered))\n\n T[n_stage -1] = -1e8 # accept all of the samples at the last stage\n\n acc_rec, exp = torch.zeros(n_stage), torch.zeros(n_stage)\n acc, expected_flops = 0, 0\n for i in range(n_sample):\n gold_label = targets[i]\n for k in range(n_stage):\n if max_preds[k][i].item() >= T[k]: # force the sample to exit at k\n if int(gold_label.item()) == int(argmax_preds[k][i].item()):\n acc += 1\n acc_rec[k] += 1\n exp[k] += 1\n break\n acc_all = 0\n for k in range(n_stage):\n _t = 1.0 * exp[k] / n_sample\n expected_flops += _t * flops[k]\n acc_all += acc_rec[k]\n\n return acc * 100.0 / n_sample, expected_flops, T\n\n def dynamic_eval_with_threshold(self, logits, targets, flops, T): #(test_pred, test_target, flops, T)\n n_stage, n_sample, _ = logits.size()\n max_preds, argmax_preds = logits.max(dim=2, keepdim=False) # take the max logits as confidence\n\n acc_rec, exp = torch.zeros(n_stage), torch.zeros(n_stage)\n acc, expected_flops = 0, 0\n for i in range(n_sample):\n gold_label = targets[i]\n for k in range(n_stage):\n if max_preds[k][i].item() >= T[k]: # force to exit at k\n _g = int(gold_label.item())\n _pred = int(argmax_preds[k][i].item())\n if _g == _pred:\n acc += 1\n acc_rec[k] += 1\n exp[k] += 1\n break\n acc_all, sample_all = 0, 0\n for k in range(n_stage):\n _t = exp[k] * 1.0 / n_sample\n sample_all += exp[k]\n expected_flops += _t * flops[k]\n acc_all += acc_rec[k]\n\n return acc * 100.0 / n_sample, expected_flops\n"
] | [
[
"torch.nn.Softmax",
"torch.range",
"torch.Tensor",
"torch.zeros",
"torch.cat",
"torch.no_grad",
"torch.FloatTensor",
"torch.log",
"torch.autograd.Variable"
]
] |
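dynamic_eval_with_threshold above applies a first-confident-exit rule per sample; stripped of the accuracy/FLOPs bookkeeping it is just this sketch (threshold values made up; dynamic_eval_find_threshold sets the last threshold to -1e8 so the final stage accepts everything):

import torch

def exit_stage(max_preds, T):
    # max_preds: (n_stage,) max softmax confidence of each classifier
    # T: per-stage thresholds, T[-1] = -1e8
    for k in range(len(T)):
        if max_preds[k].item() >= T[k].item():
            return k          # exit at the first sufficiently confident stage
    return len(T) - 1

T = torch.tensor([0.90, 0.80, -1e8])
print(exit_stage(torch.tensor([0.70, 0.75, 0.99]), T))  # -> 2 (falls through)
print(exit_stage(torch.tensor([0.95, 0.10, 0.10]), T))  # -> 0 (early exit)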
wc253/HaltingNetwork | [
"b4890f53fa87de8163a541e693a3161dcb4cad41"
] | [
"original_unfolding_networks/tools/problems.py"
] | [
"#!/usr/bin/python\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport numpy.linalg as la\nimport math\nimport tensorflow as tf\nimport os\nfrom collections import OrderedDict\nclass Generator(object):\n def __init__(self,A,L,**kwargs):\n self.A = A\n self.L=L\n M,N = A.shape\n vars(self).update(kwargs)\n self.x_ = tf.placeholder( tf.float32,(N,L),name='x' )\n self.y_ = tf.placeholder( tf.float32,(M,L),name='y' )\n\nclass TFGenerator(Generator):\n def __init__(self,**kwargs):\n Generator.__init__(self,**kwargs)\n def __call__(self,sess):\n 'generates y,x pair for training'\n return sess.run( ( self.ygen_,self.xgen_ ) )\n def get_batch(self):\n dataset_info_path='train_info.txt'#train_info.txt\n with open(dataset_info_path,'r') as dataset_info:\n input_info=OrderedDict()\n for line in dataset_info.readlines():\n items=line.split(',')\n try:\n input_info[items[0]]=[int(dim) for dim in items[1:]]\n except:\n input_info[items[0]]=[]\n def _parse_tf_example(example_proto):\n features=dict([(key,tf.FixedLenFeature([],tf.string)) for key,_ in input_info.items()])\n parsed_features=tf.parse_single_example(example_proto,features=features)\n return [tf.reshape(tf.decode_raw(parsed_features[key],tf.float32),value) for key,value in input_info.items()]\n\n dataset_path='train.tfrecords'\n dataset=tf.data.TFRecordDataset(dataset_path)#[dataset_path1,dataset_path2]\n dataset=dataset.map(_parse_tf_example)\n dataset=dataset.repeat()\n dataset=dataset.batch(1)\n iterator=dataset.make_initializable_iterator()\n data_batch=iterator.get_next()\n keys=list(input_info.keys())\n data_batch=dict([(keys[i],data_batch[i]) for i in range(len(keys))])\n return data_batch,iterator.initializer\n\nclass NumpyGenerator(Generator):\n def __init__(self,**kwargs):\n Generator.__init__(self,**kwargs)\n\n def __call__(self,sess):\n 'generates y,x pair for training'\n return self.p.genYX(self.nbatches,self.nsubprocs)\n \n def get_batch(self):\n dataset_info_path='train_info.txt'#train_info.txt\n with open(dataset_info_path,'r') as dataset_info:\n input_info=OrderedDict()\n for line in dataset_info.readlines():\n items=line.split(',')\n try:\n input_info[items[0]]=[int(dim) for dim in items[1:]]\n except:\n input_info[items[0]]=[]\n def _parse_tf_example(example_proto):\n features=dict([(key,tf.FixedLenFeature([],tf.string)) for key,_ in input_info.items()])\n parsed_features=tf.parse_single_example(example_proto,features=features)\n return [tf.reshape(tf.decode_raw(parsed_features[key],tf.float32),value) for key,value in input_info.items()]\n\n dataset_path='train.tfrecords'\n dataset=tf.data.TFRecordDataset(dataset_path)#[dataset_path1,dataset_path2]\n dataset=dataset.map(_parse_tf_example)\n dataset=dataset.repeat()\n dataset=dataset.batch(1)\n iterator=dataset.make_initializable_iterator()\n data_batch=iterator.get_next()\n keys=list(input_info.keys())\n data_batch=dict([(keys[i],data_batch[i]) for i in range(len(keys))])\n return data_batch,iterator.initializer\n\n\ndef generate(N,L,pnz):\n return ((np.random.uniform(0, 1, (N, L)) < pnz) * np.random.normal(0, 1, (N, L))).astype(np.float32)\n\ndef generate_k(N,L,k):\n bernoulli_ = np.zeros([N, L])\n for i in range(L):\n d1=np.zeros(N-k)\n d2=np.ones(k)\n d=np.concatenate([d1,d2])\n np.random.shuffle(d)\n bernoulli_[:,i]=d\n return (bernoulli_*np.random.normal(0, 1, (N, L))).astype(np.float32)\n\ndef generate_uni(N,L):\n bernoulli_ = np.zeros([N, L])\n for i in range(L):\n k = np.random.randint(1, 100, 1)[0]\n 
d1=np.zeros(N-k)\n d2=np.ones(k)\n d=np.concatenate([d1,d2])\n np.random.shuffle(d)\n bernoulli_[:,i]=d\n return (bernoulli_*np.random.normal(0, 1, (N, L))).astype(np.float32)\n\n\ndef cond(tmp):\n return tf.less(tf.squeeze(tf.abs(tmp)),0.1)\n\ndef body(tmp):\n tmp=tf.random_normal([1])\n return tmp\n\ndef bernoulli_gaussian_trial(A,M=250,N=500,L=1000,is_train=False):\n \n A_ = tf.constant(A,name='A')\n prob = TFGenerator(A=A,L=L,A_=A_)\n prob.name = 'Bernoulli-Gaussian, random A'\n if is_train:\n if not os.path.exists(os.path.join(os.getcwd(), 'train.tfrecords')):\n print('preparing training dataset\\n')\n f1 = open('prepare_data.txt', 'w')\n f1.close()\n def bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n writer=tf.python_io.TFRecordWriter(os.path.join(os.getcwd(),'train.tfrecords'))\n for i in range(30000):\n feature={}\n if i%100==0:\n print(i)\n f1 = open('prepare_data.txt', 'a+')\n f1.write('%d\\n'%(i))\n f1.close()\n np.random.seed()\n prob.xval = generate_uni(N, L).astype(np.float32)\n prob.xval = prob.xval / (np.sqrt(np.sum(np.square(prob.xval), axis=0, keepdims=True)))\n prob.yval = np.matmul(A, prob.xval)\n # tmp1=la.norm(prob.xval,axis=0)\n # tmp2 = la.norm(prob.yval, axis=0)\n feature['y']=bytes_feature(prob.yval.tostring())\n feature['x'] = bytes_feature(prob.xval.tostring())\n\n example=tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString())\n writer.close()\n with open(os.path.join(os.getcwd(),'train_info.txt'),'w') as dataset_info:\n dataset_info.write('y'+','+str(M)+','+str(L)+'\\n')\n dataset_info.write('x' + ',' + str(N) + ',' + str(L) + '\\n')\n if not os.path.exists(os.path.join(os.getcwd(), 'xval.npy')):\n print('preparing validating dataset\\n')\n prob.xval = generate_uni(N, L).astype(np.float32)\n prob.xval = prob.xval / (np.sqrt(np.sum(np.square(prob.xval), axis=0, keepdims=True)))\n prob.yval = np.matmul(A, prob.xval)\n np.save('xval.npy', prob.xval)\n np.save('yval.npy', prob.yval)\n\n prob.xval1 = generate_k(N,L,20).astype(np.float32)\n prob.xval1 = prob.xval1 / (np.sqrt(np.sum(np.square(prob.xval1), axis=0, keepdims=True)))\n prob.yval1 = np.matmul(A,prob.xval1)\n prob.xval2 = generate_k(N, L,40).astype(np.float32)\n prob.xval2 = prob.xval2 / (np.sqrt(np.sum(np.square(prob.xval2), axis=0, keepdims=True)))\n prob.yval2 = np.matmul(A, prob.xval2)\n prob.xval3 = generate_k(N, L,60).astype(np.float32)\n prob.xval3 = prob.xval3 / (np.sqrt(np.sum(np.square(prob.xval3), axis=0, keepdims=True)))\n prob.yval3 = np.matmul(A, prob.xval3)\n prob.xval4 = generate_k(N, L,80).astype(np.float32)\n prob.xval4 = prob.xval4 / (np.sqrt(np.sum(np.square(prob.xval4), axis=0, keepdims=True)))\n prob.yval4 = np.matmul(A, prob.xval4)\n\n else:\n if not os.path.exists(os.path.join(os.getcwd(), 'xtest_uni.npy')):\n print('preparing testing dataset\\n')\n prob.xval = generate_uni(N, 10000).astype(np.float32)\n prob.xval=prob.xval/(np.sqrt (np.sum (np.square (prob.xval), axis=0, keepdims=True)))\n prob.yval = np.matmul(A, prob.xval)\n np.save('xtest_uni.npy', prob.xval)\n np.save('ytest_uni.npy', prob.yval)\n\n\n prob.xinit = generate(N,L,0.1).astype(np.float32)\n prob.xinit = prob.xinit / (np.sqrt(np.sum(np.square(prob.xinit), axis=0, keepdims=True)))\n prob.yinit = np.matmul(A,prob.xinit)\n prob.xval = generate_uni(N, L).astype(np.float32)\n prob.xval = prob.xval / (np.sqrt(np.sum(np.square(prob.xval), axis=0, keepdims=True)))\n prob.yval = np.matmul(A, prob.xval)\n\n return prob\n\n\n"
] | [
[
"tensorflow.FixedLenFeature",
"numpy.concatenate",
"numpy.random.randint",
"numpy.square",
"tensorflow.data.TFRecordDataset",
"tensorflow.decode_raw",
"numpy.matmul",
"numpy.save",
"tensorflow.parse_single_example",
"numpy.zeros",
"tensorflow.placeholder",
"tensorflow.train.BytesList",
"tensorflow.train.Features",
"tensorflow.constant",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.ones",
"numpy.random.normal",
"numpy.random.uniform",
"tensorflow.abs",
"tensorflow.random_normal"
]
] |
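generate_k plus the column normalization in bernoulli_gaussian_trial amount to drawing unit-norm, exactly-k-sparse columns and measuring them through A; a compact sketch (the 1/sqrt(M) scaling of A is my assumption, since the record receives A from elsewhere):

import numpy as np

M, N, L, k = 250, 500, 4, 20
A = (np.random.normal(0, 1, (M, N)) / np.sqrt(M)).astype(np.float32)
x = np.zeros((N, L), dtype=np.float32)
for j in range(L):
    support = np.random.choice(N, k, replace=False)      # exactly k nonzeros
    x[support, j] = np.random.normal(0, 1, k)
x /= np.sqrt(np.sum(np.square(x), axis=0, keepdims=True))  # unit-norm columns
y = np.matmul(A, x)  # (M, L) measurements, as in prob.yval = np.matmul(A, prob.xval)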
baoy-nlp/CNAT | [
"804dfbf230568262b62a006c3b0167cd6fa7cd43"
] | [
"latent_nat/vnat.py"
] | [
"import copy\n\nimport torch\nimport torch.nn as nn\nfrom fairseq.models import register_model, register_model_architecture\n\nfrom .glat import GlancingTransformer, GlancingTransformerDecoder, init_bert_params\nfrom .utils import GateNet, SelfATTEncoder, GaussianVariable, GlobalNames\n\n# to support different version of fairseq\ntry:\n from fairseq.models.transformer import EncoderOut\nexcept ImportError:\n from fairseq.models.fairseq_encoder import EncoderOut\n\n\n@register_model(\"vnat\")\nclass VariationalNAT(GlancingTransformer):\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n decoder = VNATDecoder(args, tgt_dict, embed_tokens)\n if getattr(args, \"apply_bert_init\", False):\n decoder.apply(init_bert_params)\n return decoder\n\n @staticmethod\n def add_args(parser):\n GlancingTransformer.add_args(parser)\n VNATDecoder.add_args(parser)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs):\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)\n word_ins_out, inner_states = self.decoder(\n normalize=False,\n prev_output_tokens=prev_output_tokens,\n encoder_out=encoder_out,\n tgt_tokens=tgt_tokens,\n extra_ret=True,\n )\n losses = self._compute_loss(word_ins_out, tgt_tokens, encoder_out, inner_states)\n\n if inner_states is not None:\n latent_factor = getattr(self.args, \"latent_factor\", 1.0)\n losses[\"KL\"] = {\n \"loss\": self._compute_latent_loss(\n inner_states[GlobalNames.PRI_RET],\n inner_states[GlobalNames.POST_RET]\n ) * latent_factor,\n \"factor\": latent_factor\n }\n\n return losses\n\n @classmethod\n def _compute_latent_loss(cls, prior_out, posterior_out):\n # prior\n mean1 = prior_out[GlobalNames.MEAN]\n logv1 = prior_out[GlobalNames.LOGV]\n var1 = logv1.exp()\n\n mean2 = posterior_out[GlobalNames.MEAN]\n logv2 = posterior_out[GlobalNames.LOGV]\n var2 = logv2.exp()\n\n kl = 0.5 * (logv2 - logv1 + (var1 / var2) + (mean2 - mean1).pow(2) / var2 - 1).sum(dim=-1).mean()\n return kl\n\n\nclass VNATDecoder(GlancingTransformerDecoder):\n \"\"\"\n Extra attributions:\n - posterior\n - prior\n \"\"\"\n\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\n super().__init__(args, dictionary, embed_tokens, no_encoder_attn)\n\n self.posterior = self._build_posterior()\n self.prior = self._build_prior()\n self.gate = GateNet(\n d_model=self.embed_dim * 2,\n d_hidden=self.embed_dim * 4,\n d_output=1 if getattr(args, \"use_scalar_gate\", True) else self.embed_dim,\n dropout=args.dropout\n ) if args.combine_func == \"residual\" else None\n\n @staticmethod\n def add_args(parser, key=None):\n parser.add_argument(\"--latent-factor\", type=float, default=1.0)\n parser.add_argument(\"--latent-dim\", type=int, default=200)\n parser.add_argument(\"--latent-layers\", type=int, default=5)\n parser.add_argument(\"--combine-func\", type=str, default=\"residual\")\n parser.add_argument(\"--use-scalar-gate\", action=\"store_true\", default=False)\n\n def decode(\n self,\n x,\n decoder_padding_mask,\n pos=None,\n encoder_out=None,\n early_exit=None,\n tgt_tokens=None,\n **unused,\n ):\n\n z, z_ret = self.forward_latent(encoder_out, tgt_tokens, inputs=x, decoder_padding_mask=decoder_padding_mask)\n\n # integrating the latent variable information\n feats = self.forward_combine(x, z)\n\n if tgt_tokens is not None and self.glat_training and self.training:\n # Glancing Training\n decoder_outputs, ret = self._forward_decoding(feats, decoder_padding_mask, encoder_out, early_exit)\n\n # glancing for second 
pass\n glancing_inputs, predict, glancing_mask = self.glancing(\n features=decoder_outputs, targets=tgt_tokens, mask=decoder_padding_mask, ratio=self.sampling_ratio,\n inputs=x\n )\n\n # integrating the latent variable information\n feats = self.forward_combine(glancing_inputs, z)\n\n # second decoding pass\n features, ret = self._forward_decoding(feats, decoder_padding_mask, encoder_out, early_exit)\n\n ret[GlobalNames.FEATURES] = decoder_outputs\n ret[GlobalNames.PREDICTS] = predict\n ret[GlobalNames.GLANCING_INPUTS] = glancing_inputs\n ret[GlobalNames.GLANCING_MASK] = glancing_mask\n else:\n\n features, ret = self._forward_decoding(feats, decoder_padding_mask, encoder_out, early_exit)\n ret[GlobalNames.FEATURES] = features\n\n ret[GlobalNames.INPUT] = x\n ret.update(z_ret)\n return features, ret\n\n def forward_latent(self, encoder_out: EncoderOut, tgt_tokens=None, inputs=None, **unused):\n prior_out = self.prior(inputs=encoder_out.encoder_out, mask=~encoder_out.encoder_padding_mask)\n inner_states = {GlobalNames.PRI_RET: prior_out}\n z = prior_out[GlobalNames.REC] # batch_size, hidden\n\n if tgt_tokens is not None:\n y_mask = tgt_tokens.ne(self.padding_idx)\n y_embed = self.forward_embedding(tgt_tokens)[0]\n posterior_out = self.posterior(\n x_embed=encoder_out.encoder_out,\n y_embed=y_embed,\n x_mask=~encoder_out.encoder_padding_mask,\n y_mask=y_mask\n )\n inner_states[GlobalNames.POST_RET] = posterior_out\n\n z = posterior_out[GlobalNames.REC]\n z = z.unsqueeze(1).contiguous().expand(-1, inputs.size(1), -1)\n return z, inner_states\n\n def forward_combine(self, inputs, z):\n if self.gate is not None:\n g = self.gate(torch.cat([inputs, z], dim=-1)).sigmoid()\n inputs = inputs * g + z * (1 - g)\n else:\n inputs = inputs + z\n return inputs\n\n def _build_posterior(self):\n model_args = self.args\n args = copy.deepcopy(model_args)\n args.encoder_layers = getattr(model_args, \"latent_layers\", model_args.decoder_layers)\n return VAEPosterior(args)\n\n def _build_prior(self):\n model_args = self.args\n args = copy.deepcopy(model_args)\n return VAEPrior(args)\n\n\nclass VAEPrior(nn.Module):\n \"\"\"\n p(z|x): mapping enc(x) to mean and logv\n \"\"\"\n\n def __init__(self, args):\n super().__init__()\n self.latent = GaussianVariable(\n input_dim=args.encoder_embed_dim,\n latent_dim=getattr(args, \"latent_dim\", 200),\n output_dim=args.encoder_embed_dim\n )\n\n def forward(self, inputs, mask=None):\n inputs = inputs.transpose(0, 1)\n if mask is not None:\n h_f = (inputs * mask.unsqueeze(-1).float()).sum(dim=1) / mask.sum(dim=-1).float().unsqueeze(-1)\n else:\n h_f = inputs.mean(dim=1)\n\n return self.latent(inputs=h_f)\n\n\nclass VAEPosterior(nn.Module):\n \"\"\"\n q(z|x,y): enc(y) and enc(x), mapping enc(x,y) to mean and logv\n \"\"\"\n\n def __init__(self, args):\n super().__init__()\n\n self.y_encoder = SelfATTEncoder(args)\n\n self.latent = GaussianVariable(\n input_dim=args.encoder_embed_dim * 2,\n latent_dim=getattr(args, \"latent_dim\", 200),\n output_dim=args.encoder_embed_dim\n )\n\n def forward(self, x_embed, y_embed, x_mask=None, y_mask=None):\n def _compute_inputs(inputs, mask=None):\n if mask is not None:\n _h = (inputs * mask.unsqueeze(-1).float()).sum(dim=1) / mask.sum(dim=-1).float().unsqueeze(-1)\n else:\n _h = inputs.mean(dim=1)\n return _h\n\n x_output = x_embed.transpose(0, 1)\n h_f = _compute_inputs(x_output, x_mask)\n\n # encoding y\n y_output = self.y_encoder.forward(y_embed, ~y_mask).encoder_out\n y_output = y_output.transpose(0, 1)\n h_e = _compute_inputs(y_output, 
y_mask)\n\n # concatenate x and y\n h = torch.cat([h_f, h_e], dim=-1)\n return self.latent(inputs=h)\n\n\ndef base_architecture(args):\n from nat_base.vanilla_nat import base_architecture\n base_architecture(args)\n\n\n@register_model_architecture(\"vnat\", \"vnat_wmt14\")\ndef vnat_wmt14(args):\n from latent_nat.glat import glat_wmt14\n glat_wmt14(args)\n base_architecture(args)\n\n\n@register_model_architecture('vnat', 'vnat_iwslt16')\ndef vnat_iwslt16(args):\n from latent_nat.glat import glat_iwslt16\n glat_iwslt16(args)\n base_architecture(args)\n\n\n@register_model_architecture('vnat', 'vnat_iwslt14')\ndef vnat_iwslt14(args):\n from latent_nat.glat import glat_iwslt14\n glat_iwslt14(args)\n base_architecture(args)\n\n\n@register_model_architecture('vnat', 'vnat_base')\ndef vnat_base(args):\n from latent_nat.glat import glat_base\n glat_base(args)\n base_architecture(args)\n"
] | [
[
"torch.cat"
]
] |
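Note on the vnat record above: `_compute_latent_loss` is the closed-form KL divergence between two diagonal Gaussians, KL(N(m1, v1) || N(m2, v2)) = 0.5 * (log v2 - log v1 + v1/v2 + (m2 - m1)^2/v2 - 1). A minimal self-contained sketch of the same formula (my own illustration, not part of the record):

import torch

def diag_gaussian_kl(mean1, logv1, mean2, logv2):
    # KL(N(mean1, var1) || N(mean2, var2)) for diagonal Gaussians,
    # summed over the latent dimension and averaged over the batch,
    # matching VariationalNAT._compute_latent_loss.
    var1, var2 = logv1.exp(), logv2.exp()
    kl = 0.5 * (logv2 - logv1 + var1 / var2 + (mean2 - mean1).pow(2) / var2 - 1)
    return kl.sum(dim=-1).mean()

# Sanity check: the KL of a distribution with itself is zero.
mean, logv = torch.randn(4, 200), torch.zeros(4, 200)
assert diag_gaussian_kl(mean, logv, mean, logv).item() == 0.0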
dearwind153/pytorch-minst | [
"47e9dcc22f611f045027fda782fb55e2e4229500"
] | [
"pytorch_gpu_speed_test.py"
] | [
"import torch\nimport time\n\nprint(torch.__version__) # 返回pytorch的版本\nprint(torch.cuda.is_available()) # 当CUDA可用时返回True\n\na = torch.randn(10000, 1000) # 返回10000行1000列的张量矩阵\nb = torch.randn(1000, 2000) # 返回1000行2000列的张量矩阵\n\nt0 = time.time() # 记录时间\nc = torch.matmul(a, b) # 矩阵乘法运算\nt1 = time.time() # 记录时间\nprint(a.device, t1 - t0, c.norm(2)) # c.norm(2)表示矩阵c的二范数\n\ndevice = torch.device('cuda') # 用GPU来运行\na = a.to(device)\nb = b.to(device)\n\n# 初次调用GPU,需要数据传送,因此比较慢\nt0 = time.time()\nc = torch.matmul(a, b)\nt2 = time.time()\nprint(a.device, t2 - t0, c.norm(2))\n\n# 这才是GPU处理数据的真实运行时间,当数据量越大,GPU的优势越明显\nt0 = time.time()\nc = torch.matmul(a, b)\nt2 = time.time()\nprint(a.device, t2 - t0, c.norm(2))\n"
] | [
[
"torch.device",
"torch.randn",
"torch.matmul",
"torch.cuda.is_available"
]
] |
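Note on the timing script above: CUDA kernels launch asynchronously, so `time.time()` can stop the clock before `torch.matmul` has actually finished on the GPU. A hedged sketch of a more accurate measurement (assumes a CUDA device is available):

import time
import torch

a = torch.randn(10000, 1000, device='cuda')
b = torch.randn(1000, 2000, device='cuda')

torch.cuda.synchronize()  # wait for the allocations/transfers above to complete
t0 = time.time()
c = torch.matmul(a, b)
torch.cuda.synchronize()  # wait for the kernel before stopping the clock
print(a.device, time.time() - t0, c.norm(2))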
lelange/cu-ssp | [
"9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f"
] | [
"model_3/gpu.py"
] | [
"import tensorflow as tf \n# Creates a graph.\nwith tf.device('/gpu:0'):\n a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\nc = tf.matmul(a, b)\n# Creates a session with log_device_placement set to True.\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n# Runs the op.\nprint(sess.run(c))\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.matmul",
"tensorflow.constant",
"tensorflow.device"
]
] |
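Note on the record above: `tf.Session` and `tf.ConfigProto` are TensorFlow 1.x APIs. A sketch of the same device-placement experiment under TensorFlow 2.x eager execution (my own translation, assuming a GPU is visible):

import tensorflow as tf

tf.debugging.set_log_device_placement(True)  # log which device runs each op
with tf.device('/GPU:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)  # runs eagerly; no Session needed
print(c)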
dwraft/gameRL | [
"518b2a2d193220f7334584ccde5cf2ab318d718a"
] | [
"gameRL/game_simulators/blackjack_count.py"
] | [
"# Created by Patrick Kao\nimport math\nfrom typing import Tuple\n\nimport numpy as np\nfrom gym import spaces\n\nfrom gameRL.game_simulators.blackjack import (\n BlackjackDeck,\n CARD_VALUES,\n SUITS,\n BlackjackHand,\n DEALER_MAX,\n BlackjackCustomEnv,\n)\n\n\nclass BlackjackDeckwithCount(BlackjackDeck):\n def __init__(self, N_decks: int, with_replacement=False, rho=1):\n BlackjackDeck.__init__(self, N_decks, with_replacement)\n self.count = 0\n self.rho = rho\n self.reshuffle_point = math.floor(\n len(CARD_VALUES) * SUITS * self.N_decks * (1 - self.rho)\n )\n self.cards_used = 0\n self.reshuffled = False\n\n def draw_card(self) -> Tuple[int, bool]:\n \"\"\"Draws and returns card from the deck\"\"\"\n if self.reshuffled:\n return None, self.reshuffled\n if len(self.deck) - 1 <= self.reshuffle_point:\n self.reshuffled = True\n self.cards_used += 1\n index = np.random.randint(len(self.deck))\n self.count += self.update_count(self.deck[index])\n if self.with_replacement:\n return self.deck[index], self.reshuffled\n return self.deck.pop(index), self.reshuffled\n\n def update_count(self, card, system=\"Hi-Lo\") -> int:\n \"\"\"\n Computes various card-counting systems including: Hi-Lo\n \"\"\"\n if system == \"Hi-Lo\":\n if card in [2, 3, 4, 5, 6]:\n return 1\n elif card in [7, 8, 9]:\n return 0\n else:\n return -1\n\n def get_running_count(self) -> int:\n return self.count\n\n def _get_num_decks(self) -> int:\n cards_per_deck = len(CARD_VALUES) * SUITS\n num_decks = math.ceil(len(self.deck) / cards_per_deck)\n return num_decks\n\n def _get_cards_used(self) -> int:\n return self.cards_used\n\n def _get_full_deck_size(self) -> int:\n return len(CARD_VALUES) * SUITS * self.N_decks\n\n\nclass BlackjackHandwithReshuffle(BlackjackHand):\n def __init__(self, blackjack_deck: BlackjackDeckwithCount, max_hand_sum: int = 21):\n BlackjackHand.__init__(self, blackjack_deck, max_hand_sum)\n self.reshuffled = False\n\n def draw_card(self):\n card, reshuffled = self.blackjack_deck.draw_card()\n if not reshuffled:\n self.hand.append(card)\n else:\n self.reshuffled = True\n\n\nclass BlackjackEnvwithRunningCount(BlackjackCustomEnv):\n def __init__(\n self,\n N_decks: int,\n natural_bonus: bool = True,\n rho=1,\n max_hand_sum: int = 21,\n allow_observe: bool = True,\n ):\n BlackjackCustomEnv.__init__(\n self, N_decks, natural_bonus, max_hand_sum=max_hand_sum\n )\n # actions: either \"hit\" (keep playing), \"stand\" (stop where you are), observe or join\n self.action_space = spaces.Discrete(5) if allow_observe else spaces.Discrete(3)\n # count observation depends on the card-counting system and number of decks\n # use the following defaults\n # Hi-Lo: [-20 * N_decks, 20 * N_decks], (2*20 + 1) * N_decks\n count_space = (2 * 20 + 1) * N_decks\n self.observation_space = spaces.MultiDiscrete(\n [33, 11, 2, count_space, 2]\n ) # last for observing or not\n\n self.rho = rho\n self._allow_observe = allow_observe\n\n # for game objects don't assign value until reset\n self.observing = None\n self.dealer = None\n self.dummy = None\n self.blackjack_deck = None\n self.reshuffled = None\n\n self.reset()\n\n def _calculate_player_reward(self) -> int:\n \"\"\"\n Computes the player's reward in the case that neither busts\n -1 for dealer > player, 0 for tie, 1 for player > dealer\n \"\"\"\n if self.observing:\n return 0\n player_sum = self.player.score()\n dealer_sum = self.dealer.score()\n return (player_sum > dealer_sum) - (dealer_sum > player_sum)\n\n def _hit(self) -> Tuple[bool, int]:\n \"\"\"Handles case where the player 
chooses to hit\"\"\"\n hand_done = False\n if self.observing: # return early if player is observing\n hand_done = True\n return hand_done, 0\n\n self.player.draw_card()\n if self.player.reshuffled: # Deck ran out of cards\n self.reshuffled = True\n hand_done = True\n reward = 0\n elif self.player.is_bust():\n hand_done = False\n reward = -1\n else:\n hand_done = False\n reward = 0\n return hand_done, reward\n\n def _stick(self) -> Tuple[bool, int]:\n \"\"\"Handles case where the player chooses to stick\"\"\"\n hand_done = True\n if self.observing: # return early if player is observing\n return hand_done, 0\n\n while self.dealer.sum_hand() < DEALER_MAX:\n self.dealer.draw_card()\n if self.dealer.reshuffled: # Return early if run out of cards\n self.reshuffled = True\n return hand_done, 0\n\n reward = self._calculate_player_reward()\n if self.natural_bonus and self.player.is_natural() and reward == 1:\n reward = 1.5\n\n return hand_done, reward\n\n def _dummy_stick(self) -> Tuple[bool, int]:\n hand_done = True\n while self.dealer.sum_hand() < DEALER_MAX:\n self.dealer.draw_card()\n if self.dealer.reshuffled: # Return early if run out of cards\n self.reshuffled = True\n return hand_done, 0\n\n reward = 0 # If the player is not in the game, they should receive no reward\n if (\n self.player\n ): # if player is in a hand switches to observing, assume he sticks\n reward = self._calculate_player_reward()\n if self.natural_bonus and self.player.is_natural() and reward == 1:\n reward = 1.5\n\n return hand_done, reward\n\n def _double_down(self):\n \"\"\"\n Handles case where the player chooses to double down\n If the double down is illegal, just ignore it\n \"\"\"\n # it is illegal to double down if you do not have a 9, 10\n hand_done = True\n if self.observing: # return early if player is observing\n return hand_done, 0\n\n # otherwise behavior same as super\n return super()._double_down()\n\n def step(self, action) -> Tuple[Tuple, int, bool, dict]:\n \"\"\"Action must be in the set {0,1,2,3}\"\"\"\n assert self.action_space.contains(action)\n # player hits\n game_done = False\n if action == 1:\n hand_done, reward = self._hit()\n elif action == 0: # player sticks\n hand_done, reward = self._stick()\n elif self._allow_observe and action == 2: # player joins\n self.observing = False\n hand_done, reward = self._dummy_stick()\n elif self._allow_observe and action == 3: # player observes:\n self.observing = True\n hand_done, reward = self._dummy_stick()\n else: # player doubles down\n hand_done, reward = self._double_down()\n\n if hand_done: # draw new cards\n self.redeal()\n\n if self.dealer.reshuffled or self.dummy.reshuffled:\n self.reshuffled = True\n\n game_done = game_done or self.reshuffled\n\n return self._get_obs(), reward, game_done, {}\n\n def _get_obs(self) -> Tuple[int, int, bool, int, bool]:\n \"\"\"\n Gets player's current obs\n :return: Returns sum of own hand, dealer card, usable ace, card counting obs, observing flag\n \"\"\"\n if self.reshuffled:\n return (\n 0,\n 1,\n False,\n self.blackjack_deck.get_running_count(),\n self.observing,\n )\n if self.observing:\n return (\n 0,\n self.dealer.hand[0],\n False,\n self.blackjack_deck.get_running_count(),\n self.observing,\n )\n else:\n return (\n self.player.sum_hand(),\n self.dealer.hand[0],\n self.player.has_usable_ace(),\n self.blackjack_deck.get_running_count(),\n self.observing,\n )\n\n def reset(self) -> Tuple[int, int, bool]:\n if not hasattr(self, \"blackjack_deck\"):\n return None\n\n self.observing = self._allow_observe\n 
self.blackjack_deck = BlackjackDeckwithCount(self.N_decks, rho=self.rho)\n self.dealer = BlackjackHandwithReshuffle(self.blackjack_deck)\n self.dummy = BlackjackHandwithReshuffle(self.blackjack_deck)\n self.reshuffled = False\n if not self.observing:\n self.player = BlackjackHandwithReshuffle(self.blackjack_deck)\n else:\n self.player = None\n return self._get_obs()\n\n def redeal(self) -> Tuple[int, int, bool]:\n self.dealer._initial_draw()\n self.dummy._initial_draw()\n self.reshuffled = (\n self.reshuffled or self.dealer.reshuffled or self.dummy.reshuffled\n )\n if not self.observing:\n if not self.player:\n self.player = BlackjackHandwithReshuffle(self.blackjack_deck)\n self.player._initial_draw()\n self.reshuffled = self.reshuffled or self.player.reshuffled\n else:\n self.player = None\n\n return self._get_obs()\n\n\nclass BlackjackEnvwithTrueCount(BlackjackEnvwithRunningCount):\n def __init__(self, N_decks: int, natural_bonus: bool = True, rho=1):\n BlackjackEnvwithRunningCount.__init__(self, N_decks, natural_bonus, rho=rho)\n # self.action_space = spaces.Discrete(4)\n true_min, true_max = -20, 20\n self.observation_space = spaces.Tuple(\n (\n spaces.Discrete(33), # 32 + 1 for observing hand sum of 0\n spaces.Discrete(11),\n spaces.Discrete(2),\n spaces.Box(\n np.array([true_min], dtype=np.float32),\n np.array([true_max], dtype=np.float32),\n ),\n spaces.Discrete(2), # observing or not\n )\n )\n # self.blackjack_deck: BlackjackDeck = BlackjackDeckwithCount(\n # self.N_decks, rho=rho\n # )\n # self.observing = True\n # self.reshuffled = False\n\n # self.reset()\n\n def _get_obs(self) -> Tuple[int, int, bool]:\n if self.reshuffled:\n return (\n 0,\n 1,\n False,\n self.blackjack_deck.get_true_count(),\n self.observing,\n )\n if self.observing:\n return (\n 0,\n self.dealer.hand[0],\n False,\n self.blackjack_deck.get_true_count(),\n self.observing,\n )\n else:\n return (\n self.player.sum_hand(),\n self.dealer.hand[0],\n self.player.has_usable_ace(),\n self.blackjack_deck.get_true_count(),\n self.observing,\n )\n"
] | [
[
"numpy.array"
]
] |
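Note on the blackjack record above: `update_count` implements the Hi-Lo system, and `BlackjackEnvwithTrueCount` calls a `get_true_count()` method that is not shown in this file (presumably defined elsewhere in the repo). A sketch of both ideas under the usual convention (true count = running count divided by decks remaining); the helper names here are mine:

import math

def hi_lo_value(card: int) -> int:
    # Hi-Lo: removing low cards favours the player (+1), high cards hurt (-1).
    if card in (2, 3, 4, 5, 6):
        return 1
    if card in (7, 8, 9):
        return 0
    return -1  # tens, face cards, aces

def true_count(running_count: int, cards_left: int, cards_per_deck: int = 52) -> float:
    decks_left = max(cards_left / cards_per_deck, 0.5)  # guard against division by ~0
    return running_count / decks_left

print(true_count(running_count=8, cards_left=104))  # 8 / 2 decks remaining = 4.0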
Bhaskers-Blu-Org2/belugasounds | [
"1e8ab23e2f035376fdabb17854d1654738964f9d"
] | [
"step6_full_analysis_scoring_for_new_dataset.py"
] | [
"#\n# full_analysis_scoring_for_new_dataset.py\n#\n# Run trained models on a new data set for which spectrograms have already\n# been generated.\n#\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n#\n\n#%% Imports\n\nimport pandas as pd\nimport numpy as np\nimport glob\nimport os\nimport cv2\nfrom keras.models import model_from_json\n\n\n#%% Path configuration\n\ncurrent_dir = \"./Whale_Acoustics/\"\n\nmodel_dir = current_dir + \"Model/\"\ndata_dir = current_dir + \"Data/\"\nspectrogram_dir = data_dir + \"Extracted_Spectrogram_Full_Analysis/\" \noutput_dir = current_dir + \"Output/\"\n\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\n#%% Enumerate spectrograms to score\n \nspectrogram_filenames = glob.glob(spectrogram_dir + '/*.png')\nprint(\"Total number of Spectrograms: \", len(spectrogram_filenames))\n\n\n#%% Load models\n\nwith open(model_dir + 'cnn_architecture_all_data.json', 'r') as f:\n model_cnn = model_from_json(f.read())\nmodel_cnn.load_weights(model_dir + 'cnn_weights_all_data.h5')\n\nwith open(model_dir + 'vgg16_architecture_all_data.json', 'r') as f:\n model_vgg16 = model_from_json(f.read())\nmodel_vgg16.load_weights(model_dir + 'vgg16_weights_all_data.h5')\n\nwith open(model_dir + 'ResNet50_architecture_all_data.json', 'r') as f:\n model_ResNet50 = model_from_json(f.read())\nmodel_ResNet50.load_weights(model_dir + 'ResNet50_weights_all_data.h5')\n\nwith open(model_dir + 'DenseNet121_architecture_all_data.json', 'r') as f:\n model_DenseNet121 = model_from_json(f.read())\nmodel_DenseNet121.load_weights(model_dir + 'DenseNet121_weights_all_data.h5')\n\n\n#%% Run models on spectrograms\n\nncol, nrow = 300, 300\n\nfull_analysis_score = pd.DataFrame()\nfull_analysis_score['spectrogram_filename'] = spectrogram_filenames\nfull_analysis_score['audio_filename'] = ''\nfull_analysis_score['spectrogram_start_second'] = ''\nfull_analysis_score['predicted_probability'] = 0.0\n\nopt_weights = pd.read_excel(output_dir + 'opt_weights.xlsx', header = None)[0].values.tolist()\n\nfor index, row in full_analysis_score.iterrows():\n if (index % 10000 == 0):\n print(index)\n audio_filename, spectrogram_start_second = row['spectrogram_filename'].split('\\\\')[1].split('_')[0:2]\n img = cv2.imread(row['spectrogram_filename'])\n img = cv2.resize(img, (ncol, nrow))\n img_reshaped = []\n img_reshaped.append(img)\n predict_prob_cnn = model_cnn.predict(np.asarray(img_reshaped) / 255.0).tolist()[0][0]\n predict_prob_vgg16 = model_vgg16.predict(np.asarray(img_reshaped) / 255.0).tolist()[0][0]\n predict_prob_ResNet50 = model_ResNet50.predict(np.asarray(img_reshaped) / 255.0).tolist()[0][0]\n predict_prob_DenseNet121 = model_DenseNet121.predict(np.asarray(img_reshaped) / 255.0).tolist()[0][0]\n ## the opmized weight for each model was computed in previous step\n predicted_probability = sum([x*y for x,y in zip([predict_prob_cnn, predict_prob_vgg16, predict_prob_ResNet50, predict_prob_DenseNet121], opt_weights)])\n full_analysis_score.at(index, 'audio_filename', audio_filename)\n full_analysis_score.at(index, 'spectrogram_start_second', spectrogram_start_second)\n full_analysis_score.at(index, 'predicted_probability', predicted_probability)\n\nfull_analysis_score.to_excel(output_dir + 'full_analysis_ouptut_predicted_scores.xlsx', index=False)\n"
] | [
[
"numpy.asarray",
"pandas.read_excel",
"pandas.DataFrame"
]
] |
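Note on the scoring loop above: calling `predict()` once per spectrogram per model is the main bottleneck. A sketch of a batched alternative (my own suggestion; assumes all resized images fit in memory as a hypothetical list `resized_images`, and reuses the record's model and `opt_weights` names):

import numpy as np

batch = np.stack(resized_images) / 255.0  # shape: (n, 300, 300, 3)
probs = np.column_stack([
    m.predict(batch).ravel()  # one forward pass per model instead of n
    for m in (model_cnn, model_vgg16, model_ResNet50, model_DenseNet121)
])
predicted = probs @ np.asarray(opt_weights)  # weighted ensemble scores, shape: (n,)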
LoicGrobol/decofre | [
"68e12c8da4a6c032bb5ea3edff9e8484344e94e2"
] | [
"decofre/score.py"
] | [
"#! /usr/bin/env python3\nr\"\"\"Coreference antecedent scoring\n\nUsage:\n score [options] <model> <dataset> [<output>]\n\nOptions:\n --pretty Output pretty JSON\n --device <d> The device to use for computations (defaults to `cuda:0` or `cpu`)\n -h, --help Show this screen.\n\"\"\"\nimport contextlib\nimport json\nimport sys\n\nimport typing as ty\n\nimport docopt\nimport torch\n\nfrom loguru import logger\n\nfrom decofre import datatools\nfrom decofre import utils\n\nfrom decofre.models.defaults import Scorer\nfrom decofre.runners import run_model\nfrom decofre import __version__\n\n\[email protected]\ndef smart_open(filename: str, mode: str = \"r\", *args, **kwargs):\n \"\"\"Open files and i/o streams transparently.\"\"\"\n fh: ty.IO\n if filename == \"-\":\n if \"r\" in mode:\n stream = sys.stdin\n else:\n stream = sys.stdout\n if \"b\" in mode:\n fh = stream.buffer\n else:\n fh = stream\n close = False\n else:\n fh = open(filename, mode, *args, **kwargs)\n close = True\n\n try:\n yield fh\n finally:\n if close:\n try:\n fh.close()\n except AttributeError:\n pass\n\n\ndef main_entry_point(argv=None):\n arguments = docopt.docopt(__doc__, version=__version__, argv=argv)\n logger.add(\n utils.TqdmCompatibleStream(),\n format=(\n \"[decofre] \"\n \"<green>{time:YYYY-MM-DD}T{time:HH:mm:ss}</green> {level}: \"\n \"<level>{message}</level>\"\n ),\n colorize=True,\n )\n if arguments[\"<output>\"] is None:\n arguments[\"<output>\"] = \"-\"\n\n if arguments[\"--device\"] is None:\n if torch.cuda.is_available():\n arguments[\"--device\"] = \"cuda:0\"\n else:\n arguments[\"--device\"] = \"cpu\"\n device = torch.device(arguments[\"--device\"])\n\n cor = Scorer.load(arguments[\"<model>\"])\n cor.model.eval()\n cor.model.to(device)\n\n with open(arguments[\"<dataset>\"]) as in_stream:\n data = json.load(in_stream)\n\n # FIXME: this is barely readable, needs heavy refactoring\n mentions = data[\"mentions\"]\n antecedents = data[\"antecedents\"]\n sorted_mentions_ids = sorted(antecedents.keys())\n\n # FIXME: this might OOM for large files but it really speeds things up\n digitized_mentions = {m_id: cor.digitize_span(m) for m_id, m in mentions.items()}\n\n dataset = (\n (\n digitized_mentions[m_id],\n [\n (digitized_mentions[c_id], cor.get_pair_feats(antecedents[m_id][c_id]))\n for c_id in sorted(antecedents[m_id].keys())\n ],\n )\n for m_id in sorted_mentions_ids\n )\n\n with torch.no_grad():\n sys_out = run_model(\n cor.model,\n dataset,\n prepare_batch=datatools.AntecedentsDataset.prepare_source_batch,\n batch_size=64,\n data_len=(len(sorted_mentions_ids) - 1) // 64 + 1,\n join=\"chain\",\n )\n\n scores = dict(zip(sorted_mentions_ids, sys_out))\n\n out_dict = {\"mentions\": data[\"mentions\"], \"antecedents\": dict()}\n for mention_id, antecedent_scores in scores.items():\n scores = antecedent_scores.tolist()\n out_dict[\"antecedents\"][mention_id] = {\n \"candidates\": {\n c_id: {**antecedents[mention_id][c_id], \"score\": s}\n for c_id, s in zip(sorted(antecedents[mention_id].keys()), scores[1:])\n },\n \"anaphoricity\": scores[0],\n }\n\n with smart_open(arguments[\"<output>\"], \"w\") as out_stream:\n json.dump(\n out_dict,\n out_stream,\n ensure_ascii=False,\n indent=2 if arguments[\"--pretty\"] else None,\n )\n\n\nif __name__ == \"__main__\":\n main_entry_point()\n"
] | [
[
"torch.device",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
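Note on the decofre record above: the `--device` fallback in `main_entry_point` is a common idiom. Restated as a standalone helper (my own sketch, not part of the record):

from typing import Optional

import torch

def pick_device(requested: Optional[str] = None) -> torch.device:
    # Honour an explicit request; otherwise prefer the first GPU, else CPU.
    if requested is not None:
        return torch.device(requested)
    return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')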
Paulschneider007/CortexThicknessAnalysis | [
"b5d0b2391d5db6061ac692001d91506dc456e12f"
] | [
"extract_cortex_thickness_v5.py"
] | [
"#!/opt/local/bin/python\n\n__author__ = \"Andrew G. Clark\"\n__date__ = \"7 May 2014\"\n__maintainer__ = \"Andrew G. Clark\"\n__email__ = \"[email protected]\"\n\n\"\"\" This script analyzes linescans and extracts cortex thickness and density from actin/membrane linescan pairs.\n\nThe script can be run in a 'pair' mode (to analyze a single linescan pair)\nor 'batch' mode (to analyze multiple directories full of linescan pairs).\nThe mode can be specified at the bottom (\"main\" function).\n\nFor batch mode:\n\nYour parent directory should contain a file called 'dir_list.dat'\nwith the following information in row/column form, with only space as delimiters:\n\nsub_dir px_size category ch_actin sigma_actin\nstk_1 0.05 control 1 0.119\nstk_2 0.04 siRNA 2 0.220\n...\n\nThe first row must contain the column headers as shown\nDefinitions of input parameters:\n\nsub_dir: The name of the sub-directory containing the linescan pairs (linescan pairs must end in '...average.dat')\npx_size: The pixel size for the linescans in the given sub_dir\ncategory: The category of the experiment in each sub_dir (can be used for plotting later)\nch_actin: The actin channel (either '1' or '2'; used for extracting cortex thickness/i_c)\nsigma_actin: The sigma of the point spread function for the actin channel (used for extracting h/i_c)\n\nNote: For the sub_dir entries in the dir_list, only those directories NOT appearing in 'completed_list_v4_1.dat' will be analyzed\n\nOutput:\n\nIn each sub-directory, a list called '.../ls_data/ls_fit_data.dat' will be created containing linescan and thickness data\n -The columns are labeled according to channel number (ch1/ch2)\n -delta is always the position of the peak intensity of channel 2 (ch2.x_peak) minus ch1.x_peak\nIn each sub-directory, plots of the linescans and the linescans with fits (if applicable) will be saved in '.../ls_plots/'\nAt the end, a master list of all of the data combined is be created in the parent_directory\n\nFor 'manual' mode:\n\nWhen running the script, windows will pop up sequentially to request the following information:\n\n-Channel 1 average linescan file\n-Channel 2 average linescan file\n-Pixel Size\n-Actin Channel\n-Sigma (Actin)\n\nThese parameters are defined above.\n\n\"\"\"\n\nimport os\nimport math\nfrom copy import deepcopy\nfrom tkinter import *\nfrom tkinter.filedialog import *\nfrom tkinter.simpledialog import *\n\nroot = Tk()\n\nimport scipy\nfrom scipy import optimize, stats\nimport pylab\nimport numpy as np\n\nimport utility_functions as uf\n\ndef gauss_func(p, x):\n \"\"\"Definition of gaussian function used to fit linescan peaks.\n p = [a, sigma, mu, c].\n\n \"\"\"\n a, sigma, mu, c = p #unpacks p (for readability)\n g = a / (sigma * math.sqrt(2 * math.pi)) * scipy.exp(-(x - mu)**2 / (2 * sigma**2)) + c\n return g\n\ndef convolved(p,x):\n \"\"\"Defines convolved linescan. Args: x: float or list/iterable of floats,\n the position for which convolved intensity is calculated; p: list/iterable\n of floats, linecan parameters (p=[i_in, i_c, i_out, h, x_c, sigma]).\n Returns: i: float, intensity at x.\n\n \"\"\"\n i_in, i_c, i_out, h, x_c, sigma = p #unpacks p (for readability)\n\n i = (i_in + (i_c - i_in) * stats.norm.cdf((x - x_c) + h / 2., 0., sigma) +\n (i_out - i_c) * stats.norm.cdf((x - x_c) - h / 2., 0., sigma))\n\n return i\n\ndef unconvolved(p,x):\n \"\"\"Defines unconvolved linescan. 
Args: x: float or list/iterable of floats,\n the position for which intensity is calculated; p: list/iterable of floats,\n linecan parameters (p=[i_in, i_c, i_out, h, x_c]). Returns: i: float,\n intensity at x.\n\n \"\"\"\n\n i_in, i_c, i_out, h, x_c = p #unpacks p (for readability)\n\n i = np.zeros(len(x))\n\n for j in range(len(x)):\n if x[j] < x_c - h / 2.:\n i[j] = i_in\n if x[j] >= x_c - h / 2. and x[j] < x_c + h / 2.:\n i[j] = i_c\n if x[j] >= x_c + h / 2.:\n i[j] = i_out\n\n return i\n\ndef sort_ls_list(list):\n \"\"\"Sorts list of linescan files by keyword.\n\n Args:\n list (list): the list to be sorted (here, linescan filenames)\n param (str): the keyword to use for sorting (here, usually 'frame')\n\n \"\"\"\n\n def find_key(line):\n key = int(re.search('frame_([0-9]+)_', line).group(1))\n return key\n\n list.sort(key=find_key)\n return list\n\nclass Linescan():\n \"\"\"Linescan object with methods to extract important parameters\n from linescans.\n\n \"\"\"\n\n def __init__(self,x,i):\n \"\"\"Initializes linescan.\n\n Args:\n x (list of numbers): the position values\n i (list of numbers): the intensity values\n\n \"\"\"\n #populate linescan position/intensity\n self.x = np.array(x,dtype='float') #position list as NumPy array of floats\n self.i = np.array(i,dtype='float') #intensity list as NumPy array of floats\n\n #detminere a few easy parameters from position/intensity\n self.H = self.x[-1] - self.x[0]\n self.i_tot = np.trapz(self.i,self.x)\n\n #populate other attributes\n self.dist_to_x_in_out = 1. #specifies how far away x_in is from the peak (in um)\n self.gauss_params = None #parameter list from Gaussian fit to find peak\n self.x_peak = None #linescan peak position\n self.i_peak = None #linescan peak intensity\n self.i_in = None #intracellular intensity\n self.i_out = None #extracellular intensity\n self.max_idx = None #index of point near linescan center with highest intensity\n self.x_fit = None #position list used for peak fitting\n self.i_fit = None #intensity list used for peak fitting\n self.i_in_x_list = None #position list used to determine self.i_in\n self.i_in_i_list = None #intensity list used to determine self.i_in\n self.i_out_x_list = None #position list used to determine self.i_out\n self.i_out_i_list = None #intensity list used to determine self.i_out\n self.x_in_upper_index = None #the index at the upper end of the region where x_in is calculated\n self.x_out_lower_index = None #the index at the lower end of the region where x_out is calculated\n self.fwhm = None #full width at half-max\n\n #initializes linescans and determines linescan parameters\n self.extract_ls_parameters()\n\n def convert_px_to_um(self):\n \"\"\"Multiplies list of coordinates by pixel_size.\"\"\"\n\n self.x = np.array([a * self.px_size for a in self.x])\n\n def extract_ls_parameters(self):\n \"\"\"Extracts intensity and position information from linescan\"\"\"\n\n self.get_peak()\n self.get_i_in_out()\n self.get_fwhm()\n\n def get_peak(self):\n \"\"\"Finds the peak position and intensity of a linescan by fitting\n a Gaussian near the peak.\n\n \"\"\"\n\n #restricts fitting to near the center of the linescan\n self.max_idx = int(np.argmax(self.i[int(len(self.i)/2-6):int(len(self.i)/2+20)]) + len(self.i)/2-6)\n self.x_fit = self.x[int(self.max_idx-2):int(self.max_idx+3)]\n self.i_fit = self.i[int(self.max_idx-2):int(self.max_idx+3)]\n\n #picks reasonable starting values for fit\n self.i_in_guess = np.mean(self.i[:int(self.max_idx-14)])\n a = (self.i[self.max_idx] - self.i_in_guess) / 
2.4\n sigma = 0.170\n mu = self.x[self.max_idx]\n b = self.i_in_guess\n\n #perform fit with starting values\n p0 = [a, sigma, mu, b]\n p1, success = optimize.leastsq(self.residuals_gauss,p0,\n args=(self.x_fit, self.i_fit),\n maxfev = 1000000)\n self.gauss_params = p1\n self.x_peak = p1[2]\n self.i_peak = gauss_func(p1, self.x_peak)\n\n def get_i_in_out(self):\n \"\"\"Gets values for intracellular intensity (self.i_in) and\n extracellular intensity (self.i_out). The left of the linescan\n (nearer zero) is always assumed to be the intracellular side.\n Note: the i_in and i_out values are calculated to be the average value\n of the ten points out from the distance between the peak and position x away\n from the peak, where x is given by self.dist_to_x_in_out (defined in __init__).\n \"\"\"\n\n x_in_upper = self.x_peak - self.dist_to_x_in_out\n x_in_upper_index = np.argmin(abs(self.x - x_in_upper))\n self.x_in_upper_index = x_in_upper_index #for use in finding total intensity for density calculation\n self.i_in_x_list = self.x[x_in_upper_index-10:x_in_upper_index]\n self.i_in_i_list = self.i[x_in_upper_index-10:x_in_upper_index]\n self.i_in = np.mean(self.i_in_i_list)\n\n x_out_lower = self.x_peak + self.dist_to_x_in_out\n x_out_lower_index = np.argmin(abs(self.x - x_out_lower))\n self.x_out_lower_index = x_out_lower_index #for use in finding total intensity for density calculation\n self.i_out_x_list = self.x[x_out_lower_index:x_out_lower_index+10]\n self.i_out_i_list = self.i[x_out_lower_index:x_out_lower_index+10]\n self.i_out = np.mean(self.i_out_i_list)\n\n def residuals_gauss(self,p,x,x_data):\n \"\"\"Returns residuals for Gaussian fit of the intensity peak.\n Possible values for fit parameters are constrained to avoid\n overestimation of peak intensity.\n\n Args:\n p (list): fit parameters, [a, sigma, mu, c]\n x (list): position values\n x_data (list): intensity values\n\n Returns:\n residuals (list): residuals for fit\n -or-\n fail_array (list): in place of residuals if the fit fails\n\n \"\"\"\n\n a, sigma, mu, c = p #unpacks p (for readability)\n\n i_peak_guess = gauss_func(p, mu)\n\n fail_array = np.ones(len(x)) * 99999.\n\n if all([sigma >= 0.1,\n abs(i_peak_guess - self.i[self.max_idx]) < 0.5 * self.i[self.max_idx]]):\n\n residuals = gauss_func(p,x) - x_data\n return residuals\n\n else:\n return fail_array\n\n def get_fwhm(self):\n \"\"\"Calculates the full-width at half maximum (FWHM) of the linescan peak\"\"\"\n\n #determines half-max\n hm = (self.i_in + self.i_peak) / 2.\n # print(hm)\n\n # finds points closest to hm to the left of the peak\n search = self.i[:self.max_idx]\n self.left_index = (np.abs(search - hm)).argmin()\n if hm > self.i[self.left_index]:\n self.left_index_left = deepcopy(self.left_index)\n self.left_index_right = self.left_index_left + 1\n else:\n self.left_index_right = deepcopy(self.left_index)\n self.left_index_left = self.left_index_right - 1\n\n #gets interpolated intensity (linear interpolation between 2 surrounding points\n m_left = (self.i[self.left_index_right] - self.i[self.left_index_left]) / (self.x[self.left_index_right] - self.x[self.left_index_left])\n b_left = self.i[self.left_index_right] - m_left * self.x[self.left_index_right]\n x_fwhm_left = (hm - b_left) / m_left\n self.fwhm_left = [x_fwhm_left,hm]\n\n #finds point closest to hm to the right of the peak\n search = self.i[self.max_idx:]\n self.right_index = (np.abs(search - hm)).argmin() + self.max_idx\n if hm < self.i[self.right_index]:\n self.right_index_left = 
deepcopy(self.right_index)\n self.right_index_right = self.right_index_left + 1\n else:\n self.right_index_right = deepcopy(self.right_index)\n self.right_index_left = self.right_index_right - 1\n\n #gets interpolated intensity (linear interpolation between 2 surrounding points\n m_right = (self.i[self.right_index_right] - self.i[self.right_index_left]) / (self.x[self.right_index_right] - self.x[self.right_index_left])\n b_right = self.i[self.right_index_right] - m_right * self.x[self.right_index_right]\n x_fwhm_right = (hm - b_right) / m_right\n self.fwhm_right = [x_fwhm_right,hm]\n\n self.fwhm = x_fwhm_right - x_fwhm_left\n\nclass Cortex():\n \"\"\"A Class for a cortex, with actin and membrane linescans and\n methods to determine cortex thickness and density.\n\n\n \"\"\"\n def __init__(self,ch1,ch2,sigma_actin,ch_actin=1):\n \"\"\"Initializes linescan pairs and remaining attributes.\n\n Args:\n ch1 (Linescan class): the ch1 linescan\n ch2 (Linescan class): the ch2 linescan\n sigma_actin (float): the sigma of the PSF for the actin channel\n\n Kwargs:\n ch_actin (int): says which channel is actin\n\n \"\"\"\n self.ch1 = ch1\n self.ch2 = ch2\n self.sigma_actin = sigma_actin\n self.ch_actin = ch_actin\n\n self.delta = self.ch2.x_peak - self.ch1.x_peak #separation between ch2 and ch1 peaks\n\n if self.ch_actin==1:\n self.actin = self.ch1\n self.memb = self.ch2\n elif self.ch_actin==2:\n self.actin = self.ch2\n self.memb = self.ch1\n else:\n self.actin = None\n self.memb = None\n\n self.h_max = 1. #maximum cortex thickness (for constraining fit)\n self.i_c_max = 500. #maximum cortex intensity (for constraining fit)\n self.h = None #cortex thickness (from fit)\n self.i_c = None #cortical actin intensity (from fit)\n self.density = None #cortical actin density\n self.X_c = None #background-independent center position of the cortical actin (from fit)\n self.solution = None #solution from actin cortex thickness fit\n\n def get_h_i_c(self):\n \"\"\" Performs fit to get cortex thickness, h, and cortex intensity, i_c\n\n Note: density is calculated as the difference between fitted cortex intensity\n and intracellular background, normalized by the intensity from the beginning\n of the linescan to end of the i_out calculation region\n\n \"\"\"\n\n delta = abs(self.delta)\n\n #SET STARTING VALUES FOR ROOTS AND SOLUTIONS\n self.solution = 2e+20\n\n #only try fitting if the peak is higher than both i_in and i_out\n if ((self.actin.i_out - self.actin.i_peak) /\n (self.actin.i_in - self.actin.i_peak))>=0:\n\n #loops through several different starting values for i_c and h\n for i_c_factor in np.arange(2.,3.1,0.2):\n for h_factor in np.arange(0.5, 2.1, 0.2):\n\n i_c_start = self.actin.i_peak * i_c_factor\n delta_start = ((self.sigma_actin**2 / delta*2) *\n np.log((self.actin.i_out - i_c_start) /\n (self.actin.i_in - i_c_start)))\n h_start = 2 * (delta - delta_start) * h_factor\n\n #performs fit\n p0 = [h_start, i_c_start]\n\n try:\n result = optimize.leastsq(self.residuals, p0,\n maxfev=100000, full_output=1)\n\n solution_temp = np.sum([x**2 for x in result[2]['fvec']])\n\n if solution_temp < self.solution:\n self.solution = deepcopy(solution_temp)\n p1 = result[0]\n\n except TypeError:\n pass\n\n #controls for bad fits\n if any([self.solution>0.01,\n p1[0] >= self.h_max - 0.001,\n p1[1] >= self.i_c_max - 1.]):\n p1 = [None, None]\n self.h = None\n self.i_c = None\n self.density = None\n self.X_c = None\n self.solution = None\n else:\n self.h, self.i_c = p1\n actin_ls_mean = 
np.mean(self.actin.i[:self.actin.x_out_lower_index+10])\n self.density = (self.i_c - self.actin.i_in) / actin_ls_mean\n self.X_c = self.memb.x_peak - self.h / 2.\n\n def residuals(self,p):\n \"\"\"Calculates residuals for cortex linescan fit to extract cortex\n thickness and intensity values\n\n Args:\n p (list of floats): [thickness, cortex_intensity]\n\n Returns:\n residuals (list of floats): [residual1, residual2]\n -or-\n fail_array (list of floats): [1000000., 1000000.]\n (returned only if fitting fails)\n\n \"\"\"\n\n fail_array = [1000000., 1000000.]\n\n #constrains fit and ensures log term is positive\n if all([self.h_max>p[0]>0,\n self.i_c_max>p[1]>self.actin.i_in,\n (self.actin.i_out - p[1]) / (self.actin.i_in - p[1]) > 0]):\n\n #X_c is the position of the center of the cortex\n #x_c is the position of the cortex peak\n X_c_try = self.memb.x_peak - p[0] / 2.\n delta_try = (self.sigma_actin**2 / p[0]) * np.log((self.actin.i_out - p[1]) / (self.actin.i_in - p[1]))\n x_c_try = X_c_try - delta_try\n i_peak_try = convolved([self.actin.i_in, p[1], self.actin.i_out, p[0], X_c_try, self.sigma_actin], x_c_try)\n\n #residuals are difference between calculated peak position/intensity and values from data\n residuals = [x_c_try - self.actin.x_peak, i_peak_try - self.actin.i_peak]\n return residuals\n\n else:\n return fail_array\n\n def plot_lss(self):\n \"\"\"Plots linescans\"\"\"\n\n fig = pylab.figure()\n ax = fig.add_subplot(1,1,1)\n\n #plots raw data\n pylab.plot(self.ch1.x,self.ch1.i,'go',label=\"Ch. 1\")\n pylab.plot(self.ch2.x,self.ch2.i,'ro',label=\"Ch. 2\")\n\n #plots points used for determining i_in and i_out\n pylab.plot(self.ch1.i_in_x_list,self.ch1.i_in_i_list,'yo',label=r\"$i_{\\rm{in}}$, $i_{\\rm{out}}$\")\n pylab.plot(self.ch2.i_in_x_list,self.ch2.i_in_i_list,'yo')\n pylab.plot(self.ch1.i_out_x_list,self.ch1.i_out_i_list,'yo')\n pylab.plot(self.ch2.i_out_x_list,self.ch2.i_out_i_list,'yo')\n\n #plots points used to calculate fwhm and shows the fwhm\n # pylab.plot(self.ch1.x[self.ch1.left_index_left],self.ch1.i[self.ch1.left_index_left],'ko',label=\"fwhm points\")\n # pylab.plot(self.ch1.x[self.ch1.left_index_left],self.ch1.i[self.ch1.left_index_left],'ko')\n # pylab.plot(self.ch1.x[self.ch1.left_index_right],self.ch1.i[self.ch1.left_index_right],'ko')\n # pylab.plot(self.ch1.x[self.ch1.right_index_left],self.ch1.i[self.ch1.right_index_left],'ko')\n # pylab.plot(self.ch1.x[self.ch1.right_index_right],self.ch1.i[self.ch1.right_index_right],'ko')\n #\n # pylab.plot(self.ch2.x[self.ch2.left_index_left],self.ch2.i[self.ch2.left_index_left],'ko')\n # pylab.plot(self.ch2.x[self.ch2.left_index_right],self.ch2.i[self.ch2.left_index_right],'ko')\n # pylab.plot(self.ch2.x[self.ch2.right_index_left],self.ch2.i[self.ch2.right_index_left],'ko')\n # pylab.plot(self.ch2.x[self.ch2.right_index_right],self.ch2.i[self.ch2.right_index_right],'ko')\n\n x_fwhm1, i_fwhm1 = zip(self.ch1.fwhm_left,self.ch1.fwhm_right)\n x_fwhm2, i_fwhm2 = zip(self.ch2.fwhm_left,self.ch2.fwhm_right)\n\n pylab.plot(x_fwhm1, i_fwhm1,'g',ls='-',marker='x',label=\"fwhm\")\n pylab.plot(x_fwhm2, i_fwhm2,'r',ls='-',marker='x',label='fwhm')\n\n # x_fwhm1 = [self.ch1.x[self.ch1.left_index],self.ch1.x[self.ch1.right_index]]\n # y_fwhm1 = (self.ch1.i[self.ch1.left_index] + self.ch1.i[self.ch1.right_index]) / 2.\n # i_fwhm1 = [y_fwhm1,y_fwhm1]\n # pylab.plot(x_fwhm1,i_fwhm1,'g-',label=\"fwhm\")\n #\n # x_fwhm2 = [self.ch2.x[self.ch2.left_index],self.ch2.x[self.ch2.right_index]]\n # y_fwhm2 = (self.ch2.i[self.ch2.left_index] + 
self.ch2.i[self.ch2.right_index]) / 2.\n # i_fwhm2 = [y_fwhm2,y_fwhm2]\n # pylab.plot(x_fwhm2,i_fwhm2,'r-',label=\"fwhm\")\n\n #plots gaussian fit curve\n x_gauss_fit_ch1 = np.linspace(self.ch1.x_fit[0],self.ch1.x_fit[-1],100)\n i_gauss_fit_ch1 = gauss_func(self.ch1.gauss_params,x_gauss_fit_ch1)\n pylab.plot(x_gauss_fit_ch1,i_gauss_fit_ch1,'b',label=\"Peak fit\")\n\n x_gauss_fit_ch2 = np.linspace(self.ch2.x_fit[0],self.ch2.x_fit[-1],100)\n i_gauss_fit_ch2 = gauss_func(self.ch2.gauss_params,x_gauss_fit_ch2)\n pylab.plot(x_gauss_fit_ch2,i_gauss_fit_ch2,'b')\n\n #finish plot\n y_min, y_max = ax.get_ylim()\n pylab.ylim = (0,y_max)\n\n pylab.xlabel(\"Position ($\\mu$m)\")\n pylab.ylabel(\"Intensity (AU)\")\n pylab.legend(loc='upper right')\n pylab.gcf().subplots_adjust(bottom=0.15)\n\n def plot_fits(self):\n \"\"\"Plots linescan pair with fitted cortex thickness\"\"\"\n\n fig = pylab.figure()\n ax = fig.add_subplot(1,1,1)\n\n if self.ch_actin==1 or self.ch_actin==\"1\":\n color_actin = 'g'\n color_memb = 'r'\n elif self.ch_actin==2 or self.ch_actin==\"2\":\n color_actin = 'r'\n color_memb = 'g'\n else:\n raise ValueError(\"Please specify ch_actin as <<1>>, <<2>> for plotting fit!\")\n\n #plots raw data\n pylab.plot(self.memb.x,self.memb.i,'o',color=color_memb,label=\"Memb. (raw)\")\n pylab.plot(self.actin.x,self.actin.i,'o',color=color_actin,label=\"Actin (raw)\")\n\n #plots unconvolved and extracted actin linescans from fits\n x_actin_hd = np.linspace(self.actin.x[0],self.actin.x[-1],1000)\n i_actin_unconv = unconvolved([self.actin.i_in, self.i_c,\n self.actin.i_out, self.h, self.X_c],\n x_actin_hd)\n i_actin_conv = convolved([self.actin.i_in, self.i_c,\n self.actin.i_out, self.h, self.X_c, self.sigma_actin],\n x_actin_hd)\n\n pylab.plot(x_actin_hd,i_actin_unconv,ls='-',color=color_actin, label='fit')\n pylab.plot(x_actin_hd,i_actin_conv,ls='--',color=color_actin, label='fit (conv.)')\n\n pylab.axvline(x=self.memb.x_peak, color=color_memb, ls='--', label=\"Memb. 
(peak)\")\n\n #finishes plot\n y_min, y_max = ax.get_ylim()\n pylab.ylim = (0,y_max)\n\n pylab.xlabel(\"Position ($\\mu$m)\")\n pylab.ylabel(\"Intensity (AU)\")\n pylab.legend(loc='upper right')\n pylab.gcf().subplots_adjust(bottom=0.15)\n\ndef write_master_list(parent_dir,version):\n \"\"\"Writes a master data lis in the parent directory for batch mode.\n\n Args:\n parent_dir (string): path of the parent directory\n version (string): the version of the software (for naming output file)\n\n \"\"\"\n\n dir_list_path = parent_dir + '/dir_list.dat'\n subdir_list = [_[0] for _ in uf.read_file(dir_list_path)][1:]\n\n master_data = []\n for i in range(len(subdir_list)):\n data_dir = parent_dir + '/' + subdir_list[i]\n data = uf.read_file(data_dir + '/ls_data/ls_data.dat')\n if i==0:\n for line in data:\n master_data.append(line)\n else:\n for line in data[1:]:\n master_data.append(line)\n\n # print master_data\n uf.save_data_array(master_data, parent_dir + '/master_list_v%s.dat'%version)\n\ndef load_ls(ls_path,px_size=1.):\n \"\"\"Loads a linescan file\n\n Args:\n ls_path (str): path of the average linescan file to be loaded\n px_size (float): pixel size in microns\n\n Returns:\n x (numpy array): the positions (in microns)\n i (numpy array): the intensities\n\n \"\"\"\n\n ls_data = uf.read_file(ls_path)\n x = np.array([float(_[0]) for _ in ls_data]) * px_size\n i = np.array([float(_[1]) for _ in ls_data])\n return x,i\n\ndef analyze_cortex(file_ch1,file_ch2,px_size,ch_actin,sigma_actin):\n\n \"\"\"Extracts linescan parameters and coretx thickness/density\n for a pair of linescans\n\n Args:\n file_ch1 (str): the filepath for the first linescan\n file_ch2 (str): the filepath for the second linescan\n px_size (float): the pixel size for the linescans (for the whole directory)\n ch_actin (int): the channel of the actin linescan (1 or 2)\n sigma_actin (float): the sigma of the PSF for the actin channel\n\n Kwargs:\n category (str): used to keep track of different conditions in the output data file\n\n Returns:\n cortex (Cortex class): the cortex with associated attributes\n\n \"\"\"\n\n x_ch1, i_ch1 = load_ls(file_ch1,px_size=px_size)\n x_ch2, i_ch2 = load_ls(file_ch2,px_size=px_size)\n x = deepcopy(x_ch1) #the x values should be the same for both linescans!\n\n basename = file_ch1.split('/')[-1][:-4]\n print('Analyzing file pair for:', basename)\n\n # extracts data\n actin = Linescan(x,i_ch1)\n memb = Linescan(x,i_ch2)\n cortex = Cortex(actin, memb, sigma_actin, ch_actin=ch_actin)\n\n if ch_actin==1 or ch_actin==2:\n cortex.get_h_i_c()\n elif ch_actin == \"None\":\n pass\n else:\n raise ValueError(\"Please specify ch_actin as <<1>> or <<2>> for %s!\"%file_ch1)\n\n print('h =', cortex.h)\n return cortex\n\ndef analyze_ls_pair(file_ch1,file_ch2,px_size,ch_actin,sigma_actin,version):\n \"\"\"Analyzes linescans to extract cortex thickness/density\n for a single linescan pair. 
Data and plots are generated and saved\n to a new folder with same name as file_ch1\n\n Args:\n file_ch1 (str): the filepath for the first linescan\n file_ch2 (str): the filepath for the second linescan\n px_size (float): the pixel size for the linescans (for the whole directory)\n ch_actin (int): the channel of the actin linescan (1 or 2)\n sigma_actin (float): the sigma of the PSF for the actin channel\n\n \"\"\"\n\n # makes directory in data_dir for saving\n save_dir = file_ch1[:-4] + '_ls_data'\n uf.make_dir(save_dir)\n\n # makes a list of parameters to extract from cortex data\n data_to_write = [['basename', 'category',\n 'delta', 'h', 'i_c', 'density', 'X_c', 'solution',\n 'ch1.i_tot', 'ch1.H', 'ch1.x_peak', 'ch1.i_peak', 'ch1.i_in', 'ch1.i_out', 'ch1.fwhm',\n 'ch2.i_tot', 'ch2.H', 'ch2.x_peak', 'ch2.i_peak', 'ch2.i_in', 'ch2.i_out', 'ch2.fwhm'\n ]]\n\n basename = file_ch1.split('/')[-1][:-4]\n category = 'pair'\n\n #gets cortex and linescan data\n cortex = analyze_cortex(file_ch1, file_ch2, px_size, ch_actin, sigma_actin)\n\n # plots raw linescans\n cortex.plot_lss()\n pylab.savefig(save_dir + \"/\" + basename + \".png\")\n pylab.close()\n\n # plots linescans with h fits\n if cortex.h != None:\n cortex.plot_fits()\n pylab.savefig(save_dir + \"/\" + basename + \"_fit.png\")\n pylab.close()\n\n # gets extracted linescan data\n data_temp = [basename, category]\n for param in data_to_write[0][2:]:\n data_temp.append(eval(\"cortex.%s\" % param))\n data_to_write.append(data_temp)\n\n # print data_to_write\n uf.save_data_array(data_to_write, save_dir + \"/ls_data.dat\")\n\ndef analyze_dir(data_dir,px_size,category,ch_actin,sigma_actin,version):\n \"\"\" Analyzes all linescan pairs in a directory full of linescans\n\n Args:\n data_dir (str): the directory containing the linescans\n px_size (float): the pixel size for the linescans (for the whole directory)\n category (str): the category for the experiment\n ch_actin (int): the channel of the actin linescan (1 or 2)\n version (str): version number (for output filenames)\n\n \"\"\"\n\n #makes necessary directories in data_dir for saving\n save_dir = data_dir + '/ls_data'\n uf.make_dir(save_dir)\n\n #makes a list of parameters to extract from cortex data\n data_to_write = [['basename','category',\n 'delta', 'h', 'i_c', 'density', 'X_c', 'solution',\n 'ch1.i_tot','ch1.H','ch1.x_peak','ch1.i_peak','ch1.i_in','ch1.i_out','ch1.fwhm',\n 'ch2.i_tot','ch2.H','ch2.x_peak','ch2.i_peak','ch2.i_in','ch2.i_out','ch2.fwhm'\n ]]\n\n #gets and sorts list of average linescans\n linescan_list = [x for x in os.listdir(data_dir) if 'average.dat' in x]\n\n for _ in linescan_list:\n print(_)\n print(re.search('frame' + '_([0-9]+)_', _).group(1))\n linescan_list = sort_ls_list(linescan_list)\n\n\n #extracts linescan parameters and thickness/density\n for i in range(int(len(linescan_list)/2)):\n\n file_ch1 = data_dir + '/' + linescan_list[2*i]\n file_ch2 = data_dir + '/' + linescan_list[2*i + 1]\n basename = file_ch1.split('/')[-1][:-4]\n\n cortex = analyze_cortex(file_ch1,file_ch2,px_size,ch_actin,sigma_actin)\n\n # plots raw linescans\n cortex.plot_lss()\n pylab.savefig(save_dir + \"/\" + basename + \".png\")\n pylab.close()\n\n # plots linescans with h fits\n if cortex.h != None:\n cortex.plot_fits()\n pylab.savefig(save_dir + \"/\" + basename + \"_fit.png\")\n pylab.close()\n\n # gets extracted linescan data\n data_temp = [basename,category]\n for param in data_to_write[0][2:]:\n data_temp.append(eval(\"cortex.%s\"%param))\n data_to_write.append(data_temp)\n\n # 
print data_to_write\n uf.save_data_array(data_to_write,save_dir + \"/ls_data.dat\")\n\n\ndef main():\n \"\"\"__main__ function\"\"\"\n\n version = '5'\n\n #set up root for asking questions\n # root = Tk() #moved this up to the imports\n root.withdraw()\n\n #chooses analysis mode\n mode = askinteger(title=\"Analysis Mode Selection\",\n prompt=\"Please enter:\\n1 for pairwise analysis or\\n2 for batch analysis\",\n minvalue=1,maxvalue=2)\n\n if mode==1:\n\n ch1_path = askopenfilename(title='Select an average linescan file for channel 1',\n filetypes=[(\"dat\", \"*.dat\")],\n initialdir='.',\n initialfile=\"\")\n\n ch2_path = askopenfilename(title='Select an average linescan file for channel 2',\n filetypes=[(\"dat\", \"*.dat\")],\n initialdir='/'.join(ch1_path.split('/')[:-1]),\n initialfile=ch1_path.split('/')[-1])\n\n px_size = askfloat(title='Pixel Size',prompt='Please enter your pixel size')\n ch_actin = askinteger(title='Actin Channel',prompt='Please enter the actin channel',\n minvalue=1, maxvalue=2)\n sigma_actin = askfloat(title='Actin Sigma',prompt='Please enter the sigma value\\nfor the PSF for the actin channel\\n(in microns)')\n\n analyze_ls_pair(ch1_path,ch2_path,px_size,ch_actin,sigma_actin,version)\n\n if mode==2:\n\n parent_dir = askdirectory(title='Select the parent directory (be sure it contains dir_list.dat!)',\n initialdir=os.path.split(os.path.realpath(__file__))[0])\n # parent_dir = './test_data'\n dir_list = uf.get_dict_list(uf.read_file(parent_dir + '/dir_list.dat'))\n\n for line in dir_list:\n\n sub_dir = line['sub_dir']\n px_size = float(line['px_size'])\n category = line['category']\n ch_actin = int(line['ch_actin'])\n sigma_actin = float(line['sigma_actin'])\n data_dir = parent_dir + '/' + sub_dir\n\n print(data_dir)\n\n analyze_dir(data_dir,px_size,category,ch_actin,sigma_actin,version)\n\n write_master_list(parent_dir,version)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.log",
"numpy.sum",
"scipy.stats.norm.cdf",
"numpy.linspace",
"scipy.exp",
"numpy.abs",
"numpy.arange",
"scipy.optimize.leastsq",
"numpy.mean",
"numpy.array",
"numpy.trapz"
]
] |
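Note on the cortex record above: `Cortex.residuals` relies on the analytic fact that, for the convolved box model defined in `convolved()`, the intensity peak sits at x_peak = X_c - (sigma^2 / h) * log((i_out - i_c) / (i_in - i_c)). A quick numerical check of that relation on a synthetic linescan (my own sketch; parameter values are arbitrary):

import numpy as np
from scipy import stats

i_in, i_c, i_out, h, X_c, sigma = 10.0, 80.0, 2.0, 0.2, 5.0, 0.17
x = np.linspace(3.0, 7.0, 4001)
i = (i_in
     + (i_c - i_in) * stats.norm.cdf(x - X_c + h / 2.0, 0.0, sigma)
     + (i_out - i_c) * stats.norm.cdf(x - X_c - h / 2.0, 0.0, sigma))
x_peak_pred = X_c - (sigma ** 2 / h) * np.log((i_out - i_c) / (i_in - i_c))
print(x[np.argmax(i)], x_peak_pred)  # the two values should agree closely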
fcr3/lcnn | [
"aed9edc4ccae9579858484a60dc52754f184c285"
] | [
"lcnn/models/line_vectorizer_ov.py"
] | [
"import itertools\nimport random\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom lcnn.config import M\n\nFEATURE_DIM = 8\n\n\nclass LineVectorizer(nn.Module):\n def __init__(self, fc1, fc2):\n super().__init__()\n \n lambda_ = torch.linspace(0, 1, M.n_pts0)[:, None]\n self.register_buffer(\"lambda_\", lambda_)\n self.do_static_sampling = M.n_stc_posl + M.n_stc_negl > 0\n\n self.fc1 = fc1\n scale_factor = M.n_pts0 // M.n_pts1\n\n self.pooling = nn.MaxPool1d(scale_factor, scale_factor)\n self.fc2 = fc2\n self.loss = nn.BCEWithLogitsLoss(reduction=\"none\")\n\n def forward(self, result, input_dict):\n h = result[\"preds\"]\n\n x = self.fc1.infer({\n next(iter(self.fc1.inputs)): result[\"feature\"]\n })[next(iter(self.fc1.outputs))]\n n_batch, n_channel, row, col = x.shape\n\n xs, ys, fs, ps, idx, jcs = [], [], [], [], [0], []\n for i, meta in enumerate(input_dict[\"meta\"]):\n p, label, feat, jc = self.sample_lines(\n meta, h[\"jmap\"][i], h[\"joff\"][i]\n )\n\n ys.append(label)\n jcs.append(jc)\n ps.append(p)\n fs.append(feat)\n\n p = p[:, 0:1, :] * self.lambda_ + p[:, 1:2, :] * (1 - self.lambda_) - 0.5\n p = p.reshape(-1, 2) # [N_LINE x N_POINT, 2_XY]\n px, py = p[:, 0].contiguous(), p[:, 1].contiguous()\n px0 = np.clip(np.floor(px), a_min=0, a_max=127)\n py0 = np.clip(np.floor(py), a_min=0, a_max=127)\n px1 = np.clip(px0 + 1, a_min=0, a_max=127)\n py1 = np.clip(py0 + 1, a_min=0, a_max=127)\n px0l, py0l, px1l, py1l = px0, py0, px1, py1\n\n # xp: [N_LINE, N_CHANNEL, N_POINT]\n xp = (\n (\n x[i, :, px0l, py0l] * (px1 - px) * (py1 - py)\n + x[i, :, px1l, py0l] * (px - px0) * (py1 - py)\n + x[i, :, px0l, py1l] * (px1 - px) * (py - py0)\n + x[i, :, px1l, py1l] * (px - px0) * (py - py0)\n )\n .reshape(n_channel, -1, M.n_pts0)\n .permute(1, 0, 2)\n )\n \n # for deducing input shape of pooling\n # print(\"pooling expected input:\", xp.shape)\n \n xp = self.pooling(xp)\n xs.append(xp)\n idx.append(idx[-1] + xp.shape[0])\n\n x, y = torch.cat(xs), torch.cat(ys)\n f = torch.cat(fs)\n x = x.reshape(-1, M.n_pts1 * M.dim_loi)\n x = torch.cat([x, f], 1)\n \n # for deducing input shape of fc2\n # print(\"pooling expected input:\", x.shape)\n \n x = self.fc2.infer({\n next(iter(self.fc2.inputs)): x.detach().numpy()\n })[next(iter(self.fc2.outputs))]\n x = x.flatten()\n # x = self.fc2(x).flatten()\n\n if True:\n p = torch.cat(ps)\n s = torch.sigmoid(x)\n b = s > 0.5\n lines = []\n score = []\n for i in range(n_batch):\n p0 = p[idx[i] : idx[i + 1]]\n s0 = s[idx[i] : idx[i + 1]]\n mask = b[idx[i] : idx[i + 1]]\n p0 = p0[mask]\n s0 = s0[mask]\n if len(p0) == 0:\n lines.append(torch.zeros([1, M.n_out_line, 2, 2], device=p.device))\n score.append(torch.zeros([1, M.n_out_line], device=p.device))\n else:\n arg = torch.argsort(s0, descending=True)\n p0, s0 = p0[arg], s0[arg]\n lines.append(p0[None, torch.arange(M.n_out_line) % len(p0)])\n score.append(s0[None, torch.arange(M.n_out_line) % len(s0)])\n for j in range(len(jcs[i])):\n if len(jcs[i][j]) == 0:\n jcs[i][j] = torch.zeros([M.n_out_junc, 2], device=p.device)\n jcs[i][j] = jcs[i][j][\n None, torch.arange(M.n_out_junc) % len(jcs[i][j])\n ]\n result[\"preds\"][\"lines\"] = torch.cat(lines)\n result[\"preds\"][\"score\"] = torch.cat(score)\n result[\"preds\"][\"juncs\"] = torch.cat([jcs[i][0] for i in range(n_batch)])\n if len(jcs[i]) > 1:\n result[\"preds\"][\"junts\"] = torch.cat(\n [jcs[i][1] for i in range(n_batch)]\n )\n\n return result\n\n def sample_lines(self, meta, 
jmap, joff):\n with torch.no_grad():\n junc = meta[\"junc\"] # [N, 2]\n jtyp = meta[\"jtyp\"] # [N]\n Lpos = meta[\"Lpos\"]\n Lneg = meta[\"Lneg\"]\n\n n_type = jmap.shape[0]\n jmap = non_maximum_suppression(jmap).reshape(n_type, -1)\n joff = joff.reshape(n_type, 2, -1)\n max_K = M.n_dyn_junc // n_type\n N = len(junc)\n \n K = min(int((jmap > M.eval_junc_thres).float().sum().item()), max_K)\n \n if K < 2:\n K = 2\n device = jmap.device\n\n # index: [N_TYPE, K]\n score, index = torch.topk(jmap, k=K)\n y = (index / 128).float() + torch.gather(joff[:, 0], 1, index) + 0.5\n x = (index % 128).float() + torch.gather(joff[:, 1], 1, index) + 0.5\n\n # xy: [N_TYPE, K, 2]\n print(x)\n print(y)\n print(y[..., None])\n print(x[..., None])\n \n xy = torch.cat([y[..., None], x[..., None]], dim=-1)\n xy_ = xy[..., None, :]\n del x, y, index\n\n # dist: [N_TYPE, K, N]\n dist = torch.sum((xy_ - junc) ** 2, -1)\n cost, match = torch.min(dist, -1)\n\n # xy: [N_TYPE * K, 2]\n # match: [N_TYPE, K]\n for t in range(n_type):\n match[t, jtyp[match[t]] != t] = N\n match[cost > 1.5 * 1.5] = N\n match = match.flatten()\n\n _ = torch.arange(n_type * K, device=device)\n u, v = torch.meshgrid(_, _)\n u, v = u.flatten(), v.flatten()\n up, vp = match[u], match[v]\n label = Lpos[up, vp]\n\n c = (u < v).flatten()\n\n # sample lines\n u, v, label = u[c], v[c], label[c]\n xy = xy.reshape(n_type * K, 2)\n xyu, xyv = xy[u], xy[v]\n\n u2v = xyu - xyv\n u2v /= torch.sqrt((u2v ** 2).sum(-1, keepdim=True)).clamp(min=1e-6)\n feat = torch.cat(\n [\n xyu / 128 * M.use_cood,\n xyv / 128 * M.use_cood,\n u2v * M.use_slop,\n (u[:, None] > K).float(),\n (v[:, None] > K).float(),\n ],\n 1,\n )\n line = torch.cat([xyu[:, None], xyv[:, None]], 1)\n\n xy = xy.reshape(n_type, K, 2)\n jcs = [xy[i, score[i] > 0.03] for i in range(n_type)]\n return line, label.float(), feat, jcs\n\n\ndef non_maximum_suppression(a):\n ap = F.max_pool2d(a, 3, stride=1, padding=1)\n mask = (a == ap).float().clamp(min=0.0)\n return a * mask\n\n\nclass Bottleneck1D(nn.Module):\n def __init__(self, inplanes, outplanes):\n super(Bottleneck1D, self).__init__()\n\n planes = outplanes // 2\n self.op = nn.Sequential(\n nn.BatchNorm1d(inplanes),\n nn.ReLU(inplace=True),\n nn.Conv1d(inplanes, planes, kernel_size=1),\n nn.BatchNorm1d(planes),\n nn.ReLU(inplace=True),\n nn.Conv1d(planes, planes, kernel_size=3, padding=1),\n nn.BatchNorm1d(planes),\n nn.ReLU(inplace=True),\n nn.Conv1d(planes, outplanes, kernel_size=1),\n )\n\n def forward(self, x):\n return x + self.op(x)\n"
] | [
[
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.topk",
"numpy.clip",
"torch.nn.MaxPool1d",
"torch.arange",
"torch.argsort",
"torch.nn.functional.max_pool2d",
"torch.nn.BatchNorm1d",
"torch.linspace",
"torch.sigmoid",
"torch.min",
"numpy.floor",
"torch.nn.Conv1d",
"torch.gather",
"torch.nn.ReLU",
"torch.meshgrid"
]
] |
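The `xp` tensor in the LineVectorizer record above is computed by bilinear interpolation of the backbone feature map at fractional points sampled along each line. A minimal standalone sketch of that lookup, assuming a [C, H, W] feature map; `bilinear_sample` is an illustrative name, not an lcnn function:

import torch

def bilinear_sample(feature_map, px, py):
    # feature_map: [C, H, W]; px, py: fractional (col, row) coordinates, shape [N]
    H, W = feature_map.shape[1], feature_map.shape[2]
    px0 = px.floor().clamp(0, W - 1)
    py0 = py.floor().clamp(0, H - 1)
    px1 = (px0 + 1).clamp(0, W - 1)   # edge clamp mirrors the np.clip(..., 0, 127) above
    py1 = (py0 + 1).clamp(0, H - 1)
    x0, y0, x1, y1 = px0.long(), py0.long(), px1.long(), py1.long()
    # Weight each of the four neighbouring pixels by the opposite rectangle's area.
    return (feature_map[:, y0, x0] * (px1 - px) * (py1 - py)
            + feature_map[:, y0, x1] * (px - px0) * (py1 - py)
            + feature_map[:, y1, x0] * (px1 - px) * (py - py0)
            + feature_map[:, y1, x1] * (px - px0) * (py - py0))

fm = torch.arange(32, dtype=torch.float32).reshape(2, 4, 4)
vals = bilinear_sample(fm, torch.tensor([1.5]), torch.tensor([2.25]))  # shape [2, 1]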
zsync/gomoku-alphazero | [
"82262554e35562533de2f296e4553f4f1f455477"
] | [
"convert.py"
] | [
"import argparse\n\nimport h5py\nimport numpy as np\n\nfrom config import CHANNELS\nfrom policy import PolicyValueModelResNet as PolicyValueModel\n\n\ndef convert_pretrained_weights(\n src_weights_file,\n dst_weights_file,\n src_width=8,\n dst_width=15,\n src_height=8,\n dst_height=15,\n):\n model_src = PolicyValueModel(src_width, src_height)\n model_src.build(input_shape=(None, src_width, src_height, CHANNELS))\n model_src.load_weights(src_weights_file)\n\n model_dst = PolicyValueModel(dst_width, dst_height)\n model_dst.build(input_shape=(None, dst_width, dst_height, CHANNELS))\n\n assert len(model_src.cnn_layers) == len(model_dst.cnn_layers)\n for i in range(len(model_src.cnn_layers)):\n layer_src = model_src.cnn_layers[i]\n layer_dst = model_dst.cnn_layers[i]\n layer_dst.set_weights(layer_src.get_weights())\n\n model_dst.save_weights(dst_weights_file)\n\n\ndef convert_pretrained_buffer(\n src_buffer_file,\n dst_buffer_file,\n src_width=8,\n dst_width=15,\n src_height=8,\n dst_height=15,\n):\n assert dst_height >= src_height\n assert dst_width >= src_width\n f_src = h5py.File(src_buffer_file, \"r\")\n f_dst = h5py.File(dst_buffer_file, \"w\")\n\n states_src = f_src[\"states\"][...]\n mcts_probs_src = f_src[\"mcts_probs\"][...]\n\n buffer_length = states_src.shape[0]\n start_width_idx = (dst_width - src_width) // 2\n start_height_idx = (dst_height - src_height) // 2\n\n states_dst = np.zeros(\n shape=(buffer_length, dst_width, dst_height, CHANNELS),\n dtype=states_src.dtype,\n )\n states_dst[\n :,\n start_width_idx : start_width_idx + src_width,\n start_height_idx : start_height_idx + src_height,\n ] = states_src[:]\n\n # 最后一根轴只能是全 1 或全 0\n states_dst[:, :, :, -1] = states_src[:, 0:1, 0:1, -1]\n\n mcts_probs_dst = np.zeros(\n shape=(buffer_length, dst_width * dst_width),\n dtype=mcts_probs_src.dtype,\n )\n mcts_probs_dst = mcts_probs_dst.reshape((buffer_length, dst_width, dst_width))\n mcts_probs_dst[\n :,\n start_width_idx : start_width_idx + src_width,\n start_height_idx : start_height_idx + src_height,\n ] = mcts_probs_src[:].reshape((buffer_length, src_width, src_width))\n mcts_probs_dst = mcts_probs_dst.reshape((buffer_length, dst_width * dst_width))\n\n f_dst[\"states\"] = states_dst\n f_dst[\"mcts_probs\"] = mcts_probs_dst\n f_dst[\"rewards\"] = f_src[\"rewards\"][...]\n\n f_src.close()\n f_dst.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Gomoku AlphaZero Weights Converter\")\n parser.add_argument(\"--src-width\", default=8, type=int, help=\"源棋盘水平宽度\")\n parser.add_argument(\"--src-height\", default=8, type=int, help=\"源棋盘竖直宽度\")\n parser.add_argument(\"--dst-width\", default=15, type=int, help=\"目标棋盘水平宽度\")\n parser.add_argument(\"--dst-height\", default=15, type=int, help=\"目标棋盘竖直宽度\")\n parser.add_argument(\"--src-weights\", default=\"./data/model-8x8#5.h5\", help=\"源预训练权重存储位置\")\n parser.add_argument(\"--dst-weights\", default=\"./data/model-15x15#5.h5\", help=\"目标预训练权重存储位置\")\n parser.add_argument(\"--src-buffer\", default=\"./data/buffer-8x8#5.h5\", help=\"源经验池存储位置\")\n parser.add_argument(\"--dst-buffer\", default=\"./data/buffer-15x15#5.h5\", help=\"目标经验池存储位置\")\n args = parser.parse_args()\n\n # 小棋盘预训练数据迁移到大棋盘\n convert_pretrained_weights(\n args.src_weights,\n args.dst_weights,\n src_width=args.src_width,\n dst_width=args.dst_width,\n src_height=args.src_height,\n dst_height=args.dst_height,\n )\n\n convert_pretrained_buffer(\n args.src_buffer,\n args.dst_buffer,\n src_width=args.src_width,\n dst_width=args.dst_width,\n 
src_height=args.src_height,\n dst_height=args.dst_height,\n )\n"
] | [
[
"numpy.zeros"
]
] |
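`convert_pretrained_buffer` in the record above works by centring each small-board plane inside a zeroed large board. A minimal numpy sketch of that padding step; `center_pad` is an illustrative name, not part of the repository:

import numpy as np

def center_pad(plane, dst_h, dst_w):
    # Embed a (src_h, src_w) plane in the middle of a zeroed (dst_h, dst_w) board.
    src_h, src_w = plane.shape
    top = (dst_h - src_h) // 2
    left = (dst_w - src_w) // 2
    out = np.zeros((dst_h, dst_w), dtype=plane.dtype)
    out[top:top + src_h, left:left + src_w] = plane
    return out

small = np.ones((8, 8))
big = center_pad(small, 15, 15)
assert big.shape == (15, 15) and big.sum() == small.sum()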
JBGreisman/careless | [
"8f6c0859973757d11b26b65d9dc51d443030aa70"
] | [
"careless/io/manager.py"
] | [
"import numpy as np\nimport tensorflow as tf\nimport reciprocalspaceship as rs\nfrom .asu import ReciprocalASU,ReciprocalASUCollection\nfrom careless.models.base import BaseModel\nfrom careless.models.priors.wilson import WilsonPrior\n\nclass DataManager():\n \"\"\"\n This class comprises various data manipulation methods as well as methods to aid in model construction.\n \"\"\"\n parser = None\n def __init__(self, inputs, asu_collection, parser=None):\n \"\"\"\n Parameters\n ----------\n inputs : tuple\n asu_collection : ReciprocalASUCollection\n parser : Namespace (optional)\n A Namespace instance created by careless.parser.parser.parse_args()\n \"\"\"\n self.inputs = inputs\n self.asu_collection = asu_collection\n self.parser = parser\n\n @classmethod\n def from_pickle(cls, filename):\n import pickle\n with open(filename, 'rb') as f:\n dm = pickle.load(f)\n return dm\n\n @classmethod\n def from_mtz_files(cls, filenames, formatter):\n return cls.from_datasets((rs.read_mtz(i) for i in filenames), formatter)\n\n @classmethod\n def from_stream_files(cls, filenames, formatter):\n return cls.from_datasets((rs.read_crystfel(i) for i in filenames), formatter)\n\n def get_wilson_prior(self, b=None):\n \"\"\" Construct a wilson prior with an optional temperature factor, b, appropriate for self.asu_collection. \"\"\"\n if b is None:\n sigma = 1.\n elif isinstance(b, float):\n sigma = np.exp(-0.25 * b * self.asu_collection.dHKL**-2.)\n else:\n raise ValueError(f\"parameter b has type{type(b)} but float was expected\")\n\n return WilsonPrior(\n self.asu_collection.centric,\n self.asu_collection.multiplicity,\n sigma,\n )\n\n def get_tf_dataset(self, inputs=None):\n \"\"\"\n Pack a dataset in the way that keras and careless expect.\n\n Parameters\n ----------\n inputs : tuple (optional)\n If None, self.inputs will be used\n \"\"\"\n if inputs is None:\n inputs = self.inputs\n\n inputs = tuple(inputs)\n iobs = BaseModel.get_intensities(inputs)\n sigiobs = BaseModel.get_uncertainties(inputs)\n packed = (inputs, iobs, sigiobs)\n tfds = tf.data.Dataset.from_tensor_slices(packed)\n return tfds.batch(len(iobs))\n\n def get_predictions(self, model, inputs=None):\n \"\"\" \n Extract results from a surrogate_posterior.\n\n Parameters\n ----------\n model : VariationalMergingModel\n A merging model from careless\n inputs : tuple (optional)\n Inputs for which to make the predictions if None, self.inputs is used.\n\n Returns\n -------\n predictions : tuple\n A tuple of rs.DataSet objects containing the predictions for each \n ReciprocalASU contained in self.asu_collection\n \"\"\"\n if inputs is None:\n inputs = self.inputs\n\n refl_id = BaseModel.get_refl_id(inputs)\n iobs = BaseModel.get_intensities(inputs).flatten()\n sig_iobs = BaseModel.get_uncertainties(inputs).flatten()\n asu_id,H = self.asu_collection.to_asu_id_and_miller_index(refl_id)\n #ipred = model(inputs)\n ipred,sigipred = model.prediction_mean_stddev(inputs)\n\n h,k,l = H.T\n results = ()\n for i,asu in enumerate(self.asu_collection):\n idx = asu_id == i\n idx = idx.flatten()\n output = rs.DataSet({\n 'H' : h[idx],\n 'K' : k[idx],\n 'L' : l[idx],\n 'Iobs' : iobs[idx],\n 'SigIobs' : sig_iobs[idx],\n 'Ipred' : ipred[idx],\n 'SigIpred' : sigipred[idx],\n }, \n cell=asu.cell, \n spacegroup=asu.spacegroup,\n merged=False,\n ).infer_mtz_dtypes().set_index(['H', 'K', 'L'])\n results += (output, )\n return results\n\n def get_results(self, surrogate_posterior, inputs=None, output_parameters=True):\n \"\"\" \n Extract results from a 
surrogate_posterior.\n\n Parameters\n ----------\n surrogate_posterior : tfd.Distribution\n A tensorflow_probability distribution or similar object with `mean` and `stddev` methods\n inputs : tuple (optional)\n Optionally use a different object from self.inputs to compute the redundancy of reflections.\n output_parameters : bool (optional)\n If True, output the parameters of the surrogate distribution in addition to the \n moments. \n\n Returns\n -------\n results : tuple\n A tuple of rs.DataSet objects containing the results corresponding to each \n ReciprocalASU contained in self.asu_collection\n \"\"\"\n if inputs is None:\n inputs = self.inputs\n F = surrogate_posterior.mean().numpy()\n SigF = surrogate_posterior.stddev().numpy()\n params = None\n if output_parameters:\n params = {}\n for k in sorted(surrogate_posterior.parameter_properties()):\n v = surrogate_posterior.parameters[k]\n numpify = lambda x : tf.convert_to_tensor(x).numpy()\n params[k] = numpify(v).flatten() * np.ones(len(F), dtype='float32')\n asu_id,H = self.asu_collection.to_asu_id_and_miller_index(np.arange(len(F)))\n h,k,l = H.T\n refl_id = BaseModel.get_refl_id(inputs)\n N = np.bincount(refl_id.flatten(), minlength=len(F)).astype('float32')\n results = ()\n for i,asu in enumerate(self.asu_collection):\n idx = asu_id == i\n idx = idx.flatten()\n output = rs.DataSet({\n 'H' : h[idx],\n 'K' : k[idx],\n 'L' : l[idx],\n 'F' : F[idx],\n 'SigF' : SigF[idx],\n 'N' : N[idx],\n }, \n cell=asu.cell, \n spacegroup=asu.spacegroup,\n merged=True,\n ).infer_mtz_dtypes().set_index(['H', 'K', 'L'])\n if params is not None:\n for key in sorted(params.keys()):\n val = params[key]\n output[key] = rs.DataSeries(val[idx], index=output.index, dtype='R')\n\n # Remove unobserved refls\n output = output[output.N > 0] \n\n # Reformat anomalous data\n if asu.anomalous:\n output = output.unstack_anomalous()\n # PHENIX will expect the sf / error keys in a particular order.\n anom_keys = ['F(+)', 'SigF(+)', 'F(-)', 'SigF(-)', 'N(+)', 'N(-)']\n reorder = anom_keys + [key for key in output if key not in anom_keys]\n output = output[reorder]\n\n results += (output, )\n return results\n\n # <-- start xval data splitting methods\n def split_mono_data_by_mask(self, test_idx):\n \"\"\"\n Method for splitting mono data given a boolean mask. \n\n Parameters\n ----------\n test_idx : array (boolean)\n Boolean array with length of inputs.\n\n Returns\n -------\n train : tuple\n test : tuple\n \"\"\"\n test,train = (),()\n for inp in self.inputs:\n test += (inp[ test_idx.flatten(),...] ,)\n train += (inp[~test_idx.flatten(),...] ,)\n return train, test\n\n def split_data_by_refl(self, test_fraction=0.5):\n \"\"\"\n Method for splitting data given a boolean mask. 
\n\n Parameters\n ----------\n test_fraction : float (optional)\n The fraction of reflections which will be reserved for testing.\n\n Returns\n -------\n train : tuple\n test : tuple\n \"\"\"\n if BaseModel.is_laue(self.inputs):\n harmonic_id = BaseModel.get_harmonic_id(self.inputs)\n test_idx = (np.random.random(harmonic_id.max()+1) <= test_fraction)[harmonic_id]\n train, test = self.split_laue_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test\n\n test_idx = np.random.random(len(self.inputs[0])) <= test_fraction\n train, test = self.split_mono_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test\n\n def split_laue_data_by_mask(self, test_idx):\n \"\"\"\n Method for splitting laue data given a boolean mask. \n This method will split up the data and alter the harmonic_id\n column to reflect the decrease in size of the array. \n\n Parameters\n ----------\n test_idx : array (boolean)\n Boolean array with length of inputs.\n\n Returns\n -------\n train : tuple\n test : tuple\n \"\"\"\n harmonic_id = BaseModel.get_harmonic_id(self.inputs)\n\n # Let us just test that the boolean mask is valid for these data.\n # If it does not split observations, isect should be empty\n isect = np.intersect1d(\n harmonic_id[test_idx].flatten(),\n harmonic_id[~test_idx].flatten(),\n )\n if len(isect) > 0:\n raise ValueError(f\"test_idx splits harmonic observations with harmonic_id : {isect}\")\n\n def split(inputs, idx):\n harmonic_id = BaseModel.get_harmonic_id(inputs)\n\n result = ()\n uni,inv = np.unique(harmonic_id[idx], return_inverse=True)\n for i,v in enumerate(inputs):\n name = BaseModel.get_name_by_index(i)\n if name in ('intensities', 'uncertainties'):\n v = v[uni]\n v = np.pad(v, [[0, len(inv) - len(v)], [0, 0]], constant_values=1.)\n elif name == 'harmonic_id':\n v = inv[:,None]\n else:\n v = v[idx.flatten(),...]\n result += (v ,)\n return result\n\n return split(self.inputs, ~test_idx), split(self.inputs, test_idx)\n\n def split_data_by_image(self, test_fraction=0.5):\n \"\"\"\n Method for splitting data given a boolean mask. \n This method will designate full images as belonging to the \n train or test sets. \n\n Parameters\n ----------\n test_fraction : float (optional)\n The fraction of images which will be reserved for testing.\n\n Returns\n -------\n train : tuple\n test : tuple\n \"\"\"\n image_id = BaseModel.get_image_id(self.inputs)\n test_idx = np.random.random(image_id.max()+1) <= test_fraction\n\n # Low image count edge case (mostly just for testing purposes)\n if True not in test_idx:\n test_idx[0] = True\n elif False not in test_idx:\n test_idx[0] = False\n \n test_idx = test_idx[image_id]\n if BaseModel.is_laue(self.inputs):\n train, test = self.split_laue_data_by_mask(test_idx)\n else:\n train, test = self.split_mono_data_by_mask(test_idx)\n\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test\n # --> end xval data splitting methods\n\n def build_model(self, parser=None, surrogate_posterior=None, prior=None, likelihood=None, scaling_model=None, mc_sample_size=None):\n \"\"\"\n Build the model specified in parser, a careless.parser.parser.parse_args() result. Optionally override any of the \n parameters taken by the VariationalMergingModel constructor.\n The `parser` parameter is required if self.parser is not set. 
\n \"\"\"\n from careless.models.merging.surrogate_posteriors import TruncatedNormal\n from careless.models.merging.variational import VariationalMergingModel\n from careless.models.scaling.image import HybridImageScaler,ImageScaler\n from careless.models.scaling.nn import MLPScaler\n if parser is None:\n parser = self.parser\n if parser is None:\n raise ValueError(\"No parser supplied, but self.parser is unset\")\n\n if parser.type == 'poly':\n if parser.refine_uncertainties:\n from careless.models.likelihoods.laue import NormalEv11Likelihood as NormalLikelihood\n from careless.models.likelihoods.laue import StudentTEv11Likelihood as StudentTLikelihood\n else:\n from careless.models.likelihoods.laue import NormalLikelihood,StudentTLikelihood\n elif parser.type == 'mono':\n if parser.refine_uncertainties:\n from careless.models.likelihoods.mono import NormalEv11Likelihood as NormalLikelihood\n from careless.models.likelihoods.mono import StudentTEv11Likelihood as StudentTLikelihood\n else:\n from careless.models.likelihoods.mono import NormalLikelihood,StudentTLikelihood\n\n if prior is None:\n prior = self.get_wilson_prior(parser.wilson_prior_b)\n loc,scale = prior.mean(),prior.stddev()/10.\n low = (1e-32 * ~self.asu_collection.centric).astype('float32')\n if surrogate_posterior is None:\n surrogate_posterior = TruncatedNormal.from_loc_and_scale(loc, scale, low)\n\n if likelihood is None:\n dof = parser.studentt_likelihood_dof\n if dof is None:\n likelihood = NormalLikelihood()\n else:\n likelihood = StudentTLikelihood(dof)\n\n if scaling_model is None:\n mlp_width = parser.mlp_width\n if mlp_width is None:\n mlp_width = BaseModel.get_metadata(self.inputs).shape[-1]\n\n if parser.image_layers > 0:\n from careless.models.scaling.image import NeuralImageScaler\n n_images = np.max(BaseModel.get_image_id(self.inputs)) + 1\n scaling_model = NeuralImageScaler(\n parser.image_layers,\n n_images,\n parser.mlp_layers,\n mlp_width,\n )\n else:\n mlp_scaler = MLPScaler(parser.mlp_layers, mlp_width)\n if parser.use_image_scales:\n n_images = np.max(BaseModel.get_image_id(self.inputs)) + 1\n image_scaler = ImageScaler(n_images)\n scaling_model = HybridImageScaler(mlp_scaler, image_scaler)\n else:\n scaling_model = mlp_scaler\n\n model = VariationalMergingModel(surrogate_posterior, prior, likelihood, scaling_model, parser.mc_samples)\n\n opt = tf.keras.optimizers.Adam(\n parser.learning_rate,\n parser.beta_1,\n parser.beta_2,\n )\n\n model.compile(opt)\n return model\n"
] | [
[
"tensorflow.convert_to_tensor",
"numpy.unique",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.optimizers.Adam",
"numpy.exp"
]
] |
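`get_wilson_prior` in the record above scales the Wilson prior by the Debye-Waller factor sigma = exp(-B / (4 d^2)). A short numeric sketch of that one line, with illustrative values for the temperature factor b and the resolutions dHKL:

import numpy as np

b = 20.0                          # isotropic temperature factor, Angstrom^2 (illustrative)
dHKL = np.array([3.0, 2.0, 1.5])  # reflection resolutions in Angstrom (illustrative)
sigma = np.exp(-0.25 * b * dHKL**-2.0)
# Higher-resolution reflections (smaller d) get a smaller prior scale:
# sigma is approximately [0.57, 0.29, 0.11]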
COVID-19-Causal-Reasoning/BEL2SCM | [
"8bc92c6016fdd0cbccd6c3c96ae3c9106b01446f"
] | [
"tests/test_plots_bel2scm.py"
] | [
"\"\"\"\n------------------CODE DESCRIPTION----------------------------------\n This is the test file which stores experiments to generate data\n for plots in the paper that were generated using bel2scm algorithm.\n\n Check test_bel2scm.py to see unit tests for usability/debugging.\n\n Check test_plots_known_parameters_scm.py\n to see experiments that were used to generate data from SCM with known parameters.\n\n All dataframes generated for this paper are in Tests/Data folder.\n\"\"\"\n\nimport unittest\n\nfrom bel2scm.neurips_bel2scm.scm import SCM\nfrom bel2scm.neurips_bel2scm.utils import json_load\nfrom bel2scm.neurips_bel2scm.utils import save_scm_object\nfrom bel2scm.neurips_bel2scm.utils import load_scm_object\nimport torch\nimport pandas as pd\nimport time\nimport numpy as np\nfrom torch import tensor\n\nclass TestSCM(unittest.TestCase):\n\n def test_igf_intervention_on_ras(self):\n \"\"\"\n Description: This experiment gets causal effect on erk by\n intervening on mek for igf graph using bel2scm algorithm\n \"\"\"\n\n bel_file_path = \"data/igf.json\"\n config_file_path = \"data/COVID-19-config.json\"\n data_file_path = \"data/observational_igf.csv\"\n\n scm = SCM(bel_file_path, config_file_path, data_file_path)\n\n exogenous_noise = scm.exogenous_dist_dict\n condition_data = scm.model(exogenous_noise)\n print(condition_data)\n intervention_data = {\n \"a(p(Ras))\": 30.0\n }\n\n do_model = scm.intervention(intervention_data)\n samples = [do_model(exogenous_noise) for _ in range(5000)]\n df = pd.DataFrame(samples)\n for col in df.columns:\n for i in range(len(df)):\n if torch.is_tensor(df[col][i]):\n df[col][i] = df[col][i].item()\n df2 = pd.read_csv(\"data/bel2scm_samples_igf.csv\")\n erk_diff = df[\"a(p(Erk))\"] - df2[\"a(p(Erk))\"]\n erk_diff.to_csv(\"data/erk_do_ras_30_minus_erk.csv\")\n df.to_csv(\"data/intervention_samples_igf.csv\")\n self.assertTrue(True, True)\n\n def test_igf_intervention_on_mek(self):\n\n bel_file_path = \"data/igf.json\"\n config_file_path = \"data/COVID-19-config.json\"\n data_file_path = \"data/observational_igf.csv\"\n output_pickle_object_file = \"igf_scm.pkl\"\n\n scm = SCM(bel_file_path, config_file_path, data_file_path)\n\n exogenous_noise = scm.exogenous_dist_dict\n condition_data = scm.model(exogenous_noise)\n print(condition_data)\n # target = \"a(p(Erk))\"\n intervention_data = {\n \"a(p(Mek))\": 40.0\n }\n\n do_model = scm.intervention(intervention_data)\n samples = [do_model(exogenous_noise) for _ in range(5000)]\n df = pd.DataFrame(samples)\n for col in df.columns:\n for i in range(len(df)):\n if torch.is_tensor(df[col][i]):\n df[col][i] = df[col][i].item()\n df2 = pd.read_csv(\"data/bel2scm_samples_igf.csv\")\n erk_diff = df[\"a(p(Erk))\"] - df2[\"a(p(Erk))\"]\n erk_diff.to_csv(\"data/erk_do_mek_40_minus_erk.csv\")\n df.to_csv(\"data/intervention_mek_40_samples_igf.csv\")\n self.assertTrue(True, True)\n\n def test_covid_causal_effect_with_estimated_parameters_datapoint1(self):\n\n torch.manual_seed(23)\n time1 = time.time()\n bel_file_path = \"data/covid_input.json\"\n config_file_path = \"data/COVID-19-config.json\"\n data_file_path = \"data/observational_samples_from_sigmoid_known_parameters.csv\"\n\n scm = SCM(bel_file_path, config_file_path, data_file_path)\n condition_data = {\n 'a(SARS_COV2)': tensor(67.35032),\n 'a(PRR)': tensor(89.7037),\n 'a(ACE2)': tensor(29.747593),\n 'a(AngII)': tensor(68.251114),\n 'a(AGTR1)': tensor(90.96106999999999),\n 'a(ADAM17)': tensor(86.84893000000001),\n 'a(TOCI)': tensor(40.76684),\n 'a(TNF)': 
tensor(76.85005),\n 'a(sIL_6_alpha)': tensor(87.99491),\n 'a(EGF)': tensor(84.55391),\n 'a(EGFR)': tensor(79.94534),\n 'a(IL6_STAT3)': tensor(83.39896),\n 'a(NF_xB)': tensor(82.79433399999999),\n 'a(IL6_AMP)': tensor(81.38015),\n 'a(cytokine)': tensor(80.21895)\n\n }\n target = \"a(cytokine)\"\n intervention_data = {\n \"a(TOCI)\": 0.0\n }\n\n causal_effects1, counterfactual_samples1 = scm.counterfactual_inference(condition_data, intervention_data,\n target, True)\n print(\"time required for causal effects\", time.time() - time1)\n samples_df = pd.DataFrame(causal_effects1)\n samples_df.to_csv(\"data/causal_effect_sigmoid_with_estimated_parameters_datapoint1.csv\", index=False)\n\n def test_covid_causal_effect_with_estimated_parameters_datapoint2(self):\n\n torch.manual_seed(23)\n time1 = time.time()\n bel_file_path = \"data/covid_input.json\"\n config_file_path = \"data/COVID-19-config.json\"\n data_file_path = \"data/observational_samples_from_sigmoid_known_parameters.csv\"\n\n scm = SCM(bel_file_path, config_file_path, data_file_path)\n condition_data = {\n 'a(SARS_COV2)': 61.631156999999995,\n 'a(PRR)': 87.76389,\n 'a(ACE2)': 39.719845,\n 'a(AngII)': 59.212959999999995,\n 'a(AGTR1)': 84.39899399999999,\n 'a(ADAM17)': 85.84442,\n 'a(TOCI)': 67.33063,\n 'a(TNF)': 77.83915,\n 'a(sIL_6_alpha)': 57.584044999999996,\n 'a(EGF)': 86.26822,\n 'a(EGFR)': 81.4849,\n 'a(IL6_STAT3)': 69.57323000000001,\n 'a(NF_xB)': 83.75941,\n 'a(IL6_AMP)': 77.52906,\n 'a(cytokine)': 79.07555\n\n }\n target = \"a(cytokine)\"\n intervention_data = {\n \"a(TOCI)\": 0.0\n }\n\n causal_effects1, counterfactual_samples1 = scm.counterfactual_inference(condition_data, intervention_data,\n target, True)\n print(\"time required for causal effects\", time.time() - time1)\n samples_df = pd.DataFrame(causal_effects1)\n samples_df.to_csv(\"data/causal_effect_sigmoid_with_estimated_parameters_datapoint2.csv\", index=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"pandas.read_csv",
"torch.manual_seed",
"torch.is_tensor",
"pandas.DataFrame",
"torch.tensor"
]
] |
ADozois/pfe_movidius | [
"62f29ae6b124ea594bc252e2b1a5441e3ac165f7"
] | [
"test.py"
] | [
"import numpy\nimport cv2\nimport sys\nsys.path.insert(0, \"../../ncapi2_shim\")\nimport mvnc_simple_api as mvnc\nfrom NCS import NCS\n\ndim=(300,300)\nEXAMPLES_BASE_DIR='../../'\nIMAGES_DIR = EXAMPLES_BASE_DIR + 'data/images/'\nIMAGE_FULL_PATH = \"/home/walle/Movidius/ncappzoo/data/images/nps_chair.png\"\n\n# ***************************************************************\n# Labels for the classifications for the network.\n# ***************************************************************\nLABELS = ('background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n\n# Run an inference on the passed image\n# image_to_classify is the image on which an inference will be performed\n# upon successful return this image will be overlayed with boxes\n# and labels identifying the found objects within the image.\n# ssd_mobilenet_graph is the Graph object from the NCAPI which will\n# be used to peform the inference.\ndef run_inference(image_to_classify, ssd_mobilenet_graph):\n\n # get a resized version of the image that is the dimensions\n # SSD Mobile net expects\n resized_image = preprocess_image(image_to_classify)\n\n # ***************************************************************\n # Send the image to the NCS\n # ***************************************************************\n ssd_mobilenet_graph.LoadTensor(resized_image.astype(numpy.float16), None)\n\n # ***************************************************************\n # Get the result from the NCS\n # ***************************************************************\n output, userobj = ssd_mobilenet_graph.GetResult()\n\n # a.\tFirst fp16 value holds the number of valid detections = num_valid.\n # b.\tThe next 6 values are unused.\n # c.\tThe next (7 * num_valid) values contain the valid detections data\n # Each group of 7 values will describe an object/box These 7 values in order.\n # The values are:\n # 0: image_id (always 0)\n # 1: class_id (this is an index into labels)\n # 2: score (this is the probability for the class)\n # 3: box left location within image as number between 0.0 and 1.0\n # 4: box top location within image as number between 0.0 and 1.0\n # 5: box right location within image as number between 0.0 and 1.0\n # 6: box bottom location within image as number between 0.0 and 1.0\n\n # number of boxes returned\n num_valid_boxes = int(output[0])\n print('total num boxes: ' + str(num_valid_boxes))\n\n for box_index in range(num_valid_boxes):\n base_index = 7+ box_index * 7\n if (not numpy.isfinite(output[base_index]) or\n not numpy.isfinite(output[base_index + 1]) or\n not numpy.isfinite(output[base_index + 2]) or\n not numpy.isfinite(output[base_index + 3]) or\n not numpy.isfinite(output[base_index + 4]) or\n not numpy.isfinite(output[base_index + 5]) or\n not numpy.isfinite(output[base_index + 6])):\n # boxes with non infinite (inf, nan, etc) numbers must be ignored\n print('box at index: ' + str(box_index) + ' has nonfinite data, ignoring it')\n continue\n\n # clip the boxes to the image size incase network returns boxes outside of the image\n x1 = max(0, int(output[base_index + 3] * image_to_classify.shape[0]))\n y1 = max(0, int(output[base_index + 4] * image_to_classify.shape[1]))\n x2 = min(image_to_classify.shape[0], int(output[base_index + 5] * image_to_classify.shape[0]))\n y2 = min(image_to_classify.shape[1], int(output[base_index + 6] * 
image_to_classify.shape[1]))\n\n x1_ = str(x1)\n y1_ = str(y1)\n x2_ = str(x2)\n y2_ = str(y2)\n\n print('box at index: ' + str(box_index) + ' : ClassID: ' + LABELS[int(output[base_index + 1])] + ' '\n 'Confidence: ' + str(output[base_index + 2]*100) + '% ' +\n 'Top Left: (' + x1_ + ', ' + y1_ + ') Bottom Right: (' + x2_ + ', ' + y2_ + ')')\n\n # overlay boxes and labels on the original image to classify\n overlay_on_image(image_to_classify, output[base_index:base_index + 7])\n\n\n# overlays the boxes and labels onto the display image.\n# display_image is the image on which to overlay the boxes/labels\n# object_info is a list of 7 values as returned from the network\n# These 7 values describe the object found and they are:\n# 0: image_id (always 0 for myriad)\n# 1: class_id (this is an index into labels)\n# 2: score (this is the probability for the class)\n# 3: box left location within image as number between 0.0 and 1.0\n# 4: box top location within image as number between 0.0 and 1.0\n# 5: box right location within image as number between 0.0 and 1.0\n# 6: box bottom location within image as number between 0.0 and 1.0\n# returns None\ndef overlay_on_image(display_image, object_info):\n\n # the minimal score for a box to be shown\n min_score_percent = 60\n\n source_image_width = display_image.shape[1]\n source_image_height = display_image.shape[0]\n\n base_index = 0\n class_id = object_info[base_index + 1]\n percentage = int(object_info[base_index + 2] * 100)\n if (percentage <= min_score_percent):\n # ignore boxes less than the minimum score\n return\n\n label_text = LABELS[int(class_id)] + \" (\" + str(percentage) + \"%)\"\n box_left = int(object_info[base_index + 3] * source_image_width)\n box_top = int(object_info[base_index + 4] * source_image_height)\n box_right = int(object_info[base_index + 5] * source_image_width)\n box_bottom = int(object_info[base_index + 6] * source_image_height)\n\n box_color = (255, 128, 0) # box color\n box_thickness = 2\n cv2.rectangle(display_image, (box_left, box_top), (box_right, box_bottom), box_color, box_thickness)\n\n # draw the classification label string just above and to the left of the rectangle\n label_background_color = (125, 175, 75)\n label_text_color = (255, 255, 255) # white text\n\n label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]\n label_left = box_left\n label_top = box_top - label_size[1]\n if (label_top < 1):\n label_top = 1\n label_right = label_left + label_size[0]\n label_bottom = label_top + label_size[1]\n cv2.rectangle(display_image, (label_left - 1, label_top - 1), (label_right + 1, label_bottom + 1),\n label_background_color, -1)\n\n # label text above the box\n cv2.putText(display_image, label_text, (label_left, label_bottom), cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)\n\n\n# create a preprocessed image from the source image that complies to the\n# network expectations and return it\ndef preprocess_image(src):\n\n # scale the image\n NETWORK_WIDTH = 300\n NETWORK_HEIGHT = 300\n img = cv2.resize(src, (NETWORK_WIDTH, NETWORK_HEIGHT))\n\n # adjust values to range between -1.0 and + 1.0\n img = img - 127.5\n img = img * 0.007843\n return img\n\n\ndef main():\n # name of the opencv window\n cv_window_name = \"SSD MobileNet - hit any key to exit\"\n\n # Get a list of ALL the sticks that are plugged in\n # we need at least one\n devices = mvnc.EnumerateDevices()\n if len(devices) == 0:\n print('No devices found')\n quit()\n\n # Pick the first stick to run the network\n device = 
mvnc.Device(devices[0])\n\n # Open the NCS\n device.OpenDevice()\n\n # The graph file that was created with the ncsdk compiler\n graph_file_name = '/home/walle/pfe_movidius/model/ssd_caffe/graph'\n\n # read in the graph file to memory buffer\n with open(graph_file_name, mode='rb') as f:\n graph_in_memory = f.read()\n\n cam = cv2.VideoCapture(0)\n\n # create the NCAPI graph instance from the memory buffer containing the graph file.\n graph = device.AllocateGraph(graph_in_memory)\n\n # read the image to run an inference on from the disk\n infer_image = cv2.imread(IMAGE_FULL_PATH)\n\n\n _, infer_image = cam.read()\n\n # run a single inference on the image and overwrite the\n # boxes and labels\n run_inference(infer_image, graph)\n # display the results and wait for user to hit a key\n cv2.imshow(cv_window_name, infer_image)\n cv2.waitKey(1)\n\n # Clean up the graph and the device\n graph.DeallocateGraph()\n device.CloseDevice()\n\n\n# main entry point for program. we'll call main() to do what needs to be done.\nif __name__ == \"__main__\":\n sys.exit(main())\n"
] | [
[
"numpy.isfinite"
]
] |
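The 7-float detection layout documented in `run_inference` above (count in slot 0, six unused slots, then seven values per box) can be decoded generically. A hedged sketch; `decode_detections` is an illustrative helper, not part of the NCAPI:

import numpy as np

def decode_detections(output, image_w, image_h, min_score=0.6):
    # output[0] holds the number of valid detections; each detection occupies
    # 7 floats starting at offset 7: (image_id, class_id, score, x1, y1, x2, y2).
    boxes = []
    for k in range(int(output[0])):
        det = output[7 + k * 7 : 7 + k * 7 + 7]
        if not np.all(np.isfinite(det)) or det[2] < min_score:
            continue  # skip non-finite or low-confidence boxes
        _, class_id, score, x1, y1, x2, y2 = det
        boxes.append((int(class_id), float(score),
                      max(0, int(x1 * image_w)), max(0, int(y1 * image_h)),
                      min(image_w, int(x2 * image_w)), min(image_h, int(y2 * image_h))))
    return boxes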
marblejenka/sawatabi | [
"8940bafed202e03b9f9fce4df2229960018871c8"
] | [
"tests/model/test_logical_model.py"
] | [
"# Copyright 2021 Kotaro Terada\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\n\nimport numpy as np\nimport pyqubo\nimport pytest\n\nimport sawatabi.constants as constants\nfrom sawatabi.model import LogicalModel\nfrom sawatabi.model.constraint import EqualityConstraint, NHotConstraint\nfrom sawatabi.solver import LocalSolver\n\n\[email protected]\ndef ising():\n return LogicalModel(mtype=\"ising\")\n\n\[email protected]\ndef qubo():\n return LogicalModel(mtype=\"qubo\")\n\n\n################################\n# Logical Model\n################################\n\n\[email protected](\"mtype\", [\"ising\", \"qubo\"])\ndef test_logical_model_constructor(mtype):\n model = LogicalModel(mtype=mtype)\n assert model.get_mtype() == mtype\n assert model._mtype == mtype\n\n\ndef test_logical_model_invalid_mtype():\n with pytest.raises(ValueError):\n LogicalModel()\n\n with pytest.raises(ValueError):\n LogicalModel(mtype=\"invalidtype\")\n\n with pytest.raises(ValueError):\n LogicalModel(mtype=12345)\n\n\[email protected](\"mtype\", [\"ising\", \"qubo\"])\ndef test_logical_model_empty(mtype):\n model = LogicalModel(mtype=mtype)\n x = model.variables(\"x\", shape=(10, 10))\n model.add_interaction(x[0, 0], coefficient=10.0)\n\n empty_model = model.empty()\n assert empty_model.get_mtype() == mtype\n assert len(empty_model._variables) == 0\n assert len(empty_model._interactions_array) == len(empty_model._default_keys)\n for k, v in empty_model._interactions_array.items():\n assert len(v) == 0\n assert empty_model._interactions_length == 0\n\n\n################################\n# Variables\n################################\n\n\[email protected](\"shape\", [(2,), (3, 4), (5, 6, 7)])\ndef test_logical_model_variables(shape):\n model = LogicalModel(mtype=\"ising\")\n x = model.variables(\"x\", shape=shape)\n\n assert len(model.get_variables()) == 1\n assert \"x\" in model.get_variables()\n assert x.shape == shape\n assert isinstance(x, pyqubo.Array)\n assert model.get_variables_by_name(\"x\") == x\n assert id(model.get_variables_by_name(\"x\")) == id(x)\n assert id(model.get_variables()[\"x\"]) == id(x)\n\n\[email protected](\"shape\", [(2,), (3, 4), (5, 6, 7)])\ndef test_logical_model_multi_variables(shape):\n model = LogicalModel(mtype=\"qubo\")\n x = model.variables(\"x\", shape)\n assert len(model.get_variables()) == 1\n assert \"x\" in model.get_variables()\n assert \"y\" not in model.get_variables()\n assert model.get_variables_by_name(\"x\") == x\n assert id(model.get_variables_by_name(\"x\")) == id(x)\n with pytest.raises(KeyError):\n model.get_variables_by_name(\"y\")\n\n y = model.variables(\"y\", shape=shape)\n assert len(model.get_variables()) == 2\n assert \"x\" in model.get_variables()\n assert \"y\" in model.get_variables()\n assert y.shape == shape\n assert isinstance(y, pyqubo.Array)\n assert model.get_variables_by_name(\"y\") == y\n assert id(model.get_variables()[\"y\"]) == id(y)\n\n\[email protected](\n \"name,shape\",\n [\n (12345, (2, 3)),\n (\"x\", 12345),\n (\"x\", 
()),\n (\"x\", (\"a\", \"b\")),\n ],\n)\ndef test_logical_model_variables_invalid(name, shape):\n model = LogicalModel(mtype=\"ising\")\n with pytest.raises(TypeError):\n model.variables(name, shape=shape)\n\n\ndef test_logical_model_variables_comma(ising):\n # We cannot name variables whose name contains a comma\n with pytest.raises(AssertionError):\n ising.variables(\"x*y\", shape=(2, 2))\n\n\[email protected](\"initial_shape,additional_shape,expected_shape\", [((2,), (1,), (3,)), ((44, 33), (22, 11), (66, 44))])\ndef test_logical_model_variables_append(initial_shape, additional_shape, expected_shape):\n model = LogicalModel(mtype=\"ising\")\n model.variables(\"x\", shape=initial_shape)\n assert \"x\" in model.get_variables()\n assert model.get_variables_by_name(\"x\").shape == initial_shape\n\n model.append(\"x\", shape=additional_shape)\n assert model.get_variables_by_name(\"x\").shape == expected_shape\n\n\[email protected](\"shape\", [(2,), (33, 44)])\ndef test_logical_model_variables_append_without_initialize(shape):\n model = LogicalModel(mtype=\"ising\")\n # The following operation will be successful with a UserWarning.\n with pytest.warns(UserWarning):\n model.append(\"x\", shape=shape)\n assert \"x\" in model.get_variables()\n assert model.get_variables_by_name(\"x\").shape == shape\n\n\ndef test_logical_model_variables_append_invalid(ising):\n ising.variables(\"x\", shape=(2, 2))\n\n with pytest.raises(TypeError):\n ising.append(\"x\", shape=(\"a\", \"b\"))\n\n\[email protected](\"vartype,mtype\", [(\"SPIN\", \"ising\"), (\"BINARY\", \"qubo\")])\ndef test_logical_model_variables_from_pyqubo(vartype, mtype):\n x = pyqubo.Array.create(\"x\", shape=(2, 3), vartype=vartype)\n model = LogicalModel(mtype=mtype)\n x_when_applied = model.variables(x)\n x_from_model = model.get_variables_by_name(\"x\")\n assert id(x) == id(x_when_applied)\n assert x == x_when_applied\n assert id(x) == id(x_from_model)\n assert x == x_from_model\n\n\[email protected](\"vartype,mtype\", [(\"SPIN\", \"qubo\"), (\"BINARY\", \"ising\")])\ndef test_logical_model_variables_from_pyqubo_mismatch(vartype, mtype):\n x = pyqubo.Array.create(\"x\", shape=(2, 3), vartype=vartype)\n model = LogicalModel(mtype=mtype)\n with pytest.raises(TypeError):\n model.variables(x)\n\n\n################################\n# PyQUBO\n################################\n\n\ndef test_logical_model_from_pyqubo_expression(qubo):\n x = qubo.variables(\"x\", shape=(10,))\n y = qubo.variables(\"y\", shape=(10,))\n\n sum_x = sum(x[i] for i in range(10))\n sum_y = sum(y[i] for i in range(10))\n hamiltonian = (sum_x - sum_y) ** 2\n\n qubo.from_pyqubo(hamiltonian)\n\n qubo._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug\n\n for i in range(10):\n assert qubo._interactions[qubo._interactions[\"name\"] == f\"x[{i}]\"][\"coefficient\"].values[0] == -1.0\n for i in range(9):\n for j in range(i + 1, 10):\n assert qubo._interactions[qubo._interactions[\"name\"] == f\"x[{i}]*x[{j}]\"][\"coefficient\"].values[0] == -2.0\n assert qubo._interactions[qubo._interactions[\"name\"] == f\"y[{i}]*y[{j}]\"][\"coefficient\"].values[0] == -2.0\n assert qubo._interactions[qubo._interactions[\"name\"] == f\"x[{i}]*y[{j}]\"][\"coefficient\"].values[0] == 2.0\n\n assert qubo.get_offset() == 0.0\n\n\ndef test_logical_model_from_pyqubo_model_with_placeholder(qubo):\n x = qubo.variables(\"x\", shape=(10, 2))\n y = qubo.variables(\"y\", shape=(10, 2))\n\n sum_x = sum(x[i, 0] for i in range(10))\n sum_y = sum(y[i, 0] for i in 
range(10))\n hamiltonian = pyqubo.Placeholder(\"A\") * (sum_x - sum_y) ** 2 + 10.0\n pyqubo_model = hamiltonian.compile()\n\n qubo.from_pyqubo(pyqubo_model)\n\n # We cannot evaluate cofficient values before placeholders are resolved,\n # so convert it to a physical model.\n physical = qubo.to_physical({\"A\": 2.0})\n\n for i in range(10):\n assert physical._raw_interactions[constants.INTERACTION_LINEAR][f\"x[{i}][0]\"] == -2.0\n for i in range(10):\n for j in range(i + 1, 10):\n assert physical._raw_interactions[constants.INTERACTION_QUADRATIC][(f\"x[{i}][0]\", f\"x[{j}][0]\")] == -4.0\n assert physical._raw_interactions[constants.INTERACTION_QUADRATIC][(f\"y[{i}][0]\", f\"y[{j}][0]\")] == -4.0\n assert physical._raw_interactions[constants.INTERACTION_QUADRATIC][(f\"x[{i}][0]\", f\"y[{j}][0]\")] == 4.0\n\n assert physical.get_offset() == 10.0\n\n\ndef test_logical_model_from_pyqubo_spins(qubo):\n x = qubo.variables(\"x\", shape=(10,))\n y = qubo.variables(\"y\", shape=(10,))\n\n sum_x = sum(x[i] for i in range(10))\n sum_y = sum(y[i] for i in range(10))\n hamiltonian = (sum_x - sum_y) ** 2\n\n qubo.from_pyqubo(hamiltonian)\n physical = qubo.to_physical()\n solver = LocalSolver(exact=False)\n sampleset = solver.solve(physical, num_reads=1, num_sweeps=10000)\n spins = sampleset.record[0][0]\n assert np.count_nonzero(spins[:10]) == np.count_nonzero(spins[10:])\n\n\ndef test_logical_model_from_pyqubo_invalid(qubo):\n with pytest.raises(TypeError):\n qubo.from_pyqubo(\"invalid type\")\n\n\n################################\n# Constraints\n################################\n\n\ndef test_logical_model_n_hot_constraint_x(ising):\n x = ising.variables(\"x\", shape=(3,))\n default_label = \"Default N-hot Constraint\"\n\n ising.add_constraint(NHotConstraint(variables=x[0], n=1))\n assert len(ising.get_constraints()) == 1\n assert default_label in ising.get_constraints()\n assert len(ising.get_constraints_by_label(default_label)._variables) == 1\n assert ising.get_constraints_by_label(default_label)._n == 1\n\n ising.add_constraint(NHotConstraint(variables=x[(slice(1, 3),)], n=1))\n assert len(ising.get_constraints()) == 1\n assert default_label in ising.get_constraints()\n assert len(ising.get_constraints_by_label(default_label)._variables) == 2\n assert ising.get_constraints_by_label(default_label)._n == 1\n\n ising.add_constraint(NHotConstraint(variables=x, n=2)) # n = 2\n assert len(ising.get_constraints()) == 1\n assert default_label in ising.get_constraints()\n assert len(ising.get_constraints_by_label(default_label)._variables) == 3\n assert ising.get_constraints_by_label(default_label)._n == 2\n\n ising.add_constraint(NHotConstraint(variables=x, n=2)) # double\n assert len(ising.get_constraints()) == 1\n assert default_label in ising.get_constraints()\n assert len(ising.get_constraints_by_label(default_label)._variables) == 3\n assert ising.get_constraints_by_label(default_label)._n == 2\n\n ising.add_constraint(NHotConstraint(variables=x, n=2)) # partially\n assert len(ising.get_constraints()) == 1\n assert default_label in ising.get_constraints()\n assert len(ising.get_constraints_by_label(default_label)._variables) == 3\n assert ising.get_constraints_by_label(default_label)._n == 2\n\n\ndef test_logical_model_n_hot_constraint_y(ising):\n y = ising.variables(\"y\", shape=(2, 2))\n\n ising.add_constraint(NHotConstraint(variables=y[0, :], n=2, label=\"my label\"))\n assert len(ising.get_constraints()) == 1\n assert \"my label\" in ising.get_constraints()\n assert 
ising.get_constraints_by_label(\"my label\")._n == 2\n\n # variables in list representation\n c = NHotConstraint(variables=[y[1, 0], y[1, 1]], n=2, label=\"my label\")\n ising.add_constraint(c)\n assert len(ising.get_constraints()) == 1\n assert \"my label\" in ising.get_constraints()\n assert len(ising.get_constraints_by_label(\"my label\")._variables) == 2\n assert ising.get_constraints_by_label(\"my label\")._n == 2\n\n # Constraint content changes\n c.add_variable(variables=[y[0, 0]])\n assert len(ising.get_constraints()) == 1\n assert \"my label\" in ising.get_constraints()\n assert len(ising.get_constraints_by_label(\"my label\")._variables) == 3\n assert ising.get_constraints_by_label(\"my label\")._n == 2\n\n\ndef test_logical_model_n_hot_constraint_remove(ising):\n y = ising.variables(\"y\", shape=(2, 2))\n\n ising.add_constraint(NHotConstraint(variables=y, n=1, label=\"my label\"))\n assert len(ising.get_constraints()) == 1\n assert \"my label\" in ising.get_constraints()\n\n ising.remove_constraint(label=\"my label\")\n assert len(ising.get_constraints()) == 0\n\n\ndef test_logical_model_multiple_constraints_n_hot_and_n_hot(ising):\n x = ising.variables(\"x\", shape=(2, 4))\n\n ising.add_constraint(NHotConstraint(variables=x[0, :], n=1, label=\"l1\"))\n assert len(ising.get_constraints()) == 1\n assert \"l1\" in ising.get_constraints()\n assert \"l2\" not in ising.get_constraints()\n assert ising.get_constraints_by_label(\"l1\")._n == 1\n with pytest.raises(KeyError):\n ising.get_constraints_by_label(\"l2\")\n\n ising.add_constraint(NHotConstraint(variables=x[1, :], n=1, label=\"l2\"))\n assert len(ising.get_constraints()) == 2\n assert \"l1\" in ising.get_constraints()\n assert \"l2\" in ising.get_constraints()\n assert ising.get_constraints_by_label(\"l1\")._n == 1\n assert ising.get_constraints_by_label(\"l2\")._n == 1\n\n\ndef test_logical_model_multiple_constraints_n_hot_and_equality(ising):\n x = ising.variables(\"x\", shape=(3, 4))\n\n ising.add_constraint(NHotConstraint(variables=x[0, :], n=1, label=\"l1\"))\n assert len(ising.get_constraints()) == 1\n assert \"l1\" in ising.get_constraints()\n assert \"l2\" not in ising.get_constraints()\n assert isinstance(ising.get_constraints_by_label(\"l1\"), NHotConstraint)\n assert ising.get_constraints_by_label(\"l1\")._n == 1\n with pytest.raises(KeyError):\n ising.get_constraints_by_label(\"l2\")\n\n ising.add_constraint(EqualityConstraint(variables_1=x[1, :], variables_2=x[2, :], label=\"l2\"))\n assert len(ising.get_constraints()) == 2\n assert \"l1\" in ising.get_constraints()\n assert \"l2\" in ising.get_constraints()\n assert isinstance(ising.get_constraints_by_label(\"l2\"), EqualityConstraint)\n\n\ndef test_logical_model_n_hot_constraint_typeerror(ising):\n ising.variables(\"z\", shape=(4, 4, 4))\n\n # TODO: This error should be raises, but not implemented yet.\n # a = pyqubo.Spin(\"a\")\n # with pytest.raises(ValueError):\n # ising.add_constraint(NHotConstraint(a))\n\n\n################################\n# Getters\n################################\n\n\ndef test_logical_model_get_deleted_array(ising):\n x = ising.variables(\"x\", shape=(2,))\n assert len(ising.get_deleted_array()) == 0\n ising.delete_variable(target=x[0])\n assert len(ising.get_deleted_array()) == 1\n assert \"x[0]\" in ising.get_deleted_array()\n\n\ndef test_logical_model_get_fixed_array(ising):\n x = ising.variables(\"x\", shape=(2,)) # noqa: F841\n assert len(ising.get_fixed_array()) == 0\n # ising.fix_variable(target=x[0], value=1)\n # assert 
len(ising.get_fixed_array()) == 1\n\n\ndef test_logical_model_get_attributes(ising):\n x = ising.variables(\"x\", shape=(2,))\n ising.add_interaction(x[0], coefficient=10.0)\n ising.add_interaction(x[1], coefficient=11.0, attributes={\"foo\": \"bar\"})\n\n attributes = ising.get_attributes(x[0])\n assert len(attributes) == 1\n assert np.isnan(attributes[\"attributes.foo\"])\n\n attributes = ising.get_attributes(target=x[1])\n assert len(attributes) == 1\n assert attributes[\"attributes.foo\"] == \"bar\"\n\n attributes = ising.get_attributes(name=\"x[1]\")\n assert len(attributes) == 1\n assert attributes[\"attributes.foo\"] == \"bar\"\n\n attribute = ising.get_attribute(x[1], key=\"attributes.foo\")\n assert isinstance(attribute, str)\n assert attribute == \"bar\"\n\n with pytest.raises(KeyError):\n ising.get_attribute(name=\"x[1]\", key=\"attributes.foofoo\")\n\n\n################################\n# Built-in functions\n################################\n\n\ndef test_logical_model_eq():\n model_a = _create_ising_model_for_eq()\n model_b = copy.deepcopy(model_a) # Note: If _create_ising_model_for_eq() called again, timestamp will be different.\n assert model_a == model_b\n\n\ndef test_logical_model_ne():\n ising = LogicalModel(mtype=\"ising\")\n qubo = LogicalModel(mtype=\"qubo\")\n assert ising != qubo\n assert ising != \"another type\"\n\n\ndef _create_ising_model_for_eq():\n model = LogicalModel(mtype=\"ising\")\n x = model.variables(name=\"x\", shape=(4,))\n z = model.variables(name=\"z\", shape=(4,))\n model.add_interaction(target=x[0], coefficient=1.1)\n model.add_interaction(target=(x[0], x[1]), coefficient=2.2, scale=3.3, attributes={\"foo\": \"bar\"})\n\n model.add_interaction(target=x[2], coefficient=4.4)\n model.add_interaction(target=x[3], coefficient=5.5)\n model.remove_interaction(target=x[2])\n model.fix_variable(target=x[3], value=1)\n\n model.add_constraint(NHotConstraint(variables=z, n=1))\n\n return model\n\n\ndef test_logical_model_repr(ising):\n x = ising.variables(name=\"x\", shape=(10, 10))\n ising.add_interaction(x[0][0], coefficient=10.0)\n ising.add_interaction((x[1][0], x[1][1]), coefficient=11.0)\n\n assert isinstance(ising.__repr__(), str)\n assert \"LogicalModel({\" in ising.__repr__()\n assert \"'mtype':\" in ising.__repr__()\n assert \"'variables':\" in ising.__repr__()\n assert \"'x':\" in ising.__repr__()\n assert '\\'interactions\\': [[1,\"x[0][0]\",\"x[0][0]\",\"x[0][0]\",null,' in ising.__repr__()\n assert ',[2,\"x[1][0]*x[1][1]\",[\"x[1][0]\",\"x[1][1]\"],\"x[1][0]\",\"x[1][1]\",' in ising.__repr__()\n assert \"'offset':\" in ising.__repr__()\n assert \"'constraints':\" in ising.__repr__()\n\n\ndef test_logical_model_repr_empty_interactions(ising):\n ising.variables(name=\"x\", shape=(10, 10))\n\n assert isinstance(ising.__repr__(), str)\n assert \"LogicalModel({\" in ising.__repr__()\n assert \"'mtype':\" in ising.__repr__()\n assert \"'variables':\" in ising.__repr__()\n assert \"'x':\" in ising.__repr__()\n assert \"'interactions': 'Empty'\" in ising.__repr__()\n assert \"'offset':\" in ising.__repr__()\n assert \"'constraints':\" in ising.__repr__()\n\n\ndef test_logical_model_str(ising):\n x = ising.variables(name=\"x\", shape=(10, 10))\n ising.add_interaction(x[0][0], coefficient=10.0)\n ising.add_interaction((x[1][0], x[1][1]), coefficient=11.0)\n\n assert isinstance(ising.__str__(), str)\n assert \"LOGICAL MODEL\" in ising.__str__()\n assert \"mtype: ising\" in ising.__str__()\n assert \"variables: ['x']\" in ising.__str__()\n assert \"name: x\" 
in ising.__str__()\n assert \"Array([[Spin(x[0][0]),\" in ising.__str__()\n assert \"interactions:\" in ising.__str__()\n assert \" 0 1 x[0][0] x[0][0]\" in ising.__str__()\n assert \" 1 2 x[1][0]*x[1][1] (x[1][0], x[1][1])\" in ising.__str__()\n assert \"offset: 0.0\" in ising.__str__()\n assert \"constraints:\" in ising.__str__()\n\n\ndef test_logical_model_str_empty_interactions(ising):\n ising.variables(name=\"x\", shape=(10, 10))\n\n assert isinstance(ising.__str__(), str)\n assert \"LOGICAL MODEL\" in ising.__str__()\n assert \"mtype: ising\" in ising.__str__()\n assert \"variables: ['x']\" in ising.__str__()\n assert \"name: x\" in ising.__str__()\n assert \"Array([[Spin(x[0][0]),\" in ising.__str__()\n assert \"interactions:\" in ising.__str__()\n assert \"Empty\" in ising.__str__()\n assert \"offset: 0.0\" in ising.__str__()\n assert \"constraints:\" in ising.__str__()\n"
] | [
[
"numpy.isnan",
"numpy.count_nonzero"
]
] |
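The `NHotConstraint` exercised throughout the tests above penalises assignments in which the number of hot variables differs from n. A sketch of the usual penalty form in plain pyqubo, assuming (not verified against sawatabi's source) that the constraint expands to a squared-sum term:

import pyqubo

x = pyqubo.Array.create("x", shape=(4,), vartype="BINARY")
n = 1
# (sum_i x_i - n)^2 is zero exactly when n of the variables equal 1.
hamiltonian = (sum(x[i] for i in range(4)) - n) ** 2
qubo, offset = hamiltonian.compile().to_qubo()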
conda-forge-linter/pyimagej-feedstock | [
"8034cc2340f29c7e1408d1ab67a8e76266074512"
] | [
"recipe/run_test.py"
] | [
"import imagej\nimport numpy as np\n\nij = imagej.init(headless=True)\nprint(ij.getVersion())\n\nimg_shape = (512, 512)\n\nimg = np.random.random(img_shape)\noutput = np.zeros(img.shape, dtype=img.dtype)\nrai = ij.op().filter().frangiVesselness(ij.py.to_java(output), ij.py.to_java(img), [1, 1], 20)\n\nassert output.shape == img_shape\nassert rai.numDimensions() == 2\nassert rai.dimension(0) == img_shape[0]\nassert rai.dimension(1) == img_shape[1]\n"
] | [
[
"numpy.random.random",
"numpy.zeros"
]
] |
fvcalderan/interpolation | [
"1084c66cfb26244f42dec7fdeadf18ae5cdd3a2f"
] | [
"interpolation.py"
] | [
"from sympy import *\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\n__author__ = 'Felipe V. Calderan'\n__copyright__ = 'Copyright (C) 2020 Felipe V. Calderan'\n__license__ = 'BSD 3-Clause \"New\" or \"Revised\" License'\n__version__ = '1.0'\n\n\ndef check_args():\n \"\"\"check correctnes of the arguments and get them\"\"\"\n if len(sys.argv) != 4:\n print('\\nUsage: python3 interpolation.py data.csv precision output_name\\n')\n exit()\n return sys.argv[1], float(sys.argv[2]), sys.argv[3]\n\n\ndef gen_data(data):\n \"\"\"get csv file and generate axis\"\"\"\n my_data = np.genfromtxt(data, delimiter=',')\n x_axis, y_axis = zip(*my_data)\n return x_axis, y_axis\n\n\ndef gen_expr_point(point, x_axis):\n \"\"\"generate x part of lagrange interpolation\"\"\"\n nstr = ''\n dstr = ''\n for i, v in enumerate(x_axis):\n if i != point:\n nstr = nstr + '(x-' + str(v) + ')' + '*'\n dstr = dstr + '(' + str(x_axis[point]) + '-' + str(v) + ')*'\n\n return '(' + nstr[:-1] + ')/(' + dstr[:-1] + ')'\n\n\ndef gen_final_expr(x_axis, y_axis):\n \"\"\"generate final lagrange interpolation\"\"\"\n expr = ''\n for i, v in enumerate(y_axis):\n this_point = gen_expr_point(i, x_axis)\n expr = expr + str(v) + '*' + this_point + '+'\n\n return expr[:-1]\n\n\ndef gen_new_axis(x_axis, expr, precision):\n \"\"\"evaluate lagrange interpolation\"\"\"\n new_x = np.arange(min(x_axis), max(x_axis), precision)\n f = lambdify('x', expr, \"numpy\")\n new_y = f(new_x)\n\n return new_x, new_y\n\n\ndef gen_out_file(expr, x_axis, y_axis, new_x, new_y, out_name):\n \"\"\"output equation and plot to files\"\"\"\n # save equation\n tfile = open(out_name + '_LaTeX.txt', 'w')\n tfile.write(latex(expr))\n tfile.close()\n\n # save plot\n plt.plot(x_axis, y_axis, 'o', new_x, new_y, '-')\n plt.savefig(out_name, dpi=400)\n\n\ndef main():\n # check correctness of the arguments and get them\n csvfile, precision, out_name = check_args()\n\n # generate data\n x_axis, y_axis = gen_data(csvfile)\n\n # simplify and generate new interpolated data\n expr = sympify(gen_final_expr(x_axis, y_axis))\n expr = simplify(expr)\n new_x, new_y = gen_new_axis(x_axis, expr, precision)\n\n # print expression in the terminal\n pprint(expr)\n\n # generate output\n gen_out_file(expr, x_axis, y_axis, new_x, new_y, out_name)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt"
]
] |
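`gen_final_expr` in the record above assembles the Lagrange form L(x) = sum_i y_i * prod_{j != i} (x - x_j) / (x_i - x_j) as a sympy string. A direct numeric sketch of the same polynomial, handy as a cross-check; `lagrange_eval` is an illustrative name:

def lagrange_eval(x_axis, y_axis, x):
    # Evaluate the Lagrange interpolating polynomial at x.
    total = 0.0
    for i, yi in enumerate(y_axis):
        basis = 1.0
        for j, xj in enumerate(x_axis):
            if j != i:
                basis *= (x - xj) / (x_axis[i] - xj)
        total += yi * basis
    return total

# The polynomial reproduces the data points exactly:
xs, ys = [0.0, 1.0, 2.0], [1.0, 3.0, 2.0]
assert abs(lagrange_eval(xs, ys, 1.0) - 3.0) < 1e-12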
Satyam-Bhalla/Machine-Learning-Practice | [
"0ae4b8ae9501fb0a22b236dbc508fe6b32e21f42"
] | [
"Day 8 - Logistic Regression/logistic_regression.py"
] | [
"# Logistic Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, [2, 3]].values\ny = dataset.iloc[:, 4].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Fitting Logistic Regression to the Training set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n# Visualising the Training set results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Logistic Regression (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\n\n# Visualising the Test set results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Logistic Regression (Test set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"sklearn.cross_validation.train_test_split",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.title",
"numpy.unique",
"sklearn.metrics.confusion_matrix",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
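Both visualisation blocks in the record above rebuild the same 0.01-step mesh over the two scaled features. A small sketch that factors out that grid construction; `decision_grid` is an illustrative helper, not part of the script:

import numpy as np

def decision_grid(X, step=0.01, margin=1.0):
    # Mesh covering both feature columns of X, padded by `margin` on each side.
    return np.meshgrid(
        np.arange(X[:, 0].min() - margin, X[:, 0].max() + margin, step),
        np.arange(X[:, 1].min() - margin, X[:, 1].max() + margin, step))

# classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape)
# then yields the predicted class at every grid point for plt.contourf.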
aikonens/GoodVibes | [
"d9b7920bd27ceb39f203a3522c2570ffc0c58686"
] | [
"goodvibes/io.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import\n\nimport os.path, sys\nimport numpy as np\n\n# PHYSICAL CONSTANTS UNITS\nKCAL_TO_AU = 627.509541 # UNIT CONVERSION\n\n# Radii used to determine connectivity in symmetry corrections\n# Covalent radii taken from Cambridge Structural Database\nRADII = {'H': 0.32, 'He': 0.93, 'Li': 1.23, 'Be': 0.90, 'B': 0.82, 'C': 0.77, 'N': 0.75, 'O': 0.73, 'F': 0.72,\n 'Ne': 0.71, 'Na': 1.54, 'Mg': 1.36, 'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': 0.99, 'Ar': 0.98,\n 'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22, 'Cr': 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16,\n 'Ni': 1.15, 'Cu': 1.17, 'Zn': 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.20, 'Se': 1.16, 'Br': 1.14, 'Kr': 1.12,\n 'Rb': 2.16, 'Sr': 1.91, 'Y': 1.62, 'Zr': 1.45, 'Nb': 1.34, 'Mo': 1.30, 'Tc': 1.27, 'Ru': 1.25, 'Rh': 1.25,\n 'Pd': 1.28, 'Ag': 1.34, 'Cd': 1.48, 'In': 1.44, 'Sn': 1.41, 'Sb': 1.40, 'Te': 1.36, 'I': 1.33, 'Xe': 1.31,\n 'Cs': 2.35, 'Ba': 1.98, 'La': 1.69, 'Lu': 1.60, 'Hf': 1.44, 'Ta': 1.34, 'W': 1.30, 'Re': 1.28, 'Os': 1.26,\n 'Ir': 1.27, 'Pt': 1.30, 'Au': 1.34, 'Hg': 1.49, 'Tl': 1.48, 'Pb': 1.47, 'Bi': 1.46, 'X': 0}\n# Bondi van der Waals radii for all atoms from: Bondi, A. J. Phys. Chem. 1964, 68, 441-452,\n# except hydrogen, which is taken from Rowland, R. S.; Taylor, R. J. Phys. Chem. 1996, 100, 7384-7391.\n# Radii unavailable in either of these publications are set to 2 Angstrom\n# (Unfinished)\nBONDI = {'H': 1.09, 'He': 1.40, 'Li': 1.82, 'Be': 2.00, 'B': 2.00, 'C': 1.70, 'N': 1.55, 'O': 1.52, 'F': 1.47,\n 'Ne': 1.54}\n\n# Some useful arrays\nperiodictable = [\"\", \"H\", \"He\", \"Li\", \"Be\", \"B\", \"C\", \"N\", \"O\", \"F\", \"Ne\", \"Na\", \"Mg\", \"Al\", \"Si\",\n \"P\", \"S\", \"Cl\", \"Ar\", \"K\", \"Ca\", \"Sc\", \"Ti\", \"V\", \"Cr\", \"Mn\", \"Fe\", \"Co\", \"Ni\", \"Cu\", \"Zn\",\n \"Ga\", \"Ge\", \"As\", \"Se\", \"Br\", \"Kr\", \"Rb\", \"Sr\", \"Y\", \"Zr\", \"Nb\", \"Mo\", \"Tc\", \"Ru\", \"Rh\", \"Pd\",\n \"Ag\", \"Cd\", \"In\", \"Sn\", \"Sb\", \"Te\", \"I\", \"Xe\", \"Cs\", \"Ba\", \"La\", \"Ce\", \"Pr\", \"Nd\", \"Pm\", \"Sm\",\n \"Eu\", \"Gd\", \"Tb\", \"Dy\", \"Ho\", \"Er\", \"Tm\", \"Yb\", \"Lu\", \"Hf\", \"Ta\", \"W\", \"Re\", \"Os\", \"Ir\", \"Pt\",\n \"Au\", \"Hg\", \"Tl\", \"Pb\", \"Bi\", \"Po\", \"At\", \"Rn\", \"Fr\", \"Ra\", \"Ac\", \"Th\", \"Pa\", \"U\", \"Np\", \"Pu\",\n \"Am\", \"Cm\", \"Bk\", \"Cf\", \"Es\", \"Fm\", \"Md\", \"No\", \"Lr\", \"Rf\", \"Db\", \"Sg\", \"Bh\", \"Hs\", \"Mt\", \"Ds\",\n \"Rg\", \"Uub\", \"Uut\", \"Uuq\", \"Uup\", \"Uuh\", \"Uus\", \"Uuo\"]\n\ndef element_id(massno, num=False):\n \"\"\"\n Get element symbol from mass number.\n\n Used in parsing output files to determine elements present in file.\n\n Parameter:\n massno (int): mass of element.\n\n Returns:\n str: element symbol, or 'XX' if not found in periodic table.\n \"\"\"\n try:\n if num:\n return periodictable.index(massno)\n return periodictable[massno]\n except IndexError:\n return \"XX\"\n\nclass xyz_out:\n \"\"\"\n Enables output of optimized coordinates to a single xyz-formatted file.\n\n Writes Cartesian coordinates of parsed chemical input.\n\n Attributes:\n xyz (file object): path in current working directory to write Cartesian coordinates.\n \"\"\"\n def __init__(self, filein, suffix, append):\n self.xyz = open('{}_{}.{}'.format(filein, append, suffix), 'w')\n\n def write_text(self, message):\n self.xyz.write(message + \"\\n\")\n\n def write_coords(self, atoms, coords):\n for n, carts in enumerate(coords):\n 
self.xyz.write('{:>1}'.format(atoms[n]))\n for cart in carts:\n self.xyz.write('{:13.6f}'.format(cart))\n self.xyz.write('\\n')\n\n def finalize(self):\n self.xyz.close()\n\nclass getoutData:\n \"\"\"\n Read molecule data from a computational chemistry output file.\n\n Currently supports Gaussian and ORCA output types.\n\n Attributes:\n FREQS (list): list of frequencies parsed from Gaussian file.\n REDMASS (list): list of reduced masses parsed from Gaussian file.\n FORCECONST (list): list of force constants parsed from Gaussian file.\n NORMALMODE (list): list of normal modes parsed from Gaussian file.\n atom_nums (list): list of atom number IDs.\n atom_types (list): list of atom element symbols.\n cartesians (list): list of cartesian coordinates for each atom.\n atomictypes (list): list of atomic types output in Gaussian files.\n connectivity (list): list of atomic connectivity in a molecule, based on covalent radii\n \"\"\"\n def __init__(self, file):\n with open(file) as f:\n data = f.readlines()\n program = 'none'\n\n for line in data:\n if \"Gaussian\" in line:\n program = \"Gaussian\"\n break\n if \"* O R C A *\" in line:\n program = \"Orca\"\n break\n if \"NWChem\" in line:\n program = \"NWChem\"\n break\n\n def get_freqs(self, outlines, natoms, format):\n self.FREQS = []\n self.REDMASS = []\n self.FORCECONST = []\n self.NORMALMODE = []\n freqs_so_far = 0\n if format == \"Gaussian\":\n for i in range(0, len(outlines)):\n if outlines[i].find(\" Frequencies -- \") > -1:\n nfreqs = len(outlines[i].split())\n for j in range(2, nfreqs):\n self.FREQS.append(float(outlines[i].split()[j]))\n self.NORMALMODE.append([])\n for j in range(3, nfreqs + 1): self.REDMASS.append(float(outlines[i + 1].split()[j]))\n for j in range(3, nfreqs + 1): self.FORCECONST.append(float(outlines[i + 2].split()[j]))\n\n for j in range(0, natoms):\n for k in range(0, nfreqs - 2):\n self.NORMALMODE[(freqs_so_far + k)].append(\n [float(outlines[i + 5 + j].split()[3 * k + 2]),\n float(outlines[i + 5 + j].split()[3 * k + 3]),\n float(outlines[i + 5 + j].split()[3 * k + 4])])\n freqs_so_far = freqs_so_far + nfreqs - 2\n\n def getatom_types(self, outlines, program):\n if program == \"Gaussian\":\n for i, oline in enumerate(outlines):\n if \"Input orientation\" in oline or \"Standard orientation\" in oline:\n self.atom_nums, self.atom_types, self.cartesians, self.atomictypes, carts = [], [], [], [], \\\n outlines[i + 5:]\n for j, line in enumerate(carts):\n if \"-------\" in line:\n break\n self.atom_nums.append(int(line.split()[1]))\n self.atom_types.append(element_id(int(line.split()[1])))\n self.atomictypes.append(int(line.split()[2]))\n if len(line.split()) > 5:\n self.cartesians.append(\n [float(line.split()[3]), float(line.split()[4]), float(line.split()[5])])\n else:\n self.cartesians.append(\n [float(line.split()[2]), float(line.split()[3]), float(line.split()[4])])\n if program == \"Orca\":\n for i, oline in enumerate(outlines):\n if \"*\" in oline and \">\" in oline and \"xyz\" in oline:\n self.atom_nums, self.atom_types, self.cartesians, carts = [], [], [], outlines[i + 1:]\n for j, line in enumerate(carts):\n if \">\" in line and \"*\" in line:\n break\n if len(line.split()) > 5:\n self.cartesians.append(\n [float(line.split()[3]), float(line.split()[4]), float(line.split()[5])])\n self.atom_types.append(line.split()[2])\n self.atom_nums.append(element_id(line.split()[2], num=True))\n else:\n self.cartesians.append(\n [float(line.split()[2]), float(line.split()[3]), float(line.split()[4])])\n 
self.atom_types.append(line.split()[1])\n self.atom_nums.append(element_id(line.split()[1], num=True))\n if program == \"NWChem\":\n for i, oline in enumerate(outlines):\n if \"Output coordinates\" in oline:\n self.atom_nums, self.atom_types, self.cartesians, self.atomictypes, carts = [], [], [], [], outlines[i+4:]\n for j, line in enumerate(carts):\n if line.strip()=='' :\n break\n self.atom_nums.append(int(float(line.split()[2])))\n self.atom_types.append(element_id(int(float(line.split()[2]))))\n self.atomictypes.append(int(float(line.split()[2])))\n self.cartesians.append([float(line.split()[3]),float(line.split()[4]),float(line.split()[5])])\n\n getatom_types(self, data, program)\n natoms = len(self.atom_types)\n try:\n get_freqs(self, data, natoms, program)\n except:\n pass\n\n # Convert coordinates to string that can be used by the symmetry.c program\n def coords_string(self):\n xyzstring = str(len(self.atom_nums)) + '\\n'\n for atom, xyz in zip(self.atom_nums, self.cartesians):\n xyzstring += \"{0} {1:.6f} {2:.6f} {3:.6f}\\n\".format(atom, *xyz)\n return xyzstring\n\n # Obtain molecule connectivity to be used for internal symmetry determination\n def get_connectivity(self):\n connectivity = []\n tolerance = 0.2\n\n for i, ai in enumerate(self.atom_types):\n row = []\n for j, aj in enumerate(self.atom_types):\n if i == j:\n continue\n cutoff = RADII[ai] + RADII[aj] + tolerance\n distance = np.linalg.norm(np.array(self.cartesians[i]) - np.array(self.cartesians[j]))\n if distance < cutoff:\n row.append(j)\n connectivity.append(row)\n self.connectivity = connectivity\n\ndef cosmo_rs_out(datfile, names, interval=False):\n \"\"\"\n Read solvation free energies from a COSMO-RS data file\n\n Parameters:\n datfile (str): name of COSMO-RS output file.\n names (list): list of species in COSMO-RS file that correspond to names of other computational output files.\n interval (bool): flag for parser to read COSMO-RS temperature interval calculation.\n \"\"\"\n gsolv = {}\n if os.path.exists(datfile):\n with open(datfile) as f:\n data = f.readlines()\n else:\n raise ValueError(\"File {} does not exist\".format(datfile))\n\n temp = 0\n t_interval = []\n gsolv_dicts = []\n found = False\n oldtemp = 0\n gsolv_temp = {}\n if interval:\n for i, line in enumerate(data):\n for name in names:\n if line.find('(' + name.split('.')[0] + ')') > -1 and line.find('Compound') > -1:\n if data[i - 5].find('Temperature') > -1:\n temp = data[i - 5].split()[2]\n if float(temp) > float(interval[0]) and float(temp) < float(interval[1]):\n if float(temp) not in t_interval:\n t_interval.append(float(temp))\n if data[i + 10].find('Gibbs') > -1:\n gsolv = float(data[i + 10].split()[6].strip()) / KCAL_TO_AU\n gsolv_temp[name] = gsolv\n\n found = True\n if found:\n if oldtemp is 0:\n oldtemp = temp\n if temp is not oldtemp:\n gsolv_dicts.append(gsolv) # Store dict at one temp\n gsolv = {} # Clear gsolv\n gsolv.update(gsolv_temp) # Grab the first one for the new temp\n oldtemp = temp\n gsolv.update(gsolv_temp)\n gsolv_temp = {}\n found = False\n gsolv_dicts.append(gsolv) # Grab last dict\n else:\n for i, line in enumerate(data):\n for name in names:\n if line.find('(' + name.split('.')[0] + ')') > -1 and line.find('Compound') > -1:\n if data[i + 11].find('Gibbs') > -1:\n gsolv = float(data[i + 11].split()[6].strip()) / KCAL_TO_AU\n gsolv[name] = gsolv\n\n if interval:\n return t_interval, gsolv_dicts\n else:\n return gsolv\n\ndef parse_data(file):\n \"\"\"\n Read computational chemistry output file.\n\n Attempt to obtain 
single point energy, program type, program version, solvation_model,\n charge, empirical_dispersion, and multiplicity from file.\n\n Parameter:\n file (str): name of file to be parsed.\n\n Returns:\n float: single point energy.\n str: program used to run calculation.\n str: version of program used to run calculation.\n str: solvation model used in chemical calculation (if any).\n str: original filename parsed.\n int: overall charge of molecule or chemical system.\n str: empirical dispersion used in chemical calculation (if any).\n int: multiplicity of molecule or chemical system.\n \"\"\"\n spe, program, data, version_program, solvation_model, keyword_line, a, charge, multiplicity = 'none', 'none', [], '', '', '', 0, None, None\n\n if os.path.exists(os.path.splitext(file)[0] + '.log'):\n with open(os.path.splitext(file)[0] + '.log') as f:\n data = f.readlines()\n elif os.path.exists(os.path.splitext(file)[0] + '.out'):\n with open(os.path.splitext(file)[0] + '.out') as f:\n data = f.readlines()\n else:\n raise ValueError(\"File {} does not exist\".format(file))\n\n for line in data:\n if \"Gaussian\" in line:\n program = \"Gaussian\"\n break\n if \"* O R C A *\" in line:\n program = \"Orca\"\n break\n if \"NWChem\" in line:\n program = \"NWChem\"\n break\n repeated_link1 = 0\n for line in data:\n if program == \"Gaussian\":\n if line.strip().startswith('SCF Done:'):\n spe = float(line.strip().split()[4])\n elif line.strip().startswith('Counterpoise corrected energy'):\n spe = float(line.strip().split()[4])\n # For MP2 calculations replace with EUMP2\n elif 'EUMP2 =' in line.strip():\n spe = float((line.strip().split()[5]).replace('D', 'E'))\n # For ONIOM calculations use the extrapolated value rather than SCF value\n elif \"ONIOM: extrapolated energy\" in line.strip():\n spe = (float(line.strip().split()[4]))\n # For G4 calculations look for G4 energies (Gaussian16a bug prints G4(0 K) as DE(HF)) --Brian modified to work for G16c-where bug is fixed.\n elif line.strip().startswith('G4(0 K)'):\n spe = float(line.strip().split()[2])\n spe -= zero_point_corr_G4 #Remove G4 ZPE\n elif line.strip().startswith('E(ZPE)='): #Get G4 ZPE\n zero_point_corr_G4 = float(line.strip().split()[1])\n # For TD calculations look for SCF energies of the first excited state\n elif 'E(TD-HF/TD-DFT)' in line.strip():\n spe = float(line.strip().split()[4])\n # For Semi-empirical or Molecular Mechanics calculations\n elif \"Energy= \" in line.strip() and \"Predicted\" not in line.strip() and \"Thermal\" not in line.strip() and \"G4\" not in line.strip():\n spe = (float(line.strip().split()[1]))\n elif \"Gaussian\" in line and \"Revision\" in line and repeated_link1 == 0:\n for i in range(len(line.strip(\",\").split(\",\")) - 1):\n line.strip(\",\").split(\",\")[i]\n version_program += line.strip(\",\").split(\",\")[i]\n repeated_link1 = 1\n version_program = version_program[1:]\n elif \"Charge\" in line.strip() and \"Multiplicity\" in line.strip():\n charge = int(line.split('Multiplicity')[0].split('=')[-1].strip())\n multiplicity = line.split('=')[-1].strip()\n if program == \"Orca\":\n if line.strip().startswith('FINAL SINGLE POINT ENERGY'):\n spe = float(line.strip().split()[4])\n if 'Program Version' in line.strip():\n version_program = \"ORCA version \" + line.split()[2]\n if \"Total Charge\" in line.strip() and \"....\" in line.strip():\n charge = int(line.strip(\"=\").split()[-1])\n if \"Multiplicity\" in line.strip() and \"....\" in line.strip():\n multiplicity = int(line.strip(\"=\").split()[-1])\n if program 
== \"NWChem\":\n if line.strip().startswith('Total DFT energy'):\n spe = float(line.strip().split()[4])\n if 'nwchem branch' in line.strip():\n version_program = \"NWChem version \" + line.split()[3]\n if \"charge\" in line.strip():\n charge = int(line.strip().split()[-1])\n if \"mult \" in line.strip():\n multiplicity = int(line.strip().split()[-1])\n\n # Solvation model and empirical dispersion detection\n if 'Gaussian' in version_program.strip():\n for i, line in enumerate(data):\n if '#' in line.strip() and a == 0:\n for j, line in enumerate(data[i:i + 10]):\n if '--' in line.strip():\n a = a + 1\n break\n if a != 0:\n break\n else:\n for k in range(len(line.strip().split(\"\\n\"))):\n line.strip().split(\"\\n\")[k]\n keyword_line += line.strip().split(\"\\n\")[k]\n keyword_line = keyword_line.lower()\n if 'scrf' not in keyword_line.strip():\n solvation_model = \"gas phase\"\n else:\n start_scrf = keyword_line.strip().find('scrf') + 4\n if '(' in keyword_line[start_scrf:start_scrf + 4]:\n start_scrf += keyword_line[start_scrf:start_scrf + 4].find('(') + 1\n end_scrf = keyword_line.find(\")\", start_scrf)\n display_solvation_model = \"scrf=(\" + ','.join(\n keyword_line[start_scrf:end_scrf].lower().split(',')) + ')'\n sorted_solvation_model = \"scrf=(\" + ','.join(\n sorted(keyword_line[start_scrf:end_scrf].lower().split(','))) + ')'\n else:\n if ' = ' in keyword_line[start_scrf:start_scrf + 4]:\n start_scrf += keyword_line[start_scrf:start_scrf + 4].find(' = ') + 3\n elif ' =' in keyword_line[start_scrf:start_scrf + 4]:\n start_scrf += keyword_line[start_scrf:start_scrf + 4].find(' =') + 2\n elif '=' in keyword_line[start_scrf:start_scrf + 4]:\n start_scrf += keyword_line[start_scrf:start_scrf + 4].find('=') + 1\n end_scrf = keyword_line.find(\" \", start_scrf)\n if end_scrf == -1:\n display_solvation_model = \"scrf=(\" + ','.join(keyword_line[start_scrf:].lower().split(',')) + ')'\n sorted_solvation_model = \"scrf=(\" + ','.join(\n sorted(keyword_line[start_scrf:].lower().split(','))) + ')'\n else:\n display_solvation_model = \"scrf=(\" + ','.join(\n keyword_line[start_scrf:end_scrf].lower().split(',')) + ')'\n sorted_solvation_model = \"scrf=(\" + ','.join(\n sorted(keyword_line[start_scrf:end_scrf].lower().split(','))) + ')'\n if solvation_model != \"gas phase\":\n solvation_model = [sorted_solvation_model, display_solvation_model]\n empirical_dispersion = ''\n if keyword_line.strip().find('empiricaldispersion') == -1 and keyword_line.strip().find(\n 'emp=') == -1 and keyword_line.strip().find('emp =') == -1 and keyword_line.strip().find('emp(') == -1:\n empirical_dispersion = \"No empirical dispersion detected\"\n elif keyword_line.strip().find('empiricaldispersion') > -1:\n start_emp_disp = keyword_line.strip().find('empiricaldispersion') + 19\n if '(' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('(') + 1\n end_emp_disp = keyword_line.find(\")\", start_emp_disp)\n empirical_dispersion = 'empiricaldispersion=(' + ','.join(\n sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'\n else:\n if ' = ' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' = ') + 3\n elif ' =' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' =') + 2\n elif '=' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += 
keyword_line[start_emp_disp:start_emp_disp + 4].find('=') + 1\n end_emp_disp = keyword_line.find(\" \", start_emp_disp)\n if end_emp_disp == -1:\n empirical_dispersion = \"empiricaldispersion=(\" + ','.join(\n sorted(keyword_line[start_emp_disp:].lower().split(','))) + ')'\n else:\n empirical_dispersion = \"empiricaldispersion=(\" + ','.join(\n sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'\n elif keyword_line.strip().find('emp=') > -1 or keyword_line.strip().find(\n 'emp =') > -1 or keyword_line.strip().find('emp(') > -1:\n # Check for temp keyword\n temp, emp_e, emp_p = False, False, False\n check_temp = keyword_line.strip().find('emp=')\n start_emp_disp = keyword_line.strip().find('emp=')\n if check_temp == -1:\n check_temp = keyword_line.strip().find('emp =')\n start_emp_disp = keyword_line.strip().find('emp =')\n if check_temp == -1:\n check_temp = keyword_line.strip().find('emp=(')\n start_emp_disp = keyword_line.strip().find('emp(')\n check_temp += -1\n if keyword_line[check_temp].lower() == 't':\n temp = True # Look for a new one\n if keyword_line.strip().find('emp=', check_temp + 5) > -1:\n emp_e = True\n start_emp_disp = keyword_line.strip().find('emp=', check_temp + 5) + 3\n elif keyword_line.strip().find('emp =', check_temp + 5) > -1:\n emp_e = True\n start_emp_disp = keyword_line.strip().find('emp =', check_temp + 5) + 3\n elif keyword_line.strip().find('emp(', check_temp + 5) > -1:\n emp_p = True\n start_emp_disp = keyword_line.strip().find('emp(', check_temp + 5) + 3\n else:\n empirical_dispersion = \"No empirical dispersion detected\"\n else:\n start_emp_disp += 3\n if (temp and emp_e) or (not temp and keyword_line.strip().find('emp=') > -1) or (\n not temp and keyword_line.strip().find('emp =')):\n if '(' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('(') + 1\n end_emp_disp = keyword_line.find(\")\", start_emp_disp)\n empirical_dispersion = 'empiricaldispersion=(' + ','.join(\n sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'\n else:\n if ' = ' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' = ') + 3\n elif ' =' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' =') + 2\n elif '=' in keyword_line[start_emp_disp:start_emp_disp + 4]:\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('=') + 1\n end_emp_disp = keyword_line.find(\" \", start_emp_disp)\n if end_emp_disp == -1:\n empirical_dispersion = \"empiricaldispersion=(\" + ','.join(\n sorted(keyword_line[start_emp_disp:].lower().split(','))) + ')'\n else:\n empirical_dispersion = \"empiricaldispersion=(\" + ','.join(\n sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'\n elif (temp and emp_p) or (not temp and keyword_line.strip().find('emp(') > -1):\n start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('(') + 1\n end_emp_disp = keyword_line.find(\")\", start_emp_disp)\n empirical_dispersion = 'empiricaldispersion=(' + ','.join(\n sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'\n if 'ORCA' in version_program.strip():\n keyword_line_1 = \"gas phase\"\n keyword_line_2 = ''\n keyword_line_3 = ''\n for i, line in enumerate(data):\n if 'CPCM SOLVATION MODEL' in line.strip():\n keyword_line_1 = \"CPCM,\"\n if 'SMD CDS free energy correction energy' in 
line.strip():\n keyword_line_2 = \"SMD,\"\n if \"Solvent: \" in line.strip():\n keyword_line_3 = line.strip().split()[-1]\n solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3\n empirical_dispersion1 = 'No empirical dispersion detected'\n empirical_dispersion2 = ''\n empirical_dispersion3 = ''\n for i, line in enumerate(data):\n if keyword_line.strip().find('DFT DISPERSION CORRECTION') > -1:\n empirical_dispersion1 = ''\n if keyword_line.strip().find('DFTD3') > -1:\n empirical_dispersion2 = \"D3\"\n if keyword_line.strip().find('USING zero damping') > -1:\n empirical_dispersion3 = ' with zero damping'\n empirical_dispersion = empirical_dispersion1 + empirical_dispersion2 + empirical_dispersion3\n if 'NWChem' in version_program.strip():\n # keyword_line_1 = \"gas phase\"\n # keyword_line_2 = ''\n # keyword_line_3 = ''\n # for i, line in enumerate(data):\n # if 'CPCM SOLVATION MODEL' in line.strip():\n # keyword_line_1 = \"CPCM,\"\n # if 'SMD CDS free energy correction energy' in line.strip():\n # keyword_line_2 = \"SMD,\"\n # if \"Solvent: \" in line.strip():\n # keyword_line_3 = line.strip().split()[-1]\n # solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3\n empirical_dispersion1 = 'No empirical dispersion detected'\n empirical_dispersion2 = ''\n empirical_dispersion3 = ''\n for i, line in enumerate(data):\n if keyword_line.strip().find('Dispersion correction') > -1:\n empirical_dispersion1 = ''\n if keyword_line.strip().find('disp vdw 3') > -1:\n empirical_dispersion2 = \"D3\"\n if keyword_line.strip().find('disp vdw 4') > -1:\n empirical_dispersion2 = \"D3BJ\"\n empirical_dispersion = empirical_dispersion1 + empirical_dispersion2 + empirical_dispersion3\n\n return spe, program, version_program, solvation_model, file, charge, empirical_dispersion, multiplicity\n\ndef sp_cpu(file):\n \"\"\"Read single-point output for cpu time.\"\"\"\n spe, program, data, cpu = None, None, [], None\n\n if os.path.exists(os.path.splitext(file)[0] + '.log'):\n with open(os.path.splitext(file)[0] + '.log') as f:\n data = f.readlines()\n elif os.path.exists(os.path.splitext(file)[0] + '.out'):\n with open(os.path.splitext(file)[0] + '.out') as f:\n data = f.readlines()\n else:\n raise ValueError(\"File {} does not exist\".format(file))\n\n for line in data:\n if line.find(\"Gaussian\") > -1:\n program = \"Gaussian\"\n break\n if line.find(\"* O R C A *\") > -1:\n program = \"Orca\"\n break\n if line.find(\"NWChem\") > -1:\n program = \"NWChem\"\n break\n\n for line in data:\n if program == \"Gaussian\":\n if line.strip().startswith('SCF Done:'):\n spe = float(line.strip().split()[4])\n if line.strip().find(\"Job cpu time\") > -1:\n days = int(line.split()[3])\n hours = int(line.split()[5])\n mins = int(line.split()[7])\n secs = 0\n msecs = int(float(line.split()[9]) * 1000.0)\n cpu = [days, hours, mins, secs, msecs]\n if program == \"Orca\":\n if line.strip().startswith('FINAL SINGLE POINT ENERGY'):\n spe = float(line.strip().split()[4])\n if line.strip().find(\"TOTAL RUN TIME\") > -1:\n days = int(line.split()[3])\n hours = int(line.split()[5])\n mins = int(line.split()[7])\n secs = int(line.split()[9])\n msecs = float(line.split()[11])\n cpu = [days, hours, mins, secs, msecs]\n if program == \"NWChem\":\n if line.strip().startswith('Total DFT energy ='):\n spe = float(line.strip().split()[4])\n if line.strip().find(\"Total times\") > -1:\n days = 0\n hours = 0\n mins = 0\n secs = float(line.split()[3][0:-1])\n msecs = 0\n cpu = [days,hours,mins,secs,msecs]\n\n return 
cpu\n\ndef level_of_theory(file):\n \"\"\"Read output for the level of theory and basis set used.\"\"\"\n repeated_theory = 0\n with open(file) as f:\n data = f.readlines()\n level, bs = 'none', 'none'\n\n for line in data:\n if line.strip().find('External calculation') > -1:\n level, bs = 'ext', 'ext'\n break\n if '\\\\Freq\\\\' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"\\\\\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n elif '|Freq|' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"|\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n if '\\\\SP\\\\' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"\\\\\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n elif '|SP|' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"|\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n if 'DLPNO BASED TRIPLES CORRECTION' in line.strip():\n level = 'DLPNO-CCSD(T)'\n if 'Estimated CBS total energy' in line.strip():\n try:\n bs = (\"Extrapol.\" + line.strip().split()[4])\n except IndexError:\n pass\n # Remove the restricted R or unrestricted U label\n if level[0] in ('R', 'U'):\n level = level[1:]\n level_of_theory = '/'.join([level, bs])\n return level_of_theory\n\ndef read_initial(file):\n \"\"\"At beginning of procedure, read level of theory, solvation model, and check for normal termination\"\"\"\n with open(file) as f:\n data = f.readlines()\n level, bs, program, keyword_line = 'none', 'none', 'none', 'none'\n progress, orientation = 'Incomplete', 'Input'\n a, repeated_theory = 0, 0\n no_grid = True\n DFT, dft_used, level, bs, scf_iradan, cphf_iradan = False, 'F', 'none', 'none', False, False\n grid_lookup = {1: 'sg1', 2: 'coarse', 4: 'fine', 5: 'ultrafine', 7: 'superfine'}\n\n for line in data:\n # Determine program\n if \"Gaussian\" in line:\n program = \"Gaussian\"\n break\n if \"* O R C A *\" in line:\n program = \"Orca\"\n break\n if \"NWChem\" in line:\n program = \"NWChem\"\n break\n for line in data:\n # Grab pertinent information from file\n if line.strip().find('External calculation') > -1:\n level, bs = 'ext', 'ext'\n if line.strip().find('Standard orientation:') > -1:\n orientation = 'Standard'\n if line.strip().find('IExCor=') > -1 and no_grid:\n try:\n dft_used = line.split('=')[2].split()[0]\n grid = grid_lookup[int(dft_used)]\n no_grid = False\n except:\n pass\n if '\\\\Freq\\\\' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"\\\\\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n elif '|Freq|' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"|\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n if '\\\\SP\\\\' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"\\\\\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n elif '|SP|' in line.strip() and repeated_theory == 0:\n try:\n level, bs = (line.strip().split(\"|\")[4:6])\n repeated_theory = 1\n except IndexError:\n pass\n if 'DLPNO BASED TRIPLES CORRECTION' in line.strip():\n level = 'DLPNO-CCSD(T)'\n if 'Estimated CBS total energy' in line.strip():\n try:\n bs = (\"Extrapol.\" + line.strip().split()[4])\n except IndexError:\n pass\n # Remove the restricted R or unrestricted U label\n if level[0] in ('R', 'U'):\n level = level[1:]\n\n #NWChem specific parsing\n if program is 'NWChem':\n keyword_line_1 = \"gas 
phase\"\n keyword_line_2 = ''\n keyword_line_3 = ''\n for i, line in enumerate(data):\n if line.strip().startswith(\"xc \"):\n level=line.strip().split()[1]\n if line.strip().startswith(\"* library \"):\n bs = line.strip().replace(\"* library \",'')\n #need to update these tags for NWChem solvation later\n if 'CPCM SOLVATION MODEL' in line.strip():\n keyword_line_1 = \"CPCM,\"\n if 'SMD CDS free energy correction energy' in line.strip():\n keyword_line_2 = \"SMD,\"\n if \"Solvent: \" in line.strip():\n keyword_line_3 = line.strip().split()[-1]\n #need to update NWChem keyword for error calculation\n if 'Total times' in line:\n progress = 'Normal'\n elif 'error termination' in line:\n progress = 'Error'\n solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3\n\n # Grab solvation models - Gaussian files\n if program is 'Gaussian':\n for i, line in enumerate(data):\n if '#' in line.strip() and a == 0:\n for j, line in enumerate(data[i:i + 10]):\n if '--' in line.strip():\n a = a + 1\n break\n if a != 0:\n break\n else:\n for k in range(len(line.strip().split(\"\\n\"))):\n line.strip().split(\"\\n\")[k]\n keyword_line += line.strip().split(\"\\n\")[k]\n if 'Normal termination' in line:\n progress = 'Normal'\n elif 'Error termination' in line:\n progress = 'Error'\n keyword_line = keyword_line.lower()\n if 'scrf' not in keyword_line.strip():\n solvation_model = \"gas phase\"\n else:\n start_scrf = keyword_line.strip().find('scrf') + 5\n if keyword_line[start_scrf] == \"(\":\n end_scrf = keyword_line.find(\")\", start_scrf)\n solvation_model = \"scrf=\" + keyword_line[start_scrf:end_scrf]\n if solvation_model[-1] != \")\":\n solvation_model = solvation_model + \")\"\n else:\n start_scrf2 = keyword_line.strip().find('scrf') + 4\n if keyword_line.find(\" \", start_scrf) > -1:\n end_scrf = keyword_line.find(\" \", start_scrf)\n else:\n end_scrf = len(keyword_line)\n if keyword_line[start_scrf2] == \"(\":\n solvation_model = \"scrf=(\" + keyword_line[start_scrf:end_scrf]\n if solvation_model[-1] != \")\":\n solvation_model = solvation_model + \")\"\n else:\n if keyword_line.find(\" \", start_scrf) > -1:\n end_scrf = keyword_line.find(\" \", start_scrf)\n else:\n end_scrf = len(keyword_line)\n solvation_model = \"scrf=\" + keyword_line[start_scrf:end_scrf]\n # ORCA parsing for solvation model\n elif program is 'Orca':\n keyword_line_1 = \"gas phase\"\n keyword_line_2 = ''\n keyword_line_3 = ''\n for i, line in enumerate(data):\n if 'CPCM SOLVATION MODEL' in line.strip():\n keyword_line_1 = \"CPCM,\"\n if 'SMD CDS free energy correction energy' in line.strip():\n keyword_line_2 = \"SMD,\"\n if \"Solvent: \" in line.strip():\n keyword_line_3 = line.strip().split()[-1]\n if 'ORCA TERMINATED NORMALLY' in line:\n progress = 'Normal'\n elif 'error termination' in line:\n progress = 'Error'\n solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3\n level_of_theory = '/'.join([level, bs])\n\n return level_of_theory, solvation_model, progress, orientation, dft_used\n\ndef jobtype(file):\n \"\"\"Read output for the level of theory and basis set used.\"\"\"\n with open(file) as f:\n data = f.readlines()\n job = ''\n for line in data:\n if line.strip().find('\\\\SP\\\\') > -1:\n job += 'SP'\n if line.strip().find('\\\\FOpt\\\\') > -1:\n job += 'GS'\n if line.strip().find('\\\\FTS\\\\') > -1:\n job += 'TS'\n if line.strip().find('\\\\Freq\\\\') > -1:\n job += 'Freq'\n return job\n"
] | [
[
"numpy.array"
]
] |
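A note on the `cosmo_rs_out` parser in the goodvibes record above: in the non-interval branch, `gsolv` is rebound from the accumulator dict to the parsed float and then immediately subscripted (`gsolv[name] = gsolv`), which raises `TypeError: 'float' object does not support item assignment` on the first matching compound. Below is a minimal sketch of the intended accumulation, with a fabricated two-line excerpt standing in for a real COSMO-RS `.dat` file (the real format places the Gibbs line 11 rows after the `Compound` header; the offset is shortened here):

```python
# Hedged sketch of cosmo_rs_out's non-interval branch with the dict kept
# intact: the parsed Gibbs energy is stored under the species name instead
# of rebinding `gsolv` itself. The excerpt below is fabricated for shape only.
KCAL_TO_AU = 627.509541

def parse_gsolv(lines, names):
    gsolv = {}
    for i, line in enumerate(lines):
        for name in names:
            if '(' + name.split('.')[0] + ')' in line and 'Compound' in line:
                if 'Gibbs' in lines[i + 1]:  # offset shortened for the demo
                    gsolv[name] = float(lines[i + 1].split()[6]) / KCAL_TO_AU
    return gsolv

demo = [
    "Compound 1 (ethanol)",
    "Gibbs free energy in kcal/mol : -3.14",
]
print(parse_gsolv(demo, ["ethanol.out"]))  # {'ethanol.out': -0.005004...}
```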
discohead/jesse | [
"5f025cc72adb33132b75a516f74f96b52ca12af3"
] | [
"jesse/indicators/mom.py"
] | [
"from typing import Union\n\nimport numpy as np\nimport talib\n\nfrom jesse.helpers import get_candle_source\nfrom jesse.helpers import get_config\n\n\ndef mom(candles: np.ndarray, period: int = 10, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n MOM - Momentum\n\n :param candles: np.ndarray\n :param period: int - default=10\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n warmup_candles_num = get_config('env.data.warmup_candles_num', 240)\n if not sequential and len(candles) > warmup_candles_num:\n candles = candles[-warmup_candles_num:]\n\n source = get_candle_source(candles, source_type=source_type)\n res = talib.MOM(source, timeperiod=period)\n\n if sequential:\n return res\n else:\n return None if np.isnan(res[-1]) else res[-1]\n"
] | [
[
"numpy.isnan"
]
] |
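The `mom` wrapper above delegates the arithmetic to `talib.MOM`, which is simply `x[t] - x[t - period]` with `NaN` for the first `period` entries. A dependency-free sketch of that kernel (local names, not part of jesse) makes the sequential/last-value handling checkable without a talib install:

```python
# Minimal NumPy stand-in for talib.MOM: res[t] = x[t] - x[t - period],
# with NaN padding over the warmup region, mirroring talib's output shape.
import numpy as np

def mom_numpy(source: np.ndarray, period: int = 10) -> np.ndarray:
    res = np.full(source.shape, np.nan)
    res[period:] = source[period:] - source[:-period]
    return res

closes = np.arange(20.0)        # linear ramp: momentum settles at `period`
seq = mom_numpy(closes, period=10)
print(seq[-1])                  # 10.0, matching the non-sequential return value
```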
ZhouHUB/pyIID | [
"6114fb5ae4388061c7aae9f5b0b2e41aa4ca4341"
] | [
"pyiid/experiments/elasticscatter/kernels/__init__.py"
] | [
"import math\nfrom numba import *\nfrom numba import cuda, f4, i4, int32\nimport numpy as np\nfrom builtins import range\n\n__author__ = 'christopher'\n\n\n@jit(target='cpu', nopython=True)\ndef ij_to_k(i, j):\n return int(j + i * (i - 1) / 2)\n\n\n@jit(target='cpu', nopython=True)\ndef k_to_ij(k):\n i = math.floor(float((1 + math.sqrt(1 + 8. * k))) / 2.)\n j = k - i * (i - 1) / 2.\n return i4(i), i4(j)\n\n\ndef symmetric_reshape(in_data):\n im, jm = k_to_ij(in_data.shape[0])\n out_data = np.zeros((im, im) + in_data.shape[1:])\n for k in range(in_data.shape[0]):\n i, j = k_to_ij(k)\n out_data[i, j] = in_data[k]\n out_data[j, i] = in_data[k]\n return out_data\n\n\ndef antisymmetric_reshape(in_data):\n im, jm = k_to_ij(in_data.shape[0])\n out_data = np.zeros((im, im) + in_data.shape[1:])\n for k in range(in_data.shape[0]):\n i, j = k_to_ij(k)\n out_data[i, j] = -1 * in_data[k]\n out_data[j, i] = in_data[k]\n return out_data\n\n\[email protected](device=True)\ndef cuda_k_to_ij(k):\n i = math.floor((f4(1) + f4(math.sqrt(f4(1) + f4(8.) * f4(k)))) * f4(.5))\n j = f4(k) - f4(i) * (f4(i) - f4(1)) * f4(.5)\n return i4(i), i4(j)\n\n\[email protected](device=True)\ndef cuda_ij_to_k(i, j):\n return int32(j + i * (i - 1) / 2)\n"
] | [
[
"numpy.zeros"
]
] |
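The pyIID kernels above flatten an `n x n` pairwise problem into its strict lower triangle: `k` runs over pairs `(i, j)` with `i > j`, and `symmetric_reshape`/`antisymmetric_reshape` invert that packing. A pure-Python round-trip check of the two index maps (no numba or CUDA needed):

```python
# k enumerates (i, j) with i > j row by row: (1,0), (2,0), (2,1), (3,0), ...
# so n atoms yield n*(n-1)/2 pair slots; the closed forms invert each other.
import math

def ij_to_k(i, j):
    return j + i * (i - 1) // 2

def k_to_ij(k):
    i = math.floor((1 + math.sqrt(1 + 8 * k)) / 2)
    j = k - i * (i - 1) // 2
    return i, j

n = 5
for k in range(n * (n - 1) // 2):
    i, j = k_to_ij(k)
    assert i > j >= 0 and ij_to_k(i, j) == k
print("round trip ok for", n * (n - 1) // 2, "pairs")
```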
Y-oHr-N/pretools | [
"5efc6f0ae3f131446ad1e5c4b635d9ddfaff1677"
] | [
"pretools/sklearn/splitters.py"
] | [
"\"\"\"Splitters.\"\"\"\n\nfrom typing import Iterator, Optional, Tuple\n\nimport numpy as np\nfrom sklearn.model_selection._split import _BaseKFold\nfrom sklearn.utils.validation import _num_samples\n\nfrom ..types import OneDimArrayLikeType, TwoDimArrayLikeType\n\n\ndef _unique_without_sort(array: OneDimArrayLikeType) -> np.ndarray:\n unique, index = np.unique(array, return_index=True)\n\n return unique[index.argsort()]\n\n\nclass GroupTimeSeriesSplit(_BaseKFold):\n \"\"\"Time series cross-validator variant with non-overlapping groups.\n\n Examples\n --------\n >>> import numpy as np\n >>> from pretools.sklearn.splitters import GroupTimeSeriesSplit\n >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])\n >>> y = np.array([1, 2, 3, 4, 5, 6])\n >>> groups = np.array([1, 1, 1, 2, 2, 3])\n >>> cv = GroupTimeSeriesSplit(n_splits=2)\n >>> for train, test in cv.split(X, y, groups):\n ... X_train, X_test = X[train], X[test]\n ... y_train, y_test = y[train], y[test]\n \"\"\"\n\n def __init__(\n self,\n n_splits: int = 5,\n max_train_size: Optional[int] = None,\n gap: int = 0,\n ) -> None:\n super().__init__(n_splits, shuffle=False, random_state=None)\n\n self.gap = gap\n self.max_train_size = max_train_size\n\n def split(\n self,\n X: Optional[TwoDimArrayLikeType] = None,\n y: Optional[OneDimArrayLikeType] = None,\n groups: Optional[OneDimArrayLikeType] = None,\n ) -> Iterator[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"Generate indices to split data into training and test set.\"\"\"\n unique = _unique_without_sort(groups)\n\n n_samples = _num_samples(groups)\n n_folds = self.n_splits + 1\n (n_groups,) = unique.shape\n\n if n_folds > n_groups:\n raise ValueError(\n f\"Cannot have number of folds ={n_folds} greater than the \"\n f\"number of groups: {n_groups}.\"\n )\n\n test_size = n_groups // n_folds\n\n if n_groups - self.gap - test_size * self.n_splits <= 0:\n raise ValueError(\n (\n f\"Too many splits={self.n_splits} for number of \"\n f\"groups={n_groups} with test_size={test_size} and \"\n f\"gap={self.gap}.\"\n )\n )\n\n indices = np.arange(n_samples)\n\n for i in range(self.n_splits):\n test_start = (i + 1) * test_size\n\n if i + 1 < self.n_splits:\n test_end = (i + 2) * test_size\n else:\n test_end = n_groups\n\n train_end = test_start - self.gap\n\n if self.max_train_size is None:\n is_train = np.isin(groups, unique[:train_end])\n else:\n is_train = np.isin(\n groups,\n unique[\n max(0, train_end - self.max_train_size) : train_end\n ],\n )\n\n is_test = np.isin(groups, unique[test_start:test_end])\n\n yield indices[is_train], indices[is_test]\n"
] | [
[
"numpy.arange",
"sklearn.utils.validation._num_samples",
"numpy.isin",
"numpy.unique"
]
] |
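The fold construction in `GroupTimeSeriesSplit.split` above can be traced by hand on the docstring's example: with three groups and two splits, each test fold is one group and all earlier groups train. A self-contained sketch of that index arithmetic (local names, not the package; `np.unique` suffices here because the groups already arrive in time order):

```python
# Reproduces GroupTimeSeriesSplit.split for the docstring example:
# groups [1,1,1,2,2,3], n_splits=2, gap=0.
import numpy as np

groups = np.array([1, 1, 1, 2, 2, 3])
n_splits, gap = 2, 0

unique = np.unique(groups)                # time-ordered in this example
n_groups = len(unique)
test_size = n_groups // (n_splits + 1)    # 3 // 3 == 1 group per test fold
indices = np.arange(len(groups))

for i in range(n_splits):
    test_start = (i + 1) * test_size
    test_end = (i + 2) * test_size if i + 1 < n_splits else n_groups
    train_end = test_start - gap
    train = indices[np.isin(groups, unique[:train_end])]
    test = indices[np.isin(groups, unique[test_start:test_end])]
    print(train, test)
# [0 1 2] [3 4]
# [0 1 2 3 4] [5]
```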
phaustin/jb_test | [
"e25e3dcad07b860ddd3cc151295bdacad54ead8e"
] | [
"quantecon_flat/mini_book/docs/_static/lecture_specific/pandas/wb_download.py"
] | [
"import matplotlib.pyplot as plt\nimport requests\nimport pandas as pd\n\n# == Get data and read into file gd.xls == #\nwb_data_query = (\n \"http://api.worldbank.org/v2/en/indicator/gc.dod.totl.gd.zs?downloadformat=excel\"\n)\nr = requests.get(wb_data_query)\nwith open(\"gd.xls\", \"wb\") as output:\n output.write(r.content)\n\n# == Parse data into a DataFrame == #\ngovt_debt = pd.read_excel(\"gd.xls\", sheet_name=\"Data\", skiprows=3, index_col=1)\n\n# == Take desired values and plot == #\ngovt_debt = govt_debt.transpose()\ngovt_debt = govt_debt[[\"AUS\", \"USA\"]]\ngovt_debt = govt_debt[38:]\ngovt_debt.plot(lw=2)\nplt.show()\n"
] | [
[
"pandas.read_excel",
"matplotlib.pyplot.show"
]
] |
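The download script above depends on the World Bank API being reachable; the reshaping step is the part worth isolating. The sheet arrives with countries as rows and years as columns, so it is transposed before the country codes are selected. An offline sketch on synthetic data (the frame below is made up, not API output):

```python
# Offline stand-in for the gd.xls sheet: countries as rows, years as columns,
# transposed so each country becomes a plottable time series.
import pandas as pd
import matplotlib.pyplot as plt

raw = pd.DataFrame(
    {str(year): [year - 1990.0, year - 1985.0] for year in range(1990, 1995)},
    index=["AUS", "USA"],
)
govt_debt = raw.transpose()[["AUS", "USA"]]  # index: years, columns: countries
govt_debt.plot(lw=2)
plt.show()
```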
AhmedSamySaad/MAC | [
"be045066d6416d7a9d7ca7f23923448e0edca433"
] | [
"src/model_test.py"
] | [
"\nimport os\nimport time\nimport datetime\nimport random\nimport json\nimport argparse\nimport numpy as np\nimport keras.backend as K\n# from sklearn.metrics import confusion_matrix\nimport tensorflow as tf\nfrom sklearn.metrics import f1_score\n# from keras.optimizers import Adam\nfrom tensorflow.keras.optimizers import Adam\nfrom keras.utils import np_utils\nfrom keras.models import load_model\nimport data_loader\nfrom keras.models import model_from_json\n\n\nimport densenet\nim_size = 320\n\n\n\nmodel = densenet.DenseNet(nb_classes=1, img_dim=(320,320,1), depth=22, nb_dense_block=4, growth_rate=12, nb_filter=16, dropout_rate=0.2, weight_decay=1E-4)\n# model.load_weights('./save_models/[email protected]') #model_10_epochs\nmodel.load_weights('../models/XR_HUMERUS/[email protected]') #model_52_epochs\n# model.load_weights('./save_models/[email protected]')\nX_valid_path, Y_valid = data_loader.load_path(root_path = '../valid/XR_HUMERUS', size = im_size)\nX_valid = data_loader.load_image(X_valid_path,im_size)\n\ny1 = model.predict(X_valid, batch_size=None, verbose=0, steps=None)\nprediction= [] #othman edit\nj = len(y1)\n\nfor i in range (0, j):\n\tif y1[i]>0.5 :\n\t\t# print(X_valid_path[i],\":\\t\",\"Positive\\t\", y1[i])\n\t\tprediction.append(1) #othman edit\n\telse:\n\t\t# print(X_valid_path[i],\":\\t\",\"Negative\\t\", y1[i])\n\t\tprediction.append(0) #othman edit\n\n\n# print(len(Y_valid)== len(prediction)) #othman edit\n\nY_valid = np.array(Y_valid)\nprediction= np.array(prediction)\nprint(\"Accuracy: \",(Y_valid==prediction).sum()/len(Y_valid))\n\nprint(f\"confusion_matrix: {tf.math.confusion_matrix(Y_valid, prediction)}\")\nprint(f\"f_1 score: {f1_score(Y_valid, prediction)}\")"
] | [
[
"sklearn.metrics.f1_score",
"numpy.array",
"tensorflow.math.confusion_matrix"
]
] |
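The evaluation loop in `model_test.py` above thresholds sigmoid scores at 0.5 one element at a time; the same step vectorizes to a single comparison, which is easier to sanity-check without the DenseNet weights. A sketch on made-up scores (sklearn's `confusion_matrix` stands in for the `tf.math` version used in the record):

```python
# Threshold sigmoid outputs at 0.5 and score them; y_score is fabricated
# to mimic model.predict on five validation images.
import numpy as np
from sklearn.metrics import confusion_matrix, f1_score

y_true = np.array([1, 0, 1, 1, 0])
y_score = np.array([0.91, 0.40, 0.62, 0.33, 0.08])

prediction = (y_score > 0.5).astype(int)  # replaces the element-wise append loop
print("Accuracy:", (y_true == prediction).mean())   # 0.8
print(confusion_matrix(y_true, prediction))         # [[2 0] [1 2]]
print("f1:", f1_score(y_true, prediction))          # 0.8
```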
wbo4958/cudf | [
"b12f24d25815145a573a9d70f8c6140a8ab9d2cb"
] | [
"python/cudf/cudf/core/index.py"
] | [
"# Copyright (c) 2018, NVIDIA CORPORATION.\n\nfrom __future__ import division, print_function\n\nimport functools\nimport pickle\n\nimport cupy\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\n\nimport cudf\nfrom cudf._lib.nvtx import annotate\nfrom cudf.core.column import (\n CategoricalColumn,\n ColumnBase,\n DatetimeColumn,\n NumericalColumn,\n StringColumn,\n column,\n)\nfrom cudf.core.frame import Frame\nfrom cudf.utils import ioutils, utils\nfrom cudf.utils.docutils import copy_docstring\nfrom cudf.utils.dtypes import is_categorical_dtype, is_scalar, min_signed_type\nfrom cudf.utils.utils import cached_property\n\n\ndef _to_frame(this_index, index=True, name=None):\n \"\"\"Create a DataFrame with a column containing this Index\n\n Parameters\n ----------\n index : boolean, default True\n Set the index of the returned DataFrame as the original Index\n name : str, default None\n Name to be used for the column\n\n Returns\n -------\n DataFrame\n cudf DataFrame\n \"\"\"\n\n from cudf import DataFrame\n\n if name is not None:\n col_name = name\n elif this_index.name is None:\n col_name = 0\n else:\n col_name = this_index.name\n\n return DataFrame(\n {col_name: this_index._values}, index=this_index if index else None\n )\n\n\nclass Index(Frame):\n \"\"\"The root interface for all Series indexes.\n \"\"\"\n\n def serialize(self):\n \"\"\"Serialize into pickle format suitable for file storage or network\n transmission.\n \"\"\"\n header = {}\n header[\"index_column\"] = {}\n # store metadata values of index separately\n # Indexes: Numerical/DateTime/String are often GPU backed\n header[\"index_column\"], frames = self._values.serialize()\n\n header[\"name\"] = pickle.dumps(self.name)\n header[\"dtype\"] = pickle.dumps(self.dtype)\n header[\"type-serialized\"] = pickle.dumps(type(self))\n header[\"frame_count\"] = len(frames)\n return header, frames\n\n def __contains__(self, item):\n return item in self._values\n\n def get_level_values(self, level):\n if level == self.name:\n return self\n elif pd.api.types.is_integer(level):\n if level != 0:\n raise IndexError(\n f\"Cannot get level: {level} \" f\"for index with 1 level\"\n )\n return self\n else:\n raise KeyError(f\"Requested level with name {level} \" \"not found\")\n\n def _mimic_inplace(self, other, inplace=False):\n if inplace is True:\n col = self._data[self.name]\n col._mimic_inplace(other._data[other.name], inplace=True)\n else:\n return other\n\n @classmethod\n def deserialize(cls, header, frames):\n \"\"\"\n \"\"\"\n h = header[\"index_column\"]\n idx_typ = pickle.loads(header[\"type-serialized\"])\n name = pickle.loads(header[\"name\"])\n\n col_typ = pickle.loads(h[\"type-serialized\"])\n index = col_typ.deserialize(h, frames[: header[\"frame_count\"]])\n return idx_typ(index, name=name)\n\n @property\n def names(self):\n return (self.name,)\n\n @names.setter\n def names(self, values):\n if not pd.api.types.is_list_like(values):\n raise ValueError(\"Names must be a list-like\")\n\n num_values = len(values)\n if num_values > 1:\n raise ValueError(\n \"Length of new names must be 1, got %d\" % num_values\n )\n\n self.name = values[0]\n\n @property\n def name(self):\n return next(iter(self._data.names))\n\n @name.setter\n def name(self, value):\n col = self._data.pop(self.name)\n self._data[value] = col\n\n def dropna(self):\n \"\"\"\n Return a Series with null values removed.\n \"\"\"\n return super().dropna(subset=[self.name])\n\n def take(self, indices):\n \"\"\"Gather only the specific subset of indices\n\n 
Parameters\n ---\n indices: An array-like that maps to values contained in this Index.\n \"\"\"\n return self[indices]\n\n def argsort(self, ascending=True):\n indices = self._values.argsort(ascending=ascending)\n indices.name = self.name\n return indices\n\n @property\n def values(self):\n if is_categorical_dtype(self.dtype) or np.issubdtype(\n self.dtype, np.dtype(\"object\")\n ):\n raise TypeError(\"Data must be numeric\")\n if len(self) == 0:\n return cupy.asarray([], dtype=self.dtype)\n if self._values.null_count > 0:\n raise ValueError(\"Column must have no nulls.\")\n\n return cupy.asarray(self._values.data_array_view)\n\n def to_pandas(self):\n return pd.Index(self._values.to_pandas(), name=self.name)\n\n def to_arrow(self):\n return self._values.to_arrow()\n\n @ioutils.doc_to_dlpack()\n def to_dlpack(self):\n \"\"\"{docstring}\"\"\"\n import cudf.io.dlpack as dlpack\n\n return dlpack.to_dlpack(self)\n\n @property\n def gpu_values(self):\n return self._values.data_array_view\n\n def min(self):\n return self._values.min()\n\n def max(self):\n return self._values.max()\n\n def sum(self):\n return self._values.sum()\n\n @classmethod\n def _concat(cls, objs):\n data = ColumnBase._concat([o._values for o in objs])\n names = {obj.name for obj in objs}\n if len(names) == 1:\n [name] = names\n else:\n name = None\n result = as_index(data)\n result.name = name\n return result\n\n def _apply_op(self, fn, other=None):\n from cudf.core.series import Series\n\n idx_series = Series(self, name=self.name)\n op = getattr(idx_series, fn)\n if other is not None:\n return as_index(op(other))\n else:\n return as_index(op())\n\n def unique(self):\n return as_index(self._values.unique(), name=self.name)\n\n def __add__(self, other):\n return self._apply_op(\"__add__\", other)\n\n def __radd__(self, other):\n return self._apply_op(\"__radd__\", other)\n\n def __sub__(self, other):\n return self._apply_op(\"__sub__\", other)\n\n def __rsub__(self, other):\n return self._apply_op(\"__rsub__\", other)\n\n def __mul__(self, other):\n return self._apply_op(\"__mul__\", other)\n\n def __rmul__(self, other):\n return self._apply_op(\"__rmul__\", other)\n\n def __mod__(self, other):\n return self._apply_op(\"__mod__\", other)\n\n def __rmod__(self, other):\n return self._apply_op(\"__rmod__\", other)\n\n def __pow__(self, other):\n return self._apply_op(\"__pow__\", other)\n\n def __floordiv__(self, other):\n return self._apply_op(\"__floordiv__\", other)\n\n def __rfloordiv__(self, other):\n return self._apply_op(\"__rfloordiv__\", other)\n\n def __truediv__(self, other):\n return self._apply_op(\"__truediv__\", other)\n\n def __rtruediv__(self, other):\n return self._apply_op(\"__rtruediv__\", other)\n\n __div__ = __truediv__\n\n def __and__(self, other):\n return self._apply_op(\"__and__\", other)\n\n def __or__(self, other):\n return self._apply_op(\"__or__\", other)\n\n def __xor__(self, other):\n return self._apply_op(\"__xor__\", other)\n\n def __eq__(self, other):\n return self._apply_op(\"__eq__\", other)\n\n def __ne__(self, other):\n return self._apply_op(\"__ne__\", other)\n\n def __lt__(self, other):\n return self._apply_op(\"__lt__\", other)\n\n def __le__(self, other):\n return self._apply_op(\"__le__\", other)\n\n def __gt__(self, other):\n return self._apply_op(\"__gt__\", other)\n\n def __ge__(self, other):\n return self._apply_op(\"__ge__\", other)\n\n @annotate(\"INDEX_EQUALS\", color=\"green\", domain=\"cudf_python\")\n def equals(self, other):\n if self is other:\n return True\n if len(self) 
!= len(other):\n return False\n elif len(self) == 1:\n val = self[0] == other[0]\n # when self is multiindex we need to checkall\n if isinstance(val, np.ndarray):\n return val.all()\n return bool(val)\n else:\n result = self == other\n if isinstance(result, bool):\n return result\n else:\n return result._values.all()\n\n def join(self, other, method, how=\"left\", return_indexers=False):\n column_join_res = self._values.join(\n other._values,\n how=how,\n return_indexers=return_indexers,\n method=method,\n )\n if return_indexers:\n joined_col, indexers = column_join_res\n joined_index = as_index(joined_col)\n return joined_index, indexers\n else:\n return column_join_res\n\n def rename(self, name, inplace=False):\n \"\"\"\n Alter Index name.\n\n Defaults to returning new index.\n\n Parameters\n ----------\n name : label\n Name(s) to set.\n\n Returns\n -------\n Index\n\n \"\"\"\n if inplace is True:\n self.name = name\n return None\n else:\n out = self.copy(deep=False)\n out.name = name\n return out.copy(deep=True)\n\n def astype(self, dtype):\n \"\"\"Convert to the given ``dtype``.\n\n Returns\n -------\n If the dtype changed, a new ``Index`` is returned by casting each\n values to the given dtype.\n If the dtype is not changed, ``self`` is returned.\n \"\"\"\n if pd.api.types.is_dtype_equal(dtype, self.dtype):\n return self\n\n return as_index(self._values.astype(dtype), name=self.name)\n\n def to_array(self, fillna=None):\n \"\"\"Get a dense numpy array for the data.\n\n Parameters\n ----------\n fillna : str or None\n Defaults to None, which will skip null values.\n If it equals \"pandas\", null values are filled with NaNs.\n Non integral dtype is promoted to np.float64.\n\n Notes\n -----\n\n if ``fillna`` is ``None``, null values are skipped. Therefore, the\n output size could be smaller.\n \"\"\"\n return self._values.to_array(fillna=fillna)\n\n def to_series(self):\n from cudf.core.series import Series\n\n return Series(self._values)\n\n @property\n def is_unique(self):\n raise (NotImplementedError)\n\n @property\n def is_monotonic(self):\n return self.is_monotonic_increasing\n\n @property\n def is_monotonic_increasing(self):\n raise (NotImplementedError)\n\n @property\n def is_monotonic_decreasing(self):\n raise (NotImplementedError)\n\n def get_slice_bound(self, label, side, kind):\n raise (NotImplementedError)\n\n def __array_function__(self, func, types, args, kwargs):\n from cudf.core.series import Series\n\n # check if the function is implemented for the current type\n cudf_index_module = type(self)\n for submodule in func.__module__.split(\".\")[1:]:\n # point cudf_index_module to the correct submodule\n if hasattr(cudf_index_module, submodule):\n cudf_index_module = getattr(cudf_index_module, submodule)\n else:\n return NotImplemented\n\n fname = func.__name__\n\n handled_types = [Index, Series]\n\n # check if we don't handle any of the types (including sub-class)\n for t in types:\n if not any(\n issubclass(t, handled_type) for handled_type in handled_types\n ):\n return NotImplemented\n\n if hasattr(cudf_index_module, fname):\n cudf_func = getattr(cudf_index_module, fname)\n # Handle case if cudf_func is same as numpy function\n if cudf_func is func:\n return NotImplemented\n else:\n return cudf_func(*args, **kwargs)\n\n else:\n return NotImplemented\n\n def isin(self, values):\n \"\"\"Return a boolean array where the index values are in values.\n\n Compute boolean array of whether each index value is found in\n the passed set of values. 
The length of the returned boolean\n array matches the length of the index.\n\n Parameters\n ----------\n values : set, list-like, Index\n Sought values.\n\n Returns\n -------\n is_contained : cupy array\n CuPy array of boolean values.\n\n \"\"\"\n\n result = self.to_series().isin(values).values\n\n return result\n\n def where(self, cond, other=None):\n \"\"\"\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : bool array-like with the same length as self\n Where cond is True, keep the original value.\n Where False, replace with corresponding value from other.\n Callables are not supported.\n other: scalar, or array-like\n Entries where cond is False are replaced with\n corresponding value from other. Callables are not\n supported. Default is None.\n\n Returns\n -------\n Same type as caller\n \"\"\"\n return super().where(cond=cond, other=other)\n\n @property\n def __cuda_array_interface__(self):\n raise (NotImplementedError)\n\n def memory_usage(self, deep=False):\n return self._values._memory_usage(deep=deep)\n\n @classmethod\n def from_pandas(cls, index):\n if not isinstance(index, pd.Index):\n raise TypeError(\"not a pandas.Index\")\n\n ind = as_index(pa.Array.from_pandas(index))\n ind.name = index.name\n return ind\n\n @classmethod\n def _from_table(cls, table):\n if not isinstance(table, RangeIndex):\n if table._num_columns == 0:\n raise ValueError(\"Cannot construct Index from any empty Table\")\n if table._num_columns == 1:\n values = next(iter(table._data.values()))\n\n if isinstance(values, NumericalColumn):\n out = GenericIndex.__new__(GenericIndex)\n elif isinstance(values, DatetimeColumn):\n out = DatetimeIndex.__new__(DatetimeIndex)\n elif isinstance(values, StringColumn):\n out = StringIndex.__new__(StringIndex)\n elif isinstance(values, CategoricalColumn):\n out = CategoricalIndex.__new__(CategoricalIndex)\n out._data = table._data\n out._index = None\n return out\n else:\n return cudf.MultiIndex._from_table(\n table, names=table._data.names\n )\n else:\n return as_index(table)\n\n\nclass RangeIndex(Index):\n \"\"\"An iterable integer index defined by a starting value and ending value.\n Can be sliced and indexed arbitrarily without allocating memory for the\n complete structure.\n\n Properties\n ---\n _start: The first value\n _stop: The last value\n name: Name of the index\n \"\"\"\n\n def __init__(self, start, stop=None, name=None):\n \"\"\"RangeIndex(size), RangeIndex(start, stop)\n\n Parameters\n ----------\n start, stop: int\n name: string\n \"\"\"\n if isinstance(start, range):\n therange = start\n start = therange.start\n stop = therange.stop\n if stop is None:\n start, stop = 0, start\n self._start = int(start)\n self._stop = int(stop)\n self._cached_values = None\n self._index = None\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n\n @property\n def _num_columns(self):\n return 1\n\n @property\n def _num_rows(self):\n return len(self)\n\n @cached_property\n def _values(self):\n if len(self) > 0:\n vals = cupy.arange(self._start, self._stop, dtype=self.dtype)\n return column.as_column(vals)\n else:\n return column.column_empty(0, masked=False, dtype=self.dtype)\n\n @property\n def _data(self):\n from cudf.core.column_accessor import ColumnAccessor\n\n return ColumnAccessor({self.name: self._values})\n\n def __contains__(self, item):\n if not isinstance(\n item, tuple(np.sctypes[\"int\"] + np.sctypes[\"float\"] + [int, float])\n ):\n return 
False\n if not item % 1 == 0:\n return False\n if self._start <= item < self._stop:\n return True\n else:\n return False\n\n def copy(self, deep=True):\n return RangeIndex(start=self._start, stop=self._stop, name=self.name)\n\n def __repr__(self):\n return (\n \"{}(start={}, stop={}\".format(\n self.__class__.__name__, self._start, self._stop\n )\n + (\n \", name='{}'\".format(str(self.name))\n if self.name is not None\n else \"\"\n )\n + \")\"\n )\n\n def __len__(self):\n return max(0, self._stop - self._start)\n\n def __getitem__(self, index):\n from numbers import Number\n\n if isinstance(index, slice):\n start, stop, step = index.indices(len(self))\n sln = (stop - start) // step\n sln = max(0, sln)\n start += self._start\n stop += self._start\n if sln == 0:\n return RangeIndex(0, None, self.name)\n elif step == 1:\n return RangeIndex(start, stop, self.name)\n else:\n return index_from_range(start, stop, step)\n\n elif isinstance(index, Number):\n index = utils.normalize_index(index, len(self))\n index += self._start\n return index\n else:\n if is_scalar(index):\n index = min_signed_type(index)(index)\n index = column.as_column(index)\n\n return as_index(self._values[index], name=self.name)\n\n def __eq__(self, other):\n return super(type(self), self).__eq__(other)\n\n def __reduce__(self):\n return (RangeIndex, (self._start, self._stop, self.name))\n\n def equals(self, other):\n if self is other:\n return True\n if len(self) != len(other):\n return False\n if isinstance(other, cudf.core.index.RangeIndex):\n return self._start == other._start and self._stop == other._stop\n else:\n return (self == other)._values.all()\n\n def serialize(self):\n \"\"\"Serialize Index file storage or network transmission.\n \"\"\"\n header = {}\n header[\"index_column\"] = {}\n\n # store metadata values of index separately\n # We don't need to store the GPU buffer for RangeIndexes\n # cuDF only needs to store start/stop and rehydrate\n # during de-serialization\n header[\"index_column\"][\"start\"] = self._start\n header[\"index_column\"][\"stop\"] = self._stop\n frames = []\n\n header[\"name\"] = pickle.dumps(self.name)\n header[\"dtype\"] = pickle.dumps(self.dtype)\n header[\"type-serialized\"] = pickle.dumps(type(self))\n header[\"frame_count\"] = 0\n return header, frames\n\n @classmethod\n def deserialize(cls, header, frames):\n \"\"\"\n \"\"\"\n h = header[\"index_column\"]\n name = pickle.loads(header[\"name\"])\n start = h[\"start\"]\n stop = h[\"stop\"]\n return RangeIndex(start=start, stop=stop, name=name)\n\n @property\n def dtype(self):\n return np.dtype(np.int64)\n\n @property\n def is_contiguous(self):\n return True\n\n @property\n def size(self):\n return max(0, self._stop - self._start)\n\n def find_label_range(self, first, last):\n # clip first to range\n if first is None or first < self._start:\n begin = self._start\n elif first < self._stop:\n begin = first\n else:\n begin = self._stop\n # clip last to range\n if last is None:\n end = self._stop\n elif last < self._start:\n end = begin\n elif last < self._stop:\n end = last + 1\n else:\n end = self._stop\n # shift to index\n return begin - self._start, end - self._start\n\n @copy_docstring(_to_frame)\n def to_frame(self, index=True, name=None):\n return _to_frame(self, index, name)\n\n def to_gpu_array(self):\n return self._values.to_gpu_array()\n\n def to_pandas(self):\n return pd.RangeIndex(\n start=self._start,\n stop=self._stop,\n dtype=self.dtype,\n name=self.name,\n )\n\n @property\n def is_unique(self):\n return True\n\n 
@property\n def is_monotonic_increasing(self):\n return self._start <= self._stop\n\n @property\n def is_monotonic_decreasing(self):\n return self._start >= self._stop\n\n def get_slice_bound(self, label, side, kind):\n if label < self._start:\n return 0\n elif label >= self._stop:\n return len(self)\n else:\n if side == \"left\":\n return label - self._start\n elif side == \"right\":\n return (label - self._start) + 1\n\n @property\n def __cuda_array_interface__(self):\n return self._values.__cuda_array_interface__\n\n def memory_usage(self, **kwargs):\n return 0\n\n def unique(self):\n # RangeIndex always has unique values\n return self\n\n\ndef index_from_range(start, stop=None, step=None):\n vals = cupy.arange(start, stop, step, dtype=np.int64)\n return as_index(vals)\n\n\nclass GenericIndex(Index):\n \"\"\"An array of orderable values that represent the indices of another Column\n\n Attributes\n ---\n _values: A Column object\n name: A string\n \"\"\"\n\n def __init__(self, values, **kwargs):\n \"\"\"\n Parameters\n ----------\n values : Column\n The Column of values for this index\n name : str optional\n The name of the Index. If not provided, the Index adopts the value\n Column's name. Otherwise if this name is different from the value\n Column's, the values Column will be cloned to adopt this name.\n \"\"\"\n from cudf.core.series import Series\n\n kwargs = _setdefault_name(values, kwargs)\n\n # normalize the input\n if isinstance(values, Series):\n values = values._column\n elif isinstance(values, column.ColumnBase):\n values = values\n else:\n if isinstance(values, (list, tuple)):\n if len(values) == 0:\n values = np.asarray([], dtype=\"int64\")\n else:\n values = np.asarray(values)\n values = column.as_column(values)\n assert isinstance(values, (NumericalColumn, StringColumn))\n\n name = kwargs.get(\"name\")\n super().__init__({name: values})\n\n @property\n def _values(self):\n return next(iter(self._data.columns))\n\n def copy(self, deep=True):\n result = as_index(self._values.copy(deep=deep))\n result.name = self.name\n return result\n\n def __sizeof__(self):\n return self._values.__sizeof__()\n\n def __reduce__(self):\n _maker = functools.partial(\n self.__class__, self._values, name=self.name\n )\n\n return _maker, ()\n\n def __len__(self):\n return len(self._values)\n\n def __repr__(self):\n from pandas._config import get_option\n\n max_seq_items = get_option(\"max_seq_items\") or len(self)\n mr = 0\n if 2 * max_seq_items < len(self):\n mr = max_seq_items + 1\n\n if len(self) > mr and mr != 0:\n top = self[0:mr]\n bottom = self[-1 * mr :]\n from cudf import concat\n\n preprocess = concat([top, bottom])\n else:\n preprocess = self\n if preprocess._values.nullable:\n output = (\n self.__class__(preprocess._values.astype(\"O\").fillna(\"null\"))\n .to_pandas()\n .__repr__()\n )\n else:\n output = preprocess.to_pandas().__repr__()\n\n lines = output.split(\"\\n\")\n if len(lines) > 1:\n tmp_meta = lines[-1]\n prior_to_dtype = lines[-1].split(\"dtype\")[0]\n lines = lines[:-1]\n lines.append(prior_to_dtype + \"dtype='%s'\" % self.dtype)\n if self.name is not None:\n lines[-1] = lines[-1] + \", name='%s'\" % self.name\n if \"length\" in tmp_meta:\n lines[-1] = lines[-1] + \", length=%d)\" % len(self)\n else:\n lines[-1] = lines[-1] + \")\"\n\n return \"\\n\".join(lines)\n\n def __getitem__(self, index):\n res = self._values[index]\n if not isinstance(index, int):\n res = as_index(res)\n res.name = self.name\n return res\n else:\n return res\n\n @copy_docstring(_to_frame)\n def 
to_frame(self, index=True, name=None):\n return _to_frame(self, index, name)\n\n @property\n def dtype(self):\n return self._values.dtype\n\n def find_label_range(self, first, last):\n \"\"\"Find range that starts with *first* and ends with *last*,\n inclusively.\n\n Returns\n -------\n begin, end : 2-tuple of int\n The starting index and the ending index.\n The *last* value occurs at ``end - 1`` position.\n \"\"\"\n col = self._values\n begin, end = None, None\n if first is not None:\n begin = col.find_first_value(first, closest=True)\n if last is not None:\n end = col.find_last_value(last, closest=True)\n end += 1\n return begin, end\n\n @property\n def is_unique(self):\n return self._values.is_unique\n\n @property\n def is_monotonic(self):\n return self._values.is_monotonic\n\n @property\n def is_monotonic_increasing(self):\n return self._values.is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self):\n return self._values.is_monotonic_decreasing\n\n def get_slice_bound(self, label, side, kind):\n return self._values.get_slice_bound(label, side, kind)\n\n @property\n def __cuda_array_interface__(self):\n return self._values.__cuda_array_interface__\n\n\nclass DatetimeIndex(GenericIndex):\n # TODO this constructor should take a timezone or something to be\n # consistent with pandas\n def __init__(self, values, **kwargs):\n # we should be more strict on what we accept here but\n # we'd have to go and figure out all the semantics around\n # pandas dtindex creation first which. For now\n # just make sure we handle np.datetime64 arrays\n # and then just dispatch upstream\n kwargs = _setdefault_name(values, kwargs)\n if isinstance(values, np.ndarray) and values.dtype.kind == \"M\":\n values = column.as_column(values)\n elif isinstance(values, pd.DatetimeIndex):\n values = column.as_column(values.values)\n elif isinstance(values, (list, tuple)):\n values = column.as_column(np.array(values, dtype=\"<M8[ms]\"))\n super(DatetimeIndex, self).__init__(values, **kwargs)\n\n @property\n def year(self):\n return self.get_dt_field(\"year\")\n\n @property\n def month(self):\n return self.get_dt_field(\"month\")\n\n @property\n def day(self):\n return self.get_dt_field(\"day\")\n\n @property\n def hour(self):\n return self.get_dt_field(\"hour\")\n\n @property\n def minute(self):\n return self.get_dt_field(\"minute\")\n\n @property\n def second(self):\n return self.get_dt_field(\"second\")\n\n @property\n def weekday(self):\n return self.get_dt_field(\"weekday\")\n\n def to_pandas(self):\n nanos = self._values.astype(\"datetime64[ns]\")\n return pd.DatetimeIndex(nanos.to_pandas(), name=self.name)\n\n def get_dt_field(self, field):\n out_column = self._values.get_dt_field(field)\n # column.column_empty_like always returns a Column object\n # but we need a NumericalColumn for GenericIndex..\n # how should this be handled?\n out_column = column.build_column(\n data=out_column.base_data,\n dtype=out_column.dtype,\n mask=out_column.base_mask,\n offset=out_column.offset,\n )\n return as_index(out_column, name=self.name)\n\n\nclass CategoricalIndex(GenericIndex):\n \"\"\"An categorical of orderable values that represent the indices of another\n Column\n\n Attributes\n ---\n _values: A CategoricalColumn object\n name: A string\n \"\"\"\n\n def __init__(self, values, **kwargs):\n kwargs = _setdefault_name(values, kwargs)\n if isinstance(values, CategoricalColumn):\n values = values\n elif isinstance(values, pd.Series) and (\n is_categorical_dtype(values.dtype)\n ):\n codes_data = 
column.as_column(values.cat.codes.values)\n values = column.build_categorical_column(\n categories=values.cat.categories,\n codes=codes_data,\n ordered=values.cat.ordered,\n )\n elif isinstance(values, (pd.Categorical, pd.CategoricalIndex)):\n codes_data = column.as_column(values.codes)\n values = column.build_categorical_column(\n categories=values.categories,\n codes=codes_data,\n ordered=values.ordered,\n )\n elif isinstance(values, (list, tuple)):\n values = column.as_column(\n pd.Categorical(values, categories=values)\n )\n super(CategoricalIndex, self).__init__(values, **kwargs)\n\n @property\n def codes(self):\n return self._values.cat().codes\n\n @property\n def categories(self):\n return self._values.cat().categories\n\n\nclass StringIndex(GenericIndex):\n \"\"\"String defined indices into another Column\n\n Attributes\n ---\n _values: A StringColumn object or NDArray of strings\n name: A string\n \"\"\"\n\n def __init__(self, values, **kwargs):\n kwargs = _setdefault_name(values, kwargs)\n if isinstance(values, StringColumn):\n values = values.copy()\n elif isinstance(values, StringIndex):\n values = values._values.copy()\n else:\n values = column.as_column(values, dtype=\"str\")\n if not pd.api.types.is_string_dtype(values.dtype):\n raise ValueError(\n \"Couldn't create StringIndex from passed in object\"\n )\n super(StringIndex, self).__init__(values, **kwargs)\n\n def to_pandas(self):\n return pd.Index(self.to_array(), name=self.name, dtype=\"object\")\n\n def take(self, indices):\n return self._values[indices]\n\n def __repr__(self):\n return (\n \"{}({}, dtype='object'\".format(\n self.__class__.__name__, self._values.to_array()\n )\n + (\n \", name='{}'\".format(self.name)\n if self.name is not None\n else \"\"\n )\n + \")\"\n )\n\n\ndef as_index(arbitrary, **kwargs):\n \"\"\"Create an Index from an arbitrary object\n\n Currently supported inputs are:\n\n * ``Column``\n * ``Buffer``\n * ``Series``\n * ``Index``\n * numba device array\n * numpy array\n * pyarrow array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of Index\n - CategoricalIndex for Categorical input.\n - DatetimeIndex for Datetime input.\n - GenericIndex for all other inputs.\n \"\"\"\n\n kwargs = _setdefault_name(arbitrary, kwargs)\n\n if isinstance(arbitrary, cudf.MultiIndex):\n return arbitrary\n elif isinstance(arbitrary, Index):\n idx = arbitrary.copy(deep=False)\n idx.rename(**kwargs, inplace=True)\n return idx\n elif isinstance(arbitrary, NumericalColumn):\n return GenericIndex(arbitrary, **kwargs)\n elif isinstance(arbitrary, StringColumn):\n return StringIndex(arbitrary, **kwargs)\n elif isinstance(arbitrary, DatetimeColumn):\n return DatetimeIndex(arbitrary, **kwargs)\n elif isinstance(arbitrary, CategoricalColumn):\n return CategoricalIndex(arbitrary, **kwargs)\n elif isinstance(arbitrary, cudf.Series):\n return as_index(arbitrary._column, **kwargs)\n elif isinstance(arbitrary, pd.RangeIndex):\n return RangeIndex(start=arbitrary.start, stop=arbitrary.stop, **kwargs)\n elif isinstance(arbitrary, pd.MultiIndex):\n return cudf.MultiIndex.from_pandas(arbitrary)\n elif isinstance(arbitrary, range):\n if arbitrary.step == 1:\n return RangeIndex(arbitrary.start, arbitrary.stop, **kwargs)\n return as_index(column.as_column(arbitrary), **kwargs)\n\n\ndef _setdefault_name(values, kwargs):\n if \"name\" not in kwargs:\n if not hasattr(values, \"name\"):\n kwargs.setdefault(\"name\", None)\n else:\n kwargs.setdefault(\"name\", values.name)\n return kwargs\n"
] | [
[
"pandas.api.types.is_integer",
"pandas.RangeIndex",
"numpy.asarray",
"pandas.Categorical",
"numpy.dtype",
"pandas._config.get_option",
"pandas.api.types.is_list_like",
"pandas.api.types.is_dtype_equal",
"pandas.api.types.is_string_dtype",
"numpy.array"
]
] |
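The cudf `RangeIndex` row above never materializes its values: `__getitem__` resolves a positional slice with `slice.indices(len(self))` and shifts the resulting bounds by `_start`, returning another lazy `RangeIndex` when the step is 1. A minimal pure-Python sketch of that slice arithmetic (`MiniRangeIndex` is a hypothetical stand-in, not cudf API):

```python
# Minimal sketch of the RangeIndex slicing arithmetic shown above.
# Pure-Python stand-in; MiniRangeIndex is a hypothetical name, not cudf API.
class MiniRangeIndex:
    def __init__(self, start, stop):
        self._start, self._stop = start, stop

    def __len__(self):
        return max(0, self._stop - self._start)

    def __getitem__(self, index):
        if isinstance(index, slice):
            # Resolve the slice against the current length, then shift the
            # resulting positional bounds into label space by adding _start.
            start, stop, step = index.indices(len(self))
            if step == 1:
                return MiniRangeIndex(start + self._start, stop + self._start)
            # Non-unit steps fall back to a materialized range.
            return list(range(start + self._start, stop + self._start, step))
        # Scalar positions are normalized, then shifted the same way.
        if index < 0:
            index += len(self)
        return index + self._start


idx = MiniRangeIndex(10, 20)
assert len(idx) == 10
assert idx[3] == 13                       # scalar lookup
sub = idx[2:5]                            # still a lazy range
assert (sub._start, sub._stop) == (12, 15)
assert idx[::2] == [10, 12, 14, 16, 18]   # stepped slice materializes
```

Keeping the unit-step path lazy is what lets the real class report a `memory_usage()` of zero: only start/stop are stored and serialized.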
CHIMAWAN001/scikit-multiflow | [
"d538bc50d128442eceda6a21bd6557a3923cf1d8"
] | [
"src/skmultiflow/ADCN/ADCN_process/ADCNbasic.py"
] | [
"import numpy as np\nfrom scipy.special import softmax\nimport time \nimport copy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom numpy import linalg as LA\nimport pdb\nfrom collections import deque\nimport random\nimport warnings\nfrom skmultiflow.ADCN.ADCN_process.utilsADCN import clusteringLoss, maskingNoise, imageNoise, meanStdCalculator, stableSoftmax\nfrom skmultiflow.ADCN.ADCN_process.model import ConvAeMNIST, smallAE, cluster, ConvAeMNIST, ADCNoldtask\n\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\n\nclass ADCN():\n def __init__(self, nOutput,\n nInput = 196,\n nHiddenNode = 96,\n alpha_w = 0.005,\n alpha_d = 0.001,\n LR = 0.01,\n desiredLabels = [0,1,2,3,4,5,6,7,8,9]):\n # random seed control\n # np.random.seed(0)\n # torch.manual_seed(0)\n # random.seed(0)\n\n # initial network\n self.ADCNcnn = ConvAeMNIST()\n self.ADCNae = [smallAE(nInput, nHiddenNode)]\n self.ADCNold = []\n self.ADCNcluster = []\n\n # network significance\n self.averageBias = [meanStdCalculator()]\n self.averageVar = [meanStdCalculator()]\n self.averageInput = [meanStdCalculator()]\n\n # hyperparameters\n self.lr = LR\n self.criterion = nn.MSELoss()\n\n # drift detection parameters\n self.alphaWarning = alpha_w\n self.alphaDrift = alpha_d\n self.driftStatusOld = 0\n self.driftStatus = 0\n self.driftHistory = []\n # self.prevFeatureMatrix = []\n self.bufferData = torch.Tensor().float()\n self.bufferLabel = torch.Tensor().long()\n \n # Evolving\n self.growNode = False\n self.pruneNode = False\n self.evolving = True\n self.clusterGrowing = True\n\n # net properties\n self.nInput = nInput\n self.nOutput = nOutput\n self.nHiddenLayer = 1\n self.nHiddenNode = nHiddenNode\n \n # cluster properties\n self.desiredLabels= desiredLabels\n self.nInitCluster = 2\n self.nCluster = self.nInitCluster\n self.regStrClusteringLoss = 0.01\n\n # LWF\n self.nOutputPerTask = nOutput\n self.regStrLWF = 5\n\n def updateNetProperties(self):\n self.nHiddenLayer = len(self.ADCNae)\n nHiddenNode = 0\n nCluster = 0\n for idx, nett in enumerate(self.ADCNae):\n nHiddenNode += nett.nNodes\n try:\n nCluster += self.ADCNcluster[idx].nCluster\n except:\n nCluster = 0\n \n self.nHiddenNode = nHiddenNode\n self.nCluster = nCluster\n\n def getNetProperties(self):\n for _,nett in enumerate(self.ADCNae):\n nett.getNetProperties()\n\n # ============================= forward pass =============================\n def forwardADCN(self, x, winIdx = None):\n # prepare model\n self.ADCNcnn = self.ADCNcnn.to(self.device)\n\n # forward encoder CNN\n x = x.to(self.device)\n x = self.ADCNcnn(x)\n\n # feedforward from input layer to latent space\n for iLayer in range(len(self.ADCNae)):\n currnet = self.ADCNae[iLayer].network\n obj = currnet.train()\n obj = obj.to(self.device)\n x = obj(x)\n\n if winIdx is not None:\n # store latent features of the winning layer for clustering\n if iLayer == winIdx:\n self.latentFeatures = x.detach().clone()\n\n if iLayer == len(self.ADCNae) - 2:\n # store input feature for the last hidden layer\n self.lastInputFeature = x.detach().clone()\n\n return x\n\n def forwardBiasVar(self, target, winIdx):\n # x is the input features for this layer\n # only for ADCNae\n # Disable gradient calculation\n with torch.no_grad():\n target = target.to(self.device)\n\n # encode\n x = self.ADCNae[winIdx].network(target)\n hiddenNodeSignificance = torch.mean(x.detach().clone(), 0)\n x2 = (x.detach().clone())**2\n\n # decode\n x = self.ADCNae[winIdx].network(x, 2)\n x2 = self.ADCNae[winIdx].network(x2, 
2)\n\n # get bias and variance\n bias = torch.mean((x - target)**2).item()\n variance = torch.mean(x2 - target**2).item()\n # pdb.set_trace()\n\n return bias, variance, hiddenNodeSignificance\n\n def forward(self, x):\n # encode decode in end-to-end manner\n # prepare model\n self.ADCNcnn = self.ADCNcnn.to(self.device)\n\n # prepare data\n x = x.to(self.device)\n\n # forward encoder CNN\n x = self.ADCNcnn(x)\n\n # feedforward from input layer to latent space, encode\n for iLayer,_ in enumerate(self.ADCNae):\n currnet = self.ADCNae[iLayer].network\n obj = currnet.train()\n obj = obj.to(self.device)\n x = obj(x)\n\n # should consider clustering loss\n\n\n # feedforward from latent space to output layer, decode\n for iLayer in range(len(self.ADCNae)-1,0-1,-1):\n currnet = self.ADCNae[iLayer].network\n obj = currnet.train()\n obj = obj.to(self.device)\n x = obj(x, 2)\n\n # forward decoder CNN\n x = self.ADCNcnn(x, 2)\n \n return x\n\n # ============================= Initialization =============================\n def initialization(self, labeledData,winIdx,\n batchSize = 16,\n epoch = 50,\n device = torch.device('cpu')):\n # initialization phase: train CNN, train AE, train cluster, without clustering loss, evolving, epoch, add cluster in the end\n # always trained using labeled data\n # will create cluster for the last layer\n if winIdx == 0:\n self.labeledData = labeledData\n self.batchSize = batchSize\n\n self.device = device\n print('Network initialization phase is started')\n\n # Train CNN, for the first training, epoch is used\n latentFeatures = self.trainCNN(labeledData, unlabeled = False, epoch = epoch)\n\n # Train AE, this trains the newly created layer\n latentFeatures = self.trainBasicAe(latentFeatures, labeledData,\n evolving = self.evolving, winIdx = 0,\n epoch = epoch, unlabeled = False)\n initialData = latentFeatures.detach().clone().to('cpu')\n\n # create cluster, this is only done when there is no cluster for idx-th layer\n self.createCluster(initialData, epoch = epoch)\n \n if winIdx > 0:\n self.forwardADCN(labeledData, winIdx = winIdx)\n\n # Train AE, this trains the newly created layer\n latentFeatures = self.lastInputFeature # latentFeatures is the extracted features from 2nd last layers\n latentFeatures = self.trainBasicAe(latentFeatures, labeledData,\n evolving = self.evolving, winIdx = winIdx,\n epoch = epoch, unlabeled = False)\n initialData = latentFeatures.detach().clone().to('cpu')\n\n # create cluster, this is only done when there is no cluster for idx-th layer \n # pdb.set_trace()\n self.createCluster(initialData, epoch = epoch)\n\n def createCluster(self, initialData, epoch = 1):\n # print('\\n')\n # print('Cluster initialization phase is started')\n\n nInput = initialData.shape[1]\n\n myCluster = cluster(nInput, initialData[0:len(self.desiredLabels)], \n nInitCluster = self.nInitCluster,\n clusterGrowing = self.clusterGrowing,\n desiredLabels = self.desiredLabels)\n\n # updateCluster\n myCluster.fit(initialData, epoch = epoch)\n self.clusterHistory = myCluster.clusterHistory\n # print('A cluster was created containing ',myCluster.nCluster ,' centroids')\n\n # add cluster to the global cluster\n self.ADCNcluster = self.ADCNcluster + [myCluster]\n\n # ============================= Training =============================\n def trainCNN(self, x, unlabeled = True, epoch = 1):\n # x is image data with size 1 x 28 x 28\n nData = x.shape[0]\n x = x.to(self.device)\n\n # get optimizer\n optimizer = torch.optim.SGD(self.ADCNcnn.parameters(),\n lr = self.lr,\n momentum = 
0.95,\n weight_decay = 0.00005)\n\n # prepare network\n self.ADCNcnn = self.ADCNcnn.train()\n self.ADCNcnn = self.ADCNcnn.to(self.device)\n\n # print('CNN training is started')\n\n for iEpoch in range(0, epoch):\n\n # if iEpoch%20 == 0:\n # print('Epoch: ', iEpoch)\n \n shuffled_indices = torch.randperm(nData)\n\n for iData in range(0,nData,self.batchSize):\n indices = shuffled_indices[iData:iData + self.batchSize]\n \n # load data\n minibatch_xTrain = x[indices]\n \n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n\n # forward \n # latentAE here is the output of the deepest autoencoder\n latentFeatures = self.ADCNcnn(minibatch_xTrain) # encode\n outputs = self.ADCNcnn(latentFeatures, 2) # decode\n\n # calculate the loss\n loss = self.criterion(outputs, minibatch_xTrain)\n\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n\n # perform a single optimization step (parameter update)\n optimizer.step()\n\n with torch.no_grad():\n latentFeatures = self.ADCNcnn(x)\n\n # latentFeatures is the extracted features for the next hidden layer\n return latentFeatures.detach().clone()\n\n def trainBasicAe(self, x, x_ori, winIdx,\n epoch = 1, clustering = False,\n evolving = True, unlabeled = True):\n # grow layer: train CNN, train AE, train cluster\n # x is the extracted features from the previous hidden layer\n # this will update [winIdx]-th layer in greedy layer wise manner\n # TO DO: create grow and prune nodes, why batch size 1 is very bad, drift detection\n nData = x.shape[0]\n\n # masked input\n # maskedX = maskingNoise(x.detach().clone()) # make zero some of the input feature\n\n # prepare data\n x = x.to(self.device)\n # maskedX = maskedX.to(self.device)\n\n # get optimizer\n optimizer = torch.optim.SGD(self.ADCNae[winIdx].network.parameters(),\n lr = self.lr, momentum = 0.95,\n weight_decay = 0.00005)\n\n # prepare network\n self.ADCNae[winIdx].network = self.ADCNae[winIdx].network.train()\n self.ADCNae[winIdx].network = self.ADCNae[winIdx].network.to(self.device)\n\n self.growNode = False\n self.pruneNode = False\n evolve = 0\n hiddenNodeHist = []\n\n # print('AE training is started')\n\n for iEpoch in range(0, epoch):\n\n # if iEpoch%20 == 0:\n # print('Epoch: ', iEpoch)\n \n shuffled_indices = torch.randperm(nData)\n\n for iData in range(0, nData, self.batchSize):\n indices = shuffled_indices[iData:iData + self.batchSize]\n \n # load data\n minibatch_xTrain = x[indices] # input with masking noise\n minibatch_x = x[indices] # original input\n minibatch_x_ori = x_ori[indices]\n\n if iEpoch == 0:\n if self.batchSize > 1:\n minibatch_x_mean = torch.mean(minibatch_x, dim=0).unsqueeze(dim=0)\n else:\n minibatch_x_mean = minibatch_x\n\n # calculate mean of input\n try:\n self.averageInput[winIdx].updateMeanStd(minibatch_x_mean.detach().clone().to('cpu'))\n except:\n # if the number of input changes as a result of node evolution, the counter is reinitiated\n self.averageInput[winIdx].reset()\n self.averageInput[winIdx].updateMeanStd(minibatch_x_mean.detach().clone().to('cpu'))\n\n # calculate bias and variance\n bias, variance, HS = self.forwardBiasVar(self.averageInput[winIdx].mean, winIdx = winIdx)\n\n # update bias and variance\n self.updateBiasVariance(bias, variance, winIdx)\n else:\n # calculate mean of input\n bias, variance, HS = self.forwardBiasVar(self.averageInput[winIdx].mean, winIdx = winIdx)\n\n # update bias and variance\n self.updateBiasVariance(bias, variance, winIdx)\n\n if evolving:\n if self.growNode and 
clustering:\n # add an input to the cluster\n self.ADCNcluster[winIdx].growInput(HS[-1].item())\n\n # growing\n self.growNodeIdentification(bias, winIdx)\n if self.growNode:\n self.hiddenNodeGrowing(winIdx)\n evolve = 1\n # print('+++ Grow node +++')\n \n # pruning\n self.pruneNodeIdentification(variance, winIdx)\n if self.pruneNode:\n # print('--- Prune node ---')\n self.findLeastSignificantNode(HS)\n self.hiddenNodePruning(winIdx)\n evolve = 1\n\n # delete an input to the cluster\n if clustering:\n self.ADCNcluster[winIdx].deleteInput(self.leastSignificantNode)\n \n # clear the gradients of all optimized variables\n optimizer = torch.optim.SGD(self.ADCNae[winIdx].network.parameters(), \n lr = self.lr, momentum = 0.95, weight_decay = 0.00005)\n optimizer.zero_grad()\n\n # forward \n # latentAE here is the output of the deepest autoencoder\n latentAE = self.ADCNae[winIdx].network(minibatch_xTrain)\n outputs = self.ADCNae[winIdx].network(latentAE, 2)\n\n ## calculate the loss\n # reconstruction loss\n loss = self.criterion(outputs, minibatch_x)\n\n if clustering and not self.growNode:\n # clustering loss\n self.ADCNcluster[winIdx].getCluster(latentAE.to('cpu').detach().clone())\n oneHotClusters = F.one_hot(\n torch.tensor(self.ADCNcluster[winIdx].predictedClusters),\n num_classes = self.ADCNcluster[winIdx].nCluster).float().to(self.device)\n centroids = torch.tensor(self.ADCNcluster[winIdx].centroids).float().to(self.device)\n\n # total loss\n loss.add_(self.regStrClusteringLoss/2,clusteringLoss(latentAE, oneHotClusters, centroids))\n\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n\n # perform a single optimization step (parameter update)\n optimizer.step()\n\n if evolving and self.growNode and clustering:\n # add an input to the cluster\n _, _, HS = self.forwardBiasVar(self.averageInput[winIdx].mean, winIdx = winIdx)\n self.ADCNcluster[winIdx].growInput(HS[-1].item())\n\n hiddenNodeHist.append(self.nHiddenNode)\n\n with torch.no_grad():\n latentAE = self.ADCNae[winIdx].network(x)\n\n if clustering:\n self.ADCNcluster[winIdx].fit(latentAE.detach().clone().to('cpu'), epoch)\n\n self.hiddenNodeHist = hiddenNodeHist\n\n # latentAE is the extracted features of winIdx-th AE for the next hidden layer\n return latentAE.detach().clone()\n\n def trainAE(self, x, x_ori,\n epoch = 1,\n clustering = True,\n evolving = True):\n # x is the extracted features from CNN encoder\n # this will update ALL layers of AE in greedy layer wise manner\n for idx, _ in enumerate(self.ADCNae):\n x = self.trainBasicAe(x, x_ori, idx,\n epoch = epoch,\n clustering = clustering,\n evolving = evolving)\n\n def fit(self, x, epoch = 1):\n # train ADCN, all network structures, layer wise manner\n # x is image data with size 1 x 28 x 28\n extractedFeatures = self.trainCNN(x, epoch = epoch)\n self.trainAE(extractedFeatures, x,\n epoch = epoch,\n evolving = self.evolving)\n\n def fitCL(self, x,\n reconsLoss = False,\n unlabeled = True,\n epoch = 1):\n # x is current task image data with size 1 x 28 x 28\n # train ADCN to retain the knowledge in old task, part of continual learning\n # executed iff there is at least 1 old task\n if len(self.ADCNold) > 0:\n nData = x.shape[0]\n x = x.to(self.device)\n \n # get optimizer\n optimizer = torch.optim.SGD(self.ADCNcnn.parameters(),\n lr = self.lr,\n momentum = 0.95,\n weight_decay = 0.00005)\n for iLayer,_ in enumerate(self.ADCNae):\n optimizer.add_param_group({'params': self.ADCNae[iLayer].network.parameters()})\n\n # prepare 
network\n self.ADCNcnn = self.ADCNcnn.train()\n self.ADCNcnn = self.ADCNcnn.to(self.device)\n\n # print('LWF training is started')\n\n for iEpoch in range(0, epoch):\n\n # if iEpoch%20 == 0:\n # print('Epoch: ', iEpoch)\n \n shuffled_indices = torch.randperm(nData)\n\n for iData in range(0, nData, self.batchSize):\n indices = shuffled_indices[iData:iData + self.batchSize]\n \n # load data\n minibatch_xTrain = x[indices]\n\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n\n # forward \n outputs = self.forward(minibatch_xTrain)\n\n # calculate the LWF loss, accross all previous task\n loss = self.LwFloss(outputs, minibatch_xTrain)\n lwfLoss = loss.detach().clone().item()\n\n if reconsLoss:\n lossRecons = self.criterion(outputs, minibatch_xTrain)\n loss.add_(lossRecons)\n\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n\n # perform a single optimization step (parameter update)\n optimizer.step()\n print('LWF loss: ', lwfLoss, 'recons Loss: ',loss.detach().clone().item() - lwfLoss)\n\n def updateAllegiance(self, labeledData, labeledLabel,\n randomTesting = False):\n # At the end of each phase, we introduce a limited amount of labeled data per class to evaluate classification accuracy.\n # forward to each layer\n for iLayer in range(len(self.ADCNae)):\n self.forwardADCN(labeledData, winIdx = iLayer)\n\n # update allegiance in each layer\n self.ADCNcluster[iLayer].updateAllegiance(self.latentFeatures.detach().clone().to('cpu').numpy(), \n labeledLabel.to('cpu').numpy(),\n randomTesting = randomTesting)\n\n # ============================= Testing =============================\n def predict(self, x):\n with torch.no_grad():\n x = x.to(self.device)\n\n # prepare network\n self.ADCNcnn = self.ADCNcnn.eval()\n self.ADCNcnn = self.ADCNcnn.to(self.device)\n \n # forward encoder CNN\n x = self.ADCNcnn(x)\n\n # feedforward from input layer to latent space\n score = np.zeros([x.shape[0], self.nOutput])\n for iLayer in range(len(self.ADCNae)):\n currnet = self.ADCNae[iLayer].network\n obj = currnet.eval()\n obj = obj.to(self.device)\n x = obj(x)\n\n # predict\n self.ADCNcluster[iLayer].predict(x.detach().clone().to('cpu'))\n # pdb.set_trace()\n score = score + stableSoftmax(self.ADCNcluster[iLayer].score)\n\n self.predictedLabel = np.argmax(score,1)\n self.score = score\n\n def testing(self, x, label):\n # testing\n start_test = time.time()\n self.predict(x)\n end_test = time.time()\n self.testingTime = end_test - start_test\n correct = (self.predictedLabel == label.numpy()).sum().item()\n self.accuracy = 100*correct/(self.predictedLabel == label.numpy()).shape[0] # 1: correct, 0: wrong\n self.trueClassLabel = label.numpy()\n\n def randomTesting(self, x, label):\n # testing\n start_test = time.time()\n self.predict(x)\n end_test = time.time()\n self.testingTime = end_test - start_test\n randomPrediction = np.random.randint(self.nOutput, size=self.predictedLabel.shape[0])\n correct = (randomPrediction == label.numpy()).sum().item()\n self.accuracy = 100*correct/(self.predictedLabel == label.numpy()).shape[0] # 1: correct, 0: wrong\n self.trueClassLabel = label.numpy()\n\n # ============================= Evolving mechanism =============================\n def layerGrowing(self):\n if len(self.ADCNae) == len(self.ADCNcluster):\n self.ADCNae = self.ADCNae + [smallAE(self.ADCNae[-1].nNodes,int(self.ADCNae[-1].nNodes/2))]\n self.averageBias = self.averageBias + [meanStdCalculator()]\n self.averageVar = self.averageVar + 
[meanStdCalculator()]\n self.averageInput = self.averageInput + [meanStdCalculator()]\n self.nHiddenLayer = len(self.ADCNae)\n # print('*** ADD a new LAYER ***')\n \n def hiddenNodeGrowing(self, winIdx):\n if winIdx <= (len(self.ADCNae)-1):\n copyNet = copy.deepcopy(self.ADCNae[winIdx])\n copyNet.nodeGrowing(device = self.device)\n self.ADCNae[winIdx] = copy.deepcopy(copyNet)\n if winIdx != (len(self.ADCNae)-1):\n copyNextNet = copy.deepcopy(self.ADCNae[winIdx+1])\n copyNextNet.inputGrowing(device = self.device)\n self.ADCNae[winIdx+1] = copy.deepcopy(copyNextNet)\n\n # print('+++ GROW a hidden NODE +++')\n self.updateNetProperties()\n else:\n raise IndexError\n \n def hiddenNodePruning(self, winIdx):\n if winIdx <= (len(self.ADCNae)-1):\n copyNet = copy.deepcopy(self.ADCNae[winIdx])\n copyNet.nodePruning(self.leastSignificantNode)\n self.ADCNae[winIdx] = copy.deepcopy(copyNet)\n if winIdx != (len(self.ADCNae)-1):\n copyNextNet = copy.deepcopy(self.ADCNae[winIdx+1])\n copyNextNet.inputPruning(self.leastSignificantNode)\n self.ADCNae[winIdx+1] = copy.deepcopy(copyNextNet)\n \n # print('--- Hidden NODE No: ',self.leastSignificantNode,' is PRUNED ---')\n self.updateNetProperties()\n else:\n raise IndexError\n \n # ============================= Network Evaluation =============================\n def LwFloss(self, currentBatchOutput, currentBatchData):\n loss = []\n criterion = nn.BCELoss()\n for iTask,_ in enumerate(self.ADCNold):\n with torch.no_grad():\n minibatch_xOld = self.ADCNold[iTask].forward(currentBatchData, self.device) # it acts as the target\n\n regStr = self.regStrLWF*(1.0 - self.nOutputPerTask/((iTask + 1)*self.nOutputPerTask + self.nOutputPerTask))\n loss.append(regStr*criterion(currentBatchOutput, minibatch_xOld.detach().clone()))\n\n return sum(loss)\n\n def driftDetection(self, batchData, prevBatchData = None):\n self.ADCNcnn = self.ADCNcnn.to(self.device)\n batchData = batchData.to(self.device)\n\n with torch.no_grad():\n # forward encoder CNN\n currFeatureMatrix = self.ADCNcnn(batchData)\n currFeatureMatrix = currFeatureMatrix.to('cpu')\n\n if prevBatchData is not None:\n prevBatchData = prevBatchData.to(self.device)\n\n # forward encoder CNN\n prevFeatureMatrix = self.ADCNcnn(prevBatchData)\n prevFeatureMatrix = prevFeatureMatrix.to('cpu')\n # currFeatureMatrix is a list containing the mean of extracted features\n \n self.driftStatusOld = self.driftStatus\n driftStatus = 0 # 0: no drift, 1: warning, 2: drift\n\n if self.driftStatusOld != 2:\n # detect drift\n # combine buffer data, when previous batch is warning\n if self.driftStatusOld == 1:\n with torch.no_grad():\n bufferFetureMatrix = self.bufferFetureMatrix.to(self.device)\n\n # forward encoder CNN\n bufferFetureMatrix = self.ADCNcnn(bufferFetureMatrix)\n bufferFetureMatrix = bufferFetureMatrix.to('cpu')\n\n currFeatureMatrix = torch.cat((bufferFetureMatrix,currFeatureMatrix),0)\n\n # combine current and previous feature matrix\n combinedFeatureMatrix = currFeatureMatrix\n if prevBatchData is not None:\n combinedFeatureMatrix = torch.cat((prevFeatureMatrix,currFeatureMatrix),0)\n\n # prepare statistical coefficient to confirm a cut point\n nData = combinedFeatureMatrix.shape[0]\n cutPointCandidate = [int(nData/4),int(nData/2),int(nData*3/4)]\n cutPoint = 0\n miu_X = torch.mean(combinedFeatureMatrix,0) # statistics of the current batch data extracted features \n errorBoundX = np.sqrt((1/(2*nData))*np.log(1/self.alphaDrift))\n\n # confirm the cut point\n for iCut in cutPointCandidate:\n miu_V = 
torch.mean(combinedFeatureMatrix[0:iCut], 0)\n nV = combinedFeatureMatrix[0:iCut].shape[0]\n errorBoundV = np.sqrt((1/(2*nV))*np.log(1/self.alphaDrift))\n if torch.mean(miu_X + errorBoundX).item() <= torch.mean(miu_V + errorBoundV).item():\n cutPoint = iCut\n # print('A cut point is detected cut: ', cutPoint)\n break\n\n # confirm drift\n # if np.abs(miu_F - miu_E) >= errorBoundDrift: # This formula is able to detect drift, even the performance improves\n if cutPoint > 0:\n # prepare statistical coefficient to confirm a drift\n max_b,_ = torch.max(combinedFeatureMatrix, 0)\n min_a,_ = torch.min(combinedFeatureMatrix, 0)\n diff_ba = max_b - min_a\n\n errorBoundDrift = torch.mean(diff_ba*np.sqrt(((nData - nV)/(2*nV*nData))*np.log(1/self.alphaDrift))).item()\n # if miu_V - miu_X >= errorBoundDrift: # This formula is only able to detect drift when the performance decreses\n # print('np.abs(miu_V - miu_X) : ', np.abs(miu_V - miu_X),'>','errorBoundDrift',errorBoundDrift)\n\n if torch.mean(np.abs(miu_V - miu_X)).item() >= errorBoundDrift: # This formula is able to detect drift, even the performance improves\n # confirm drift\n # print('H0 is rejected with size: ', errorBoundDrift)\n # print('Status: DRIFT')\n driftStatus = 2\n self.bufferFetureMatrix = []\n # self.prevFeatureMatrix = []\n pdb.set_trace\n else:\n # prepare statistical coefficient to confirm a warning\n errorBoundWarning = torch.mean(diff_ba*np.sqrt(((nData - nV)/(2*nV*nData))*np.log(1/self.alphaWarning))).item()\n # print('np.abs(miu_V - miu_X) : ', np.abs(miu_V - miu_X),'>','errorBoundWarning',errorBoundWarning)\n # if there is a warning in the previous batch, then there is only 2 option left: drift or stable.\n # it is assumed that the number of samples is large enough to confirm those 2 situation\n # if miu_V - miu_X >= errorBoundWarning and self.driftStatusOld != 1:\n if torch.mean(np.abs(miu_V - miu_X)).item() >= errorBoundWarning and self.driftStatusOld != 1:\n # confirm warning\n # if there is a warning, the currFeatureMatrix is stored in the buffer and will be evaluated in the next batch\n # together with the currFeatureMatrix of the next batch\n # print('H0 is rejected with size: ', errorBoundWarning)\n # print('Status: WARNING')\n driftStatus = 1\n self.bufferFetureMatrix = prevBatchData.to('cpu')\n else:\n # confirm stable\n # print('H0 is NOT rejected, size:', torch.mean(np.abs(miu_V - miu_X)).item(),\n # '; Error bound warning: ', errorBoundWarning, '; Error bound drift: ', errorBoundDrift)\n # print('Status: STABLE')\n driftStatus = 0\n self.bufferFetureMatrix = []\n # self.prevFeatureMatrix = currFeatureMatrix\n else:\n # there is no cutpoint detected means that there is no significant increase in the combinedFeatureMatrix\n # print('Cut point is NOT detected')\n # print('Status: STABLE')\n driftStatus = 0\n # self.prevFeatureMatrix = currFeatureMatrix\n\n self.driftStatus = driftStatus\n self.driftHistory.append(driftStatus)\n \n def growNodeIdentification(self, bias, winIdx):\n # confirm high variance situation\n # winIdx is the indes of current AE network, started from 0 (np.log2(winIdx) + 1)\n winIdxa = winIdx + 1 # winIdx start from 0, that is why it is required to add 1\n # dynamicKsigmaGrow = (np.log(winIdxa) + 1)*(1.3*np.exp(-bias) + 0.7)\n dynamicKsigmaGrow = (1.3*np.exp(-bias) + 0.7)\n growCondition1 = (self.averageBias[winIdx].minMean + \n dynamicKsigmaGrow*self.averageBias[winIdx].minStd)\n growCondition2 = self.averageBias[winIdx].mean + self.averageBias[winIdx].std\n\n # print('growCondition2: ', 
growCondition2,'growCondition1: ', growCondition1)\n if growCondition2 > growCondition1 and self.averageBias[winIdx].count >= 1:\n self.growNode = True\n # if winIdx > 0:\n # pdb.set_trace()\n else:\n self.growNode = False\n \n def pruneNodeIdentification(self, variance, winIdx):\n # confirm high variance situation\n # winIdx is the indes of current AE network, started from 0\n winIdxa = winIdx + 1 # winIdx start from 0, that is why it is required to add 1\n # dynamicKsigmaPrune = (np.log(winIdxa) + 1)*(1.3*np.exp(-variance) + 0.7)\n dynamicKsigmaPrune = (1.3*np.exp(-variance) + 0.7)\n pruneCondition1 = (self.averageVar[winIdx].minMean \n + 2*dynamicKsigmaPrune*self.averageVar[winIdx].minStd)\n pruneCondition2 = self.averageVar[winIdx].mean + self.averageVar[winIdx].std\n \n if (pruneCondition2 > pruneCondition1 and not self.growNode and \n self.averageVar[winIdx].count >= 20 and\n self.ADCNae[winIdx].nNodes > self.nOutput):\n self.pruneNode = True\n else:\n self.pruneNode = False\n\n def findLeastSignificantNode(self, hiddenNodeSignificance):\n # find the least significant node given the hidden node significance in the current layer, only for AE network\n # should be executed after doing feedforwardBiasVar on the winning layer\n self.leastSignificantNode = torch.argmin(torch.abs(hiddenNodeSignificance)).tolist()\n # print('Pruned node: ', self.leastSignificantNode)\n \n def updateBiasVariance(self, bias, variance, winIdx):\n # calculate mean of bias and variance\n # should be executed after doing feedforwardBiasVar on the winning layer\n self.averageBias[winIdx].updateMeanStd(bias)\n if self.averageBias[winIdx].count < 1 or self.growNode:\n self.averageBias[winIdx].resetMinMeanStd()\n # if winIdx > 0:\n # pdb.set_trace()\n else:\n self.averageBias[winIdx].updateMeanStdMin()\n \n # calculate mean of variance\n self.averageVar[winIdx].updateMeanStd(variance)\n if self.averageVar[winIdx].count < 20 or self.pruneNode:\n self.averageVar[winIdx].resetMinMeanStd()\n else:\n self.averageVar[winIdx].updateMeanStdMin()\n \n # ============================= Data management =============================\n def trainingDataPreparation(self, batchData, batchLabel):\n # training data preparation\n # if there is a warning, the data is stored in the buffer, and no training is conducted\n # if there is a drift or a stable condition and the buffer is not empty, then the data in the \n # buffer is concatenated together with the current data\n if self.driftStatus == 0 or self.driftStatus == 2: # STABLE or DRIFT\n # check buffer\n if self.bufferData.shape[0] != 0:\n # add buffer to the current data batch\n self.batchData = torch.cat((self.bufferData,batchData),0)\n self.batchLabel = torch.cat((self.bufferLabel,batchLabel),0)\n\n # clear buffer\n self.bufferData = torch.Tensor().float()\n self.bufferLabel = torch.Tensor().long()\n\n if self.driftStatus == 1: # WARNING\n # store data to buffer\n # print('Store data to buffer')\n self.bufferData = batchData\n self.bufferLabel = batchLabel\n\n return batchData, batchLabel\n\n def augmentLabeledSamples(self, data, labels):\n # augment the collection of labeled samples for the network and clusters\n # should be executed after training\n self.labeledData = torch.cat((self.labeledData,data),0)\n self.labeledLabel = torch.cat((self.labeledLabel,labels),0)\n\n ## augment input data for each cluster\n # prepare model\n self.ADCNcnn = self.ADCNcnn.to(self.device)\n\n # forward encoder CNN\n data = data.to(self.device)\n\n with torch.no_grad():\n \n data = 
self.ADCNcnn(data)\n\n # feedforward from input layer to latent space\n for iLayer in range(len(self.ADCNae)):\n currnet = self.ADCNae[iLayer].network\n obj = currnet.train()\n obj = obj.to(self.device)\n data = obj(data)\n\n self.ADCNcluster[iLayer].augmentLabeledSamples(data.detach().clone().to('cpu').numpy(), \n labels.to('cpu').numpy())\n\n def storeOldModel(self, taskId):\n # store taskId-th task model to generate output from the network of the previous task\n print('store model : ', taskId)\n \n # create blank network\n oldNet = ADCNoldtask(taskId)\n\n # copy net property\n oldNet.nInput = self.nInput \n oldNet.nOutput = self.nOutput \n oldNet.nHiddenLayer = self.nHiddenLayer \n oldNet.nHiddenNode = self.nHiddenNode \n oldNet.taskId = taskId\n\n # copy network\n oldNet.ADCNcnn = copy.deepcopy(self.ADCNcnn)\n oldNet.ADCNae = copy.deepcopy(self.ADCNae)\n\n # put it in the collection of old task\n self.ADCNold = self.ADCNold + [oldNet]"
] | [
[
"torch.mean",
"numpy.log",
"torch.abs",
"torch.max",
"torch.Tensor",
"torch.cat",
"torch.randperm",
"numpy.abs",
"torch.min",
"torch.nn.BCELoss",
"torch.tensor",
"numpy.argmax",
"torch.no_grad",
"torch.device",
"numpy.exp",
"numpy.zeros",
"torch.nn.MSELoss",
"numpy.random.randint"
]
] |
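The `driftDetection` method in the ADCN row above is built on Hoeffding-style bounds: with `n` samples and significance level `alpha`, the bound is `sqrt(ln(1/alpha) / (2n))`; a cut point is confirmed when the head segment's mean plus its bound exceeds the overall mean plus the overall bound, and drift (or a warning, under `alphaWarning`) is declared when the mean gap exceeds a range-scaled bound. A self-contained numeric sketch of that test on synthetic 1-D data (NumPy only; not the class itself):

```python
import numpy as np

def hoeffding_bound(n, alpha):
    # epsilon = sqrt(ln(1/alpha) / (2n)), as in driftDetection above
    return np.sqrt((1.0 / (2.0 * n)) * np.log(1.0 / alpha))

rng = np.random.default_rng(0)
# Synthetic feature stream: mean shifts from 0.7 to 0.3 halfway through.
stream = np.concatenate([rng.normal(0.7, 0.05, 200), rng.normal(0.3, 0.05, 200)])

alpha_drift = 0.001
n = len(stream)
mu_X = stream.mean()
eps_X = hoeffding_bound(n, alpha_drift)

for cut in (n // 4, n // 2, 3 * n // 4):       # same cut-point candidates
    head = stream[:cut]
    mu_V, eps_V = head.mean(), hoeffding_bound(len(head), alpha_drift)
    if mu_X + eps_X <= mu_V + eps_V:           # cut point confirmed
        gap = abs(mu_V - mu_X)
        # range-scaled drift bound, mirroring errorBoundDrift above
        scale = (stream.max() - stream.min()) * np.sqrt(
            ((n - cut) / (2.0 * cut * n)) * np.log(1.0 / alpha_drift))
        if gap >= scale:
            print(f"cut={cut}: DRIFT (|mu_V - mu_X|={gap:.3f} >= {scale:.3f})")
        else:
            print(f"cut={cut}: warning/stable region")
        break
```

Because `alphaWarning > alphaDrift`, the warning bound is smaller than the drift bound, so the same gap can trigger a warning before it is large enough to confirm drift; that is why the original buffers the batch on status 1 and re-tests it together with the next batch.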
mitjanikolaus/CPC_audio | [
"4f6fc0b828a89766695ba1e5da4d6fbd0cac9131"
] | [
"cpc/dataset.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport os\nimport random\nimport time\nimport tqdm\nimport torch\nimport soundfile as sf\nfrom pathlib import Path\nfrom copy import deepcopy\nfrom torch.multiprocessing import Pool\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import Sampler, BatchSampler\n\nimport torchaudio\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\nclass AudioBatchData(Dataset):\n\n def __init__(self,\n path,\n sizeWindow,\n seqNames,\n phoneLabelsDict,\n nSpeakers,\n nProcessLoader=50,\n MAX_SIZE_LOADED=4000000000):\n \"\"\"\n Args:\n - path (string): path to the training dataset\n - sizeWindow (int): size of the sliding window\n - seqNames (list): sequences to load\n - phoneLabelsDict (dictionnary): if not None, a dictionnary with the\n following entries\n\n \"step\": size of a labelled window\n \"$SEQ_NAME\": list of phonem labels for\n the sequence $SEQ_NAME\n - nSpeakers (int): number of speakers to expect.\n - nProcessLoader (int): number of processes to call when loading the\n data from the disk\n - MAX_SIZE_LOADED (int): target maximal size of the floating array\n containing all loaded data.\n \"\"\"\n self.MAX_SIZE_LOADED = MAX_SIZE_LOADED\n self.nProcessLoader = nProcessLoader\n self.dbPath = Path(path)\n self.sizeWindow = sizeWindow\n self.seqNames = [(s, self.dbPath / x) for s, x in seqNames]\n self.reload_pool = Pool(nProcessLoader)\n\n self.prepare()\n self.speakers = list(range(nSpeakers))\n self.data = []\n\n self.phoneSize = 0 if phoneLabelsDict is None else \\\n phoneLabelsDict[\"step\"]\n self.phoneStep = 0 if phoneLabelsDict is None else \\\n self.sizeWindow // self.phoneSize\n\n self.phoneLabelsDict = deepcopy(phoneLabelsDict)\n self.loadNextPack(first=True)\n self.loadNextPack()\n self.doubleLabels = False\n\n def resetPhoneLabels(self, newPhoneLabels, step):\n self.phoneSize = step\n self.phoneStep = self.sizeWindow // self.phoneSize\n self.phoneLabelsDict = deepcopy(newPhoneLabels)\n self.loadNextPack()\n\n def splitSeqTags(seqName):\n path = os.path.normpath(seqName)\n return path.split(os.sep)\n\n def getSeqNames(self):\n return [str(x[1]) for x in self.seqNames]\n\n def clear(self):\n if 'data' in self.__dict__:\n del self.data\n if 'speakerLabel' in self.__dict__:\n del self.speakerLabel\n if 'phoneLabels' in self.__dict__:\n del self.phoneLabels\n if 'seqLabel' in self.__dict__:\n del self.seqLabel\n\n def prepare(self):\n random.shuffle(self.seqNames)\n start_time = time.time()\n\n print(\"Checking length...\")\n allLength = self.reload_pool.map(extractLength, self.seqNames)\n\n self.packageIndex, self.totSize = [], 0\n start, packageSize = 0, 0\n for index, length in tqdm.tqdm(enumerate(allLength)):\n packageSize += length\n if packageSize > self.MAX_SIZE_LOADED:\n self.packageIndex.append([start, index])\n self.totSize += packageSize\n start, packageSize = index, 0\n\n if packageSize > 0:\n self.packageIndex.append([start, len(self.seqNames)])\n self.totSize += packageSize\n\n print(f\"Done, elapsed: {time.time() - start_time:.3f} seconds\")\n print(f'Scanned {len(self.seqNames)} sequences '\n f'in {time.time() - start_time:.2f} seconds')\n print(f\"{len(self.packageIndex)} chunks computed\")\n self.currentPack = -1\n self.nextPack = 0\n\n def getNPacks(self):\n return len(self.packageIndex)\n\n def loadNextPack(self, first=False):\n self.clear()\n if 
not first:\n self.currentPack = self.nextPack\n start_time = time.time()\n print('Joining pool')\n self.r.wait()\n print(f'Joined process, elapsed={time.time()-start_time:.3f} secs')\n self.nextData = self.r.get()\n self.parseNextDataBlock()\n del self.nextData\n self.nextPack = (self.currentPack + 1) % len(self.packageIndex)\n seqStart, seqEnd = self.packageIndex[self.nextPack]\n if self.nextPack == 0 and len(self.packageIndex) > 1:\n self.prepare()\n self.r = self.reload_pool.map_async(loadFile,\n self.seqNames[seqStart:seqEnd])\n\n def parseNextDataBlock(self):\n\n # Labels\n self.speakerLabel = [0]\n self.seqLabel = [0]\n self.phoneLabels = []\n speakerSize = 0\n indexSpeaker = 0\n\n # To accelerate the process a bit\n self.nextData.sort(key=lambda x: (x[0], x[1]))\n tmpData = []\n\n for speaker, seqName, seq in self.nextData:\n while self.speakers[indexSpeaker] < speaker:\n indexSpeaker += 1\n self.speakerLabel.append(speakerSize)\n if self.speakers[indexSpeaker] != speaker:\n raise ValueError(f'{speaker} invalid speaker')\n\n if self.phoneLabelsDict is not None:\n self.phoneLabels += self.phoneLabelsDict[seqName]\n newSize = len(self.phoneLabelsDict[seqName]) * self.phoneSize\n seq = seq[:newSize]\n\n sizeSeq = seq.size(0)\n tmpData.append(seq)\n self.seqLabel.append(self.seqLabel[-1] + sizeSeq)\n speakerSize += sizeSeq\n del seq\n\n self.speakerLabel.append(speakerSize)\n self.data = torch.cat(tmpData, dim=0)\n\n def getPhonem(self, idx):\n idPhone = idx // self.phoneSize\n return self.phoneLabels[idPhone:(idPhone + self.phoneStep)]\n\n def getSpeakerLabel(self, idx):\n idSpeaker = next(x[0] for x in enumerate(\n self.speakerLabel) if x[1] > idx) - 1\n return idSpeaker\n\n def __len__(self):\n return self.totSize // self.sizeWindow\n\n def __getitem__(self, idx):\n\n if idx < 0 or idx >= len(self.data) - self.sizeWindow - 1:\n print(idx)\n\n outData = self.data[idx:(self.sizeWindow + idx)].view(1, -1)\n label = torch.tensor(self.getSpeakerLabel(idx), dtype=torch.long)\n if self.phoneSize > 0:\n label_phone = torch.tensor(self.getPhonem(idx), dtype=torch.long)\n if not self.doubleLabels:\n label = label_phone\n else:\n label_phone = torch.zeros(1)\n\n if self.doubleLabels:\n return outData, label, label_phone\n\n return outData, label\n\n def getNSpeakers(self):\n return len(self.speakers)\n\n def getNSeqs(self):\n return len(self.seqLabel) - 1\n\n def getNLoadsPerEpoch(self):\n return len(self.packageIndex)\n\n def getBaseSampler(self, type, batchSize, offset):\n if type == \"samespeaker\":\n return SameSpeakerSampler(batchSize, self.speakerLabel,\n self.sizeWindow, offset)\n if type == \"samesequence\":\n return SameSpeakerSampler(batchSize, self.seqLabel,\n self.sizeWindow, offset)\n if type == \"sequential\":\n return SequentialSampler(len(self.data), self.sizeWindow,\n offset, batchSize)\n sampler = UniformAudioSampler(len(self.data), self.sizeWindow,\n offset)\n return BatchSampler(sampler, batchSize, True)\n\n def getDataLoader(self, batchSize, type, randomOffset, numWorkers=0,\n onLoop=-1):\n r\"\"\"\n Get a batch sampler for the current dataset.\n Args:\n - batchSize (int): batch size\n - groupSize (int): in the case of type in [\"speaker\", \"sequence\"]\n number of items sharing a same label in the group\n (see AudioBatchSampler)\n - type (string):\n type == \"speaker\": grouped sampler speaker-wise\n type == \"sequence\": grouped sampler sequence-wise\n type == \"sequential\": sequential sampling\n else: uniform random sampling of the full audio\n vector\n - randomOffset 
(bool): if True add a random offset to the sampler\n at the begining of each iteration\n \"\"\"\n nLoops = len(self.packageIndex)\n totSize = self.totSize // (self.sizeWindow * batchSize)\n if onLoop >= 0:\n self.currentPack = onLoop - 1\n self.loadNextPack()\n nLoops = 1\n\n def samplerCall():\n offset = random.randint(0, self.sizeWindow // 2) \\\n if randomOffset else 0\n return self.getBaseSampler(type, batchSize, offset)\n\n return AudioLoader(self, samplerCall, nLoops, self.loadNextPack,\n totSize, numWorkers)\n\n\ndef loadFile(data):\n speaker, fullPath = data\n seqName = fullPath.stem\n seq = torchaudio.load(fullPath)[0].view(-1)\n\n return speaker, seqName, seq\n\n\nclass AudioLoader(object):\n r\"\"\"\n A DataLoader meant to handle an AudioBatchData object.\n In order to handle big datasets AudioBatchData works with big chunks of\n audio it loads sequentially in memory: once all batches have been sampled\n on a chunk, the AudioBatchData loads the next one.\n \"\"\"\n def __init__(self,\n dataset,\n samplerCall,\n nLoop,\n updateCall,\n size,\n numWorkers):\n r\"\"\"\n Args:\n - dataset (AudioBatchData): target dataset\n - samplerCall (function): batch-sampler to call\n - nLoop (int): number of chunks to load\n - updateCall (function): function loading the next chunk\n - size (int): total number of batches\n - numWorkers (int): see torch.utils.data.DataLoader\n \"\"\"\n self.samplerCall = samplerCall\n self.updateCall = updateCall\n self.nLoop = nLoop\n self.size = size\n self.dataset = dataset\n self.numWorkers = numWorkers\n\n def __len__(self):\n return self.size\n\n def __iter__(self):\n\n for i in range(self.nLoop):\n sampler = self.samplerCall()\n dataloader = DataLoader(self.dataset,\n batch_sampler=sampler,\n num_workers=self.numWorkers)\n for x in dataloader:\n yield x\n if i < self.nLoop - 1:\n self.updateCall()\n\n\nclass UniformAudioSampler(Sampler):\n\n def __init__(self,\n dataSize,\n sizeWindow,\n offset):\n\n self.len = dataSize // sizeWindow\n self.sizeWindow = sizeWindow\n self.offset = offset\n if self.offset > 0:\n self.len -= 1\n\n def __iter__(self):\n return iter((self.offset\n + self.sizeWindow * torch.randperm(self.len)).tolist())\n\n def __len__(self):\n return self.len\n\n\nclass SequentialSampler(Sampler):\n\n def __init__(self, dataSize, sizeWindow, offset, batchSize):\n\n self.len = (dataSize // sizeWindow) // batchSize\n self.sizeWindow = sizeWindow\n self.offset = offset\n self.startBatches = [x * (dataSize // batchSize)\n for x in range(batchSize)]\n self.batchSize = batchSize\n if self.offset > 0:\n self.len -= 1\n\n def __iter__(self):\n for idx in range(self.len):\n yield [self.offset + self.sizeWindow * idx\n + start for start in self.startBatches]\n\n def __len__(self):\n return self.len\n\n\nclass SameSpeakerSampler(Sampler):\n\n def __init__(self,\n batchSize,\n samplingIntervals,\n sizeWindow,\n offset):\n\n self.samplingIntervals = samplingIntervals\n self.sizeWindow = sizeWindow\n self.batchSize = batchSize\n self.offset = offset\n\n if self.samplingIntervals[0] != 0:\n raise AttributeError(\"Sampling intervals should start at zero\")\n\n nWindows = len(self.samplingIntervals) - 1\n self.sizeSamplers = [(self.samplingIntervals[i+1] -\n self.samplingIntervals[i]) // self.sizeWindow\n for i in range(nWindows)]\n\n if self.offset > 0:\n self.sizeSamplers = [max(0, x - 1) for x in self.sizeSamplers]\n\n order = [(x, torch.randperm(val).tolist())\n for x, val in enumerate(self.sizeSamplers) if val > 0]\n\n # Build Batches\n self.batches = []\n 
for indexSampler, randperm in order:\n indexStart, sizeSampler = 0, self.sizeSamplers[indexSampler]\n while indexStart < sizeSampler:\n indexEnd = min(sizeSampler, indexStart + self.batchSize)\n locBatch = [self.getIndex(x, indexSampler)\n for x in randperm[indexStart:indexEnd]]\n indexStart = indexEnd\n self.batches.append(locBatch)\n\n def __len__(self):\n return len(self.batches)\n\n def getIndex(self, x, iInterval):\n return self.offset + x * self.sizeWindow \\\n + self.samplingIntervals[iInterval]\n\n def __iter__(self):\n random.shuffle(self.batches)\n return iter(self.batches)\n\n\ndef extractLength(couple):\n speaker, locPath = couple\n info = torchaudio.info(str(locPath))\n return info.num_frames\n\n\ndef findAllSeqs(dirName,\n extension='.flac',\n loadCache=False,\n speaker_level=1):\n r\"\"\"\n Lists all the sequences with the given extension in the dirName directory.\n Output:\n outSequences, speakers\n\n outSequence\n A list of tuples seq_path, speaker where:\n - seq_path is the relative path of each sequence relative to the\n parent directory\n - speaker is the corresponding speaker index\n\n outSpeakers\n The speaker labels (in order)\n\n The speaker labels are organized the following way\n \\dirName\n \\speaker_label\n \\..\n ...\n seqName.extension\n\n Adjust the value of speaker_level if you want to choose which level of\n directory defines the speaker label. Ex if speaker_level == 2 then the\n dataset should be organized in the following fashion\n \\dirName\n \\crappy_label\n \\speaker_label\n \\..\n ...\n seqName.extension\n Set speaker_label == 0 if no speaker label will be retrieved no matter the\n organization of the dataset.\n\n \"\"\"\n cache_path = os.path.join(dirName, '_seqs_cache.txt')\n if loadCache:\n try:\n outSequences, speakers = torch.load(cache_path)\n print(f'Loaded from cache {cache_path} successfully')\n return outSequences, speakers\n except OSError as err:\n print(f'Ran in an error while loading {cache_path}: {err}')\n print('Could not load cache, rebuilding')\n\n if dirName[-1] != os.sep:\n dirName += os.sep\n prefixSize = len(dirName)\n speakersTarget = {}\n outSequences = []\n for root, dirs, filenames in tqdm.tqdm(os.walk(dirName)):\n filtered_files = [f for f in filenames if f.endswith(extension)]\n\n if len(filtered_files) > 0:\n speakerStr = (os.sep).join(\n root[prefixSize:].split(os.sep)[:speaker_level])\n if speakerStr not in speakersTarget:\n speakersTarget[speakerStr] = len(speakersTarget)\n speaker = speakersTarget[speakerStr]\n for filename in filtered_files:\n full_path = os.path.join(root[prefixSize:], filename)\n outSequences.append((speaker, full_path))\n outSpeakers = [None for x in speakersTarget]\n for key, index in speakersTarget.items():\n outSpeakers[index] = key\n try:\n torch.save((outSequences, outSpeakers), cache_path)\n print(f'Saved cache file at {cache_path}')\n except OSError as err:\n print(f'Ran in an error while saving {cache_path}: {err}')\n return outSequences, outSpeakers\n\n\ndef parseSeqLabels(pathLabels):\n with open(pathLabels, 'r') as f:\n lines = f.readlines()\n output = {\"step\": 160} # Step in librispeech dataset is 160bits\n maxPhone = 0\n for line in lines:\n data = line.split()\n output[data[0]] = [int(x) for x in data[1:]]\n maxPhone = max(maxPhone, max(output[data[0]]))\n return output, maxPhone + 1\n\n\ndef filterSeqs(pathTxt, seqCouples):\n with open(pathTxt, 'r') as f:\n inSeqs = [p.replace('\\n', '') for p in f.readlines()]\n\n inSeqs.sort()\n seqCouples.sort(key=lambda x: 
os.path.basename(os.path.splitext(x[1])[0]))\n output, index = [], 0\n for x in seqCouples:\n seq = os.path.basename(os.path.splitext(x[1])[0])\n while index < len(inSeqs) and seq > inSeqs[index]:\n index += 1\n if index == len(inSeqs):\n break\n if seq == inSeqs[index]:\n output.append(x)\n return output\n"
] | [
[
"torch.cat",
"torch.zeros",
"torch.load",
"torch.randperm",
"torch.utils.data.DataLoader",
"torch.multiprocessing.Pool",
"torch.utils.data.sampler.BatchSampler",
"torch.multiprocessing.set_sharing_strategy",
"torch.save"
]
] |
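`SameSpeakerSampler` in the CPC row above turns cumulative interval boundaries (`samplingIntervals`, one entry per speaker or sequence, starting at zero) into shuffled batches of window start positions that never straddle a boundary. A compact standard-library sketch of the same index arithmetic (`same_group_batches` is an illustrative name, values synthetic):

```python
import random

def same_group_batches(intervals, size_window, batch_size, offset=0, seed=0):
    # intervals: cumulative sample boundaries, e.g. [0, 1000, 1800],
    # meaning group 0 owns samples [0, 1000) and group 1 owns [1000, 1800).
    rng = random.Random(seed)
    batches = []
    for i in range(len(intervals) - 1):
        n_windows = (intervals[i + 1] - intervals[i]) // size_window
        if offset > 0:
            n_windows = max(0, n_windows - 1)  # shifted last window may overrun
        order = list(range(n_windows))
        rng.shuffle(order)
        for start in range(0, n_windows, batch_size):
            chunk = order[start:start + batch_size]
            # map window index -> absolute sample position within the group,
            # matching getIndex: offset + x * sizeWindow + samplingIntervals[i]
            batches.append([offset + w * size_window + intervals[i] for w in chunk])
    rng.shuffle(batches)  # the original shuffles batch order on each __iter__
    return batches

for b in same_group_batches([0, 1000, 1800], size_window=160, batch_size=4):
    print(b)
```

The `offset > 0` correction drops one window per group, matching the original's `max(0, x - 1)`, since a shifted final window could run past its interval end.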
king4arabs/mindspore | [
"bc38590e5300588aa551355836043af0ea092a72"
] | [
"tests/st/ops/cpu/test_bias_add.py"
] | [
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.bias_add = P.BiasAdd()\n\n def construct(self, x, b):\n return self.bias_add(x, b)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add4d():\n x = np.ones([2, 3, 4, 4]).astype(np.float32)\n b = np.array([1, 1, 1]).astype(np.float32)\n bias_add = Net()\n output = bias_add(Tensor(x), Tensor(b))\n expect_output = np.ones([2, 3, 4, 4]).astype(np.float32) * 2\n print(output)\n assert np.all(output.asnumpy() == expect_output), \"bias_add execute failed, please check current code commit\"\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add2d():\n x = np.ones([2, 3]).astype(np.float32)\n b = np.array([1, 1, 1]).astype(np.float32)\n bias_add = Net()\n output = bias_add(Tensor(x), Tensor(b))\n expect_output = np.ones([2, 3]).astype(np.float32) * 2\n print(output)\n assert np.all(output.asnumpy() == expect_output), \"bias_add execute failed, please check current code commit\"\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add3d():\n x = np.ones([2, 3, 4]).astype(np.float32)\n b = np.array([1, 1, 1]).astype(np.float32)\n bias_add = Net()\n output = bias_add(Tensor(x), Tensor(b))\n expect_output = np.ones([2, 3, 4]).astype(np.float32) * 2\n print(output)\n assert np.all(output.asnumpy() == expect_output), \"bias_add execute failed, please check current code commit\"\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add5d():\n x = np.ones([2, 5, 4, 4, 4]).astype(np.float32)\n b = np.array([1, 1, 1, 1, 1]).astype(np.float32)\n bias_add = Net()\n output = bias_add(Tensor(x), Tensor(b))\n expect_output = np.ones([2, 5, 4, 4, 4]).astype(np.float32) * 2\n print(output)\n assert np.all(output.asnumpy() == expect_output), \"bias_add execute failed, please check current code commit\"\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add6d():\n x = np.ones([2, 4, 4, 4, 4, 1]).astype(np.float32)\n b = np.array([1, 1, 1, 1]).astype(np.float32)\n bias_add = Net()\n output = bias_add(Tensor(x), Tensor(b))\n expect_output = np.ones([2, 4, 4, 4, 4, 1]).astype(np.float32) * 2\n print(output)\n assert np.all(output.asnumpy() == expect_output), \"bias_add execute failed, please check current code commit\"\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add7d():\n x = np.ones([2, 4, 4, 4, 4, 1, 2]).astype(np.float32)\n b = np.array([1, 1, 1, 1]).astype(np.float32)\n bias_add = Net()\n output = 
bias_add(Tensor(x), Tensor(b))\n expect_output = np.ones([2, 4, 4, 4, 4, 1, 2]).astype(np.float32) * 2\n print(output)\n assert np.all(output.asnumpy() == expect_output), \"bias_add execute failed, please check current code commit\"\n"
] | [
[
"numpy.array",
"numpy.ones"
]
] |
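Each MindSpore test above asserts the same contract: `P.BiasAdd` takes a 1-D bias whose length matches axis 1 of the input and broadcasts it across every other dimension, so all-ones input plus an all-ones bias yields all twos for ranks 2 through 7. A NumPy sketch of that broadcasting, usable as a reference oracle (`bias_add_ref` is a hypothetical helper, not MindSpore API):

```python
import numpy as np

def bias_add_ref(x, b):
    # Reference semantics assumed by the tests: bias length must match
    # axis 1 of x and is broadcast across every other dimension.
    assert x.shape[1] == b.shape[0], "bias must match the channel dimension"
    shape = [1] * x.ndim
    shape[1] = b.shape[0]          # e.g. (1, C, 1, 1) for NCHW input
    return x + b.reshape(shape)

x = np.ones([2, 3, 4, 4], dtype=np.float32)
b = np.array([1, 1, 1], dtype=np.float32)
out = bias_add_ref(x, b)
assert np.all(out == 2.0)          # matches expect_output in test_bias_add4d
print(out.shape, out.dtype)
```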
paulseghers/dtu_mlops_exercises | [
"d9ecc253e6ae5a0fefe9a59d6698d4e895941af0"
] | [
"src/model.py"
] | [
"from torch import nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n# taken from https://nextjournal.com/gkoehler/pytorch-mnist\n\nclass MyAwesomeModel(nn.Module):\n def __init__(self, height=28, width=28, channels=1, classes=10, dropout=0.25):\n self.width, self.height, self.channels, self.classes, self.dropout_rate = width, height, channels, classes, dropout\n #super().__init__()\n super(MyAwesomeModel, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)\n self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=4)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n if x.ndim != 4:\n raise ValueError('Expected input to a 4D tensor')\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)\n\n\nclass mnist_classifier(nn.Module):\n def __init__(self):\n super().__init__()\n # Inputs to hidden layer linear transformation\n self.fc1 = nn.Linear(784, 128)\n self.fc2 = nn.Linear(128, 64)\n # Output layer, 10 units - one for each digit\n self.output = nn.Linear(64, 10)\n self.dropout = nn.Dropout(p=0.2)\n\n def forward(self, x):\n x = x.view(x.shape[0], -1)\n # Hidden layer with ReLU activation\n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n # Output layer with log softmax activation\n x = F.log_softmax(self.output(x), dim=1)\n return x"
] | [
[
"torch.nn.Dropout",
"torch.nn.Dropout2d",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout",
"torch.nn.Conv2d",
"torch.nn.Linear"
]
] |
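The hard-coded 320 in MyAwesomeModel.fc1 follows from the 28x28 MNIST input: 28 -> conv(k=5) -> 24 -> pool(2) -> 12 -> conv(k=4) -> 9 -> pool(2) -> 4, and 20 channels give 20 * 4 * 4 = 320. A quick shape check:

import torch
from torch import nn
import torch.nn.functional as F

conv1 = nn.Conv2d(1, 10, kernel_size=5)
conv2 = nn.Conv2d(10, 20, kernel_size=4)
x = torch.zeros(1, 1, 28, 28)
x = F.max_pool2d(conv1(x), 2)    # -> (1, 10, 12, 12)
x = F.max_pool2d(conv2(x), 2)    # -> (1, 20, 4, 4)
assert x.shape == (1, 20, 4, 4)  # flattens to 320 features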
peq10/cancer_vsd | [
"a8ad7cf6ceb20327e41c833d5e9a7ff63b49f9bd"
] | [
"vsd_cancer/make_paper_data/make_paper_figures/mda_231_figure.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 29 18:33:36 2021\n\n@author: peter\n\"\"\"\nfrom pathlib import Path\n\nfrom vsd_cancer.functions import cancer_functions as canf\n\nimport numpy as np\nimport pandas as pd\nimport scipy.ndimage as ndimage\nimport matplotlib.pyplot as plt\nimport f.plotting_functions as pf\n\nimport matplotlib.cm\nimport matplotlib.gridspec as gridspec\nimport matplotlib as mpl\n\nimport seaborn as sns\n\nimport datetime\n\n\ndef make_figures(initial_df, save_dir, figure_dir, filetype=\".png\"):\n figsave = Path(figure_dir, \"231_figure\")\n if not figsave.is_dir():\n figsave.mkdir()\n\n trial_string_use = \"cancer_20201207_slip1_area1_long_acq_corr_corr_long_acqu_blue_0.03465_green_0.07063_heated_to_37_1\"\n\n df = pd.read_csv(initial_df)\n\n if True:\n num_traces = 15\n sep = 25\n make_example_trace_fig(\n trial_string_use, num_traces, sep, df, save_dir, figsave, filetype\n )\n # make example for synchrony fig\n make_example_trace_fig(\n trial_string_use,\n 15,\n sep,\n df,\n save_dir,\n Path(figure_dir, \"wave_figure\"),\n filetype,\n )\n\n plot_positive_negative_events(save_dir, figsave, filetype)\n\n plot_percent_quiet(save_dir, figsave, filetype)\n\n\ndef plot_percent_quiet(save_dir, figsave, filetype):\n\n df = pd.read_csv(Path(save_dir, \"non_ttx_active_df_by_cell.csv\"))\n df2 = pd.read_csv(Path(save_dir, \"TTX_active_df_by_cell.csv\"))\n\n df2 = df2[df2.stage == \"pre\"]\n\n df = pd.concat([df, df2])\n\n T = 0.2\n\n df[\"exp_stage\"] = df.expt + \"_\" + df.stage\n df[\"day_slip\"] = df.day.astype(str) + \"_\" + df.slip.astype(str)\n\n df[\"event_rate\"] = (df[\"n_neg_events\"] + df[\"n_pos_events\"]) / (\n df[\"obs_length\"] * T\n )\n df[\"neg_event_rate\"] = (df[\"n_neg_events\"]) / (df[\"obs_length\"] * T)\n\n df[\"integ_rate\"] = (df[\"integrated_events\"]) / (df[\"obs_length\"] * T)\n df[\"neg_integ_rate\"] = -1 * (df[\"neg_integrated_events\"]) / (df[\"obs_length\"] * T)\n\n mda = df[\n [x in [\"standard\", \"TTX_10um\", \"TTX_10um_washout\", \"TTX_1um\"] for x in df.expt]\n ][[\"neg_event_rate\", \"day_slip\"]]\n\n mda[\"active\"] = 100 * (mda[\"neg_event_rate\"] > 0).astype(int)\n\n active_d = mda[mda.active != 0]\n active_rate = active_d.groupby(\"day_slip\").mean()[\"neg_event_rate\"].to_numpy()\n\n active = mda.groupby(\"day_slip\").mean()[\"active\"].to_numpy()\n\n with open(Path(figsave, \"event_rate_info.txt\"), \"w\") as f:\n f.write(f\"{datetime.datetime.now()}\\n\")\n f.write(f\"Mean per coverslip active cell rate: {np.mean(active_rate)}\")\n f.write(f\", SEM: {np.std(active_rate)/np.sqrt(len(active_rate))}\\n\")\n f.write(f\"n = {len(active_rate)}\\n\")\n\n fig, ax4 = plt.subplots()\n # sns.violinplot(y=active,saturation = 0.5)\n # ax4.plot(np.random.normal(loc = 1,scale = scale,size = sens.shape[0]),sens[:,-1],'.k',markersize = 12)\n sns.swarmplot(y=active, ax=ax4, color=\"k\", size=7)\n ax4.xaxis.set_visible(False)\n ax4.set_ylabel(\"Active Cells (%)\")\n pf.set_thickaxes(ax4, 3, remove=[\"top\", \"right\", \"bottom\"])\n pf.set_all_fontsize(ax4, 16)\n pf.set_all_fontsize(ax4, 16)\n\n pf.make_square_plot(ax4)\n fig.savefig(\n Path(figsave, f\"231_active_cells{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n fig, ax = plt.subplots()\n # sns.violinplot(y=active,saturation = 0.5)\n # ax4.plot(np.random.normal(loc = 1,scale = scale,size = sens.shape[0]),sens[:,-1],'.k',markersize = 12)\n sns.swarmplot(y=active_rate * 10**3, ax=ax, color=\"k\", size=7)\n 
ax.xaxis.set_visible(False)\n ax.set_ylabel(\"Mean event rate\\n(active cells, x10$^3$ s$^{-1}$)\")\n pf.set_thickaxes(ax, 3, remove=[\"top\", \"right\", \"bottom\"])\n pf.set_all_fontsize(ax, 16)\n pf.set_all_fontsize(ax, 16)\n\n pf.make_square_plot(ax)\n fig.savefig(\n Path(figsave, f\"231_active_rates{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n fig1, ax1 = plt.subplots()\n ax1.hist(active_d.neg_event_rate * 1000, bins=10, log=True, color=(0.2, 0.2, 0.2))\n ax1.set_xlabel(\"Event rate (active cells, x10$^3$ s$^{-1}$)\")\n ax1.set_ylabel(\"Number of cells\")\n pf.set_thickaxes(ax1, 3)\n pf.set_all_fontsize(ax1, 16)\n pf.set_all_fontsize(ax1, 16)\n fig1.savefig(\n Path(figsave, f\"231_active_hists{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n\ndef make_example_trace_fig(\n trial_string_use, num_traces, sep, df, save_dir, figsave, filetype\n):\n T = 0.2\n\n im_scalebar_length_um = 100\n\n for idx, data in enumerate(df.itertuples()):\n trial_string = data.trial_string\n # print(trial_string)\n trial_save = Path(save_dir, \"ratio_stacks\", trial_string)\n\n if trial_string != trial_string_use:\n continue\n else:\n break\n\n im = np.load(Path(trial_save, f\"{trial_string}_im.npy\"))\n seg = np.load(Path(trial_save, f\"{trial_string}_seg.npy\"))\n\n masks = canf.lab2masks(seg)\n\n tcs = np.load(Path(trial_save, f\"{trial_string}_all_tcs.npy\"))\n\n event_dict = np.load(\n Path(trial_save, f\"{trial_string}_event_properties.npy\"), allow_pickle=True\n ).item()\n\n idx = 0\n events = event_dict[\"events\"][idx]\n\n keep = [x for x in np.arange(tcs.shape[0])]\n\n # sort by event amounts\n sort_order = np.array(\n [\n np.sum(np.abs(events[\"event_props\"][x][:, -1])) if x in events.keys() else 0\n for x in range(tcs.shape[0])\n ]\n )\n\n tcs = tcs[keep, :]\n masks = masks[keep, ...]\n sort_order = np.argsort(sort_order[keep])[::-1]\n\n tcs = tcs[sort_order, :]\n masks = masks[sort_order, :]\n so = np.array(keep)[sort_order]\n\n tcs = tcs[:num_traces, ...]\n so = so[:num_traces]\n masks = masks[:num_traces, ...]\n\n # now sort back in position\n llocs = np.array([ndimage.measurements.center_of_mass(x) for x in masks])\n llocs = llocs[:, 0] * masks.shape[-2] + llocs[:, 1]\n order2 = np.argsort(llocs)[::-1]\n\n tcs = tcs[order2, ...]\n so = so[order2, ...]\n masks = masks[order2, ...]\n\n tc_filt = ndimage.gaussian_filter(\n tcs, (0, 3)\n ) # np.array([prox_tv.tv1_1d(t,0.01) for t in tcs])\n\n cmap = matplotlib.cm.tab20\n\n with open(Path(figsave, \"cell_ids.txt\"), \"w\") as f:\n f.write(f\"{trial_string}\\n\")\n for s in so:\n f.write(f\"{s}\\n\")\n\n fig = plt.figure(constrained_layout=True)\n gs = fig.add_gridspec(2, 5)\n ax = fig.add_subplot(gs[:, -2:])\n colors = []\n for i in range(num_traces):\n ax.plot([0, tcs.shape[-1] * T], np.ones(2) * i * 100 / sep, \"k\", alpha=0.5)\n line = ax.plot(\n np.arange(tcs.shape[-1]) * T,\n (tcs[i] - 1) * 100 + i * 100 / sep,\n color=cmap(i / num_traces),\n )\n _ = ax.plot(\n np.arange(tcs.shape[-1]) * T,\n (tc_filt[i] - 1) * 100 + i * 100 / sep,\n color=\"k\",\n )\n colors.append(line[0].get_c())\n ev = events[so[i]]\n ax.text(\n -10,\n (i - 0.15) * 100 / sep,\n f\"{i}\",\n fontdict={\"fontsize\": 14},\n color=colors[i],\n ha=\"right\",\n va=\"center\",\n )\n if False:\n for l in ev.T:\n ax.fill_betweenx(\n [(i - 0.5 * 0.9) * 100 / sep, (i + 0.5 * 0.9) * 100 / sep],\n l[0] * T,\n l[1] * T,\n facecolor=\"r\",\n alpha=0.5,\n )\n\n plt.axis(\"off\")\n pf.plot_scalebar(ax, 0, (tcs[:num_traces].min() 
- 1) * 100, 200, 3, thickness=3)\n\n colors = (np.array(colors) * 255).astype(np.uint8)\n # colors = np.hstack([colors,np.ones((colors.shape[0],1))])\n\n over = masks[:num_traces]\n struct = np.zeros((3, 3, 3))\n struct[1, ...] = 1\n over = np.logical_xor(\n ndimage.binary_dilation(over, structure=struct, iterations=3), over\n ).astype(int)\n over = np.sum(over[..., None] * colors[:, None, None, :], 0).astype(np.uint8)\n length = int(im_scalebar_length_um / 1.04)\n\n over[-20:-15, -length - 10 : -10] = np.ones(4, dtype=np.uint8) * 255\n\n ax1 = fig.add_subplot(gs[:, :-2])\n ax1.imshow(im, cmap=\"Greys_r\")\n ax1.imshow(over)\n plt.axis(\"off\")\n pf.label_roi_centroids(\n ax1, masks[:num_traces, ...], colors / 255, fontdict={\"fontsize\": 0}\n )\n\n fig.savefig(\n Path(figsave, f\"example_tcs{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n\ndef plot_positive_negative_events(save_dir, figsave, filetype):\n\n df = pd.read_csv(Path(save_dir, \"all_events_df.csv\"))\n df[\"exp_stage\"] = df.expt + \"_\" + df.stage\n\n use = [\"TTX_10um_washout_pre\", \"TTX_1um_pre\", \"TTX_10um_pre\", \"standard_none\"]\n\n fig1 = plot_events(df, use, log=True)\n fig1.savefig(\n Path(figsave, f\"231_histograms_log{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n fig2 = plot_events(df, use, log=False)\n fig2.savefig(\n Path(figsave, f\"231_histograms_no_log{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n fig3 = plot_events2(df, use, log=True)\n fig3.savefig(\n Path(figsave, f\"231_histograms_log_both_pos{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n fig4 = plot_events2(df, use, log=False)\n fig4.savefig(\n Path(figsave, f\"231_histograms_no_log_both_pos{filetype}\"),\n bbox_inches=\"tight\",\n dpi=300,\n transparent=True,\n )\n\n get_example_event_locs(df, use, log=True)\n\n\ndef get_example_event_locs(\n df, use, log=True, upper_lim=6.6, lower_lim=0, T=0.2, nbins=50, only_neg=True\n):\n dfn = df.copy()\n\n use_bool = np.array([np.any(x in use) for x in dfn.exp_stage])\n dfn = dfn[use_bool]\n\n too_big = np.abs(dfn.event_amplitude) > 6.6 / 100\n too_small = np.abs(dfn.event_amplitude) < 0 / 100\n dfn = dfn[np.logical_not(np.logical_or(too_big, too_small))]\n\n length_bins = np.histogram(dfn[\"event_length\"] * T, bins=nbins)[1]\n\n amp_bins = np.histogram(dfn[\"event_amplitude\"] * 100, bins=nbins)[1]\n\n with open(\n Path(\n Path(\"/home/peter/Dropbox/Papers/cancer/v2/\"), \"231_figure\", \"cell_ids.txt\"\n )\n ) as f:\n dat = f.readlines()\n\n trial_string = dat[0].strip()\n cids = dat[1:]\n cids = [int(x.strip()) for x in cids]\n\n dfn = dfn[dfn.trial_string == trial_string]\n\n trial_save = Path(\n Path(\"/home/peter/data/Firefly/cancer/analysis/full\"),\n \"ratio_stacks\",\n trial_string,\n )\n tcs = np.load(Path(trial_save, f\"{trial_string}_all_tcs.npy\"))\n\n tcs = ndimage.gaussian_filter(tcs, (0, 3))\n\n # now for each trace display where the things are binned\n for c in cids:\n fig, ax = plt.subplots(nrows=2)\n ax[1].hist2d(\n dfn[dfn.cell_id == f\"{trial_string}_cell_{c}\"].event_amplitude * 100,\n dfn[dfn.cell_id == f\"{trial_string}_cell_{c}\"].event_length * T,\n bins=(amp_bins, length_bins),\n norm=mpl.colors.LogNorm(),\n )\n ax[0].plot(np.arange(tcs.shape[1]) * T, tcs[c, :])\n for data in dfn[dfn.cell_id == f\"{trial_string}_cell_{c}\"].itertuples():\n ax[0].plot(data.event_time * T, tcs[c, :].max(), \".r\")\n\n ax[1].set_xlabel(f\"{trial_string}_cell_{c}\")\n fig.savefig(\n 
Path(\n \"/home/peter/Dropbox/Papers/cancer/v2/\",\n \"231_figure/example_trace_bins\",\n f\"bins_cell_{c}.png\",\n )\n )\n\n dfn[dfn.cell_id == f\"{trial_string}_cell_{c}\"].to_csv(\n Path(\n \"/home/peter/Dropbox/Papers/cancer/v2/\",\n \"231_figure/example_trace_bins\",\n f\"df_cell_{c}.csv\",\n )\n )\n\n\ndef plot_events(\n df, use, log=True, upper_lim=6.6, lower_lim=0, T=0.2, nbins=50, only_neg=True\n):\n\n dfn = df.copy()\n\n use_bool = np.array([np.any(x in use) for x in dfn.exp_stage])\n dfn = dfn[use_bool]\n\n too_big = np.abs(dfn.event_amplitude) > 6.6 / 100\n too_small = np.abs(dfn.event_amplitude) < 0 / 100\n dfn = dfn[np.logical_not(np.logical_or(too_big, too_small))]\n\n length_bins = np.histogram(dfn[\"event_length\"] * T, bins=nbins)[1]\n\n amp_bins = np.histogram(np.abs(dfn[\"event_amplitude\"]) * 100, bins=nbins)[1]\n\n neg = dfn[dfn.event_amplitude < 0]\n pos = dfn[dfn.event_amplitude > 0]\n\n gs = gridspec.GridSpec(2, 2)\n gs.update(wspace=0.3, hspace=0.3)\n\n fig, axarr = plt.subplots(figsize=(8, 6))\n\n ax0 = plt.subplot(gs[0])\n ax0.hist(np.abs(neg[\"event_amplitude\"]) * 100, bins=amp_bins, log=log, label=\"-ve\")\n ax0.hist(pos[\"event_amplitude\"] * 100, bins=amp_bins, log=log, label=\"+ve\")\n ax0.set_xlabel(\"Absolute event amplitude (% $\\Delta$R/R$_0$)\")\n ax0.set_ylabel(\"Observed Frequency\")\n ax0.legend(frameon=False)\n\n ax1 = plt.subplot(gs[1])\n ax1.hist(np.abs(neg[\"event_length\"]) * T, bins=length_bins, log=log, label=\"-ve\")\n ax1.hist(pos[\"event_length\"] * T, bins=length_bins, log=log, label=\"+ve\")\n ax1.set_xlabel(\"Event length (s)\")\n ax1.set_ylabel(\"Observed Frequency\")\n ax1.legend(frameon=False)\n\n if log:\n norm = mpl.colors.LogNorm()\n else:\n norm = None\n\n ax2 = plt.subplot(gs[2])\n hh = ax2.hist2d(\n np.abs(neg[\"event_amplitude\"]) * 100,\n neg[\"event_length\"] * T,\n bins=(amp_bins, length_bins),\n norm=norm,\n )\n plt.colorbar(hh[3])\n\n ax2.set_xlabel(\"Negative event amplitude (% $\\Delta$R/R$_0$)\")\n ax2.set_ylabel(\"Event length (s)\")\n\n ax3 = plt.subplot(gs[3])\n\n hh2 = ax3.hist2d(\n np.abs(pos[\"event_amplitude\"]) * 100,\n pos[\"event_length\"] * T,\n bins=(amp_bins, length_bins),\n norm=norm,\n )\n plt.colorbar(hh2[3])\n ax3.set_xlabel(\"Positive event size (% $\\Delta$R/R$_0$)\")\n ax3.set_ylabel(\"Event length (s)\")\n\n return fig\n\n\ndef plot_events2(\n df, use, log=True, upper_lim=6.6, lower_lim=0, T=0.2, nbins=50, only_neg=True\n):\n\n dfn = df.copy()\n\n use_bool = np.array([np.any(x in use) for x in dfn.exp_stage])\n dfn = dfn[use_bool]\n\n too_big = np.abs(dfn.event_amplitude) > 6.6 / 100\n too_small = np.abs(dfn.event_amplitude) < 0 / 100\n dfn = dfn[np.logical_not(np.logical_or(too_big, too_small))]\n\n length_bins = np.histogram(dfn[\"event_length\"] * T, bins=nbins)[1]\n\n amp_bins = np.histogram(dfn[\"event_amplitude\"] * 100, bins=nbins)[1]\n\n neg = dfn[dfn.event_amplitude < 0]\n pos = dfn[dfn.event_amplitude > 0]\n\n gs = gridspec.GridSpec(2, 2)\n gs.update(wspace=0.3, hspace=0.3)\n\n fig, axarr = plt.subplots(figsize=(8, 6))\n\n ax0 = plt.subplot(gs[0])\n ax0.hist(neg[\"event_amplitude\"] * 100, bins=amp_bins, log=log, label=\"-ve\")\n ax0.hist(pos[\"event_amplitude\"] * 100, bins=amp_bins, log=log, label=\"+ve\")\n ax0.set_xlabel(\"Event amplitude (% $\\Delta$R/R$_0$)\")\n ax0.set_ylabel(\"Observed Frequency\")\n ax0.legend(frameon=False)\n\n ax1 = plt.subplot(gs[1])\n ax1.hist(np.abs(neg[\"event_length\"]) * T, bins=length_bins, log=log, label=\"-ve\")\n ax1.hist(pos[\"event_length\"] * T, 
bins=length_bins, log=log, label=\"+ve\")\n ax1.set_xlabel(\"Event length (s)\")\n ax1.set_ylabel(\"Observed Frequency\")\n ax1.legend(frameon=False)\n\n if log:\n norm = mpl.colors.LogNorm()\n else:\n norm = None\n\n ax2 = plt.subplot(gs[2])\n hh = ax2.hist2d(\n dfn[\"event_amplitude\"] * 100,\n dfn[\"event_length\"] * T,\n bins=(amp_bins, length_bins),\n norm=norm,\n )\n plt.colorbar(hh[3])\n ax2.set_xlabel(\"Event amplitude (% $\\Delta$R/R$_0$)\")\n ax2.set_ylabel(\"Event length (s)\")\n\n # get number of events before/after TTX\n thresh = 2\n iid = np.argwhere(hh[1] < -thresh)[-1][0]\n iid2 = np.argwhere(hh[1] > thresh)[0][0]\n n_events_neg = np.sum(hh[0][:iid, :])\n n_events_pos = np.sum(hh[0][iid2:, :])\n\n with open(\n Path(\n \"/home/peter/Dropbox/Papers/cancer/v2/231_figure\", \"num_negpos_events.txt\"\n ),\n \"w\",\n ) as f:\n f.write(f\"{datetime.datetime.now()}\\n\")\n f.write(f\"Number events in bins up to edge at {hh[1][iid]:.3f} %\\n\")\n f.write(f\"neg: {n_events_neg} \\n\")\n f.write(f\"Number events in bins from edge at {hh[1][iid2]:.3f} %\\n\")\n f.write(f\"post: {n_events_pos} \\n\")\n\n thresh = 1.5\n iid = np.argwhere(hh[1] < -thresh)[-1][0]\n iid2 = np.argwhere(hh[1] > thresh)[0][0]\n n_events_neg = np.sum(hh[0][:iid, :])\n n_events_pos = np.sum(hh[0][iid2:, :])\n\n with open(\n Path(\n \"/home/peter/Dropbox/Papers/cancer/v2/231_figure\", \"num_negpos_events.txt\"\n ),\n \"a\",\n ) as f:\n f.write(f\"{datetime.datetime.now()}\\n\")\n f.write(f\"Number events in bins up to edge at {hh[1][iid]:.3f} %\\n\")\n f.write(f\"neg: {n_events_neg} \\n\")\n f.write(f\"Number events in bins from edge at {hh[1][iid2]:.3f} %\\n\")\n f.write(f\"post: {n_events_pos} \\n\")\n\n return fig\n\n\nif __name__ == \"__main__\":\n top_dir = Path(\"/home/peter/data/Firefly/cancer\")\n save_dir = Path(top_dir, \"analysis\", \"full\")\n figure_dir = Path(\"/home/peter/Dropbox/Papers/cancer/v2/\")\n initial_df = Path(top_dir, \"analysis\", \"long_acqs_20210428_experiments_correct.csv\")\n make_figures(initial_df, save_dir, figure_dir, filetype=\".pdf\")\n"
] | [
[
"scipy.ndimage.measurements.center_of_mass",
"numpy.mean",
"numpy.any",
"numpy.histogram",
"pandas.read_csv",
"numpy.arange",
"numpy.std",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.figure",
"pandas.concat",
"numpy.logical_or",
"scipy.ndimage.binary_dilation",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"scipy.ndimage.gaussian_filter",
"numpy.abs",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.subplots",
"numpy.ones",
"numpy.argwhere",
"matplotlib.pyplot.colorbar"
]
] |
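A minimal sketch of the 2-D histogram pattern that plot_events/plot_events2 use — shared bin edges taken from np.histogram plus a LogNorm colour scale (synthetic data, not values from the paper):

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
amp = rng.normal(size=1000)             # stand-in for event_amplitude
length = np.abs(rng.normal(size=1000))  # stand-in for event_length
amp_bins = np.histogram(amp, bins=50)[1]
length_bins = np.histogram(length, bins=50)[1]

fig, ax = plt.subplots()
hh = ax.hist2d(amp, length, bins=(amp_bins, length_bins), norm=mpl.colors.LogNorm())
fig.colorbar(hh[3])  # hh[3] is the image handle, as in the original code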
mk1123/multiagent-particle-envs | [
"17f2073c791837d7248980149167e79783772d55"
] | [
"multiagent/utils.py"
] | [
"import csv\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport os\n\n# data_path = os.path.join(os.getcwd(), \"baselines\", \"behavioral_sim\", \"building_data.csv\")\n# csv_path = os.path.dirname(os.path.realpath(__file__)) + \"/building_data.csv\"\n\n\ndef price_signal(day=45, type_of_DR=\"real_time_pricing\"):\n\n \"\"\"\n Utkarsha's work on price signal from a building with demand and solar\n Input: Day = an int signifying a 24 hour period. 365 total, all of 2012, start at 1.\n Output: netdemand_price, a measure of how expensive energy is at each time in the day\n optionally, we can return the optimized demand, which is the building\n calculating where the net demand should be allocated\n \"\"\"\n csv_path = \"building_data.csv\"\n csv_path_2 = \"../gym-socialgame/gym_socialgame/envs/building_data.csv\"\n current_dir = os.path.dirname(os.path.realpath(__file__))\n csv_path_3 = os.path.join(\n current_dir,\n \"../../transactive_control/gym-socialgame/gym_socialgame/envs/building_data.csv\",\n )\n try:\n df = pd.read_csv(csv_path)\n except:\n try:\n df = pd.read_csv(csv_path_2)\n except:\n df = pd.read_csv(csv_path_3)\n\n pv = 0.001 * np.array(df[\"PV (W)\"].tolist())\n price = np.array(df[\"Price( $ per kWh)\"].tolist())\n demand = np.nan_to_num(np.array(df[\"Office_Elizabeth (kWh)\"].tolist()))\n demand_charge = 10 / 30 # 10$/kW per month\n\n pvsize = 0 # Assumption\n netdemand = demand - pvsize * pv\n\n # Data starts at 5 am on Jan 1\n netdemand_24 = netdemand[24 * day - 5 : 24 * day + 19]\n price_24 = price[24 * day - 5 : 24 * day + 19]\n pv_24 = pv[24 * day - 5 : 24 * day + 19]\n demand_24 = demand[24 * day - 5 : 24 * day + 19]\n\n # Calculate optimal load scheduling. 
90% of load is fixed, 10% is controllable.\n def optimise_24h(netdemand_24, price_24):\n currentcost = netdemand_24 @ price_24\n\n fixed_load = 0.9 * netdemand_24\n controllable_load = sum(0.1 * netdemand_24)\n\n def objective(x):\n load = fixed_load + x\n cost = np.multiply(price_24, load)\n # Negative demand means zero cost, not negative cost\n # Adding L1 regularisation to penalise shifting of occupant demand\n lambd = 0.005\n # Demand charge: attached to peak demand\n return (\n sum(np.maximum(cost, 0))\n + demand_charge * max(load)\n + lambd * sum(abs(x - 0.1 * netdemand_24))\n )\n\n def constraint_sumofx(x):\n return sum(x) - controllable_load\n\n def constraint_x_positive(x):\n return x\n\n x0 = np.zeros(24)\n cons = [\n {\"type\": \"eq\", \"fun\": constraint_sumofx},\n {\"type\": \"ineq\", \"fun\": constraint_x_positive},\n ]\n sol = minimize(objective, x0, constraints=cons)\n return sol\n\n if type_of_DR == \"real_time_pricing\":\n sol = optimise_24h(netdemand_24, price_24)\n x = sol[\"x\"]\n diff = x - 0.1 * netdemand_24\n return -diff - min(-diff)\n\n elif type_of_DR == \"time_of_use\":\n if np.mean(price_24[8:18]) == price_24[9]:\n price_24[13:16] += 3\n return price_24\n else:\n return \"error!!!\"\n\n\ndef fourier_points_from_action(action, points_length, fourier_basis_size):\n assert (\n fourier_basis_size == (action.size + 1) // 2\n ), \"Incorrect fourier basis size for actions\"\n root_points = (action[0] / 2) * np.ones(points_length)\n inp = np.linspace(0, 1, points_length)\n for k in range(1, fourier_basis_size):\n ak, bk = action[2 * k - 1], action[2 * k]\n root_points += ak * np.sin(2 * np.pi * k * inp) + bk * np.cos(\n 2 * np.pi * k * inp\n )\n\n points = root_points ** 2\n # TODO: More elegant solution than clipping\n points = np.clip(points, 0, 10)\n return points\n\n"
] | [
[
"pandas.read_csv",
"numpy.maximum",
"numpy.linspace",
"numpy.clip",
"numpy.multiply",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"scipy.optimize.minimize",
"numpy.mean",
"numpy.zeros"
]
] |
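optimise_24h above is a standard scipy.optimize.minimize pattern: an equality constraint pins the total controllable load while an elementwise inequality keeps it non-negative. A stripped-down sketch of the same pattern:

import numpy as np
from scipy.optimize import minimize

target_sum = 5.0
cons = [
    {"type": "eq", "fun": lambda x: sum(x) - target_sum},  # sum(x) == 5
    {"type": "ineq", "fun": lambda x: x},                  # x >= 0 elementwise
]
sol = minimize(lambda x: ((x - 1.0) ** 2).sum(), np.zeros(4), constraints=cons)
print(sol.x)  # roughly [1.25, 1.25, 1.25, 1.25]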
boost-entropy-python/0xDeCA10B | [
"fe67c97283a19b83bb5c5616705ed6ff570cdd8f"
] | [
"simulation/decai/simulation/data/simple_data_loader.py"
] | [
"from dataclasses import dataclass\nfrom logging import Logger\nfrom typing import List\n\nimport numpy as np\nfrom injector import Binder, inject, Module\n\nfrom decai.simulation.data.data_loader import DataLoader\n\n\n@inject\n@dataclass\nclass SimpleDataLoader(DataLoader):\n \"\"\"\n Load simple data for testing.\n \"\"\"\n\n _logger: Logger\n\n def classifications(self) -> List[str]:\n return [\"0\", \"1\"]\n\n def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):\n def _ground_truth(data):\n if data[0] * data[2] > 0:\n return 1\n else:\n return 0\n\n x_train = np.array([\n [0, 0, 0],\n [1, 1, 1],\n\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n\n [0, 0, 2],\n [0, 2, 0],\n [2, 0, 0],\n [2, 0, 2],\n\n [0, 0, -3],\n [0, 3, 0],\n [0, 3, -3],\n [0, -3, 3],\n\n [0, 0, 4],\n [0, 4, 4],\n [4, 0, 0],\n\n [-6, 0, 0],\n ])\n x_test = np.array([\n [0, 2, 2],\n [0, 1, -1],\n [-1, 0, 0],\n [0, -1, 0],\n [1, -1, 2],\n [0, 0, 3],\n [0, -2, 0],\n [0, 2, -2],\n [3, 0, 0],\n [-2, 0, 2],\n [2, -2, 0],\n\n ])\n if train_size is not None:\n x_train = x_train[:train_size]\n if test_size is not None:\n x_test = x_test[:test_size]\n\n y_train = [_ground_truth(x) for x in x_train]\n y_test = [_ground_truth(x) for x in x_test]\n\n return (x_train, y_train), (x_test, y_test)\n\n\nclass SimpleDataModule(Module):\n \"\"\"\n Set up a `DataLoader` mainly for testing.\n \"\"\"\n\n def configure(self, binder: Binder):\n binder.bind(DataLoader, to=SimpleDataLoader)\n"
] | [
[
"numpy.array"
]
] |
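The labelling rule in _ground_truth reduces to: class 1 exactly when the first and third features share a non-zero sign. A quick check:

import numpy as np

x = np.array([[1, 0, 2], [-1, 0, 2], [0, 5, 3], [-2, 0, -2]])
labels = [int(row[0] * row[2] > 0) for row in x]
assert labels == [1, 0, 0, 1]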
keio-smilab22/HLSM-MAT | [
"6f34a5ec9226b28c01ae47ef16fe28662ab32935"
] | [
"lgp/models/alfred/hlsm/hlsm_navigation_model.py"
] | [
"from typing import Dict\n\nimport numpy as np\nimport math\n\nimport torch\nimport torch.nn as nn\n\nfrom lgp.abcd.model import LearnableModel\nfrom lgp.ops.spatial_distr import multidim_logsoftmax\n\nfrom lgp.utils.viz import show_image\n\nfrom lgp.env.alfred.alfred_subgoal import AlfredSubgoal\n\nfrom lgp.models.alfred.hlsm.hlsm_state_repr import AlfredSpatialStateRepr\nfrom lgp.models.alfred.hlsm.unets.lingunet_3 import Lingunet3\n\n\nMODEL_ROTATION = True\n\n\nclass HlsmNavigationModel(LearnableModel):\n\n \"\"\"\n Given a current state s_t, proposes an action distribution that makes sense.\n \"\"\"\n def __init__(self, hparams = None):\n super().__init__()\n self.hidden_dim = 128\n self.feature_2d_dim = AlfredSpatialStateRepr.get_2d_feature_dim()\n self.action_2d_dim = 2\n\n out_channels = 6 if MODEL_ROTATION else 1\n self.lingunet = Lingunet3(\n in_channels=self.feature_2d_dim + self.action_2d_dim,\n context_size=self.hidden_dim,\n out_channels=out_channels)\n self.act_type_emb = nn.Embedding(AlfredSubgoal.get_action_type_space_dim(), self.hidden_dim)\n self.act_arg_emb = nn.Embedding(AlfredSubgoal.get_action_arg_space_dim() + 1, self.hidden_dim)\n self.act_linear = nn.Linear(self.hidden_dim, self.hidden_dim)\n\n self.iter = nn.Parameter(torch.zeros([1]), requires_grad=False)\n\n self.nllloss = nn.NLLLoss(reduce=True, size_average=True)\n self.act = nn.LeakyReLU()\n\n def mle(self,\n state: AlfredSpatialStateRepr,\n object_id: torch.tensor):\n return ...\n\n def forward_model(self,\n features_2d: torch.tensor,\n subgoal_arg_features: torch.tensor,\n subgoal_tensors: torch.tensor):\n # Inputs\n act_types = subgoal_tensors[:, 0]\n act_args = subgoal_tensors[:, 1] + 1\n lingin = torch.cat([features_2d, subgoal_arg_features], dim=1)\n\n # Action representation\n type_emb = self.act(self.act_type_emb(act_types))\n arg_emb = self.act(self.act_arg_emb(act_args))\n act_emb = self.act_linear(type_emb + arg_emb)\n\n # Goal prediction\n out = self.lingunet(lingin, act_emb)\n\n # Unpacking results\n pos_logits = out[:, 0:1, :, :]\n yaw_class = out[:, 1:5, :, :]\n pitch_reg = out[:, 5:6, :, :]\n\n # Activations\n pos_log_distr = multidim_logsoftmax(pos_logits, dims=(2, 3)) # P(x, y)\n yaw_log_distr = multidim_logsoftmax(yaw_class, dims=(1,)) # P(yaw | x, y)\n pitch_pred = torch.tanh(pitch_reg) * math.pi # P(pitch | yaw, x, y)\n # Pitch is always between -pi and pi\n\n return pos_log_distr, yaw_log_distr, pitch_pred\n\n def get_name(self) -> str:\n return \"alfred_spatial_exploration_model\"\n\n def success(self, pred_logits, class_indices):\n # TODO: Measure distance between argmax ground truth and predicted\n return ...\n\n def collect_metrics(self, location_logdistr, gt_location_distr):\n metrics = {\n }\n return metrics\n\n def loss(self, batch: Dict):\n # This is now forward\n return self.forward(batch)\n\n def forward(self, batch: Dict):\n # TODO: Add dataset collate function that centers features around the agent position\n state_images = batch[\"state_images\"]\n features_2d = batch[\"features_2d\"]\n subgoals = batch[\"subgoals\"]\n subgoal_arg_features = batch[\"subgoal_args\"]\n gt_pos_yaw_prob = batch[\"nav_goal_images\"]\n gt_pitch = batch[\"nav_goal_pitch_images\"]\n\n #print(gt_pitch.sum(dim=(1, 2, 3)).detach().cpu().numpy().tolist())\n b, c, h, w = features_2d.shape\n pos_pred_log_distr, yaw_log_distr, pitch_prediction = self.forward_model(features_2d, subgoal_arg_features, subgoals)\n\n # Loss for predicting the goal position\n gt_pos_prob = gt_pos_yaw_prob.sum(dim=1, 
keepdim=True)\n flat_gt_pos_prob = gt_pos_prob.view((b, -1))\n assert flat_gt_pos_prob.sum() == b, \"Each ground truth distribution needs to be a simplex\"\n flat_pos_pred_logdistr = pos_pred_log_distr.view((b, -1))\n pos_loss = -(flat_gt_pos_prob * flat_pos_pred_logdistr).sum(dim=1).mean(dim=0)\n\n # Loss for predicting the yaw at the goal position\n gt_yaw_prob = gt_pos_yaw_prob\n yaw_loss = -(gt_yaw_prob * yaw_log_distr).sum(dim=(1, 2, 3)).mean(dim=0) # Sum across spatial dims, because only one spatial position is actually non-zero per batch\n\n # Loss for predicting the pitch at the goal pose (position + yaw)\n gt_pitch = gt_pitch.sum(dim=1, keepdim=True)\n has_pitch = (gt_pitch != 0)\n pitch_loss = (has_pitch * ((pitch_prediction - gt_pitch) ** 2)).sum() / (has_pitch.sum())\n\n loss = pos_loss + yaw_loss + pitch_loss\n\n viz = False\n\n def map_colors_for_viz(pdist):\n # Red - 0, Blue - 1, bluegreen - 2, Yellow - 3\n colors = torch.tensor([[1, 0, 0], [0, 0, 1], [0, 0.5, 0.5], [0.5, 0.5, 0]], dtype=pdist.dtype, device=pdist.device)\n # B x 4 x 3 x H x W\n colors = colors[None, :, :, None, None]\n pdist = pdist[:, :, None, :, :]\n pdist_c = (pdist * colors).sum(dim=1).clamp(0, 1)\n return pdist_c\n\n if viz and self.iter.item() % 1 == 0:\n alpha = 0.15\n\n # Position\n td_state_np = state_images[0].permute((1, 2, 0)).detach().cpu().numpy()\n td_g_np = gt_pos_prob[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_img_gt = alpha * td_state_np + (1 - alpha) * td_g_np\n show_image(comb_img_gt, \"state_with_pos_gt\", scale=4, waitkey=1)\n\n location_distr = torch.exp(pos_pred_log_distr)\n location_distr = location_distr / torch.max(location_distr)\n location_distr_np = location_distr[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_img_pred = td_state_np * alpha + (1 - alpha) * location_distr_np\n show_image(comb_img_pred, \"state_with_pos_pred\", scale=4, waitkey=1)\n\n # Yaw\n td_yaw_gt_np = map_colors_for_viz(gt_pos_yaw_prob)[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_yaw_img_gt = alpha * td_state_np + (1 - alpha) * td_yaw_gt_np\n show_image(comb_yaw_img_gt, \"state_with_yaw_gt\", scale=4, waitkey=1)\n\n yaw_distr_pred = torch.exp(yaw_log_distr)\n yaw_distr_pred = yaw_distr_pred / torch.max(yaw_distr_pred)\n yaw_distr_pred_np = map_colors_for_viz(yaw_distr_pred)[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_yaw_img_pred = td_state_np * alpha + (1 - alpha) * yaw_distr_pred_np\n show_image(comb_yaw_img_pred, \"state_with_yaw_pred\", scale=4, waitkey=1)\n\n # Pitch\n PITCH4 = False\n if PITCH4:\n # TODO: this should look up the pitch for the corresponding predicted / gt yaw bins. 
Too much work...\n td_state_np_tiled = np.tile(td_state_np, (4, 1, 1))\n gt_pitch_img = torch.cat([gt_pitch[:, 0:1], gt_pitch[:, 1:2], gt_pitch[:, 2:3], gt_pitch[:, 3:4]], dim=2)\n #gt_pitch_img = gt_pitch.sum(dim=1, keepdim=True)\n gt_pitch_img = torch.cat([gt_pitch_img.clamp(0, math.pi) * 0.5, gt_pitch_img.clamp(-math.pi, 0) * (-0.5), gt_pitch_img.clamp(0, 0)], dim=1)\n gt_pitch_img = gt_pitch_img[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_pitch_gt = alpha * td_state_np_tiled + (1 - alpha) * gt_pitch_img\n show_image(comb_pitch_gt, \"state_with_pitch_gt\", scale=4, waitkey=1)\n\n pitch_pred_img = torch.cat([pitch_prediction[:, 0:1], pitch_prediction[:, 1:2], pitch_prediction[:, 2:3], pitch_prediction[:, 3:4]], dim=2)\n pitch_pred_img = torch.cat([pitch_pred_img.clamp(0, 2) * 0.5, pitch_pred_img.clamp(-2, 0) * (-0.5), pitch_pred_img.clamp(0, 0)], dim=1)\n pitch_pred_img = pitch_pred_img[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_pitch_pred = alpha * td_state_np_tiled + (1 - alpha) * pitch_pred_img\n show_image(comb_pitch_pred, \"state_with_pitch_pred\", scale=4, waitkey=1)\n else:\n # TODO: this should look up the pitch for the corresponding predicted / gt yaw bins. Too much work...\n gt_pitch_img = gt_pitch\n #gt_pitch_img = gt_pitch.sum(dim=1, keepdim=True)\n gt_pitch_img = torch.cat([gt_pitch_img.clamp(0, math.pi) * 0.5, gt_pitch_img.clamp(-math.pi, 0) * (-0.5), gt_pitch_img.clamp(0, 0)], dim=1)\n gt_pitch_img = gt_pitch_img[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_pitch_gt = alpha * td_state_np + (1 - alpha) * gt_pitch_img\n show_image(comb_pitch_gt, \"state_with_pitch_gt\", scale=4, waitkey=1)\n\n pitch_pred_img = pitch_prediction\n pitch_pred_img = torch.cat([pitch_pred_img.clamp(0, math.pi) * 0.5, pitch_pred_img.clamp(-math.pi, 0) * (-0.5), pitch_pred_img.clamp(0, 0)], dim=1)\n pitch_pred_img = pitch_pred_img[0].permute((1, 2, 0)).detach().cpu().numpy()\n comb_pitch_pred = alpha * td_state_np + (1 - alpha) * pitch_pred_img\n show_image(comb_pitch_pred, \"state_with_pitch_pred\", scale=4, waitkey=1)\n\n #metrics = self.collect_metrics(location_log_distr, gt_location_flat)\n metrics = {}\n metrics[\"loss\"] = loss.item()\n metrics[\"pitch_loss\"] = pitch_loss.item()\n metrics[\"pos_loss\"] = pos_loss.item()\n metrics[\"yaw_loss\"] = yaw_loss.item()\n\n self.iter += 1\n\n return loss, metrics\n\n\nimport lgp.model_registry\nlgp.model_registry.register_model(\"alfred_spatial_navigation_model\", HlsmNavigationModel)\n"
] | [
[
"torch.nn.NLLLoss",
"torch.max",
"torch.zeros",
"torch.cat",
"numpy.tile",
"torch.tensor",
"torch.tanh",
"torch.nn.Linear",
"torch.exp",
"torch.nn.LeakyReLU"
]
] |
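The pos_loss above is a cross-entropy between a spatial log-distribution and a one-hot goal map; multidim_logsoftmax is the project's own op, but over dims (2, 3) it reduces to a log-softmax on the flattened H*W grid. A sketch of that reduction:

import torch

b, h, w = 2, 4, 4
logits = torch.randn(b, 1, h, w)
# Log-softmax over the spatial dims, like multidim_logsoftmax(dims=(2, 3)).
log_distr = torch.log_softmax(logits.view(b, -1), dim=1)
gt = torch.zeros(b, h * w)
gt[:, 0] = 1.0  # one-hot ground-truth position per sample
pos_loss = -(gt * log_distr).sum(dim=1).mean()
print(pos_loss)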
sintech/python_wizard | [
"397cac3abc2b74bc20a0ca202e3df0d990730dd4"
] | [
"pywizard/HammingWindow.py"
] | [
"import logging\nimport scipy as sp\n\nclass HammingWindow(object):\n _windows = {}\n\n @classmethod\n def processBuffer(cls, buf):\n l = len(buf)\n if l not in cls._windows:\n logging.debug(\"HammingWindow: Generate window for len {}\".format(l))\n cls._windows[l] = [ (0.54 - 0.46 * sp.cos(2 * sp.pi * i / (l - 1))) for i in range(l)]\n\n buf.samples *= cls._windows[l]\n\n"
] | [
[
"scipy.cos"
]
] |
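The cached coefficients match NumPy's built-in Hamming window, which uses the same 0.54 - 0.46 * cos(2*pi*n/(N-1)) formula:

import numpy as np

N = 8
manual = [0.54 - 0.46 * np.cos(2 * np.pi * i / (N - 1)) for i in range(N)]
assert np.allclose(manual, np.hamming(N))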
AssistiveRoboticsUNH/hierarchical_learner | [
"6375f69c7fc6f3f7dc78be9cb4d3c172436966f0"
] | [
"feature_ranking/run_classification.py"
] | [
"import torch\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom datasets.utils import create_dataloader\n\n\ndef train(lfd_params, model, verbose=False, input_dtype=\"video\"):\n\n # Create DataLoaders\n assert input_dtype in [\"video\", \"iad\", \"gcn\"], \"ERROR: run_videos.py: input_dtype must be 'video' or 'itr'\"\n\n if input_dtype == \"video\":\n from datasets.dataset_video import DatasetVideo as CustomDataset\n elif input_dtype == \"iad\":\n from datasets.dataset_iad import DatasetIAD as CustomDataset\n else:\n from datasets.dataset_gcn import DatasetGCN as CustomDataset\n\n dataset = CustomDataset(lfd_params, lfd_params.application.file_directory, \"train\", verbose=False,\n num_segments=lfd_params.input_frames, backbone=lfd_params.model.model_id)\n data_loader = create_dataloader(dataset, lfd_params, \"train\", shuffle=True)\n\n # put model on GPU\n params = list(model.parameters())\n net = torch.nn.DataParallel(model, device_ids=lfd_params.gpus).cuda()\n net.train()\n\n # define loss function\n criterion = torch.nn.CrossEntropyLoss().cuda()\n\n # define optimizer\n optimizer = torch.optim.Adam(params, lr=lfd_params.lr)\n\n # Train Network\n loss_record = []\n with torch.autograd.detect_anomaly():\n\n epoch = lfd_params.epochs\n for e in range(epoch):\n\n cumulative_loss = 0\n\n for i, data_packet in enumerate(data_loader):\n obs, label = data_packet\n obs = obs.float()\n\n # compute output\n logits = net(obs)\n\n # get loss\n loss = criterion(logits, label.cuda())\n loss.backward()\n\n # optimize SGD\n optimizer.step()\n optimizer.zero_grad()\n\n if verbose and i % 100 == 0:\n print(\"epoch: {:3d}/{:3d}\".format(e, epoch))\n\n print(\"loss:\", loss.cpu().detach().numpy())\n print(\"expected:\", label.cpu().detach().numpy())\n print(\"pred:\", np.argmax(logits.cpu().detach().numpy(), axis=1))\n print(\"logits:\")\n print(logits.cpu().detach().numpy())\n\n cumulative_loss += loss.cpu().detach().numpy()\n print(\"e:\", e, \"loss:\", cumulative_loss)\n loss_record.append(cumulative_loss)\n\n # show loss over time, output placed in Log Directory\n import matplotlib.pyplot as plt\n plt.plot(loss_record)\n\n # add bells and whistles to plt\n plt.title(model.filename)\n plt.ylabel(\"loss\")\n plt.tight_layout()\n\n # make sure log_dir exists\n log_dir = model.filename\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # save plt to file\n fig_filename = os.path.join(log_dir, \"train_loss.png\")\n plt.savefig(fig_filename)\n\n # clear plt so I don't draw on top of my multiple images.\n plt.clf()\n\n return model\n\n\ndef evaluate(lfd_params, model, mode=\"evaluation\", verbose=False, input_dtype=\"video\"):\n\n # Create DataLoaders\n assert input_dtype in [\"video\", \"iad\", \"gcn\"], \"ERROR: run_videos.py: input_dtype must be 'video' or 'itr'\"\n\n if input_dtype == \"video\":\n from datasets.dataset_video import DatasetVideo as CustomDataset\n elif input_dtype == \"iad\":\n from datasets.dataset_iad import DatasetIAD as CustomDataset\n #elif input_dtype == \"itr\":\n # from obsolete_files.dataset_itr import DatasetITR as CustomDataset\n else:\n from datasets.dataset_gcn import DatasetGCN as CustomDataset\n dataset = CustomDataset(lfd_params, lfd_params.application.file_directory, mode, verbose=True,\n num_segments=lfd_params.input_frames, backbone=lfd_params.model.model_id)\n data_loader = create_dataloader(dataset, lfd_params, mode, shuffle=False)\n\n # put model on GPU\n net = torch.nn.DataParallel(model, device_ids=lfd_params.gpus).cuda()\n net.eval()\n\n # 
Train Network\n expected_label_list = []\n predicted_label_list = []\n filename_list = []\n\n for i, data_packet in enumerate(data_loader):\n obs, label, filename = data_packet\n obs = obs.float()\n\n # compute output\n logits = net(obs)\n\n # get label information\n expected_label = label.cpu().detach().numpy()[0]\n predicted_label = np.argmax(logits.cpu().detach().numpy(), axis=1) [0]\n\n # add data to lists to be returned\n expected_label_list.append(expected_label)\n predicted_label_list.append(predicted_label)\n filename_list.append(filename)\n\n if verbose:\n print(\"file: {:3d}/{:3d}\".format(i, len(data_loader)))\n\n print(\"expected_label:\", expected_label)\n print(\"predicted_label:\", predicted_label)\n print(\"logits:\")\n print(logits.cpu().detach().numpy())\n\n # return Pandas dataframe\n return pd.DataFrame({\n \"expected_label\": expected_label_list,\n \"predicted_label\": predicted_label_list,\n \"filename\": filename_list,\n })\n\ndef generate_iad_files(lfd_params, model, dataset_mode, verbose=False, backbone=None):\n\n # Create DataLoaders\n from datasets.dataset_video import DatasetVideo as CustomDataset\n\n dataset = CustomDataset(lfd_params, lfd_params.application.file_directory, dataset_mode, verbose=True,\n num_segments=lfd_params.input_frames)\n data_loader = create_dataloader(dataset, lfd_params, dataset_mode, shuffle=False)\n\n # put model on GPU\n net = torch.nn.DataParallel(model, device_ids=lfd_params.gpus).cuda()\n net.eval()\n\n for i, data_packet in enumerate(data_loader):\n obs, label, filename = data_packet\n\n # compute output\n iad = net(obs)\n iad = iad.detach().cpu().numpy()\n\n for n, file in enumerate(filename):\n\n # format new save name\n save_id = file.split('/')\n file_id = save_id[-1] + \".npz\"\n save_id = save_id[:save_id.index(\"frames\")] + [\"iad_\"+backbone] + save_id[save_id.index(\"frames\") + 1:-1]\n save_id = '/' + os.path.join(*save_id)\n\n # create a directory to save the ITRs in\n if not os.path.exists(save_id):\n os.makedirs(save_id)\n\n save_id = os.path.join(save_id, file_id)\n\n if verbose:\n print(\"n: {0}, filename: {1}, saved_id: {2}\".format(n, file, save_id))\n\n # save ITR to file with given name\n print(save_id)\n print(\"iad.shape:\", iad[n].shape)\n\n np.savez(save_id, data=iad[n])\n"
] | [
[
"torch.optim.Adam",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.tight_layout",
"numpy.savez",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"torch.nn.DataParallel",
"torch.autograd.detect_anomaly",
"matplotlib.pyplot.ylabel"
]
] |
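A CPU-only sketch of the optimisation step inside train() — forward pass, cross-entropy loss, backward, optimizer step — with the DataParallel/CUDA wrapping stripped so it runs anywhere:

import torch
from torch import nn

model = nn.Linear(8, 3)  # stand-in for the real network
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

obs = torch.randn(4, 8)
label = torch.tensor([0, 1, 2, 0])
loss = criterion(model(obs), label)
loss.backward()
optimizer.step()
optimizer.zero_grad()
print(loss.item())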
GabrielMissael/solution | [
"aff33732d04efedb60c1ebc70fd5108ae5cc558e"
] | [
"solution/ml/sentiment.py"
] | [
"import pandas as pd\nimport re\nimport emoji\nfrom googletrans import Translator, constants\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\ndef deEmojify(text):\n return emoji.get_emoji_regexp().sub(r'', text)\n\ndef AddSentimentAnalysis(tweet_df_route:str):\n df = pd.read_pickle(tweet_df_route)\n\n df = df[df['labels'] != -1]\n\n top_labels = df['labels'].value_counts()[:3].keys().to_list()\n\n df = df[df['labels'].isin(top_labels)]\n\n tweets = df['text'].to_list()\n\n clean_tweets = [re.sub(r'\\n', \" \", tweet) for tweet in tweets]\n\n translator = Translator()\n sentiment = SentimentIntensityAnalyzer()\n\n sentiment_list=[]\n for i, tweet in enumerate(clean_tweets):\n tweet = deEmojify(tweet)\n tweet = translator.translate(tweet, src='es', dest='en').text\n sentiment_dict = sentiment.polarity_scores(tweet)\n sentiment_list.append(sentiment_dict['compound'])\n\n df['Sentiment'] = sentiment_list\n\n return df\n"
] | [
[
"pandas.read_pickle"
]
] |
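The value collected per tweet is VADER's 'compound' score, a normalised polarity in [-1, 1]. Minimal usage:

from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()
score = analyzer.polarity_scores("This is wonderful!")["compound"]
print(score)  # clearly positive text scores close to +1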
selinabitting/compas_view2 | [
"cac8abaf8fbde13ceabe35324be92779ea2e535f"
] | [
"src/compas_view2/objects/object.py"
] | [
"import abc\nfrom compas.geometry import Transformation\nfrom compas.geometry import Translation\nfrom compas.geometry import Rotation\nfrom compas.geometry import Scale\nfrom compas.geometry import decompose_matrix\nfrom compas.geometry import identity_matrix\n\nimport numpy as np\n\n\nABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})\n\nDATA_OBJECT = {}\n\n\nclass Object(ABC):\n \"\"\"Base object for compas_view2\n\n Attributes\n ----------\n name : str\n The name of the object.\n is_selected : bool\n Whether the object is selected.\n translation : list\n The translation vector of the object.\n rotation : list\n The Euler rotation of the object in XYZ order.\n scale : list\n The scale factor of the object.\n matrix: list\n The 4x4 transformation matrix that is composed from translation, rotation and scale.\n \"\"\"\n\n @staticmethod\n def register(dtype, otype):\n \"\"\"Register an object class to its corrensponding data type\"\"\"\n DATA_OBJECT[dtype] = otype\n\n @staticmethod\n def build(data, **kwargs):\n \"\"\"Build an object class according to its corrensponding data type\"\"\"\n try:\n obj = DATA_OBJECT[data.__class__](data, **kwargs)\n except KeyError:\n raise TypeError(\"Type {} is not supported by the viewer.\".format(type(data)))\n return obj\n\n def __init__(self, data, name=None, is_selected=False, is_visible=True):\n self._data = data\n self.name = name or str(self)\n self.is_selected = is_selected\n self.is_visible = is_visible\n self._instance_color = None\n self._translation = [0., 0., 0.]\n self._rotation = [0., 0., 0.]\n self._scale = [1., 1., 1.]\n self._transformation = Transformation()\n self._matrix_buffer = None\n\n @property\n def otype(self):\n return DATA_OBJECT[self._data.__class__]\n\n @property\n def DATA_OBJECT(self):\n return DATA_OBJECT\n\n @abc.abstractmethod\n def init(self):\n pass\n\n @abc.abstractmethod\n def draw(self, shader):\n pass\n\n def create(self):\n pass\n\n @property\n def properties(self):\n return None\n\n @property\n def translation(self):\n return self._translation\n\n @translation.setter\n def translation(self, vector):\n self._translation[0] = vector[0]\n self._translation[1] = vector[1]\n self._translation[2] = vector[2]\n\n @property\n def rotation(self):\n return self._rotation\n\n @rotation.setter\n def rotation(self, angles):\n self._rotation[0] = angles[0]\n self._rotation[1] = angles[1]\n self._rotation[2] = angles[2]\n\n @property\n def scale(self):\n return self._scale\n\n @scale.setter\n def scale(self, factors):\n self._scale[0] = factors[0]\n self._scale[1] = factors[1]\n self._scale[2] = factors[2]\n\n def _update_matrix(self):\n \"\"\"Update the matrix from object's translation, rotation and scale\"\"\"\n if self.translation == [0, 0, 0] and self.rotation == [0, 0, 0] and self.scale == [1, 1, 1]:\n self._transformation.matrix = identity_matrix(4)\n self._matrix_buffer = None\n else:\n T1 = Translation.from_vector(self.translation)\n R1 = Rotation.from_euler_angles(self.rotation)\n S1 = Scale.from_factors(self.scale)\n M = T1 * R1 * S1\n self._transformation.matrix = M.matrix\n self._matrix_buffer = np.array(self.matrix).flatten()\n\n @property\n def matrix(self):\n \"\"\"Get the updated matrix from object's translation, rotation and scale\"\"\"\n return self._transformation.matrix\n\n @matrix.setter\n def matrix(self, matrix):\n \"\"\"Set the object's translation, rotation and scale from given matrix, and update object's matrix\"\"\"\n scale, _, rotation, tranlation, _ = decompose_matrix(matrix)\n self.translation = 
tranlation\n self.rotation = rotation\n self.scale = scale\n self._update_matrix()\n"
] | [
[
"numpy.array"
]
] |
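_update_matrix composes the transform as M = T * R * S, i.e. scale first, then rotate, then translate. A pure-NumPy sketch of that ordering using a Z-rotation only (the class itself builds full XYZ Euler rotations via compas):

import numpy as np

t, s, theta = [1.0, 2.0, 3.0], [2.0, 2.0, 2.0], np.pi / 2
T = np.eye(4)
T[:3, 3] = t
S = np.diag(s + [1.0])
R = np.eye(4)
R[:2, :2] = [[np.cos(theta), -np.sin(theta)],
             [np.sin(theta), np.cos(theta)]]
M = T @ R @ S  # applied to a point: scale, then rotate, then translate
print(M)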
AurimasGr/com_exoselfie | [
"8bf2336c2578aa658c933df49a85a7f2b3f1e567"
] | [
"app/utils/hack_index.py"
] | [
"import pandas as pd\n\nimport numpy as np\nimport openpyxl\nimport pandas as pd\nfrom scipy.optimize import curve_fit\nimport cv2\nimport codecs\nimport os, shutil\n\n\n# from transforms import RGBTransform\n\ndef exp(dT, a, b, c):\n return a * np.exp(-b * dT) + c\n\n\n## Calculate physical parameters\n\ndef getT(T_star, R, a, A):\n \"\"\"\n From https://en.wikipedia.org/wiki/Planetary_equilibrium_temperature\n\n T_eq = T_star * sqrt(R/2a) * (1 - A)^1/4\n\n Where:\n T_eq - Planetary equilibrium temperature\n T_star - Star's (surface?) temperature\n A - Planet's bond albedo\n R - Radius of the star\n a - orbital distance\n\n Recommended to use default albedo around 0-10%\n \"\"\"\n R_sol = 6.957e5 # km\n AU = 1.496e8 # km\n K = -272.15 # C\n R *= R_sol\n a *= AU\n T_eq = T_star * np.sqrt(R / (2 * a)) * (1 - A) ** (1 / 4)\n T_eq += K\n return (T_eq)\n\n\ndef getG(M_exo, R_exo):\n \"\"\"\n Input mass; radius\n Output g_exo at surface\n\n From https://en.wikipedia.org/wiki/Surface_gravity\n g = G * M_exo / R_exo^2\n \"\"\"\n M_earth = 5.972e24 # kg\n R_earth = 6.371e6 # m\n g_earth = 9.8\n G = 6.674e-11\n M_exo *= M_earth\n R_exo *= R_earth\n g_exo_abs = G * M_exo / (R_exo ** 2)\n g_exo = g_exo_abs / g_earth # relative\n return (g_exo)\n\n\n## Calculate survival & image params\ndef calibrateHeat():\n \"\"\"\n TC = (TF - 32) / 1.8\n Survive heat if TC <= 36.5\n Survive cold if TC > -1\n \"\"\"\n lim_hot = 36.5\n lim_cold = -1\n\n calib_cold = pd.read_excel(\"death_calibration.xlsx\",\n sheet_name='Cold')\n calib_hot = pd.read_excel(\"death_calibration.xlsx\",\n sheet_name='Hot')\n calib_cold['T_C'] = (calib_cold['T_F'] - 32) / 1.8\n calib_hot['T_C'] = (calib_hot['T_F'] - 32) / 1.8\n\n fit_hot = calib_hot[['T_C', 't_h']].loc[calib_hot['T_C'] > lim_hot]\n fit_cold = calib_cold[['T_C', 't_h']].loc[calib_cold['T_C'] < lim_cold]\n fit_hot['dT'] = fit_hot['T_C'] - lim_hot\n fit_cold['dT'] = - (fit_cold['T_C'] - lim_cold)\n\n # Fit\n phot, pcov = curve_fit(exp, fit_hot['dT'], np.log(fit_hot['t_h']))\n pcold, pcov = curve_fit(exp, fit_cold['dT'], np.log(fit_cold['t_h']))\n\n # Plot\n \"\"\"\n plt.scatter(fit_cold['dT'],np.log(fit_cold['t_h']),label='cold')\n plt.scatter(fit_hot['dT'],np.log(fit_hot['t_h']),label='hot')\n plt.plot(fit_hot['dT'], exp(fit_hot['dT'], *phot),label='hot-fit')\n plt.plot(fit_cold['dT'], exp(fit_cold['dT'], *pcold),label='cold-fit')\n plt.legend()\n \"\"\"\n return (phot, pcold)\n\n\ndef surviveHeat(T_eq):\n \"\"\"\n Input temperature\n Dead or alive; heat flag; survival time; weight coefficient 0-1\n\n Survival function is exponential on dT:\n a * np.exp(-b * x) + c\n\n \"\"\"\n hot = {}\n hot['lim'] = 36.5\n hot['a'] = 6.56979;\n hot['b'] = 0.1425;\n hot['c'] = -1.8737\n hot['extr'] = 100\n cold = {}\n cold['lim'] = -1\n cold['a'] = 6.82646;\n cold['b'] = 0.0844427;\n cold['c'] = 0.318452\n cold['extr'] = -50\n res = {}\n res['T'] = T_eq\n if T_eq > cold['lim'] and T_eq < hot['lim']:\n res['surv'] = True # survival bool\n res['t_surv'] = None # survival time, hrs\n res['cod'] = None # cause of death\n res['wt'] = 0 # image blending weight\n elif T_eq <= cold['lim']:\n # Model\n dT = abs(T_eq - cold['lim'])\n a = cold['a'];\n b = cold['b'];\n c = cold['c']\n t = np.e ** exp(dT, a, b, c)\n # Weight\n wrange = abs(cold['extr'] - cold['lim'])\n wt = dT / wrange\n if wt > 1:\n wt = 1\n # Outs\n res['surv'] = False\n res['t_surv'] = t\n res['cod'] = 'cold'\n res['wt'] = wt\n elif T_eq >= hot['lim']:\n # Model\n dT = abs(T_eq - hot['lim'])\n a = hot['a'];\n b = hot['b'];\n c = 
hot['c']\n t = np.e ** exp(dT, a, b, c)\n # Weight\n wrange = abs(hot['extr'] - hot['lim'])\n wt = dT / wrange\n if wt > 1:\n wt = 1\n # Outs\n res['surv'] = False\n res['t_surv'] = t\n res['cod'] = 'hot'\n res['wt'] = wt\n return (res)\n\n\ndef surviveG(G_exo):\n \"\"\"\n Input G\n Output alive or dead; gravity flag; weight coefficient 0-1\n \"\"\"\n G_extr = 5\n G_micro = 0.8\n G_hyper = 1.2\n res = {}\n res['G'] = G_exo\n if G_exo < G_micro:\n res['surv'] = True\n res['flag'] = 'micro'\n res['wt'] = 0.5 * G_exo / G_micro\n elif G_exo < G_hyper:\n res['surv'] = True\n res['flag'] = 'normal'\n res['wt'] = 0.5\n elif G_exo < G_extr:\n res['surv'] = True\n res['flag'] = 'hyper'\n res['wt'] = 0.5 + 0.5 * (G_exo - G_hyper) / (G_extr - G_hyper)\n else:\n res['surv'] = False\n res['flag'] = 'extreme'\n res['wt'] = 1\n return (res)\n\n\ndef surviveTotal(T, G):\n \"\"\"\n Input gravity/heat dicts\n Output full result dict\n \"\"\"\n G_surv = 0.1 # survival time in hours when in extreme G\n cfg = {}\n cfg['T'] = surviveHeat(T)\n cfg['G'] = surviveG(G)\n res = {}\n res['T'] = T\n res['G'] = G\n res['G_flag'] = cfg['G']['flag']\n res['G_wt'] = cfg['G']['wt']\n res['T_wt'] = cfg['T']['wt']\n res['t_surv'] = cfg['T']['t_surv']\n if cfg['T']['surv'] and cfg['G']['surv']:\n res['surv'] = True\n res['cod'] = None\n else:\n res['surv'] = False\n if (not cfg['T']['surv']) and (not cfg['G']['surv']):\n res['t_surv'] = G_surv\n if cfg['T']['cod'] == 'hot':\n res['cod'] = 'hot&G'\n else:\n res['cod'] = 'cold&G'\n elif not cfg['G']['surv']:\n res['cod'] = 'G'\n res['t_surv'] = G_surv\n else:\n if cfg['T']['cod'] == 'hot':\n res['cod'] = 'hot'\n else:\n res['cod'] = 'cold'\n return (res)\n\n\ndef getRGB(T):\n \"\"\"\n Input star temperature\n RGB reflecting black body radiation\n\n From: https://en.wikipedia.org/wiki/Black-body_radiation\n I(v,T) = 2hv^3/c^2 * 1/(e^(hv/kT) - 1)\n \"\"\"\n h = 6.626e-34\n c = 2.998e8\n k = 1.38e-23\n\n def bbr(T, v):\n I = (2 * h * v ** 3 / c ** 2) * 1 / (np.e ** (h * v / (k * T)) - 1)\n return (I)\n\n vR = c / 610e-9\n vG = c / 550e-9\n vB = c / 465e-9\n IR = bbr(T, vR)\n IG = bbr(T, vG)\n IB = bbr(T, vB)\n RGB = pd.DataFrame({\n 'color': ['R', 'G', 'B'],\n 'I': [IR, IG, IB],\n })\n rgb = RGB['I'] / np.max(RGB['I'])\n return (rgb)\n\n\n## Exoplanet database\ndef loadData():\n df = pd.read_excel(\"utils/NASA_Dataset_final.xlsx\",\n sheet_name='Final Dataset',\n header=3)\n #df = df[df.keys()[:9]] # cleans rows a bit\n return(df)\n\n\ndef getSimilar(df, T, G, A):\n \"\"\"\n Calculate similarity score of planets in database;\n Temperature, G, Albedo;\n Appends similarity score array;\n Finds index of highest similarity\n \"\"\"\n K = -272.15\n T -= K\n df['pl_teq'] = np.nan\n df['pl_g'] = np.nan\n df['sim'] = np.nan\n for i in df.index:\n # T\n T_exo = getT(df['st_teff'][i],\n df['st_rad'][i],\n df['pl_orbsmax'][i],\n A)\n df.loc[i, 'pl_teq'] = T_exo\n T_exo -= K\n T_sim = 1 - abs((T - T_exo) / np.max((T, T_exo)))\n # G\n G_exo = getG(df['pl_masse'][i], df['pl_rade'][i])\n df.loc[i, 'pl_g'] = G_exo\n G_sim = 1 - abs((G - G_exo) / np.max((G, G_exo)))\n # Total\n df.loc[i, 'sim'] = np.sqrt(T_sim * G_sim)\n idx = df.loc[df['sim'] == np.max(df['sim'])].index[0]\n return (df, idx)\n\n\n## Tint\ndef tint(T, img_path):\n rgb = getRGB(T)\n rgb = list(rgb)\n img = cv2.imread(img_path)\n img = np.einsum('ijk,k->ijk', img, rgb)\n img = np.asarray(img, dtype=int)\n return (img)\n\n\n## Text\ndef returnText(cfg, T_star):\n # Survival\n text = f\"\"\"\n Here's a picture of you on the planet 
that you've built yourself! \n It orbits a start that \n \"\"\"\n if T_star < 2000:\n text += \"appears red \"\n elif T_star < 5000:\n text += \"has an orange tint \"\n elif T_star < 9000:\n text += \"has a yellow tint \"\n elif T_star < 10300:\n text += \"appears white to the human eye \"\n else:\n text += \"has a bluish color \"\n text += f\"because of its temperature. \"\n text += f\"\"\"\n The average temperature on this planet is {round(cfg['T']):.0f} \n degrees centigrade which is considered \n \"\"\"\n if cfg['cod']:\n if 'hot' in cfg['cod']:\n if cfg['T_wt'] == 1:\n text += \"extremely hot. \"\n else:\n text += \"hot. \"\n elif 'cold' in cfg['cod']:\n if cfg['T_wt'] == 1:\n text += \"extremely cold. \"\n else:\n text += \"cold. \"\n else:\n text += \"normal. \"\n text += \"The surface gravity of your planet is \"\n if cfg['G_flag'] == 'normal':\n text += f\"around that of the Earth which is normal to the human body. \"\n elif cfg['G_flag'] == 'micro':\n if cfg['G'] < 0.1:\n text += f\"less than 10% \"\n else:\n text += f\"around {round(cfg['G'], 1) * 100:.0f}% \"\n text += \"of that on the Earth and is considered small. \"\n elif cfg['G_flag'] == 'hyper':\n if round(cfg['G']) == 1:\n G = 2\n else:\n G = round(cfg['G'])\n text += f\"around {G:.0f} times larger than Earth's and is considered strong. \"\n elif cfg['G_flag'] == 'extreme':\n G = round(cfg['G'])\n text += f\"around {G:.0f} times larger than Earth's and is considered deadly. \"\n if cfg['surv']:\n text += f\"\"\"\n Congratulations, you would probably survive on this planet!\n \"\"\"\n else:\n text += \"Unfortunately you \"\n if cfg['t_surv'] < 0.2:\n text += \"wouldn't last 10 minutes \"\n elif round(cfg['t_surv']) <= 1:\n text += \"would be dead after an hour \"\n else:\n text += \"would survive for only around \"\n if cfg['t_surv'] <= 10:\n text += f\"{cfg['t_surv']:.0f} hours \"\n elif cfg['t_surv'] <= 50:\n text += f\"{round(cfg['t_surv'] / 10, 1) * 10:.0f} hours\"\n else:\n text += f\"{round(cfg['t_surv'] / 10):.0f} days \"\n text += \"on this planet \"\n if round(cfg['t_surv']) <= 1:\n text += f\"because of \"\n else:\n text += f\"and would eventually die from \"\n if 'hot' in cfg['cod']:\n if cfg['T_wt'] == 1:\n text += \"extreme heat\"\n else:\n text += \"too much heat\"\n elif 'cold' in cfg['cod']:\n if cfg['T_wt'] == 1:\n text += \"extreme cold\"\n else:\n text += \"hypothermia\"\n if '&' in cfg['cod']:\n text += \" and \"\n if 'G' in cfg['cod']:\n text += \"unbearable gravitational force\"\n text += \".\"\n text = text.replace('\\n', '').replace(\" \", '')\n paragraph1 = text\n\n # Exoplanet\n df = loadData()\n df, idx = getSimilar(df, cfg['T'], cfg['G'], 0.05)\n planet = df.loc[idx]\n text = f\"There is a known exoplanet called '{planet['pl_name']}' \"\n text += f\"that is similar to your planet with a score of \"\n text += f\"{planet['sim'] * 100:.0f}. 
\"\n text += f\"This planet completes an orbit around its host star \"\n text += f\"{planet['st_name']} \"\n if round(planet['pl_orbper']) == 1:\n text += 'each day.'\n elif planet['pl_orbper'] > 1:\n text += f\"every {round(planet['pl_orbper']):.0f} days.\"\n elif round(planet['pl_orbper'] * 24) == 1:\n text += \"every hour.\"\n elif planet['pl_orbper'] * 24 < 1:\n text += f\"multiple times every hour.\"\n else:\n text += f\"every {round(planet['pl_orbper'] * 24):.0f} hours.\"\n text = text.replace('\\n', '').replace(\" \", '')\n text = paragraph1 + '\\n\\n' + text\n return (text)\n\n\ndef hack_index(text_replacement, image_name):\n with codecs.open(\"templates/backup/index.html\") as file:\n text = file.read()\n\n replaced_text = text.replace(\"\"\"<p class=\"card-text\"> Your survival score: </p>\n <p class=\"card-text\"> Survival status: </p>\n <p class=\"card-text\"> Temperature on your planet: </p>\n <p class=\"card-text\"> Surface gravity on your plannet: </p>\n <p class=\"card-text\"> Similar exoplanet discovered by NASA: </p>\"\"\",\n f\"\"\"<p class=\"card-text\"> {text_replacement} </p>\"\"\")\n\n replaced_text = replaced_text.replace(\"Result.jpg\", image_name)\n\n with open(\"templates/backup/temp/index.html\", \"w\") as file:\n file.write(replaced_text)\n\n shutil.copyfile(\"templates/backup/temp/index.html\", \"templates/index.html\")"
] | [
[
"numpy.log",
"pandas.read_excel",
"numpy.sqrt",
"numpy.einsum",
"numpy.asarray",
"pandas.DataFrame",
"numpy.max",
"numpy.exp"
]
] |
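calibrateHeat fits a * exp(-b * dT) + c to log survival hours with scipy.optimize.curve_fit. A self-contained sketch of that fit on synthetic data (the constants echo the hard-coded hot-limit values above):

import numpy as np
from scipy.optimize import curve_fit

def exp_model(dT, a, b, c):
    return a * np.exp(-b * dT) + c

rng = np.random.default_rng(1)
dT = np.linspace(0, 30, 40)
y = exp_model(dT, 6.57, 0.14, -1.87) + 0.01 * rng.normal(size=dT.size)
popt, pcov = curve_fit(exp_model, dT, y, p0=(5.0, 0.1, 0.0))
print(popt)  # close to (6.57, 0.14, -1.87)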
sholderbach/pandasbikeshed | [
"56780195718a18b808231a9eb5f1d4a95f7a2c0a"
] | [
"pandasbikeshed/cli_tools/clip2tex.py"
] | [
"import pandas as pd\nimport argparse\n\ndef run(filename=None, escape=False, print_it=True):\n try:\n df = pd.read_clipboard(sep='\\t')\n except:\n print('Could not read from clipboard. Make sure your clipboard contains tab separated data!')\n return 1\n df = df.replace(pd.np.nan, '')\n df_str = df.to_latex(escape=escape, index=False)\n if filename:\n with open(filename, 'w') as f:\n f.write(df_str)\n if print_it:\n print(df_str)\n return 0\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(prog='clip2tex', description='A small command-line tool to convert tables copied to the clipboard e.g. from MS Excel or GDrive to a simple LaTeX')\n parser.add_argument('FILENAME', nargs='?')\n parser.add_argument('--escape', '-e', action='store_true', help='Set to escape all special characters')\n args = parser.parse_args()\n if args.FILENAME:\n run(filename=args.FILENAME, escape=args.escape, print_it=False)\n else:\n run(escape=args.escape)\n\n"
] | [
[
"pandas.read_clipboard"
]
] |
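The core conversion in run() is just DataFrame.to_latex; the clipboard read is the only platform-dependent part. Equivalent one-off usage:

import pandas as pd

df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
print(df.to_latex(index=False, escape=False))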
papaemmelab/cnvkit | [
"c3695209f2c39c07174839e7767757cce9dfd0e7"
] | [
"cnvlib/scatter.py"
] | [
"\"\"\"The 'scatter' command for rendering copy number as scatter plots.\"\"\"\nimport collections\nimport logging\n\nimport numpy as np\nfrom matplotlib import pyplot\nimport matplotlib.patches as mpatches\nfrom skgenome.rangelabel import unpack_range\n\nfrom . import core, params, plots\nfrom .plots import MB\nfrom .cnary import CopyNumArray as CNA\n\nHIGHLIGHT_COLOR = 'gold'\nPOINT_COLOR = '#606060'\nSEG_COLOR = 'darkorange'\nTREND_COLOR = '#A0A0A0'\nSUBCLONE_COLOR = '#0011FF'\nCNLOH_COLOR = \"#02ba33\"\n\n\ndef do_scatter(cnarr, segments=None, variants=None,\n show_range=None, show_gene=None, do_trend=False, by_bin=False,\n window_width=1e6, y_min=None, y_max=None,\n antitarget_marker=None, segment_color=SEG_COLOR, title=None,\n ):\n \"\"\"Plot probe log2 coverages and segmentation calls together.\"\"\"\n if by_bin:\n bp_per_bin = (sum(c.end.iat[-1] for _, c in cnarr.by_chromosome())\n / len(cnarr))\n window_width /= bp_per_bin\n show_range_bins = plots.translate_region_to_bins(show_range, cnarr)\n cnarr, segments, variants = plots.update_binwise_positions(\n cnarr, segments, variants)\n global MB\n orig_mb = MB\n MB = 1\n\n if not show_gene and not show_range:\n fig = genome_scatter(cnarr, segments, variants, do_trend, y_min, y_max, title,\n segment_color)\n else:\n if by_bin:\n show_range = show_range_bins\n fig = chromosome_scatter(cnarr, segments, variants, show_range, show_gene,\n antitarget_marker, do_trend, by_bin, window_width,\n y_min, y_max, title, segment_color)\n\n if by_bin:\n # Reset to avoid permanently altering the value of cnvlib.scatter.MB\n MB = orig_mb\n return fig\n\n# === Genome-level scatter plots ===\n\ndef genome_scatter(cnarr, segments=None, variants=None, do_trend=False,\n y_min=None, y_max=None, title=None, segment_color=SEG_COLOR):\n \"\"\"Plot all chromosomes, concatenated on one plot.\"\"\"\n if (cnarr or segments) and variants:\n # Lay out top 3/5 for the CN scatter, bottom 2/5 for SNP plot\n axgrid = pyplot.GridSpec(5, 1, hspace=.85)\n axis = pyplot.subplot(axgrid[:3])\n axis2 = pyplot.subplot(axgrid[3:], sharex=axis)\n # Place chromosome labels between the CNR and SNP plots\n axis2.tick_params(labelbottom=False)\n chrom_sizes = plots.chromosome_sizes(cnarr or segments)\n axis2 = snv_on_genome(axis2, variants, chrom_sizes, segments, do_trend,\n segment_color)\n else:\n _fig, axis = pyplot.subplots()\n if title is None:\n title = (cnarr or segments or variants).sample_id\n if cnarr or segments:\n axis.set_title(title)\n axis = cnv_on_genome(axis, cnarr, segments, do_trend, y_min, y_max,\n segment_color)\n else:\n axis.set_title(\"Variant allele frequencies: %s\" % title)\n chrom_sizes = collections.OrderedDict(\n (chrom, subarr[\"end\"].max())\n for chrom, subarr in variants.by_chromosome())\n axis = snv_on_genome(axis, variants, chrom_sizes, segments, do_trend,\n segment_color)\n return axis.get_figure()\n\n\ndef cnv_on_genome(axis, probes, segments, do_trend=False, y_min=None,\n y_max=None, segment_color=SEG_COLOR):\n \"\"\"Plot bin ratios and/or segments for all chromosomes on one plot.\"\"\"\n # Configure axes etc.\n patches = [\n mpatches.Patch(color='darkorange', label='clonal'),\n mpatches.Patch(color=CNLOH_COLOR, label='clonal CN-LOH'),\n mpatches.Patch(color=SUBCLONE_COLOR, label='subclonal'),\n ]\n axis.legend(handles=patches, loc=\"upper right\")\n axis.axhline(color='k')\n axis.set_ylabel(\"Copy ratio (log2)\")\n has_subclones = False\n if 'aberrant_cell_frac' in segments.data.columns:\n has_subclones = True\n clone_frac = 
max(segments['aberrant_cell_frac'])\n    if not (y_min and y_max):\n        if segments:\n            # Auto-scale y-axis according to segment mean-coverage values\n            # (Avoid spuriously low log2 values in HLA and chrY)\n            low_chroms = segments.chromosome.isin(('6', 'chr6', 'Y', 'chrY'))\n            seg_auto_vals = segments[~low_chroms]['log2'].dropna()\n            if not y_min:\n                y_min = (np.nanmin([seg_auto_vals.min() - .2, -1.5])\n                         if len(seg_auto_vals) else -2.5)\n            if not y_max:\n                y_max = (np.nanmax([seg_auto_vals.max() + .2, 1.5])\n                         if len(seg_auto_vals) else 2.5)\n        else:\n            if not y_min:\n                y_min = -2.5\n            if not y_max:\n                y_max = 2.5\n    axis.set_ylim(y_min, y_max)\n\n    # Group probes by chromosome (to calculate plotting coordinates)\n    if probes:\n        chrom_sizes = plots.chromosome_sizes(probes)\n        chrom_probes = dict(probes.by_chromosome())\n        # Precalculate smoothing window size so all chromosomes have similar\n        # degree of smoothness\n        # NB: Target panel has ~1k bins/chrom. -> 250-bin window\n        #     Exome: ~10k bins/chrom. -> 2500-bin window\n        window_size = int(round(.15 * len(probes) /\n                                probes.chromosome.nunique()))\n    else:\n        chrom_sizes = plots.chromosome_sizes(segments)\n    # Same for segment calls\n    chrom_segs = dict(segments.by_chromosome()) if segments else {}\n\n    # Plot points & segments\n    x_starts = plots.plot_x_dividers(axis, chrom_sizes)\n    for chrom, x_offset in x_starts.items():\n        if probes and chrom in chrom_probes:\n            subprobes = chrom_probes[chrom]\n            x = 0.5 * (subprobes['start'] + subprobes['end']) + x_offset\n            axis.scatter(x, subprobes['log2'], marker='.',\n                         color=POINT_COLOR, edgecolor='none', alpha=0.2)\n            if do_trend:\n                # ENH break trendline by chromosome arm boundaries?\n                axis.plot(x, subprobes.smooth_log2(), #window_size),\n                          color=POINT_COLOR, linewidth=2, zorder=-1)\n\n        if chrom in chrom_segs:\n            for seg in chrom_segs[chrom]:\n                if has_subclones:\n                    is_subclone = 1 if seg.aberrant_cell_frac != clone_frac else 0\n                    color = choose_segment_color(seg, segment_color) if not is_subclone else SUBCLONE_COLOR\n                    axis.plot((seg.start + x_offset, seg.end + x_offset),\n                              (max(y_min+0.1, seg.log2), max(y_min+0.1, seg.log2)),\n                              color=color, linewidth=3, solid_capstyle='round')\n                    if is_subclone:\n                        cn_state = str(int(seg.cn1)) + \"+\" + str(int(seg.cn2))\n                        text = f\"{int(seg.aberrant_cell_frac*100)}% {cn_state}\"\n                        axis.text(seg.start+x_offset, max(y_min+0.1, seg.log2+0.01), text, fontsize=7)\n                else:\n                    color = choose_segment_color(seg, segment_color)\n                    axis.plot((seg.start + x_offset, seg.end + x_offset),\n                              (max(y_min+0.1, seg.log2), max(y_min+0.1, seg.log2)),\n                              color=color, linewidth=3, solid_capstyle='round')\n    return axis\n\n\ndef snv_on_genome(axis, variants, chrom_sizes, segments, do_trend, segment_color):\n    \"\"\"Plot a scatter-plot of SNP chromosomal positions and shifts.\"\"\"\n    axis.set_ylim(0.0, 1.0)\n    axis.set_ylabel(\"VAF\")\n    x_starts = plots.plot_x_dividers(axis, chrom_sizes)\n\n    # Calculate the coordinates of plot components\n    chrom_snvs = dict(variants.by_chromosome())\n    if segments:\n        chrom_segs = dict(segments.by_chromosome())\n    elif do_trend:\n        # Pretend a single segment covers each chromosome\n        chrom_segs = {chrom: None for chrom in chrom_snvs}\n    else:\n        chrom_segs = {}\n\n    for chrom, x_offset in x_starts.items():\n        if chrom not in chrom_snvs:\n            continue\n\n        snvs = chrom_snvs[chrom]\n        # Plot the points\n        axis.scatter(snvs['start'].values + x_offset,\n                     snvs['alt_freq'].values,\n                     color=POINT_COLOR, edgecolor='none',\n                     alpha=0.2, marker='.')\n        # Trend bars: always calculated, only shown on request\n        if chrom in chrom_segs:\n            # Draw average VAF within each segment\n            segs = chrom_segs[chrom]\n            for seg, v_freq in get_segment_vafs(snvs, segs):\n                if seg:\n                    posn = 
[seg.start + x_offset, seg.end + x_offset]\n color = choose_segment_color(seg, segment_color,\n default_bright=False)\n else:\n posn = [snvs.start.iat[0] + x_offset,\n snvs.start.iat[-1] + x_offset]\n color = TREND_COLOR\n axis.plot(posn, [v_freq, v_freq],\n color=color, linewidth=2, zorder=-1,\n solid_capstyle='round')\n return axis\n\n# === Chromosome-level scatter plots ===\n\ndef chromosome_scatter(cnarr, segments, variants, show_range, show_gene,\n antitarget_marker, do_trend, by_bin, window_width,\n y_min, y_max, title, segment_color):\n \"\"\"Plot a specified region on one chromosome.\n\n Possibilities::\n\n Options | Shown\n ------------ | --------\n -c | -g | Genes | Region\n ------- | -- | ----- | ------\n - | + | given | auto: gene(s) + margin\n chr | - | none | whole chrom\n chr | + | given | whole chrom\n chr:s-e | - | all | given\n chr:s-e | + | given | given\n\n \"\"\"\n sel_probes, sel_segs, sel_snvs, window_coords, genes, chrom = \\\n select_range_genes(cnarr, segments, variants, show_range,\n show_gene, window_width)\n # Create plots\n if cnarr or segments:\n # Plot CNVs at chromosome level\n if variants:\n # Lay out top 3/5 for the CN scatter, bottom 2/5 for SNP plot\n axgrid = pyplot.GridSpec(5, 1, hspace=.5)\n axis = pyplot.subplot(axgrid[:3])\n axis2 = pyplot.subplot(axgrid[3:], sharex=axis)\n # Plot allele freqs for only the selected region\n snv_on_chromosome(axis2, sel_snvs, sel_segs, genes, do_trend,\n by_bin, segment_color)\n else:\n _fig, axis = pyplot.subplots()\n if by_bin:\n axis.set_xlabel(\"Position (bin)\")\n else:\n axis.set_xlabel(\"Position (Mb)\")\n axis = cnv_on_chromosome(axis, sel_probes, sel_segs, genes,\n antitarget_marker=antitarget_marker,\n do_trend=do_trend, x_limits=window_coords,\n y_min=y_min, y_max=y_max, segment_color=segment_color, variants=variants)\n elif variants:\n # Only plot SNVs in a single-panel layout\n _fig, axis = pyplot.subplots()\n axis = snv_on_chromosome(axis, sel_snvs, sel_segs, genes, do_trend,\n by_bin, segment_color)\n\n if title is None:\n title = \"%s %s\" % ((cnarr or segments or variants).sample_id, chrom)\n axis.set_title(title)\n return axis.get_figure()\n\ndef select_range_genes(cnarr, segments, variants, show_range, show_gene,\n window_width):\n \"\"\"Determine which datapoints to show based on the given options.\n\n Behaviors::\n\n start/end show_gene\n + + given region + genes; err if any gene outside it\n - + window +/- around genes\n + - given region, highlighting any genes within it\n - - whole chromosome, no genes\n\n If `show_range` is a chromosome name only, no start/end positions, then the\n whole chromosome will be shown.\n\n If region start/end coordinates are given and `show_gene` is '' or ',' (or\n all commas, etc.), then instead of highlighting all genes in the selection,\n no genes will be highlighted.\n \"\"\"\n chrom, start, end = unpack_range(show_range)\n if start is None and end is None:\n # Either the specified range is only chrom, no start-end, or gene names\n # were given\n window_coords = ()\n else:\n # Viewing region coordinates were specified -- take them as given\n # Fill in open-ended ranges' endpoints\n if start is None:\n start = 0\n elif start < 0:\n start = 0\n if not end:\n # Default selection endpoint to the maximum chromosome position\n end = (cnarr or segments or variants\n ).filter(chromosome=chrom).end.iat[-1]\n if end <= start:\n raise ValueError(\"Coordinate range {}:{}-{} (from {}) has size <= 0\"\n .format(chrom, start, end, show_range))\n window_coords = (start, 
end)\n\n gene_ranges = []\n if show_gene is None:\n if window_coords:\n if cnarr:\n # Highlight all genes within the given range\n gene_ranges = plots.gene_coords_by_range(cnarr, chrom, start, end)[chrom]\n if not gene_ranges and (end - start) < 10 * window_width:\n # No genes in the selected region, so if the selection is small\n # (i.e. <80% of the displayed window, <10x window padding),\n # highlight the selected region itself.\n # (To prevent this, use show_gene='' or window_width=0)\n logging.info(\"No genes found in selection; will highlight the \"\n \"selected region itself instead\")\n gene_ranges = [(start, end, \"Selection\")]\n window_coords = (max(0, start - window_width),\n end + window_width)\n\n else:\n gene_names = filter(None, show_gene.split(','))\n if gene_names:\n # Scan for probes matching the specified gene(s)\n gene_coords = plots.gene_coords_by_name(cnarr or segments,\n gene_names)\n if len(gene_coords) > 1:\n raise ValueError(\"Genes %s are split across chromosomes %s\"\n % (show_gene, list(gene_coords.keys())))\n g_chrom, gene_ranges = gene_coords.popitem()\n if chrom:\n # Confirm that the selected chromosomes match\n core.assert_equal(\"Chromosome also selected by region (-c) \"\n \"does not match\",\n **{\"chromosome\": chrom,\n \"gene(s)\": g_chrom})\n else:\n chrom = g_chrom\n\n gene_ranges.sort()\n if window_coords:\n # Verify all genes fit in the given window\n for gene_start, gene_end, gene_name in gene_ranges:\n if not (start <= gene_start and gene_end <= end):\n raise ValueError(\"Selected gene %s (%s:%d-%d) \"\n \"is outside specified region %s\"\n % (gene_name, chrom, gene_start,\n gene_end, show_range))\n elif not show_range:\n # Set the display window to the selected genes +/- a margin\n window_coords = (max(0, gene_ranges[0][0] - window_width),\n gene_ranges[-1][1] + window_width)\n\n # Prune plotted elements to the selected region\n sel_probes = (cnarr.in_range(chrom, *window_coords)\n if cnarr else CNA([]))\n sel_segs = (segments.in_range(chrom, *window_coords, mode='trim')\n if segments else CNA([]))\n sel_snvs = (variants.in_range(chrom, *window_coords)\n if variants else None)\n logging.info(\"Showing %d probes and %d selected genes in region %s\",\n len(sel_probes), len(gene_ranges),\n (chrom + \":%d-%d\" % window_coords if window_coords else chrom))\n\n return sel_probes, sel_segs, sel_snvs, window_coords, gene_ranges, chrom\n\n\ndef cnv_on_chromosome(axis, probes, segments, genes, antitarget_marker=None,\n do_trend=False, x_limits=None, y_min=None, y_max=None,\n segment_color=SEG_COLOR, variants=None):\n \"\"\"Draw a scatter plot of probe values with optional segments overlaid.\n\n Parameters\n ----------\n genes : list\n Of tuples: (start, end, gene name)\n \"\"\"\n # TODO - allow plotting just segments without probes\n # Get scatter plot coordinates\n x = 0.5 * (probes['start'] + probes['end']) * MB # bin midpoints\n y = probes['log2']\n if 'weight' in probes:\n w = 46 * probes['weight'] ** 2 + 2\n else:\n w = np.repeat(30, len(x))\n\n # Configure axes\n if not y_min:\n y_min = max(-5.0, min(y.min() - .1, -.3)) if len(y) else -1.1\n if not y_max:\n y_max = max(.3, y.max() + (.25 if genes else .1)) if len(y) else 1.1\n if x_limits:\n x_min, x_max = x_limits\n axis.set_xlim(x_min * MB, x_max * MB)\n else:\n set_xlim_from(axis, probes, segments, variants=variants)\n setup_chromosome(axis, y_min, y_max, \"Copy ratio (log2)\")\n if genes:\n highlight_genes(axis, genes,\n min(2.4, y.max() + .1) if len(y) else .1)\n\n if antitarget_marker in 
(None, 'o'):\n # Plot targets and antitargets with the same marker\n axis.scatter(x, y, w, color=POINT_COLOR, alpha=0.4, marker='o')\n else:\n # Use the given marker to plot antitargets separately\n x_fg = []\n y_fg = []\n w_fg = []\n x_bg = []\n y_bg = []\n # w_bg = []\n is_bg = probes['gene'].isin(params.ANTITARGET_ALIASES)\n for x_pt, y_pt, w_pt, is_bg_pt in zip(x, y, w, is_bg):\n if is_bg_pt:\n x_bg.append(x_pt)\n y_bg.append(y_pt)\n # w_bg.append(w_pt)\n else:\n x_fg.append(x_pt)\n y_fg.append(y_pt)\n w_fg.append(w_pt)\n axis.scatter(x_fg, y_fg, w_fg, color=POINT_COLOR, alpha=0.4, marker='o')\n axis.scatter(x_bg, y_bg, color=POINT_COLOR, alpha=0.5,\n marker=antitarget_marker)\n\n # Add a local trend line\n if do_trend:\n axis.plot(x, probes.smooth_log2(), #.1),\n color=POINT_COLOR, linewidth=2, zorder=-1)\n\n # Draw segments as horizontal lines\n if segments:\n for row in segments:\n color = choose_segment_color(row, segment_color)\n axis.plot((row.start * MB, row.end * MB),\n (row.log2, row.log2),\n color=color, linewidth=4, solid_capstyle='round')\n return axis\n\ndef snv_on_chromosome(axis, variants, segments, genes, do_trend, by_bin,\n segment_color):\n # TODO set x-limits if not already done for probes/segments\n # set_xlim_from(axis, None, segments, variants)\n # setup_chromosome(axis, 0.0, 1.0, \"VAF\")\n axis.set_ylim(0.0, 1.0)\n axis.set_ylabel(\"VAF\")\n if by_bin:\n axis.set_xlabel(\"Position (bin)\")\n else:\n axis.set_xlabel(\"Position (Mb)\")\n axis.get_yaxis().tick_left()\n axis.get_xaxis().tick_top()\n axis.tick_params(which='both', direction='out',\n labelbottom=False, labeltop=False)\n\n x_mb = variants['start'].values * MB\n y = variants['alt_freq'].values\n axis.scatter(x_mb, y, color=POINT_COLOR, alpha=0.3)\n if segments or do_trend:\n # Draw average VAF within each segment\n for seg, v_freq in get_segment_vafs(variants, segments):\n if seg:\n posn = [seg.start * MB, seg.end * MB]\n color = choose_segment_color(seg, segment_color,\n default_bright=False)\n else:\n posn = [variants.start.iat[0] * MB, variants.start.iat[-1] * MB]\n color = TREND_COLOR\n axis.plot(posn, [v_freq, v_freq],\n color=color, linewidth=2, zorder=1,\n solid_capstyle='round')\n\n if genes:\n highlight_genes(axis, genes, .9)\n return axis\n\ndef set_xlim_from(axis, probes=None, segments=None, variants=None):\n \"\"\"Configure axes for plotting a single chromosome's data.\n\n Parameters\n ----------\n probes : CopyNumArray\n segments : CopyNumArray\n variants : VariantArray\n All should already be subsetted to the region that will be plotted.\n \"\"\"\n min_x = np.inf\n max_x = 0\n for arr in (probes, segments, variants):\n if arr and len(arr):\n max_x = max(max_x, arr.end.iat[-1])\n min_x = min(min_x, arr.start.iat[0])\n if max_x <= min_x:\n if min_x != np.inf:\n logging.warning(\"WARNING: selection start %s > end %s; did you \"\n \"correctly sort the input file by genomic \"\n \"location?\", min_x, max_x)\n raise ValueError(\"No usable data points to plot out of \"\n \"%d probes, %d segments, %d variants\"\n % (len(probes) if probes else 0,\n len(segments) if segments else 0,\n len(variants) if variants else 0))\n axis.set_xlim(min_x * MB, max_x * MB)\n\n\ndef setup_chromosome(axis, y_min=None, y_max=None, y_label=None):\n \"\"\"Configure axes for plotting a single chromosome's data.\"\"\"\n if y_min and y_max:\n axis.set_ylim(y_min, y_max)\n if y_min < 0 < y_max:\n axis.axhline(color='k')\n if y_label:\n axis.set_ylabel(y_label)\n axis.tick_params(which='both', direction='out')\n 
axis.get_xaxis().tick_bottom()\n axis.get_yaxis().tick_left()\n\n\n# === Shared ===\n\ndef choose_segment_color(segment, highlight_color, default_bright=True):\n \"\"\"Choose a display color based on a segment's CNA status.\n\n Uses the fields added by the 'call' command. If these aren't present, use\n `highlight_color` for everything.\n\n For sex chromosomes, some single-copy deletions or gains might not be\n highlighted, since sample sex isn't used to infer the neutral ploidies.\n \"\"\"\n neutral_color = TREND_COLOR\n if 'cn' not in segment._fields:\n # No 'call' info\n return highlight_color if default_bright else neutral_color\n\n # Detect copy number alteration\n expected_ploidies = {'chrY': (0, 1), 'Y': (0, 1),\n 'chrX': (1, 2), 'X': (1, 2)}\n if segment.cn not in expected_ploidies.get(segment.chromosome, [2]):\n return highlight_color\n\n # Detect CNLOH\n if (segment.chromosome not in expected_ploidies and\n 'cn1' in segment._fields and 'cn2' in segment._fields and\n (segment.cn1 == 2) and (segment.cn2 == 0)):\n return CNLOH_COLOR\n \n # Detect regions of allelic imbalance / LOH\n if (segment.chromosome not in expected_ploidies and\n 'cn1' in segment._fields and 'cn2' in segment._fields and\n (segment.cn1 != segment.cn2)):\n return highlight_color\n\n return neutral_color\n\n\ndef get_segment_vafs(variants, segments):\n \"\"\"Group SNP allele frequencies by segment.\n\n Assume variants and segments were already subset to one chromosome.\n\n Yields\n ------\n tuple\n (segment, value)\n \"\"\"\n if segments:\n chunks = variants.by_ranges(segments)\n else:\n # Fake segments cover the whole region\n chunks = [(None, variants)]\n for seg, seg_snvs in chunks:\n # ENH: seg_snvs.tumor_boost()\n freqs = seg_snvs['alt_freq'].values\n # Separately emit VAFs above and below .5 for plotting\n idx_above_mid = (freqs > 0.5)\n for idx_vaf in (idx_above_mid, ~idx_above_mid):\n if sum(idx_vaf) > 1:\n yield (seg, np.median(freqs[idx_vaf]))\n\n\ndef highlight_genes(axis, genes, y_posn):\n \"\"\"Show gene regions with background color and a text label.\"\"\"\n # Rotate text in proportion to gene density\n ngenes = len(genes)\n text_size = ('small' if ngenes <= 6 else 'x-small')\n if ngenes <= 3:\n text_rot = 'horizontal'\n elif ngenes <= 6:\n text_rot = 30\n elif ngenes <= 10:\n text_rot = 45\n elif ngenes <= 20:\n text_rot = 60\n else:\n text_rot = 'vertical'\n for gene in genes:\n gene_start, gene_end, gene_name = gene\n # Highlight and label gene region\n # (rescale positions from bases to megabases)\n axis.axvspan(gene_start * MB, gene_end * MB,\n alpha=0.5, color=HIGHLIGHT_COLOR, zorder=-1)\n axis.text(0.5 * (gene_start + gene_end) * MB,\n y_posn,\n gene_name,\n horizontalalignment='center',\n rotation=text_rot,\n size=text_size)\n"
] | [
[
"matplotlib.patches.Patch",
"numpy.median",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.GridSpec"
]
] |
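Note on the row above: choose_segment_color encodes a small decision table -- altered total copy number or allelic imbalance gets the highlight color, a 2+0 allele split on a non-sex chromosome gets the CN-LOH color, everything else stays neutral. A minimal standalone sketch of that rule order (not part of the repository above; it uses plain values instead of cnvlib segment rows and omits the fallback for segments without 'call' fields):

import collections  # stdlib only; runnable as-is

TREND_COLOR = '#A0A0A0'    # neutral
SEG_COLOR = 'darkorange'   # clonal aberration
CNLOH_COLOR = '#02ba33'    # copy-neutral LOH

def pick_color(chrom, cn, cn1=None, cn2=None):
    expected = {'chrY': (0, 1), 'Y': (0, 1), 'chrX': (1, 2), 'X': (1, 2)}
    if cn not in expected.get(chrom, [2]):
        return SEG_COLOR                  # total copy number altered
    if chrom not in expected and cn1 == 2 and cn2 == 0:
        return CNLOH_COLOR                # 2+0 split: copy-neutral LOH
    if chrom not in expected and cn1 is not None and cn1 != cn2:
        return SEG_COLOR                  # allelic imbalance
    return TREND_COLOR                    # neutral

assert pick_color('chr1', cn=3) == SEG_COLOR
assert pick_color('chr1', cn=2, cn1=2, cn2=0) == CNLOH_COLOR
assert pick_color('chr1', cn=2, cn1=1, cn2=1) == TREND_COLOR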
Treadco/cannon_bowel | [
"8750bde0dc3c65b4a3e95fc8071f06dd496eade3"
] | [
"fuzzy.py"
] | [
"#!/usr/bin/python\n# this defaults to python 2 on my machine\n# (c) 2017 Treadco software.\n#\n# python version of the fuzzy rbm\n# supports the non-fuzzy version.\n#\n# \nlicense =''' \nCopyright (c) 2017 Treadco LLC, Amelia Treader, Robert W Harrison\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport numpy as np\nimport sys,os\n\n\n\n#\n# use pickle.dump(instance,file)\n# and pickle.load(file)\n#\n# to save and restore data. file is a Python FILE object so \n# it's opened first.\n#\n#\n\n\n#\n# fuzzy uses an ndarray so the ndarray primitives are available\n# we'll wrap some of them into a standard interface, but you can\n# roll your own if need be.\n#\n\n\nclass fuzzy:\n def __init__(me, the_min,the_max,the_number_of_divisions):\n me.my_min = the_min\n me.my_max = the_max\n me.delta = (the_max-the_min)/the_number_of_divisions\n me.nd = the_number_of_divisions\n me.counts = np.float32(np.zeros(the_number_of_divisions))\n ddi = the_number_of_divisions/2\n me.args = []\n for i in range(0, the_number_of_divisions):\n me.args.append((i-ddi)*me.delta)\n\n def initialize_counts(me):\n me.counts.__imul__(0.)\n\n def add(me, what):\n i = int(( what - me.my_min)/me.delta +0.5)\n# print(what,i)\n# insert rangechecking here.\n if i >= me.nd:\n i = me.nd -1\n if i < 0:\n i = 0\n me.counts[i] += 1.\n# print(what,me.counts[i],i)\n# sys.stdout.flush()\n\n\n def expected_value(me):\n ds = me.counts.sum()\n if ds == 0.:\n ds = 1.\n dsum = np.dot( me.args, me.counts)\n return dsum/ds \n# print me.counts\n# return (me.my_min + me.my_max)*0.5\n# dsum = 0.\n# ddi = len(me.counts)/2\n# for i in range(0, len(me.counts)):\n# dx = (i-ddi) *me.delta\n# dsum += me.counts[i]*dx\n# return dsum/ds \n# use numpy you dumb fsck\n# im = np.argmax(me.counts)\n# return float(im)*me.delta + me.my_min\n \n def belief(me):\n ds = me.counts.sum()\n if ds == 0.:\n return (me.my_min + me.my_max)*0.5,0.\n# use numpy you dumb fsck\n im = float(np.argmax(me.counts))\n return (im*me.delta + me.my_min),im/ds\n \n def damp(me, avalue):\n ds = me.counts.sum()\n if ds == 0.:\n return 1.\n im = int((avalue - me.my_min)/(me.my_max-me.my_min)+0.5)\n if im >= me.nd:\n im = me.nd -1\n if im < 0:\n im = 0\n i = np.argmax(me.counts)\n if abs(i-im) < 2:\n return 1.\n return -0.1 \n\ndef main():\n print(\"this is the main routine, defined for testing purposes\")\n\n simon = fuzzy(-1.,1., 10)\n simon.add(0.)\n simon.add(0.1)\n simon.add(0.2)\n simon.add(0.3)\n print( simon.counts)\n print( simon.expected_value()) \n simon.initialize_counts()\n print( 
simon.counts)\n simon.add(0.)\n simon.add(0.1)\n simon.add(0.2)\n print( simon.counts)\n print( simon.expected_value()) \n \n\n#main()\n"
] | [
[
"numpy.dot",
"numpy.argmax",
"numpy.zeros"
]
] |
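Note on the row above: the header comments in fuzzy.py point to pickle.dump(instance, file) and pickle.load(file) for saving and restoring an instance. A minimal sketch of that workflow, assuming fuzzy.py is importable from the working directory; the pickle file name is illustrative:

import pickle
from fuzzy import fuzzy  # assumes fuzzy.py (above) is on the path

f = fuzzy(-1., 1., 10)
f.add(0.)
f.add(0.1)

# Save the instance, then restore it from disk.
with open('fuzzy_state.pkl', 'wb') as fh:
    pickle.dump(f, fh)
with open('fuzzy_state.pkl', 'rb') as fh:
    restored = pickle.load(fh)

print(restored.expected_value())  # same statistics as the original object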
AhmedFakhry47/You-Only-Look-Faster--Object-Detection-Deep-Learning-Model- | [
"f6435aec492f82f2cea11ca569326ce715efef38"
] | [
"Yolf.py"
] | [
"from __future__ import division\nimport yolfnets as nets\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom IPython.display import clear_output\nimport random\nimport cv2\nfrom copy import copy, deepcopy\nfrom pathlib import Path\nimport os\nimport time \nfrom datetime import timedelta\nfrom tqdm import tqdm\n#import zipfile\nimport tarfile\nimport shutil\nimport wget\nimport sys\nimport voc\nfrom Yolf_utils import *\n\n\nvoc_dir = '/home/alex054u4/data/nutshell/newdata/VOCdevkit/VOC%d'\n\n# Define the model hyper parameters\nN_classes=20\nx = tf.placeholder(tf.float32, shape=(None, 416, 416, 3), name='input_x')\nyolo=model(x, lmbda=0, dropout_rate=0)\n# Define an optimizer\nepoch = tf.Variable(0,trainable=False,name=\"Epoch\")\n\nlr = tf.Variable(1e-3,trainable=False,dtype=tf.float64)\nlr_sch = tf.math.multiply(lr,tf.math.pow(tf.cast(0.5,tf.float64),tf.math.divide(epoch,10)))\ntrain = tf.train.AdamOptimizer(lr, 0.9).minimize(yolo.loss)\n\n\n#Check points for step training_trial_step\ncheckpoint_path = \"/home/alex054u4/data/nutshell/training_trial_YOLF_GOLD\"\ncheckpoint_prefix = os.path.join(checkpoint_path,\"ckpt\")\nif not os.path.exists(checkpoint_path):\n os.mkdir(checkpoint_path)\n\n\ninit_op = tf.global_variables_initializer()\ntrain_saver = tf.train.Saver(max_to_keep=2)\n\ndef evaluate_accuracy(data_type='tr'):\n if (data_type == 'tr'): acc_data = voc.load(voc_dir % 2007,'trainval',total_num =48)\n elif(data_type == 'te') : acc_data = voc.load(voc_dir % 2007, 'test', total_num=48)\n\n #print('Train Accuracy: ',voc.evaluate(boxes, voc_dir % 2007, 'trainval'))\n results = []\n for i,(img,_) in enumerate(acc_data):\n acc_outs = sess.run(yolo, {x: yolo.preprocess(img),is_training: False})\n boxes=yolo.get_boxes(acc_outs, img.shape[1:3])\n results.append(boxes)\n if (data_type =='tr'):return voc.evaluate(results, voc_dir % 2007, 'trainval')\n elif (data_type=='te'):return voc.evaluate(results, voc_dir % 2007, 'test')\n\n\nwith tf.Session() as sess:\n ckpt_files = [f for f in os.listdir(checkpoint_path) if os.path.isfile(os.path.join(checkpoint_path, f)) and 'ckpt' in f]\n if (len(ckpt_files)!=0):\n train_saver.restore(sess,checkpoint_prefix)\n else:\n sess.run(init_op)\n sess.run(yolo.stem.pretrained())\n\n\n losses = 0.0\n av_loss = 0.0\n best_acc = 0.0\n best_epoch = 0\n for i in tqdm(range(epoch.eval(),233)): \n trains = voc.load_train([voc_dir % 2007, voc_dir % 2012],'trainval', batch_size=48)\n\n for j,(imgs, metas) in enumerate(trains):\n # `trains` returns None when it covers the full batch once\n if imgs is None: break\n metas.insert(0, yolo.preprocess(imgs)) # for `inputs`\n metas.append(True) # for `is_training`\n outs= sess.run([train, yolo.loss],dict(zip(yolo.inputs, metas)))\n losses+=outs[-1]\n\n\n av_loss = 0.9*av_loss + 0.1*(losses/j) #Moving average for loss \n\n\n if(math.isnan(av_loss)):\n print(\"NN output: \\n\", yolo)\n print('\\n======================================================================================\\n')\n print(tf.trainable_variables())\n\n print('\\nepoch:',step.eval(),'lr: ',lr.eval(),'loss:',av_loss)\n\n tracc_str,_ = evaluate_accuracy('tr')\n teacc_str,teacc = evaluate_accuracy('te')\n print ('\\n') \n\n if(i%10 == 0):\n if (teacc > best_acc):\n best_acc= acc\n sess.run(epoch.assign(i))\n sess.run(lr.assign(lr_sch))\n train_saver.save(sess,checkpoint_prefix)\n else:\n sess.run(lr.assign(1e-4))\n\n print ('highest training accuacy:', acc_best, 'at epoch:', best_epoch, '\\n')\n print 
('=================================================================================================================================================================================')"
] | [
[
"tensorflow.Variable",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.trainable_variables",
"tensorflow.global_variables_initializer",
"tensorflow.math.divide",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver"
]
] |
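Note on the row above: lr_sch implements lr * 0.5**(epoch/10), i.e. the learning rate halves every 10 epochs (tf.math.divide performs float division, so the decay is continuous rather than stepped). A plain-Python check of the values the schedule produces:

# Standalone check of the decay formula used in Yolf.py above.
base_lr = 1e-3
for e in (0, 10, 20, 30):
    print(e, base_lr * 0.5 ** (e / 10))
# 0 -> 0.001, 10 -> 0.0005, 20 -> 0.00025, 30 -> 0.000125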
Waztom/fragalysis-backend | [
"1d7775740bc6d4cce3a846064fd57bb0fcdb8269"
] | [
"car/api.py"
] | [
"from rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom django.http import JsonResponse\nfrom django.core.files.base import ContentFile\nfrom django.conf import settings\nimport os\nimport json\nfrom celery.result import AsyncResult\nfrom viewer.tasks import check_services\nimport pandas as pd\n\nfrom car.tasks import (\n validateFileUpload,\n uploadManifoldReaction,\n uploadCustomReaction,\n createOTScript,\n canonicalizeSmiles,\n updateReactionSuccess,\n)\n\n# Import standard models\nfrom .models import (\n Project,\n MculeQuote,\n Batch,\n PubChemInfo,\n Target,\n Method,\n Reaction,\n Reactant,\n CatalogEntry,\n Product,\n AnalyseAction,\n)\n\n# Import action models\nfrom .models import (\n AddAction,\n ExtractAction,\n FilterAction,\n QuenchAction,\n SetTemperatureAction,\n StirAction,\n)\n\n# Import OT Session models\nfrom .models import (\n OTSession,\n Deck,\n Pipette,\n TipRack,\n Plate,\n Well,\n OTProtocol,\n OTBatchProtocol,\n CompoundOrder,\n OTScript,\n)\n\n# Import standard serializers\nfrom .serializers import (\n OTProtocolSerializer,\n OTBatchProtocolSerializer,\n ProjectSerializer,\n ProjectSerializerAll,\n MculeQuoteSerializer,\n BatchSerializer,\n BatchSerializerAll,\n TargetSerializer,\n TargetSerializerAll,\n MethodSerializer,\n MethodSerializerAll,\n ReactionSerializer,\n ReactionSerializerAll,\n PubChemInfoSerializer,\n ProductSerializer,\n ProductSerializerAll,\n ReactantSerializer,\n ReactantSerializerAll,\n CatalogEntrySerializer,\n)\n\n# Import action serializers\nfrom .serializers import (\n AnalyseActionSerializer,\n AddActionSerializer,\n ExtractActionSerializer,\n FilterActionSerializer,\n QuenchActionSerializer,\n SetTemperatureActionSerializer,\n StirActionSerializer,\n)\n\n# Import OT Session serializers\nfrom .serializers import (\n OTBatchProtocolSerializer,\n OTSessionSerializer,\n DeckSerializer,\n PipetteSerializer,\n TipRackSerializer,\n PlateSerializer,\n WellSerializer,\n CompoundOrderSerializer,\n OTScriptSerializer,\n)\n\nfrom rdkit import Chem\nfrom rdkit.Chem import Descriptors\nfrom django.core.files.storage import default_storage\n\nfrom .utils import createSVGString\n\n\ndef duplicatetarget(target_obj: Target, fk_obj: Batch):\n related_catalogentry_queryset = target_obj.catalogentries.all()\n\n target_obj.image = ContentFile(target_obj.image.read(), name=target_obj.image.name)\n target_obj.pk = None\n target_obj.batch_id = fk_obj\n target_obj.save()\n\n for catalogentry_obj in related_catalogentry_queryset:\n catalogentry_obj.pk = None\n catalogentry_obj.target_id = target_obj\n catalogentry_obj.save()\n\n return target_obj\n\n\ndef duplicatemethod(method_obj: Method, fk_obj: Target):\n related_reaction_queryset = method_obj.reactions.all()\n\n # Duplicate method before cloning the related children reaction objs\n method_obj.pk = None\n method_obj.target_id = fk_obj\n method_obj.save()\n\n for reaction_obj in related_reaction_queryset:\n product_obj = reaction_obj.products.all()[0]\n related_addaction_objs = reaction_obj.addactions.all()\n related_stiraction_objs = reaction_obj.stiractions.all()\n related_analyseaction_objs = reaction_obj.analyseactions.all()\n related_reactant_objs = reaction_obj.reactants.all()\n\n reaction_obj.reactionimage = ContentFile(\n reaction_obj.reactionimage.read(), name=reaction_obj.reactionimage.name\n )\n reaction_obj.pk = None\n reaction_obj.method_id = method_obj\n reaction_obj.save()\n\n product_obj.image = ContentFile(\n product_obj.image.read(), 
name=product_obj.image.name\n )\n product_obj.pk = None\n product_obj.reaction_id = reaction_obj\n product_obj.save()\n\n for addaction_obj in related_addaction_objs:\n addaction_obj.pk = None\n addaction_obj.reaction_id = reaction_obj\n addaction_obj.save()\n\n for stiraction_obj in related_stiraction_objs:\n stiraction_obj.pk = None\n stiraction_obj.reaction_id = reaction_obj\n stiraction_obj.save()\n\n for analyseaction_obj in related_analyseaction_objs:\n analyseaction_obj.pk = None\n analyseaction_obj.reaction_id = reaction_obj\n analyseaction_obj.save()\n\n for reactant_obj in related_reactant_objs:\n related_catalogentry_objs = reactant_obj.catalogentries.all()\n reactant_obj.pk = None\n reactant_obj.reaction_id = reaction_obj\n reactant_obj.save()\n for catalog_obj in related_catalogentry_objs:\n catalog_obj.pk = None\n catalog_obj.reactant_id = reactant_obj\n catalog_obj.save()\n\n\ndef save_tmp_file(myfile):\n name = myfile.name\n path = default_storage.save(\"tmp/\" + name, ContentFile(myfile.read()))\n tmp_file = str(os.path.join(settings.MEDIA_ROOT, path))\n return tmp_file\n\n\nclass ProjectViewSet(viewsets.ModelViewSet):\n queryset = Project.objects.all()\n\n def get_serializer_class(self):\n fetchall = self.request.GET.get(\"fetchall\", None)\n return ProjectSerializerAll if fetchall == \"yes\" else ProjectSerializer\n\n @action(methods=[\"post\"], detail=False)\n def createproject(self, request, pk=None):\n check_services()\n project_info = {}\n project_info[\"projectname\"] = request.data[\"project_name\"]\n project_info[\"submittername\"] = request.data[\"submitter_name\"]\n project_info[\"submitterorganisation\"] = request.data[\"submitter_organisation\"]\n project_info[\"proteintarget\"] = request.data[\"protein_target\"]\n validate_choice = request.data[\"validate_choice\"]\n API_choice = request.data[\"API_choice\"]\n\n csvfile = request.FILES[\"csv_file\"]\n tmp_file = save_tmp_file(csvfile)\n\n if str(validate_choice) == \"0\":\n\n if str(API_choice) == \"1\":\n task = validateFileUpload.delay(\n csv_fp=tmp_file, validate_type=\"custom-chem\"\n )\n\n if str(API_choice) == \"2\":\n task = validateFileUpload.delay(\n csv_fp=tmp_file, validate_type=\"combi-custom-chem\"\n )\n\n else:\n task = validateFileUpload.delay(\n csv_fp=tmp_file, validate_type=\"retro-API\"\n )\n\n if str(validate_choice) == \"1\":\n if str(API_choice) == \"0\":\n task = (\n validateFileUpload.s(\n csv_fp=tmp_file,\n validate_type=\"retro-API\",\n project_info=project_info,\n validate_only=False,\n )\n | uploadManifoldReaction.s()\n ).apply_async()\n\n if str(API_choice) == \"1\":\n task = (\n validateFileUpload.s(\n csv_fp=tmp_file,\n validate_type=\"custom-chem\",\n project_info=project_info,\n validate_only=False,\n )\n | uploadCustomReaction.s()\n ).apply_async()\n\n if str(API_choice) == \"2\":\n task = (\n validateFileUpload.s(\n csv_fp=tmp_file,\n validate_type=\"combi-custom-chem\",\n project_info=project_info,\n validate_only=False,\n )\n | uploadCustomReaction.s()\n ).apply_async()\n\n data = {\"task_id\": task.id}\n return JsonResponse(data=data)\n\n @action(detail=False, methods=[\"get\"])\n def gettaskstatus(self, request, pk=None):\n task_id = self.request.GET.get(\"task_id\", None)\n if task_id:\n task = AsyncResult(task_id)\n if task.status == \"FAILURE\":\n data = {\"task_status\": task.status, \"traceback\": str(task.traceback)}\n return JsonResponse(data)\n\n if task.status == \"SUCCESS\":\n results = task.get()\n validate_dict = results[0]\n validated = results[1]\n 
project_info = results[2]\n\n if not project_info:\n if validated:\n data = {\"task_status\": task.status, \"validated\": True}\n return JsonResponse(data)\n\n if not validated:\n errorsummary = json.dumps(validate_dict)\n data = {\n \"task_status\": task.status,\n \"validated\": False,\n \"validation_errors\": errorsummary,\n }\n return JsonResponse(data)\n\n if project_info:\n project_id = project_info[\"project_id\"]\n\n if validated:\n data = {\n \"task_status\": task.status,\n \"validated\": True,\n \"project_id\": project_id,\n }\n return JsonResponse(data)\n\n if not validated:\n errorsummary = json.dumps(validate_dict)\n data = {\n \"task_status\": task.status,\n \"validated\": False,\n \"validation_errors\": errorsummary,\n }\n\n return JsonResponse(data)\n\n if task.status == \"PENDING\":\n data = {\"task_status\": task.status}\n return JsonResponse(data)\n\n\nclass MculeQuoteViewSet(viewsets.ModelViewSet):\n queryset = MculeQuote.objects.all()\n serializer_class = MculeQuoteSerializer\n\n\nclass BatchViewSet(viewsets.ModelViewSet):\n queryset = Batch.objects.all()\n filterset_fields = [\"project_id\"]\n\n def get_serializer_class(self):\n fetchall = self.request.GET.get(\"fetchall\", None)\n return BatchSerializerAll if fetchall == \"yes\" else BatchSerializer\n\n def createBatch(self, project_obj, batch_node_obj, batch_tag):\n batch_obj = Batch()\n batch_obj.project_id = project_obj\n batch_obj.batch_id = batch_node_obj\n batch_obj.batch_tag = batch_tag\n batch_obj.save()\n return batch_obj\n\n def create(self, request, **kwargs):\n method_ids = request.data[\"methodids\"]\n batch_tag = request.data[\"batchtag\"]\n try:\n target_query_set = Target.objects.filter(\n methods__id__in=method_ids\n ).distinct()\n batch_obj = target_query_set[0].batch_id\n project_obj = batch_obj.project_id\n batch_obj_new = self.createBatch(\n project_obj=project_obj, batch_node_obj=batch_obj, batch_tag=batch_tag\n )\n for target_obj in target_query_set:\n method_query_set_to_clone = Method.objects.filter(\n target_id=target_obj\n ).filter(pk__in=method_ids)\n target_obj_clone = duplicatetarget(\n target_obj=target_obj, fk_obj=batch_obj_new\n )\n for method_obj in method_query_set_to_clone:\n duplicatemethod(method_obj=method_obj, fk_obj=target_obj_clone)\n serialized_data = BatchSerializer(batch_obj_new).data\n if serialized_data:\n return JsonResponse(data=serialized_data)\n else:\n return JsonResponse(data=\"Something went wrong\")\n except:\n return JsonResponse(data=\"Something went wrong\")\n\n @action(methods=[\"post\"], detail=False)\n def canonicalizesmiles(self, request, pk=None):\n check_services()\n if request.POST.get(\"smiles\"):\n smiles = request.POST.getlist(\"smiles\")\n task = canonicalizeSmiles.delay(smiles=smiles)\n data = {\"task_id\": task.id}\n return JsonResponse(data=data)\n if len(request.FILES) != 0:\n csvfile = request.FILES[\"csv_file\"]\n tmp_file = save_tmp_file(csvfile)\n task = canonicalizeSmiles.delay(csvfile=tmp_file)\n data = {\"task_id\": task.id}\n return JsonResponse(data=data)\n\n @action(detail=False, methods=[\"get\"])\n def gettaskstatus(self, request, pk=None):\n task_id = self.request.GET.get(\"task_id\", None)\n if task_id:\n task = AsyncResult(task_id)\n if task.status == \"FAILURE\":\n data = {\"task_status\": task.status, \"traceback\": str(task.traceback)}\n return JsonResponse(data)\n\n if task.status == \"SUCCESS\":\n result = task.get()\n validated = result[0]\n\n if validated:\n canonicalizedsmiles = result[1]\n data = {\n \"task_status\": 
task.status,\n \"canonicalizedsmiles\": canonicalizedsmiles,\n }\n return JsonResponse(data)\n if not validated:\n error_summary = result[1]\n data = {\"task_status\": task.status, \"error_summary\": error_summary}\n return JsonResponse(data)\n\n if task.status == \"PENDING\":\n data = {\"task_status\": task.status}\n return JsonResponse(data)\n\n @action(methods=[\"post\"], detail=False)\n def updatereactionsuccess(self, request, pk=None):\n if request.POST.get(\"reaction_ids\"):\n reaction_ids = request.POST.getlist(\"reaction_ids\")\n if len(request.FILES) != 0:\n csvfile = request.FILES[\"csv_file\"]\n reaction_ids = pd.read_csv(csvfile)[\"reaction_id\"]\n if Reaction.objects.filter(id__in=reaction_ids).exists():\n Reaction.objects.filter(id__in=reaction_ids).update(success=False)\n data = {\"reaction_ids\": reaction_ids}\n else:\n data = {\"reaction_ids\": None}\n return JsonResponse(data=data)\n\n\nclass TargetViewSet(viewsets.ModelViewSet):\n queryset = Target.objects.all()\n filterset_fields = [\"batch_id\"]\n\n def get_serializer_class(self):\n fetchall = self.request.GET.get(\"fetchall\", None)\n return TargetSerializerAll if fetchall == \"yes\" else TargetSerializer\n\n\nclass MethodViewSet(viewsets.ModelViewSet):\n queryset = Method.objects.all()\n filterset_fields = [\"target_id\", \"nosteps\"]\n\n def get_serializer_class(self):\n fetchall = self.request.GET.get(\"fetchall\", None)\n return MethodSerializerAll if fetchall == \"yes\" else MethodSerializer\n\n\nclass ReactionViewSet(viewsets.ModelViewSet):\n queryset = Reaction.objects.all()\n filterset_fields = {\"method_id\": [\"exact\"], \"successrate\": [\"gte\", \"lte\"]}\n\n def get_serializer_class(self):\n fetchall = self.request.GET.get(\"fetchall\", None)\n return ReactionSerializerAll if fetchall == \"yes\" else ReactionSerializer\n\n\nclass PubChemInfoViewSet(viewsets.ModelViewSet):\n queryset = PubChemInfo.objects.all()\n serializer_class = PubChemInfoSerializer\n\n\nclass ProductViewSet(viewsets.ModelViewSet):\n queryset = Product.objects.all()\n serializer_class = ProductSerializer\n filterset_fields = [\"reaction_id\"]\n\n def get_serializer_class(self):\n fetchall = self.request.GET.get(\"fetchall\", None)\n return ProductSerializerAll if fetchall == \"yes\" else ProductSerializer\n\n\nclass ReactantViewSet(viewsets.ModelViewSet):\n queryset = Reactant.objects.all()\n filterset_fields = [\"reaction_id\"]\n\n def get_serializer_class(self):\n fetchall = self.request.GET.get(\"fetchall\", None)\n return ReactantSerializerAll if fetchall == \"yes\" else ReactantSerializer\n\n\nclass CatalogEntryViewSet(viewsets.ModelViewSet):\n queryset = CatalogEntry.objects.all()\n serializer_class = CatalogEntrySerializer\n\n\n# Action viewsets\nclass AnalyseActionViewSet(viewsets.ModelViewSet):\n queryset = AnalyseAction.objects.all()\n serializer_class = AnalyseActionSerializer\n filterset_fields = [\"reaction_id\"]\n\n\nclass AddActionViewSet(viewsets.ModelViewSet):\n queryset = AddAction.objects.all()\n serializer_class = AddActionSerializer\n filterset_fields = [\"reaction_id\"]\n\n def get_patch_object(self, pk):\n return AddAction.objects.get(pk=pk)\n\n def partial_update(self, request, pk):\n addaction = self.get_patch_object(pk)\n\n if \"materialsmiles\" in request.data:\n materialsmiles = request.data[\"materialsmiles\"]\n mol = Chem.MolFromSmiles(materialsmiles)\n molecular_weight = Descriptors.ExactMolWt(mol)\n add_svg_string = createSVGString(materialsmiles)\n # Delete previous image\n svg_fp = 
addaction.materialimage.path\n            default_storage.delete(svg_fp)\n            # Add new image\n            add_svg_fn = default_storage.save(\n                \"addactionimages/{}.svg\".format(materialsmiles),\n                ContentFile(add_svg_string),\n            )\n            addaction.materialsmiles = materialsmiles\n            addaction.molecularweight = molecular_weight\n            addaction.materialimage = add_svg_fn\n            addaction.save()\n            serialized_data = AddActionSerializer(addaction).data\n            if serialized_data:\n                return JsonResponse(data=serialized_data)\n            else:\n                return JsonResponse(data=\"wrong parameters\")\n        else:\n            serializer = AddActionSerializer(addaction, data=request.data, partial=True)\n            if serializer.is_valid():\n                serializer.save()\n                return JsonResponse(data=serializer.data)\n            else:\n                return JsonResponse(data=\"wrong parameters\")\n\n\nclass ExtractActionViewSet(viewsets.ModelViewSet):\n    queryset = ExtractAction.objects.all()\n    serializer_class = ExtractActionSerializer\n    filterset_fields = [\"reaction_id\"]\n\n\nclass FilterActionViewSet(viewsets.ModelViewSet):\n    queryset = FilterAction.objects.all()\n    serializer_class = FilterActionSerializer\n    filterset_fields = [\"reaction_id\"]\n\n\nclass QuenchActionViewSet(viewsets.ModelViewSet):\n    queryset = QuenchAction.objects.all()\n    serializer_class = QuenchActionSerializer\n    filterset_fields = [\"reaction_id\"]\n\n\nclass SetTemperatureActionViewSet(viewsets.ModelViewSet):\n    queryset = SetTemperatureAction.objects.all()\n    serializer_class = SetTemperatureActionSerializer\n    filterset_fields = [\"reaction_id\"]\n\n\nclass StirActionViewSet(viewsets.ModelViewSet):\n    queryset = StirAction.objects.all()\n    serializer_class = StirActionSerializer\n    filterset_fields = [\"reaction_id\"]\n\n\n# OT Session viewsets\nclass OTProtocolViewSet(viewsets.ModelViewSet):\n    queryset = OTProtocol.objects.all()\n    serializer_class = OTProtocolSerializer\n    filterset_fields = [\"project_id\"]\n\n    @action(methods=[\"post\"], detail=False)\n    def createotprotocol(self, request, pk=None):\n        check_services()\n        batch_ids = request.data[\"batchids\"]\n        protocol_name = request.data[\"protocol_name\"]\n        task = createOTScript.delay(batchids=batch_ids, protocol_name=protocol_name)\n        data = {\"task_id\": task.id}\n        return JsonResponse(data=data)\n\n    @action(detail=False, methods=[\"get\"])\n    def gettaskstatus(self, request, pk=None):\n        task_id = self.request.GET.get(\"task_id\", None)\n        if task_id:\n            task = AsyncResult(task_id)\n            if task.status == \"FAILURE\":\n                data = {\"task_status\": task.status, \"traceback\": str(task.traceback)}\n                return JsonResponse(data)\n\n            if task.status == \"SUCCESS\":\n                task_summary, otprotocol_id = task.get()\n                data = {\n                    \"task_status\": task.status,\n                    \"otprotocol_id\": otprotocol_id,\n                    \"task_summary\": task_summary,\n                }\n                return JsonResponse(data)\n\n            if task.status == \"PENDING\":\n                data = {\"task_status\": task.status}\n                return JsonResponse(data)\n\n\nclass OTBatchProtocolViewSet(viewsets.ModelViewSet):\n    queryset = OTBatchProtocol.objects.all()\n    serializer_class = OTBatchProtocolSerializer\n    filterset_fields = [\"otprotocol_id\", \"batch_id\", \"celery_task_id\"]\n\n\nclass OTSessionViewSet(viewsets.ModelViewSet):\n    queryset = OTSession.objects.all()\n    serializer_class = OTSessionSerializer\n    filterset_fields = [\"otbatchprotocol_id\"]\n\n\nclass DeckViewSet(viewsets.ModelViewSet):\n    queryset = Deck.objects.all()\n    serializer_class = DeckSerializer\n    filterset_fields = [\"otsession_id\"]\n\n\nclass PipetteViewSet(viewsets.ModelViewSet):\n    queryset = Pipette.objects.all()\n    serializer_class = 
PipetteSerializer\n filterset_fields = [\"otsession_id\"]\n\n\nclass TipRackViewSet(viewsets.ModelViewSet):\n queryset = TipRack.objects.all()\n serializer_class = TipRackSerializer\n filterset_fields = [\"otsession_id\"]\n\n\nclass PlateViewSet(viewsets.ModelViewSet):\n queryset = Plate.objects.all()\n serializer_class = PlateSerializer\n filterset_fields = [\"otsession_id\"]\n\n\nclass WellViewSet(viewsets.ModelViewSet):\n queryset = Well.objects.all()\n serializer_class = WellSerializer\n filterset_fields = [\"otsession_id\"]\n\n\nclass CompoundOrderViewSet(viewsets.ModelViewSet):\n queryset = CompoundOrder.objects.all()\n serializer_class = CompoundOrderSerializer\n filterset_fields = [\"otsession_id\"]\n\n\nclass OTScriptViewSet(viewsets.ModelViewSet):\n queryset = OTScript.objects.all()\n serializer_class = OTScriptSerializer\n filterset_fields = [\"otsession_id\"]\n"
] | [
[
"pandas.read_csv"
]
] |
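Note on the row above: the several gettaskstatus endpoints share one polling contract -- branch on AsyncResult.status ("FAILURE", "SUCCESS", "PENDING") and return a small JSON-able dict per state. A pure-Python sketch of that contract with a stand-in task object (names are illustrative, not part of the repository):

# Minimal sketch of the Celery polling contract used above.
def task_status_payload(task):
    if task.status == "FAILURE":
        return {"task_status": task.status, "traceback": str(task.traceback)}
    if task.status == "SUCCESS":
        return {"task_status": task.status, "result": task.get()}
    return {"task_status": "PENDING"}

class FakeTask:                      # stand-in for celery.result.AsyncResult
    status, traceback = "SUCCESS", None
    def get(self):
        return [True]

print(task_status_payload(FakeTask()))  # {'task_status': 'SUCCESS', 'result': [True]}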
kdheepak/carsons | [
"6919cd5be416a58f14c1d5d933a52905a6d5f6a6"
] | [
"carsons/carsons.py"
] | [
"from numpy import pi as π\n\nfrom numpy import zeros\nfrom numpy.linalg import inv\nfrom numpy import sqrt\nfrom numpy import log\nfrom numpy import cos\nfrom numpy import sin\nfrom numpy import arctan\nfrom itertools import islice\n\n\ndef convert_geometric_model(geometric_model):\n carsons_model = CarsonsEquations(geometric_model)\n\n z_primitive = carsons_model.build_z_primitive()\n z_abc = perform_kron_reduction(z_primitive)\n return z_abc\n\n\ndef perform_kron_reduction(z_primitive):\n \"\"\" Reduces the primitive impedance matrix to an equivalent impedance\n matrix.\n\n We break z_primative up into four quadrants as follows:\n\n Ẑpp = [Ẑaa, Ẑab, Ẑac] Ẑpn = [Ẑan]\n [Ẑba, Ẑbb, Ẑbc] [Ẑbn]\n [Ẑca, Ẑcb, Ẑcc] [Ẑcn]\n\n Ẑnp = [Ẑna, Ẑnb, Ẑnc] Ẑnn = [Ẑnn]\n\n Ẑnn is of dimension mxm, where m is the number of neutrals. E.g. with\n m = 2:\n Ẑan = [Ẑan₁, Ẑan₂]\n [Ẑbn₁, Ẑbn₂]\n [Ẑcn₁, Ẑcn₂]\n\n Ẑna = [Ẑn₁a, Ẑn₁b, Ẑn₁c] Ẑnn = [Ẑn₁n₁, Ẑn₁n₂]\n [Ẑn₂a, Ẑn₂b, Ẑn₂c] [Ẑn₂n₁, Ẑn₂n₂]\n\n Definitions:\n Ẑ ----- \"primative\" impedance value, i.e. one that does not factor\n in the mutuals caused by neighboring neutral conductors.\n Z ----- a phase-phase impedance value that factors the mutual impedance\n of neighboring neutral conductors\n\n Returns:\n Z ---- a corrected impedance matrix in the form:\n\n Zabc = [Zaa, Zab, Zac]\n [Zba, Zbb, Zbc]\n [Zca, Zcb, Zcc]\n \"\"\"\n Ẑpp, Ẑpn = z_primitive[0:3, 0:3], z_primitive[0:3, 3:]\n Ẑnp, Ẑnn = z_primitive[3:, 0:3], z_primitive[3:, 3:]\n Z_abc = Ẑpp - Ẑpn @ inv(Ẑnn) @ Ẑnp\n return Z_abc\n\n\nclass CarsonsEquations():\n\n ƒ = 60 # frequency, Hz\n ρ = 100 # resistivity, ohms/meter^3\n μ = 4 * π * 1e-7 # permeability, Henry / meter\n ω = 2.0 * π * ƒ # angular frequency radians / second\n\n def __init__(self, model):\n self.phases = model.phases\n self.phase_positions = model.wire_positions\n self.gmr = model.geometric_mean_radius\n self.r = model.resistance\n\n def build_z_primitive(self):\n abc_conductors = [\n ph if ph in self.phases\n else None for ph in (\"A\", \"B\", \"C\")\n ]\n neutral_conductors = sorted([\n ph for ph in self.phases\n if ph.startswith(\"N\")\n ])\n conductors = abc_conductors + neutral_conductors\n\n dimension = len(conductors)\n z_primitive = zeros(shape=(dimension, dimension), dtype=complex)\n\n for index_i, phase_i in enumerate(conductors):\n for index_j, phase_j in enumerate(conductors):\n if phase_i is not None and phase_j is not None:\n R = self.compute_R(phase_i, phase_j)\n X = self.compute_X(phase_i, phase_j)\n z_primitive[index_i, index_j] = complex(R, X)\n\n return z_primitive\n\n def compute_R(self, i, j):\n rᵢ = self.r[i]\n ΔR = self.μ * self.ω / π * self.compute_P(i, j)\n\n if i == j:\n return rᵢ + ΔR\n else:\n return ΔR\n\n def compute_X(self, i, j):\n Qᵢⱼ = self.compute_Q(i, j)\n ΔX = self.μ * self.ω / π * Qᵢⱼ\n\n if i != j:\n Dᵢⱼ = self.compute_D(i, j)\n dᵢⱼ = self.compute_d(i, j)\n geometry_ratio = Dᵢⱼ / dᵢⱼ\n else:\n hᵢ = self.get_h(i)\n gmrⱼ = self.gmr[j]\n geometry_ratio = 2.0 * hᵢ / gmrⱼ\n\n X_o = self.ω * self.μ / (2 * π) * log(geometry_ratio)\n\n return X_o + ΔX\n\n def compute_P(self, i, j, number_of_terms=1):\n terms = islice(self.compute_P_terms(i, j), number_of_terms)\n return sum(terms)\n\n def compute_P_terms(self, i, j):\n yield π / 8.0\n\n kᵢⱼ = self.compute_k(i, j)\n θᵢⱼ = self.compute_θ(i, j)\n\n yield -kᵢⱼ / (3*sqrt(2)) * cos(θᵢⱼ)\n yield kᵢⱼ ** 2 / 16 * (0.6728 + log(2 / kᵢⱼ)) * cos(2 * θᵢⱼ)\n yield kᵢⱼ ** 2 / 16 * θᵢⱼ * sin(2 * θᵢⱼ)\n yield kᵢⱼ ** 3 / (45 * sqrt(2)) * cos(3 * θᵢⱼ)\n yield -π * kᵢⱼ ** 4 / 
64 * cos(2 * θᵢⱼ)\n\n def compute_Q(self, i, j, number_of_terms=2):\n terms = islice(self.compute_Q_terms(i, j), number_of_terms)\n return sum(terms)\n\n def compute_Q_terms(self, i, j):\n yield -0.0386\n\n kᵢⱼ = self.compute_k(i, j)\n yield 0.5 * log(2 / kᵢⱼ)\n\n θᵢⱼ = self.compute_θ(i, j)\n yield kᵢⱼ / (3 * sqrt(2)) * cos(θᵢⱼ)\n yield -π * kᵢⱼ ** 2 / 64 * cos(2 * θᵢⱼ)\n yield kᵢⱼ ** 3 / (45 * sqrt(2)) * cos(3 * θᵢⱼ)\n yield -kᵢⱼ ** 4 / 384 * θᵢⱼ * sin(4 * θᵢⱼ)\n yield -kᵢⱼ ** 4 / 384 * cos(4 * θᵢⱼ) * (log(2 / kᵢⱼ) + 1.0895)\n\n def compute_k(self, i, j):\n Dᵢⱼ = self.compute_D(i, j)\n return Dᵢⱼ * sqrt(self.ω * self.μ / self.ρ)\n\n def compute_θ(self, i, j):\n xᵢ, _ = self.phase_positions[i]\n xⱼ, _ = self.phase_positions[j]\n xᵢⱼ = abs(xⱼ - xᵢ)\n hᵢ, hⱼ = self.get_h(i), self.get_h(j)\n\n return arctan(xᵢⱼ / (hᵢ + hⱼ))\n\n def compute_d(self, i, j):\n return self.calculate_distance(\n self.phase_positions[i],\n self.phase_positions[j])\n\n def compute_D(self, i, j):\n xⱼ, yⱼ = self.phase_positions[j]\n return self.calculate_distance(self.phase_positions[i], (xⱼ, -yⱼ))\n\n @staticmethod\n def calculate_distance(positionᵢ, positionⱼ):\n xᵢ, yᵢ = positionᵢ\n xⱼ, yⱼ = positionⱼ\n return sqrt((xᵢ - xⱼ)**2 + (yᵢ - yⱼ)**2)\n\n def get_h(self, i):\n _, yᵢ = self.phase_positions[i]\n return yᵢ\n"
] | [
[
"numpy.log",
"numpy.sqrt",
"numpy.arctan",
"numpy.linalg.inv",
"numpy.cos",
"numpy.sin",
"numpy.zeros"
]
] |
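Note on the row above: perform_kron_reduction folds the neutral rows/columns into the 3x3 phase block via Z_abc = Ẑpp - Ẑpn @ inv(Ẑnn) @ Ẑnp. A worked numpy example with one neutral conductor and made-up impedance values, just to show the quadrant slicing and resulting shape:

import numpy as np
from numpy.linalg import inv

# 4x4 primitive matrix: 3 phases + 1 neutral (values are illustrative).
z_primitive = np.array([
    [0.4 + 1.0j, 0.1 + 0.5j, 0.1 + 0.4j, 0.1 + 0.3j],
    [0.1 + 0.5j, 0.4 + 1.0j, 0.1 + 0.5j, 0.1 + 0.3j],
    [0.1 + 0.4j, 0.1 + 0.5j, 0.4 + 1.0j, 0.1 + 0.3j],
    [0.1 + 0.3j, 0.1 + 0.3j, 0.1 + 0.3j, 0.5 + 1.2j],
])
Zpp, Zpn = z_primitive[0:3, 0:3], z_primitive[0:3, 3:]
Znp, Znn = z_primitive[3:, 0:3], z_primitive[3:, 3:]
Z_abc = Zpp - Zpn @ inv(Znn) @ Znp   # same formula as the source
print(Z_abc.shape)                   # (3, 3): neutrals eliminated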
maxwellzh/Torch-gather | [
"1e47e4d9dcb5a0039bbff12b8f8ce78431ad5322"
] | [
"test/test_cat.py"
] | [
"\nfrom gather._C import gather_cat_forward, gather_cat_backward\nfrom gather import cat as gathercat\nimport torch\nimport time\n\n\ndef vis(msg: str, L: int = 40):\n if len(msg) >= L:\n print(msg)\n else:\n pad_l = (L-len(msg))//2\n pad_r = (L-len(msg)) - pad_l\n print(\"{} {} {}\".format(pad_l*'=', msg, pad_r*'='))\n\n\ndef test(seed: int):\n vis(f'Test process with seed={seed}', 60)\n torch.manual_seed(seed)\n\n N = torch.randint(1, 32, (1,)).item()\n T = torch.randint(2, 512, (1,)).item()\n V = torch.randint(1, 1024, (1,)).item()\n lx = torch.randint(T//2, T, (N, ), dtype=torch.int, device=0)\n xs = torch.randn((N, lx.max(), V), dtype=torch.float, device=0)\n\n lx = lx.to(dtype=torch.int, device=0)\n print(\"xs size: \", xs.size())\n print(\"lx size: \", lx.size())\n\n xs.requires_grad = True\n\n def manual_cat(xs, lx):\n return torch.cat([xs[i, :lx[i]].view(-1, xs.size(-1)) for i in range(lx.size(0))], dim=0)\n\n def test_forward():\n vis('Test forward/backward computation')\n\n # manually cal\n manual = manual_cat(xs, lx)\n\n gather_x = gathercat(xs, lx)\n\n if not torch.all(manual == gather_x):\n print(\"Forward mismatch\")\n print(manual)\n print(gather_x)\n raise RuntimeError\n else:\n print(\"Forward correct.\")\n\n weighted_w = torch.randn_like(gather_x)\n (gather_x*weighted_w).sum().backward()\n tx_grad = xs.grad.data.detach()\n xs.grad = None\n\n (manual*weighted_w).sum().backward()\n mx_grad = xs.grad.data.detach()\n xs.grad = None\n\n cmp = tx_grad == mx_grad\n if not torch.all(cmp):\n print(\"Backward mismatch.\")\n print(torch.sum(torch.abs(tx_grad-mx_grad)))\n print(tx_grad[torch.logical_not(cmp)])\n print(mx_grad[torch.logical_not(cmp)])\n raise RuntimeError\n\n else:\n print(\"Backward correct.\")\n\n def test_autogradcheck():\n vis('Test autograd with torch')\n try:\n torch.autograd.gradcheck(gathercat, (xs, lx))\n except Exception as e:\n print(e)\n print(\"Maybe limit the (N, T, V) to smaller number and re-test.\")\n exit(1)\n\n def test_contiguous(xs):\n model = torch.nn.LSTM(xs.size(-1), xs.size(-1),\n num_layers=3).to(device=0)\n model.flatten_parameters()\n\n with torch.no_grad():\n xs, _ = model(xs.transpose(0, 1))\n xs = xs.transpose(0, 1)\n xs.requires_grad = True\n gather_x = gathercat(xs, lx)\n print(\"Wow! It works with non contiguous layout!\")\n\n def test_fp16(xs:torch.Tensor):\n # xs_half = xs.to(dtype=torch.float16)\n xs_half = xs\n with torch.cuda.amp.autocast():\n gathered_x = gathercat(xs_half, lx)\n manual_x = manual_cat(xs_half, lx)\n\n gathered_x.sum().backward()\n g_grad = xs.grad\n xs.grad = None\n\n manual_x.sum().backward()\n m_grad = xs.grad\n xs.grad = None\n\n if torch.all(g_grad == m_grad):\n print(\"FP16 backward correct.\")\n else:\n print(\":( FP16 backward error.\")\n\n if torch.all(gathered_x == manual_x):\n print(\"Wow! 
It works with FP16!\")\n else:\n print(gathered_x)\n print(manual_x)\n pass\n\n def test_performance():\n with torch.no_grad():\n gather_x = gathercat(xs, lx)\n weighted = torch.randn_like(gather_x)\n\n cnt = 500\n\n t_beg = time.time()\n for _ in range(cnt):\n gather_x = manual_cat(xs, lx)\n (weighted*gather_x).mean().backward()\n xs.grad = None\n print(\"Torch cat runs {} times, {:.4f} ms on average\".format(\n cnt, (time.time()-t_beg)/cnt*1000))\n\n t_beg = time.time()\n for _ in range(cnt):\n gather_x = gathercat(xs, lx)\n (weighted*gather_x).mean().backward()\n xs.grad = None\n\n print(\"Gather cat runs {} times, {:.4f} ms on average\".format(\n cnt, (time.time()-t_beg)/cnt*1000))\n\n test_forward()\n # test_autogradcheck()\n test_contiguous(xs)\n test_fp16(xs)\n test_performance()\n\n print('')\n\n\nif __name__ == \"__main__\":\n\n for i in range(5):\n test(i)\n"
] | [
[
"torch.randn_like",
"torch.all",
"torch.abs",
"torch.randint",
"torch.manual_seed",
"torch.cuda.amp.autocast",
"torch.no_grad",
"torch.autograd.gradcheck",
"torch.logical_not"
]
] |
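Note on the row above: manual_cat is the torch.cat baseline the CUDA gather extension is checked against -- take the first lx[i] frames of each padded sequence and concatenate them into one flat tensor. A CPU-only restatement that runs without the extension:

import torch

def manual_cat(xs, lx):
    # Same reference implementation as in test_cat.py above.
    return torch.cat([xs[i, :lx[i]].view(-1, xs.size(-1))
                      for i in range(lx.size(0))], dim=0)

xs = torch.randn(2, 5, 3)                   # (N, T, V) padded batch
lx = torch.tensor([5, 2], dtype=torch.int)  # true lengths per sequence
out = manual_cat(xs, lx)
print(out.shape)                            # torch.Size([7, 3]) since 5 + 2 = 7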
olaals/end-to-end-RGB-pose-estimation-baseline | [
"c26151587657d7d496bb2c3ab47bb4549c0e83e6"
] | [
"models/perceiver.py"
] | [
"import torch\nfrom perceiver_pytorch import Perceiver as PerceiverTorch\nimport torch.nn as nn\n\n\nclass Perceiver(nn.Module):\n def __init__(self, input_channels, num_outputs):\n super(Perceiver, self).__init__()\n self.perceiver = PerceiverTorch(\n input_channels = input_channels, # number of channels for each token of the input\n input_axis = 2, # number of axis for input data (2 for images, 3 for video)\n num_freq_bands = 6, # number of freq bands, with original value (2 * K + 1)\n max_freq = 10., # maximum frequency, hyperparameter depending on how fine the data is\n depth = 6, # depth of net. The shape of the final attention mechanism will be:\n # depth * (cross attention -> self_per_cross_attn * self attention)\n num_latents = 256, # number of latents, or induced set points, or centroids. different papers giving it different names\n latent_dim = 512, # latent dimension\n cross_heads = 1, # number of heads for cross attention. paper said 1\n latent_heads = 8, # number of heads for latent self attention, 8\n cross_dim_head = 64, # number of dimensions per cross attention head\n latent_dim_head = 64, # number of dimensions per latent self attention head\n num_classes = num_outputs, # output number of classes\n attn_dropout = 0.,\n ff_dropout = 0.,\n weight_tie_layers = False, # whether to weight tie layers (optional, as indicated in the diagram)\n fourier_encode_data = True, # whether to auto-fourier encode the data, using the input_axis given. defaults to True, but can be turned off if you are fourier encoding the data yourself\n self_per_cross_attn = 2 # number of self attention blocks per cross attention\n\n )\n\n def forward(self, x):\n x = x.permute((0,2,3,1))\n return self.perceiver(x)\n\n\n\n\"\"\"\n num_freq_bands=6,\n depth=6,\n max_freq=10, \n input_channels=input_channels, \n num_classes=num_outputs\n \"\"\"\n\n\nif __name__ == '__main__':\n tens = torch.randn((1,6,320,320))\n model = Perceiver(6, 9)\n\n out=model(tens)\n print(out.shape)\n"
] | [
[
"torch.randn"
]
] |
sourcery-ai-bot/semantic-kitti-api | [
"fb4088b9bdf58db0603409c4dcd0a796c020159d"
] | [
"evaluate_semantics_by_distance.py"
] | [
"#!/usr/bin/env python3\n# This file is covered by the LICENSE file in the root of this project.\n\nimport argparse\nimport os\nimport yaml\nimport sys\nimport numpy as np\n\nDISTANCES = [(1e-8, 10.0), (10.0, 20.0), (20.0, 30.0), (30.0, 40.0), (40.0, 50.0)]\n\n# possible splits\nsplits = [\"train\", \"valid\", \"test\"]\n\n# possible backends\nbackends = [\"numpy\", \"torch\"]\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"./evaluate_semantics_by_distance.py\")\n parser.add_argument(\n \"--dataset\",\n \"-d\",\n type=str,\n required=True,\n help=\"Dataset dir. No Default\",\n )\n parser.add_argument(\n \"--predictions\",\n \"-p\",\n type=str,\n required=None,\n help=\"Prediction dir. Same organization as dataset, but predictions in\"\n 'each sequences \"prediction\" directory. No Default. If no option is set'\n \" we look for the labels in the same directory as dataset\",\n )\n parser.add_argument(\n \"--split\",\n \"-s\",\n type=str,\n required=False,\n default=\"valid\",\n help=\"Split to evaluate on. One of \"\n + str(splits)\n + \". Defaults to %(default)s\",\n )\n parser.add_argument(\n \"--backend\",\n \"-b\",\n type=str,\n required=False,\n default=\"numpy\",\n help=\"Backend for evaluation. One of \"\n + str(backends)\n + \" Defaults to %(default)s\",\n )\n parser.add_argument(\n \"--datacfg\",\n \"-dc\",\n type=str,\n required=False,\n default=\"config/semantic-kitti.yaml\",\n help=\"Dataset config file. Defaults to %(default)s\",\n )\n parser.add_argument(\n \"--limit\",\n \"-l\",\n type=int,\n required=False,\n default=None,\n help='Limit to the first \"--limit\" points of each scan. Useful for'\n \" evaluating single scan from agregated pointcloud.\"\n \" Defaults to %(default)s\",\n )\n parser.add_argument(\n \"--codalab\",\n dest=\"codalab\",\n default=False,\n action=\"store_true\",\n help='Exports \"segmentation_scores_distance.txt\" for codalab'\n \"Defaults to %(default)s\",\n )\n FLAGS, unparsed = parser.parse_known_args()\n\n # fill in real predictions dir\n if FLAGS.predictions is None:\n FLAGS.predictions = FLAGS.dataset\n\n # print summary of what we will do\n print(\"*\" * 80)\n print(\"INTERFACE:\")\n print(\"Data: \", FLAGS.dataset)\n print(\"Predictions: \", FLAGS.predictions)\n print(\"Backend: \", FLAGS.backend)\n print(\"Split: \", FLAGS.split)\n print(\"Config: \", FLAGS.datacfg)\n print(\"Limit: \", FLAGS.limit)\n print(\"Codalab: \", FLAGS.codalab)\n print(\"*\" * 80)\n\n # assert split\n assert FLAGS.split in splits\n\n # assert backend\n assert FLAGS.backend in backends\n\n print(\"Opening data config file %s\" % FLAGS.datacfg)\n DATA = yaml.safe_load(open(FLAGS.datacfg, \"r\"))\n\n # get number of interest classes, and the label mappings\n class_strings = DATA[\"labels\"]\n class_remap = DATA[\"learning_map\"]\n class_inv_remap = DATA[\"learning_map_inv\"]\n class_ignore = DATA[\"learning_ignore\"]\n nr_classes = len(class_inv_remap)\n\n # make lookup table for mapping\n maxkey = max(class_remap.keys())\n\n # +100 hack making lut bigger just in case there are unknown labels\n remap_lut = np.zeros((maxkey + 100), dtype=np.int32)\n remap_lut[list(class_remap.keys())] = list(class_remap.values())\n\n # print(remap_lut)\n\n # create evaluator\n ignore = []\n for cl, ign in class_ignore.items():\n if ign:\n x_cl = int(cl)\n ignore.append(x_cl)\n print(\"Ignoring xentropy class \", x_cl, \" in IoU evaluation\")\n\n # create evaluator\n evaluators = []\n for i in range(len(DISTANCES)):\n if FLAGS.backend == \"torch\":\n from 
auxiliary.torch_ioueval import iouEval\n\n evaluators.append(iouEval(nr_classes, ignore))\n evaluators[i].reset()\n elif FLAGS.backend == \"numpy\":\n from auxiliary.np_ioueval import iouEval\n\n evaluators.append(iouEval(nr_classes, ignore))\n evaluators[i].reset()\n else:\n print(\"Backend for evaluator should be one of \", str(backends))\n quit()\n\n # get test set\n test_sequences = DATA[\"split\"][FLAGS.split]\n\n # get scan paths\n scan_names = []\n for sequence in test_sequences:\n sequence = \"{0:02d}\".format(int(sequence))\n label_paths = os.path.join(\n FLAGS.dataset, \"sequences\", str(sequence), \"velodyne\"\n )\n # populate the label names\n seq_scan_names = [\n os.path.join(dp, f)\n for dp, dn, fn in os.walk(os.path.expanduser(label_paths))\n for f in fn\n if \".bin\" in f\n ]\n seq_scan_names.sort()\n scan_names.extend(seq_scan_names)\n # print(scan_names)\n\n # get label paths\n label_names = []\n for sequence in test_sequences:\n sequence = \"{0:02d}\".format(int(sequence))\n label_paths = os.path.join(FLAGS.dataset, \"sequences\", str(sequence), \"labels\")\n # populate the label names\n seq_label_names = [\n os.path.join(dp, f)\n for dp, dn, fn in os.walk(os.path.expanduser(label_paths))\n for f in fn\n if \".label\" in f\n ]\n seq_label_names.sort()\n label_names.extend(seq_label_names)\n # print(label_names)\n\n # get predictions paths\n pred_names = []\n for sequence in test_sequences:\n sequence = \"{0:02d}\".format(int(sequence))\n pred_paths = os.path.join(\n FLAGS.predictions, \"sequences\", sequence, \"predictions\"\n )\n # populate the label names\n seq_pred_names = [\n os.path.join(dp, f)\n for dp, dn, fn in os.walk(os.path.expanduser(pred_paths))\n for f in fn\n if \".label\" in f\n ]\n seq_pred_names.sort()\n pred_names.extend(seq_pred_names)\n # print(pred_names)\n\n # check that I have the same number of files\n print(\"scans\", len(scan_names))\n print(\"labels: \", len(label_names))\n print(\"predictions: \", len(pred_names))\n assert len(label_names) == len(pred_names) and len(scan_names) == len(label_names)\n\n # open each file, get the tensor, and make the iou comparison\n for scan_file, label_file, pred_file in zip(scan_names, label_names, pred_names):\n print(\"evaluating scan \", scan_file)\n # open scan\n scan = np.fromfile(scan_file, dtype=np.float32)\n scan = scan.reshape((-1, 4)) # reshape to matrix\n if FLAGS.limit is not None:\n scan = scan[: FLAGS.limit] # limit to desired length\n depth = np.linalg.norm(\n scan[:, :3], 2, axis=1\n ) # get depth to filter by distance\n\n # open label\n label = np.fromfile(label_file, dtype=np.int32)\n label = label.reshape((-1)) # reshape to vector\n label = label & 0xFFFF # get lower half for semantics\n if FLAGS.limit is not None:\n label = label[: FLAGS.limit] # limit to desired length\n label = remap_lut[label] # remap to xentropy format\n\n # open prediction\n pred = np.fromfile(pred_file, dtype=np.int32)\n pred = pred.reshape((-1)) # reshape to vector\n pred = pred & 0xFFFF # get lower half for semantics\n if FLAGS.limit is not None:\n pred = pred[: FLAGS.limit] # limit to desired length\n pred = remap_lut[pred] # remap to xentropy format\n\n # evaluate for all distances\n for idx in range(len(DISTANCES)):\n # select by range\n lrange = DISTANCES[idx][0]\n hrange = DISTANCES[idx][1]\n mask = np.logical_and(depth > lrange, depth < hrange)\n\n # mask by distance\n # mask_depth = depth[mask]\n # print(\"mask range, \", mask_depth.max(), mask_depth.min())\n mask_label = label[mask]\n mask_pred = 
pred[mask]\n\n # add single scan to evaluation\n evaluators[idx].addBatch(mask_pred, mask_label)\n\n # print for all ranges\n print(\"*\" * 80)\n for idx in range(len(DISTANCES)):\n # when I am done, print the evaluation\n m_accuracy = evaluators[idx].getacc()\n m_jaccard, class_jaccard = evaluators[idx].getIoU()\n\n # print for spreadsheet\n sys.stdout.write(\n \"range {lrange}m to {hrange}m,\".format(\n lrange=DISTANCES[idx][0], hrange=DISTANCES[idx][1]\n )\n )\n for i, jacc in enumerate(class_jaccard):\n if i not in ignore:\n sys.stdout.write(\"{jacc:.3f}\".format(jacc=jacc.item()))\n sys.stdout.write(\",\")\n sys.stdout.write(\"{jacc:.3f}\".format(jacc=m_jaccard.item()))\n sys.stdout.write(\",\")\n sys.stdout.write(\"{acc:.3f}\".format(acc=m_accuracy.item()))\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n # if codalab is necessary, then do it\n if FLAGS.codalab:\n results = {}\n for idx in range(len(DISTANCES)):\n # make string for distance\n d_str = str(DISTANCES[idx][-1]) + \"m_\"\n\n # get values for this distance range\n m_accuracy = evaluators[idx].getacc()\n m_jaccard, class_jaccard = evaluators[idx].getIoU()\n\n # put in dictionary\n results[d_str + \"accuracy_mean\"] = float(m_accuracy)\n results[d_str + \"iou_mean\"] = float(m_jaccard)\n for i, jacc in enumerate(class_jaccard):\n if i not in ignore:\n results[d_str + \"iou_\" + class_strings[class_inv_remap[i]]] = float(\n jacc\n )\n # save to file\n with open(\"segmentation_scores_distance.txt\", \"w\") as yaml_file:\n yaml.dump(results, yaml_file, default_flow_style=False)\n"
] | [
[
"numpy.fromfile",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.logical_and"
]
] |
Merck/BioPhi-2021-publication | [
"988a34f4c482321105151fb626ffea6d5e136862"
] | [
"bin/humanness_z_score.py"
] | [
"#!/usr/bin/env python\n\nimport argparse\nimport pandas as pd\nfrom Bio import SeqIO\nimport os\nimport numpy as np\nfrom collections import OrderedDict\nfrom tqdm import tqdm\nfrom abnumber import Chain\nimport re\nimport requests\nimport time\n\nSCORE_REGEX = re.compile('<h3>The Z-score value of the Query sequence is: (-?[0-9.]+)</h3>')\n\ndef get_z_score_online(seq):\n chain = Chain(seq, scheme='imgt')\n chain_type = 'human_heavy' if chain.chain_type == 'H' else ('human_lambda' if chain.chain_type == 'L' else 'human_kappa')\n html = None\n for retry in range(5):\n url = f'http://www.bioinf.org.uk/abs/shab/shab.cgi?aa_sequence={seq}&DB={chain_type}'\n request = requests.get(url)\n time.sleep(0.5 + retry * 5)\n if request.ok:\n html = request.text\n break\n else:\n print('Retry', retry+1)\n if not html:\n raise ValueError('Z-score server is not accessible')\n matches = SCORE_REGEX.findall(html)\n if not matches:\n print(html)\n raise ValueError(f'Error calling url {url}')\n return float(matches[0])\n\ndef get_z_scores_online(queries):\n results = []\n for query in tqdm(queries):\n zscore = get_z_score_online(query.seq)\n results.append(OrderedDict(\n id=query.id,\n description=query.description,\n zscore=zscore\n ))\n return pd.DataFrame(results)\n\nif __name__ == \"__main__\":\n # Parse command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"query\", help=\"Query FASTA sequence file.\")\n parser.add_argument(\"output\", help=\"Output TSV file path.\")\n\n options = parser.parse_args()\n \n queries = SeqIO.parse(options.query, 'fasta')\n \n print('Note: The sequences will be processed through UCL Z-score web service! Sleeping for 10s, press Ctrl+C to cancel...')\n time.sleep(10)\n print('Processing...')\n df = get_z_scores_online(queries)\n \n df.to_csv(options.output, sep='\\t', index=False)\n print(f'Saved to: {options.output}')\n"
] | [
[
"pandas.DataFrame"
]
] |
Femi-Tofade/Visualization_Package | [
"9056e1c0d44962a3b519f4d260ba38d85c58b413"
] | [
"visualization_code.py"
] | [
"import matplotlib.pyplot as plt\nimport math\n\nclass VisualizationCode:\n def __init__(self):\n \"\"\"\n Class to create different well labelled visualizations such as line plots, and bar charts based off matplotlib\n \n \"\"\"\n \n x = []\n y = []\n \n def data(data_1, data_2):\n \"\"\"Function to read in data from a txt file. The txt file should have\n\t\tone number (float) per line. The numbers are stored in the data attribute.\n\t\t\t\t\n\t\tArgs:\n\t\t\tdata_1 and data_2 (string): names of files to read from\n\t\t\n\t\tReturns:\n\t\t\tx and y: data read from data_1 and data_2 respectively\n\t\t\n\t\t\"\"\"\n with open(data_1, data_2) as file1, file2:\n data_1_list = []\n line1 = file1.readline()\n while line1:\n data_1_list.append(int(line1))\n line1 = file.readline()\n \n data_2_list = []\n line1 = file2.readline()\n while line2:\n data_2_list.append(int(line2))\n line2 = file.readline()\n data_1.close()\n data_2.close()\n \n x = data_1_list\n y = data_2_list\n \n return x, y\n \n def plot_line(x,y):\n #FUNCTION TO PLOT THE LINE PLOT \n plt.figure(figsize =(7,4))\n plt.plot(x, y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=12)\n plt.title('Line Plot showing the relationship between x and y', fontsize = 15)\n plt.ylabel('y', fontsize = 11)\n plt.xlabel('x', fontsize = 11)\n plt.grid(True)\n plt.show()\n \n\n \n def plot_bar(x,y):\n #FUNCTION TO PLOT THE BAR PLOT \n\n plt.bar(x, y, color='red', marker='o', linestyle='dashed', linewidth=2, markersize=12)\n plt.title('Distribution of Data', fontsize = 15)\n plt.ylabel('y', fontsize = 11)\n plt.xlabel('x', fontsize = 11)\n\n plt.show()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
Things-School/amazon-sagemaker-predictive-maintenance-deployed-at-edge | [
"d4849f7e01a9f5b4d6a4b6eaee7e4e27a50eec85"
] | [
"predictlambda_v2.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 18 22:24:45 2019\n@author: stenatu\n\nUpdated on Thu Nov 5 13:19:12 2020 IST\n@author: sufiankaki\n\n# Edit this lambda function which invokes your trained XgBoost Model deployed\n# on the Greengrass Core to make predictions whenever new sensor data comes in\n# The output of the lambda predictins are sent to IoT. If a Faulty part is found,\n# the output is sent to SNS.\n\n# To get this lambda function to work, fill out the TODOs.\n\"\"\"\n\n#\n# Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n\nimport logging\nimport platform\nimport sys\nfrom datetime import datetime\nimport greengrasssdk\nimport boto3\nimport random\nimport json\nimport xgboost as xgb\nimport pickle\nimport numpy as np\n\n\n# Setup logging to stdout\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n\n# Creating a greengrass core sdk client\nclient = greengrasssdk.client(\"iot-data\")\n\n# Retrieving platform information to send from Greengrass Core\nmy_platform = platform.platform()\nsns = boto3.client('sns')\nmodel_path = '/greengrass-machine-learning/xgboost/xgboost-model'\nTOPIC_ARN = 'arn:aws:sns:us-east-1:<ACCOUNT_ID>:<SNS_TopicName>' #TODO: enter your SNS Topic ARN here.\nLAMBDA_TOPIC = 'xgboost/offline' #TODO: enter your subscription topic from Lambda to IoT\n\nprint(\"Imports invoked\")\n\n # Load the model object.\nmodel = pickle.load(open(model_path, 'rb'))\n\n\n\ndef predict_part(datapoint):\n \n data = [random.uniform(-1, 1)/10 for x in range(167)]\n data = [datapoint] + data\n \n dataarray = np.array([data])\n \n start = datetime.now()\n \n print(start)\n \n \n response = model.predict(xgb.DMatrix(dataarray))\n\n end = datetime.now()\n \n mytime = (end - start).total_seconds()*1000\n \n print(\"Offline Model RunTime = {} milliseconds\".format((end - start).total_seconds()*1000))\n \n result = round(response[0])\n print(result)\n pred = round(result)\n \n # If Prediction == 1, then part is Faulty, else it is Not Faulty.\n if pred ==1:\n predicted_label = 'Faulty'\n else:\n predicted_label = 'Not Faulty'\n \n #publish results to local greengrass topic.\n if not my_platform:\n client.publish(topic=LAMBDA_TOPIC, payload='Predicted Label {} in {} milliseconds'.format(predicted_label, mytime))\n else:\n client.publish(topic=LAMBDA_TOPIC, payload=' Predicted Label {} in {} milliseconds. Sent from Greengrass Core running on platform: {}'.format(predicted_label, mytime, my_platform))\n \n #publish to SNS topic.\n if pred == 1:\n response = sns.publish(\n TopicArn=TOPIC_ARN, \n Message='Faulty Part Found on Line 1. Immediate attention required.' \n )\n print(\"Published to Topic\")\n\n\n# This is a dummy handler and will not be invoked\n# Instead the code above will be executed in an infinite loop for our example\ndef function_handler(event, context):\n datapoint = json.loads(event[\"state\"][\"desired\"][\"property\"])\n return predict_part(datapoint)\n"
] | [
[
"numpy.array"
]
] |
gordominossi/EDOs | [
"151ac38ed6b269755740b56e990032bcb572f11a"
] | [
"Entrega/2.4 - Runge-Kutta4.py"
] | [
"#EP2 - MAP3122 - Métodos Numéricos e Aplicações\n\n########### Exercício 2.4 ##############\n########## Resolução por RK4 ###########\n\n# Nome: Gabriel Moreira Minossi | NUSP: 9349346\n# Nome: Vinicius Bueno de Moraes | NUSP: 10256432\n\nimport numpy as np #Import de Bibliotecas\nimport math\nimport matplotlib.pyplot as plt\n\nprint()\nprint(\"MAP3122 - EP2\")\nprint(\"Exercício 2.4 - Resolução por Runge-Kutta 4\")\nprint()\n\nh = 10/6000\nn = 6000\nx = 1.5\ny = 1.5\nts = []\ncoelhos = []\nraposas = []\n\nj=0\nfor i in range(n+1):\n ts.append(j*h)\n j = j+1\n\nx = 1.5\ny = 1.5\n\nfor i in ts: #Cálculo da solução pelo método RK4\n if i == 0:\n coelhos.append(x)\n raposas.append(y)\n else:\n k1 = h * ((2/3) * x - ((4/3) * x * y))\n k2 = h * (((2/3) * x + (h/2)) - (((4/3) * x * y) + k1 *(h/2)))\n k3 = h * (((2/3) * x + (h/2)) - (((4/3) * x * y) + k2 *(h/2)))\n k4 = h * (((2/3) * x + (h)) - (((4/3) * x * y) + k3 * (h)))\n k = (k1+2*k2+2*k3+k4)/6\n x = x + k\n coelhos.append(x)\n k1 = h * (y * x - (y))\n k2 = h * ((y * x + (h/2)) - ((y) + k1 * (h/2)))\n k3 = h * ((y * x + (h/2)) - ((y) + k2 * (h/2)))\n k4 = h * ((y * x + (h)) - ((y) + k3 * (h)))\n k = (k1+2*k2+2*k3+k4)/6\n y = y + k\n raposas.append(y)\nk = 0\n\nplt.figure(figsize=(12, 6)) #Plot do Gráfico\nplt.suptitle(\"2.4 - Resolução por Runge-Kutta 4\")\nplt.subplot(1, 2, 1)\nplt.grid()\nplt.plot(coelhos, raposas, \"-b\")\nplt.title(\"Retrato de Fase (Coelhos, Raposas)\")\nplt.ylabel(\"Raposas\")\nplt.xlabel(\"Coelhos\")\nplt.subplot(1, 2, 2)\nplt.grid()\nplt.plot(ts, coelhos, \"-b\", label= \"Coelhos\")\nplt.plot(ts, raposas, \"-r\", label = \"Raposas\")\nplt.title(\"Evolução das populações com o tempo \\n para h ≅ %s | n = %s\" %(round(h, 5), n))\nplt.legend(loc=\"upper left\")\nplt.ylabel(\"População\")\nplt.xlabel(\"t\")\nplt.show()\n\nprint(\"O programa foi executado com sucesso para n = 6000 e os gráficos exibidos\")\nprint()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
chasebk/code_TWO_ELM | [
"c5145fa50175e63e467b7aa134065398235f8e5a"
] | [
"drafts/draw/draw_spider_chart_final2.py"
] | [
"import pandas as pd\nfrom math import pi\nimport matplotlib.pyplot as plt\n\n\ncolors = [\"teal\", \"red\", \"green\", \"blue\", \"orange\", \"yellow\"]\ntitles = [\"MLNN\",\"ELM\", \"GA-ELM\", \"PSO-ELM\", \"TWO-ELM\", \"OTWO-ELM\"]\n\n#Create a data frame from Messi and Ronaldo's 6 Ultimate Team data points from FIFA 18\nk2 = {'MLNN':16.716,'ELM':16.292, \"GA-ELM\": 15.378, \"PSO-ELM\": 15.394, \"TWO-ELM\": 15.400, \"OTWO-ELM\": 15.363}\nk5 = {\"MLNN\": 15.857, \"ELM\": 15.744, \"GA-ELM\": 14.914, \"PSO-ELM\": 14.834, \"TWO-ELM\": 14.885, \"OTWO-ELM\": 14.874}\n\n\ndata = pd.DataFrame([k2, k5], index = [\"k=2\", \"k=5\"])\nprint(data)\n\nAttributes =list(data)\nAttNo = len(Attributes)\n\n\nvalues = data.iloc[0].tolist()\nvalues += values [:1]\nangles = [n / float(AttNo) * 2 * pi for n in range(AttNo)]\nangles += angles [:1]\n\nvalues1 = data.iloc[1].tolist()\nvalues1 += values1 [:1]\nangles1 = [n / float(AttNo) * 2 * pi for n in range(AttNo)]\nangles1 += angles1 [:1]\n\n#Create the chart as before, but with both Ronaldo's and Messi's angles/values\nax = plt.subplot(111, polar=True)\n\n#Add the attribute labels to our axes\nplt.xticks(angles[:-1], Attributes)\n\n\n\n#Plot the line around the outside of the filled area, using the angles and values calculated before\nax.plot(angles,values)\nax.fill(angles, values, 'teal', alpha=0.1)\n\nax.plot(angles1,values1)\nax.fill(angles1, values1, 'red', alpha=0.1)\n\n\n#Give the plot a title and show it\nax.set_title(\"Sliding window k = 2\")\n\nplt.savefig(\"hehe.png\", bbox_inches=\"tight\")\nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] |
Mynasino/DigitRecognizer | [
"a06e50e297a39c4ff626e4a5de00f781d9109592"
] | [
"DigitRecognizer/Trainer.py"
] | [
"import tensorflow as tf \r\nimport numpy as np \r\nimport DataLoader\r\n\r\nclass tf_trainer(object):\r\n def __init__(self):\r\n self.sess = tf.Session()\r\n self.sess.run(tf.global_variables_initializer())\r\n self.DataLoad = DataLoader.DataLoad()\r\n\r\n def train(self, graph, n_epoch, learning_rate, batch_size, n_train):\r\n n_batch = int(n_train / batch_size)\r\n if n_train % batch_size:\r\n n_batch = n_batch + 1\r\n\r\n n_test_batch = int((42000 - n_train) / batch_size)\r\n if (42000 - n_train) % batch_size:\r\n n_test_batch = n_test_batch + 1\r\n\r\n for i_epoch in range(n_epoch):\r\n train_loss = train_acc = 0\r\n for train_X, train_Y in self.DataLoad.data_iter(batch_size, n_train):\r\n feed_dict = {\r\n graph.X:train_X,\r\n graph.Y:train_Y,\r\n graph.lr:learning_rate,\r\n }\r\n self.sess.run(graph.train_step, feed_dict=feed_dict)\r\n train_loss = train_loss + self.sess.run(graph.loss, feed_dict=feed_dict)\r\n train_acc = train_acc + self.sess.run(graph.accuracy, feed_dict=feed_dict)\r\n train_loss = train_loss / n_batch\r\n train_acc = train_acc / n_batch\r\n\r\n test_loss = test_acc = 0\r\n test_inds = np.arange(n_train, 42000)\r\n for i in range(n_test_batch):\r\n feed_dict = {\r\n graph.X:self.DataLoad.all_X[test_inds[i*batch_size:min((i+1)*batch_size, 42000)]],\r\n graph.Y:self.DataLoad.all_Y[test_inds[i*batch_size:min((i+1)*batch_size, 42000)]],\r\n }\r\n test_loss = test_loss + self.sess.run(graph.loss, feed_dict=feed_dict)\r\n test_acc = test_acc + self.sess.run(graph.accuracy, feed_dict=feed_dict)\r\n test_loss = test_loss / n_test_batch\r\n test_acc = test_acc / n_test_batch\r\n\r\n print(\"epoch %d train loss %f acc %f test loss %f acc %f\" % (i_epoch, train_loss, train_acc, test_loss, test_acc))\r\n\r\n def output(self, graph, feed_X):\r\n feed_dict = {\r\n graph.X:feed_X,\r\n }\r\n return self.sess.run(graph.type, feed_dict=feed_dict)"
] | [
[
"tensorflow.global_variables_initializer",
"numpy.arange",
"tensorflow.Session"
]
] |
HKUNLP/UnifiedSKG | [
"49a2ff950bb312b980c22ad72b11520db72ab6a3"
] | [
"models/adapter/adapter.py"
] | [
"import math\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\n\r\nclass Activation_Function_Class(nn.Module):\r\n \"\"\"\r\n Implementation of various activation function.\r\n \"\"\"\r\n\r\n def __init__(self, hidden_act):\r\n\r\n if hidden_act.lower() == \"relu\":\r\n self.f = nn.functional.relu\r\n elif hidden_act.lower() == \"tanh\":\r\n self.f = torch.tanh\r\n elif hidden_act.lower() == \"swish\":\r\n\r\n def swish(x):\r\n return x * torch.sigmoid(x)\r\n\r\n self.f = swish\r\n elif hidden_act.lower() == \"gelu\":\r\n\r\n def gelu_new(x):\r\n \"\"\"\r\n Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\r\n Also see https://arxiv.org/abs/1606.08415\r\n \"\"\"\r\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\r\n\r\n self.f = gelu_new\r\n elif hidden_act.lower() == \"leakyrelu\":\r\n self.f = nn.functional.leaky_relu\r\n\r\n super().__init__()\r\n\r\n def forward(self, x):\r\n return self.f(x)\r\n\r\n\r\n# Single Adapter\r\n\r\n\r\nclass Adapter(nn.Module):\r\n \"\"\"\r\n Implementation of a single Adapter block for T5. # TODO: Chen\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n input_size,\r\n down_sample=None,\r\n non_linearity=\"relu\",\r\n init_bert_weights=True,\r\n add_layer_norm_before=True,\r\n residual_before_ln=True,\r\n ):\r\n super().__init__()\r\n\r\n self.input_size = input_size\r\n self.add_layer_norm_before = add_layer_norm_before\r\n self.residual_before_ln = residual_before_ln\r\n\r\n # list for all modules of the adapter, passed into nn.Sequential()\r\n seq_list = []\r\n\r\n # If we want to have a layer norm on input, we add it to seq_list\r\n if self.add_layer_norm_before:\r\n self.adapter_norm_before = nn.LayerNorm(self.input_size)\r\n seq_list.append(self.adapter_norm_before)\r\n\r\n # if a downsample size is not passed, we just half the size of the original input\r\n self.down_sample = down_sample\r\n if down_sample is None:\r\n self.down_sample = self.input_size // 2\r\n\r\n # Linear down projection of the input\r\n seq_list.append(nn.Linear(self.input_size, self.down_sample))\r\n\r\n # select non-linearity\r\n self.non_linearity = Activation_Function_Class(non_linearity.lower())\r\n\r\n seq_list.append(self.non_linearity)\r\n\r\n # sequential adapter, first downproject, then non-linearity then upsample. In the forward pass we include the\r\n # residual connection\r\n self.adapter_down = nn.Sequential(*seq_list)\r\n\r\n # Up projection to input size\r\n self.adapter_up = nn.Linear(self.down_sample, self.input_size)\r\n\r\n # if we want to initialize with the bert strategy then this function is called for all the linear layers\r\n if init_bert_weights:\r\n self.adapter_down.apply(self.init_bert_weights)\r\n self.adapter_up.apply(self.init_bert_weights)\r\n\r\n def forward(self, x):\r\n down = self.adapter_down(x)\r\n\r\n up = self.adapter_up(down)\r\n\r\n output = up\r\n\r\n output = output + x\r\n\r\n return output\r\n\r\n # This is copied from the BertPreTrainedModel class to make this a self containing class.\r\n @staticmethod\r\n def init_bert_weights(module):\r\n \"\"\"Initialize the weights.\"\"\"\r\n if isinstance(module, (nn.Linear, nn.Embedding)):\r\n # std defaults to 0.02, this might need to be changed\r\n module.weight.data.normal_(mean=0.0, std=0.02)\r\n elif isinstance(module, nn.LayerNorm):\r\n module.bias.data.zero_()\r\n module.weight.data.fill_(1.0)\r\n if isinstance(module, nn.Linear) and module.bias is not None:\r\n module.bias.data.zero_()\r\n"
] | [
[
"torch.nn.Sequential",
"torch.sigmoid",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.pow"
]
] |
jrodrigopuca/procesamiento-img | [
"52078b80e88b548f4d034cc931deb711f4d0edbb"
] | [
"script/procesador.py"
] | [
"import numpy as np\nfrom scipy import ndimage\nimport cv2 as cv\nimport sys, os\nimport matplotlib.pyplot as plt\n\n#Lista de algunos kernels que andan por ahí\n\nkernelBlur =[[1.0/9,1.0/9,1.0/9],\n\t\t\t[1.0/9,1.0/9,1.0/9],\n\t\t\t[1.0/9,1.0/9,1.0/9]]\n\nkernelGaussianBlur =[[1.0/16,2.0/16,1.0/16],\n\t\t\t\t\t[2.0/16,4.0/16,2.0/16],\n\t\t\t\t\t[1.0/16,2.0/16,1.0/16]]\n\nkernelGaussianBlurMax =[[1.0/256,4.0/256,6.0/256,4.0/256,1.0/256],\n\t\t\t\t\t\t[4.0/256,16.0/256,24.0/256,16.0/256,1.0/256],\n\t\t\t\t\t\t[6.0/256,24.0/256,36.0/256,24.0/256,6.0/256],\n\t\t\t\t\t\t[4.0/256,16.0/256,24.0/256,16.0/256,1.0/256],\n\t\t\t\t\t\t[1.0/256,4.0/256,6.0/256,4.0/256,1.0/256]]\n\nkernelEdge1= [[1,0,-1],\n\t\t\t[0,0,0],\n\t\t\t[-1,0,1]]\n\nkernelEdge3= [[-1,-1,-1],\n\t\t\t[-1,8,-1],\n\t\t\t[-1,-1,-1]]\n\nkernelLaplacian = [[0,1,0],\n\t\t\t\t[1,-4,1],\n\t\t\t\t[0,1,0]]\n\n\nkernelSharpen= [[0,-1,0],\n\t\t\t\t[-1,5,-1],\n\t\t\t\t[0,-1,0]]\n\nkernelSobelX = [[1,0,-1],\n\t\t\t\t[2,0,-2],\n\t\t\t\t[1,0,-1]]\n\nkernelSobelY = [[1,2,1],\n\t\t\t\t[0,0,0],\n\t\t\t\t[-1,-2,-1]]\n\nkernelPrewittX = [[-1,0,1],\n\t\t\t\t\t[-1,0,1],\n\t\t\t\t\t[-1,0,1]]\n\nkernelPrewittY = [[1,1,1],\n\t\t\t\t\t[0,0,0],\n\t\t\t\t\t[-1,-1,-1]]\n\nkernelRobertsX = [[1,0],\n\t\t\t\t[0,-1]]\n\nkernelRobertsY = [[0,1],\n\t\t\t\t[-1,0]]\n\n######## Algunas funciones auxiliares #######\n\n#Cargar custom kernel\ndef cargarKernel(ruta):\n\t#TODO: Verificar que sea cuadrada + verificar ruta y eso\n\tarchivo_kernel = open (ruta , 'r')\n\tcontenido = [[float(num) for num in line.split(',')] for line in archivo_kernel ]\n\t#TODO: Parsear para que acepte floats del tipo x/y\n\treturn contenido\n\n#Ayuda\ndef mostrarAyuda():\n\tprint(\"\\\nPara procesar alguna imagen:\\n\\\n\\n\\\n\tpython procesador.py [nombre_imagen] [opciones] [argumento_extra]\\n\\\n\t\\n\\\n\t*Las imagenes tienen que estar en el mismo directorio\\n\\\n\t\\n\\\nOpciones:\\n\\\n\t\\n\\\n\th: Ayuda\\n\\\n\tk: Usar kernel desde un archivo, en [argumento_extra] se debe colocar el nombre del mismo.\\n\\\n\tb: Blur simple\\n\\\n\tgb: Blur Gaussiano 3x3\\n\\\n\tgbm: Blur Gaussiano 5x5\\n\\\n\te1: Algun detector de bordes desconocido\\n\\\n\te3: Algun detector de bordes desconocido 2\\n\\\n\tl: Laplaciano\\n\\\n\tsh: Filtro sharpen\\n\\\n\ts: Sobel\\n\\\n\tp: Prewitt\\n\\\n\tr: Roberts\\n\\\n\tmediana: Filtro mediana\\n\\\n\tnsp: Ruido sal y pimienta\\n\\\n\tni: Ruido impulsivo\\n\\\n\tng: Ruido Gaussiano\\n\\\n\tnr: Rudio Rayleigh\\n\\n\")\n\tsys.exit()\n\n#Abrir imagen para procesar\ndef abrir(nombre_imagen):\n\tdirectorio_actual = os.getcwd()\n\truta_imagen = directorio_actual + '/' + nombre_imagen\n\timagen = cv.imread(ruta_imagen,0)\n\t#Chequear si existe\n\tif imagen is None:\n\t\tprint(\"Imagen no encontrada\\n\")\n\t\tmostrarAyuda()\n\t\tsys.exit()\n\telse:\n\t\talto, ancho = imagen.shape\n\t\tsalida = np.empty([alto,ancho])\n\t\t#Hago esto porque opencv o numpy tiene un mecanismo muy gracioso, no puedo copiarlo directamente\n\t\tfor i in range(alto):\n\t\t\tfor j in range(ancho):\n\t\t\t\tsalida[i,j] = imagen[i,j]\n\t\treturn salida\n\n#Guardar\ndef guardar(filtro,imagen):\n\tcv.imwrite(filtro+\".png\",imagen)\n\n#Guardar y salir\ndef salir(filtro,imagen):\n\tcv.imwrite(\"result/\"+filtro+\".png\",imagen)\n\tplt.imshow(imagen, cmap='gray')\n\tsys.exit()\n\ndef normalizar(imagen):\n\t#Normaliza a valores entre 0 y 255\n\talto, ancho = imagen.shape\n\tim_normal = np.empty([alto,ancho])\n\tmaximo = np.amax(imagen)\n\tminimo = np.amin(imagen)\n\tshrink = 255/(maximo - 
minimo)\n\treturn ((imagen - minimo) * shrink).astype(int)\n\n\n####### Programa principal #######\n\nif sys.argv[1] == \"h\":\n\tmostrarAyuda()\nelse:\n\timagen = abrir(sys.argv[1])\n\n#Empezar a procesar\nalto, ancho = imagen.shape\nfiltrada = np.empty([alto,ancho])\n\n#Ver opciones y procesar\n#TODO: Mejorar logica de parametros por consola\n\n#Custom kernel\nif('k' in sys.argv):\n\tif len(sys.argv)>3:\n\t\tkernelCustom = np.array(cargarKernel(sys.argv[3]))\n\t\tfiltrada = ndimage.convolve(imagen, kernelCustom).astype(int)\n\t\tsalir(\"custom_kernel\",filtrada)\n\telse:\n\t\tprint(\"Falta ruta del archivo con el kernel\")\n\nif('b' in sys.argv):\n\tfiltrada = ndimage.convolve(imagen, kernelBlur)\n\tsalir(\"blur\",filtrada)\n\nif('gb' in sys.argv):\n\tfiltrada = ndimage.convolve(imagen, kernelGaussianBlur)\n\tsalir(\"gaussian_blur\",filtrada)\n\nif('gbm' in sys.argv):\n\tfiltrada = ndimage.convolve(imagen, kernelGaussianBlurMax)\n\tsalir(\"gaussian_blur_max\",filtrada)\n\nif('e1' in sys.argv):\n\tfiltrada = ndimage.convolve(imagen, kernelEdge1)\n\tsalir(\"edge1\",filtrada)\n\nif('e3' in sys.argv):\n\tfiltrada = ndimage.convolve(imagen, kernelEdge3)\n\tsalir(\"edge3\",filtrada)\n\nif('l' in sys.argv):\n\tfiltrada = ndimage.convolve(imagen, kernelLaplacian)\n\tsalir(\"laplacian\",filtrada)\n\nif('sh' in sys.argv):\n\tfiltrada = ndimage.convolve(imagen, kernelSharpen)\n\tsalir(\"sharpen\",filtrada)\n\n\n##### Edge detection mas trabajada: Sobel, Prewitt y Roberts\n\nif('s' in sys.argv):\n\tumbral = 30\n\tif len(sys.argv)>3:\n\t\tumbral = int(sys.argv[3])\n\tderivadax = np.empty([alto,ancho])\n\tderivadax = ndimage.convolve(imagen, kernelSobelX) \n\tderivaday = np.empty([alto,ancho])\n\tderivaday = ndimage.convolve(imagen, kernelSobelY)\n\t#Normalizar antes de guardar\n\tguardar(\"sobel_x\",normalizar(derivadax))\n\tguardar(\"sobel_y\",normalizar(derivaday))\n\tfiltrada = normalizar(np.hypot(derivadax,derivaday))\n\tsalir(\"sobel\" + \"-\" + str(umbral),((filtrada>umbral) * filtrada))\n\nif('p' in sys.argv):\n\tumbral = 30\n\tif len(sys.argv)>3:\n\t\tumbral = int(sys.argv[3])\n\tderivadax = np.empty([alto,ancho])\n\tderivadax = ndimage.convolve(imagen, kernelPrewittX) \n\tderivaday = np.empty([alto,ancho])\n\tderivaday = ndimage.convolve(imagen, kernelPrewittY)\n\t#Normalizar antes de guardar\n\tguardar(\"prewitt_x\",normalizar(derivadax))\n\tguardar(\"prewitt_y\",normalizar(derivaday))\n\tfiltrada = normalizar(np.hypot(derivadax,derivaday))\n\tsalir(\"prewitt\" + \"-\" + str(umbral),((filtrada>umbral) * filtrada))\n\nif('r' in sys.argv):\n\tumbral = 30\n\tif len(sys.argv)>3:\n\t\tumbral = int(sys.argv[3])\n\tderivadax = np.empty([alto,ancho])\n\tderivadax = ndimage.convolve(imagen, kernelRobertsX) \n\tderivaday = np.empty([alto,ancho])\n\tderivaday = ndimage.convolve(imagen, kernelRobertsY)\n\t#Normalizar antes de guardar\n\tguardar(\"roberts_x\",normalizar(derivadax))\n\tguardar(\"roberts_y\",normalizar(derivaday))\n\tfiltrada = normalizar(np.hypot(derivadax,derivaday))\n\tsalir(\"roberts\" + \"-\" + str(umbral),((filtrada>umbral) * filtrada))\n\n\n##### Filtro mediana\n\nif('mediana' in sys.argv):\n\t#Hice un filtro de mediana con opencv, numpy y un filtro de mediana de scipy\n\t#Por alguna razon es bastante lento (porque tiene que ordenar 9 elementos por cada pixel)\n\tsalir(\"mediana\",ndimage.median_filter(imagen, 3))\n\n\n##### Ruidos\n\n#TODO: no iterar para detectar los umbrales en ruido impulsivo y sal-pimienta\nif('nsp' in sys.argv):\n\t#Condimentar\n\tumbralSal = 
0.999\n\tumbralPimienta = 0.0001\n\tif len(sys.argv)>3:\n\t\t#Nos da 1000 valores de umbral en total (si solo usamos enteros)\n\t\tumbralSal = (1000-float(sys.argv[3]))/1000\n\t\tumbralPimienta = (float(sys.argv[3]))/1000\n\tfor i in range(alto):\n\t\t\tfor j in range(ancho):\n\t\t\t\tvalor = np.random.random_sample()\n\t\t\t\tif valor>umbralSal:\n\t\t\t\t\tfiltrada[i,j] = 255\n\t\t\t\telif valor<umbralPimienta:\n\t\t\t\t\tfiltrada[i,j] = 0\n\t\t\t\telse:\n\t\t\t\t\tfiltrada[i,j] = imagen[i,j]\n\tsalir(\"ruido_sal-pimienta\" + \"-\" + str(umbralSal) + \"-\" + str(umbralPimienta),filtrada)\n\nif('ni' in sys.argv):\n\t#Salar\n\tumbral = 0.999\n\tif len(sys.argv)>3:\n\t\t#Nos da 1000 valores de umbral en total (si solo usamos enteros)\n\t\tumbral = (1000-float(sys.argv[3]))/1000\n\tfor i in range(alto):\n\t\t\tfor j in range(ancho):\n\t\t\t\tif np.random.random_sample()>umbral:\n\t\t\t\t\tfiltrada[i,j] = 255\n\t\t\t\telse:\n\t\t\t\t\tfiltrada[i,j] = imagen[i,j]\n\tsalir(\"ruido_impulsivo\" + \"-\" + str(umbral),filtrada)\n\nif('ng' in sys.argv):\n\t#Ruido gaussiano\n\tmedia = 0\n\tdesviacion = 1\n\tif len(sys.argv)>3:\n\t\tdesviacion = float(sys.argv[3])\n\tsalir(\"ruido_gaussiano\" + \"-\" + str(media) + \"-\" + str(desviacion),\\\n\t\t(imagen + np.random.normal(media,desviacion,(alto,ancho))))\n\nif('nr' in sys.argv):\n\t#Ruido Rayleigh\n\tescala = 1\n\tif len(sys.argv)>3:\n\t\tescala = float(sys.argv[3])\n\tsalir(\"ruido_rayleigh\" + str(escala),imagen + np.random.rayleigh(escala,(alto,ancho)))"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.amax",
"numpy.amin",
"scipy.ndimage.median_filter",
"scipy.ndimage.convolve",
"numpy.random.random_sample",
"numpy.random.normal",
"numpy.random.rayleigh",
"numpy.hypot",
"numpy.empty"
]
] |
RiteshMaheshwari/naarad | [
"401ae683e38900e94898b15ab64d4410a556956c"
] | [
"src/naarad/metrics/jmeter_metric.py"
] | [
"# coding=utf-8\n\"\"\"\n© 2013 LinkedIn Corp. All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an \"AS IS\" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\"\"\"\nfrom collections import defaultdict\nimport datetime\nimport gc\nimport logging\nimport os\nimport re\nimport numpy\nimport heapq\nfrom naarad.metrics.metric import Metric\nfrom naarad.graphing.plot_data import PlotData as PD\nimport naarad.utils\nimport naarad.naarad_imports\nfrom naarad.naarad_constants import important_sub_metrics_import\n\n\nlogger = logging.getLogger('naarad.metrics.JmeterMetric')\n\nclass JmeterMetric(Metric):\n def __init__ (self, metric_type, infile_list, hostname, output_directory, resource_path, label, ts_start, ts_end,\n rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options):\n Metric.__init__(self, metric_type, infile_list, hostname, output_directory, resource_path, label, ts_start, ts_end,\n rule_strings, important_sub_metrics, anomaly_detection_metrics)\n self.sub_metric_description = {\n 'lb': 'Transaction Name',\n 'lt': 'Time to First byte',\n 'ts': 'Timestamp',\n 'tn': 'Transaction Name (Parent)',\n 's': 'Status',\n 'ResponseTime': 'Response Time',\n 'rc': 'Response Code',\n 'rm': 'Response Message',\n 'dt': 'Data Type',\n 'ResponseSize': 'Response Size',\n 'qps': 'Successful Transactions per second',\n 'ErrorsPerSecond': 'Errors per second',\n 'DataThroughput': 'Data Throughput'\n }\n self.sub_metric_units = {\n 'lt': 'ms',\n 'ResponseTime': 'ms',\n 'ResponseSize': 'bytes',\n 'qps': 'qps',\n 'DataThroughput': 'mbps',\n 'ErrorsPerSecond': 'qps'\n }\n self.calculated_stats = {}\n self.aggregation_granularity = 'second'\n self.calculated_percentiles = {}\n self.summary_stats = defaultdict(dict)\n self.summary_html_content_enabled = True\n self.summary_charts = [self.label + '.Overall_Summary.div']\n if not self.important_sub_metrics:\n self.important_sub_metrics = important_sub_metrics_import['JMETER']\n if other_options:\n for (key, val) in other_options.iteritems():\n setattr(self, key, val)\n\n\n\n def get_csv(self, transaction_name, column):\n col = naarad.utils.sanitize_string(column)\n if col == 't':\n col = 'ResponseTime'\n elif col == 'by':\n col = 'ResponseSize'\n elif col == 'thr':\n col = 'DataThroughput'\n elif col == 'eqps':\n col = 'ErrorsPerSecond'\n\n if transaction_name == '__overall_summary__':\n transaction_name = 'Overall_Summary'\n csv = os.path.join(self.resource_directory, self.label + '.' + transaction_name + '.' + col + '.csv')\n self.csv_column_map[csv] = transaction_name + '.' + col\n return csv\n\n def aggregate_count_over_time(self, metric_store, line_data, transaction_list, aggregate_timestamp):\n \"\"\"\n Organize and store the count of data from the log line into the metric store by metric type, transaction, timestamp\n\n :param dict metric_store: The metric store used to store all the parsed jmeter log data\n :param dict line_data: dict with the extracted k:v from the log line\n :param list transaction_list: list of transaction to be used for storing the metrics from given line\n :param string aggregate_timestamp: timestamp used for storing the raw data. 
This accounts for aggregation time period\n :return: None\n \"\"\"\n for transaction in transaction_list:\n if line_data['s'] == 'true':\n all_qps = metric_store['qps']\n else:\n all_qps = metric_store['eqps']\n qps = all_qps[transaction]\n if aggregate_timestamp in qps:\n qps[aggregate_timestamp] += 1\n else:\n qps[aggregate_timestamp] = 1\n return None\n\n def aggregate_values_over_time(self, metric_store, line_data, transaction_list, metric_list, aggregate_timestamp):\n \"\"\"\n Organize and store the data from the log line into the metric store by metric type, transaction, timestamp\n\n :param dict metric_store: The metric store used to store all the parsed jmeter log data\n :param dict line_data: dict with the extracted k:v from the log line\n :param list transaction_list: list of transaction to be used for storing the metrics from given line\n :param list metric_list: list of metrics to extract from the log line\n :param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period\n :return: None\n \"\"\"\n for metric in metric_list:\n for transaction in transaction_list:\n metric_data = reduce(defaultdict.__getitem__,[metric, transaction, aggregate_timestamp], metric_store)\n metric_data.append(float(line_data[metric]))\n return None\n\n def average_values_for_plot(self, metric_store, data, averaging_factor):\n \"\"\"\n Create the time series for the various metrics, averaged over the aggregation period being used for plots\n\n :param dict metric_store: The metric store used to store all the parsed jmeter log data\n :param dict data: Dict with all the metric data to be output to csv\n :param float averaging_factor: averaging factor to be used for calculating the average per second metrics\n :return: None\n \"\"\"\n for metric, transaction_store in metric_store.items():\n for transaction, time_store in transaction_store.items():\n for time_stamp, metric_data in sorted(time_store.items()):\n if metric in ['t', 'by']:\n data[self.get_csv(transaction, metric)].append(','.join([str(time_stamp), str(sum(map(float,metric_data))/float(len(metric_data)))]))\n if metric == 'by':\n metric_store['thr'][transaction][time_stamp] = sum(map(float,metric_data))/float(averaging_factor * 1024 * 1024 / 8.0)\n data[self.get_csv(transaction, 'thr')].append(','.join([str(time_stamp), str(metric_store['thr'][transaction][time_stamp])]))\n elif metric in ['qps', 'eqps']:\n data[self.get_csv(transaction, metric)].append(','.join([str(time_stamp), str(metric_data/float(averaging_factor))]))\n return None\n\n def calculate_key_stats(self, metric_store):\n \"\"\"\n Calculate key statistics for given data and store in the class variables calculated_stats and calculated_percentiles\n calculated_stats:\n 'mean', 'std', 'median', 'min', 'max'\n calculated_percentiles:\n range(5,101,5), 99\n :param dict metric_store: The metric store used to store all the parsed jmeter log data\n :return: none\n \"\"\"\n stats_to_calculate = ['mean', 'std', 'median', 'min', 'max'] # TODO: get input from user\n percentiles_to_calculate = range(5,101,5) # TODO: get input from user\n percentiles_to_calculate.append(99)\n for transaction in metric_store['t'].keys():\n transaction_key = transaction + '.' 
+ 'ResponseTime'\n #For ResponseTime and ResponseSize, each timestamp has a list of values associated with it.\n #Using heapq.merge to merge all the lists into a single list to be passed to numpy.\n self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \\\n naarad.utils.calculate_stats(list(heapq.merge(*metric_store['t'][transaction].values())),\n stats_to_calculate, percentiles_to_calculate)\n self.update_summary_stats(transaction_key)\n transaction_key = transaction + '.' + 'qps'\n if len(metric_store['qps'][transaction].values()) > 0:\n self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \\\n naarad.utils.calculate_stats(metric_store['qps'][transaction].values(),\n stats_to_calculate, percentiles_to_calculate)\n self.update_summary_stats(transaction_key)\n transaction_key = transaction + '.' + 'ResponseSize'\n self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \\\n naarad.utils.calculate_stats(list(heapq.merge(*metric_store['by'][transaction].values())),\n stats_to_calculate, percentiles_to_calculate)\n self.update_summary_stats(transaction_key)\n if 'eqps' in metric_store.keys() and transaction in metric_store['eqps'].keys():\n transaction_key = transaction + '.' + 'ErrorsPerSecond'\n self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \\\n naarad.utils.calculate_stats(metric_store['eqps'][transaction].values(),\n stats_to_calculate, percentiles_to_calculate)\n self.update_summary_stats(transaction + '.' + 'ErrorsPerSecond')\n transaction_key = transaction + '.' + 'DataThroughput'\n self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \\\n naarad.utils.calculate_stats(metric_store['thr'][transaction].values(),\n stats_to_calculate, percentiles_to_calculate)\n self.update_summary_stats(transaction_key)\n return None\n\n def parse(self):\n \"\"\"\n Parse the Jmeter file and calculate key stats\n\n :return: status of the metric parse\n \"\"\"\n file_status = True\n for infile in self.infile_list:\n file_status = file_status and naarad.utils.is_valid_file(infile)\n if not file_status:\n return False\n\n status = self.parse_xml_jtl(self.aggregation_granularity)\n gc.collect()\n return status\n\n def _sanitize_label(self, raw_label):\n return raw_label.replace('/', '_').replace('?', '_')\n\n def parse_xml_jtl(self, granularity):\n \"\"\"\n Parse Jmeter workload output in XML format and extract overall and per transaction data and key statistics\n\n :param string granularity: The time period over which to aggregate and average the raw data. 
Valid values are 'hour', 'minute' or 'second'\n :return: status of the metric parse\n \"\"\"\n data = defaultdict(list)\n processed_data = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))\n line_regex = re.compile(r' (lb|ts|t|by|s)=\"([^\"]+)\"')\n for input_file in self.infile_list:\n logger.info('Processing : %s', input_file)\n timestamp_format = None\n with open(input_file) as infile:\n for line in infile:\n if '<httpSample' not in line and '<sample' not in line:\n continue\n line_data = dict(re.findall(line_regex, line))\n if not timestamp_format or timestamp_format == 'unknown':\n timestamp_format = naarad.utils.detect_timestamp_format(line_data['ts'])\n if timestamp_format == 'unknown':\n continue\n ts = naarad.utils.get_standardized_timestamp(line_data['ts'], timestamp_format)\n if ts == -1:\n continue\n ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)\n aggregate_timestamp, averaging_factor = self.get_aggregation_timestamp(ts, granularity)\n self.aggregate_count_over_time(processed_data, line_data, [self._sanitize_label(line_data['lb']), 'Overall_Summary'], aggregate_timestamp)\n self.aggregate_values_over_time(processed_data, line_data, [self._sanitize_label(line_data['lb']), 'Overall_Summary'], ['t', 'by'], aggregate_timestamp)\n logger.info('Finished parsing : %s', input_file)\n logger.info('Processing metrics for output to csv')\n self.average_values_for_plot(processed_data, data, averaging_factor)\n logger.info('Writing time series csv')\n for csv in data.keys():\n self.csv_files.append(csv)\n with open(csv, 'w') as csvf:\n csvf.write('\\n'.join(sorted(data[csv])))\n logger.info('Processing raw data for stats')\n self.calculate_key_stats(processed_data)\n return True\n\n def calculate_stats(self):\n stats_csv = self.get_stats_csv()\n imp_metric_stats_csv = self.get_important_sub_metrics_csv()\n csv_header = 'sub_metric,mean,std. 
deviation,median,min,max,90%,95%,99%\\n'\n imp_csv_header = 'sub_metric,mean,std,p50,p75,p90,p95,p99,min,max\\n'\n with open(stats_csv,'w') as FH:\n FH.write(csv_header)\n for sub_metric in self.calculated_stats:\n percentile_data = self.calculated_percentiles[sub_metric]\n stats_data = self.calculated_stats[sub_metric]\n csv_data = ','.join([sub_metric,str(round(stats_data['mean'], 2)),str(round(stats_data['std'], 2)),str(round(stats_data['median'], 2)),str(round(stats_data['min'], 2)),str(round(stats_data['max'], 2)),str(round(percentile_data[90], 2)),str(round(percentile_data[95], 2)),str(round(percentile_data[99], 2))])\n FH.write(csv_data + '\\n')\n self.stats_files.append(stats_csv)\n for sub_metric in self.calculated_percentiles:\n percentiles_csv = self.get_csv(sub_metric,'percentiles')\n percentile_data = self.calculated_percentiles[sub_metric]\n with open(percentiles_csv,'w') as FH:\n for percentile in sorted(percentile_data):\n FH.write(str(percentile) + ',' + str(numpy.round_(percentile_data[percentile],2)) + '\\n')\n self.percentiles_files.append(percentiles_csv)\n with open(imp_metric_stats_csv, 'w') as FH_IMP:\n FH_IMP.write(csv_header)\n for sub_metric in self.important_sub_metrics:\n if sub_metric in self.calculated_stats.keys():\n percentile_data = self.calculated_percentiles[sub_metric]\n stats_data = self.calculated_stats[sub_metric]\n csv_data = ','.join([sub_metric,str(round(stats_data['mean'], 2)),str(round(stats_data['std'], 2)),str(round(stats_data['median'], 2)),str(round(stats_data['min'], 2)),str(round(stats_data['max'], 2)),str(round(percentile_data[90], 2)),str(round(percentile_data[95], 2)),str(round(percentile_data[99], 2))])\n FH_IMP.write(csv_data + '\\n')\n self.important_stats_files.append(imp_metric_stats_csv)\n\n def plot_timeseries(self, graphing_library='matplotlib'):\n if graphing_library != 'matplotlib':\n return Metric.plot_timeseries(self, graphing_library)\n else:\n logger.info('Using graphing_library {lib} for metric {name}'.format(lib=graphing_library, name=self.label))\n plot_data = {}\n # plot time series data for submetrics\n for out_csv in sorted(self.csv_files, reverse=True):\n csv_filename = os.path.basename(out_csv)\n # The last element is .csv, don't need that in the name of the chart\n column = csv_filename.split('.')[-2]\n transaction_name = ' '.join(csv_filename.split('.')[1:-2])\n plot = PD(input_csv=out_csv, csv_column=1, series_name=transaction_name, y_label=self.sub_metric_description[column] + ' (' + self.sub_metric_units[column] + ')', precision=None, graph_height=500, graph_width=1200, graph_type='line')\n if transaction_name in plot_data:\n plot_data[transaction_name].append(plot)\n else:\n plot_data[transaction_name] = [plot]\n for transaction in plot_data:\n graphed, div_file = Metric.graphing_modules[graphing_library].graph_data(plot_data[transaction], self.resource_directory, self.resource_path, self.label + '.' + transaction )\n if graphed:\n self.plot_files.append(div_file)\n return True\n"
] | [
[
"numpy.round_"
]
] |