Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
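Before the records themselves, a minimal sketch of how a dump with these columns might be iterated, assuming it is stored as JSON Lines; the filename `records.jsonl` and the treatment of hexsha/file_path/code/apis as parallel per-file lists are assumptions, not part of the dump.

```python
# Minimal sketch for iterating a dump with the columns above, assuming it is
# stored as JSON Lines. "records.jsonl" is a hypothetical filename, and the
# per-file alignment of the list columns is an assumption.
import json

with open("records.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "Project-Agni/Detection"
        for sha, path, code, apis in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            # One source file per entry; `apis` lists the calls detected in it.
            print(repo, path, sha[:8], len(code.splitlines()), "lines,", len(apis), "APIs")
```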
Project-Agni/Detection
[ "6b2c8ec25f8bd2bd15995d67f2808352cec9e2af" ]
[ "agents/usrl/vae.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom a2c_ppo_acktr.utils import init\n\nimport os\nimport numpy as np\nfrom torch.utils.data import RandomSampler, BatchSampler\nfrom .trainer import Trainer\nfrom .utils import EarlyStopping\n\n\nclass Unflatten(nn.Module):\n def __init__(self, new_shape):\n super().__init__()\n self.new_shape = new_shape\n\n def forward(self, x):\n x_uf = x.view(-1, *self.new_shape)\n return x_uf\n\n\nclass Decoder(nn.Module):\n def __init__(\n self,\n feature_size,\n final_conv_size,\n final_conv_shape,\n num_input_channels,\n encoder_type=\"Nature\",\n ):\n super().__init__()\n self.feature_size = feature_size\n self.final_conv_size = final_conv_size\n self.final_conv_shape = final_conv_shape\n self.num_input_channels = num_input_channels\n # self.fc =\n init_ = lambda m: init(\n m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n nn.init.calculate_gain(\"relu\"),\n )\n if encoder_type == \"Nature\":\n self.main = nn.Sequential(\n nn.Linear(\n in_features=self.feature_size, out_features=self.final_conv_size\n ),\n nn.ReLU(),\n Unflatten(self.final_conv_shape),\n init_(\n nn.ConvTranspose2d(\n in_channels=64,\n out_channels=128,\n kernel_size=3,\n stride=1,\n padding=0,\n )\n ),\n nn.ReLU(),\n init_(\n nn.ConvTranspose2d(\n in_channels=128,\n out_channels=64,\n kernel_size=4,\n stride=2,\n padding=0,\n )\n ),\n nn.ReLU(),\n init_(\n nn.ConvTranspose2d(\n in_channels=64,\n out_channels=32,\n kernel_size=4,\n stride=2,\n padding=0,\n output_padding=1,\n )\n ),\n nn.ReLU(),\n init_(\n nn.ConvTranspose2d(\n in_channels=32,\n out_channels=num_input_channels,\n kernel_size=8,\n stride=4,\n output_padding=(2, 0),\n )\n ),\n nn.Sigmoid(),\n )\n\n def forward(self, f):\n im = self.main(f)\n return im\n\n\nclass VAE(nn.Module):\n def __init__(self, encoder):\n super().__init__()\n self.encoder = encoder\n self.feature_size = self.encoder.feature_size\n self.final_conv_size = self.encoder.final_conv_size\n self.final_conv_shape = self.encoder.final_conv_shape\n self.input_channels = self.encoder.input_channels\n\n # self.mu_fc = nn.Linear(in_features=self.feature_size,\n # out_features=self.feature_size)\n\n self.logvar_fc = nn.Linear(\n in_features=self.final_conv_size, out_features=self.feature_size\n )\n\n self.decoder = Decoder(\n feature_size=self.feature_size,\n final_conv_size=self.final_conv_size,\n final_conv_shape=self.final_conv_shape,\n num_input_channels=self.input_channels,\n )\n\n def reparametrize(self, mu, logvar):\n if self.training:\n eps = torch.randn(*logvar.size()).to(mu.device)\n std = torch.exp(0.5 * logvar)\n z = mu + eps * std\n else:\n z = mu\n return z\n\n def forward(self, x):\n mu = self.encoder(x)\n logvar = self.logvar_fc(self.encoder.main[:-1](x))\n z = self.reparametrize(mu, logvar)\n x_hat = self.decoder(z)\n return x_hat, mu, logvar\n\n\nclass VAELoss(object):\n def __init__(self, beta=1.0):\n self.beta = beta\n\n def __call__(self, x, x_hat, mu, logvar):\n kldiv = -0.5 * torch.sum(1 + logvar - mu ** 2 - torch.exp(logvar))\n rec = F.mse_loss(x_hat, x, reduction=\"sum\")\n loss = rec + self.beta * kldiv\n return loss\n\n\nclass VAETrainer(Trainer):\n # TODO: Make it work for all modes, right now only it defaults to pcl.\n def __init__(self, encoder, config, device=torch.device(\"cpu\"), wandb=None):\n super().__init__(encoder, wandb, device)\n self.config = config\n self.patience = self.config[\"patience\"]\n self.VAE = VAE(encoder).to(device)\n self.epochs = config[\"epochs\"]\n self.batch_size 
= config[\"batch_size\"]\n self.device = device\n self.optimizer = torch.optim.Adam(\n list(self.VAE.parameters()), lr=config[\"lr\"], eps=1e-5\n )\n self.loss_fn = VAELoss(beta=self.config[\"beta\"])\n self.early_stopper = EarlyStopping(\n patience=self.patience, verbose=False, wandb=self.wandb, name=\"encoder\"\n )\n\n def generate_batch(self, episodes):\n total_steps = sum([len(e) for e in episodes])\n print(\"Total Steps: {}\".format(total_steps))\n # Episode sampler\n # Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch\n sampler = BatchSampler(\n RandomSampler(\n range(len(episodes)), replacement=True, num_samples=total_steps\n ),\n self.batch_size,\n drop_last=True,\n )\n for indices in sampler:\n episodes_batch = [episodes[x] for x in indices]\n x_t, x_tprev, x_that, ts, thats = [], [], [], [], []\n for episode in episodes_batch:\n # Get one sample from this episode\n t, t_hat = 0, 0\n t, t_hat = np.random.randint(0, len(episode)), np.random.randint(\n 0, len(episode)\n )\n x_t.append(episode[t])\n yield torch.stack(x_t).float().to(self.device) / 255.0\n\n def do_one_epoch(self, epoch, episodes):\n mode = \"train\" if self.VAE.training else \"val\"\n epoch_loss, accuracy, steps = 0.0, 0.0, 0\n data_generator = self.generate_batch(episodes)\n for x_t in data_generator:\n with torch.set_grad_enabled(mode == \"train\"):\n x_hat, mu, logvar = self.VAE(x_t)\n loss = self.loss_fn(x_t, x_hat, mu, logvar)\n\n if mode == \"train\":\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n epoch_loss += loss.detach().item()\n steps += 1\n self.log_results(epoch, epoch_loss / steps, prefix=mode)\n if mode == \"val\":\n self.early_stopper(-epoch_loss / steps, self.encoder)\n\n # xim = x_hat.detach().cpu().numpy()[0].transpose(1,2,0)\n # self.wandb.log({\"example_reconstruction\": [self.wandb.Image(xim, caption=\"\")]})\n\n def train(self, tr_eps, val_eps):\n for e in range(self.epochs):\n self.VAE.train()\n self.do_one_epoch(e, tr_eps)\n\n self.VAE.eval()\n self.do_one_epoch(e, val_eps)\n\n if self.early_stopper.early_stop:\n break\n torch.save(\n self.encoder.state_dict(),\n os.path.join(self.wandb.run.dir, self.config[\"env_name\"] + \".pt\"),\n )\n\n def log_results(self, epoch_idx, epoch_loss, prefix=\"\"):\n print(\n \"{} Epoch: {}, Epoch Loss: {}\".format(\n prefix.capitalize(), epoch_idx, epoch_loss\n )\n )\n self.wandb.log({prefix + \"_loss\": epoch_loss})\n" ]
[ [ "torch.nn.init.calculate_gain", "torch.nn.ConvTranspose2d", "torch.nn.init.constant_", "torch.nn.Sigmoid", "torch.exp", "torch.nn.functional.mse_loss", "torch.nn.Linear", "torch.set_grad_enabled", "torch.device", "torch.nn.ReLU", "torch.stack" ] ]
draven-agency/fedlearner
[ "d85eb50b2b43d9bd6b121bd9906eb0731533a615" ]
[ "fedlearner/trainer/estimator.py" ]
[ "# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n# pylint: disable=protected-access\n\nimport os\nimport logging\nimport time\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.compat.v1.train import Optimizer\nfrom tensorflow.compat.v1.estimator import ModeKeys\nfrom tensorflow_estimator.python.estimator import model_fn as model_fn_lib\n\nfrom fedlearner.common.etcd_client import EtcdClient\nfrom fedlearner.trainer import patch # pylint: disable=unused-import\n\n\nSYNC_PATH = '/sync/'\n\n\nclass FLModel(object):\n def __init__(self, role, bridge, example_ids, exporting=False):\n self._role = role\n self._bridge = bridge\n self._example_ids = example_ids\n self._exporting = exporting\n\n self._train_ops = []\n self._recvs = []\n self._sends = []\n self._outputs = []\n\n @property\n def train_ops(self):\n return self._train_ops\n\n @property\n def sends(self):\n return [(n, t) for n, t, _ in self._sends]\n\n @property\n def recvs(self):\n return [(n, t) for n, t, _ in self._recvs]\n\n def verify_example_ids(self):\n tensor = tf.strings.to_hash_bucket_fast(self._example_ids, 2**31 - 1)\n if self._role == 'leader':\n self.send('_verify_example_ids', tensor)\n else:\n recv_tensor = self.recv('_verify_example_ids', tensor.dtype)\n op = tf.assert_equal(tensor, recv_tensor)\n self._train_ops.append(op)\n\n def send(self, name, tensor, require_grad=False):\n with tf.control_dependencies([self._example_ids]):\n op = self._bridge.send_op(name, tensor)\n self._train_ops.append(op)\n self._sends.append((name, tensor, require_grad))\n if require_grad:\n return self.recv(name + '_grad', tensor.dtype)\n return None\n\n def recv(self, name, dtype=tf.float32, require_grad=False):\n with tf.control_dependencies([self._example_ids]):\n tensor = self._bridge.receive_op(name, dtype)\n self._recvs.append((name, tensor, require_grad))\n return tensor\n\n def minimize(self,\n optimizer,\n loss,\n global_step=None,\n var_list=None,\n gate_gradients=Optimizer.GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n name=None,\n grad_loss=None):\n recv_grads = [i for i in self._recvs if i[2]]\n\n if var_list is None:\n var_list = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)\n var_list = [v for _, v, _ in recv_grads] + var_list\n\n grads_and_vars = optimizer.compute_gradients(\n loss,\n var_list=var_list,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n grad_loss=grad_loss)\n\n send_grads = grads_and_vars[:len(recv_grads)]\n for (n, _, _), (grad, _) in zip(recv_grads, send_grads):\n if grad is not None:\n self.send(n + '_grad', grad)\n\n train_op = optimizer.apply_gradients(grads_and_vars[len(recv_grads):],\n global_step=global_step,\n name=name)\n\n return train_op\n\n def make_spec(self,\n mode,\n predictions=None,\n loss=None,\n train_op=None,\n 
eval_metric_ops=None,\n training_chief_hooks=None,\n training_hooks=None,\n evaluation_hooks=None,\n prediction_hooks=None):\n if isinstance(predictions, tf.Tensor):\n predictions = {'output': predictions}\n if mode == ModeKeys.TRAIN:\n train_op = tf.group([train_op] + self._train_ops)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n training_chief_hooks=training_chief_hooks,\n training_hooks=training_hooks,\n evaluation_hooks=evaluation_hooks,\n prediction_hooks=prediction_hooks)\n\n\nclass FLEstimator(object):\n def __init__(self,\n model_fn,\n bridge,\n trainer_master,\n role,\n worker_rank=0,\n cluster_spec=None):\n self._model_fn = model_fn\n self._bridge = bridge\n self._trainer_master = trainer_master\n self._role = role\n self._worker_rank = worker_rank\n self._cluster_spec = cluster_spec\n\n def _get_features_and_labels_from_input_fn(self, input_fn, mode):\n dataset = input_fn(self._bridge, self._trainer_master)\n features, labels = dataset.make_one_shot_iterator().get_next()\n return features, labels\n\n def _get_model_spec(self, features, labels, mode):\n model = FLModel(self._role, self._bridge,\n features.get('example_id', None),\n exporting=(mode == ModeKeys.PREDICT))\n spec = self._model_fn(model, features, labels, mode)\n return spec, model\n\n def _cheif_barriar(self, is_chief=False, sync_times=300):\n worker_replicas = os.environ.get('REPLICA_NUM', 0)\n etcd_client = EtcdClient(os.environ['ETCD_CLUSTER'],\n os.environ['ETCD_ADDRESS'], SYNC_PATH)\n sync_path = '%s/%s' % (os.environ['APPLICATION_ID'],\n os.environ['WORKER_RANK'])\n logging.info('Creating a sync flag at %s', sync_path)\n etcd_client.set_data(sync_path, 1)\n if is_chief:\n for _ in range(sync_times):\n sync_list = etcd_client.get_prefix_kvs(\n os.environ['APPLICATION_ID'])\n logging.info('Sync file pattern is: %s', sync_list)\n if len(sync_list) < worker_replicas:\n logging.info('Count of ready workers is %d',\n len(sync_list))\n time.sleep(6)\n else:\n break\n\n def train(self,\n input_fn,\n checkpoint_path=None,\n save_checkpoint_steps=None,\n save_checkpoint_secs=None):\n if self._cluster_spec is not None:\n device_fn = tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d\" % self._worker_rank,\n merge_devices=True,\n cluster=self._cluster_spec)\n cluster_def = self._cluster_spec.as_cluster_def()\n local_address = self._cluster_spec.job_tasks('worker')[\n self._worker_rank]\n server = tf.train.Server(tf.train.ClusterSpec(\n {'local': {\n 0: local_address\n }}),\n job_name='local',\n task_index=0)\n target = 'grpc://' + local_address\n else:\n device_fn = None\n cluster_def = None\n target = None\n\n config = tf.ConfigProto(cluster_def=cluster_def)\n config.inter_op_parallelism_threads = 4\n config.intra_op_parallelism_threads = 4\n config.experimental.share_session_state_in_clusterspec_propagation \\\n = True\n tf.config.set_soft_device_placement(False)\n\n with tf.Graph().as_default() as g:\n with tf.device(device_fn):\n features, labels = self._get_features_and_labels_from_input_fn(\n input_fn, ModeKeys.TRAIN)\n spec, _ = self._get_model_spec(features, labels, ModeKeys.TRAIN)\n\n # Explicitly add a Saver\n if not tf.get_collection(tf.GraphKeys.SAVERS):\n saver = tf.train.Saver(\n sharded=True,\n defer_build=True,\n save_relative_paths=True) # Must set for portability\n tf.add_to_collection(tf.GraphKeys.SAVERS, saver)\n\n self._bridge.connect()\n\n try:\n with 
tf.train.MonitoredTrainingSession(\n master=target,\n config=config,\n is_chief=(self._worker_rank == 0),\n checkpoint_dir=checkpoint_path,\n save_checkpoint_steps=save_checkpoint_steps,\n save_checkpoint_secs=save_checkpoint_secs,\n hooks=spec.training_hooks) as sess:\n iter_id = 0\n while not sess.should_stop():\n self._bridge.start(iter_id)\n logging.debug('after bridge start.')\n sess.run(spec.train_op, feed_dict={})\n logging.debug('after session run.')\n self._bridge.commit()\n logging.debug('after bridge commit.')\n iter_id += 1\n if self._cluster_spec is not None:\n self._cheif_barriar(is_chief=(self._worker_rank == 0))\n finally:\n self._bridge.terminate()\n\n return self\n\n def evaluate(self,\n input_fn,\n checkpoint_path=None):\n if not tf.train.latest_checkpoint(checkpoint_path):\n raise ValueError(\n \"Could not find trained model at %s\" % checkpoint_path)\n\n with tf.Graph().as_default():\n features, labels = self._get_features_and_labels_from_input_fn(\n input_fn, ModeKeys.EVAL)\n spec, model = self._get_model_spec(features, labels, ModeKeys.EVAL)\n\n # Track the average loss in default\n eval_metric_ops = spec.eval_metric_ops or {}\n if model_fn_lib.LOSS_METRIC_KEY not in eval_metric_ops:\n loss_metric = tf.metrics.mean(spec.loss)\n eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric\n\n # Create the real eval op\n update_ops, eval_dict = _extract_metric_update_ops(eval_metric_ops)\n update_ops.extend(model._train_ops)\n eval_op = tf.group(*update_ops)\n\n # Also track the global step\n if tf.GraphKeys.GLOBAL_STEP in eval_dict:\n raise ValueError(\n 'Metric with name `global_step` is not allowed, because '\n 'Estimator already defines a default metric with the '\n 'same name.')\n eval_dict[tf.GraphKeys.GLOBAL_STEP] = \\\n tf.train.get_or_create_global_step()\n\n # Prepare the session creator.\n scaffold = tf.train.Scaffold()\n session_creator = tf.train.ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_dir=checkpoint_path)\n\n # Prepare hooks\n all_hooks = list(spec.evaluation_hooks) or []\n final_ops_hook = tf.train.FinalOpsHook(eval_dict)\n all_hooks.append(final_ops_hook)\n\n # Evaluate over dataset\n self._bridge.connect()\n try:\n with tf.train.MonitoredSession(\n session_creator=session_creator, hooks=all_hooks) as sess:\n iter_id = 0\n while not sess.should_stop():\n self._bridge.start(iter_id)\n logging.debug('after bridge start.')\n sess.run(eval_op)\n logging.debug('after session run.')\n self._bridge.commit()\n logging.debug('after bridge commit.')\n iter_id += 1\n finally:\n self._bridge.terminate()\n\n # Print result\n logging.info('Metrics for iteration %d: %s',\n iter_id, _dict_to_str(final_ops_hook.final_ops_values))\n return final_ops_hook.final_ops_values\n\n def export_saved_model(self,\n export_dir_base,\n serving_input_receiver_fn,\n checkpoint_path=None):\n with tf.Graph().as_default():\n receiver = serving_input_receiver_fn()\n spec, model = self._get_model_spec(receiver.features, None,\n ModeKeys.PREDICT)\n assert not model.sends, \"Exported model cannot send\"\n assert not model.recvs, \"Exported model cannot receive\"\n\n with tf.Session() as sess:\n saver_for_restore = tf.train.Saver(sharded=True)\n saver_for_restore.restore(\n sess, tf.train.latest_checkpoint(checkpoint_path))\n tf.saved_model.simple_save(sess, export_dir_base,\n receiver.receiver_tensors,\n spec.predictions, None)\n\n return export_dir_base\n\n\ndef _extract_metric_update_ops(eval_dict):\n \"\"\"Separate update operations from metric value operations.\"\"\"\n 
update_ops = []\n value_ops = {}\n # Sort metrics lexicographically so graph is identical every time.\n for name in sorted(eval_dict.keys()):\n metric_tensor, update_op = eval_dict[name]\n value_ops[name] = metric_tensor\n update_ops.append(update_op)\n return update_ops, value_ops\n\n\ndef _dict_to_str(dictionary):\n \"\"\"Get a `str` representation of a `dict`.\n\n Args:\n dictionary: The `dict` to be represented as `str`.\n\n Returns:\n A `str` representing the `dictionary`.\n \"\"\"\n return ', '.join('%s = %s' % (k, v)\n for k, v in sorted(dictionary.items())\n if not isinstance(v, bytes))\n" ]
[ [ "tensorflow.compat.v1.metrics.mean", "tensorflow.compat.v1.train.MonitoredTrainingSession", "tensorflow.compat.v1.group", "tensorflow.compat.v1.train.Scaffold", "tensorflow.compat.v1.train.MonitoredSession", "tensorflow.compat.v1.train.replica_device_setter", "tensorflow.compat.v1.config.set_soft_device_placement", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.strings.to_hash_bucket_fast", "tensorflow.compat.v1.train.ChiefSessionCreator", "tensorflow.compat.v1.estimator.EstimatorSpec", "tensorflow.compat.v1.saved_model.simple_save", "tensorflow.compat.v1.train.ClusterSpec", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.train.latest_checkpoint", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.device", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v1.assert_equal", "tensorflow.compat.v1.train.FinalOpsHook" ] ]
Siemwind/PolarSeg
[ "63f912ea5a0edc7ae7128752ba5339ff33e2584f" ]
[ "network/ptBEV.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport numba as nb\nimport multiprocessing\nimport torch_scatter\n\nclass ptBEVnet(nn.Module):\n \n def __init__(self, BEV_net, grid_size, pt_model = 'pointnet', fea_dim = 3, pt_pooling = 'max', kernal_size = 3,\n out_pt_fea_dim = 64, max_pt_per_encode = 64, cluster_num = 4, pt_selection = 'farthest', fea_compre = None):\n super(ptBEVnet, self).__init__()\n assert pt_pooling in ['max']\n assert pt_selection in ['random','farthest']\n \n if pt_model == 'pointnet':\n self.PPmodel = nn.Sequential(\n nn.BatchNorm1d(fea_dim),\n \n nn.Linear(fea_dim, 64),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n \n nn.Linear(64, 128),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n \n nn.Linear(128, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(inplace=True),\n \n nn.Linear(256, out_pt_fea_dim)\n )\n \n self.pt_model = pt_model\n self.BEV_model = BEV_net\n self.pt_pooling = pt_pooling\n self.max_pt = max_pt_per_encode\n self.pt_selection = pt_selection\n self.fea_compre = fea_compre\n self.grid_size = grid_size\n \n # NN stuff\n if kernal_size != 1:\n if self.pt_pooling == 'max':\n self.local_pool_op = torch.nn.MaxPool2d(kernal_size, stride=1, padding=(kernal_size-1)//2, dilation=1)\n else: raise NotImplementedError\n else: self.local_pool_op = None\n \n # parametric pooling \n if self.pt_pooling == 'max':\n self.pool_dim = out_pt_fea_dim\n \n # point feature compression\n if self.fea_compre is not None:\n self.fea_compression = nn.Sequential(\n nn.Linear(self.pool_dim, self.fea_compre),\n nn.ReLU())\n self.pt_fea_dim = self.fea_compre\n else:\n self.pt_fea_dim = self.pool_dim\n \n def forward(self, pt_fea, xy_ind, voxel_fea=None):\n cur_dev = pt_fea[0].get_device()\n \n # concate everything\n cat_pt_ind = []\n for i_batch in range(len(xy_ind)):\n cat_pt_ind.append(F.pad(xy_ind[i_batch],(1,0),'constant',value = i_batch))\n\n cat_pt_fea = torch.cat(pt_fea,dim = 0)\n cat_pt_ind = torch.cat(cat_pt_ind,dim = 0)\n pt_num = cat_pt_ind.shape[0]\n\n # shuffle the data\n shuffled_ind = torch.randperm(pt_num,device = cur_dev)\n cat_pt_fea = cat_pt_fea[shuffled_ind,:]\n cat_pt_ind = cat_pt_ind[shuffled_ind,:]\n \n # unique xy grid index\n unq, unq_inv, unq_cnt = torch.unique(cat_pt_ind,return_inverse=True, return_counts=True, dim=0)\n unq = unq.type(torch.int64)\n \n # subsample pts\n if self.pt_selection == 'random':\n grp_ind = grp_range_torch(unq_cnt,cur_dev)[torch.argsort(torch.argsort(unq_inv))]\n remain_ind = grp_ind < self.max_pt\n elif self.pt_selection == 'farthest':\n unq_ind = np.split(np.argsort(unq_inv.detach().cpu().numpy()), np.cumsum(unq_cnt.detach().cpu().numpy()[:-1]))\n remain_ind = np.zeros((pt_num,),dtype = np.bool)\n np_cat_fea = cat_pt_fea.detach().cpu().numpy()[:,:3]\n pool_in = []\n for i_inds in unq_ind:\n if len(i_inds) > self.max_pt:\n pool_in.append((np_cat_fea[i_inds,:],self.max_pt))\n if len(pool_in) > 0:\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n FPS_results = pool.starmap(parallel_FPS, pool_in)\n pool.close()\n pool.join()\n count = 0\n for i_inds in unq_ind:\n if len(i_inds) <= self.max_pt:\n remain_ind[i_inds] = True\n else:\n remain_ind[i_inds[FPS_results[count]]] = True\n count += 1\n \n cat_pt_fea = cat_pt_fea[remain_ind,:]\n cat_pt_ind = cat_pt_ind[remain_ind,:]\n unq_inv = unq_inv[remain_ind]\n unq_cnt = torch.clamp(unq_cnt,max=self.max_pt)\n \n # process feature\n if self.pt_model == 'pointnet':\n processed_cat_pt_fea = 
self.PPmodel(cat_pt_fea)\n \n if self.pt_pooling == 'max':\n pooled_data = torch_scatter.scatter_max(processed_cat_pt_fea, unq_inv, dim=0)[0]\n else: raise NotImplementedError\n \n if self.fea_compre:\n processed_pooled_data = self.fea_compression(pooled_data)\n else:\n processed_pooled_data = pooled_data\n \n # stuff pooled data into 4D tensor\n out_data_dim = [len(pt_fea),self.grid_size[0],self.grid_size[1],self.pt_fea_dim]\n out_data = torch.zeros(out_data_dim, dtype=torch.float32).to(cur_dev)\n out_data[unq[:,0],unq[:,1],unq[:,2],:] = processed_pooled_data\n out_data = out_data.permute(0,3,1,2)\n if self.local_pool_op != None:\n out_data = self.local_pool_op(out_data)\n if voxel_fea is not None:\n out_data = torch.cat((out_data, voxel_fea), 1)\n \n # run through network\n net_return_data = self.BEV_model(out_data)\n \n return net_return_data\n \ndef grp_range_torch(a,dev):\n idx = torch.cumsum(a,0)\n id_arr = torch.ones(idx[-1],dtype = torch.int64,device=dev)\n id_arr[0] = 0\n id_arr[idx[:-1]] = -a[:-1]+1\n return torch.cumsum(id_arr,0)\n\ndef parallel_FPS(np_cat_fea,K):\n return nb_greedy_FPS(np_cat_fea,K)\n\[email protected]('b1[:](f4[:,:],i4)',nopython=True,cache=True)\ndef nb_greedy_FPS(xyz,K):\n start_element = 0\n sample_num = xyz.shape[0]\n sum_vec = np.zeros((sample_num,1),dtype = np.float32)\n xyz_sq = xyz**2\n for j in range(sample_num):\n sum_vec[j,0] = np.sum(xyz_sq[j,:])\n pairwise_distance = sum_vec + np.transpose(sum_vec) - 2*np.dot(xyz, np.transpose(xyz))\n \n candidates_ind = np.zeros((sample_num,),dtype = np.bool_)\n candidates_ind[start_element] = True\n remain_ind = np.ones((sample_num,),dtype = np.bool_)\n remain_ind[start_element] = False\n all_ind = np.arange(sample_num)\n \n for i in range(1,K):\n if i == 1:\n min_remain_pt_dis = pairwise_distance[:,start_element]\n min_remain_pt_dis = min_remain_pt_dis[remain_ind]\n else:\n cur_dis = pairwise_distance[remain_ind,:]\n cur_dis = cur_dis[:,candidates_ind]\n min_remain_pt_dis = np.zeros((cur_dis.shape[0],),dtype = np.float32)\n for j in range(cur_dis.shape[0]):\n min_remain_pt_dis[j] = np.min(cur_dis[j,:])\n next_ind_in_remain = np.argmax(min_remain_pt_dis)\n next_ind = all_ind[remain_ind][next_ind_in_remain]\n candidates_ind[next_ind] = True\n remain_ind[next_ind] = False\n \n return candidates_ind" ]
[ [ "torch.cat", "torch.randperm", "torch.zeros", "torch.unique", "torch.clamp", "torch.ones", "numpy.arange", "numpy.argmax", "torch.argsort", "numpy.zeros", "torch.nn.functional.pad", "torch.nn.BatchNorm1d", "numpy.min", "torch.nn.Linear", "numpy.transpose", "numpy.sum", "numpy.ones", "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.cumsum" ] ]
chrisrichardson/libtab
[ "1f6593409bf51427bd6d8d1036bb885f5fbb7a8c" ]
[ "test/test_regge.py" ]
[ "# Copyright (c) 2020 Chris Richardson\n# FEniCS Project\n# SPDX-License-Identifier: MIT\n\nimport libtab\nimport numpy as np\n\n\ndef test_regge_tri():\n # Simplest element\n regge = libtab.Regge(\"triangle\", 1)\n\n # tabulate at origin\n pts = [[0.0, 0.0]]\n w = regge.tabulate(0, pts)[0]\n w = w.reshape((4, -1)).transpose().reshape(-1, 2, 2)\n\n ref = np.array([[[-0., 0.5],\n [0.5, -0.]],\n\n [[0., 0.5],\n [0.5, -0.]],\n\n [[-0., 1.],\n [1., 2.]],\n\n [[-0., -0.5],\n [-0.5, -1.]],\n\n [[2., 1.],\n [1., -0.]],\n\n [[-1., -0.5],\n [-0.5, 0.]],\n\n [[-0., 0.],\n [0., 0.]],\n\n [[0., -0.],\n [-0., -0.]],\n\n [[-0., -1.5],\n [-1.5, 0.]]])\n\n assert(np.isclose(ref, w).all())\n\n\ndef test_regge_tri2():\n # Second order\n regge = libtab.Regge(\"triangle\", 2)\n # tabulate at origin\n pts = [[0.0, 0.0]]\n w = regge.tabulate(0, pts)[0]\n w = w.reshape((4, -1)).transpose().reshape(-1, 2, 2)\n\n ref = np.array([[[0., -0.5],\n [-0.5, 0.]],\n\n [[0., -0.5],\n [-0.5, -0.]],\n\n [[-0., -0.5],\n [-0.5, 0.]],\n\n [[-0., 1.5],\n [1.5, 3.]],\n\n [[0., -1.5],\n [-1.5, -3.]],\n\n [[-0., 0.5],\n [0.5, 1.]],\n\n [[3., 1.5],\n [1.5, -0.]],\n\n [[-3., -1.5],\n [-1.5, 0.]],\n\n [[1., 0.5],\n [0.5, -0.]],\n\n [[-0., -0.],\n [-0., -0.]],\n\n [[0., -0.],\n [-0., -0.]],\n\n [[0., -3.],\n [-3., 0.]],\n\n [[0., -0.],\n [-0., -0.]],\n\n [[-0., -0.],\n [-0., 0.]],\n\n [[-0., 2.],\n [2., -0.]],\n\n [[-0., 0.],\n [0., 0.]],\n\n [[0., 0.],\n [0., -0.]],\n\n [[0., 2.],\n [2., -0.]]])\n assert(np.isclose(ref, w).all())\n\n\ndef test_regge_tet():\n # Simplest element\n regge = libtab.Regge(\"tetrahedron\", 1)\n # tabulate at origin\n pts = [[0.0, 0.0, 0.0]]\n w = regge.tabulate(0, pts)[0]\n w = w.reshape((9, -1)).transpose().reshape(-1, 3, 3)\n\n ref = np.array([[[0., 0., 0.],\n [0., 0., 0.5],\n [0., 0.5, -0.]],\n\n [[-0., 0., -0.],\n [0., -0., 0.5],\n [-0., 0.5, 0.]],\n\n [[0., -0., 0.5],\n [-0., 0., 0.],\n [0.5, 0., 0.]],\n\n [[-0., 0., 0.5],\n [0., -0., 0.],\n [0.5, 0., 0.]],\n\n [[-0., 0.5, 0.],\n [0.5, -0., -0.],\n [0., -0., 0.]],\n\n [[0., 0.5, 0.],\n [0.5, -0., 0.],\n [0., 0., 0.]],\n\n [[0., 0., 1.],\n [0., 0., 1.],\n [1., 1., 2.]],\n\n [[0., -0., -0.5],\n [-0., 0., -0.5],\n [-0.5, -0.5, -1.]],\n\n [[0., 1., -0.],\n [1., 2., 1.],\n [-0., 1., -0.]],\n\n [[-0., -0.5, -0.],\n [-0.5, -1., -0.5],\n [-0., -0.5, -0.]],\n\n [[2., 1., 1.],\n [1., -0., 0.],\n [1., 0., 0.]],\n\n [[-1., -0.5, -0.5],\n [-0.5, -0., 0.],\n [-0.5, 0., -0.]],\n\n [[-0., 0., -0.],\n [0., -0., -0.],\n [-0., -0., -0.]],\n\n [[-0., -0., -0.],\n [-0., -0., -0.],\n [-0., -0., -0.]],\n\n [[-0., 0., -0.],\n [0., 0., -0.],\n [-0., -0., -0.]],\n\n [[0., -0., 0.],\n [-0., -0., -0.],\n [0., -0., 0.]],\n\n [[-0., -0., -0.],\n [-0., -0., 0.],\n [-0., 0., 0.]],\n\n [[-0., -0., -0.],\n [-0., -0., -1.5],\n [-0., -1.5, -0.]],\n\n [[0., -0., 0.],\n [-0., 0., -0.],\n [0., -0., 0.]],\n\n [[0., -0., -0.],\n [-0., -0., -0.],\n [-0., -0., 0.]],\n\n [[-0., 0., -1.5],\n [0., -0., -0.],\n [-1.5, -0., -0.]],\n\n [[-0., -0., -0.],\n [-0., 0., -0.],\n [-0., -0., -0.]],\n\n [[0., -0., -0.],\n [-0., -0., -0.],\n [-0., -0., 0.]],\n\n [[-0., -1.5, -0.],\n [-1.5, 0., -0.],\n [-0., -0., -0.]]])\n assert(np.isclose(ref, w).all())\n" ]
[ [ "numpy.array", "numpy.isclose" ] ]
danielpatrickhug/Top2Vec
[ "3f39d4d07f070b3879a928fec375b4bc4eba5a0c" ]
[ "top2vec/tests/test_top2vec.py" ]
[ "import pytest\nfrom top2vec.Top2Vec import Top2Vec\nfrom sklearn.datasets import fetch_20newsgroups\nimport numpy as np\nimport tempfile\n\n# get 20 newsgroups data\nnewsgroups_train = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))\nnewsgroups_documents = newsgroups_train.data[0:2000]\n\n# train top2vec model without doc_ids provided\ntop2vec = Top2Vec(documents=newsgroups_documents, speed=\"fast-learn\", workers=8)\n\n# train top2vec model with doc_ids provided\ndoc_ids = [str(num) for num in range(0, len(newsgroups_documents))]\ntop2vec_docids = Top2Vec(documents=newsgroups_documents, document_ids=doc_ids, speed=\"fast-learn\", workers=8)\n\n# train top2vec model without saving documents\ntop2vec_no_docs = Top2Vec(documents=newsgroups_documents, keep_documents=False, speed=\"fast-learn\", workers=8)\n\n# train top2vec model with corpus_file\ntop2vec_corpus_file = Top2Vec(documents=newsgroups_documents, use_corpus_file=True, speed=\"fast-learn\", workers=8)\n\n# test USE\ntop2vec_use = Top2Vec(documents=newsgroups_documents, embedding_model='universal-sentence-encoder')\n\n# test USE with model embedding\ntop2vec_use_model_embedding = Top2Vec(documents=newsgroups_documents,\n embedding_model='universal-sentence-encoder',\n use_embedding_model_tokenizer=True)\n\n# test USE-multilang\ntop2vec_use_multilang = Top2Vec(documents=newsgroups_documents,\n embedding_model='universal-sentence-encoder-multilingual')\n\n# test Sentence Transformer-multilang\ntop2vec_transformer_multilang = Top2Vec(documents=newsgroups_documents,\n embedding_model='distiluse-base-multilingual-cased')\n\n# test Sentence Transformer with model emebdding\ntop2vec_transformer_model_embedding = Top2Vec(documents=newsgroups_documents,\n embedding_model='distiluse-base-multilingual-cased',\n use_embedding_model_tokenizer=True)\n\nmodels = [top2vec, top2vec_docids, top2vec_no_docs, top2vec_corpus_file,\n top2vec_use, top2vec_use_multilang, top2vec_transformer_multilang,\n top2vec_use_model_embedding, top2vec_transformer_model_embedding]\n\n\[email protected]('top2vec_model', models)\ndef test_add_documents_original(top2vec_model):\n num_docs = top2vec_model.document_vectors.shape[0]\n\n docs_to_add = newsgroups_train.data[0:100]\n\n topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])\n\n if top2vec_model.document_ids_provided is False:\n top2vec_model.add_documents(docs_to_add)\n else:\n doc_ids_new = [str(num) for num in range(2000, 2000 + len(docs_to_add))]\n top2vec_model.add_documents(docs_to_add, doc_ids_new)\n\n topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])\n num_docs_new = top2vec_model.document_vectors.shape[0]\n\n assert topic_count_sum + len(docs_to_add) == topic_count_sum_new == num_docs + len(docs_to_add) \\\n == num_docs_new == len(top2vec_model.doc_top)\n\n if top2vec_model.documents is not None:\n assert num_docs_new == len(top2vec_model.documents)\n\n\[email protected]('top2vec_model', models)\ndef test_hierarchical_topic_reduction(top2vec_model):\n num_topics = top2vec_model.get_num_topics()\n\n if num_topics > 10:\n reduced_num = 10\n elif num_topics - 1 > 0:\n reduced_num = num_topics - 1\n\n hierarchy = top2vec_model.hierarchical_topic_reduction(reduced_num)\n\n assert len(hierarchy) == reduced_num == len(top2vec_model.topic_vectors_reduced)\n\n\[email protected]('top2vec_model', models)\ndef test_add_documents_post_reduce(top2vec_model):\n docs_to_add = newsgroups_train.data[500:600]\n\n num_docs = top2vec_model.document_vectors.shape[0]\n 
topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])\n topic_count_reduced_sum = sum(top2vec_model.get_topic_sizes(reduced=True)[0])\n\n if top2vec_model.document_ids_provided is False:\n top2vec_model.add_documents(docs_to_add)\n else:\n doc_ids_new = [str(num) for num in range(2100, 2100 + len(docs_to_add))]\n top2vec_model.add_documents(docs_to_add, doc_ids_new)\n\n topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])\n topic_count_reduced_sum_new = sum(top2vec_model.get_topic_sizes(reduced=True)[0])\n\n num_docs_new = top2vec_model.document_vectors.shape[0]\n\n assert topic_count_sum + len(docs_to_add) == topic_count_sum_new == topic_count_reduced_sum + len(docs_to_add) \\\n == topic_count_reduced_sum_new == num_docs + len(docs_to_add) == num_docs_new == len(top2vec_model.doc_top) \\\n == len(top2vec_model.doc_top_reduced)\n\n if top2vec_model.documents is not None:\n assert num_docs_new == len(top2vec_model.documents)\n\n\[email protected]('top2vec_model', models)\ndef test_delete_documents(top2vec_model):\n doc_ids_to_delete = list(range(500, 550))\n\n num_docs = top2vec_model.document_vectors.shape[0]\n topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])\n topic_count_reduced_sum = sum(top2vec_model.get_topic_sizes(reduced=True)[0])\n\n if top2vec_model.document_ids_provided is False:\n top2vec_model.delete_documents(doc_ids=doc_ids_to_delete)\n else:\n doc_ids_to_delete = [str(doc_id) for doc_id in doc_ids_to_delete]\n top2vec_model.delete_documents(doc_ids=doc_ids_to_delete)\n\n topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])\n topic_count_reduced_sum_new = sum(top2vec_model.get_topic_sizes(reduced=True)[0])\n num_docs_new = top2vec_model.document_vectors.shape[0]\n\n assert topic_count_sum - len(doc_ids_to_delete) == topic_count_sum_new == topic_count_reduced_sum - \\\n len(doc_ids_to_delete) == topic_count_reduced_sum_new == num_docs - len(doc_ids_to_delete) \\\n == num_docs_new == len(top2vec_model.doc_top) == len(top2vec_model.doc_top_reduced)\n\n if top2vec_model.documents is not None:\n assert num_docs_new == len(top2vec_model.documents)\n\n\[email protected]('top2vec_model', models)\ndef test_get_topic_hierarchy(top2vec_model):\n hierarchy = top2vec_model.get_topic_hierarchy()\n\n assert len(hierarchy) == len(top2vec_model.topic_vectors_reduced)\n\n\[email protected]('top2vec_model', models)\[email protected]('reduced', [False, True])\ndef test_get_num_topics(top2vec_model, reduced):\n # check that there are more than 0 topics\n assert top2vec_model.get_num_topics(reduced=reduced) > 0\n\n\[email protected]('top2vec_model', models)\[email protected]('reduced', [False, True])\ndef test_get_topics(top2vec_model, reduced):\n num_topics = top2vec_model.get_num_topics(reduced=reduced)\n words, word_scores, topic_nums = top2vec_model.get_topics(reduced=reduced)\n\n # check that for each topic there are words, word_scores and topic_nums\n assert len(words) == len(word_scores) == len(topic_nums) == num_topics\n\n # check that for each word there is a score\n assert len(words[0]) == len(word_scores[0])\n\n # check that topics words are returned in decreasing order\n topic_words_scores = word_scores[0]\n assert all(topic_words_scores[i] >= topic_words_scores[i + 1] for i in range(len(topic_words_scores) - 1))\n\n\[email protected]('top2vec_model', models)\[email protected]('reduced', [False, True])\ndef test_get_topic_size(top2vec_model, reduced):\n topic_sizes, topic_nums = top2vec_model.get_topic_sizes(reduced=reduced)\n\n # check that topic sizes 
add up to number of documents\n assert sum(topic_sizes) == top2vec_model.document_vectors.shape[0]\n\n # check that topics are ordered decreasingly\n assert all(topic_sizes[i] >= topic_sizes[i + 1] for i in range(len(topic_sizes) - 1))\n\n\[email protected]('top2vec_model', models)\[email protected]('reduced', [False, True])\ndef test_generate_topic_wordcloud(top2vec_model, reduced):\n # generate word cloud\n num_topics = top2vec_model.get_num_topics(reduced=reduced)\n top2vec_model.generate_topic_wordcloud(num_topics - 1, reduced=reduced)\n\n\[email protected]('top2vec_model', models)\[email protected]('reduced', [False, True])\ndef test_search_documents_by_topic(top2vec_model, reduced):\n # get topic sizes\n topic_sizes, topic_nums = top2vec_model.get_topic_sizes(reduced=reduced)\n topic = topic_nums[0]\n num_docs = topic_sizes[0]\n\n # search documents by topic\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.search_documents_by_topic(topic, num_docs,\n reduced=reduced)\n else:\n document_scores, document_ids = top2vec_model.search_documents_by_topic(topic, num_docs, reduced=reduced)\n\n # check that for each document there is a score and number\n if top2vec_model.documents is not None:\n assert len(documents) == len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n # check that all documents returned are most similar to topic being searched\n document_indexes = [top2vec_model.doc_id2index[doc_id] for doc_id in document_ids]\n\n if reduced:\n doc_topics = set(np.argmax(\n np.inner(top2vec_model.document_vectors[document_indexes],\n top2vec_model.topic_vectors_reduced), axis=1))\n else:\n doc_topics = set(np.argmax(\n np.inner(top2vec_model.document_vectors[document_indexes],\n top2vec_model.topic_vectors), axis=1))\n assert len(doc_topics) == 1 and topic in doc_topics\n\n\[email protected]('top2vec_model', models)\ndef test_search_documents_by_keywords(top2vec_model):\n keywords = top2vec_model.vocab\n keyword = keywords[-1]\n num_docs = 10\n\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],\n num_docs=num_docs)\n else:\n document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],\n num_docs=num_docs)\n\n # check that for each document there is a score and number\n if top2vec_model.documents is not None:\n assert len(documents) == len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_similar_words(top2vec_model):\n keywords = top2vec_model.vocab\n keyword = keywords[-1]\n num_words = 20\n\n words, word_scores = top2vec_model.similar_words(keywords=[keyword], num_words=num_words)\n\n # check that there is a score for each word\n assert len(words) == len(word_scores) == num_words\n\n # check that words are returned in decreasing order\n assert all(word_scores[i] >= word_scores[i + 1] for i in range(len(word_scores) - 1))\n\n\[email protected]('top2vec_model', models)\[email 
protected]('reduced', [False, True])\ndef test_search_topics(top2vec_model, reduced):\n num_topics = top2vec_model.get_num_topics(reduced=reduced)\n keywords = top2vec_model.vocab\n keyword = keywords[-1]\n topic_words, word_scores, topic_scores, topic_nums = top2vec_model.search_topics(keywords=[keyword],\n num_topics=num_topics,\n reduced=reduced)\n # check that for each topic there are topic words, word scores, topic scores and score of topic\n assert len(topic_words) == len(word_scores) == len(topic_scores) == len(topic_nums) == num_topics\n\n # check that for each topic words have scores\n assert len(topic_words[0]) == len(word_scores[0])\n\n # check that topics are returned in decreasing order\n assert all(topic_scores[i] >= topic_scores[i + 1] for i in range(len(topic_scores) - 1))\n\n # check that topics words are returned in decreasing order\n topic_words_scores = word_scores[0]\n assert all(topic_words_scores[i] >= topic_words_scores[i + 1] for i in range(len(topic_words_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_search_document_by_documents(top2vec_model):\n doc_id = top2vec_model.document_ids[0]\n\n num_docs = 10\n\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],\n num_docs=num_docs)\n else:\n document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],\n num_docs=num_docs)\n\n # check that for each document there is a score and number\n if top2vec_model.documents is not None:\n assert len(documents) == len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_get_documents_topics(top2vec_model):\n doc_ids_get = top2vec_model.document_ids[[0, 5]]\n\n if top2vec_model.hierarchy is not None:\n doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get,\n reduced=True)\n else:\n doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get)\n\n assert len(doc_topics) == len(doc_dist) == len(topic_words) == len(topic_word_scores) == len(doc_ids_get)\n\n\[email protected]('top2vec_model', models)\ndef test_get_documents_topics_multiple(top2vec_model):\n doc_ids_get = top2vec_model.document_ids[[0, 1, 5]]\n num_topics = 2\n\n if top2vec_model.hierarchy is not None:\n doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get,\n reduced=True,\n num_topics=num_topics)\n\n actual_number_topics = top2vec_model.get_num_topics(reduced=True)\n\n else:\n doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get,\n num_topics=num_topics)\n\n actual_number_topics = top2vec_model.get_num_topics(reduced=False)\n\n assert len(doc_topics) == len(doc_dist) == len(topic_words) == len(topic_word_scores) == len(doc_ids_get)\n\n if num_topics <= actual_number_topics:\n assert doc_topics.shape[1] == num_topics\n assert doc_dist.shape[1] == num_topics\n assert topic_words.shape[1] == num_topics\n assert topic_word_scores.shape[1] == num_topics\n\n\[email protected]('top2vec_model', models)\ndef test_search_documents_by_vector(top2vec_model):\n document_vectors = 
top2vec_model.document_vectors\n top2vec_model.search_documents_by_vector(vector=document_vectors[0], num_docs=10)\n\n num_docs = 10\n\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.search_documents_by_vector(vector=document_vectors[0],\n num_docs=num_docs)\n else:\n document_scores, document_ids = top2vec_model.search_documents_by_vector(vector=document_vectors[0],\n num_docs=num_docs)\n if top2vec_model.documents is not None:\n assert len(documents) == len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_index_documents(top2vec_model):\n top2vec_model.index_document_vectors()\n assert top2vec_model.document_vectors.shape[1] <= top2vec_model.document_index.get_max_elements()\n\n\[email protected]('top2vec_model', models)\ndef test_search_documents_by_vector_index(top2vec_model):\n document_vectors = top2vec_model.document_vectors\n top2vec_model.search_documents_by_vector(vector=document_vectors[0], num_docs=10)\n\n num_docs = 10\n\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.search_documents_by_vector(vector=document_vectors[0],\n num_docs=num_docs,\n use_index=True)\n else:\n document_scores, document_ids = top2vec_model.search_documents_by_vector(vector=document_vectors[0],\n num_docs=num_docs,\n use_index=True)\n if top2vec_model.documents is not None:\n assert len(documents) == len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_search_documents_by_keywords_index(top2vec_model):\n keywords = top2vec_model.vocab\n keyword = keywords[-1]\n num_docs = 10\n\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],\n num_docs=num_docs,\n use_index=True)\n else:\n document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],\n num_docs=num_docs,\n use_index=True)\n\n # check that for each document there is a score and number\n if top2vec_model.documents is not None:\n assert len(documents) == len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_search_document_by_documents_index(top2vec_model):\n doc_id = top2vec_model.document_ids[0]\n\n num_docs = 10\n\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],\n num_docs=num_docs,\n use_index=True)\n else:\n document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],\n num_docs=num_docs,\n use_index=True)\n\n # check that for each document there is a score and number\n if top2vec_model.documents is not None:\n assert len(documents) == 
len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_search_words_by_vector(top2vec_model):\n word_vectors = top2vec_model.word_vectors\n top2vec_model.search_words_by_vector(vector=word_vectors[0], num_words=10)\n\n num_words = 10\n\n words, word_scores = top2vec_model.search_words_by_vector(vector=word_vectors[0],\n num_words=num_words)\n\n # check that there is a score for each word\n assert len(words) == len(word_scores) == num_words\n\n # check that words are returned in decreasing order\n assert all(word_scores[i] >= word_scores[i + 1] for i in range(len(word_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_index_words(top2vec_model):\n top2vec_model.index_word_vectors()\n assert top2vec_model.word_vectors.shape[1] <= top2vec_model.word_index.get_max_elements()\n\n\[email protected]('top2vec_model', models)\ndef test_similar_words_index(top2vec_model):\n keywords = top2vec_model.vocab\n keyword = keywords[-1]\n num_words = 20\n\n words, word_scores = top2vec_model.similar_words(keywords=[keyword], num_words=num_words, use_index=True)\n\n # check that there is a score for each word\n assert len(words) == len(word_scores) == num_words\n\n # check that words are returned in decreasing order\n assert all(word_scores[i] >= word_scores[i + 1] for i in range(len(word_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_similar_words_index(top2vec_model):\n temp = tempfile.NamedTemporaryFile(mode='w+b')\n top2vec_model.save(temp.name)\n Top2Vec.load(temp.name)\n temp.close()\n\n\[email protected]('top2vec_model', models)\ndef test_query_documents(top2vec_model):\n num_docs = 10\n\n if top2vec_model.documents is not None:\n documents, document_scores, document_ids = top2vec_model.query_documents(query=\"what is the meaning of life?\",\n num_docs=num_docs)\n else:\n document_scores, document_ids = top2vec_model.query_documents(query=\"what is the meaning of life?\",\n num_docs=num_docs)\n\n # check that for each document there is a score and number\n if top2vec_model.documents is not None:\n assert len(documents) == len(document_scores) == len(document_ids) == num_docs\n else:\n assert len(document_scores) == len(document_ids) == num_docs\n\n # check that documents are returned in decreasing order\n assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))\n\n\[email protected]('top2vec_model', models)\ndef test_query_topics(top2vec_model):\n num_topics = top2vec_model.get_num_topics()\n topic_words, word_scores, topic_scores, topic_nums = top2vec_model.query_topics(query=\"what is the \"\n \"meaning of life?\",\n num_topics=num_topics)\n\n # check that for each topic there are topic words, word scores, topic scores and score of topic\n assert len(topic_words) == len(word_scores) == len(topic_scores) == len(topic_nums) == num_topics\n\n # check that for each topic words have scores\n assert len(topic_words[0]) == len(word_scores[0])\n\n # check that topics are returned in decreasing order\n assert all(topic_scores[i] >= topic_scores[i + 1] for i in range(len(topic_scores) - 1))\n\n # check that topics words are returned in decreasing order\n topic_words_scores = word_scores[0]\n assert all(topic_words_scores[i] >= 
topic_words_scores[i + 1] for i in range(len(topic_words_scores) - 1))\n" ]
[ [ "numpy.inner", "sklearn.datasets.fetch_20newsgroups" ] ]
GeneZC/AlphaPoet
[ "82715e9cc36aedfa78c250a7a7f8129669eea440" ]
[ "main.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport random\nimport pickle\nimport os\nfrom dataloader import Gen_Data_loader, Dis_Data_loader\nfrom generator import Generator\nfrom discriminator import BLEUCNN\nfrom rollout import ROLLOUT\nfrom mobilenet import MobileNet\n\nEMB_DIM = 300 # embedding dimension\nHIDDEN_DIM = 300 # hidden state dimension of lstm cell\nSEQ_LENGTH = 12 # sequence length\nSTART_TOKEN = 0\nPRE_EPOCH_NUM = 1 # supervise (maximum likelihood estimation) epochs\nSEED = 88\nBATCH_SIZE = 64\n\nRL_EPOCH_NUM = 30\nDICO_PKL = 'dict.pkl'\nDICO= 'tang.txt'\nCORPUS = 'data/temp.txt'\nIMAGE = 'data/temp/'\n\ndef create_dico(filename):\n dico = {}\n dico['unk'] = 1000000\n dico['sos'] = 1000001\n with open(filename, 'r', encoding='utf-8') as f:\n for line in f:\n for word in line.strip():\n if word != ' ':\n if word not in dico:\n dico[word] = 1\n else:\n dico[word] += 1\n sorted_items = sorted(dico.items(), key=lambda x: (x[1], x[0]), reverse=True)\n id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}\n item_to_id = {v: k for k, v in id_to_item.items()}\n return item_to_id, id_to_item\n\ndef create_data(filename, word_to_id):\n inp_stream = []\n with open(filename, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line == '':\n continue\n inp_stream.append([word_to_id[line[word_idx]] for word_idx in range(SEQ_LENGTH)])\n return inp_stream\n\ndef main():\n random.seed(SEED)\n np.random.seed(SEED)\n\n if os.path.exists(DICO_PKL):\n with open(DICO_PKL, 'rb') as f:\n word_to_id, id_to_word = pickle.load(f)\n else:\n word_to_id, id_to_word = create_dico(DICO)\n with open(DICO_PKL, 'wb') as f:\n pickle.dump([word_to_id, id_to_word], f)\n \n gen_data_loader = Gen_Data_loader(BATCH_SIZE, word_to_id)\n dis_data_loader = Dis_Data_loader(BATCH_SIZE, word_to_id)\n vocab_size = len(word_to_id)\n assert START_TOKEN == word_to_id['sos']\n\n generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN)\n discriminator = BLEUCNN(SEQ_LENGTH, 2, EMB_DIM, generator)\n mobilenet = MobileNet(BATCH_SIZE)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n mobilenet.load_pretrained_weights(sess)\n sess.run(tf.global_variables_initializer())\n\n log = open('experiment-log.txt', 'w', encoding='utf-8')\n # pre-train generator and discriminator\n log.write('pre-training...\\n')\n print('Start pre-training discriminator...')\n datas = create_data(DICO, word_to_id)\n gen_data_loader.create_batches(CORPUS, IMAGE)\n samples = []\n for it in range(gen_data_loader.num_batch):\n inp_batch, image_batch = gen_data_loader.next_batch()\n feed_dict = {\n mobilenet.X: image_batch,\n mobilenet.is_training: False \n }\n hidden_batch = sess.run(mobilenet.y_output, feed_dict=feed_dict)\n samples.extend(generator.generate(sess, hidden_batch).tolist())\n dis_data_loader.create_batches(random.sample(datas, 3000), samples)\n for _ in range(PRE_EPOCH_NUM):\n dis_data_loader.reset_pointer()\n for it in range(dis_data_loader.num_batch):\n x_batch, labels = dis_data_loader.next_batch()\n feed = {\n discriminator.input_x: x_batch,\n discriminator.labels: labels,\n discriminator.dropout_keep_prob: 0.75\n }\n _ = sess.run(discriminator.train_op, feed)\n\n print('Start pre-training generator...')\n for epoch in range(PRE_EPOCH_NUM):\n supervised_g_losses = []\n gen_data_loader.reset_pointer()\n for it in range(gen_data_loader.num_batch):\n inp_batch, image_batch = gen_data_loader.next_batch()\n feed_dict = {\n 
mobilenet.X: image_batch,\n mobilenet.is_training: False \n }\n hidden_batch = sess.run(mobilenet.y_output, feed_dict=feed_dict)\n _, g_loss = generator.pretrain_step(sess, inp_batch, hidden_batch)\n supervised_g_losses.append(g_loss)\n loss = np.mean(supervised_g_losses)\n if epoch % 5 == 0:\n print('pre-train epoch ', epoch, 'train_loss ', loss)\n buffer = 'epoch:\\t'+ str(epoch) + '\\ttrain_loss:\\t' + str(loss) + '\\n'\n log.write(buffer)\n\n rollout = ROLLOUT(generator, 0.8)\n\n print('#########################################################################')\n print('Start REINFORCE Training...')\n log.write('REINFORCE training...\\n')\n for total_batch in range(RL_EPOCH_NUM):\n gen_data_loader.reset_pointer()\n for it in range(gen_data_loader.num_batch):\n ra = random.randint(0, 1)\n inp_batch, image_batch = gen_data_loader.next_batch(shuffle=ra)\n feed_dict = {\n mobilenet.X: image_batch,\n mobilenet.is_training: False \n }\n hidden_batch = sess.run(mobilenet.y_output, feed_dict=feed_dict)\n samples = generator.generate(sess, hidden_batch)\n rewards = rollout.get_reward(sess, samples, hidden_batch, 16, discriminator)\n feed = {generator.x:inp_batch, generator.rewards: rewards, generator.hiddens:hidden_batch}\n _ = sess.run(generator.g_updates, feed_dict=feed)\n\n # Test\n if total_batch % 5 == 0 or total_batch == RL_EPOCH_NUM - 1:\n mean_rewards = []\n gen_data_loader.reset_pointer()\n for it in range(gen_data_loader.num_batch):\n inp_batch, image_batch = gen_data_loader.next_batch()\n feed_dict = {\n mobilenet.X: image_batch,\n mobilenet.is_training: False \n }\n hidden_batch = sess.run(mobilenet.y_output, feed_dict=feed_dict)\n samples = generator.generate(sess, hidden_batch)\n rewards = rollout.get_reward(sess, samples, hidden_batch, 16, discriminator)\n mean_rewards.append(np.mean(rewards[:,-1]))\n reward = np.mean(mean_rewards)\n buffer = 'epoch:\\t' + str(total_batch) + '\\treward:\\t' + str(reward) + '\\n'\n print('total_batch: ', total_batch, 'reward: ', reward)\n log.write(buffer)\n generator.save_weight(sess)\n\n # Update roll-out parameters\n rollout.update_params()\n discriminator.update_embedding()\n\n # Train the discriminator\n samples = []\n for it in range(gen_data_loader.num_batch):\n inp_batch, image_batch = gen_data_loader.next_batch()\n feed_dict = {\n mobilenet.X: image_batch,\n mobilenet.is_training: False \n }\n hidden_batch = sess.run(mobilenet.y_output, feed_dict=feed_dict)\n samples.extend(generator.generate(sess, hidden_batch).tolist())\n dis_data_loader.create_batches(random.sample(datas, 3000), samples)\n dis_data_loader.reset_pointer()\n for it in range(dis_data_loader.num_batch):\n x_batch, labels = dis_data_loader.next_batch()\n feed = {\n discriminator.input_x: x_batch,\n discriminator.labels: labels,\n discriminator.dropout_keep_prob: 0.75\n }\n _ = sess.run(discriminator.train_op, feed)\n\n # final test\n gen_data_loader.reset_pointer()\n _, image_batch = gen_data_loader.next_batch()\n feed_dict = {\n mobilenet.X: image_batch,\n mobilenet.is_training: False \n }\n hidden_batch = sess.run(mobilenet.y_output, feed_dict=feed_dict)\n samples = generator.generate(sess, hidden_batch)\n y = samples.tolist()\n sams = []\n for k, sam in enumerate(y):\n sa = [id_to_word[i] for i in sam]\n sa = ''.join(sa)\n sams.append(sa)\n for sam in sams:\n log.write(sam+'\\n')\n log.close()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.seed", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "numpy.mean", "tensorflow.Session" ] ]
gordonchiang/HoQ
[ "f483648ef0075037de890316475918f154729291" ]
[ "dumps/graph_mobile.py" ]
[ "#!/usr/bin/env python3\n\nfrom copy import deepcopy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\ndef genList(d, delay, loss):\n return [ d['TCP'][delay][loss], d['TCP+TLS'][delay][loss], d['QUIC'][delay][loss] ]\n\ndef getVal(l):\n return [ l[0]['val'], l[1]['val'], l[2]['val'] ]\n\ndef getAvg(l):\n return ( l[0]['avg'], l[1]['avg'], l[2]['avg'] )\n\ndef genPlot(axs, x, y, mode, delay, loss):\n d = None\n if mode == 'Single Request': d = main\n elif mode == 'Batch Request': d = mult\n l = genList(d, delay, loss)\n bp = axs[x, y].boxplot(getVal(l))\n title = 'Mobile, {}, {} delay, {} loss'.format(mode, delay, loss)\n axs[x, y].set_title(title)\n axs[x, y].set_xticklabels(['TCP', 'TCP+TLS', 'QUIC'])\n axs[x, y].set_ylabel('Duration (s)')\n\n for i, line in enumerate(bp['medians']):\n a, b = line.get_xydata()[1]\n text = ' μ={:.3f} '.format(getAvg(l)[i])\n axs[x, y].annotate(text, xy=(a, b))\n\ndataset = pd.read_csv('output.csv')\n\nmain = {\n 'TCP': {\n '10ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n '25ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n '50ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n },\n 'TCP+TLS': {\n '10ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n '25ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n '50ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n },\n 'QUIC': {\n '10ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n '25ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n '50ms': {\n '1%': { 'val': [None] * 3, 'avg': None },\n '3%': { 'val': [None] * 3, 'avg': None },\n '5%': { 'val': [None] * 3, 'avg': None },\n },\n },\n}\n\nmult = deepcopy(main)\n\nfor row in dataset.itertuples():\n dump = row[1]\n if 'mobile/' in dump:\n dict, protocol_index, delay_index, loss_index = None, None, None, None\n if 'main' in dump:\n dict = main\n elif 'mult' in dump:\n dict = mult\n\n if 'no_tls' in dump: protocol_index = 'TCP'\n elif 'quic' in dump: protocol_index = 'QUIC'\n elif 'tls' in dump: protocol_index = 'TCP+TLS'\n\n if '50ms' in dump: delay_index = '50ms'\n elif '25ms' in dump: delay_index = '25ms'\n elif '10ms' in dump: delay_index = '10ms'\n\n if '5%' in dump: loss_index = '5%'\n elif '3%' in dump: loss_index = '3%'\n elif '1%' in dump: loss_index = '1%'\n\n dict[protocol_index][delay_index][loss_index]['val'] = (row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11])\n dict[protocol_index][delay_index][loss_index]['avg'] = row[12]\n\nfig, axs = plt.subplots(6, 3, constrained_layout=True, figsize=(20,20))\n\ngenPlot(axs, 0, 0, 'Single Request', '10ms', '1%')\ngenPlot(axs, 0, 1, 'Single Request', '10ms', '3%')\ngenPlot(axs, 0, 2, 'Single Request', '10ms', '5%')\ngenPlot(axs, 1, 0, 'Single Request', '25ms', '1%')\ngenPlot(axs, 1, 1, 'Single 
Request', '25ms', '3%')\ngenPlot(axs, 1, 2, 'Single Request', '25ms', '5%')\ngenPlot(axs, 2, 0, 'Single Request', '50ms', '1%')\ngenPlot(axs, 2, 1, 'Single Request', '50ms', '3%')\ngenPlot(axs, 2, 2, 'Single Request', '50ms', '5%')\ngenPlot(axs, 3, 0, 'Batch Request', '10ms', '1%')\ngenPlot(axs, 3, 1, 'Batch Request', '10ms', '3%')\ngenPlot(axs, 3, 2, 'Batch Request', '10ms', '5%')\ngenPlot(axs, 4, 0, 'Batch Request', '25ms', '1%')\ngenPlot(axs, 4, 1, 'Batch Request', '25ms', '3%')\ngenPlot(axs, 4, 2, 'Batch Request', '25ms', '5%')\ngenPlot(axs, 5, 0, 'Batch Request', '50ms', '1%')\ngenPlot(axs, 5, 1, 'Batch Request', '50ms', '3%')\ngenPlot(axs, 5, 2, 'Batch Request', '50ms', '5%')\n\nplt.savefig('mobile.svg', format='svg')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ] ]
nobilearn/tensorflow-yolov4-tflite
[ "1b79f20148ed6ab7894ad213363fd31056c527c9" ]
[ "benchmarks.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport time\nimport cv2\nfrom core.yolov4 import YOLOv4, YOLOv3_tiny, YOLOv3, decode\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\nfrom tensorflow.python.saved_model import tag_constants\nfrom core import utils\nfrom core.config import cfg\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nflags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')\nflags.DEFINE_string('framework', 'tf', '(tf, tflite, trt')\nflags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')\nflags.DEFINE_string('weights', './data/yolov4.weights', 'path to weights file')\nflags.DEFINE_string('image', './data/kite.jpg', 'path to input image')\nflags.DEFINE_integer('size', 416, 'resize images to')\n\n\ndef main(_argv):\n config = ConfigProto()\n config.gpu_options.allow_growth = True\n session = InteractiveSession(config=config)\n NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))\n input_size = FLAGS.size\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n if len(physical_devices) > 0:\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\n if FLAGS.framework == 'tf':\n input_layer = tf.keras.layers.Input([input_size, input_size, 3])\n if FLAGS.tiny:\n feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)\n bbox_tensors = []\n for i, fm in enumerate(feature_maps):\n bbox_tensor = decode(fm, NUM_CLASS, i)\n bbox_tensors.append(bbox_tensor)\n model = tf.keras.Model(input_layer, bbox_tensors)\n utils.load_weights_tiny(model, FLAGS.weights)\n else:\n if FLAGS.model == 'yolov3':\n feature_maps = YOLOv3(input_layer, NUM_CLASS)\n bbox_tensors = []\n for i, fm in enumerate(feature_maps):\n bbox_tensor = decode(fm, NUM_CLASS, i)\n bbox_tensors.append(bbox_tensor)\n model = tf.keras.Model(input_layer, bbox_tensors)\n utils.load_weights_v3(model, FLAGS.weights)\n elif FLAGS.model == 'yolov4':\n feature_maps = YOLOv4(input_layer, NUM_CLASS)\n bbox_tensors = []\n for i, fm in enumerate(feature_maps):\n bbox_tensor = decode(fm, NUM_CLASS, i)\n bbox_tensors.append(bbox_tensor)\n model = tf.keras.Model(input_layer, bbox_tensors)\n utils.load_weights(model, FLAGS.weights)\n elif FLAGS.framework == 'trt':\n saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])\n signature_keys = list(saved_model_loaded.signatures.keys())\n print(signature_keys)\n infer = saved_model_loaded.signatures['serving_default']\n\n logging.info('weights loaded')\n\n @tf.function\n def run_model(x):\n return model(x)\n\n # Test the TensorFlow Lite model on random input data.\n sum = 0\n original_image = cv2.imread(FLAGS.image)\n original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n original_image_size = original_image.shape[:2]\n image_data = utils.image_preporcess(np.copy(original_image), [FLAGS.size, FLAGS.size])\n image_data = image_data[np.newaxis, ...].astype(np.float32)\n img_raw = tf.image.decode_image(\n open(FLAGS.image, 'rb').read(), channels=3)\n img_raw = tf.expand_dims(img_raw, 0)\n img_raw = tf.image.resize(img_raw, (FLAGS.size, FLAGS.size))\n batched_input = tf.constant(image_data)\n for i in range(1000):\n prev_time = time.time()\n # pred_bbox = model.predict(image_data)\n if FLAGS.framework == 'tf':\n pred_bbox = run_model(image_data)\n elif FLAGS.framework == 'trt':\n pred_bbox = infer(batched_input)\n # pred_bbox = pred_bbox.numpy()\n curr_time = time.time()\n exec_time = curr_time - prev_time\n if i == 0: continue\n sum += (1 / 
exec_time)\n info = str(i) + \" time:\" + str(round(exec_time, 3)) + \" average FPS:\" + str(round(sum / i, 2)) + \", FPS: \" + str(\n round((1 / exec_time), 1))\n print(info)\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n" ]
[ [ "tensorflow.compat.v1.ConfigProto", "tensorflow.constant", "tensorflow.saved_model.load", "tensorflow.config.experimental.set_memory_growth", "tensorflow.config.experimental.list_physical_devices", "tensorflow.expand_dims", "tensorflow.keras.Model", "numpy.copy", "tensorflow.image.resize", "tensorflow.compat.v1.InteractiveSession", "tensorflow.keras.layers.Input" ] ]
adelevie/transformers
[ "18ca0e91402d17950b870d7c9f67ddb7fd573817" ]
[ "tests/test_modeling_common.py" ]
[ "# coding=utf-8\n# Copyright 2019 HuggingFace Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport os.path\nimport random\nimport tempfile\nimport unittest\nfrom typing import List, Tuple\n\nfrom transformers import is_torch_available\nfrom transformers.testing_utils import require_multigpu, require_torch, slow, torch_device\n\n\nif is_torch_available():\n import numpy as np\n import torch\n\n from transformers import (\n AdaptiveEmbedding,\n PretrainedConfig,\n PreTrainedModel,\n BertConfig,\n BertModel,\n BERT_PRETRAINED_MODEL_ARCHIVE_LIST,\n MODEL_FOR_MULTIPLE_CHOICE_MAPPING,\n MODEL_FOR_QUESTION_ANSWERING_MAPPING,\n MODEL_FOR_CAUSAL_LM_MAPPING,\n MODEL_FOR_MASKED_LM_MAPPING,\n MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,\n MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,\n MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,\n top_k_top_p_filtering,\n )\n\n\ndef _config_zero_init(config):\n configs_no_init = copy.deepcopy(config)\n for key in configs_no_init.__dict__.keys():\n if \"_range\" in key or \"_std\" in key or \"initializer_factor\" in key:\n setattr(configs_no_init, key, 1e-10)\n return configs_no_init\n\n\n@require_torch\nclass ModelTesterMixin:\n\n model_tester = None\n all_model_classes = ()\n all_generative_model_classes = ()\n test_torchscript = True\n test_pruning = True\n test_resize_embeddings = True\n test_head_masking = True\n test_missing_keys = True\n is_encoder_decoder = False\n\n def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):\n inputs_dict = copy.deepcopy(inputs_dict)\n if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():\n inputs_dict = {\n k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()\n if isinstance(v, torch.Tensor) and v.ndim > 1\n else v\n for k, v in inputs_dict.items()\n }\n\n if return_labels:\n if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():\n inputs_dict[\"labels\"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)\n elif model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():\n inputs_dict[\"start_positions\"] = torch.zeros(\n self.model_tester.batch_size, dtype=torch.long, device=torch_device\n )\n inputs_dict[\"end_positions\"] = torch.zeros(\n self.model_tester.batch_size, dtype=torch.long, device=torch_device\n )\n elif model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():\n inputs_dict[\"labels\"] = torch.zeros(\n self.model_tester.batch_size, dtype=torch.long, device=torch_device\n )\n elif model_class in [\n *MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),\n *MODEL_FOR_CAUSAL_LM_MAPPING.values(),\n *MODEL_FOR_MASKED_LM_MAPPING.values(),\n *MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),\n ]:\n inputs_dict[\"labels\"] = torch.zeros(\n (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device\n )\n return inputs_dict\n\n def test_save_load(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in 
self.all_model_classes:\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n out_2 = outputs[0].cpu().numpy()\n out_2[np.isnan(out_2)] = 0\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n model.save_pretrained(tmpdirname)\n model = model_class.from_pretrained(tmpdirname)\n model.to(torch_device)\n with torch.no_grad():\n after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n # Make sure we don't have nans\n out_1 = after_outputs[0].cpu().numpy()\n out_1[np.isnan(out_1)] = 0\n max_diff = np.amax(np.abs(out_1 - out_2))\n self.assertLessEqual(max_diff, 1e-5)\n\n def test_initialization(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n configs_no_init = _config_zero_init(config)\n for model_class in self.all_model_classes:\n model = model_class(config=configs_no_init)\n for name, param in model.named_parameters():\n if param.requires_grad:\n self.assertIn(\n ((param.data.mean() * 1e9).round() / 1e9).item(),\n [0.0, 1.0],\n msg=\"Parameter {} of model {} seems not properly initialized\".format(name, model_class),\n )\n\n def test_determinism(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n first = model(**self._prepare_for_class(inputs_dict, model_class))[0]\n second = model(**self._prepare_for_class(inputs_dict, model_class))[0]\n out_1 = first.cpu().numpy()\n out_2 = second.cpu().numpy()\n out_1 = out_1[~np.isnan(out_1)]\n out_2 = out_2[~np.isnan(out_2)]\n max_diff = np.amax(np.abs(out_1 - out_2))\n self.assertLessEqual(max_diff, 1e-5)\n\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n seq_len = getattr(self.model_tester, \"seq_length\", None)\n decoder_seq_length = getattr(self.model_tester, \"decoder_seq_length\", seq_len)\n encoder_seq_length = getattr(self.model_tester, \"encoder_seq_length\", seq_len)\n decoder_key_length = getattr(self.model_tester, \"key_length\", decoder_seq_length)\n encoder_key_length = getattr(self.model_tester, \"key_length\", encoder_seq_length)\n chunk_length = getattr(self.model_tester, \"chunk_length\", None)\n if chunk_length is not None and hasattr(self.model_tester, \"num_hashes\"):\n encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = False\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs[-1]\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n # check that output_attentions also work using config\n del inputs_dict[\"output_attentions\"]\n config.output_attentions = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs[-1]\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n if chunk_length is not None:\n self.assertListEqual(\n list(attentions[0].shape[-4:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],\n 
)\n else:\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],\n )\n out_len = len(outputs)\n\n if self.is_encoder_decoder:\n correct_outlen = 4\n decoder_attention_idx = 1\n\n # loss is at first position\n if \"labels\" in inputs_dict:\n correct_outlen += 1 # loss is added to beginning\n decoder_attention_idx += 1\n # Question Answering model returns start_logits and end_logits\n if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():\n correct_outlen += 1 # start_logits and end_logits instead of only 1 output\n decoder_attention_idx += 1\n self.assertEqual(out_len, correct_outlen)\n\n decoder_attentions = outputs[decoder_attention_idx]\n self.assertIsInstance(decoder_attentions, (list, tuple))\n self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(decoder_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],\n )\n\n # Check attention is always last and order is fine\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))\n\n self_attentions = outputs[-1]\n self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)\n if chunk_length is not None:\n self.assertListEqual(\n list(self_attentions[0].shape[-4:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],\n )\n else:\n self.assertListEqual(\n list(self_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],\n )\n\n def test_torchscript(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n self._create_and_check_torchscript(config, inputs_dict)\n\n def test_torchscript_output_attentions(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.output_attentions = True\n self._create_and_check_torchscript(config, inputs_dict)\n\n def test_torchscript_output_hidden_state(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.output_hidden_states = True\n self._create_and_check_torchscript(config, inputs_dict)\n\n def _create_and_check_torchscript(self, config, inputs_dict):\n if not self.test_torchscript:\n return\n\n configs_no_init = _config_zero_init(config) # To be sure we have no Nan\n configs_no_init.torchscript = True\n for model_class in self.all_model_classes:\n model = model_class(config=configs_no_init)\n model.to(torch_device)\n model.eval()\n inputs = self._prepare_for_class(inputs_dict, model_class)[\"input_ids\"] # Let's keep only input_ids\n\n try:\n traced_gpt2 = torch.jit.trace(model, inputs)\n except RuntimeError:\n self.fail(\"Couldn't trace module.\")\n\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n pt_file_name = os.path.join(tmp_dir_name, \"traced_model.pt\")\n\n try:\n torch.jit.save(traced_gpt2, pt_file_name)\n except Exception:\n self.fail(\"Couldn't save module.\")\n\n try:\n loaded_model = torch.jit.load(pt_file_name)\n except Exception:\n self.fail(\"Couldn't load module.\")\n\n model.to(torch_device)\n model.eval()\n\n loaded_model.to(torch_device)\n loaded_model.eval()\n\n 
model_state_dict = model.state_dict()\n loaded_model_state_dict = loaded_model.state_dict()\n\n self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))\n\n models_equal = True\n for layer_name, p1 in model_state_dict.items():\n p2 = loaded_model_state_dict[layer_name]\n if p1.data.ne(p2.data).sum() > 0:\n models_equal = False\n\n self.assertTrue(models_equal)\n\n def test_headmasking(self):\n if not self.test_head_masking:\n return\n\n global_rng.seed(42)\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n global_rng.seed()\n\n inputs_dict[\"output_attentions\"] = True\n config.output_hidden_states = True\n configs_no_init = _config_zero_init(config) # To be sure we have no Nan\n for model_class in self.all_model_classes:\n model = model_class(config=configs_no_init)\n model.to(torch_device)\n model.eval()\n\n # Prepare head_mask\n # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)\n head_mask = torch.ones(\n self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device,\n )\n head_mask[0, 0] = 0\n head_mask[-1, :-1] = 0\n head_mask.requires_grad_(requires_grad=True)\n inputs = self._prepare_for_class(inputs_dict, model_class).copy()\n inputs[\"head_mask\"] = head_mask\n\n outputs = model(**inputs)\n\n # Test that we can get a gradient back for importance score computation\n output = sum(t.sum() for t in outputs[0])\n output = output.sum()\n output.backward()\n multihead_outputs = head_mask.grad\n\n attentions = outputs[-1]\n\n # Remove Nan\n for t in attentions:\n self.assertLess(\n torch.sum(torch.isnan(t)), t.numel() / 4\n ) # Check we don't have more than 25% nans (arbitrary)\n attentions = [\n t.masked_fill(torch.isnan(t), 0.0) for t in attentions\n ] # remove them (the test is less complete)\n\n self.assertIsNotNone(multihead_outputs)\n self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)\n self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)\n self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)\n self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)\n self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)\n self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)\n\n def test_head_pruning(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n (config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n inputs_dict[\"output_attentions\"] = True\n config.output_hidden_states = False\n model = model_class(config=config)\n model.to(torch_device)\n model.eval()\n heads_to_prune = {\n 0: list(range(1, self.model_tester.num_attention_heads)),\n -1: [0],\n }\n model.prune_heads(heads_to_prune)\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)\n\n def test_head_pruning_save_load_from_pretrained(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n (config, inputs_dict,) = 
self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n inputs_dict[\"output_attentions\"] = True\n config.output_hidden_states = False\n model = model_class(config=config)\n model.to(torch_device)\n model.eval()\n heads_to_prune = {\n 0: list(range(1, self.model_tester.num_attention_heads)),\n -1: [0],\n }\n model.prune_heads(heads_to_prune)\n\n with tempfile.TemporaryDirectory() as temp_dir_name:\n model.save_pretrained(temp_dir_name)\n model = model_class.from_pretrained(temp_dir_name)\n model.to(torch_device)\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs[-1]\n self.assertEqual(attentions[0].shape[-3], 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)\n\n def test_head_pruning_save_load_from_config_init(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n (config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n inputs_dict[\"output_attentions\"] = True\n config.output_hidden_states = False\n\n heads_to_prune = {\n 0: list(range(1, self.model_tester.num_attention_heads)),\n -1: [0],\n }\n config.pruned_heads = heads_to_prune\n\n model = model_class(config=config)\n model.to(torch_device)\n model.eval()\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)\n\n def test_head_pruning_integration(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n (config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n inputs_dict[\"output_attentions\"] = True\n config.output_hidden_states = False\n\n heads_to_prune = {0: [0], 1: [1, 2]}\n config.pruned_heads = heads_to_prune\n\n model = model_class(config=config)\n model.to(torch_device)\n model.eval()\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)\n self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)\n\n with tempfile.TemporaryDirectory() as temp_dir_name:\n model.save_pretrained(temp_dir_name)\n model = model_class.from_pretrained(temp_dir_name)\n model.to(torch_device)\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)\n self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)\n\n heads_to_prune = {0: [0], 2: [1, 2]}\n 
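# Head pruning is cumulative: the heads pruned below merge with those already pruned via the config, as the assertDictEqual at the end of this test verifies.\n            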
model.prune_heads(heads_to_prune)\n\n            with torch.no_grad():\n                outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n                attentions = outputs[-1]\n\n            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)\n            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)\n            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)\n            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)\n\n            self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})\n\n    def test_hidden_states_output(self):\n        def check_hidden_states_output(inputs_dict, config, model_class):\n            model = model_class(config)\n            model.to(torch_device)\n            model.eval()\n\n            with torch.no_grad():\n                outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n            hidden_states = outputs[-1]\n\n            self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)\n            if hasattr(self.model_tester, \"encoder_seq_length\"):\n                seq_length = self.model_tester.encoder_seq_length\n                if hasattr(self.model_tester, \"chunk_length\") and self.model_tester.chunk_length > 1:\n                    seq_length = seq_length * self.model_tester.chunk_length\n            else:\n                seq_length = self.model_tester.seq_length\n\n            self.assertListEqual(\n                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],\n            )\n\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n        for model_class in self.all_model_classes:\n            inputs_dict[\"output_hidden_states\"] = True\n            check_hidden_states_output(inputs_dict, config, model_class)\n\n            # check that output_hidden_states also work using config\n            del inputs_dict[\"output_hidden_states\"]\n            config.output_hidden_states = True\n\n            check_hidden_states_output(inputs_dict, config, model_class)\n\n    def test_feed_forward_chunking(self):\n        (original_config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()\n        for model_class in self.all_model_classes:\n            torch.manual_seed(0)\n            config = copy.deepcopy(original_config)\n            model = model_class(config)\n            model.to(torch_device)\n            model.eval()\n\n            hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]\n\n            torch.manual_seed(0)\n            config.chunk_size_feed_forward = 1\n            model = model_class(config)\n            model.to(torch_device)\n            model.eval()\n\n            hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]\n            self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))\n\n    def test_resize_tokens_embeddings(self):\n        (original_config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()\n        if not self.test_resize_embeddings:\n            return\n\n        for model_class in self.all_model_classes:\n            config = copy.deepcopy(original_config)\n            model = model_class(config)\n            model.to(torch_device)\n\n            if self.model_tester.is_training is False:\n                model.eval()\n\n            model_vocab_size = config.vocab_size\n            # Retrieve the embeddings and clone them\n            model_embed = model.resize_token_embeddings(model_vocab_size)\n            cloned_embeddings = model_embed.weight.clone()\n\n            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size\n            model_embed = model.resize_token_embeddings(model_vocab_size + 10)\n            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)\n            # Check that it actually resizes the embeddings matrix\n            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)\n            # Check that 
the model can still do a forward pass successfully (every parameter should be resized)\n model(**self._prepare_for_class(inputs_dict, model_class))\n\n # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size\n model_embed = model.resize_token_embeddings(model_vocab_size - 15)\n self.assertEqual(model.config.vocab_size, model_vocab_size - 15)\n # Check that it actually resizes the embeddings matrix\n self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)\n\n # Check that the model can still do a forward pass successfully (every parameter should be resized)\n # Input ids should be clamped to the maximum size of the vocabulary\n inputs_dict[\"input_ids\"].clamp_(max=model_vocab_size - 15 - 1)\n model(**self._prepare_for_class(inputs_dict, model_class))\n\n # Check that adding and removing tokens has not modified the first part of the embedding matrix.\n models_equal = True\n for p1, p2 in zip(cloned_embeddings, model_embed.weight):\n if p1.data.ne(p2.data).sum() > 0:\n models_equal = False\n\n self.assertTrue(models_equal)\n\n def test_model_common_attributes(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding, AdaptiveEmbedding))\n model.set_input_embeddings(torch.nn.Embedding(10, 10))\n x = model.get_output_embeddings()\n self.assertTrue(x is None or isinstance(x, torch.nn.Linear))\n\n def test_correct_missing_keys(self):\n if not self.test_missing_keys:\n return\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n base_model_prefix = model.base_model_prefix\n\n if hasattr(model, base_model_prefix):\n with tempfile.TemporaryDirectory() as temp_dir_name:\n model.base_model.save_pretrained(temp_dir_name)\n model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)\n\n with self.subTest(msg=\"Missing keys for {}\".format(model.__class__.__name__)):\n self.assertGreater(len(loading_info[\"missing_keys\"]), 0)\n\n def test_tie_model_weights(self):\n if not self.test_torchscript:\n return\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n def check_same_values(layer_1, layer_2):\n equal = True\n for p1, p2 in zip(layer_1.weight, layer_2.weight):\n if p1.data.ne(p2.data).sum() > 0:\n equal = False\n return equal\n\n for model_class in self.all_model_classes:\n config.torchscript = True\n model_not_tied = model_class(config)\n if model_not_tied.get_output_embeddings() is None:\n continue\n\n config_tied = copy.deepcopy(config)\n config_tied.torchscript = False\n model_tied = model_class(config_tied)\n params_tied = list(model_tied.parameters())\n # Check that the embedding layer and decoding layer are the same in size and in value\n # self.assertTrue(check_same_values(embeddings, decoding))\n\n # # Check that after modification, they remain the same.\n # embeddings.weight.data.div_(2)\n # # Check that the embedding layer and decoding layer are the same in size and in value\n # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)\n # self.assertTrue(check_same_values(embeddings, decoding))\n\n # # Check that after modification, they remain the same.\n # decoding.weight.data.div_(4)\n # # Check that the embedding layer and decoding layer are the same in size and in value\n # 
self.assertTrue(embeddings.weight.shape, decoding.weight.shape)\n # self.assertTrue(check_same_values(embeddings, decoding))\n\n # Check that after resize they remain tied.\n model_tied.resize_token_embeddings(config.vocab_size + 10)\n params_tied_2 = list(model_tied.parameters())\n self.assertEqual(len(params_tied_2), len(params_tied))\n\n # decoding.weight.data.mul_(20)\n # # Check that the embedding layer and decoding layer are the same in size and in value\n # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)\n # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))\n\n def test_model_outputs_equivalence(self):\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):\n with torch.no_grad():\n tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)\n dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()\n\n def recursive_check(tuple_object, dict_object):\n if isinstance(tuple_object, (List, Tuple)):\n for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):\n recursive_check(tuple_iterable_value, dict_iterable_value)\n elif tuple_object is None:\n return\n elif torch.isinf(tuple_object).any() and torch.isinf(dict_object).any():\n # TODO: (Lysandre) - maybe take a look if that's ok here\n return\n else:\n self.assertTrue(\n torch.allclose(tuple_object, dict_object, atol=1e-5),\n msg=f\"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.\",\n )\n\n recursive_check(tuple_output, dict_output)\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class)\n check_equivalence(model, tuple_inputs, dict_inputs)\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n check_equivalence(model, tuple_inputs, dict_inputs)\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_hidden_states\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_attentions\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_hidden_states\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_attentions\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n 
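# Last combination: labels together with both output_hidden_states and output_attentions in one forward pass.\n            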
check_equivalence(\n                model, tuple_inputs, dict_inputs, {\"output_hidden_states\": True, \"output_attentions\": True}\n            )\n\n    def test_inputs_embeds(self):\n\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n        for model_class in self.all_model_classes:\n            model = model_class(config)\n            model.to(torch_device)\n            model.eval()\n\n            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))\n            if not self.is_encoder_decoder:\n                input_ids = inputs[\"input_ids\"]\n                del inputs[\"input_ids\"]\n            else:\n                encoder_input_ids = inputs[\"input_ids\"]\n                decoder_input_ids = inputs.get(\"decoder_input_ids\", encoder_input_ids)\n                del inputs[\"input_ids\"]\n                inputs.pop(\"decoder_input_ids\", None)\n\n            wte = model.get_input_embeddings()\n            if not self.is_encoder_decoder:\n                inputs[\"inputs_embeds\"] = wte(input_ids)\n            else:\n                inputs[\"inputs_embeds\"] = wte(encoder_input_ids)\n                inputs[\"decoder_inputs_embeds\"] = wte(decoder_input_ids)\n\n            with torch.no_grad():\n                model(**inputs)\n\n    def test_lm_head_model_random_no_beam_search_generate(self):\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n        input_ids = inputs_dict[\"input_ids\"] if \"input_ids\" in inputs_dict else inputs_dict[\"inputs\"]\n\n        # make sure that input_ids is at most of size 15\n        input_ids = input_ids[..., :15]\n\n        # iterate over all generative models\n        for model_class in self.all_generative_model_classes:\n            model = model_class(config).to(torch_device)\n            model.eval()\n\n            if config.bos_token_id is None:\n                # if bos token id is not defined, model needs input_ids\n                with self.assertRaises(AssertionError):\n                    model.generate(do_sample=True, max_length=5)\n                # num_return_sequences = 1\n                self._check_generated_ids(model.generate(input_ids, do_sample=True))\n            else:\n                # num_return_sequences = 1\n                self._check_generated_ids(model.generate(do_sample=True, max_length=5))\n\n            with self.assertRaises(AssertionError):\n                # generating multiple sequences without sampling or beam search\n                # is not allowed as it would always generate the same sequences\n                model.generate(input_ids, do_sample=False, num_return_sequences=2)\n\n            # num_return_sequences > 1, sample\n            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))\n\n            # check bad words tokens language generation\n            # create list of 1-seq bad token and list of 2-seq of bad tokens\n            bad_words_ids = [\n                self._generate_random_bad_tokens(1, model.config),\n                self._generate_random_bad_tokens(2, model.config),\n            ]\n            output_tokens = model.generate(\n                input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2\n            )\n            # only count generated tokens\n            generated_ids = output_tokens[:, input_ids.shape[-1] :]\n            self.assertFalse(self._check_match_tokens(generated_ids.tolist(), bad_words_ids))\n\n    def test_lm_head_model_random_beam_search_generate(self):\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n        input_ids = (inputs_dict[\"input_ids\"] if \"input_ids\" in inputs_dict else inputs_dict[\"inputs\"]).to(\n            torch_device\n        )\n\n        # make sure that input_ids is at most of size 15\n        input_ids = input_ids[..., :15]\n\n        for model_class in self.all_generative_model_classes:\n            model = model_class(config).to(torch_device)\n            model.eval()\n\n            if config.bos_token_id is None:\n                # if bos token id is not defined, model needs input_ids, num_return_sequences = 1\n                self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))\n            else:\n                # num_return_sequences = 1\n                self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))\n\n            with self.assertRaises(AssertionError):\n                # generating more return sequences than there are beams is not possible\n                model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)\n\n            # num_return_sequences > 1, sample\n            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2, num_return_sequences=2,))\n            # num_return_sequences > 1, greedy\n            self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))\n\n            # check bad words tokens language generation\n            # create list of 1-seq bad token and list of 2-seq of bad tokens\n            bad_words_ids = [\n                self._generate_random_bad_tokens(1, model.config),\n                self._generate_random_bad_tokens(2, model.config),\n            ]\n            output_tokens = model.generate(\n                input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2\n            )\n            # only count generated tokens\n            generated_ids = output_tokens[:, input_ids.shape[-1] :]\n            self.assertFalse(self._check_match_tokens(generated_ids.tolist(), bad_words_ids))\n\n    def _generate_random_bad_tokens(self, num_bad_tokens: int, config) -> List[int]:\n        # special tokens cannot be bad tokens\n        special_tokens = [x for x in [config.bos_token_id, config.eos_token_id, config.pad_token_id] if x is not None]\n        # create random bad tokens that are not special tokens\n        bad_tokens = []\n        while len(bad_tokens) < num_bad_tokens:\n            token = ids_tensor((1, 1), self.model_tester.vocab_size).squeeze(0).cpu().numpy()[0]\n            if token not in special_tokens:\n                bad_tokens.append(token)\n        return bad_tokens\n\n    def _check_generated_ids(self, output_ids):\n        for token_id in output_ids[0].tolist():\n            self.assertGreaterEqual(token_id, 0)\n            self.assertLess(token_id, self.model_tester.vocab_size)\n\n    def _check_match_tokens(self, generated_ids, bad_words_ids):\n        # for all bad word tokens\n        for bad_word_ids in bad_words_ids:\n            # for all slices in batch\n            for generated_ids_slice in generated_ids:\n                # for all word idx\n                for i in range(len(bad_word_ids), len(generated_ids_slice)):\n                    # if tokens match\n                    if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:\n                        return True\n        return False\n\n    @require_multigpu\n    def test_multigpu_data_parallel_forward(self):\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n        # some params shouldn't be scattered by nn.DataParallel\n        # so just remove them if they are present.\n        blacklist_non_batched_params = [\"head_mask\"]\n        for k in blacklist_non_batched_params:\n            inputs_dict.pop(k, None)\n\n        # move input tensors to cuda:0\n        for k, v in inputs_dict.items():\n            if torch.is_tensor(v):\n                inputs_dict[k] = v.to(0)\n\n        for model_class in self.all_model_classes:\n            model = model_class(config=config)\n            model.to(0)\n            model.eval()\n\n            # Wrap model in nn.DataParallel\n            model = torch.nn.DataParallel(model)\n            with torch.no_grad():\n                _ = model(**self._prepare_for_class(inputs_dict, model_class))\n\n\nglobal_rng = random.Random()\n\n\ndef ids_tensor(shape, vocab_size, rng=None, name=None):\n    # Creates a random int32 tensor of the shape within the vocab size\n    if rng is None:\n        rng = global_rng\n\n    total_dims = 1\n    for dim in shape:\n        total_dims *= dim\n\n    values = []\n    for _ in range(total_dims):\n        values.append(rng.randint(0, vocab_size - 1))\n\n    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()\n\n\ndef floats_tensor(shape, scale=1.0, rng=None, name=None):\n    \"\"\"Creates a random float32 tensor\"\"\"\n    if rng is None:\n        rng = global_rng\n\n    total_dims = 1\n    for dim in shape:\n        total_dims *= dim\n\n    values = []\n    for _ in range(total_dims):\n        values.append(rng.random() * scale)\n\n    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()\n\n\n@require_torch\nclass ModelUtilsTest(unittest.TestCase):\n    @slow\n    def test_model_from_pretrained(self):\n        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n            config = BertConfig.from_pretrained(model_name)\n            self.assertIsNotNone(config)\n            self.assertIsInstance(config, PretrainedConfig)\n\n            model = BertModel.from_pretrained(model_name)\n            model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)\n            self.assertIsNotNone(model)\n            self.assertIsInstance(model, PreTrainedModel)\n            for value in loading_info.values():\n                self.assertEqual(len(value), 0)\n\n            config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)\n            model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)\n            self.assertEqual(model.config.output_hidden_states, True)\n            self.assertEqual(model.config, config)\n\n\n@require_torch\nclass UtilsFunctionsTest(unittest.TestCase):\n\n    # tests whether the top_k_top_p function behaves as expected\n    def test_top_k_top_p_filtering(self):\n        logits = torch.tensor(\n            [\n                [\n                    8.2220991,  # 3rd highest value; idx. 0\n                    -0.5620044,\n                    5.23229752,\n                    4.0386393,\n                    -6.8798378,\n                    -0.54785802,\n                    -3.2012153,\n                    2.92777176,\n                    1.88171953,\n                    7.35341276,  # 5th highest value; idx. 9\n                    8.43207833,  # 2nd highest value; idx. 10\n                    -9.85711836,\n                    -5.96209236,\n                    -1.13039161,\n                    -7.1115294,\n                    -0.8369633,\n                    -5.3186408,\n                    7.06427407,\n                    0.81369344,\n                    -0.82023817,\n                    -5.9179796,\n                    0.58813443,\n                    -6.99778438,\n                    4.71551189,\n                    -0.18771637,\n                    7.44020759,  # 4th highest value; idx. 25\n                    9.38450987,  # 1st highest value; idx. 26\n                    2.12662941,\n                    -9.32562038,\n                    2.35652522,\n                ],  # cumulative prob of 5 highest values <= 0.6\n                [\n                    0.58425518,\n                    4.53139238,\n                    -5.57510464,\n                    -6.28030699,\n                    -7.19529503,\n                    -4.02122551,\n                    1.39337037,\n                    -6.06707057,\n                    1.59480517,\n                    -9.643119,\n                    0.03907799,\n                    0.67231762,\n                    -8.88206726,\n                    6.27115922,  # 4th highest value; idx. 13\n                    2.28520723,\n                    4.82767506,\n                    4.30421368,\n                    8.8275313,  # 2nd highest value; idx. 17\n                    5.44029958,  # 5th highest value; idx. 18\n                    -4.4735794,\n                    7.38579536,  # 3rd highest value; idx. 20\n                    -2.91051663,\n                    2.61946077,\n                    -2.5674762,\n                    -9.48959302,\n                    -4.02922645,\n                    -1.35416918,\n                    9.67702323,  # 1st highest value; idx. 27\n                    -5.89478553,\n                    1.85370467,\n                ],  # cumulative prob of 5 highest values <= 0.6\n            ],\n            dtype=torch.float,\n            device=torch_device,\n        )\n\n        non_inf_expected_idx = torch.tensor(\n            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],\n            dtype=torch.long,\n            device=torch_device,\n        )  # expected non filtered idx as noted above\n\n        non_inf_expected_output = torch.tensor(\n            [\n                8.2221,\n                7.3534,\n                8.4321,\n                7.4402,\n                9.3845,\n                6.2712,\n                8.8275,\n                5.4403,\n                7.3858,\n                9.6770,\n            ],  # expected non filtered values as noted above\n            dtype=torch.float,\n            device=torch_device,\n        )\n\n        output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)\n        non_inf_output = output[output != -float(\"inf\")].to(device=torch_device)\n        non_inf_idx = (output != -float(\"inf\")).nonzero().to(device=torch_device)\n\n        self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))\n        self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))\n" ]
[ [ "torch.jit.save", "torch.abs", "torch.jit.load", "torch.ones", "numpy.abs", "torch.jit.trace", "torch.zeros", "torch.eq", "torch.manual_seed", "numpy.isnan", "torch.isnan", "torch.isinf", "torch.is_tensor", "torch.nn.Embedding", "torch.tensor", "torch.no_grad", "torch.allclose", "torch.nn.DataParallel" ] ]
markdls/models
[ "d2cb4f87ec51057c1c0268274851af6169e1bbbd" ]
[ "research/lfads/distributions.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ==============================================================================\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom utils import linear, log_sum_exp\n\nclass Poisson(object):\n \"\"\"Poisson distributon\n\n Computes the log probability under the model.\n\n \"\"\"\n def __init__(self, log_rates):\n \"\"\" Create Poisson distributions with log_rates parameters.\n\n Args:\n log_rates: a tensor-like list of log rates underlying the Poisson dist.\n \"\"\"\n self.logr = log_rates\n\n def logp(self, bin_counts):\n \"\"\"Compute the log probability for the counts in the bin, under the model.\n\n Args:\n bin_counts: array-like integer counts\n\n Returns:\n The log-probability under the Poisson models for each element of\n bin_counts.\n \"\"\"\n k = tf.to_float(bin_counts)\n # log poisson(k, r) = log(r^k * e^(-r) / k!) = k log(r) - r - log k!\n # log poisson(k, r=exp(x)) = k * x - exp(x) - lgamma(k + 1)\n return k * self.logr - tf.exp(self.logr) - tf.lgamma(k + 1)\n\n\ndef diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):\n \"\"\"Log-likelihood under a Gaussian distribution with diagonal covariance.\n Returns the log-likelihood for each dimension. One should sum the\n results for the log-likelihood under the full multidimensional model.\n\n Args:\n z: The value to compute the log-likelihood.\n mu: The mean of the Gaussian\n logvar: The log variance of the Gaussian.\n\n Returns:\n The log-likelihood under the Gaussian model.\n \"\"\"\n\n return -0.5 * (logvar + np.log(2*np.pi) + \\\n tf.square((z-mu)/tf.exp(0.5*logvar)))\n\n\ndef gaussian_pos_log_likelihood(unused_mean, logvar, noise):\n \"\"\"Gaussian log-likelihood function for a posterior in VAE\n\n Note: This function is specialized for a posterior distribution, that has the\n form of z = mean + sigma * noise.\n\n Args:\n unused_mean: ignore\n logvar: The log variance of the distribution\n noise: The noise used in the sampling of the posterior.\n\n Returns:\n The log-likelihood under the Gaussian model.\n \"\"\"\n # ln N(z; mean, sigma) = - ln(sigma) - 0.5 ln 2pi - noise^2 / 2\n return - 0.5 * (logvar + np.log(2 * np.pi) + tf.square(noise))\n\n\nclass Gaussian(object):\n \"\"\"Base class for Gaussian distribution classes.\"\"\"\n pass\n\n\nclass DiagonalGaussian(Gaussian):\n \"\"\"Diagonal Gaussian with different constant mean and variances in each\n dimension.\n \"\"\"\n\n def __init__(self, batch_size, z_size, mean, logvar):\n \"\"\"Create a diagonal gaussian distribution.\n\n Args:\n batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.\n z_size: The dimension of the distribution, i.e. 
1st dim in 2D tensor.\n mean: The N-D mean of the distribution.\n logvar: The N-D log variance of the diagonal distribution.\n \"\"\"\n size__xz = [None, z_size]\n self.mean = mean # bxn already\n self.logvar = logvar # bxn already\n self.noise = noise = tf.random_normal(tf.shape(logvar))\n self.sample = mean + tf.exp(0.5 * logvar) * noise\n mean.set_shape(size__xz)\n logvar.set_shape(size__xz)\n self.sample.set_shape(size__xz)\n\n def logp(self, z=None):\n \"\"\"Compute the log-likelihood under the distribution.\n\n Args:\n z (optional): value to compute likelihood for, if None, use sample.\n\n Returns:\n The likelihood of z under the model.\n \"\"\"\n if z is None:\n z = self.sample\n\n # This is needed to make sure that the gradients are simple.\n # The value of the function shouldn't change.\n if z == self.sample:\n return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise)\n\n return diag_gaussian_log_likelihood(z, self.mean, self.logvar)\n\n\nclass LearnableDiagonalGaussian(Gaussian):\n \"\"\"Diagonal Gaussian whose mean and variance are learned parameters.\"\"\"\n\n def __init__(self, batch_size, z_size, name, mean_init=0.0,\n var_init=1.0, var_min=0.0, var_max=1000000.0):\n \"\"\"Create a learnable diagonal gaussian distribution.\n\n Args:\n batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.\n z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.\n name: prefix name for the mean and log TF variables.\n mean_init (optional): The N-D mean initialization of the distribution.\n var_init (optional): The N-D variance initialization of the diagonal\n distribution.\n var_min (optional): The minimum value the learned variance can take in any\n dimension.\n var_max (optional): The maximum value the learned variance can take in any\n dimension.\n \"\"\"\n\n size_1xn = [1, z_size]\n size__xn = [None, z_size]\n size_bx1 = tf.stack([batch_size, 1])\n assert var_init > 0.0, \"Problems\"\n assert var_max >= var_min, \"Problems\"\n assert var_init >= var_min, \"Problems\"\n assert var_max >= var_init, \"Problems\"\n\n\n z_mean_1xn = tf.get_variable(name=name+\"/mean\", shape=size_1xn,\n initializer=tf.constant_initializer(mean_init))\n self.mean_bxn = mean_bxn = tf.tile(z_mean_1xn, size_bx1)\n mean_bxn.set_shape(size__xn) # tile loses shape\n\n log_var_init = np.log(var_init)\n if var_max > var_min:\n var_is_trainable = True\n else:\n var_is_trainable = False\n\n z_logvar_1xn = \\\n tf.get_variable(name=(name+\"/logvar\"), shape=size_1xn,\n initializer=tf.constant_initializer(log_var_init),\n trainable=var_is_trainable)\n\n if var_is_trainable:\n z_logit_var_1xn = tf.exp(z_logvar_1xn)\n z_var_1xn = tf.nn.sigmoid(z_logit_var_1xn)*(var_max-var_min) + var_min\n z_logvar_1xn = tf.log(z_var_1xn)\n\n logvar_bxn = tf.tile(z_logvar_1xn, size_bx1)\n self.logvar_bxn = logvar_bxn\n self.noise_bxn = noise_bxn = tf.random_normal(tf.shape(logvar_bxn))\n self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn\n\n def logp(self, z=None):\n \"\"\"Compute the log-likelihood under the distribution.\n\n Args:\n z (optional): value to compute likelihood for, if None, use sample.\n\n Returns:\n The likelihood of z under the model.\n \"\"\"\n if z is None:\n z = self.sample\n\n # This is needed to make sure that the gradients are simple.\n # The value of the function shouldn't change.\n if z == self.sample_bxn:\n return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn,\n self.noise_bxn)\n\n return diag_gaussian_log_likelihood(z, self.mean_bxn, 
self.logvar_bxn)\n\n @property\n def mean(self):\n return self.mean_bxn\n\n @property\n def logvar(self):\n return self.logvar_bxn\n\n @property\n def sample(self):\n return self.sample_bxn\n\n\nclass DiagonalGaussianFromInput(Gaussian):\n \"\"\"Diagonal Gaussian whose mean and variance are conditioned on other\n variables.\n\n Note: the parameters to convert from input to the learned mean and log\n variance are held in this class.\n \"\"\"\n\n def __init__(self, x_bxu, z_size, name, var_min=0.0):\n \"\"\"Create an input dependent diagonal Gaussian distribution.\n\n Args:\n x: The input tensor from which the mean and variance are computed,\n via a linear transformation of x. I.e.\n mu = Wx + b, log(var) = Mx + c\n z_size: The size of the distribution.\n name: The name to prefix to learned variables.\n var_min (optional): Minimal variance allowed. This is an additional\n way to control the amount of information getting through the stochastic\n layer.\n \"\"\"\n size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])\n self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name+\"/mean\"))\n logvar_bxn = linear(x_bxu, z_size, name=(name+\"/logvar\"))\n if var_min > 0.0:\n logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min)\n self.logvar_bxn = logvar_bxn\n\n self.noise_bxn = noise_bxn = tf.random_normal(size_bxn)\n self.noise_bxn.set_shape([None, z_size])\n self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn\n\n def logp(self, z=None):\n \"\"\"Compute the log-likelihood under the distribution.\n\n Args:\n z (optional): value to compute likelihood for, if None, use sample.\n\n Returns:\n The likelihood of z under the model.\n \"\"\"\n\n if z is None:\n z = self.sample\n\n # This is needed to make sure that the gradients are simple.\n # The value of the function shouldn't change.\n if z == self.sample_bxn:\n return gaussian_pos_log_likelihood(self.mean_bxn,\n self.logvar_bxn, self.noise_bxn)\n\n return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)\n\n @property\n def mean(self):\n return self.mean_bxn\n\n @property\n def logvar(self):\n return self.logvar_bxn\n\n @property\n def sample(self):\n return self.sample_bxn\n\n\nclass GaussianProcess:\n \"\"\"Base class for Gaussian processes.\"\"\"\n pass\n\n\nclass LearnableAutoRegressive1Prior(GaussianProcess):\n \"\"\"AR(1) model where autocorrelation and process variance are learned\n parameters. Assumed zero mean.\n\n \"\"\"\n\n def __init__(self, batch_size, z_size,\n autocorrelation_taus, noise_variances,\n do_train_prior_ar_atau, do_train_prior_ar_nvar,\n num_steps, name):\n \"\"\"Create a learnable autoregressive (1) process.\n\n Args:\n batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.\n z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.\n autocorrelation_taus: The auto correlation time constant of the AR(1)\n process.\n A value of 0 is uncorrelated gaussian noise.\n noise_variances: The variance of the additive noise, *not* the process\n variance.\n do_train_prior_ar_atau: Train or leave as constant, the autocorrelation?\n do_train_prior_ar_nvar: Train or leave as constant, the noise variance?\n num_steps: Number of steps to run the process.\n name: The name to prefix to learned TF variables.\n \"\"\"\n\n # Note the use of the plural in all of these quantities. 
This is intended\n # to mark that even though a sample z_t from the posterior is thought of a\n # single sample of a multidimensional gaussian, the prior is actually\n # thought of as U AR(1) processes, where U is the dimension of the inferred\n # input.\n size_bx1 = tf.stack([batch_size, 1])\n size__xu = [None, z_size]\n # process variance, the variance at time t over all instantiations of AR(1)\n # with these parameters.\n log_evar_inits_1xu = tf.expand_dims(tf.log(noise_variances), 0)\n self.logevars_1xu = logevars_1xu = \\\n tf.Variable(log_evar_inits_1xu, name=name+\"/logevars\", dtype=tf.float32,\n trainable=do_train_prior_ar_nvar)\n self.logevars_bxu = logevars_bxu = tf.tile(logevars_1xu, size_bx1)\n logevars_bxu.set_shape(size__xu) # tile loses shape\n\n # \\tau, which is the autocorrelation time constant of the AR(1) process\n log_atau_inits_1xu = tf.expand_dims(tf.log(autocorrelation_taus), 0)\n self.logataus_1xu = logataus_1xu = \\\n tf.Variable(log_atau_inits_1xu, name=name+\"/logatau\", dtype=tf.float32,\n trainable=do_train_prior_ar_atau)\n\n # phi in x_t = \\mu + phi x_tm1 + \\eps\n # phi = exp(-1/tau)\n # phi = exp(-1/exp(logtau))\n # phi = exp(-exp(-logtau))\n phis_1xu = tf.exp(-tf.exp(-logataus_1xu))\n self.phis_bxu = phis_bxu = tf.tile(phis_1xu, size_bx1)\n phis_bxu.set_shape(size__xu)\n\n # process noise\n # pvar = evar / (1- phi^2)\n # logpvar = log ( exp(logevar) / (1 - phi^2) )\n # logpvar = logevar - log(1-phi^2)\n # logpvar = logevar - (log(1-phi) + log(1+phi))\n self.logpvars_1xu = \\\n logevars_1xu - tf.log(1.0-phis_1xu) - tf.log(1.0+phis_1xu)\n self.logpvars_bxu = logpvars_bxu = tf.tile(self.logpvars_1xu, size_bx1)\n logpvars_bxu.set_shape(size__xu)\n\n # process mean (zero but included in for completeness)\n self.pmeans_bxu = pmeans_bxu = tf.zeros_like(phis_bxu)\n\n # For sampling from the prior during de-novo generation.\n self.means_t = means_t = [None] * num_steps\n self.logvars_t = logvars_t = [None] * num_steps\n self.samples_t = samples_t = [None] * num_steps\n self.gaussians_t = gaussians_t = [None] * num_steps\n sample_bxu = tf.zeros_like(phis_bxu)\n for t in range(num_steps):\n # process variance used here to make process completely stationary\n if t == 0:\n logvar_pt_bxu = self.logpvars_bxu\n else:\n logvar_pt_bxu = self.logevars_bxu\n\n z_mean_pt_bxu = pmeans_bxu + phis_bxu * sample_bxu\n gaussians_t[t] = DiagonalGaussian(batch_size, z_size,\n mean=z_mean_pt_bxu,\n logvar=logvar_pt_bxu)\n sample_bxu = gaussians_t[t].sample\n samples_t[t] = sample_bxu\n logvars_t[t] = logvar_pt_bxu\n means_t[t] = z_mean_pt_bxu\n\n def logp_t(self, z_t_bxu, z_tm1_bxu=None):\n \"\"\"Compute the log-likelihood under the distribution for a given time t,\n not the whole sequence.\n\n Args:\n z_t_bxu: sample to compute likelihood for at time t.\n z_tm1_bxu (optional): sample condition probability of z_t upon.\n\n Returns:\n The likelihood of p_t under the model at time t. i.e.\n p(z_t|z_tm1) = N(z_tm1 * phis, eps^2)\n\n \"\"\"\n if z_tm1_bxu is None:\n return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu,\n self.logpvars_bxu)\n else:\n means_t_bxu = self.pmeans_bxu + self.phis_bxu * z_tm1_bxu\n logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu,\n means_t_bxu,\n self.logevars_bxu)\n return logp_tgtm1_bxu\n\n\nclass KLCost_GaussianGaussian(object):\n \"\"\"log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian prior. 
See\n  eqn 10 and Appendix B in VAE for latter term,\n  http://arxiv.org/abs/1312.6114\n\n  The log p(x|z) term is the reconstruction error under the model.\n  The KL term represents the penalty for passing information from the encoder\n  to the decoder.\n  To sample KL(q||p), we simply sample\n        ln q - ln p\n  by drawing samples from q and averaging.\n  \"\"\"\n\n  def __init__(self, zs, prior_zs):\n    \"\"\"Create a lower bound in three parts, normalized reconstruction\n    cost, normalized KL divergence cost, and their sum.\n\n    E_q[ln p(z_i | z_{i+1}) / q(z_i | x)]\n       \\int q(z) ln p(z) dz = - 0.5 ln(2pi) - 0.5 \\sum (ln(sigma_p^2) + \\\n          sigma_q^2 / sigma_p^2 + (mean_p - mean_q)^2 / sigma_p^2)\n\n       \\int q(z) ln q(z) dz = - 0.5 ln(2pi) - 0.5 \\sum (ln(sigma_q^2) + 1)\n\n    Args:\n      zs: posterior z ~ q(z|x)\n      prior_zs: prior zs\n    \"\"\"\n    # L = -KL + log p(x|z), to maximize bound on likelihood\n    # -L = KL - log p(x|z), to minimize bound on NLL\n    # so 'KL cost' is positive KL divergence\n    kl_b = 0.0\n    for z, prior_z in zip(zs, prior_zs):\n      assert isinstance(z, Gaussian)\n      assert isinstance(prior_z, Gaussian)\n      # ln(2pi) terms cancel\n      kl_b += 0.5 * tf.reduce_sum(\n          prior_z.logvar - z.logvar\n          + tf.exp(z.logvar - prior_z.logvar)\n          + tf.square((z.mean - prior_z.mean) / tf.exp(0.5 * prior_z.logvar))\n          - 1.0, [1])\n\n    self.kl_cost_b = kl_b\n    self.kl_cost = tf.reduce_mean(kl_b)\n\n\nclass KLCost_GaussianGaussianProcessSampled(object):\n  \"\"\" log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian process\n  prior via sampling.\n\n  The log p(x|z) term is the reconstruction error under the model.\n  The KL term represents the penalty for passing information from the encoder\n  to the decoder.\n  To sample KL(q||p), we simply sample\n        ln q - ln p\n  by drawing samples from q and averaging.\n  \"\"\"\n\n  def __init__(self, post_zs, prior_z_process):\n    \"\"\"Create a lower bound in three parts, normalized reconstruction\n    cost, normalized KL divergence cost, and their sum.\n\n    Args:\n      post_zs: posterior z ~ q(z|x)\n      prior_z_process: prior AR(1) process\n    \"\"\"\n    assert len(post_zs) > 1, \"GP is for time, need more than 1 time step.\"\n    assert isinstance(prior_z_process, GaussianProcess), \"Must use GP.\"\n\n    # L = -KL + log p(x|z), to maximize bound on likelihood\n    # -L = KL - log p(x|z), to minimize bound on NLL\n    # so 'KL cost' is positive KL divergence\n    z0_bxu = post_zs[0].sample\n    logq_bxu = post_zs[0].logp(z0_bxu)\n    logp_bxu = prior_z_process.logp_t(z0_bxu)\n    z_tm1_bxu = z0_bxu\n    for z_t in post_zs[1:]:\n      # posterior is independent in time, prior is not\n      z_t_bxu = z_t.sample\n      logq_bxu += z_t.logp(z_t_bxu)\n      logp_bxu += prior_z_process.logp_t(z_t_bxu, z_tm1_bxu)\n      z_tm1_bxu = z_t_bxu  # advance the conditioning sample to the current step\n\n    kl_bxu = logq_bxu - logp_bxu\n    kl_b = tf.reduce_sum(kl_bxu, [1])\n    self.kl_cost_b = kl_b\n    self.kl_cost = tf.reduce_mean(kl_b)\n" ]
[ [ "tensorflow.compat.v1.stack", "numpy.log", "tensorflow.compat.v1.square", "tensorflow.compat.v1.Variable", "tensorflow.compat.v1.exp", "tensorflow.compat.v1.nn.sigmoid", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.random_normal", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.to_float", "tensorflow.compat.v1.tile", "tensorflow.compat.v1.constant_initializer", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.lgamma", "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.log" ] ]
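Illustrative sketch (not part of the record above): the closed-form diagonal-Gaussian KL that KLCost_GaussianGaussian accumulates with tf.reduce_sum, rewritten in plain NumPy so the algebra can be checked in isolation. The function name diag_gaussian_kl is introduced here for illustration only.

import numpy as np

def diag_gaussian_kl(mean_q, logvar_q, mean_p, logvar_p):
    # KL(q || p) per batch row for diagonal Gaussians; the ln(2*pi) terms
    # cancel, matching the expression inside KLCost_GaussianGaussian.
    return 0.5 * np.sum(
        logvar_p - logvar_q
        + np.exp(logvar_q - logvar_p)
        + ((mean_q - mean_p) / np.exp(0.5 * logvar_p)) ** 2
        - 1.0,
        axis=1)

# Sanity check: the KL of a distribution with itself is zero.
mu, lv = np.zeros((4, 3)), np.zeros((4, 3))
assert np.allclose(diag_gaussian_kl(mu, lv, mu, lv), 0.0)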
UWRobotLearning/rmp2
[ "c612a014f517204b38c552619a441be4b3d7b67f" ]
[ "rmp2/utils/plot_configs.py" ]
[ "# The first version was licensed as \"Original Source License\" (see below).\n# Several enhancements were made at UW Robot Learning Lab.\n# \n# Original Source License:\n# \n# Copyright (c) 2019 Georgia Tech Robot Learning Lab\n# Licensed under the MIT License.\n\n\"\"\"\nconfigs for plotting\n\"\"\"\n\nfrom matplotlib import cm\nfrom itertools import chain\n\nSET2COLORS = cm.get_cmap('Set2').colors\nSET2 = {'darkgreen': SET2COLORS[0],\n        'orange': SET2COLORS[1],\n        'blue': SET2COLORS[2],\n        'pink': SET2COLORS[3],\n        'lightgreen': SET2COLORS[4],\n        'gold': SET2COLORS[5],\n        'brown': SET2COLORS[6],\n        'grey': SET2COLORS[7],\n        }\n\nSET1COLORS = cm.get_cmap('Set1').colors\nSET1 = {\n    'red': SET1COLORS[0],\n    'blue': SET1COLORS[1],\n    'green': SET1COLORS[2],\n    'purple': SET1COLORS[3],\n    'orange': SET1COLORS[4],\n    'yellow': SET1COLORS[5],\n    'brown': SET1COLORS[6],\n    'pink': SET1COLORS[7],\n    'grey': SET1COLORS[8]\n}\ncode_configs = {\n    'bc-nn': (r'\\textsc{BC+NN}', SET1['blue']),\n    'bc-rmp': (r'\\textsc{BC+RMP}', SET1['purple']),\n    'code-nn': (r'\\textsc{CODE+NN}', SET1['green']),\n    'code-rmp': (r'\\textsc{CODE+RMP}', SET2['lightgreen']),\n    'order': [\n        'bc-nn', 'bc-rmp', 'code-nn', 'code-rmp']\n}\n\nrmp2_configs = {\n    'rmp': (r'\\textsc{RMP}', SET2['lightgreen']),\n    'rmp-obs-feat': (r'\\textsc{RMP-RESIDUAL}', SET1['blue']),\n    'nn': (r'\\textsc{NN}', 'gray'), # SET1['grey']),\n    'nn-residual': (r'\\textsc{NN-RESIDUAL}', 'indianred'), # SET1['red']),\n    'order': [\n        'rmp-obs-feat', 'rmp', 'nn-residual', 'nn']\n}\n\ngtc_configs = {\n    'rmp-obs-feat': (r'\\textsc{STRUCTURED}', [0.4627451, 0.7254902, 0.]),\n    'nn': (r'\\textsc{NN}', 'gray'), # SET1['grey']),\n    'order': [\n        'rmp-obs-feat', 'nn']\n}\n\nclass Configs(object):\n    def __init__(self, style=None, colormap=None):\n        if not style:\n            self.configs = None\n            if colormap is None: \n                c1 = iter(cm.get_cmap('Set1').colors)\n                c2 = iter(cm.get_cmap('Set2').colors)\n                c3 = iter(cm.get_cmap('Set3').colors)\n                self.colors = chain(c1, c2, c3)\n            else:\n                self.colors = iter(cm.get_cmap(colormap).colors)\n        else:\n            self.configs = globals()[style + '_configs']\n            for exp_name in self.configs['order']:\n                assert exp_name in self.configs, 'Unknown exp: {}'.format(exp_name)\n\n    def color(self, exp_name):\n        if self.configs is None:\n            color = next(self.colors)\n        else:\n            color = self.configs[exp_name][1]\n        return color\n\n    def label(self, exp_name):\n        if self.configs is None:\n            return exp_name\n        return self.configs[exp_name][0]\n\n    def sort_dirs(self, dirs):\n        if self.configs is None:\n            return dirs\n\n        def custom_key(exp_name):\n            if exp_name in self.configs['order']:\n                return self.configs['order'].index(exp_name)\n            else:\n                return 100\n        return sorted(dirs, key=custom_key)\n" ]
[ [ "matplotlib.cm.get_cmap" ] ]
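A hedged usage sketch for the Configs helper above, not part of the record. The import path follows the record's file_path, and the 'rmp2' style and experiment keys come from its code; the plotted lines are placeholders. Note the \textsc labels render as small caps only when matplotlib's LaTeX text is enabled, otherwise they appear literally.

import matplotlib.pyplot as plt
from rmp2.utils.plot_configs import Configs

cfg = Configs(style='rmp2')  # resolves to rmp2_configs via globals()
# sort_dirs orders experiments by their index in rmp2_configs['order']
for exp in cfg.sort_dirs(['nn', 'rmp', 'nn-residual', 'rmp-obs-feat']):
    plt.plot([0, 1], [0, 1], color=cfg.color(exp), label=cfg.label(exp))
plt.legend()
plt.show()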
honey-sangtani-c5i/retail-demo-store
[ "c76e03b2a1750d9ec16f2dd8c952b8c4c8a53ef8" ]
[ "generators/datagenerator/sessions.py" ]
[ "# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: MIT-0\n\nimport datetime\nfrom collections import UserList\nfrom datagenerator.funnel import Funnel\nimport random\nimport numpy as np\n\nclass Sessions(UserList):\n  def __init__(self, from_datetime, to_datetime, event_templates, num_sessions, user_pool):\n    # defines the % of users for each of the 24 hourly time slots, starting at midnight\n    self.percent_users = (1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 19, 20, 20, 9, 5, 4, 3, 1, 1, 1, 1, 1)\n    self.event_templates = event_templates\n    self.num_sessions = num_sessions\n    # Start and end datetimes bounding the generated sessions\n    self.from_datetime = from_datetime\n    self.to_datetime = to_datetime\n    self.data = []\n\n    for hourly_users in self.user_time_slots():\n      for i in range(hourly_users[1]):\n        active_user = np.random.binomial(1, .5)\n        user = user_pool.user(active_user)\n        # Pick a random funnel - note that the same user can repeat the same funnel several times potentially\n        funnel = random.choice(self.event_templates) \n        self.data.append(Funnel(hourly_users[0], funnel, user))\n\n  # Yields (timestamp, session count) tuples, one for each hour from start to end\n  def user_time_slots(self):\n    curr = self.from_datetime\n    while curr < self.to_datetime:\n      yield (curr, int(self.num_sessions * (self.percent_users[curr.hour] / 100)))\n      curr += datetime.timedelta(hours=1)" ]
[ [ "numpy.random.binomial" ] ]
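For illustration only, a standalone sketch of the hourly scheduling math in Sessions.user_time_slots above: each hour is allotted int(num_sessions * percent_users[hour] / 100) sessions, so with the record's percentages (which sum to 100) a day's total lands exactly on num_sessions whenever every hourly share is a whole number.

import datetime

percent_users = (1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 19, 20, 20, 9,
                 5, 4, 3, 1, 1, 1, 1, 1)  # one entry per hour; sums to 100
num_sessions = 1000
curr = datetime.datetime(2023, 1, 1)
end = curr + datetime.timedelta(days=1)
total = 0
while curr < end:
    total += int(num_sessions * (percent_users[curr.hour] / 100))
    curr += datetime.timedelta(hours=1)
print(total)  # 1000 here, since every hourly share divides evenly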
iojon/PyOgg
[ "b03f4a7f3a4e677ab2d4fb9a413be32eb16f0b2d" ]
[ "tests/test_opus_file_stream.py" ]
[ "import pytest\nimport pyogg\nimport os\n\nos.chdir(os.path.dirname(__file__))\n\ndef test_error_in_filename():\n    # Load a non-existent file\n    filename = \"does-not-exist.opus\"\n    with pytest.raises(pyogg.PyOggError):\n        opus_stream = pyogg.OpusFileStream(filename)\n\n    \ndef test_total_length():\n    # Load the demonstration file that is exactly 5 seconds long\n    filename = \"../examples/left-right-demo-5s.opus\"\n    \n    # Open the file using OpusFileStream, which does not read the entire\n    # file immediately.\n    opus_stream = pyogg.OpusFileStream(filename)\n\n    # Loop through the OpusFileStream until we've read all the data\n    samples_read = 0\n    while True:\n        # Read the next part of the stream\n        buf = opus_stream.get_buffer_as_array()\n\n        # Check if we've reached the end of the stream\n        if buf is None:\n            break\n\n        # Increment the number of samples read\n        samples_read += buf.shape[0]\n\n    expected_duration_seconds = 5\n    samples_per_second = opus_stream.frequency\n    expected_duration_samples = (\n        expected_duration_seconds\n        * samples_per_second\n    )\n    duration_samples = samples_read\n    assert duration_samples == expected_duration_samples\n\n\ndef test_same_data_as_opus_file():\n    # Load the demonstration file that is exactly 5 seconds long\n    filename = \"../examples/left-right-demo-5s.opus\"\n\n    # Open the file using OpusFile to read the entire file into memory\n    opus_file = pyogg.OpusFile(filename)\n    \n    # Open the file (again) using OpusFileStream, which does not read\n    # the entire file immediately.\n    opus_stream = pyogg.OpusFileStream(filename)\n\n    # Loop through the OpusFileStream until we've read all the data\n    buf_all = bytes()\n    while True:\n        # Read the next part of the stream\n        buf = opus_stream.get_buffer()\n\n        # Check if we've reached the end of the stream\n        if buf is None:\n            break\n\n        # Add the bytes we've read to buf_all.  Note that this\n        # technique isn't efficient and shouldn't be used in\n        # production code.\n        buf_all += buf\n        \n    assert buf_all == opus_file.buffer\n    \n    \ndef test_same_data_as_opus_file_using_as_array():\n    import numpy # type: ignore\n    \n    # Load the demonstration file that is exactly 5 seconds long\n    filename = \"../examples/left-right-demo-5s.opus\"\n\n    # Open the file using OpusFile to read the entire file into memory\n    opus_file = pyogg.OpusFile(filename)\n    \n    # Open the file (again) using OpusFileStream, which does not read\n    # the entire file immediately.\n    opus_stream = pyogg.OpusFileStream(filename)\n\n    # Loop through the OpusFileStream until we've read all the data\n    buf_all = None\n    while True:\n        # Read the next part of the stream\n        buf = opus_stream.get_buffer_as_array()\n\n        # Check if we've reached the end of the stream\n        if buf is None:\n            break\n\n        # Add the bytes we've read to buf_all.  Note that this\n        # technique isn't efficient and shouldn't be used in\n        # production code.\n        if buf_all is None:\n            buf_all = buf\n        else:\n            buf_all = numpy.concatenate(\n                (buf_all, buf)\n            )\n\n    # Check that every byte is identical for both buffers\n    assert numpy.all(buf_all == opus_file.as_array())\n    \n" ]
[ [ "numpy.concatenate" ] ]
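The tests above flag repeated bytes concatenation as inefficient; a common alternative is to collect chunks in a list and join once at the end. This sketch is not from the dataset and assumes pyogg is installed and the record's demo file exists.

import pyogg

stream = pyogg.OpusFileStream("../examples/left-right-demo-5s.opus")
chunks = []
while True:
    buf = stream.get_buffer()
    if buf is None:  # end of stream, as in the tests above
        break
    chunks.append(buf)
audio = b"".join(chunks)  # one O(n) join instead of O(n^2) appends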
smaeyama/mzprojection
[ "5a05252006bcfdf4a33faf1c089ea1a95aa8b349" ]
[ "python/example.py" ]
[ "#!/usr/bin/env python\n\n\nif __name__ == '__main__':\n import numpy as np\n from mzprojection import mzprojection_long_time_series\n #from mzprojection import mzprojection_ensemble_of_time_series\n\n #= Read sample data =\n indata = np.loadtxt('../sample_data/sample_time_series.dat')\n time = indata[:,0] # Time\n u_raw = indata[:,1] + 1.0j * indata[:,2] # Variable of interest u(t)\n dudt_raw = indata[:,3] + 1.0j * indata[:,4] # = du/dt\n f_raw = indata[:,5] + 1.0j * indata[:,6] # Analyzed data f(t)\n\n #= Parameters for ensemble average =\n nrec = len(time) # Total record number \n ista = 2000 # Start record number for sampling \n nperiod = 500 # Length of a sample \n nshift = 1 # Length of time shift while sampling\n delta_t = time[1] - time[0] # Time step size \n\n #= Mori-Zwanzig projection =\n #\n # f(t) = Omega*u(t) - int_0^t Gamma(s)*u(t-s) ds + r(t)\n #\n omega, memoryf, s_raw, r_raw, uu, ududt, fdudt, rr, rdudt, ru, fu, ff = mzprojection_long_time_series(nrec, ista, nperiod, nshift, delta_t, u_raw, dudt_raw, f_raw)\n\n ##= Mori-Zwanzig projection =\n ##\n ## f(t) = Omega*u(t) - int_0^t Gamma(s)*u(t-s) ds + r(t)\n ##\n #nsample = int((nrec-ista-nperiod)/nshift) + 1\n #u = np.zeros([nperiod,nsample], dtype=np.complex128)\n #dudt = np.zeros([nperiod,nsample], dtype=np.complex128)\n #f = np.zeros([nperiod,nsample], dtype=np.complex128)\n #for iperiod in range(nperiod):\n # u[ iperiod,:] = u_raw[ista+iperiod:ista+iperiod+nshift*(nsample-1)+1:nshift]\n # dudt[iperiod,:] = dudt_raw[ista+iperiod:ista+iperiod+nshift*(nsample-1)+1:nshift]\n # f[ iperiod,:] = f_raw[ista+iperiod:ista+iperiod+nshift*(nsample-1)+1:nshift]\n #omega, memoryf, s, r, uu, ududt, fdudt, rr, rdudt, ru, fu, ff = mzprojection_ensemble_of_time_series(nsample, nperiod, delta_t, u, dudt, f)\n\n #= Output results =\n outdata = np.real(np.vstack([time, u_raw.real, u_raw.imag, dudt_raw.real, dudt_raw.imag, f_raw.real, f_raw.imag, (omega*u_raw).real, (omega*u_raw).imag, s_raw.real, s_raw.imag, r_raw.real, r_raw.imag]))\n outdata = outdata.transpose()\n np.savetxt('out_timeevolution.dat', outdata, fmt='%17.7e')\n outdata = np.real(np.vstack([time[0:nperiod], uu.real, uu.imag, fu.real, fu.imag, ududt.real, ududt.imag, fdudt.real, fdudt.imag]))\n outdata = outdata.transpose()\n np.savetxt('out_correlation.dat', outdata, fmt='%17.7e')\n outdata = np.real(np.vstack([time[0:nperiod], memoryf.real, memoryf.imag, (rr/uu[0]).real, (rr/uu[0]).imag, (rdudt/uu[0]).real, (rdudt/uu[0]).imag]))\n outdata = outdata.transpose()\n np.savetxt('out_check_memoryfunc.dat', outdata, fmt='%17.7e')\n outdata = np.real(np.vstack([time[0:nperiod], rr.real, rr.imag, (ru/np.sqrt(np.abs(rr[0]*uu[0]))).real, (ru/np.sqrt(np.abs(rr[0]*uu[0]))).imag, (fu/np.sqrt(np.abs(ff[0]*uu[0]))).real, (fu/np.sqrt(np.abs(ff[0]*uu[0]))).imag]))\n outdata = outdata.transpose()\n np.savetxt('out_check_r.dat', outdata, fmt='%17.7e')\n print(omega)\n\n" ]
[ [ "numpy.savetxt", "numpy.vstack", "numpy.abs", "numpy.loadtxt" ] ]
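A hedged consistency check for the decomposition produced by the example above, not part of the record. Its docstring gives f(t) = Omega*u(t) - int_0^t Gamma(s)*u(t-s) ds + r(t), and the out_timeevolution.dat stacking suggests s_raw holds the memory integral on the same grid as f_raw; if so, running this after the projection call should print a value near zero over the sampled window (the split is only meaningful from record ista onward).

import numpy as np  # already imported in the example's __main__ block

# Reconstruct f(t) from markovian term + memory term + residual noise
recon = omega * u_raw + s_raw + r_raw
print(np.max(np.abs(f_raw[ista:] - recon[ista:])))  # expected to be small if the grids match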
veritas9872/Knowledge-Distillation-Task
[ "d260b1057c96cfc52af8ff7a0775befbd102f59d" ]
[ "train/grid_search.py" ]
[ "\"\"\"\nCode for applying grid search to find the best parameters for knowledge distillation.\nThe distillation ratio and temperature parameters are being tuned in this search.\n\"\"\"\nimport torch\n\nfrom train.distill_knowledge import main\nfrom utils.options import knowledge_distillation_options\n\n\ndef grid_search():\n options = dict(\n train_method='Search'\n )\n temperatures = [1, 2, 4, 8, 16, 32, 64]\n distill_ratios = [1., 0.99, 0.95, 0.9, 0.75, 0.5, 0.25, 0.1, 0.05, 0.01, 0.]\n\n for temp in temperatures:\n for dist in distill_ratios:\n options['temperature'] = temp\n options['distill_ratio'] = dist\n opt = knowledge_distillation_options(**options).parse_args()\n # Reproducibility settings. Seeding must be repeated at the start of every run.\n torch.random.manual_seed(9872)\n main(opt)\n\n\nif __name__ == '__main__':\n # Reproducibility settings.\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n grid_search()\n" ]
[ [ "torch.random.manual_seed" ] ]
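The grid above tunes 'temperature' and 'distill_ratio'. The repo's actual loss lives in train.distill_knowledge and may differ; this sketch, not from the dataset, shows only the conventional Hinton et al. (2015) formulation those two parameters usually control.

import torch
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, targets, temperature, distill_ratio):
    # Soft-target term: KL between temperature-softened distributions,
    # scaled by T^2 to keep gradient magnitudes comparable across temperatures.
    soft = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=1),
        F.softmax(teacher_logits / temperature, dim=1),
        reduction='batchmean') * temperature ** 2
    # Hard-target term: ordinary cross-entropy against the true labels.
    hard = F.cross_entropy(student_logits, targets)
    return distill_ratio * soft + (1.0 - distill_ratio) * hard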
AnthonyC958/SNe-Lensing
[ "1d1fc953457de298a55222b1dba06427e35c225b" ]
[ "MICE.py" ]
[ "import Convergence\nimport cones\nfrom astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nfrom matplotlib.collections import PatchCollection\nimport random\nfrom scipy.stats import rankdata\nimport pickle\nfrom scipy.signal import savgol_filter\nimport collections\n\ncolours = [[0, 150/255, 100/255], [225/255, 149/255, 0], [207/255, 0, 48/255], [145/255, 4/255, 180/255],\n 'C4', 'C9', 'C6', 'C7', 'C8', 'C5']\nblue = [23/255, 114/255, 183/255, 0.75]\norange = [255/255, 119/255, 15/255, 0.75]\ngreen = [0, 150/255, 100/255, 0.75]\nyellow = [253/255, 170/255, 0, 0.75]\ngrey = [0.75, 0.75, 0.75]\nRADII = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,\n 0.8, 0.9, 1.0, 1.25, 1.5, 1.75, 2.0,\n 2.25, 2.5, 2.75, 3.0, 3.25, 3.5, 3.75,\n 4.0, 4.25, 4.5, 4.75, 5.0, 5.25, 5.5,\n 5.75, 6.0, 6.25, 6.5, 6.75, 7.0, 7.25,\n 7.5, 7.75, 8.0, 8.25, 8.5, 8.75, 9.0,\n 9.25, 9.5, 9.75, 10.0, 10.25, 10.5, 10.75,\n 11.0, 11.25, 11.5, 11.75, 12.0, 12.25, 12.5,\n 12.75, 13.0, 13.25, 13.5, 13.75, 14.0, 14.5,\n 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0,\n 18.5, 19.0, 19.5, 20.0, 21.0, 22.0, 23.0,\n 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0]\n\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['font.serif'] = 'Stixgeneral'\nplt.rcParams['mathtext.fontset'] = 'stix'\nplt.rcParams['axes.labelsize'] = 20\nplt.rcParams['axes.titlesize'] = 16\nplt.rcParams['xtick.labelsize'] = 16\nplt.rcParams['ytick.labelsize'] = 16\nplt.rcParams['legend.fontsize'] = 16\nplt.rcParams['figure.titlesize'] = 20\nplt.rcParams['xtick.top'] = True\nplt.rcParams['ytick.right'] = True\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\nplt.rcParams['xtick.minor.visible'] = True\nplt.rcParams['ytick.minor.visible'] = True\n\n\ndef deep_update(old_dict, update_to_dict):\n for key, value in update_to_dict.items():\n if isinstance(value, collections.Mapping):\n old_dict[key] = deep_update(old_dict.get(key, {}), value)\n else:\n old_dict[key] = value\n return old_dict\n\n\ndef get_data():\n with fits.open('MICEsim5.fits') as hdul1:\n RA = hdul1[1].data['ra']\n DEC = hdul1[1].data['dec']\n kap = hdul1[1].data['kappa']\n z = hdul1[1].data['z_v']\n ID = hdul1[1].data['id']\n # Comoving = hdul1[1].data['d_c_v']\n\n cut_RA = np.array(RA)[[z >= 0.01]][::25]\n cut_DEC = np.array(DEC)[[z >= 0.01]][::25]\n cut_kap = np.array(kap)[[z >= 0.01]][::25]\n cut_ID = np.array(ID)[[z >= 0.01]][::25]\n cut_z = np.array(z)[[z >= 0.01]][::25]\n cut_data = {'RA': cut_RA, 'DEC': cut_DEC, 'z': cut_z, 'kappa': cut_kap, 'id': cut_ID}\n print(len(cut_z))\n return cut_data\n\n\ndef make_big_cone(data, redo=False):\n if redo:\n RAs = data['RA']\n DECs = data['DEC']\n zs = data['z']\n kappas = data['kappa']\n centre = [(min(RAs) + max(RAs)) / 2, (min(DECs) + max(DECs)) / 2]\n radius = round(min(max(RAs) - centre[0], centre[0] - min(RAs), max(DECs) - centre[1], centre[1] - min(DECs)), 2)\n # radius = 3.5\n big_cone = {'Zs': zs[(RAs - centre[0]) ** 2 + (DECs - centre[1]) ** 2 <= radius ** 2],\n 'kappa': kappas[(RAs - centre[0]) ** 2 + (DECs - centre[1]) ** 2 <= radius ** 2]}\n for i in [1, 2]:\n big_cone['Zs'] = np.append(big_cone['Zs'], zs[(RAs - centre[0] + 2 * i * radius) ** 2 +\n (DECs - centre[1]) ** 2 <= radius ** 2])\n big_cone['kappa'] = np.append(big_cone['kappa'], kappas[(RAs - centre[0] + 2 * i * radius) ** 2 +\n (DECs - centre[1]) ** 2 <= radius ** 2])\n big_cone['Zs'] = np.append(big_cone['Zs'], zs[(RAs - centre[0] - 2 * i * radius) ** 2 +\n (DECs - centre[1]) ** 2 <= radius ** 2])\n 
big_cone['kappa'] = np.append(big_cone['kappa'], kappas[(RAs - centre[0] - 2 * i * radius) ** 2 +\n (DECs - centre[1]) ** 2 <= radius ** 2])\n\n pickle_out = open(f\"big_cone.pickle\", \"wb\")\n pickle.dump(big_cone, pickle_out)\n pickle_out.close()\n else:\n pickle_in = open(f\"big_cone.pickle\", \"rb\")\n big_cone = pickle.load(pickle_in)\n pass\n\n return big_cone\n\n\ndef find_expected(big_cone, r_big, bins, redo=False, plot=False):\n max_z = 1.41\n chi_bin_widths, chi_bins, z_bins, z_bin_widths = Convergence.create_z_bins(0.01, max_z, bins, OM=0.25, OL=0.75,\n h=0.7)\n limits = np.cumsum(z_bin_widths) + z_bins[0]\n limits = np.insert(limits, 0, 0)\n expected = {}\n if redo:\n expected_big = []\n for num1 in range(len(limits) - 1):\n expected_big.append(np.count_nonzero(np.logical_and(big_cone['Zs'] > limits[num1], big_cone['Zs'] <\n limits[num1 + 1])) / 5.0)\n # Made 5 cones, so take average\n # plt.plot(limits[1:], [np.cumsum(expected_big)[i] * (12.0 / r_big / 60.0) ** 2 for i in range(len(expected_big))],\n # marker='o', markersize=2.5, color=colours[0])\n # plt.xlabel('$z$')\n # plt.ylabel('Cumulative Count')\n # plt.show()\n\n for cone_radius in RADII[10:]:\n expected[f\"Radius{str(cone_radius)}\"] = [expected_big[i] * (cone_radius / r_big / 60.0) ** 2\n for i in range(len(expected_big))]\n\n pickle_out = open(\"sparseMICEexpected.pickle\", \"wb\")\n pickle.dump(expected, pickle_out)\n pickle_out.close()\n else:\n pickle_in = open(\"MICEexpected.pickle\", \"rb\")\n expected = pickle.load(pickle_in)\n\n if plot:\n for cone_radius in RADII[10::20]:\n plt.plot([0, 5], [0, 0], color=grey, linestyle='--')\n plt.plot((limits[1:]+limits[:-1])/2.0, expected[f\"Radius{str(cone_radius)}\"], marker='o', markersize=2.5,\n color=colours[0])\n plt.xlabel('$z$')\n plt.ylabel('Expected Count')\n plt.xlim([0, 1.5])\n plt.show()\n\n return [limits, expected, chi_bin_widths, chi_bins, z_bins]\n\n\ndef get_random(data, redo=False, seed=1337):\n RAs = np.array(data['RA'])\n DECs = np.array(data['DEC'])\n # d_cs = np.array(data['d_c'])\n zs = np.array(data['z'])\n kappas = np.array(data['kappa'])\n\n # Don't want to deal with up to 30' (0.5 degree) cones that have any portion outside left and right bounds.\n SN_DECs = DECs[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n SN_zs = zs[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n SN_kappas = kappas[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n # SN_d_cs = d_cs[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n SN_RAs = RAs[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n\n # SN_RAs = SN_RAs[np.logical_and(SN_DECs < max(DECs) - 0.5, SN_DECs > min(DECs) + 0.5)]\n # SN_kappas = SN_kappas[np.logical_and(SN_DECs < max(DECs) - 0.5, SN_DECs > min(DECs) + 0.5)]\n # SN_zs = SN_zs[np.logical_and(SN_DECs < max(DECs) - 0.5, SN_DECs > min(DECs) + 0.5)]\n # SN_DECs = SN_DECs[np.logical_and(SN_DECs < max(DECs) - 0.5, SN_DECs > min(DECs) + 0.5)]\n\n if redo:\n # rhos = np.zeros(100)\n # Pick random sample\n # for j in range(100):\n random.seed(1337)\n rand_samp_size = 1500\n indices = random.sample(range(len(SN_zs)), rand_samp_size)\n rand_zs = SN_zs[indices]\n rand_RAs = SN_RAs[indices]\n rand_DECs = SN_DECs[indices]\n rand_kappas = SN_kappas[indices]\n # dists = SN_d_cs[indices] * (1 + rand_zs)\n # print(rand_RAs, rand_DECs)\n\n # Add scatter to distance moduli\n dists = []\n rand_chis = []\n for z in rand_zs:\n chi_to_z = Convergence.comoving(np.linspace(0, z, 1001), OM=0.25, OL=0.75, h=0.7)\n 
dists.append(chi_to_z[-1] * (1 + z))\n rand_chis.append(chi_to_z[-1])\n mus = 5 * np.log10(np.array(dists) / 10 * 1E9)\n mu_diff = np.zeros(len(mus))\n mu_diff += - (5.0 / np.log(10) * rand_kappas)\n # print(mu_diff)\n # plt.plot(rand_zs, mus, ls='', marker='.')\n # plt.show()\n # exit()\n\n # conv_rank = rankdata(mu_diff)\n random.seed(seed)\n for i in range(len(mu_diff)):\n mu_diff[i] += random.gauss(0.0, rand_zs[i] * 0.1 / 1.5 + 0.15) # Width is 0.15 at z=0, 0.25 at z=1.5\n #\n # mu_rank = rankdata(mu_diff)\n # diff = np.abs(conv_rank - mu_rank)\n # rhos[j] = 1 - 6 / (len(rand_kappas) * (len(rand_kappas) ** 2 - 1)) * np.sum(diff ** 2)\n #\n # print(np.mean(rhos), \" +/- \", np.std(rhos))\n # exit()\n rand_mus = mus + mu_diff\n rand_errs = np.array([abs(random.uniform(0.05+0.1*rand_zs[i], 0.1+0.45*rand_zs[i]))\n for i in range(rand_samp_size)])\n SN_data = {'mu_diff': mu_diff, 'SNZ': rand_zs, 'SNkappa': rand_kappas,\n 'SNRA': rand_RAs, 'SNDEC': rand_DECs, 'SNMU': rand_mus, 'SNMU_ERR': rand_errs}\n pickle_out = open(\"sparse2MICE_SN_data.pickle\", \"wb\")\n pickle.dump(SN_data, pickle_out)\n pickle_out.close()\n print(\"Finished SN_data\")\n # exit()\n # pickle_in = open(\"sparse_lenses.pickle\", \"rb\")\n # lenses = pickle.load(pickle_in)\n lenses = {}\n for cone_radius in RADII[29::2]:\n lenses[f\"Radius{str(cone_radius)}\"] = {}\n for num, (SRA, SDE, SZ, Sk, SM, SE) in enumerate(zip(rand_RAs, rand_DECs, rand_zs, rand_kappas, rand_mus,\n rand_errs)):\n lenses[f\"Radius{str(cone_radius)}\"][f'SN{int(num)+1}'] = {'RAs': [], 'DECs': [], 'Zs': [], 'SNZ': SZ,\n 'SNMU': SM, 'SNMU_ERR': SE, 'SNRA': SRA,\n 'SNkappa': Sk, 'SNDEC': SDE, 'WEIGHT': 1.0}\n if SDE > 3.6 - cone_radius/60.0:\n h = SDE - (3.6 - cone_radius/60.0)\n elif SDE < -(0.0 - cone_radius/60.0):\n h = -(0.0 - cone_radius/60.0) - SDE\n else:\n h = 0.0\n theta = 2 * np.arccos(1 - h / (cone_radius/60.0))\n fraction_outside = 1 / (2 * np.pi) * (theta - np.sin(theta))\n lenses[f\"Radius{str(cone_radius)}\"][f'SN{int(num)+1}']['WEIGHT'] = 1.0 - fraction_outside\n\n cone_indices = [np.logical_and((RAs - SRA) ** 2 + (DECs - SDE) ** 2 <= (cone_radius / 60.0) ** 2, zs < SZ)]\n lenses[f\"Radius{str(cone_radius)}\"][f'SN{int(num)+1}']['RAs'].append(RAs[cone_indices])\n lenses[f\"Radius{str(cone_radius)}\"][f'SN{int(num)+1}']['DECs'].append(DECs[cone_indices])\n lenses[f\"Radius{str(cone_radius)}\"][f'SN{int(num)+1}']['Zs'].append(zs[cone_indices])\n print(f\"Finished radius {str(cone_radius)}'\")\n pickle_out = open(\"sparse_lenses.pickle\", \"wb\")\n pickle.dump(lenses, pickle_out)\n\n # prev_rad = 0.0\n # for cone_radius in RADII[10:]:\n # lenses[f\"Radius{str(cone_radius)}\"] = {}\n # for num, (RA, DEC) in enumerate(zip(rand_RAs, rand_DECs)):\n # cone_indices = [np.logical_and((RAs - RA) ** 2 + (DECs - DEC) ** 2 >= (prev_rad / 60.0) ** 2,\n # (RAs - RA) ** 2 + (DECs - DEC) ** 2 <= (cone_radius / 60.0) ** 2)]\n # lenses[f\"Radius{str(cone_radius)}\"][f\"Shell{str(num+1)}\"] = np.where(cone_indices[0] == 1)\n # print(f\"Sorted {num+1}/{rand_samp_size} for radius {cone_radius}'\")\n # heights = np.zeros(rand_samp_size)\n # outsides_u = [rand_DECs > 3.6 - cone_radius / 60.0]\n # heights[outsides_u] = rand_DECs[outsides_u] - (3.6 - cone_radius / 60.0)\n # outsides_d = [rand_DECs < cone_radius / 60.0]\n # heights[outsides_d] = cone_radius / 60.0 - rand_DECs[outsides_d]\n # thetas = 2 * np.arccos(1 - heights / (cone_radius / 60.0))\n # fraction_outside = 1 / (2 * np.pi) * (thetas - np.sin(thetas))\n # weights = 1.0 - fraction_outside\n # 
lenses[f\"Radius{str(cone_radius)}\"]['WEIGHT'] = weights\n # print(f\"Sorted radius {cone_radius}'\")\n # prev_rad = cone_radius\n # pickle_out = open(f\"sparse2_random_cones.pickle\", \"wb\")\n # pickle.dump(lenses, pickle_out)\n # pickle_out.close()\n exit()\n else:\n pickle_in = open(\"sparse2_lenses.pickle\", \"rb\")\n lenses = pickle.load(pickle_in)\n\n return lenses\n\n\ndef plot_cones(data, plot_hist=False, cone_radius=12.0):\n \"\"\"Plots all galaxies and SNe along with visualisation of cones and galaxies contributing to lensing.\n\n Input:\n cut_data -- dictionary that contains all data (RA, DEC, z, etc.) of galaxies.\n sorted_data -- dictionary that contains all information for every SN sorted into cones.\n plot_hist -- boolean that determines if a histogram of the galaxy and SNe distribution is plotted. Defaults to\n False.\n cone_radius -- the radius of the cones. Defaults to 12'.\n \"\"\"\n pickle_in = open(\"sparseMICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n # lenses = sorted_data[f\"Radius{str(cone_radius)}\"]\n # # Go through all SNe\n # for SN_num, key in enumerate(lenses.keys()):\n # if key != 'WEIGHT':\n # cone_indices = np.array([], dtype=np.int16)\n # # Get shells from all previous RADII\n # for r in RADII[0:np.argmin(np.abs(RADII - np.array(cone_radius))) + 1]:\n # cone_indices = np.append(cone_indices, sorted_data[f\"Radius{r}\"][key])\n # # Get redshifts of all galaxies in each SN cone\n # cone_zs[key] = all_zs[cone_indices]\n # print(lenses.keys())\n patches = []\n SNRA = SN_data['SNRA']\n SNDEC = SN_data['SNDEC']\n SNz = SN_data['SNZ']\n for x, y in zip(SNRA, SNDEC):\n circle = Circle((x, y), cone_radius/60.0)\n patches.append(circle)\n\n RA_gal = data['RA']\n DEC_gal = data['DEC']\n z_gal = data['z']\n fig, ax = plt.subplots()\n ax.plot(RA_gal, DEC_gal, marker='o', linestyle='', markersize=1, color=[0.5, 0.5, 0.5])\n contRAs = []\n contDECs = []\n for ra, dec, z in zip(SNRA, SNDEC, SNz):\n indices1 = np.logical_and(z_gal <= z, (RA_gal - ra) ** 2 + (DEC_gal - dec) ** 2 <=\n (cone_radius / 60.0) ** 2)\n contRAs = np.append(contRAs, RA_gal[indices1])\n contDECs = np.append(contDECs, DEC_gal[indices1])\n ax.plot(contRAs, contDECs, marker='o', linestyle='', markersize=3, color=colours[1])\n p = PatchCollection(patches, alpha=0.4, color=colours[1])\n ax.add_collection(p)\n\n ax.plot(SNRA, SNDEC, marker='o', linestyle='', markersize=3, label='Supernova', color=colours[3])\n plt.xlabel('$\\\\alpha$')\n plt.ylabel('$\\delta$')\n plt.text(27, -0.8, f\"{cone_radius}' radius\")\n # plt.legend(loc='lower right')\n plt.axis('equal')\n plt.xlim([10.0, 11.5])\n plt.ylim([1.5, 2.5])\n plt.show()\n\n if plot_hist:\n labels = ['Galaxies', 'Supernovae']\n cols = [green, yellow]\n for num, z in enumerate([np.array(z_gal), np.array(SNz)]):\n counts, bin_edges = np.histogram(z, bins=np.arange(0, 1.5 + 0.05, 0.05))\n plt.bar(0.5 * (bin_edges[1:] + bin_edges[:-1]), counts / max(counts), 0.05, linewidth=1, fc=cols[num],\n label=f'{labels[num]}', edgecolor=colours[num])\n plt.xlabel('$z$')\n plt.ylabel('Normalised Count')\n plt.xlim([0, 1.45])\n plt.tight_layout()\n plt.legend(frameon=0)\n\n plt.show()\n\n\ndef find_convergence(gal_data, exp_data, redo=False, plot_scatter=False, plot_total=False, weighted=False, fis=False,\n impact=False):\n \"\"\"Finds the convergence along each line of sight to a SN for a variety of cone_widths.\n\n Inputs:\n exp_data -- dictionary containing all expected counts per bin per cone width.\n SNz -- redshifts of each SN.\n redo -- 
boolean that determines whether convergence is calculated or loaded. Dafault false.\n plot_scatter -- boolean that determined whether scatter plot of convergence per SN redshift is plotted.\n Default false.\n plot_total -- boolean that determines whether total convergence per cone radius is plotted. Default false.\n \"\"\"\n all_zs = gal_data['z']\n all_RAs = gal_data['RA']\n all_DECs = gal_data['DEC']\n limits = exp_data[0]\n chi_widths = exp_data[2]\n chi_bis = exp_data[3]\n z_bins = exp_data[4]\n fine_z = np.linspace(0, 1.5, 1001)\n Dpara_fine = Convergence.comoving(fine_z)\n if redo:\n kappa = {}\n if fis or impact:\n pickle_in = open(\"sparseMICE_SN_data_fis.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n pickle_in = open(\"sparse_random_cones_fis.pickle\", \"rb\")\n else:\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n pickle_in = open(\"random_cones_new.pickle\", \"rb\")\n lens_data = pickle.load(pickle_in)\n\n for cone_radius in RADII[29:]:\n if fis or impact:\n SN_zs = SN_data[f\"Radius{cone_radius}\"][\"SNZ\"]\n else:\n SN_zs = SN_data[\"SNZ\"]\n cone_zs = {}\n cone_RAs = {}\n cone_DECs = {}\n if weighted:\n SN_weights = lens_data[f\"Radius{cone_radius}\"][\"WEIGHT\"]\n # Go through all SNe\n for SN_num, key in enumerate(lens_data[f\"Radius{cone_radius}\"].keys()):\n if key != 'WEIGHT':\n cone_indices = np.array([], dtype=np.int16)\n # Get shells from all previous RADII\n for r in RADII[10:np.argmin(np.abs(RADII - np.array(cone_radius))) + 1]:\n cone_indices = np.append(cone_indices, lens_data[f\"Radius{r}\"][key])\n # Get redshifts of all galaxies in each SN cone\n cone_zs[key] = all_zs[cone_indices]\n cone_RAs[key] = all_RAs[cone_indices]\n cone_DECs[key] = all_DECs[cone_indices]\n # print(cone_DECs)\n if impact:\n pickle_in = open(\"MICEexpected_IPs.pickle\", \"rb\")\n expected_data = pickle.load(pickle_in)\n expected_counts = expected_data[f\"Radius{cone_radius}\"]\n else:\n expected_counts = exp_data[1][f\"Radius{str(cone_radius)}\"]\n kappa[f\"Radius{str(cone_radius)}\"] = {\"SNkappa\": [], \"Total\": 0, \"SNallkappas\": {}}\n d_arr = {}\n counts = {}\n for num, (key, zs) in enumerate(cone_zs.items()):\n bin_c = range(int(np.argmin(np.abs(limits - SN_zs[num]))))\n counts[key] = np.zeros(len(bin_c))\n for num2 in bin_c:\n tmp = [np.logical_and(limits[num2] < zs, zs <= limits[num2 + 1])]\n if weighted:\n counts[key][num2] = np.count_nonzero(tmp) / SN_weights[num]\n elif impact:\n thetas = (((cone_RAs[key] - SN_data[f\"Radius{cone_radius}\"][\"SNRA\"][num]) ** 2 +\n (cone_DECs[key] - SN_data[f\"Radius{cone_radius}\"][\"SNDEC\"][num]) ** 2) ** 0.5 *\n np.pi / 180)\n if len(thetas >= 0):\n Dperps = thetas[np.array(thetas) != 0] * np.interp(cone_zs[key][np.array(thetas) != 0],\n fine_z, Dpara_fine) * 1000.0 / (\n 1 + np.array(cone_zs[key][np.array(thetas) != 0]))\n IPs = 1 / Dperps\n if len(IPs) == 0:\n counts[key][num2] = 0.0\n else:\n # print(cone_radius, key, num2, sum(IPs))\n counts[key][num2] = sum(IPs)\n else:\n counts[key][num2] = np.count_nonzero(tmp)\n\n chiSNs = []\n for z in SN_zs:\n chi = Convergence.comoving(np.linspace(0, z, 1001), OM=0.25, OL=0.75, h=0.7)\n chiSNs.append(chi[-1])\n # c_arr = []\n\n for num, (key, cs) in enumerate(counts.items()):\n print(cs)\n d_arr[key] = (cs - expected_counts[:len(cs)]) / expected_counts[:(len(cs))]\n SNkappa, allkappas = Convergence.general_convergence(chi_widths[:len(cs)], chi_bis[:len(cs)],\n z_bins[:len(cs)], d_arr[key], chiSNs[num], OM=0.25, h=0.7)\n 
kappa[f\"Radius{str(cone_radius)}\"][\"SNkappa\"].append(SNkappa)\n kappa[f\"Radius{str(cone_radius)}\"][\"SNallkappas\"][key] = allkappas\n # c_arr.append(cs)\n # s = plt.scatter([sum(c_arr[i]) for i in range(1500)], kappa[f\"Radius{str(cone_radius)}\"][\"SNkappa\"],\n # c=SN_zs, cmap='coolwarm')\n # cbar = plt.colorbar(s)\n # cbar.set_label('$z$')\n # plt.xlabel('Total Count')\n # plt.ylabel('$\\kappa$')\n # plt.show()\n\n kappa[f\"Radius{str(cone_radius)}\"][\"Total\"] = np.sum(kappa[f\"Radius{str(cone_radius)}\"][\"SNkappa\"])\n print(f\"Finished radius {str(cone_radius)}'\")\n if not fis:\n if weighted:\n pickle_out = open(\"MICEkappa_weighted.pickle\", \"wb\")\n elif impact:\n pickle_out = open(\"sparseMICEkappa_impact.pickle\", \"wb\")\n else:\n pickle_out = open(\"MICEkappa.pickle\", \"wb\")\n else:\n pickle_out = open(\"sparseMICEkappa_fis.pickle\", \"wb\")\n pickle.dump(kappa, pickle_out)\n pickle_out.close()\n else:\n if not fis:\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n if weighted:\n pickle_in = open(\"MICEkappa_weighted.pickle\", \"rb\")\n elif impact:\n pickle_in = open(\"sparseMICE_SN_data_fis.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n pickle_in = open(\"sparseMICEkappa_impact.pickle\", \"rb\")\n else:\n pickle_in = open(\"MICEkappa.pickle\", \"rb\")\n else:\n pickle_in = open(\"sparseMICE_SN_data_fis.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n pickle_in = open(\"sparseMICEkappa_fis.pickle\", \"rb\")\n kappa = pickle.load(pickle_in)\n\n for cone_radius in RADII[29:]:\n if fis or impact:\n SN_zs = SN_data[f\"Radius{cone_radius}\"][\"SNZ\"]\n SN_kappas = SN_data[f\"Radius{cone_radius}\"][\"SNkappa\"]\n else:\n SN_zs = SN_data[\"SNZ\"]\n SN_kappas = SN_data[\"SNkappa\"]\n bins = np.linspace(0.05, 1.4 - 0.05, 14)\n # print(bins)\n mean_kappa = []\n standard_error = []\n mean_MICEkappa = []\n standard_MICEerror = []\n conv = kappa[f\"Radius{str(cone_radius)}\"][\"SNkappa\"]\n\n for b in bins:\n ks = []\n MICEks = []\n for z, k, Mk in zip(SN_zs, conv, SN_kappas):\n if b - 0.05 < z <= b + 0.05:\n ks.append(k)\n MICEks.append(Mk)\n\n mean_kappa.append(np.mean(ks))\n mean_MICEkappa.append(np.mean(MICEks))\n standard_error.append(np.std(ks) / np.sqrt(len(ks)))\n standard_MICEerror.append(np.std(MICEks) / np.sqrt(len(MICEks)))\n\n if plot_scatter:\n conv = kappa[f\"Radius{str(cone_radius)}\"][\"SNkappa\"]\n ax = plt.subplot2grid((1, 4), (0, 0), colspan=3)\n # ax = plt.subplot2grid((1, 1), (0, 0))\n ax2 = plt.subplot2grid((1, 4), (0, 3))\n ax.set_ylabel(\"$\\kappa$\")\n ax.set_xlabel(\"$z$\")\n ax2.set_xlabel(\"Count\")\n ax.tick_params(labelsize=12)\n ax2.tick_params(labelsize=12)\n ax2.set_yticklabels([])\n plt.subplots_adjust(wspace=0, hspace=0)\n ax.plot([0, 1.42], [0, 0], color=[0.25, 0.25, 0.25], linestyle='--', zorder=10)\n ax.axis([0, 1.42, -0.05, 0.06])\n ax2.axis([0, 500, -0.05, 0.06])\n ax.set_xticklabels([0, 0.25, 0.50, 0.75, 1.00, 1.25])\n # ax.set_xticklabels([0, 0.2, 0.4, 0])\n ax.plot(SN_zs, conv, linestyle='', marker='o', markersize=2, color=colours[0], label='Cone Method')\n ax.plot(SN_zs, SN_kappas, linestyle='', marker='o', markersize=2, color=colours[1], label='MICE value')\n ax2.hist(conv, bins=np.arange(-0.05, 0.08 + 0.005, 0.005), orientation='horizontal',\n fc=green, edgecolor=colours[0])\n ax2.hist(SN_kappas, bins=np.arange(-0.05, 0.08 + 0.005, 0.005), orientation='horizontal',\n fc=yellow, edgecolor=colours[1])\n ax.errorbar(bins, mean_MICEkappa, standard_MICEerror, marker='d', color='b', 
markersize=3, capsize=3,\n zorder=20)\n ax.errorbar(bins, mean_kappa, standard_error, marker='s', color='r', markersize=3, capsize=3, zorder=20)\n plt.show()\n\n if plot_total:\n conv_total = []\n for cone_radius in RADII[10:]:\n conv_total.append(kappa[f\"Radius{str(cone_radius)}\"][\"Total\"])\n plt.ylabel(\"$\\kappa$\")\n plt.xlabel(\"Cone Radius (arcmin)\")\n plt.tick_params(labelsize=12)\n plt.plot([0, 30], [0, 0], color=grey, linestyle='--')\n plt.xlim([0, 30])\n plt.plot(RADII[10:], conv_total, marker='o', markersize=2, color=colours[0])\n plt.show()\n\n return kappa\n\n\ndef find_correlation(convergence_data, radii, plot_correlation=False, plot_radii=False, fis=False, mu_diff=None,\n impact=False):\n \"\"\"Finds the value of the slope for plotting residuals against convergence. Magnitude of slope and error\n quantify correlation between the two.\n\n Inputs:\n conv -- convergence.\n mu_diff -- residuals.\n \"\"\"\n correlations = []\n correlation_errs = []\n for cone_radius in radii:\n if fis or impact:\n pickle_in = open(\"MICE_SN_data_fis.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n if mu_diff is None:\n mu_diff = SN_data[f\"Radius{str(cone_radius)}\"][\"mu_diff\"]\n conv = np.array(convergence_data[f\"Radius{str(cone_radius)}\"][\"SNkappa\"])\n else:\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n # redshift_cut = [SN_data['SNZ'] > 0.2]\n if mu_diff is None:\n mu_diff = SN_data[\"mu_diff\"]\n conv = np.array(convergence_data[f\"Radius{str(cone_radius)}\"][\"SNkappa\"])\n\n conv_rank = rankdata(conv)\n mu_rank = rankdata(mu_diff)\n # print(mu_diff)\n diff = np.abs(conv_rank - mu_rank)\n rho = 1 - 6 / (len(conv) * (len(conv) ** 2 - 1)) * np.sum(diff ** 2)\n rho_err = np.sqrt((1 - rho ** 2) / (len(conv) - 1))\n correlations.append(rho)\n correlation_errs.append(rho_err)\n\n if plot_correlation:\n edges = np.linspace(-0.0065, 0.011, 6)\n bins = (edges[1:] + edges[:-1]) / 2\n mean_dmu = []\n standard_error = []\n for bin in bins:\n dmus = []\n for kappa, dmu in zip(conv, mu_diff):\n if bin - 0.007 / 4 < kappa <= bin + 0.0007 / 4:\n dmus.append(dmu)\n mean_dmu.append(np.mean(dmus))\n standard_error.append(np.std(dmus) / np.sqrt(len(dmus)))\n\n plt.plot([min(conv), max(conv)], [0, 0], color=grey, linestyle='--')\n plt.plot(conv, mu_diff, linestyle='', marker='o', markersize=2, color=colours[0])\n # plt.plot(conv, fit, color=colours[1], label=f'$\\Delta\\mu = {round(float(grad),3)}\\kappa$')\n plt.errorbar(bins, mean_dmu, standard_error, marker='s', color='r', markersize=3, capsize=3, linestyle='')\n plt.xlabel('$\\kappa$')\n plt.ylabel('$\\Delta\\mu$')\n plt.xlim([-0.008, 0.011])\n plt.legend(frameon=0, loc='lower right')\n plt.ylim([-0.3, 0.3])\n plt.text(0.0038, -0.19, f'$\\\\rho$ = {round(rho, 3)} $\\pm$ {round(rho_err, 3)}', fontsize=16)\n plt.show()\n\n if plot_radii:\n u_err = [correlations[i] + correlation_errs[i] for i in range(len(correlations))]\n d_err = [correlations[i] - correlation_errs[i] for i in range(len(correlations))]\n smooth_corr = savgol_filter([correlations[i] for i in range(len(correlations))], 11, 4)\n smooth_u_err = savgol_filter(u_err, 11, 4)\n smooth_d_err = savgol_filter(d_err, 11, 4)\n plt.plot([0, 30], [0, 0], color=grey, linestyle='--')\n plt.plot(radii, smooth_corr, color=colours[0])\n plt.plot(radii, [correlations[i] for i in range(len(correlations))], marker='x', color=colours[1],\n linestyle='')\n plt.fill_between(radii, smooth_u_err, smooth_d_err, color=colours[0], alpha=0.4)\n\n plt.xlabel('Cone 
Radius (arcmin)')\n plt.ylabel(\"Spearman's Rank Coefficient\")\n plt.gca().invert_yaxis()\n plt.show()\n return [correlations, smooth_corr, smooth_u_err, smooth_d_err, np.array(u_err) - np.array(correlations)]\n\n return correlations, correlation_errs\n\n\ndef plot_Hubble():\n \"\"\"Plots the Hubble diagram (distance modulus against redshift), including the best fitting cosmology, and\n residuals from best cosmology.\n\n Inputs:\n z -- redshift of SNe.\n mu -- distance modulus of SNe.\n mu_err -- error in distance modulus of SNe.\n mu_cosm -- distance modulus of best fitting cosmology.\n mu_diff -- residuals from best fitting cosmology.\n z_arr -- array of redshifts used for best fitting cosmology.\n \"\"\"\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n z = SN_data[\"SNZ\"]\n mu = SN_data['SNMU']\n mu_err = SN_data['SNMU_ERR']\n z_array = np.linspace(0.0, 1.5 + 0.01, 1001)\n mu_cosm = 5 * np.log10((1 + z_array) * Convergence.comoving(z_array, OM=0.25, OL=0.75, h=0.7) * 1000) + 25\n mu_diff = SN_data['mu_diff']\n ax = plt.subplot2grid((2, 1), (0, 0))\n ax2 = plt.subplot2grid((2, 1), (1, 0))\n ax.set_ylabel(\"$\\mu$\")\n ax2.set_xlabel(\"$z$\")\n ax2.set_ylabel(\"$\\Delta\\mu$\")\n plt.subplots_adjust(wspace=0, hspace=0)\n ax.set_xticklabels([])\n ax.tick_params(labelsize=12)\n ax.errorbar(z[::2], mu[::2], mu_err[::2], linestyle='', linewidth=0.8, marker='o',\n markersize=2, capsize=2, color='C3', zorder=0, alpha=0.6, elinewidth=0.7)\n ax.plot(z[::2], mu[::2], linestyle='', marker='o', markersize=2, color='C3', alpha=0.4, markerfacecolor='C3')\n\n ax.set_ylim([38.5, 46])\n ax.set_xlim([0, 1.5])\n ax.plot(z_array, mu_cosm, linestyle='--', linewidth=0.8, color='C0', zorder=10)\n ax2.errorbar(z[::2], mu_diff[::2], mu_err[::2], linestyle='', linewidth=1, marker='o',\n markersize=2, capsize=2, color='C3', zorder=0, alpha=0.6, elinewidth=0.7)\n ax2.plot(z[::2], mu_diff[::2], linestyle='', marker='o', markersize=2, color='C3', alpha=0.4, markerfacecolor='C3')\n ax2.plot(z_array, np.zeros(len(z_array)), zorder=10, color='C0', linewidth=0.8, linestyle='--')\n ax2.set_ylim(-1.0, 1.0)\n ax2.set_xlim([0, 1.5])\n ax2.tick_params(labelsize=12)\n\n plt.show()\n\n\ndef degradation(radii):\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n pickle_in = open(\"MICEkappa_weighted.pickle\", \"rb\")\n kappa = pickle.load(pickle_in)\n corrs = []\n errs = []\n degrads = [0.03, 0.05, 0.1, 0.12, 0.2, 0.35]\n for gauss_width in degrads:\n corr = []\n err = []\n for j in range(100):\n mu_diff = - (5.0 / np.log(10) * SN_data[\"SNkappa\"])\n random.seed(1337+j)\n for i in range(1500):\n # mu_diff[i] += random.gauss(0.0, gauss_width)\n mu_diff[i] += random.gauss(0.0, SN_data[\"SNZ\"][i] * gauss_width / 1.5)\n # print(mu_diff)\n c, e = find_correlation(kappa, radii, mu_diff=mu_diff)\n corr.append(c)\n err.append(e)\n print(gauss_width)\n corrs.append(np.mean(corr, 0))\n errs.append(np.mean(err, 0))\n for num, d in enumerate(degrads):\n plt.plot(radii, corrs[num], label=f\"$\\sigma$ = {round(d, 2)}'\")\n plt.legend(frameon=0)\n plt.xlabel('Cone Radius (arcmin)')\n plt.ylabel(\"Spearman's Rank Coefficient\")\n plt.axis([0, 30, -0.45, 0.0])\n plt.gca().invert_yaxis()\n plt.tight_layout()\n plt.show()\n exit()\n\n\ndef bin_test(alldata, big_cone, big_cone_radius):\n corrs = []\n kappas = []\n bins = [11, 21, 31, 51, 101, 151]\n for num_bins in bins:\n exp_data = find_expected(big_cone, big_cone_radius, num_bins, redo=True)\n kappa = 
find_convergence(alldata, exp_data, redo=True, fis=True)\n counts, bin_edges = np.histogram(kappa['Radius3.5']['SNkappa'], bins=np.arange(-0.04, 0.07 + 0.002, 0.002))\n bin_centres = 0.5 * (bin_edges[1:] + bin_edges[:-1])\n counts2, bin_edges2 = np.histogram(kappa['Radius9.0']['SNkappa'], bins=np.arange(-0.04, 0.07 + 0.002, 0.002))\n bin_centres2 = 0.5 * (bin_edges2[1:] + bin_edges2[:-1])\n kappas.append([[bin_centres, counts], [bin_centres2, counts2]])\n # correlation = find_correlation(kappa, RADII, fis=True)\n # corrs.append(correlation[0])\n\n for i, num_bins in enumerate(bins):\n # plt.plot(RADII, corrs[i], label=f\"{num_bins-1}\")\n plt.plot(kappas[i][0][0], kappas[i][0][1], label=f\"{num_bins-1}\")\n plt.legend(frameon=0)\n plt.xlabel('$\\kappa$')\n # plt.xlim([1, 20])\n plt.ylabel('Count')\n # plt.gca().invert_yaxis()\n plt.show()\n\n for i, num_bins in enumerate(bins):\n # plt.plot(RADII, corrs[i], label=f\"{num_bins-1}\")\n plt.plot(kappas[i][1][0], kappas[i][1][1], label=f\"{num_bins-1}\")\n plt.legend(frameon=0)\n plt.xlabel('$\\kappa$')\n # plt.xlim([1, 20])\n plt.ylabel('Count')\n # plt.gca().invert_yaxis()\n plt.show()\n\n exit()\n\n\ndef redo_SN_data(data, seed=1337):\n RAs = np.array(data['RA'])\n DECs = np.array(data['DEC'])\n zs = np.array(data['z'])\n kappas = np.array(data['kappa'])\n\n SN_DECs = DECs[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n SN_zs = zs[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n SN_kappas = kappas[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n SN_RAs = RAs[np.logical_and(RAs < max(RAs) - 0.5, RAs > min(RAs) + 0.5)]\n\n random.seed(1337)\n rand_samp_size = 1500\n indices = random.sample(range(len(SN_zs)), rand_samp_size)\n rand_zs = SN_zs[indices]\n rand_RAs = SN_RAs[indices]\n rand_DECs = SN_DECs[indices]\n rand_kappas = SN_kappas[indices]\n\n dists = []\n rand_chis = []\n for z in rand_zs:\n chi_to_z = Convergence.comoving(np.linspace(0, z, 1001), OM=0.25, OL=0.75, h=0.7)\n dists.append(chi_to_z[-1] * (1 + z))\n rand_chis.append(chi_to_z[-1])\n mus = 5 * np.log10(np.array(dists) / 10 * 1E9)\n mu_diff = np.zeros(len(mus))\n mu_diff += - (5.0 / np.log(10) * rand_kappas)\n\n random.seed(seed)\n for i in range(len(mu_diff)):\n mu_diff[i] += random.gauss(0.0, rand_zs[i] * 0.1 / 1.5 + 0.15) # Width is 0.15 at z=0, 0.25 at z=1.5\n rand_mus = mus + mu_diff\n rand_errs = np.array([abs(random.uniform(0.05 + 0.1 * rand_zs[i], 0.1 + 0.45 * rand_zs[i]))\n for i in range(rand_samp_size)])\n SN_data = {'mu_diff': mu_diff, 'SNZ': rand_zs, 'SNkappa': rand_kappas,\n 'SNRA': rand_RAs, 'SNDEC': rand_DECs, 'SNMU': rand_mus, 'SNMU_ERR': rand_errs}\n pickle_out = open(\"sparseMICE_SN_data.pickle\", \"wb\")\n pickle.dump(SN_data, pickle_out)\n pickle_out.close()\n print(\"Finished SN_data\")\n\n pickle_in = open(\"sparse_lenses.pickle\", \"rb\")\n lenses = pickle.load(pickle_in)\n\n for R in lenses.keys():\n for num, SN in enumerate(lenses[R].keys()):\n lenses[R][SN][\"SNMU\"] = SN_data[\"SNMU\"][num]\n\n pickle_out = open(\"sparse_lenses.pickle\", \"wb\")\n pickle.dump(lenses, pickle_out)\n\n\n\nif __name__ == \"__main__\":\n #### test sparse data ####\n use_weighted = True\n alldata = get_data()\n # test_cones = cones.make_test_cones(alldata, redo=False, plot=False)\n # print(len(alldata[\"RA\"]))\n # lens_data = get_random(alldata, redo=False)\n # # plot_cones(alldata)\n # exp_data = cones.find_expected_counts(test_cones, 111, redo=False, plot=False)\n #\n # lensing_gals_fully_in_sample = {}\n # number_fis = 
np.zeros(len(RADII[29:]))\n # num = 0\n # not_fis_indices = np.zeros(1500)\n # for rad in RADII[29:]:\n # lensing_gals_fully_in_sample[f\"Radius{rad}\"] = {}\n # for num2, (key2, SN) in enumerate(lens_data[f\"Radius{rad}\"].items()):\n # if SN[\"WEIGHT\"] == 1:\n # not_fis_indices[num2] = 1\n # lensing_gals_fully_in_sample[f\"Radius{rad}\"][key2] = SN\n # number_fis[num] += 1\n # num += 1\n # # plt.plot(RADII[29:], number_fis, '+')\n # # plt.show()\n # kappa_fis = cones.find_convergence(lensing_gals_fully_in_sample, exp_data, redo=False, plot_total=True,\n # plot_scatter=False, fis=True, max_z=1.42)\n # sparse_FIS = cones.find_correlation(kappa_fis, lensing_gals_fully_in_sample, plot_radii=True)\n # exit()\n\n #### test sparse data ####\n # use_weighted = True\n # alldata = get_data()\n # big_cone_centre = [(min(alldata['RA']) + max(alldata['RA'])) / 2, (min(alldata['DEC']) + max(alldata['DEC'])) / 2]\n # big_cone_radius = round(min(max(alldata['RA']) - big_cone_centre[0], big_cone_centre[0] - min(alldata['RA']),\n # max(alldata['DEC']) - big_cone_centre[1], big_cone_centre[1] - min(alldata['DEC'])), 2)\n # big_cone = make_big_cone(alldata, redo=False)\n # print(len(alldata[\"RA\"]))\n # get_random(alldata, redo=True)\n # # plot_cones(alldata)\n # exp_data = find_expected(big_cone, big_cone_radius, 101, redo=False, plot=False)\n #\n # sparse_kappa = find_convergence(alldata, exp_data, redo=False, plot_total=True, plot_scatter=False, fis=True)\n # sparse_FIS = find_correlation(sparse_kappa, RADII[10:], plot_radii=True, fis=True)\n # exit()\n\n big_cone_centre = [(min(alldata['RA']) + max(alldata['RA'])) / 2, (min(alldata['DEC']) + max(alldata['DEC'])) / 2]\n big_cone_radius = round(min(max(alldata['RA']) - big_cone_centre[0], big_cone_centre[0] - min(alldata['RA']),\n max(alldata['DEC']) - big_cone_centre[1], big_cone_centre[1] - min(alldata['DEC'])), 2)\n big_cone = make_big_cone(alldata, redo=False)\n # bin_test(alldata, big_cone, big_cone_radius)\n # exit()\n exp_data = find_expected(big_cone, big_cone_radius, 111, redo=False, plot=False)\n get_random(alldata, redo=True)\n # plot_cones(alldata, plot_hist=True, cone_radius=6.0)\n # plot_Hubble()\n\n kappa = find_convergence(alldata, exp_data, redo=False, plot_total=False, plot_scatter=False, weighted=use_weighted)\n use_weighted = not use_weighted\n kappa_weighted = find_convergence(alldata, exp_data, redo=False, plot_total=False, plot_scatter=False,\n weighted=use_weighted)\n degradation(RADII)\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n # pickle_in = open(\"random_cones_new.pickle\", \"rb\")\n # lens_data = pickle.load(pickle_in)\n # SN_z = SN_data[\"SNZ\"]\n SN_kappa = SN_data[\"SNkappa\"]\n # SN_mu = SN_data['SNMU']\n # SN_mu_err = SN_data['SNMU_ERR']\n # SN_chi = []\n # gal_zs = {}\n # for z in SN_z:\n # chi = Convergence.comoving(np.linspace(0, z, 1001), OM=0.25, OL=0.75, h=0.7)\n # SN_chi.append(chi[-1])\n # data = {}\n # for rad in RADII:\n # data[f'Radius{rad}'] = {}\n # for j, (key, SN) in enumerate(lens_data[f\"Radius{rad}\"].items()):\n # if key != \"WEIGHT\":\n # cone_IDs = np.array([], dtype=np.int16)\n # for r in RADII[0:np.argmin(np.abs(RADII - np.array(rad)))]:\n # cone_IDs = np.append(cone_IDs, lens_data[f\"Radius{r}\"][f\"Shell{j+1}\"])\n # gal_zs[key] = alldata['z'][cone_IDs]\n # data[f'Radius{rad}'][key] = {\"Zs\": gal_zs[key], \"SNZ\": SN_z[j], \"SNMU\": SN_mu[j],\n # \"SNMU_ERR\": SN_mu_err[j], \"WEIGHT\": lens_data[f\"Radius{rad}\"]['WEIGHT'][j]}\n # 
print(data[\"Radius30.0\"].keys())\n # cones_MICE_conv = cones.find_convergence(data, exp_data, redo=False, plot_scatter=False, plot_total=True, MICE=True,\n # weighted=True, max_z=1.5)\n pickle_in = open(\"MICEkappa.pickle\", \"rb\")\n cones_MICE_conv = pickle.load(pickle_in)\n pickle_in = open(\"MICEkappa_weighted.pickle\", \"rb\")\n cones_MICE_conv_weighted = pickle.load(pickle_in)\n unweighted = find_correlation(cones_MICE_conv, RADII, plot_radii=True)\n weighted = find_correlation(cones_MICE_conv_weighted, RADII, plot_radii=True)\n\n kappa_fis = find_convergence(alldata, exp_data, redo=False, plot_total=False, plot_scatter=False, weighted=False,\n fis=True)\n fully_in_sample = find_correlation(kappa_fis, RADII, plot_correlation=False, plot_radii=True, fis=True)\n # print(fully_in_sample[0][12], fully_in_sample[4][12])\n fig, ax = plt.subplots()\n # ax2 = fig.add_axes([0.55, 0.5, 0.35, 0.35])\n ax.plot([0, 30], [0, 0], color=grey, linestyle='--')\n ax.plot(RADII, unweighted[1], color=colours[0])\n ax.plot(RADII, unweighted[0], marker='x', linestyle='', color=[0, 0.5, 0.9])\n ax.fill_between(RADII, unweighted[2], unweighted[3], color=colours[0], alpha=0.3)\n ax.plot(RADII, weighted[1], color=colours[1])\n ax.plot(RADII, weighted[0], marker='x', linestyle='', color=[0.7, 0.3, 0])\n ax.fill_between(RADII, weighted[2], weighted[3], color=colours[1], alpha=0.3)\n ax.plot(RADII, fully_in_sample[1], color=colours[2])\n ax.plot(RADII, fully_in_sample[0], marker='x', linestyle='', color=[0.7, 0.1, 0.6])\n ax.fill_between(RADII, fully_in_sample[2], fully_in_sample[3], color=colours[2], alpha=0.3)\n kwargs1 = {'marker': 'x', 'markeredgecolor': [0, 0.5, 0.9], 'color': colours[0]}\n kwargs2 = {'marker': 'x', 'markeredgecolor': [0.7, 0.3, 0], 'color': colours[1]}\n kwargs3 = {'marker': 'x', 'markeredgecolor': [0.7, 0.1, 0.6], 'color': colours[2]}\n ax.plot([], [], label='Unweighted', **kwargs1)\n ax.plot([], [], label='Weighted', **kwargs2)\n ax.plot([], [], label='Fully In Sample', **kwargs3)\n ax.invert_yaxis()\n ax.set_xlim([0, 30])\n # ax.set_ylim([0.2, 0.65])\n ax.legend(frameon=0, loc='lower left')\n ax.set_xlabel('Cone Radius (arcmin)')\n ax.set_ylabel(\"Spearman's Rank Coefficient\")\n # ax2.plot([0, 30], [0, 0], color=grey, linestyle='--')\n # ax2.plot(RADII, unweighted[1], color=colours[0])\n # ax2.plot(RADII, unweighted[0], marker='x', linestyle='', color=[0, 0.5, 0.9])\n # ax2.fill_between(RADII, unweighted[2], unweighted[3], color=colours[0], alpha=0.3)\n # ax2.plot(RADII, weighted[1], color=colours[1])\n # ax2.plot(RADII, weighted[0], marker='x', linestyle='', color=[0.7, 0.3, 0])\n # ax2.fill_between(RADII, weighted[2], weighted[3], color=colours[1], alpha=0.3)\n # ax2.plot(RADII, fully_in_sample[1], color=colours[2])\n # ax2.plot(RADII, fully_in_sample[0], marker='x', linestyle='', color=[0.7, 0.1, 0.6])\n # ax2.fill_between(RADII, fully_in_sample[2], fully_in_sample[3], color=colours[2], alpha=0.3)\n # ax2.set_xlim([0.6, 6.6])\n # ax2.set_ylim([0.535, 0.62])\n plt.show()\n\n # conv_total = []\n # conv_total_weighted = []\n # conv_total_fis = []\n # conv_total_MICE = sum(SN_kappa)\n # for cone_radius in RADII:\n # conv_total.append(kappa[f\"Radius{str(cone_radius)}\"][\"Total\"])\n # conv_total_weighted.append(kappa_weighted[f\"Radius{str(cone_radius)}\"][\"Total\"])\n # conv_total_fis.append(kappa_fis[f\"Radius{str(cone_radius)}\"][\"Total\"])\n # plt.ylabel(\"Total Convergence\")\n # plt.xlabel(\"Cone Radius (arcmin)\")\n # plt.tick_params(labelsize=12)\n # plt.plot([0, 30], [0, 
0], color=grey, linestyle='--')\n # plt.xlim([0, 30])\n # plt.plot(RADII, conv_total, marker='o', markersize=2, color=colours[0], label='Unweighted')\n # plt.plot(RADII, conv_total_weighted, marker='o', markersize=2, color=colours[1], label='Weighted')\n # plt.plot(RADII, conv_total_fis, marker='o', markersize=2, color=colours[2], label='Fully in sample')\n # plt.plot(RADII, [conv_total_MICE for i in range(84)], marker='o', markersize=2, color=colours[3], label='MICECAT')\n # plt.legend(frameon=0)\n # plt.show()\n\n kappa_impact = find_convergence(alldata, exp_data, redo=True, plot_total=False, plot_scatter=False, impact=True)\n fully_in_sample = find_correlation(kappa_impact, RADII, plot_correlation=False, plot_radii=True, impact=True)\n" ]
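The commented-out block above converts each SN redshift into a comoving distance via Convergence.comoving(np.linspace(0, z, 1001), OM=0.25, OL=0.75, h=0.7). A minimal sketch of what such a flat-LambdaCDM comoving-distance helper can look like, using trapezoidal integration over the redshift grid; this is an illustration under stated assumptions, not the repo's Convergence module:

import numpy as np

def comoving_distance(zs, OM=0.25, OL=0.75, h=0.7):
    """Comoving distance (Mpc) to each redshift on the grid `zs` (grid must start at 0)."""
    c = 299792.458                               # speed of light, km/s
    H0 = 100.0 * h                               # Hubble constant, km/s/Mpc
    E = np.sqrt(OM * (1 + zs) ** 3 + OL)         # dimensionless Hubble parameter E(z), flat LCDM
    integrand = 1.0 / E
    # cumulative trapezoidal integral of dz / E(z) -> one value per grid point
    chi = np.concatenate(([0.0], np.cumsum(0.5 * (integrand[1:] + integrand[:-1]) * np.diff(zs))))
    return (c / H0) * chi

# usage mirroring the commented-out call: distance to a SN at z = 0.5
zs = np.linspace(0, 0.5, 1001)
chi = comoving_distance(zs, OM=0.25, OL=0.75, h=0.7)
print(chi[-1])   # comoving distance to z = 0.5 (roughly 1.9e3 Mpc for these parameters)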
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "numpy.cumsum", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.pyplot.subplot2grid", "scipy.signal.savgol_filter", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.arange", "numpy.sin", "numpy.std", "numpy.insert", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.axis", "numpy.count_nonzero", "matplotlib.pyplot.text", "numpy.log", "matplotlib.pyplot.ylim", "matplotlib.patches.Circle", "numpy.arccos", "numpy.append", "matplotlib.pyplot.fill_between", "numpy.logical_and", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.collections.PatchCollection", "numpy.abs", "scipy.stats.rankdata", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.tick_params" ] ]
rccannizzaro/QC-StrategyBacktest
[ "847dbd61680466bc60ce7893eced8a8f70d16b2e" ]
[ "QuantConnect - StrategyBacktest.py" ]
[ "########################################################################################\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################################\n\nimport numpy as np\nimport pandas as pd\nimport time as timer\nfrom System.Drawing import Color\nfrom Strategies import *\nfrom Logger import *\n\nclass StrategyBacktest(QCAlgorithm):\n\n # #####################################\n # Backtesting parameters\n # #####################################\n def Initialize(self):\n # Backtesting period\n self.SetStartDate(2021, 1, 1)\n self.SetEndDate(2021, 11, 30)\n # Store the initial account value\n self.initialAccountValue = 1000000\n self.SetCash(self.initialAccountValue)\n \n # Logging level: \n # -> 0 = ERROR\n # -> 1 = WARNING\n # -> 2 = INFO\n # -> 3 = DEBUG\n # -> 4 = TRACE (Attention!! This can consume your entire daily log limit)\n self.logLevel = 2\n \n # Ticker Symbol\n self.ticker = \"SPX\"\n \n # Days to Expiration\n self.dte = 45\n # The size of the window used to filter the option chain: options expiring in the range [dte-dteWindow, dte] will be selected\n self.dteWindow = 7\n \n # Risk Free Rate for the Black-Scholes-Merton model\n self.riskFreeRate = 0.001\n\n # Use Limit Orders to open/close a position?\n self.useLimitOrders = True\n \n # Slippage used to set Limit orders\n self.slippage = 0.05\n \n # Adjustment factor applied to the Mid-Price to set the Limit Order:\n # - Credit Strategy:\n # Adj = 0.3 --> sets the Limit Order price 30% higher than the current Mid-Price\n # - Debit Strategy:\n # Adj = -0.2 --> sets the Limit Order price 20% lower than the current Mid-Price\n self.limitOrderRelativePriceAdjustment = 0.2\n \n # Alternative method to set the absolute price (per contract) of the Limit Order. This method is used if a number is specified\n # Unless you know that your price target can get a fill, it is advisable to use a relative adjustment or you may never get your order filled \n # - Credit Strategy:\n # AbsolutePrice = 1.5 --> sets the Limit Order price at exactly 1.5$\n # - Debit Strategy:\n # AbsolutePrice = -2.3 --> sets the Limit Order price at exactly -2.3$\n # self.limitOrderAbsolutePrice = 2.1\n \n # Set expiration for Limit orders\n self.limitOrderExpiration = timedelta(hours = 4)\n \n # Target <credit|debit> premium amount: used to determine the number of contracts needed to reach the desired target amount\n # - targetPremiumPct --> target premium is expressed as a percentage of the total Portfolio Net Liq (0 < targetPremiumPct < 1)\n # - targetPremium --> target premium is a fixed dollar amount\n # If both are specified, targetPremiumPct takes precedence. If none of them are specified, the number of contracts specified by the maxOrderQuantity parameter is used.\n self.targetPremiumPct = None\n self.targetPremium = 1000\n\n # Maximum quantity used to scale each position. If the target premium cannot be reached within this quantity (i.e. 
premium received is too low), the position is not going to be opened\n self.maxOrderQuantity = 20\n # If True, the order is submitted as long as it does not exceed the maxOrderQuantity.\n self.validateQuantity = True\n \n # Profit Target Factor (Multiplier of the premium received/paid when the position was opened)\n self.profitTarget = 0.6\n \n # Stop Loss Multiplier, expressed as a function of the profit target (rather than the credit received)\n # The position is closed (Market Order) if:\n # Position P&L < -abs(openPremium) * stopLossMultiplier\n # where:\n # - openPremium is the premium received (positive) in case of credit strategies\n # - openPremium is the premium paid (negative) in case of debit strategies\n #\n # Credit Strategies (i.e. $2 credit):\n # - profitTarget < 1 (i.e. 0.5 -> 50% profit target -> $1 profit)\n # - stopLossMultiplier = 2 * profitTarget (i.e. -abs(openPremium) * stopLossMultiplier = -abs(2) * 2 * 0.5 = -2 --> stop if P&L < -2$)\n # Debit Strategies (i.e. $4 debit):\n # - profitTarget < 1 (i.e. 0.5 -> 50% profit target -> $2 profit)\n # - stopLossMultiplier < 1 (You can't lose more than the debit paid. i.e. stopLossMultiplier = 0.6 --> stop if P&L < -2.4$)\n self.stopLossMultiplier = 2 * self.profitTarget\n #self.stopLossMultiplier = 0.6\n \n # DTE Threshold. This is ignored if self.dte < self.dteThreshold\n self.dteThreshold = None\n # DIT Threshold. This is ignored if self.dte < self.ditThreshold\n self.ditThreshold = None\n \n # Controls what happens when an open position reaches/crosses the dteThreshold ( -> DTE(openPosition) <= dteThreshold)\n # - If True, the position is closed as soon as the dteThreshold is reached, regardless of whether the position is profitable or not\n # - If False, once the dteThreshold is reached, the position is closed as soon as it is profitable\n self.forceDteThreshold = False\n # Controls what happens when an open position reaches/crosses the ditThreshold ( -> DIT(openPosition) >= ditThreshold)\n # - If True, the position is closed as soon as the ditThreshold is reached, regardless of whether the position is profitable or not\n # - If False, once the ditThreshold is reached, the position is closed as soon as it is profitable\n self.forceDitThreshold = False\n \n # Maximum number of open positions at any given time\n self.maxActivePositions = 20\n\n # If True, the order mid-price is validated to make sure the Bid-Ask spread is not too wide.\n # - The order is not submitted if the ratio between Bid-Ask spread of the entire order and its mid-price is more than self.bidAskSpreadRatio\n self.validateBidAskSpread = False\n self.bidAskSpreadRatio = 0.8\n\n #Controls whether to include Cancelled orders (Limit orders that didn't fill) in the final output\n self.includeCancelledOrders = True\n\n # Controls whether to allow multiple positions to be opened for the same Expiration date\n self.allowMultipleEntriesPerExpiry = False\n \n # Controls whether to include details on each leg (open/close fill price and descriptive statistics about mid-price, Greeks, and IV)\n self.includeLegDetails = False\n # The frequency (in minutes) with which the leg details are updated (used only if includeLegDetails = True). \n # Updating with high frequency (i.e. 
every 5 minutes) will slow down the execution\n self.legDatailsUpdateFrequency = 30\n \n \n # Controls whether to use the furthest (True) or the earliest (False) expiration date when multiple expirations are available in the chain\n self.useFurthestExpiry = True\n # Controls whether to consider the DTE of the last closed position when opening a new one:\n # If True, the Expiry date of the new position is selected such that the open DTE is the nearest to the DTE of the closed position\n self.dynamicDTESelection = True\n \n # ########################################################################\n # Trading Strategies. \n # - Multiple strategies can be executed at the same time\n # - Each strategy is processed indipendently of the others\n # - New strategies can be created by extending the OptionStrategy class and implementing the getOrder method\n # Parameters details:\n # - Net Delta: Used for Straddle, IronFly and Butterfly strategy. \n # - If netDelta = None --> the Strategy will be centered around the ATM strike\n # - If netDelta = n (-50, 50) --> the strike selection will be centered in a way to achieve the requested net delta exposure\n\n # ########################################################################\n \n # Holds all the strategies to be executed\n self.strategies = []\n \n # self.strategies.append(PutStrategy(self, delta = 10, creditStrategy = True))\n # self.strategies.append(CallStrategy(self, delta = 10, creditStrategy = True))\n # self.strategies.append(StraddleStrategy(self, netDelta = None, creditStrategy = True))\n # self.strategies.append(StrangleStrategy(self, putDelta = 10, callDelta = 10, creditStrategy = True))\n self.strategies.append(PutSpreadStrategy(self, delta = 10, wingSize = 25, creditStrategy = True))\n self.strategies.append(CallSpreadStrategy(self, delta = 10, wingSize = 25, creditStrategy = True))\n # self.strategies.append(IronCondorStrategy(self, putDelta = 10, callDelta = 10, putWingSize = 10, callWingSize = 10, creditStrategy = True))\n # self.strategies.append(IronFlyStrategy(self, netDelta = None, putWingSize = 10, callWingSize = 10, creditStrategy = True))\n # self.strategies.append(ButterflyStrategy(self, butteflyType = \"Put\", netDelta = None, butterflyLeftWingSize = 10, butterflyRightWingSize = 10, creditStrategy = True))\n # self.strategies.append(TEBombShelterStrategy(self, delta = 15, frontDte = self.dte - 30, hedgeAllocation = 0.1, chartUpdateFrequency = 5))\n\n # Coarse filter for the Universe selection. It selects nStrikes on both sides of the ATM strike for each available expiration\n self.nStrikesLeft = 200\n self.nStrikesRight = 200\n\n # Time Resolution\n self.timeResolution = Resolution.Minute # Resolution.Minute .Hour .Daily\n \n # Set brokerage model and margin account\n self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Margin)\n\n # The start time at which the algorithm will start scheduling the strategy execution (to open new positions). 
No positions will be opened before this time\n self.scheduleStartTime = time(9, 45, 0)\n # Periodic interval with which the algorithm will check to open new positions\n self.scheduleFrequency = timedelta(hours = 1)\n \n # Setup the backtesting algorithm\n self.setupBacktest()\n \n # Setup the charts\n self.setupCharts()\n \n \n \n def setupCharts(self, openPositions = True, Stats = True, PnL = True, WinLossStats = True, Performance = True, LossDetails = True):\n \n # Initialize flag (used to trigger a chart update)\n self.statsUpdated = False\n \n # Create an object to store all the stats\n self.stats = CustomObject()\n \n # Store the details about which charts will be plotted (there is a maximum of 10 series per backtest)\n self.stats.plot = CustomObject()\n self.stats.plot.openPositions = openPositions\n self.stats.plot.Stats = Stats\n self.stats.plot.PnL = PnL\n self.stats.plot.WinLossStats = WinLossStats\n self.stats.plot.Performance = Performance\n self.stats.plot.LossDetails = LossDetails\n \n # Initialize performance metrics\n self.stats.won = 0\n self.stats.lost = 0\n self.stats.winRate = 0.0\n self.stats.premiumCaptureRate = 0.0\n self.stats.totalCredit = 0.0\n self.stats.totalDebit = 0.0\n self.stats.PnL = 0.0\n self.stats.totalWinAmt = 0.0\n self.stats.totalLossAmt = 0.0\n self.stats.averageWinAmt = 0.0\n self.stats.averageLossAmt = 0.0\n self.stats.maxWin = 0.0\n self.stats.maxLoss = 0.0\n self.stats.testedCall = 0\n self.stats.testedPut = 0\n \n # Setup Charts\n if openPositions:\n activePositionsPlot = Chart('Open Positions')\n activePositionsPlot.AddSeries(Series('Open Positions', SeriesType.Line, ''))\n \n if Stats:\n statsPlot = Chart('Stats')\n statsPlot.AddSeries(Series('Won', SeriesType.Line, '', Color.Green))\n statsPlot.AddSeries(Series('Lost', SeriesType.Line, '', Color.Red))\n\n if PnL:\n pnlPlot = Chart('Profit and Loss')\n pnlPlot.AddSeries(Series('PnL', SeriesType.Line, ''))\n\n if WinLossStats:\n winLossStatsPlot = Chart('Win and Loss Stats')\n winLossStatsPlot.AddSeries(Series('Average Win', SeriesType.Line, '$', Color.Green))\n winLossStatsPlot.AddSeries(Series('Average Loss', SeriesType.Line, '$', Color.Red))\n\n if Performance:\n performancePlot = Chart('Performance')\n performancePlot.AddSeries(Series('Win Rate', SeriesType.Line, '%'))\n performancePlot.AddSeries(Series('Premium Capture', SeriesType.Line, '%'))\n\n # Loss Details chart. 
Only relevant in case of credit strategies\n if LossDetails:\n lossPlot = Chart('Loss Details')\n lossPlot.AddSeries(Series('Short Put Tested', SeriesType.Line, ''))\n lossPlot.AddSeries(Series('Short Call Tested', SeriesType.Line, ''))\n\n # Call the chart initialization method of each strategy (give a chance to setup custom charts)\n for strategy in self.strategies:\n strategy.setupCharts()\n\n # Add the first data point to the charts\n self.statsUpdated = True\n self.updateCharts()\n\n def updateCharts(self):\n\n # Start the timer\n self.executionTimer.start()\n\n # Call the updateCharts method of each strategy (give a chance to update any custom charts)\n for strategy in self.strategies:\n strategy.updateCharts()\n\n # Exit if there is nothing to update\n if not (self.statsUpdated or self.Time.time() == time(15, 59, 0)):\n return\n \n # Reset the flag\n self.statsUpdated = False\n \n plotInfo = self.stats.plot\n \n # Add the latest stats to the plots\n if plotInfo.openPositions:\n self.Plot(\"Open Positions\", \"Open Positions\", self.currentActivePositions)\n if plotInfo.Stats:\n self.Plot(\"Stats\", \"Won\", self.stats.won)\n self.Plot(\"Stats\", \"Lost\", self.stats.lost)\n if plotInfo.PnL:\n self.Plot(\"Profit and Loss\", \"PnL\", self.stats.PnL)\n if plotInfo.WinLossStats:\n self.Plot(\"Win and Loss Stats\", \"Average Win\", self.stats.averageWinAmt)\n self.Plot(\"Win and Loss Stats\", \"Average Loss\", self.stats.averageLossAmt)\n if plotInfo.Performance:\n self.Plot(\"Performance\", \"Win Rate\", self.stats.winRate)\n self.Plot(\"Performance\", \"Premium Capture\", self.stats.premiumCaptureRate)\n if plotInfo.LossDetails:\n self.Plot(\"Loss Details\", \"Short Put Tested\", self.stats.testedPut)\n self.Plot(\"Loss Details\", \"Short Call Tested\", self.stats.testedCall)\n \n # Stop the timer\n self.executionTimer.stop()\n\n\n \n\n def setupBacktest(self): \n \n # Set the logger\n self.logger = Logger(self, className = type(self).__name__, logLevel = self.logLevel)\n \n # Set the timer to monitor the execution performance\n self.executionTimer = Timer(self)\n \n # Number of currently active positions\n self.currentActivePositions = 0\n \n # Number of current working orders to open\n self.currentWorkingOrdersToOpen = 0\n \n # Initialize the dictionary to keep track of all positions\n self.allPositions = {}\n \n # Dictionary to keep track of all the available expiration dates at any given date\n self.expiryList = {}\n \n # Add the underlying\n if self.ticker in [\"SPX\", \"VIX\"]:\n # Underlying is an index\n underlying = self.AddIndex(self.ticker, self.timeResolution)\n option = self.AddIndexOption(underlying.Symbol, self.timeResolution)\n else:\n # Underlying is an equity\n underlying = self.AddEquity(self.ticker, self.timeResolution)\n option = self.AddOption(underlying.Symbol, self.timeResolution)\n \n # Set the benchmark.\n self.SetBenchmark(underlying.Symbol)\n\n\n # Store the symbol for the option and the underlying\n self.underlyingSymbol = underlying.Symbol\n self.optionSymbol = option.Symbol\n\n # Set data normalization mode to Raw\n underlying.SetDataNormalizationMode(DataNormalizationMode.Raw)\n\n # Keep track of the option contract subscriptions\n self.optionContractsSubscriptions = []\n\n # Set Security Initializer\n self.SetSecurityInitializer(self.securityInitializer)\n \n # Set the option chain filter function\n option.SetFilter(self.optionChainFilter)\n \n # -----------------------------------------------------------------------------\n # Scheduled functions (every 
xx minutes)\n # -----------------------------------------------------------------------------\n #self.Schedule.On(self.DateRules.EveryDay(self.underlyingSymbol)\n # , self.TimeRules.Every(TimeSpan.FromMinutes(self.scheduleFrequency))\n # , Action(self.openPosition)\n # )\n\n\n\n # Initialize the security every time that a new one is added\n def OnSecuritiesChanged(self, changes):\n for security in changes.AddedSecurities:\n self.securityInitializer(security)\n \n\n # Called every time a security (Option or Equity/Index) is initialized\n def securityInitializer(self, security):\n security.SetDataNormalizationMode(DataNormalizationMode.Raw)\n security.SetMarketPrice(self.GetLastKnownPrice(security))\n if security.Type in [SecurityType.Option, SecurityType.IndexOption]:\n security.SetFillModel(BetaFillModel(self))\n security.SetFeeModel(TastyWorksFeeModel())\n\n\n # Coarse filter for the option chain\n def optionChainFilter(self, universe):\n # Start the timer\n self.executionTimer.start()\n\n # Include Weekly contracts\n # nStrikes contracts to each side of the ATM\n # Contracts expiring in the range (DTE-5, DTE)\n filteredUniverse = universe.IncludeWeeklys()\\\n .Strikes(-self.nStrikesLeft, self.nStrikesRight)\\\n .Expiration(max(0, self.dte - self.dteWindow), max(0, self.dte))\n\n # Stop the timer\n self.executionTimer.stop()\n \n return filteredUniverse\n\n \n def optionChainProviderFilter(self, symbols, min_strike_rank, max_strike_rank, minDte, maxDte):\n # Check if we got any symbols to process\n if len(symbols) == 0: \n return None\n \n # Filter the symbols based on the expiry range\n filteredSymbols = [symbol for symbol in symbols \n if minDte <= (symbol.ID.Date.date() - self.Time.date()).days <= maxDte\n ]\n\n # Exit if there are no symbols for the selected expiry range\n if not filteredSymbols: \n return None\n\n # Get the latest price of the underlying\n underlyingLastPrice = self.Securities[self.underlyingSymbol].Price\n\n # Find the ATM strike\n atm_strike = sorted(filteredSymbols\n ,key = lambda x: abs(x.ID.StrikePrice - self.Securities[self.underlyingSymbol].Price)\n )[0].ID.StrikePrice\n \n # Get the list of available strikes\n strike_list = sorted(set([i.ID.StrikePrice for i in filteredSymbols]))\n \n # Find the index of ATM strike in the sorted strike list\n atm_strike_rank = strike_list.index(atm_strike)\n # Get the Min and Max strike price based on the specified number of strikes\n min_strike = strike_list[max(0, atm_strike_rank + min_strike_rank + 1)]\n max_strike = strike_list[min(atm_strike_rank + max_strike_rank - 1, len(strike_list)-1)]\n \n # Get the list of symbols within the selected strike range\n selectedSymbols = [symbol for symbol in filteredSymbols \n if min_strike <= symbol.ID.StrikePrice <= max_strike\n ]\n\n # Loop through all Symbols and create a list of OptionContract objects\n contracts = []\n for symbol in selectedSymbols:\n # Create the OptionContract\n contract = OptionContract(symbol, symbol.Underlying)\n # Add this contract to the data subscription so we can retrieve the Bid/Ask price\n if not contract.Symbol in self.optionContractsSubscriptions:\n self.AddOptionContract(contract.Symbol, self.timeResolution)\n \n # Set the BidPrice\n contract.BidPrice = self.Securities[contract.Symbol].BidPrice\n # Set the AskPrice\n contract.AskPrice = self.Securities[contract.Symbol].AskPrice\n # Set the UnderlyingLastPrice\n contract.UnderlyingLastPrice = underlyingLastPrice\n # Add this contract to the output list\n contracts.append(contract)\n\n # Return the 
list of contracts\n return contracts \n \n def getOptionContracts(self, slice):\n # Start the timer\n self.executionTimer.start()\n \n contracts = None\n # Set the DTE range (make sure values are not negative)\n minDte = max(0, self.dte - self.dteWindow)\n maxDte = max(0, self.dte)\n \n # Loop through all chains\n for chain in slice.OptionChains:\n # Look for the specified optionSymbol \n if chain.Key != self.optionSymbol:\n continue \n # Make sure there are any contracts in this chain \n if chain.Value.Contracts.Count != 0:\n # Put the contracts into a list so we can cache the Greeks across multiple strategies\n contracts = [contract for contract in chain.Value\n if minDte <= (contract.Expiry.date() - self.Time.date()).days <= maxDte\n ]\n\n # If no chains were found, use OptionChainProvider to see if we can find any contracts\n # Only do this for short term expiration contracts (DTE < 3) where slice.OptionChains usually fails to retrieve any chains\n # We don't want to do this all the times for performance reasons\n if contracts == None and self.dte < 3:\n # Get the list of available option Symbols\n symbols = self.OptionChainProvider.GetOptionContractList(self.underlyingSymbol, self.Time)\n # Get the contracts\n contracts = self.optionChainProviderFilter(symbols, -self.nStrikesLeft, self.nStrikesRight, minDte, maxDte)\n\n # Stop the timer\n self.executionTimer.stop()\n \n return contracts\n\n def runStrategies(self):\n # Start the timer\n self.executionTimer.start()\n \n # Exit if the algorithm is warming up or the market is closed\n if self.IsWarmingUp or not self.IsMarketOpen(self.underlyingSymbol):\n return\n \n # Compute the schedule start datetime\n scheduleStartDttm = datetime.combine(self.Time.date(), self.scheduleStartTime)\n \n # Exit if we have not reached the the schedule start datetime\n if self.Time < scheduleStartDttm:\n return\n \n # Get the number of minutes since the schedule start time\n minutesSincescheduleStart = round((self.Time - scheduleStartDttm).seconds/60)\n \n # Convert the schedule frequency (timedelta) into a number of minutes\n scheduleFrequencyMinutes = round(self.scheduleFrequency.seconds/60)\n \n # Exit if we are not at the right scheduled interval\n if minutesSincescheduleStart % scheduleFrequencyMinutes != 0:\n return\n\n # Do not open any new positions if we have reached the maximum\n if (self.currentActivePositions + self.currentWorkingOrdersToOpen) >= self.maxActivePositions:\n return\n \n # Get the option chain\n chain = self.getOptionContracts(self.CurrentSlice)\n\n\n # Exit if we got no chains\n if chain == None:\n self.logger.debug(\" -> No chains inside currentSlice!\")\n return\n\n # The list of expiry dates will change once a day (at most). 
See if we have already processed this list for the current date\n if self.Time.date() in self.expiryList:\n # Get the expiryList from the dictionary\n expiryList = self.expiryList.get(self.Time.date())\n else:\n # Start the timer\n self.executionTimer.start(methodName = \"runStrategies -> getExpiryList\")\n \n # Set the DTE range (make sure values are not negative)\n minDte = max(0, self.dte - self.dteWindow)\n maxDte = max(0, self.dte)\n # Get the list of expiry dates, sorted in reverse order\n expiryList = sorted(set([contract.Expiry for contract in chain \n if minDte <= (contract.Expiry.date() - self.Time.date()).days <= maxDte\n ]\n )\n , reverse = True\n )\n # Add the list to the dictionary\n self.expiryList[self.Time.date()] = expiryList\n # Log the list of expiration dates found in the chain\n self.logger.debug(f\"Expiration dates in the chain: {len(expiryList)}\")\n for expiry in expiryList:\n self.logger.debug(f\" -> {expiry}\")\n \n # Start the timer\n self.executionTimer.stop(methodName = \"runStrategies -> getExpiryList\")\n\n # Exit if we haven't found any Expiration cycles to process\n if not expiryList:\n return\n \n # Loop through all strategies\n for strategy in self.strategies:\n # Run the strategy\n strategy.run(chain, expiryList = expiryList)\n \n # Stop the timer\n self.executionTimer.stop()\n \n def OnOrderEvent(self, orderEvent):\n # Start the timer\n self.executionTimer.start()\n\n # Log the order event\n self.logger.debug(orderEvent)\n \n # Loop through all strategies\n for strategy in self.strategies:\n # Call the Strategy orderEvent handler\n strategy.handleOrderEvent(orderEvent)\n \n # Stop the timer\n self.executionTimer.stop()\n\n \n def OnData(self, slice):\n # Start the timer\n self.executionTimer.start()\n \n # Update the charts\n self.updateCharts()\n\n # Exit if the algorithm is warming up\n if self.IsWarmingUp:\n return\n\n # Run the strategies to open new positions\n self.runStrategies()\n\n # Loop through all strategies\n for strategy in self.strategies:\n # Manage all the open positions for the current strategy\n strategy.managePositions()\n \n # Update the charts (in case any position was closed)\n self.updateCharts()\n\n # Stop the timer\n self.executionTimer.stop()\n\n\n def OnEndOfAlgorithm(self):\n \n # Convert the dictionary into a Pandas Data Frame\n dfAllPositions = pd.DataFrame.from_dict(self.allPositions, orient = \"index\")\n \n self.Log(\"\")\n self.Log(\"---------------------------------\")\n self.Log(\" Execution Statistics \")\n self.Log(\"---------------------------------\")\n self.executionTimer.showStats()\n self.Log(\"\")\n self.Log(\"\")\n \n self.Log(\"\")\n self.Log(\"---------------------------------\")\n self.Log(\" Performance Statistics \")\n self.Log(\"---------------------------------\")\n self.Log(\"\")\n self.Log(f\"Total Contracts: {self.stats.won + self.stats.lost}\")\n self.Log(f\" -> Won: {self.stats.won}\")\n self.Log(f\" -> Lost: {self.stats.lost}\")\n self.Log(f\" -> Win Rate: {self.stats.winRate}\")\n self.Log(f\"Total Credit: {self.stats.totalCredit}\")\n self.Log(f\"Total Debit: {self.stats.totalDebit}\")\n self.Log(f\"Total P&L: {self.stats.PnL}\")\n self.Log(f\"Average profit: {self.stats.averageWinAmt}\")\n self.Log(f\"Average Loss: {self.stats.averageLossAmt}\")\n self.Log(f\"Max Win: {self.stats.maxWin}\")\n self.Log(f\"Max Loss: {self.stats.maxLoss}\")\n self.Log(f\"Tested Calls: {self.stats.testedCall}\")\n self.Log(f\"Tested Puts: {self.stats.testedPut}\")\n self.Log(\"\")\n self.Log(\"\")\n \n 
self.Log(\"---------------------------------\")\n self.Log(\" Trade Log \")\n self.Log(\"---------------------------------\")\n self.Log(\"\")\n # Print the data frame to the log in csv format\n self.Log(dfAllPositions.to_csv(index = False))\n #self.Log(self.allPositions)\n self.Log(\"\")\n \nclass TastyWorksFeeModel:\n def GetOrderFee(self, parameters):\n optionFee = min(10, parameters.Order.AbsoluteQuantity * 0.5)\n transactionFee = parameters.Order.AbsoluteQuantity * 0.14\n return OrderFee(CashAmount(optionFee + transactionFee, 'USD'))\n\n\n# Dummy class useful to create empty objects\nclass CustomObject:\n pass\n\n# Custom Fill model based on Beta distribution:\n# - Orders are filled based on a Beta distribution skewed towards the mid-price with Sigma = bidAskSpread/6 (-> 99% fills within the bid-ask spread)\nclass BetaFillModel(ImmediateFillModel):\n\n # Initialize Random Number generator with a fixed seed (for replicability)\n random = np.random.RandomState(1234)\n \n def __init__(self, context):\n self.context = context\n \n def MarketFill(self, asset, order):\n # Start the timer\n self.context.executionTimer.start()\n \n # Get the random number generator\n random = BetaFillModel.random\n # Compute the Bid-Ask spread\n bidAskSpread = abs(asset.AskPrice - asset.BidPrice)\n # Compute the Mid-Price\n midPrice = 0.5*(asset.AskPrice + asset.BidPrice)\n # Call the parent method\n fill = super().MarketFill(asset, order)\n # Setting the parameters of the Beta distribution:\n # - The shape parameters (alpha and beta) are chosen such that the fill is \"reasonably close\" to the mid-price about 96% of the times\n # - How close -> The fill price is within 15% of half the bid-Ask spread\n if order.Direction == OrderDirection.Sell:\n # Beta distribution in the range [Bid-Price, Mid-Price], skewed towards the Mid-Price\n # - Fill price is within the range [Mid-Price - 0.15*bidAskSpread/2, Mid-Price] with about 96% probability\n offset = asset.BidPrice\n alpha = 20\n beta = 1\n else:\n # Beta distribution in the range [Mid-Price, Ask-Price], skewed towards the Mid-Price\n # - Fill price is within the range [Mid-Price, Mid-Price + 0.15*bidAskSpread/2] with about 96% probability\n offset = midPrice\n alpha = 1\n beta = 20\n # Range (width) of the Beta distribution\n range = bidAskSpread/2.0\n # Compute the new fillPrice (centered around the midPrice)\n fillPrice = round(offset + range * random.beta(alpha, beta), 2)\n # Update the FillPrice attribute\n fill.FillPrice = fillPrice\n # Stop the timer\n self.context.executionTimer.stop()\n # Return the fill\n return fill\n\n \nclass Timer:\n\n performanceTemplate = {\"calls\": 0.0\n , \"elapsedMin\": float('Inf')\n , \"elapsedMean\": None\n , \"elapsedMax\": float('-Inf')\n , \"elapsedTotal\": 0.0\n , \"elapsedLast\": None\n , \"startTime\": None\n }\n \n def __init__(self, context):\n self.context = context\n self.performance = {}\n \n def start(self, methodName = None):\n # Get the name of the calling method\n methodName = methodName or sys._getframe(1).f_code.co_name\n # Get current performance stats\n performance = self.performance.get(methodName, Timer.performanceTemplate.copy())\n # Get the startTime\n performance[\"startTime\"] = timer.perf_counter()\n # Save it back in the dictionary\n self.performance[methodName] = performance\n \n \n def stop(self, methodName = None):\n # Get the name of the calling method\n methodName = methodName or sys._getframe(1).f_code.co_name\n # Get current performance stats\n performance = 
self.performance.get(methodName)\n # Compute the elapsed\n elapsed = timer.perf_counter() - performance[\"startTime\"]\n # Update the stats\n performance[\"calls\"] += 1\n performance[\"elapsedLast\"] = elapsed\n performance[\"elapsedMin\"] = min(performance[\"elapsedMin\"], elapsed)\n performance[\"elapsedMax\"] = max(performance[\"elapsedMax\"], elapsed)\n performance[\"elapsedTotal\"] += elapsed\n performance[\"elapsedMean\"] = performance[\"elapsedTotal\"]/performance[\"calls\"]\n \n def showStats(self, methodName = None):\n methods = methodName or self.performance.keys()\n for method in methods:\n performance = self.performance.get(method)\n if performance:\n self.context.logger.info(f\"Execution Stats ({method}):\")\n for key in performance:\n if key != \"startTime\":\n if key == \"calls\" or performance[key] == None:\n value = performance[key]\n elif math.isinf(performance[key]):\n value = None\n else:\n value = timedelta(seconds = performance[key])\n self.context.logger.info(f\" --> {key}:{value}\")\n else:\n self.context.logger.warning(f\"There are no execution stats available for method {method}!\")" ]
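The Timer class above keys its statistics on the calling method's name via sys._getframe(1).f_code.co_name and measures wall-clock time with time.perf_counter. A stripped-down, framework-free sketch of the same pattern (MiniTimer and slowMethod are illustrative names):

import sys
import time as timer

class MiniTimer:
    def __init__(self):
        self.stats = {}   # methodName -> {"calls", "total", "start"}

    def start(self, methodName=None):
        # default to the caller's function name, as in the Timer class above
        name = methodName or sys._getframe(1).f_code.co_name
        rec = self.stats.setdefault(name, {"calls": 0, "total": 0.0, "start": None})
        rec["start"] = timer.perf_counter()

    def stop(self, methodName=None):
        name = methodName or sys._getframe(1).f_code.co_name
        rec = self.stats[name]
        rec["calls"] += 1
        rec["total"] += timer.perf_counter() - rec["start"]

t = MiniTimer()

def slowMethod():
    t.start()              # recorded under 'slowMethod' automatically
    sum(range(10**6))
    t.stop()

slowMethod()
print(t.stats["slowMethod"]["calls"], t.stats["slowMethod"]["total"])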
[ [ "numpy.random.RandomState", "pandas.DataFrame.from_dict" ] ]
XYPB/SpecVQGAN
[ "ed3c0f86c41bc408824979305d9c4f6df0877973", "ed3c0f86c41bc408824979305d9c4f6df0877973" ]
[ "specvqgan/onset_baseline/data/greatesthit.py", "sample_visualization.py" ]
[ "from data import *\nimport pdb\nfrom utils import sound, sourcesep\nimport csv\nimport glob\nimport h5py\nimport io\nimport json\nimport librosa\nimport numpy as np\nimport os\nimport pickle\nfrom PIL import Image\nfrom PIL import ImageFilter\nimport random\nimport scipy\nimport soundfile as sf\nimport time\nfrom tqdm import tqdm\nimport glob\nimport cv2\n\nimport torch\nimport torch.nn as nn\nimport torchaudio\nimport torchvision.transforms as transforms\n# import kornia as K\nimport sys\nsys.path.append('..')\n\n\nclass GreatestHitDataset(object):\n def __init__(self, args, split='train'):\n self.split = split\n if split == 'train':\n list_sample = '/home/duyxxd/SpecVQGAN/data/greatesthit_train_2.00.json'\n elif split == 'val':\n list_sample = '/home/duyxxd/SpecVQGAN/data/greatesthit_valid_2.00.json'\n elif split == 'test':\n list_sample = '/home/duyxxd/SpecVQGAN/data/greatesthit_test_2.00.json'\n\n # save args parameter\n self.repeat = args.repeat if split == 'train' else 1\n self.max_sample = args.max_sample\n\n self.video_transform = transforms.Compose(\n self.generate_video_transform(args))\n \n if isinstance(list_sample, str):\n with open(list_sample, \"r\") as f:\n self.list_sample = json.load(f)\n\n if self.max_sample > 0:\n self.list_sample = self.list_sample[0:self.max_sample]\n self.list_sample = self.list_sample * self.repeat\n\n random.seed(1234)\n np.random.seed(1234)\n num_sample = len(self.list_sample)\n if self.split == 'train':\n random.shuffle(self.list_sample)\n\n # self.class_dist = self.unbalanced_dist()\n print('Greatesthit Dataloader: # sample of {}: {}'.format(self.split, num_sample))\n\n\n def __getitem__(self, index):\n # import pdb; pdb.set_trace()\n info = self.list_sample[index].split('_')[0]\n video_path = os.path.join('data', 'Greatest-hits', 'ProcessedData', info)\n frame_path = os.path.join(video_path, 'frames')\n audio_path = os.path.join(video_path, 'audio')\n audio_path = glob.glob(f\"{audio_path}/*.wav\")[0]\n meta_path = os.path.join(video_path, 'hit_record.json')\n with open(meta_path, \"r\") as f:\n meta_dict = json.load(f)\n\n audio, audio_sample_rate = sf.read(audio_path, start=0, stop=1000, dtype='float64', always_2d=True)\n frame_rate = 15\n duration = 2.0\n frame_list = glob.glob(f'{frame_path}/*.jpg')\n frame_list.sort()\n\n hit_time = float(self.list_sample[index].split('_')[-1]) / 22050\n if self.split == 'train':\n frame_start = hit_time * frame_rate + np.random.randint(10) - 5\n frame_start = max(frame_start, 0)\n frame_start = min(frame_start, len(frame_list) - duration * frame_rate)\n \n else:\n frame_start = hit_time * frame_rate\n frame_start = max(frame_start, 0)\n frame_start = min(frame_start, len(frame_list) - duration * frame_rate)\n frame_start = int(frame_start)\n \n frame_list = frame_list[frame_start: int(\n frame_start + np.ceil(duration * frame_rate))]\n audio_start = int(frame_start / frame_rate * audio_sample_rate)\n audio_end = int(audio_start + duration * audio_sample_rate)\n\n imgs = self.read_image(frame_list)\n audio, audio_rate = sf.read(audio_path, start=audio_start, stop=audio_end, dtype='float64', always_2d=True)\n audio = audio.mean(-1)\n\n onsets = librosa.onset.onset_detect(y=audio, sr=audio_rate, units='time', delta=0.3)\n onsets = np.rint(onsets * frame_rate).astype(int)\n onsets[onsets>29] = 29\n label = torch.zeros(len(frame_list))\n label[onsets] = 1\n\n batch = {\n 'frames': imgs,\n 'label': label\n }\n return batch\n\n def getitem_test(self, index):\n self.__getitem__(index)\n\n def __len__(self):\n 
return len(self.list_sample)\n\n\n def read_image(self, frame_list):\n imgs = []\n convert_tensor = transforms.ToTensor()\n for img_path in frame_list:\n image = Image.open(img_path).convert('RGB')\n image = convert_tensor(image)\n imgs.append(image.unsqueeze(0))\n # (T, C, H ,W)\n imgs = torch.cat(imgs, dim=0).squeeze()\n imgs = self.video_transform(imgs)\n imgs = imgs.permute(1, 0, 2, 3)\n # (C, T, H ,W)\n return imgs\n\n def generate_video_transform(self, args):\n resize_funct = transforms.Resize((128, 128))\n if self.split == 'train':\n crop_funct = transforms.RandomCrop(\n (112, 112))\n color_funct = transforms.ColorJitter(\n brightness=0.1, contrast=0.1, saturation=0, hue=0)\n else:\n crop_funct = transforms.CenterCrop(\n (112, 112))\n color_funct = transforms.Lambda(lambda img: img)\n\n vision_transform_list = [\n resize_funct,\n crop_funct,\n color_funct,\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ]\n return vision_transform_list\n", "import argparse\nimport csv\nimport glob\nimport os\nimport sys\nimport time\nfrom datetime import datetime\nfrom pathlib import Path\n\ntry:\n import streamlit as st\nexcept ModuleNotFoundError:\n pass\n\nimport torch\nimport torchvision\nimport yaml\nfrom omegaconf import OmegaConf\n\nfrom specvqgan.util import get_ckpt_path\n\nsys.path.insert(0, '.') # nopep8\nimport matplotlib.pyplot as plt\nimport soundfile\nfrom torch.utils.data.dataloader import default_collate\n\nfrom feature_extraction.extract_mel_spectrogram import inv_transforms\nfrom train import instantiate_from_config\nfrom vocoder.modules import Generator\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-r\",\n \"--resume\",\n type=str,\n nargs=\"?\",\n help=\"load from logdir or checkpoint in logdir\",\n )\n parser.add_argument(\n \"-b\",\n \"--base\",\n nargs=\"*\",\n metavar=\"base_config.yaml\",\n help=\"paths to base configs. Loaded from left-to-right. \"\n \"Parameters can be overwritten or added with command-line options of the form `--key value`.\",\n default=list(),\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n nargs=\"?\",\n metavar=\"single_config.yaml\",\n help=\"path to single config. If specified, base configs will be ignored \"\n \"(except for the last one if left unspecified).\",\n const=True,\n default=\"\",\n )\n parser.add_argument(\n \"--ignore_base_data\",\n action=\"store_true\",\n help=\"Ignore data specification from base configs. 
Useful if you want \"\n \"to specify a custom datasets on the command line.\",\n )\n parser.add_argument(\n '--vocoder_path',\n default='./vocoder/logs/vggsound/',\n help='The path to the folder with pre-trained Vocoder (a folder from ./vocoder/logs)'\n )\n parser.add_argument(\n '--logdir',\n default='./logs/',\n help='Path to the log dir with pre-trained GPT'\n )\n return parser\n\ndef rename_models(x):\n x = x[x.index('T')+1:]\n name2type = {\n '00-43-28_vggsound_transformer': 'VGGSound – Class – VGGSound Codebook',\n '14-41-19_vas_transformer': 'VAS – Class – VGGSound Codebook',\n '09-42-07_vas_transformer': 'VAS – Class – VAS Codebook',\n '16-35-20_vggsound_transformer': 'VGGSound – No Feats – VGGSound Codebook',\n '11-18-51_vggsound_transformer': 'VGGSound – 1 Feat BN – VGGSound Codebook',\n '09-34-10_vggsound_transformer': 'VGGSound – 5 Feats BN – VGGSound Codebook',\n '07-27-58_vggsound_transformer': 'VGGSound – 212 Feats BN – VGGSound Codebook',\n '16-34-36_vas_transformer': 'VAS – No Feats – VGGSound Codebook',\n '06-32-51_vas_transformer': 'VAS – 1 Feat BN – VGGSound Codebook',\n '05-51-34_vas_transformer': 'VAS – 5 Feats BN – VGGSound Codebook',\n '05-38-40_vas_transformer': 'VAS – 212 Feats BN – VGGSound Codebook',\n '16-24-38_vas_transformer': 'VAS – No Feats – VAS Codebook',\n '13-31-37_vas_transformer': 'VAS – 1 Feats BN – VAS Codebook',\n '14-14-24_vas_transformer': 'VAS – 5 Feats BN – VAS Codebook',\n '15-17-18_vas_transformer': 'VAS – 212 Feats BN – VAS Codebook',\n '11-47-40_vas_transformer': 'VAS – 1 Feat RN50 – VGGSound Codebook',\n '11-36-00_vas_transformer': 'VAS – 5 Feats RN50 – VGGSound Codebook',\n '11-52-28_vas_transformer': 'VAS – 212 Feats RN50 – VGGSound Codebook',\n '14-59-49_vas_transformer': 'VAS – 1 Feat RN50 – VAS Codebook',\n '14-51-25_vas_transformer': 'VAS – 5 Feats RN50 – VAS Codebook',\n '13-34-39_vas_transformer': 'VAS – 212 Feats RN50 – VAS Codebook',\n '21-03-22_vggsound_transformer': 'VGGSound – 1 Feat RN50 – VGGSound Codebook',\n '21-34-25_vggsound_transformer': 'VGGSound – 5 Feats RN50 – VGGSound Codebook',\n '21-34-41_vggsound_transformer': 'VGGSound – 212 Feats RN50 – VGGSound Codebook',\n }\n if x in name2type:\n x = f'{name2type[x]} ({x})'\n return x\n\ndef load_model_from_config(config, sd, gpu=True, eval_mode=True):\n if \"ckpt_path\" in config.params:\n st.warning(\"Deleting the restore-ckpt path from the config...\")\n config.params.ckpt_path = None\n if \"downsample_cond_size\" in config.params:\n st.warning(\"Deleting downsample-cond-size from the config and setting factor=0.5 instead...\")\n config.params.downsample_cond_size = -1\n config.params[\"downsample_cond_factor\"] = 0.5\n try:\n if \"ckpt_path\" in config.params.first_stage_config.params:\n config.params.first_stage_config.params.ckpt_path = None\n st.warning(\"Deleting the first-stage restore-ckpt path from the config...\")\n if \"ckpt_path\" in config.params.cond_stage_config.params:\n config.params.cond_stage_config.params.ckpt_path = None\n st.warning(\"Deleting the cond-stage restore-ckpt path from the config...\")\n except:\n pass\n\n model = instantiate_from_config(config)\n if sd is not None:\n missing, unexpected = model.load_state_dict(sd, strict=False)\n try:\n st.warning(f\"Missing Keys in State Dict: {missing}\")\n st.warning(f\"Unexpected Keys in State Dict: {unexpected}\")\n except NameError:\n pass\n if gpu:\n model.cuda()\n if eval_mode:\n model.eval()\n return {\"model\": model}\n\ndef load_vocoder(ckpt_vocoder: str, eval_mode: bool):\n ckpt_vocoder = 
Path(ckpt_vocoder)\n vocoder_sd = torch.load(ckpt_vocoder / 'best_netG.pt', map_location='cpu')\n\n with open(ckpt_vocoder / 'args.yml', 'r') as f:\n args = yaml.load(f, Loader=yaml.UnsafeLoader)\n\n vocoder = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers)\n vocoder.load_state_dict(vocoder_sd)\n\n if eval_mode:\n vocoder.eval()\n\n return {'model': vocoder}\n\ndef load_feature_extractor(gpu, eval_mode=True):\n s = '''\n feature_extractor:\n target: evaluation.feature_extractors.melception.Melception\n params:\n num_classes: 309\n features_list: ['logits']\n feature_extractor_weights_path: ./evaluation/logs/21-05-10T09-28-40/melception-21-05-10T09-28-40.pt\n transform_dset_out_to_inception_in:\n - target: evaluation.datasets.transforms.FromMinusOneOneToZeroOne\n - target: specvqgan.modules.losses.vggishish.transforms.StandardNormalizeAudio\n params:\n specs_dir: ./data/vggsound/melspec_10s_22050hz\n cache_path: ./specvqgan/modules/losses/vggishish/data/\n - target: evaluation.datasets.transforms.GetInputFromBatchByKey\n params:\n input_key: image\n - target: evaluation.datasets.transforms.ToFloat32'''\n feat_extractor_cfg = OmegaConf.create(s)\n # downloading the checkpoint for melception\n get_ckpt_path('melception', 'evaluation/logs/21-05-10T09-28-40')\n pl_sd = torch.load(feat_extractor_cfg.feature_extractor.params.feature_extractor_weights_path,\n map_location=\"cpu\")\n\n # use gpu=False to compute it on CPU\n feat_extractor = load_model_from_config(\n feat_extractor_cfg.feature_extractor, pl_sd['model'], gpu=gpu, eval_mode=eval_mode)['model']\n\n if feat_extractor_cfg.transform_dset_out_to_inception_in is not None:\n transforms = [instantiate_from_config(c) for c in feat_extractor_cfg.transform_dset_out_to_inception_in]\n else:\n transforms = [lambda x: x]\n transforms = torchvision.transforms.Compose(transforms)\n\n vggsound_meta = list(csv.reader(open('./data/vggsound.csv'), quotechar='\"'))\n unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))\n label2target = {label: target for target, label in enumerate(unique_classes)}\n target2label = {target: label for label, target in label2target.items()}\n return {'model': feat_extractor, 'transforms': transforms, 'target2label': target2label}\n\ndef load_model_and_dataset(config, ckpt, ckpt_vocoder, gpu=True, eval_mode=True):\n # get data\n dsets = instantiate_from_config(config.data)\n dsets.prepare_data()\n dsets.setup()\n\n # now load the specified checkpoint\n if ckpt:\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n global_step = pl_sd[\"global_step\"]\n else:\n pl_sd = {\"state_dict\": None}\n global_step = None\n\n # loading the vocoder\n if ckpt_vocoder:\n vocoder = load_vocoder(ckpt_vocoder, eval_mode)['model']\n vocoder = vocoder.to('cuda') if gpu else vocoder\n\n model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']\n\n # patch config for the adjusted input length which could be longer than during training (infinite samples)\n # local_permuter = model.first_stage_permuter\n # if config.model.params.first_stage_permuter_config.params.W is not None:\n # config.model.params.first_stage_permuter_config.params.W *= W_scale\n # model.first_stage_permuter = instantiate_from_config(config.model.params.first_stage_permuter_config).cuda().eval()\n # print(config.model.params.first_stage_permuter_config)\n\n feat_extractor = load_feature_extractor(gpu, eval_mode)\n\n return dsets, model, vocoder, global_step, feat_extractor\n\n\n# the same as the 
decorator `@st.cache(allow_output_mutation=True, suppress_st_warning=True)`\ntry:\n load_model_and_dataset = st.cache(load_model_and_dataset, allow_output_mutation=True,\n suppress_st_warning=True)\nexcept NameError:\n pass\n\n\ndef bchw_to_st(x, to_scale=True, flip_dims=None):\n if flip_dims is not None:\n # dims is a tuple. To flip only 2nd dim use: `flip_dims=(2,)`\n x = x.flip(dims=flip_dims)\n if to_scale:\n # (-1, 1) -> (0, 1)\n return (x.detach().cpu().numpy().transpose(0, 2, 3, 1) + 1.) / 2.\n else:\n return x.detach().cpu().numpy().transpose(0, 2, 3, 1)\n\ndef tensor_to_plt(x, vmin=None, vmax=None, flip_dims=None):\n if flip_dims is not None:\n # dims is a tuple. To flip only 2nd dim use: `flip_dims=(2,)`\n x = x.flip(dims=flip_dims)\n # remove batch dim and make channel-last\n if len(x.shape) > 3:\n x = x.squeeze(0)\n # if the figure is taller than it is wider rotate (transpose). Also clipping it as feats can be large\n if x.shape[-1] < x.shape[-2]:\n x = x.clip(-2, 2).transpose(-1, -2)\n x = x.cpu()\n if len(x.shape) == 3:\n x = x.permute(1, 2, 0)\n # fig, arr = plt.subplots(nrows=1, ncols=1)\n # # arr[i].set_title(f'{vid_name}_{name}')\n # arr.imshow(x)\n # arr.set_frame_on(False)\n fig = plt.figure(frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n # for facehq\n # TODO: if x.shape[0] == 3:\n # x = x.flip(dims=(1,)).permute(1, 2, 0)\n # x = (x + 1) / 2\n # x = x.clip(0, 1)\n\n # newer version of the matplotlib started to fails when an image has 3 dim with `1` as the last one\n if x.ndim == 3 and x.shape[-1] == 1:\n x = x[:, :, 0]\n ax.imshow(x, cmap=plt.get_cmap('gray'), vmin=vmin, vmax=vmax)\n # ax.set_title('Some', fontsize=8)\n return fig\n\ndef save_results(spec_plt, waves_dict, topk_preds, logdir, batch, mode, sample_rate, specs_key_in_batch):\n # implemented only for B=1, otherwise mind the batch[key][0]\n label = ''.join(filter(lambda x: str.isalnum(x) or ' ', batch['label'][0])).replace(' ', '_')\n target = int(batch['target'][0])\n vid_id = Path(batch[specs_key_in_batch][0]).name.replace('_mel.npy', '')\n time_stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')\n save_dir = Path(logdir) / 'streamlit' / f'{target:03d}_{label}' / vid_id\n os.makedirs(save_dir, exist_ok=True)\n dpi = 300\n for wave_type, wave in waves_dict.items():\n soundfile.write(save_dir / f'{mode}_{time_stamp}_{wave_type}.wav', wave, sample_rate, 'PCM_24')\n if len(wave) > sample_rate * 10:\n dpi *= 10\n spec_plt.savefig(save_dir / f'{mode}_{time_stamp}.png', bbox_inches='tight', pad_inches=0, dpi=dpi)\n with open(save_dir / f'{mode}_{time_stamp}_topkpreds.txt', 'w') as out_f:\n out_f.write(topk_preds)\n\ndef show_wave_in_streamlit(wave_npy, sample_rate, caption):\n # showing in streamlit. 
We cannot just show the npy wave and we need to save it first\n temp_wav_file_path = 'todel.wav'\n soundfile.write(temp_wav_file_path, wave_npy, sample_rate, 'PCM_24')\n st.text(caption)\n st.audio(temp_wav_file_path, format='audio/wav')\n os.remove(temp_wav_file_path)\n\ndef spec_to_audio_to_st(x, spec_dir_path, sample_rate, show_griffin_lim, vocoder=None, show_in_st=True):\n # audios are in [-1, 1], making them in [0, 1]\n spec = (x.data.squeeze(0) + 1) / 2\n\n out = {}\n if vocoder:\n # (L,) <- wave: (1, 1, L).squeeze() <- spec: (1, F, T)\n wave_from_vocoder = vocoder(spec).squeeze().cpu().numpy()\n out['vocoder'] = wave_from_vocoder\n if show_in_st:\n show_wave_in_streamlit(wave_from_vocoder, sample_rate, 'Reconstructed Wave via MelGAN')\n\n if show_griffin_lim:\n spec = spec.squeeze(0).cpu().numpy()\n wave_from_griffinlim = inv_transforms(spec, Path(spec_dir_path).stem)\n out['inv_transforms'] = wave_from_griffinlim\n if show_in_st:\n show_wave_in_streamlit(wave_from_griffinlim, sample_rate, 'Reconstructed Wave via Griffin Lim')\n\n return out\n\ndef all_attention_to_st(attention, placeholders=None, scale_by_prior=None):\n if scale_by_prior:\n B, H, T, T = attention.shape\n # attention weight is 1/T: if we have a seq with length 3 the weights are 1/3, 1/3, and 1/3\n # making T by T matrix with zeros in the upper triangular part\n attention_uniform_prior = 1 / torch.arange(1, T+1).view(1, T, 1).repeat(B, 1, T)\n attention_uniform_prior = attention_uniform_prior.tril().view(B, 1, T, T).to(attention.device)\n attention = attention - attention_uniform_prior\n\n attention_agg = attention.sum(dim=1, keepdims=True)\n att_st = tensor_to_plt(attention_agg)\n # z_att_st = tensor_to_plt(z_att, flip_z_dims)\n if placeholders is None:\n return att_st\n else:\n placeholders['title_z_att'].text(f'Attention to All. {list(attention_agg.squeeze().shape)}')\n placeholders['z_att'].write(att_st)\n placeholders['title_c_att'].empty()\n placeholders['c_att'].empty()\n\ndef last_attention_to_st(attention, z_curr_step, c_length, z_permuter, c_permuter, quant_c_shape,\n quant_z_shape, placeholders=None, flip_c_dims=None, flip_z_dims=None):\n B, H, T, T = attention.shape\n # Since the attention ignores the last (target) element, we will visualize it as 0 – padding last 2 dims\n # (B, H, T+1, T+1)\n attention = torch.nn.functional.pad(attention, pad=(0, 1, 0, 1), value=0)\n current_step = c_length + z_curr_step\n attention_at_curr_step = attention[:, :, current_step-1, :]\n # (B, H, c_length), (B, H, z_length) <-\n c_att, z_att = attention_at_curr_step[:, :, :c_length], attention_at_curr_step[:, :, c_length:]\n # aggregate through all heads H -> (B, c_length), (B, z_length)\n c_att = c_att.sum(dim=1) # * 10\n z_att = z_att.sum(dim=1) # * 10\n # (B, length) -> (B, 1, *2d_or_1d_code_book_shape). 
*shpae[2:] will take 2 elems if 2d and 1 if 1d\n c_att = c_permuter(c_att, reverse=True).reshape(B, 1, *quant_c_shape[2:])\n z_att = z_permuter(z_att, reverse=True).reshape(B, 1, *quant_z_shape[2:])\n # we don't need to flip 1d cond but we do need it for 2d input because of the spectrograms (upside-down)\n # making value in two plots in the same range\n # vmin = min(c_att.min(), z_att.min())\n # vmax = max(c_att.max(), z_att.max())\n vmin = None\n vmax = None\n c_att_st = tensor_to_plt(c_att, vmin, vmax, flip_c_dims)\n z_att_st = tensor_to_plt(z_att, vmin, vmax, flip_z_dims)\n c_att_weight = c_att.sum() / H\n z_att_weight = z_att.sum() / H\n if placeholders is None:\n return c_att_st, z_att_st\n else:\n if len(c_att.squeeze().shape) > 0:\n placeholders['title_c_att'].text(f'Attention to C. {list(c_att.squeeze().shape)}. Sum {c_att_weight:.2f}')\n placeholders['c_att'].pyplot(c_att_st)\n else:\n placeholders['c_att'].empty()\n placeholders['title_c_att'].text(f'Attention to C. Sum {c_att_weight:.2f}')\n placeholders['title_z_att'].text(f'Attention to Z. {list(z_att.squeeze().shape)}. Sum {z_att_weight:.2f}')\n placeholders['z_att'].write(z_att_st)\n\ndef get_class_preditions(x, feat_extractor, k=10):\n # use device=torch.device('cpu') to compute on cpu and save some memory\n device = x.device\n x = {'image': x.squeeze(0).cpu()}\n x = feat_extractor['transforms'](x).to(device)\n features = feat_extractor['model'](x)\n featuresdict = feat_extractor['model'].convert_features_tuple_to_dict(features)\n probs = featuresdict['logits'].softmax(dim=1)\n topk_probs, topk_targets = probs.topk(k)\n to_print = f'Spectrogram Classifier (K={k}):\\n'\n for p, y in zip(topk_probs.squeeze(0).cpu().tolist(), topk_targets.squeeze(0).cpu().tolist()):\n to_print += f'\\t{feat_extractor[\"target2label\"][y]}: {p:.5f}\\n'\n return to_print\n\n\ndef sample_conditionally(z_indices, sampling_shape, c_indices, quant_c, full_att_mat, scale_att_by_prior,\n temperature, top_x, update_every, placeholders,\n cond_stage_model_name, flip_z_dims, flip_c_dims, to_save_results, logdir, batch,\n specs_key_in_batch, vocoder, feat_sampler_cfg, show_griffin_lim, feat_extractor,\n mode):\n start_t = time.time()\n\n # for facehq\n # patch_size_j = 16\n # patch_size_i = 16\n patch_size_i = 5\n patch_size_j = 53\n\n B, D, hr_h, hr_w = sampling_shape\n # assert hr_w % patch_size_j == 0 and hr_w // patch_size_j == int(hr_w // patch_size_j)\n\n if mode == 'full':\n start_step = 0\n else:\n start_step = (patch_size_j // 2) * patch_size_i\n\n z_pred_indices = torch.zeros((B, hr_h*hr_w)).long().to(z_indices.device)\n z_pred_indices[:, :start_step] = z_indices[:, :start_step]\n\n for step in range(start_step, hr_w * hr_h):\n i = step % hr_h\n j = step // hr_h\n\n i_start = min(max(0, i - (patch_size_i // 2)), hr_h - patch_size_i)\n j_start = min(max(0, j - (patch_size_j // 2)), hr_w - patch_size_j)\n i_end = i_start + patch_size_i\n j_end = j_start + patch_size_j\n\n local_i = i - i_start\n local_j = j - j_start\n\n patch_2d_shape = (B, D, patch_size_i, patch_size_j)\n\n placeholders['time'].text(f\"Time: {time.time() - start_t:3.2f} seconds\")\n placeholders['info'].text(\n f\"Step: ({i},{j}) | Local: ({local_i},{local_j}) | Crop: ({i_start}:{i_end},{j_start}:{j_end})\"\n )\n\n # TODO: faceshq – we don't need to permute the reshaped indices (1st and 2nd time)\n # slicing the possibly permuted flat sequence:\n # 1D z_pred_indices is permuted: A_flat = [1, 2, 3, 4, 5, 6, 7, 8, 9].\n # the 2D input should be: A = [[1, 4, 7], [2, 5, 8], [3, 6, 
9]].\n # Therefore, after the first reshape it will be A.T = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n # the last reshape flattens is back\n patch = z_pred_indices \\\n .reshape(B, hr_w, hr_h) \\\n .permute(0, 2, 1)[:, i_start:i_end, j_start:j_end].permute(0, 2, 1) \\\n .reshape(B, patch_size_i * patch_size_j)\n\n # if cond_stage_model_name == 'CoordStage':\n # cpatch = c_indices \\\n # .reshape(B, hr_w, hr_h) \\\n # .permute(0, 2, 1)[:, i_start:i_end, j_start:j_end].permute(0, 2, 1) \\\n # .reshape(B, patch_size_i * patch_size_j)\n # elif cond_stage_model_name == 'VQModel1d':\n # cpatch = c_indices[:, j_start:j_end]\n # elif cond_stage_model_name == 'FeatsClassStage':\n # features = quant_c['feature']\n # if feat_sampler_cfg is None:\n # time_step_coeff = features.shape[-1] / sampling_shape[-1]\n # assert time_step_coeff == int(time_step_coeff), f'{features.shape}, {sampling_shape}'\n # j_start_feats = int(j_start * time_step_coeff)\n # j_end_feats = int(j_end * time_step_coeff)\n # else:\n # feat_sample_size = feat_sampler_cfg.params.feat_sample_size\n # times_to_repeat_after_resample = feat_sampler_cfg.params.times_to_repeat_after_resample\n # if times_to_repeat_after_resample is not None:\n # feat_sample_size *= times_to_repeat_after_resample\n # patches_in_z = sampling_shape[-1] // patch_size_j\n # patches_in_c = features.shape[-1] // feat_sample_size\n # # assert patches_in_c == patches_in_z, f'{features.shape}, {sampling_shape}'\n # j_start_feats = j_start // patch_size_j\n # j_end_feats = j_start + feat_sample_size\n # cpatch = {\n # 'target': quant_c['target'],\n # 'feature': c_indices['feature'][:, :, j_start_feats:j_end_feats]\n # }\n # elif cond_stage_model_name in ['RawFeatsStage', 'FeatClusterStage']:\n # if feat_sampler_cfg is None:\n # time_step_coeff = quant_c.shape[-1] / sampling_shape[-1]\n # assert time_step_coeff == int(time_step_coeff), f'{quant_c.shape}, {sampling_shape}'\n # j_start_feats = int(j_start * time_step_coeff)\n # j_end_feats = int(j_end * time_step_coeff)\n # else:\n # feat_sample_size = feat_sampler_cfg.params.feat_sample_size\n # times_to_repeat_after_resample = feat_sampler_cfg.params.times_to_repeat_after_resample\n # if times_to_repeat_after_resample is not None:\n # feat_sample_size *= times_to_repeat_after_resample\n # patches_in_z = sampling_shape[-1] // patch_size_j\n # patches_in_c = quant_c.shape[-1] // feat_sample_size\n # print(patches_in_c, patches_in_z)\n # # assert patches_in_c == patches_in_z, f'{quant_c.shape}, {sampling_shape}'\n # j_start_feats = j_start // patch_size_j\n # j_end_feats = j_start + feat_sample_size\n # if cond_stage_model_name == 'FeatClusterStage':\n # cpatch = c_indices[:, j_start_feats:j_end_feats]\n # else:\n # cpatch = c_indices[:, :, j_start_feats:j_end_feats]\n # elif cond_stage_model_name == 'ClassOnlyStage':\n # cpatch = c_indices\n # else:\n # raise NotImplementedError\n\n # assuming we don't crop the conditioning and just use the whole c, if not desired uncomment the above\n cpatch = c_indices\n\n if cond_stage_model_name in ['RawFeatsStage', 'ClassOnlyStage', 'FeatsClassStage']:\n logits, _, attention = model.transformer(patch[:, :-1], cpatch)\n else:\n patch = torch.cat((cpatch, patch), dim=1)\n logits, _, attention = model.transformer(patch[:, :-1])\n # remove conditioning\n logits = logits[:, -patch_size_j*patch_size_i:, :]\n\n local_pos_in_flat = local_j * patch_size_i + local_i\n logits = logits[:, local_pos_in_flat, :]\n\n logits = logits / temperature\n\n if top_x is not None:\n logits = 
model.top_k_logits(logits, top_x)\n # apply softmax to convert to probabilities\n probs = torch.nn.functional.softmax(logits, dim=-1)\n # sample from the distribution\n ix = torch.multinomial(probs, num_samples=1)\n z_pred_indices[:, j * hr_h + i] = ix\n # print(\n # z_pred_indices \\\n # .reshape(B, hr_w, hr_h).permute(0, 2, 1)[:, i_start:i_end, j_start:j_end].permute(0, 2, 1)\n # )\n # print(z_pred_indices.reshape(B, hr_w, hr_h).permute(0, 2, 1).permute(0, 2, 1))\n\n if step % update_every == 0:\n z_pred_img = model.decode_to_img(z_pred_indices, sampling_shape)\n placeholders['title_gen_spec'].text(f'Sampling {mode}. {list(z_pred_img.squeeze().shape)}')\n # flipping the spectrogram just for illustration purposes (low freqs to bottom, high - top)\n z_pred_img_st = tensor_to_plt(z_pred_img, flip_dims=flip_z_dims)\n placeholders['gen_spec'].write(z_pred_img_st)\n\n if full_att_mat:\n all_attention_to_st(attention, placeholders, scale_att_by_prior)\n else:\n if cond_stage_model_name == 'FeatsClassStage':\n # 212 + 1\n c_length = cpatch['feature'].shape[-1] + cpatch['target'].shape[-1]\n quant_c_shape = [None, None, c_length]\n else:\n c_length = cpatch.shape[-1]\n quant_c_shape = quant_c.shape\n # quant_z_shape = sampling_shape\n last_attention_to_st(attention, local_pos_in_flat, c_length, model.first_stage_permuter,\n model.cond_stage_permuter, quant_c_shape, patch_2d_shape, placeholders,\n flip_c_dims, flip_z_dims)\n\n # quant_z_shape = sampling_shape\n z_pred_img = model.decode_to_img(z_pred_indices, sampling_shape)\n\n print(f'Time: {time.time() - start_t:3.2f} seconds')\n\n # showing the final image\n placeholders['title_gen_spec'].text(f'Sampling {mode}. {list(z_pred_img.squeeze().shape)}')\n z_pred_img_st = tensor_to_plt(z_pred_img, flip_dims=flip_z_dims)\n placeholders['gen_spec'].write(z_pred_img_st)\n\n if full_att_mat:\n all_attention_to_st(attention, placeholders, scale_att_by_prior)\n else:\n if cond_stage_model_name == 'FeatsClassStage':\n # 212 + 1\n c_length = cpatch['feature'].shape[-1] + cpatch['target'].shape[-1]\n quant_c_shape = [None, None, c_length]\n else:\n c_length = cpatch.shape[-1]\n quant_c_shape = quant_c.shape\n\n last_attention_to_st(attention, local_pos_in_flat, c_length, model.first_stage_permuter,\n model.cond_stage_permuter, quant_c_shape, patch_2d_shape, placeholders,\n flip_c_dims, flip_z_dims)\n\n topk_preds = get_class_predictions(z_pred_img, feat_extractor)\n st.text(topk_preds)\n\n waves = spec_to_audio_to_st(z_pred_img, config.data.params.spec_dir_path,\n config.data.params.sample_rate, show_griffin_lim, vocoder)\n\n if to_save_results:\n save_results(z_pred_img_st, waves, topk_preds, logdir, batch, mode, config.data.params.sample_rate,\n specs_key_in_batch)\n\n st.info('Done')\n\n\nif __name__ == \"__main__\":\n st.sidebar.info('''\n Hi there 👋\n\n This is a demo for **Visually Guided Sound Generation** project 🖼️ 👉 🔉.\n\n [Project Page](https://v-iashin.github.io/specvqgan)\n • [Paper](https://arxiv.org/abs/2110.08791)\n • [Code](https://github.com/v-iashin/SpecVQGAN)\n • [Colab](https://colab.research.google.com/drive/1pxTIMweAKApJZ3ZFqyBee3HtMqFpnwQ0?usp=sharing)\n ''')\n\n sys.path.append(os.getcwd())\n\n parser = get_parser()\n\n opt, unknown = parser.parse_known_args()\n\n avail_models = Path(opt.logdir).rglob('*/checkpoints')\n # 'T' is an empty model which prevents loading the first model by default\n avail_models = ['T'] + sorted([str(p.parent) for p in avail_models])\n # filtering out codebook models as we need only samplers\n avail_models 
= [m for m in avail_models if 'codebook' not in m]\n assert len(avail_models) > 0, f'There is no model in {opt.logdir}'\n st.sidebar.header('Select a Model')\n model_ckpt = st.sidebar.selectbox('', avail_models, 0, format_func=rename_models)\n if model_ckpt == 'T':\n st.stop()\n\n opt.resume = model_ckpt\n\n ckpt_vocoder = opt.vocoder_path\n ckpt = None\n if opt.resume:\n if not os.path.exists(opt.resume):\n raise ValueError(\"Cannot find {}\".format(opt.resume))\n if os.path.isfile(opt.resume):\n paths = opt.resume.split(\"/\")\n try:\n idx = len(paths)-paths[::-1].index(\"logs\")+1\n except ValueError:\n idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt\n logdir = \"/\".join(paths[:idx])\n ckpt = opt.resume\n else:\n assert os.path.isdir(opt.resume), opt.resume\n logdir = opt.resume.rstrip(\"/\")\n ckpt_dir = os.path.join(logdir, \"checkpoints\")\n ckpt_file = sorted(os.listdir(ckpt_dir))\n if len(ckpt_file) > 1:\n print(f'Warning: Found more than one checkpoint in {ckpt_dir}: {ckpt_file}')\n ckpt_file = ckpt_file[0]\n print(f'Using {ckpt_file}')\n ckpt = os.path.join(logdir, 'checkpoints', ckpt_file)\n print(f\"logdir:{logdir}\")\n base_configs = sorted(glob.glob(os.path.join(logdir, \"configs/*-project.yaml\")))\n opt.base = base_configs+opt.base\n\n if opt.config:\n if type(opt.config) == str:\n opt.base = [opt.config]\n else:\n opt.base = [opt.base[-1]]\n\n configs = [OmegaConf.load(cfg) for cfg in opt.base]\n cli = OmegaConf.from_dotlist(unknown)\n if opt.ignore_base_data:\n for config in configs:\n if hasattr(config, \"data\"):\n del config[\"data\"]\n config = OmegaConf.merge(*configs, cli)\n\n # determine the data folder\n if 'vggsound.VGGSound' in config.data.params.train.target:\n datapath = './data/vggsound/'\n raw_vids_dir = os.path.join(datapath, 'video')\n elif 'vas.VAS' in config.data.params.train.target:\n datapath = './data/vas/'\n raw_vids_dir = os.path.join(datapath, 'videos', '*')\n else:\n raise NotImplementedError\n\n # patch config. E.g. 
if the model is trained on another machine with different paths\n for a in ['spec_dir_path', 'rgb_feats_dir_path', 'flow_feats_dir_path']:\n if config.data.params[a] is not None:\n if 'vggsound.VGGSound' in config.data.params.train.target:\n config.data.params[a] = os.path.join(datapath, Path(config.data.params[a]).name)\n elif 'vas.VAS' in config.data.params.train.target:\n config.data.params[a] = os.path.join(datapath, 'features', '*', Path(config.data.params[a]).name)\n\n with st.beta_expander('Streamlit Logs'):\n dsets, model, vocoder, global_step, feat_extractor = load_model_and_dataset(\n config, ckpt, ckpt_vocoder, gpu=True, eval_mode=True\n )\n\n with st.beta_expander('Sampler Model Config'):\n st.text(f'Global step: {global_step}')\n st.text(f'Checkpoint: {ckpt}')\n st.json(OmegaConf.to_container(config))\n\n with torch.no_grad():\n if len(dsets.datasets) > 1:\n splits = sorted(dsets.datasets.keys())\n if 'vas.VAS' in config.data.params.train.target:\n # prevent loading train on demo which results in an error in streamlit\n splits = ['validation', 'train']\n st.sidebar.header('Select Data')\n split = st.sidebar.radio('Split', splits)\n dset = dsets.datasets[split]\n else:\n dset = next(iter(dsets.datasets.values()))\n\n # filter dataset for available items using set intersection\n if 'vggsound.VGGSound' in config.data.params.train.target:\n avail_dataset = glob.glob(config.data.params['spec_dir_path'] + '/*_mel.npy')\n avail_dataset = sorted(list(set(avail_dataset).intersection(dset.specs_dataset.dataset)))\n avail_targets = list({dset.specs_dataset.video2target[Path(c).stem[:11]] for c in avail_dataset})\n avail_label2target = {dset.specs_dataset.target2label[t]: t for t in avail_targets}\n dset.specs_dataset.label2target = avail_label2target\n dset.specs_dataset.dataset = avail_dataset\n if hasattr(dset, 'feats_dataset'):\n avail_dataset = glob.glob(config.data.params['rgb_feats_dir_path'] + '/*.pkl')\n avail_dataset = [Path(p).stem for p in avail_dataset]\n avail_dataset = sorted(list(set(avail_dataset).intersection(dset.feats_dataset.dataset)))\n dset.feats_dataset.dataset = avail_dataset\n elif 'vas.VAS' in config.data.params.train.target:\n avail_dataset = glob.glob(config.data.params['spec_dir_path'] + '/*_mel.npy')\n avail_dataset = [os.path.join(Path(p).parent.parent.stem, Path(p).stem.replace('_mel', '')) for p in avail_dataset]\n avail_dataset = sorted(list(set(avail_dataset).intersection(dset.specs_dataset.dataset)))\n dset.specs_dataset.dataset = avail_dataset\n if hasattr(dset, 'feats_dataset'):\n avail_dataset = glob.glob(config.data.params['rgb_feats_dir_path'] + '/*.pkl')\n avail_dataset = [os.path.join(Path(p).parent.parent.stem, Path(p).stem) for p in avail_dataset]\n avail_dataset = sorted(list(set(avail_dataset).intersection(dset.feats_dataset.dataset)))\n dset.feats_dataset.dataset = avail_dataset\n\n if len(dset) == 0:\n st.sidebar.info('There are no samples for this split. 
Please select another split.')\n st.stop()\n\n select_specific_class = st.sidebar.checkbox('Select Specific Class...', value=False)\n\n # add available classes\n if select_specific_class:\n labels = dset.specs_dataset.label2target.keys()\n label_choice = st.sidebar.selectbox('Select a Class', sorted(labels))\n # filter dataset for observations belonging to a specific class\n label2target = dset.specs_dataset.label2target\n if 'vggsound.VGGSound' in config.data.params.train.target:\n video2target = dset.specs_dataset.video2target\n paths = dset.specs_dataset.dataset\n filter_paths = [c for c in paths if video2target[Path(c).stem[:11]] == label2target[label_choice]]\n dset.specs_dataset.dataset = filter_paths\n # if we have another first stage we need to do something extra\n if hasattr(dset, 'feats_dataset'):\n paths_feats = dset.feats_dataset.dataset\n filter_paths_feats = [c for c in paths_feats if video2target[Path(c).stem[:11]] == label2target[label_choice]]\n dset.feats_dataset.dataset = filter_paths_feats\n elif 'vas.VAS' in config.data.params.train.target:\n paths = dset.specs_dataset.dataset\n filter_paths = [c for c in paths if c.startswith(label_choice)]\n dset.specs_dataset.dataset = filter_paths\n # if we have another first stage we need to do something extra\n if hasattr(dset, 'feats_dataset'):\n paths_feats = dset.feats_dataset.dataset\n filter_paths_feats = [c for c in paths_feats if c.startswith(label_choice)]\n dset.feats_dataset.dataset = filter_paths_feats\n\n batch_size = 1\n start_index = st.sidebar.number_input(f'Example Index in the Dataset [0, {len(dset)-1}]',\n value=0, min_value=0, max_value=len(dset)-batch_size)\n indices = list(range(start_index, start_index+batch_size))\n\n batch = default_collate([dset[i] for i in indices])\n\n if select_specific_class:\n # restoring original dataset because we cached the dataset class and filtered for one class.\n # Next time, the filtered dataset will be filtered again which empties the dataset.\n dset.specs_dataset.dataset = paths\n # if we have another first stage we need to do something extra\n if hasattr(dset, 'feats_dataset'):\n dset.feats_dataset.dataset = paths_feats\n\n feat_sampler_cfg = dset.condition_dataset_cfg.feat_sampler_cfg\n cond_stage_model_name = model.cond_stage_model.__class__.__name__\n transformer_model_name = model.transformer.__class__.__name__\n\n if (cond_stage_model_name in ['VQModel1d', 'FeatClusterStage']\n or transformer_model_name in ['GPTFeats', 'GPTFeatsClass']):\n specs_key_in_batch = 'file_path_specs_'\n flip_c_dims = None\n elif transformer_model_name == 'GPTClass':\n specs_key_in_batch = 'file_path_'\n flip_c_dims = None\n else:\n specs_key_in_batch = 'file_path_'\n flip_c_dims = (2,)\n flip_z_dims = (2,)\n\n st.text('')\n with st.beta_expander(f'Original Video. 
Class: {batch[\"label\"]}.'):\n vid_fname = Path(batch[specs_key_in_batch][0]).name.replace('_mel.npy', '.mp4')\n st.text(f'Video file name: {vid_fname}')\n if 'vggsound.VGGSound' in config.data.params.train.target:\n video_file = open(os.path.join(raw_vids_dir, vid_fname), 'rb').read()\n elif 'vas.VAS' in config.data.params.train.target:\n cls = batch['label'][0]\n video_file = open(os.path.join(raw_vids_dir.replace('*', cls), vid_fname), 'rb').read()\n st.video(video_file, format='video/mp4')\n\n x = model.get_input(model.first_stage_key, batch).to(model.device)\n c = model.get_input(model.cond_stage_key, batch)\n if isinstance(c, dict):\n c = {k: v.to(model.device) for k, v in c.items()}\n else:\n c = c.to(model.device)\n\n quant_z, z_indices = model.encode_to_z(x)\n quant_c, c_indices = model.encode_to_c(c)\n\n xrec = model.first_stage_model.decode(quant_z)\n crec = model.cond_stage_model.decode(quant_c)\n\n if transformer_model_name == 'GPTFeatsClass':\n orig_cond_shape = c['feature'].squeeze().shape\n rec_cond_shape = crec[\"feature\"].squeeze().shape\n else:\n orig_cond_shape = c.squeeze().shape\n rec_cond_shape = crec.squeeze().shape\n\n st.text('')\n with st.beta_expander(f'Conditioning {list(orig_cond_shape)}'):\n if transformer_model_name == 'GPTClass':\n st.write(batch['label'])\n elif transformer_model_name == 'GPTFeatsClass':\n st.write(batch['label'])\n st.write(tensor_to_plt(c['feature'], flip_dims=flip_c_dims))\n else:\n st.write(tensor_to_plt(c, flip_dims=flip_c_dims))\n # with st.beta_expander(f'Conditioning Reconstruction {list(rec_cond_shape)}'):\n # if transformer_model_name == 'GPTClass':\n # st.write(batch['label'])\n # elif transformer_model_name == 'GPTFeatsClass':\n # st.write(batch['label'])\n # st.write(tensor_to_plt(crec['feature'], flip_dims=flip_c_dims))\n # else:\n # st.write(tensor_to_plt(crec, flip_dims=flip_c_dims))\n\n st.sidebar.header('Results Handling')\n update_every = st.sidebar.number_input('Display Result Every ... Step', value=3)\n show_griffin_lim = st.sidebar.checkbox(\n 'Also Show Griffin-Lim', value=False,\n help='Show spectrogram reconstruction from the Griffin-Lim algorithm alongside the pre-trained vocoder')\n to_save_results = st.sidebar.checkbox('Save Results', value=True)\n\n st.text('')\n with st.beta_expander(f'Input {list(x.squeeze().shape)}'):\n st.write(tensor_to_plt(x, flip_dims=flip_z_dims))\n topk_results = get_class_predictions(x, feat_extractor)\n st.text(topk_results)\n if st.button('Get Audio (Input)'):\n spec_to_audio_to_st(x, config.data.params.spec_dir_path,\n config.data.params.sample_rate, show_griffin_lim, vocoder)\n with st.beta_expander(f'Input Reconstruction from SpecVQGAN {list(xrec.squeeze().shape)}', expanded=True):\n st.write(tensor_to_plt(xrec, flip_dims=flip_z_dims))\n topk_results = get_class_predictions(xrec, feat_extractor)\n st.text(topk_results)\n if st.button('Get Audio (Input Reconstruction)'):\n spec_to_audio_to_st(xrec, config.data.params.spec_dir_path,\n config.data.params.sample_rate, show_griffin_lim, vocoder)\n\n st.sidebar.header('Sampling Parameters')\n temperature = st.sidebar.number_input(\n 'Softmax Temperature', value=1.0,\n help='$T$ in $\exp(x_i/T) / \Sigma_j \exp(x_j/T)$'\n )\n top_x = st.sidebar.number_input(\n 'Top X', value=config.model.params.first_stage_config.params.n_embed // 2,\n help='Cuts sampling space of the next token to Top $X$ highest probability tokens. '\n + 'It increases diversity of samples but at the cost of relevance. 
'\n + 'As a rule of thumb, use `X = |codebook| // 2`.'\n )\n W_scale = st.sidebar.number_input(\n 'Temporal Scale', value=1, min_value=1,\n help='The output length is `temporal_scale * 9.8 seconds`.')\n sample_half = st.sidebar.checkbox(\n 'Prime with GT Tokens', value=False,\n help='If checked, the first half of the tokens will be taken from the ground truth audio'\n + ' codebook representation and sampling will continue this sequence.')\n full_att_mat = st.sidebar.checkbox(\n 'Show Full Attention Matrix', value=False,\n help='The attention will be shown for each time stamp instead of only the current one.')\n if full_att_mat:\n scale_att_by_prior = st.sidebar.checkbox(\n 'Subtract Prior from Attention', value=True,\n help='If checked, subtracts $1/S$ from each attention weight, where $S$ is the number of'\n + ' previous tokens. For example, $[2/3, 1/6, 1/6]~–~[1/3, 1/3, 1/3] = [1/3, -1/6, -1/6]$')\n else:\n scale_att_by_prior = False\n\n st.header('Sampling Results:')\n\n # dummy outputs just to reserve some space\n placeholders = {\n 'info': st.text('Step: (?,?) | Local: (?,?) | Crop: (?:?,?:?)'),\n 'time': st.text('Time: ?'),\n 'mode': st.text('Mode: ?'),\n 'title_c_att': st.text('Attention to C.'),\n 'c_att': st.pyplot(tensor_to_plt(torch.zeros_like(x))),\n 'title_z_att': st.text('Attention to Z.'),\n 'z_att': st.pyplot(tensor_to_plt(torch.zeros_like(x))),\n 'title_gen_spec': st.text('Generated sample'),\n 'gen_spec': st.pyplot(tensor_to_plt(torch.zeros_like(x))),\n 'title_rec_audio': st.text('Reconstructed Audio of the Generated Sample'),\n }\n\n sampling_shape = list(quant_z.shape)\n # hr_w * w_scale\n sampling_shape[3] *= W_scale\n\n if st.sidebar.button('Start Sampling'):\n mode = 'half' if sample_half else 'full'\n sample_conditionally(\n z_indices,\n sampling_shape,\n c_indices,\n quant_c,\n full_att_mat,\n scale_att_by_prior,\n temperature,\n top_x,\n update_every,\n placeholders,\n cond_stage_model_name,\n flip_z_dims,\n flip_c_dims,\n to_save_results,\n logdir,\n batch,\n specs_key_in_batch,\n vocoder,\n feat_sampler_cfg,\n show_griffin_lim,\n feat_extractor,\n mode\n )\n" ]
[ [ "numpy.random.seed", "torch.cat", "numpy.rint", "numpy.ceil", "numpy.random.randint" ], [ "torch.nn.functional.softmax", "torch.load", "torch.cat", "torch.zeros", "torch.zeros_like", "matplotlib.pyplot.get_cmap", "torch.multinomial", "torch.no_grad", "torch.utils.data.dataloader.default_collate", "torch.arange", "torch.nn.functional.pad", "matplotlib.pyplot.figure" ] ]
dattranfiot/Carla-ppo
[ "f6961e50a8e43ef65d1c0bb6e953db99c0948b36" ]
[ "utils.py" ]
[ "import types\n\nimport cv2\nimport numpy as np\nimport scipy.signal\nimport tensorflow as tf\n\n\nclass VideoRecorder():\n def __init__(self, filename, frame_size, fps=30):\n self.video_writer = cv2.VideoWriter(\n filename,\n cv2.VideoWriter_fourcc(*\"MPEG\"), int(fps),\n (frame_size[1], frame_size[0]))\n\n def add_frame(self, frame):\n self.video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n\n def release(self):\n self.video_writer.release()\n\n def __del__(self):\n self.release()\n\ndef build_mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):\n for h in hidden_sizes[:-1]:\n x = tf.layers.dense(x, units=h, activation=activation)\n return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)\n\ndef create_counter_variable(name):\n counter = types.SimpleNamespace()\n counter.var = tf.Variable(0, name=name, trainable=False)\n counter.inc_op = tf.assign(counter.var, counter.var + 1)\n return counter\n\ndef create_mean_metrics_from_dict(metrics):\n # Set up summaries for each metric\n update_metrics_ops = []\n summaries = []\n for name, (value, update_op) in metrics.items():\n summaries.append(tf.summary.scalar(name, value))\n update_metrics_ops.append(update_op)\n return tf.summary.merge(summaries), tf.group(update_metrics_ops)\n\ndef compute_gae(rewards, values, bootstrap_values, terminals, gamma, lam):\n rewards = np.array(rewards)\n values = np.array(list(values) + [bootstrap_values])\n terminals = np.array(terminals)\n deltas = rewards + (1.0 - terminals) * gamma * values[1:] - values[:-1]\n return scipy.signal.lfilter([1], [1, -gamma * lam], deltas[::-1], axis=0)[::-1]\n" ]
[ [ "tensorflow.group", "tensorflow.Variable", "tensorflow.assign", "tensorflow.layers.dense", "numpy.array", "tensorflow.summary.scalar", "tensorflow.summary.merge" ] ]
Driesssens/ppo-a2c-thesis
[ "4f7b8c8290940bb4dc40cf067a99b890655c55ec" ]
[ "thesis/preprocessor.py" ]
[ "import torch_rl\nimport numpy\nimport torch\n\n\nclass MyObssPreprocessor:\n \"\"\"A preprocessor of observations returned by the environment.\n It converts MiniGrid observation space and MiniGrid observations\n into the format that the model can handle.\"\"\"\n\n def __init__(self, obs_space):\n self.obs_space = {\n \"image\": obs_space.spaces['image'].shape,\n }\n\n if 'carrying' in obs_space.spaces:\n self.obs_space['carrying'] = 2\n\n def __call__(self, obss, device=None):\n \"\"\"Converts a list of MiniGrid observations, i.e. a list of\n (image, instruction) tuples into two PyTorch tensors.\n\n The images are concatenated. The instructions are tokenified, then\n tokens are converted into lists of ids using a Vocabulary object, and\n finally, the lists of ids are concatenated.\n\n Returns\n -------\n preprocessed_obss : DictList\n Contains preprocessed images and preprocessed instructions.\n \"\"\"\n\n preprocessed_obss = torch_rl.DictList()\n\n if \"image\" in self.obs_space.keys():\n images = numpy.array([obs[\"image\"] for obs in obss])\n images = torch.tensor(images, device=device, dtype=torch.float)\n\n preprocessed_obss.image = images\n\n if \"carrying\" in self.obs_space:\n carryings = numpy.array([obs[\"carrying\"] for obs in obss])\n carryings = torch.tensor(carryings, device=device, dtype=torch.float)\n\n preprocessed_obss.carrying = carryings\n\n return preprocessed_obss\n" ]
[ [ "numpy.array", "torch.tensor" ] ]
ameisner/fiberassign
[ "8b13f8681f2b8a0826cdc18387890461ca37989a" ]
[ "py/fiberassign/test/test_qa.py" ]
[ "\"\"\"\nTest fiberassign target operations.\n\"\"\"\nimport os\nimport subprocess\nimport re\nimport shutil\nimport unittest\nfrom datetime import datetime\nimport json\nimport glob\n\nimport numpy as np\n\nimport fitsio\n\nimport desimodel\n\nimport fiberassign\n\nfrom fiberassign.utils import option_list, GlobalTimers\n\nfrom fiberassign.hardware import load_hardware\n\nfrom fiberassign.tiles import load_tiles, Tiles\n\nfrom fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,\n TARGET_TYPE_SUPPSKY,\n TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,\n Targets, TargetsAvailable, TargetTree,\n LocationsAvailable, load_target_file, targets_in_tiles)\n\nfrom fiberassign.assign import (Assignment, write_assignment_fits,\n write_assignment_ascii, merge_results,\n read_assignment_fits_tile)\n\nfrom fiberassign.qa import qa_tiles, qa_targets\n\nfrom fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend\n\nfrom fiberassign.scripts.assign import parse_assign, run_assign_full\n\nfrom fiberassign.scripts.plot import parse_plot, run_plot\n\nfrom fiberassign.scripts.qa import parse_qa, run_qa\n\nfrom fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa\n\n\nfrom .simulate import (test_subdir_create, sim_tiles, sim_targets,\n sim_focalplane, petal_rotation, test_assign_date)\n\n\nclass TestQA(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n # Find the location of scripts. First try the case where we are running\n # tests from the top level of the source tree.\n cls.topDir = os.path.dirname( # top-level\n os.path.dirname( # build/\n os.path.dirname( # lib.arch/\n os.path.dirname( # fiberassign/\n os.path.dirname(os.path.abspath(__file__)) # test/\n )\n )\n )\n )\n cls.binDir = os.path.join(cls.topDir, \"bin\")\n if not os.path.isdir(cls.binDir):\n # We are running from some other directory from an installed package\n cls.topDir = os.path.dirname( # top-level\n os.path.dirname( # lib/\n os.path.dirname( # python3.x/\n os.path.dirname( # site-packages/\n os.path.dirname( # egg/\n os.path.dirname( # fiberassign/\n os.path.dirname(os.path.abspath(__file__)) # test/\n )\n )\n )\n )\n )\n )\n cls.binDir = os.path.join(cls.topDir, \"bin\")\n\n def setUp(self):\n self.density_science = 5000\n self.density_standards = 5000\n self.density_sky = 10\n self.density_suppsky = 5000\n pass\n\n def tearDown(self):\n pass\n\n def test_science(self):\n set_matplotlib_pdf_backend()\n import matplotlib.pyplot as plt\n test_dir = test_subdir_create(\"qa_test_science\")\n log_file = os.path.join(test_dir, \"log.txt\")\n\n np.random.seed(123456789)\n input_mtl = os.path.join(test_dir, \"mtl.fits\")\n # For this test, we will use just 2 science target classes, in order to verify\n # we get approximately the correct distribution\n sdist = [\n (3000, 1, 0.25, \"QSO\"),\n (2000, 1, 0.75, \"ELG\")\n ]\n nscience = sim_targets(\n input_mtl,\n TARGET_TYPE_SCIENCE,\n 0,\n density=self.density_science,\n science_frac=sdist\n )\n\n log_msg = \"Simulated {} science targets\\n\".format(nscience)\n\n tgs = Targets()\n load_target_file(tgs, input_mtl)\n\n # Read hardware properties\n fp, exclude, state = sim_focalplane(rundate=test_assign_date)\n hw = load_hardware(focalplane=(fp, exclude, state))\n tfile = os.path.join(test_dir, \"footprint.fits\")\n sim_tiles(tfile)\n tiles = load_tiles(tiles_file=tfile)\n\n # Precompute target positions\n tile_targetids, tile_x, tile_y = targets_in_tiles(hw, tgs, tiles)\n # Compute the targets available to each fiber for each tile.\n tgsavail = 
TargetsAvailable(hw, tiles, tile_targetids, tile_x, tile_y)\n\n # Compute the fibers on all tiles available for each target\n favail = LocationsAvailable(tgsavail)\n\n # Pass empty map of STUCK positioners that land on good sky\n stucksky = {}\n\n # Create assignment object\n asgn = Assignment(tgs, tgsavail, favail, stucksky)\n\n # First-pass assignment of science targets\n asgn.assign_unused(TARGET_TYPE_SCIENCE)\n\n # Redistribute\n asgn.redistribute_science()\n\n write_assignment_fits(tiles, asgn, out_dir=test_dir, all_targets=True)\n\n tile_ids = list(tiles.id)\n\n merge_results(\n [input_mtl], list(), tile_ids, result_dir=test_dir, copy_fba=False\n )\n\n # FIXME: In order to use the qa_targets function, we need to know the\n # starting requested number of observations (NUMOBS_INIT). Then we can use\n # that value for each target and compare to the number actually assigned.\n # However, the NUMOBS_INIT column was removed from the merged TARGET table.\n # If we are ever able to reach consensus on restoring that column, then we\n # can re-enable these tests below.\n #\n # qa_targets(\n # hw,\n # tiles,\n # result_dir=test_dir,\n # result_prefix=\"fiberassign-\"\n # )\n #\n # # Load the target catalog so that we have access to the target properties\n #\n # fd = fitsio.FITS(input_mtl, \"r\")\n # scidata = np.array(np.sort(fd[1].read(), order=\"TARGETID\"))\n # fd.close()\n # del fd\n #\n # # How many possible positioner assignments did we have?\n # nassign = 5000 * len(tile_ids)\n #\n # possible = dict()\n # achieved = dict()\n #\n # namepat = re.compile(r\".*/qa_target_count_(.*)_init-(.*)\\.fits\")\n # for qafile in glob.glob(\"{}/qa_target_count_*.fits\".format(test_dir)):\n # namemat = namepat.match(qafile)\n # name = namemat.group(1)\n # obs = int(namemat.group(2))\n # if obs == 0:\n # continue\n # fd = fitsio.FITS(qafile, \"r\")\n # fdata = fd[\"COUNTS\"].read()\n # # Sort by target ID so we can select easily\n # fdata = np.sort(fdata, order=\"TARGETID\")\n # tgid = np.array(fdata[\"TARGETID\"])\n # counts = np.array(fdata[\"NUMOBS_DONE\"])\n # avail = np.array(fdata[\"NUMOBS_AVAIL\"])\n # del fdata\n # fd.close()\n #\n # # Select target properties. 
BOTH TARGET LISTS MUST BE SORTED.\n # rows = np.where(np.isin(scidata[\"TARGETID\"], tgid, assume_unique=True))[0]\n #\n # ra = np.array(scidata[\"RA\"][rows])\n # dec = np.array(scidata[\"DEC\"][rows])\n # dtarget = np.array(scidata[\"DESI_TARGET\"][rows])\n # init = np.array(scidata[\"NUMOBS_INIT\"][rows])\n #\n # requested = obs * np.ones_like(avail)\n #\n # under = np.where(avail < requested)[0]\n # over = np.where(avail > requested)[0]\n #\n # limavail = np.array(avail)\n # limavail[over] = obs\n #\n # deficit = np.zeros(len(limavail), dtype=np.int)\n #\n # deficit[:] = limavail - counts\n # deficit[avail == 0] = 0\n #\n # possible[name] = np.sum(limavail)\n # achieved[name] = np.sum(counts)\n #\n # log_msg += \"{}-{}:\\n\".format(name, obs)\n #\n # pindx = np.where(deficit > 0)[0]\n # poor_tgid = tgid[pindx]\n # poor_dtarget = dtarget[pindx]\n # log_msg += \" Deficit > 0: {}\\n\".format(len(poor_tgid))\n # poor_ra = ra[pindx]\n # poor_dec = dec[pindx]\n # poor_deficit = deficit[pindx]\n #\n # # Plot Target availability\n # # Commented out by default, since in the case of high target density\n # # needed for maximizing assignments, there are far more targets than\n # # the number of available fiber placements.\n #\n # # marksize = 4 * np.ones_like(deficit)\n # #\n # # fig = plt.figure(figsize=(12, 12))\n # # ax = fig.add_subplot(1, 1, 1)\n # # ax.scatter(ra, dec, s=2, c=\"black\", marker=\"o\")\n # # for pt, pr, pd, pdef in zip(poor_tgid, poor_ra, poor_dec, poor_deficit):\n # # ploc = plt.Circle(\n # # (pr, pd), radius=(0.05*pdef), fc=\"none\", ec=\"red\"\n # # )\n # # ax.add_artist(ploc)\n # # ax.set_xlabel(\"RA\", fontsize=\"large\")\n # # ax.set_ylabel(\"DEC\", fontsize=\"large\")\n # # ax.set_title(\n # # \"Target \\\"{}\\\": (min(avail, requested) - counts) > 0\".format(\n # # name, obs\n # # )\n # # )\n # # #ax.legend(handles=lg, framealpha=1.0, loc=\"upper right\")\n # # plt.savefig(os.path.join(test_dir, \"{}-{}_deficit.pdf\".format(name, obs)), dpi=300, format=\"pdf\")\n #\n # log_msg += \\\n # \"Assigned {} tiles for total of {} possible target observations\\n\".format(\n # len(tile_ids), nassign\n # )\n # ach = 0\n # for nm in possible.keys():\n # ach += achieved[nm]\n # log_msg += \\\n # \" type {} had {} possible target obs and achieved {}\\n\".format(\n # nm, possible[nm], achieved[nm]\n # )\n # frac = 100.0 * ach / nassign\n # log_msg += \\\n # \" {} / {} = {:0.2f}% of fibers were assigned\\n\".format(\n # ach, nassign, frac\n # )\n # for nm in possible.keys():\n # log_msg += \\\n # \" type {} had {:0.2f}% of achieved observations\\n\".format(\n # nm, achieved[nm] / ach\n # )\n # with open(log_file, \"w\") as f:\n # f.write(log_msg)\n #\n # self.assertGreaterEqual(frac, 99.0)\n\n # Test if qa-fiberassign script runs without crashing\n script = os.path.join(self.binDir, \"qa-fiberassign\")\n if os.path.exists(script):\n fafiles = glob.glob(f\"{test_dir}/fiberassign-*.fits\")\n cmd = \"{} --targets {}\".format(script, \" \".join(fafiles))\n err = subprocess.call(cmd.split())\n self.assertEqual(err, 0, f\"FAILED ({err}): {cmd}\")\n else:\n print(f\"ERROR: didn't find {script}\")\n\n\ndef test_suite():\n \"\"\"Allows testing of only this module with the command::\n\n python setup.py test -m <modulename>\n \"\"\"\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n" ]
[ [ "numpy.random.seed" ] ]
jphacks/TK_1804
[ "b71e5ee95ea60476758979845f3ebfd5a4355d41" ]
[ "src/tools/calibration.py" ]
[ "import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nif cap.isOpened() is False:\n raise IOError(\"IO Error\")\n\n# termination criteria\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((6*7,3), np.float32)\nobjp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d point in real world space\nimgpoints = [] # 2d points in image plane.\n\nimgInd=0\nwhile True:\n ret, img = cap.read()\n if ret == False:\n continue\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n cv2.putText(img,'Number of capture: '+str(imgInd),(30,20),cv2.FONT_HERSHEY_PLAIN, 1,(0,255,0))\n cv2.putText(img,'c: Capture the image',(30,40),cv2.FONT_HERSHEY_PLAIN, 1,(0,255,0))\n cv2.putText(img,'q: Finish capturing and calculate the camera matrix and distortion',(30,60),cv2.FONT_HERSHEY_PLAIN, 1,(0,255,0))\n cv2.imshow(\"Image\", img) \n\n k = cv2.waitKey(1) & 0xFF\n if k == ord('c'):\n # Find the chess board corners\n ret, corners = cv2.findChessboardCorners(gray, (7,6),None)\n # If found, add object points, image points (after refining them)\n if ret == True:\n objpoints.append(objp)\n\n corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n imgpoints.append(corners2)\n\n # Draw and display the corners (pattern size must match the one used above)\n img = cv2.drawChessboardCorners(img, (7,6), corners2,ret)\n cv2.imshow('Image',img)\n cv2.waitKey(500)\n\n imgInd+=1\n\n if k == ord('q'):\n break\n\n# Calculate the camera matrix\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\n\n# Save the csv file\nnp.savetxt(\"mtx.csv\", mtx, delimiter=\",\")\nnp.savetxt(\"dist.csv\", dist, delimiter=\",\")\n\ncap.release()\ncv2.destroyAllWindows()" ]
[ [ "numpy.savetxt", "numpy.zeros" ] ]
Tekhz/models
[ "8d01c604a03daba8766e443311f704ad02046ad4" ]
[ "official/utils/logging/logger_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for benchmark logger.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport json\nimport os\nimport tempfile\n\n\nfrom official.utils.logging import logger\nimport tensorflow as tf\n\n\nclass BenchmarkLoggerTest(tf.test.TestCase):\n\n def tearDown(self):\n super(BenchmarkLoggerTest, self).tearDown()\n tf.gfile.DeleteRecursively(self.get_temp_dir())\n\n def test_create_logging_dir(self):\n non_exist_temp_dir = os.path.join(self.get_temp_dir(), \"unknown_dir\")\n self.assertFalse(tf.gfile.IsDirectory(non_exist_temp_dir))\n\n logger.BenchmarkLogger(non_exist_temp_dir)\n self.assertTrue(tf.gfile.IsDirectory(non_exist_temp_dir))\n\n def test_log_metric(self):\n log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n log = logger.BenchmarkLogger(log_dir)\n log.log_metric(\"accuracy\", 0.999, global_step=1e4, extras={\"name\": \"value\"})\n\n metric_log = os.path.join(log_dir, \"metric.log\")\n self.assertTrue(tf.gfile.Exists(metric_log))\n with tf.gfile.GFile(metric_log) as f:\n metric = json.loads(f.readline())\n self.assertEqual(metric[\"name\"], \"accuracy\")\n self.assertEqual(metric[\"value\"], 0.999)\n self.assertEqual(metric[\"unit\"], None)\n self.assertEqual(metric[\"global_step\"], 1e4)\n self.assertEqual(metric[\"extras\"], {\"name\": \"value\"})\n\n def test_log_multiple_metrics(self):\n log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n log = logger.BenchmarkLogger(log_dir)\n log.log_metric(\"accuracy\", 0.999, global_step=1e4, extras={\"name\": \"value\"})\n log.log_metric(\"loss\", 0.02, global_step=1e4)\n\n metric_log = os.path.join(log_dir, \"metric.log\")\n self.assertTrue(tf.gfile.Exists(metric_log))\n with tf.gfile.GFile(metric_log) as f:\n accuracy = json.loads(f.readline())\n self.assertEqual(accuracy[\"name\"], \"accuracy\")\n self.assertEqual(accuracy[\"value\"], 0.999)\n self.assertEqual(accuracy[\"unit\"], None)\n self.assertEqual(accuracy[\"global_step\"], 1e4)\n self.assertEqual(accuracy[\"extras\"], {\"name\": \"value\"})\n\n loss = json.loads(f.readline())\n self.assertEqual(loss[\"name\"], \"loss\")\n self.assertEqual(loss[\"value\"], 0.02)\n self.assertEqual(loss[\"unit\"], None)\n self.assertEqual(loss[\"global_step\"], 1e4)\n\n def test_log_non_number_value(self):\n log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n log = logger.BenchmarkLogger(log_dir)\n const = tf.constant(1)\n log.log_metric(\"accuracy\", const)\n\n metric_log = os.path.join(log_dir, \"metric.log\")\n self.assertFalse(tf.gfile.Exists(metric_log))\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.gfile.Exists", "tensorflow.gfile.GFile", "tensorflow.test.main", "tensorflow.gfile.IsDirectory" ] ]
pachterlab/pachterlab-MBLGLMBHGP_2021
[ "3141e78f649a6e0384073ba13cf343277773101c" ]
[ "Figure_2_Supplementary_Figure_3/make_data_faster.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\nimport argparse\nimport scipy.io\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport time\nimport copy\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy import sparse\nimport scipy\nimport anndata\nfrom matplotlib.pyplot import figure\nfrom sklearn.decomposition import TruncatedSVD\nimport sklearn\nimport anndata\nimport time\nfrom openTSNE import TSNE\nimport openTSNE\nfrom openTSNE.callbacks import ErrorLogger\nimport datetime\nfrom anndata import AnnData\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.metrics.pairwise import manhattan_distances\nimport matplotlib.pyplot as plt\nimport pickle\n\ndef make_data_faster(dataset_shortname):\n\tk_folder = '/home/single_cell_analysis/kallisto_out_single_bustools_dev/kallisto_' + dataset_shortname\n\tif dataset_shortname in [\"pbmc_1k_v3\", \"pbmc_10k_v3\", \"neuron_10k_v3\"]:\n\t dataset_shortname = dataset_shortname.split(\"_\")[0] + dataset_shortname.split(\"_\")[1] + \"_\" + dataset_shortname.split(\"_\")[2]\n\tc_folder = '/home/single_cell_analysis/cellranger_out/cellranger3_' + dataset_shortname +'_out/outs/filtered_feature_bc_matrix'\n\tc_raw_folder = '/home/single_cell_analysis/cellranger_out/cellranger3_' + dataset_shortname +'_out/outs/raw_feature_bc_matrix'\n\n\tc_raw = anndata.AnnData(scipy.io.mmread(os.path.join(c_raw_folder,'matrix.mtx.gz')).tocsr().T)\n\tc_barcodes = pd.read_csv(os.path.join(c_raw_folder,'barcodes.tsv.gz'), index_col = 0, header = None, names = ['barcode'])\n\tc_barcodes.index = c_barcodes.index.str.slice(0,16,1)\n\tc_raw.obs = c_barcodes\n\tc_raw.var = pd.read_csv(os.path.join(c_raw_folder,'features.tsv.gz'), header = None, index_col = 0, names =['ensembl_id', 'gene_name', 'kind'], sep = '\\t')\n\tprint('Loaded c raw mtx:',c_raw.X.shape)\n\n\tdel c_barcodes\n\n\t# load c filtered matrix\n\tc = anndata.AnnData(scipy.io.mmread(os.path.join(c_folder,'matrix.mtx.gz')).tocsr().T)\n\tc_barcodes = pd.read_csv(os.path.join(c_folder,'barcodes.tsv.gz'), index_col = 0, header = None, names = ['barcode'])\n\tc_barcodes.index = c_barcodes.index.str.slice(0,16,1)\n\tc.obs = c_barcodes\n\tc.var = pd.read_csv(os.path.join(c_folder,'features.tsv.gz'), header = None, index_col = 0, names =['ensembl_id', 'gene_name', 'kind'], sep = '\\t')\n\tprint('Loaded c filtered mtx:',c.X.shape)\n\n\tdel c_barcodes\n\n\n\t## load kallisto raw matrix\n\tk_raw = anndata.AnnData(scipy.io.mmread(os.path.join(k_folder,'genes.mtx')).tocsr())\n\tk_raw.obs= pd.read_csv(os.path.join(k_folder,'genes.barcodes.txt'), index_col = 0, header = None, names = ['barcode'])\n\tk_raw.var = pd.read_csv(os.path.join(k_folder,'genes.genes.txt'), header = None, index_col = 0, names =['ensembl_id'], sep = '\\t')\n\tprint('Loaded k raw mtx:',k_raw.X.shape)\n\n\n\t# truncates the ensembl version number off the kallisto labels\n\tk_raw.var['full_ensembl_id'] = k_raw.var.index\n\tk_raw.var.index = k_raw.var['full_ensembl_id'].str.slice(0,18)\n\n\n\tif dataset_shortname in ['hgmm1k_v2', 'hgmm1k_v3', 'hgmm10k_v3']:\n\t k_raw.var.index = k_raw.var['full_ensembl_id']\n\n\t # do this as late as possible\n\tk = k_raw[c.obs.index.values]\n\tprint('Loaded k filtered mtx:', k.X.shape)\n\n\tc_raw.obs['counts'] = c_raw.X.sum(1)\n\tc_raw.obs['ngenes'] = np.array((c_raw.X > 0).sum(1))\n\tc_raw = c_raw[c_raw.obs['counts'] > 0]\n\tc_raw.layers['log1p'] = np.log1p(c_raw.X)\n\tc_raw.obs['log10counts']= np.log10(c_raw.obs['counts'])\n\tprint('Cell Ranger raw:', 
c_raw.shape)\n\n\n\t# count UMIs, genes, log transform raw kallisto barcodes \n\t# first remove kallisto barcodes with 0 gene counts\n\n\tk_raw.obs['counts'] = k_raw.X.sum(1)\n\tk_raw.obs['ngenes'] = np.array((k_raw.X > 0).sum(1))\n\tk_raw = k_raw[k_raw.obs['counts'] > 0]\n\tk_raw.layers['log1p'] = np.log1p(k_raw.X)\n\tk_raw.obs['log10counts'] = np.log10(k_raw.obs['counts'])\n\tprint('kallisto raw:', k_raw.shape)\n\n\tc.obs['counts'] = c.X.sum(1)\n\tc.obs['ngenes'] = np.array((c.X > 0).sum(1))\n\tc = c[c.obs['counts'] > 0]\n\tc.layers['log1p'] = np.log1p(c.X)\n\tc.obs['log10counts']= np.log10(c.obs['counts'])\n\tprint('Cell Ranger filtered:', c.shape)\n\n\n\t# count UMIs, genes, log transform filtered kallisto barcodes \n\t# first remove kallisto barcodes with 0 gene counts\n\n\tk.obs['counts'] = k.X.sum(1)\n\tk.obs['ngenes'] = np.array((k.X > 0).sum(1))\n\tk = k[k.obs['counts'] > 0]\n\tk.layers['log1p'] = np.log1p(k.X)\n\tk.obs['log10counts'] = np.log10(k.obs['counts'])\n\tprint('kallisto filtered:', k.shape)\n\n\tjoint_obs = k_raw.obs.join(c_raw.obs,how = 'outer', lsuffix='-kallisto', rsuffix='-tenx')\n\tjoint_obs = joint_obs.fillna(0)\n\tprint('Total barcodes seen')\n\tprint(len(joint_obs))\n\n\t# barcodes seen by both\n\tcommon_obs = k_raw.obs.join(c_raw.obs,how = 'inner', lsuffix='-kallisto', rsuffix='-tenx')\n\tprint('Barcodes seen by both')\n\tprint(len(common_obs))\n\n\tkobs = k_raw.obs.join(c_raw.obs,how = 'left', lsuffix='-kallisto', rsuffix='-tenx')\n\tkobs = kobs.sort_values(by=['counts-kallisto'], ascending = False)\n\tprint('Barcodes seen by Cell Ranger missed by kallisto')\n\tprint(len(joint_obs) - len(kobs))\n\n\n\t# just Cell Ranger observations\n\ttobs = c_raw.obs.copy()\n\ttobs = tobs.sort_values('counts', ascending = False)\n\tprint('Barcodes seen by kallisto missed by Cell Ranger')\n\tprint(len(joint_obs) - len(tobs))\n\n\t# ## Compute correlations between kallisto and Cell Ranger\n\t# handy and fast function for computing correlation on sparse matrices\n\tdef sparse_M_std(X):\n\t n = X.shape[1]\n\t return np.sqrt(n * X.multiply(X).sum(1) - np.multiply(X.sum(1), X.sum(1)))\n\n\tdef sparse_M_corr(X,Y):\n\t X_std = sparse_M_std(X)\n\t Y_std = sparse_M_std(Y)\n\t XY_std = np.multiply(X_std, Y_std)\n\n\t n = X.shape[1]\n\t XY_cov = n* X.multiply(Y).sum(1) - np.multiply(X.sum(1), Y.sum(1))\n\t R = np.divide(XY_cov, XY_std)\n\t return np.squeeze(np.asarray(R))\n\n\traw_counts_correlation = sparse_M_corr(k_raw[common_obs.index].layers['log1p'],c_raw[common_obs.index].layers['log1p'])\n\tfiltered_counts_correlation = sparse_M_corr(k_raw[c.obs.index].layers['log1p'],c_raw[c.obs.index].layers['log1p'])\n\tprint('Correlations computed!')\n\n\ttsvd = TruncatedSVD(n_components=10)\n\tTSVD = tsvd.fit_transform(k.layers['log1p'])\n\tk.obsm['TSVD'] = TSVD\n\tk.obsm['TSVD']\n\tprint('TSVD variance ratios:\\n', list(tsvd.explained_variance_ratio_))\n\tprint(datetime.datetime.now())\n\n\n\ttsvd = TruncatedSVD(n_components=10)\n\tTSVD = tsvd.fit_transform(c.layers['log1p'])\n\tc.obsm['TSVD'] = TSVD\n\tc.obsm['TSVD']\n\tprint('TSVD variance ratios:\\n', list(tsvd.explained_variance_ratio_))\n\tprint(datetime.datetime.now())\n\n\n\tprint('Calculating L1 distances...')\n\n\t# taking manhattan distance between matrices\n\tdnck = manhattan_distances(c.layers['log1p'], k.layers['log1p'])\n\tdnkk = manhattan_distances(k.layers['log1p'], k.layers['log1p'])\n\tprint(datetime.datetime.now())\n\n\t# nck are the kallisto-cellranger distances \n\tnck = np.diagonal(dnck)\n\n\t# nkk are the 
kallisto-kallisto distances\n\tnkk = []\n\tfor row in dnkk:\n\t val = np.partition(row, 1)[1]\n\t nkk.append(val)\n\tprint('L1 distances done!')\n\tprint(datetime.datetime.now())\n\n\n\tprint('Doing t-SNE')\n\tprint(datetime.datetime.now())\n\ttsne = TSNE(perplexity=30, metric=\"euclidean\", callbacks=openTSNE.callbacks.ErrorLogger(),n_jobs=8, random_state=42, n_iter=750 )\n\tk.obsm['TSNE10'] = tsne.fit(k.obsm['TSVD'])\n\tprint('kallisto TSNE-10 done.')\n\tprint(datetime.datetime.now())\n\n\n\t# Perform TSNE on top 10 truncated SVD components of Cell Ranger filtered matrix\n\n\tprint('Doing t-SNE on top 10 PC for Cell Ranger')\n\t# \n\tprint(datetime.datetime.now())\n\ttsne = TSNE(perplexity=30, metric=\"euclidean\", callbacks=openTSNE.callbacks.ErrorLogger(),n_jobs=8, random_state=42, n_iter=750 )\n\tc.obsm['TSNE10'] = tsne.fit(c.obsm['TSVD'])\n\tprint('Cell Ranger TSNE-10 done.')\n\tprint(datetime.datetime.now())\n\n\n\tc_raw.write(os.path.join(\"./write_data/\" + dataset_shortname + '_tenx_raw.h5ad'))\n\tk_raw.write(os.path.join(\"./write_data/\" + dataset_shortname + '_kallisto_raw.h5ad'))\n\tk.write(os.path.join(\"./write_data/\" + dataset_shortname + '_kallisto.h5ad'))\n\tc.write(os.path.join(\"./write_data/\" + dataset_shortname + '_tenx.h5ad'))\n\n\n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_kobs.pkl'), 'wb') as handle:\n\t pickle.dump(kobs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t \n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_tobs.pkl'), 'wb') as handle:\n\t pickle.dump(tobs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t \n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_common_obs.pkl'), 'wb') as handle:\n\t pickle.dump(common_obs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_joint_obs.pkl'), 'wb') as handle:\n\t pickle.dump(joint_obs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t \n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_nkk.pkl'), 'wb') as handle:\n\t pickle.dump(nkk, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t \n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_nck.pkl'), 'wb') as handle:\n\t pickle.dump(nck, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t \n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_raw_counts_correlation.pkl'), 'wb') as handle:\n\t pickle.dump(raw_counts_correlation, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t \n\twith open(os.path.join(\"./write_data/\" + dataset_shortname + '_filtered_counts_correlation.pkl'), 'wb') as handle:\n\t pickle.dump(filtered_counts_correlation, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"processes all data\")\n parser.add_argument(\"--ds\", help=\"dataset name (i.e. SRR8599150_v2)\")\n \n\n args = parser.parse_args()\n print(\"Loading files..\")\n make_data_faster(str(args.ds))\n\n print(\"Done\")\n" ]
[ [ "sklearn.decomposition.TruncatedSVD", "numpy.partition", "numpy.multiply", "numpy.asarray", "sklearn.metrics.pairwise.manhattan_distances", "numpy.log10", "numpy.log1p", "numpy.diagonal", "numpy.divide" ] ]
WalterjhShen/qmpy
[ "686e18cecbb82a6bb523249ac1779a99fb865350" ]
[ "qmpy/analysis/xrd.py" ]
[ "#!/usr/bin/env python\n\nimport itertools\nimport numpy as np\nimport logging\n\nfrom qmpy.data import elements\nfrom qmpy.utils import *\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass Peak(object):\n \"\"\"\n Attributes:\n angle (float): \n Peak theta angle in radians.\n hkl (list): \n HKL indices of the peak.\n multiplicity (int): \n Number of HKL indices which generate the peak.\n\n \"\"\"\n def __init__(self,\n angle,\n multiplicity=None,\n intensity=None,\n hkl=None,\n xrd=None,\n width=None,\n measured=False):\n self.angle = angle\n self.two_theta = angle*360/np.pi\n self.hkl = hkl\n self.multiplicity = multiplicity\n self.intensity = intensity\n self.xrd = xrd\n self.width = width\n self.measured = measured\n\n self.real = None\n self.imag = None\n\n def lp_factor(self):\n \"\"\"\n Calculates the Lorentz-polarization factor.\n \n http://reference.iucr.org/dictionary/Lorentz%E2%80%93polarization_correction\n \"\"\"\n num = (1+np.cos(2*self.angle)**2)\n den = np.cos(self.angle)*np.sin(self.angle)**2\n return num/den\n\n def calculate_intensity(self, bfactors=None, scale=None):\n intensity = self.structure_factor_squared(bfactors)\n self.intensity = intensity * scale\n self.intensity *= self.multiplicity\n self.intensity *= self.lp_factor()\n return self.intensity\n\n def thermal_factor(self, bfactor=1.0):\n \"\"\"\n Calculates the Debye-Waller factor for a peak.\n\n http://en.wikipedia.org/wiki/Debye-Waller_factor\n \"\"\"\n return np.exp(-bfactor*(np.sin(self.angle)/self.xrd.wavelength)**2)\n\n def atomic_scattering_factor(self, element):\n asfp = elements[element]['scattering_factors']\n s = np.sin(self.angle) / self.xrd.wavelength\n s2 = s*s\n if s > 2:\n msg = 'Atomic scattering factors are not optimized for'\n msg += ' s greater than 2'\n logger.warn(msg)\n\n factors = [ asfp['a'+str(i)]*np.exp(-asfp['b'+str(i)]*s2) \n for i in range(1,5) ]\n return sum(factors) + asfp['c']\n\n def structure_factor_squared(self, bfactors=None):\n if bfactors is None:\n bfactors = [ 1.0 for o in self.xrd.structure.orbits ]\n\n real = 0.0\n imag = 0.0\n\n for bfactor, orbit in zip(bfactors, self.xrd.structure.orbits):\n tf = self.thermal_factor(bfactor)\n for site in orbit:\n for atom in site:\n sf = self.atomic_scattering_factor(atom.element_id)\n dot = 2*np.pi*np.dot(self.hkl[0], atom.coord)\n pre = sf * tf * atom.occupancy\n real += pre*np.cos(dot) \n imag += pre*np.sin(dot)\n\n self.real = real\n self.imag = imag\n return real*real + imag*imag\n\n\nclass XRD(object):\n \"\"\"\n Container for an X-ray diffraction pattern.\n\n Attributes:\n peaks (List): \n List of :mod:`~qmpy.Peak` instances.\n measured (bool): \n True if the XRD is a measured pattern, otherwise False.\n min_2th (float): \n Minimum 2theta angle allowed. Defaults to 10 degrees.\n max_2th (float): \n Maximum 2theta angle allowed. Defaults to 60 degrees.\n wavelength (float): \n X-ray wavelength. 
Defaults to 1.5418 Ang.\n resolution (float): \n Minimum 2theta angle the XRD will distinguish between.\n\n \"\"\"\n def __init__(self, structure=None, measured=False, wavelength=1.5418,\n min_2th=10, max_2th=60, resolution=1e-2):\n self.peaks = []\n self.structure = structure\n self.measured = measured\n self.wavelength = wavelength\n self.min_2th = min_2th\n self.max_2th = max_2th\n self.resolution = resolution\n\n def add_peak(self, peak):\n for p in self.peaks:\n if abs(peak.two_theta - p.two_theta) < self.resolution:\n p.multiplicity += peak.multiplicity\n p.hkl.append(peak.hkl)\n return\n peak.xrd = self\n self.peaks.append(peak)\n\n def d_thermal_factor(self, angle, bfactor):\n temp = (np.sin(angle) / self.wavelength)**2\n return -temp * np.exp(-bfactor*temp)\n\n def bragg_angle(self, hkl):\n ratio = np.linalg.norm(self.structure.inv.dot(hkl))/2\n ratio *= self.wavelength\n if (ratio >= -1 and ratio <= 1):\n return np.arcsin(ratio)\n elif ratio < -1:\n return -np.pi/2\n else:\n return np.pi/2\n\n def get_intensities(self, bfactors=None, scale=None):\n \"\"\"\n Loops over all peaks calculating intensity.\n\n Keyword Arguments:\n bfactors (list) : list of B factors for each atomic site. Care must be\n taken to ensure that the order of B factors agrees with the order\n of atomic orbits.\n scale (float) : Scaling factor to multiply the intensities by. If\n scale evaluates to False, the intensities will be re-normalized at\n the end such that the highest peak is 1.\n \"\"\"\n rescale = False\n if not scale:\n rescale = True\n scale = 1.0\n\n for peak in self.peaks:\n peak.calculate_intensity(bfactors=bfactors, scale=scale)\n\n if rescale:\n m = max([p.intensity for p in self.peaks])\n for p in self.peaks:\n p.intensity /= m\n\n def get_peaks(self):\n max_mag = 2*np.sin(self.max_2th*np.pi/90) / self.wavelength\n self.structure.symmetrize()\n rots = []\n for r in self.structure.rotations:\n if np.allclose(r, np.eye(3)):\n continue\n if not any([np.allclose(r, rr) for rr in rots]):\n rots.append(r)\n\n im, jm, km = map(lambda x: int(np.ceil(max_mag*x)),\n self.structure.lat_params[:3])\n\n for h, k, l in itertools.product(range(-im, im+1),\n range(-jm, jm+1),\n range(-km, km+1)):\n if [h, k, l] == [0, 0, 0]:\n continue\n\n mult = 1\n hkl = np.array([h, k, l])\n equiv = [hkl]\n repeat = False\n for rot in rots:\n thkl = np.dot(rot, hkl)\n if thkl[0] < hkl[0] - 1e-4:\n repeat = True\n elif abs(thkl[0]-hkl[0]) < 1e-4:\n if thkl[1] < hkl[1] - 1e-4:\n repeat = True\n elif abs(thkl[1]-hkl[1]) < 1e-4:\n if thkl[2] < hkl[2] - 1e-4:\n repeat = True\n if repeat:\n break\n\n if not any([np.allclose(thkl, shkl) for shkl in equiv]):\n equiv.append(thkl)\n mult += 1\n\n angle = self.bragg_angle(hkl)\n two_theta = angle*360/np.pi\n\n if two_theta > self.max_2th or two_theta < self.min_2th:\n continue\n\n peak = Peak(angle, multiplicity=mult, hkl=equiv)\n self.add_peak(peak)\n\n def plot(self):\n renderer = Renderer()\n\n for p in self.peaks:\n l = Line([[p.two_theta, 0], \n [p.two_theta, p.intensity]], color='grey')\n renderer.add(l)\n\n renderer.xaxis.label = \"2&Theta;\"\n renderer.yaxis.max = 1.0\n renderer.xaxis.min = self.min_2th\n renderer.xaxis.max = self.max_2th\n return renderer\n\n" ]
[ [ "numpy.dot", "numpy.allclose", "numpy.arcsin", "numpy.eye", "numpy.cos", "numpy.sin", "numpy.ceil", "numpy.exp", "numpy.array" ] ]
YuyangL/SOWFA-Postprocess
[ "1c6b157a2a6afa76c9ffabe5edb5997ad57aa88a" ]
[ "Visual_EigenVectors.py" ]
[ "import numpy as np\nfrom FieldData import FieldData\nimport PostProcess_AnisotropyTensor as ppat\nimport time as t\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom mayavi import mlab\nfrom mayavi.api import Engine\nfrom mayavi.modules.axes import Axes\nimport pickle\nimport os\n\n\"\"\"\nUser Inputs\n\"\"\"\ncaseDir = 'J:'\ncaseDir = '/media/yluan/1'\ncaseName = 'ALM_N_H'\ntimes = '22000.0918025'\nfields = 'uuPrime2'\n# # Not used\n# precisionX, precisionY, precisionZ = 100j, 100j, 33j\n# # Not used\n# interpMethod = 'linear'\npickleName = 'turbs'\nfigView = 'top' # 'iso', 'front', 'left', 'top'\n\n\n\"\"\"\nProcess u'u' Tensor\n\"\"\"\n# Initialize the case\ncase = FieldData(fields = fields, times = times, caseName = caseName, caseDir = caseDir)\n# Go through all specified time directories\nfor time in case.times:\n # Check if pickle results saved for this pickleName and fields\n # If so, use pickle results\n resultNames = os.listdir(case.resultPath[time])\n usePickle = True if pickleName + '_' + fields + '.p' in resultNames else False\n # If no pickle results stored, then run the whole process\n if not usePickle:\n # [BUG]\n # The keyword for symmetric tensor in parse_data_nonuniform() of field_parser.py of Ofpp should be 'symmTensor'\n # instead of 'symmtensor'\n fieldData = case.readFieldData()\n # The data\n data = fieldData[fields]\n # Coordinates of the whole domain in 1D arrays\n ccx, ccy, ccz, cc = case.readCellCenterCoordinates()\n # Confine the domain of interest\n # For parallel turbines, the confine box starts from the center of each rotor plane,\n # 1D upstream turbines, length is 6D, width is 0.5D, height is 1D above hub height\n # For sequential turbines, the confine box starts from the center of rotor planes,\n # 1D upstream upwind turbine, length is 13D, width is 0.5D, height is 1D above hub height\n # Box counter-clockwise rotation in x-y plane\n boxRot = np.pi/6.\n if caseName == 'ALM_N_H_ParTurb':\n # For northern turbine a.k.a. turb1 in ALM_N_H_ParTurb\n if pickleName == 'turb1':\n # Origin\n boxO = (914.464, 1380.179 - 2.5, 0)\n # For southern turbine a.k.a. 
turb0 in ALM_N_H_ParTurb\n elif pickleName == 'turb0':\n boxO = (1103.464, 1052.821 - 2.5, 0)\n\n boxL, boxW, boxH = 6*126, 63 + 2.5*2, 216\n elif caseName == 'ALM_N_H':\n boxO= (1008.964, 1216.5 - 2.5, 0)\n boxL, boxW, boxH = 1638, 63 + 2.5*2, 216\n\n # Confine to domain of interest\n ccx, ccy, ccz, data, box, flags = case.confineFieldDomain_Rotated(ccx, ccy, ccz, data, boxL = 6*126, boxW = 63 + 2.5*2, boxH = 216, boxO = boxO, boxRot = boxRot)\n\n # # Visualize the confine box\n # fig = plt.figure()\n # ax = fig.add_subplot(111)\n # patch = patches.PathPatch(box, facecolor='orange', lw=0)\n # ax.add_patch(patch)\n # ax.set_xlim(0,3000)\n # ax.set_ylim(0,3000)\n # plt.show()\n\n # Process anisotropy tensors\n t0 = t.time()\n data, tensors, eigVals3D, eigVecs4D = ppat.processAnisotropyTensor_Uninterpolated(data)\n t1 = t.time()\n ticToc = t1 - t0\n\n print('\\nDumping results')\n pickle.dump(ccx, open(case.resultPath[time] + pickleName + '_ccx.p', 'wb'))\n pickle.dump(ccy, open(case.resultPath[time] + pickleName + '_ccy.p', 'wb'))\n pickle.dump(ccz, open(case.resultPath[time] + pickleName + '_ccz.p', 'wb'))\n pickle.dump(data, open(case.resultPath[time] + pickleName + '_' + fields + '.p', 'wb'))\n pickle.dump(tensors, open(case.resultPath[time] + pickleName + '_' + fields + '_tensors.p', 'wb'))\n pickle.dump(eigVals3D, open(case.resultPath[time] + pickleName + '_' + fields + '_eigVals.p', 'wb'))\n pickle.dump(eigVecs4D, open(case.resultPath[time] + pickleName + '_' + fields + '_eigVecs.p', 'wb'))\n\n # # Not used, interpolation\n # x3D, y3D, z3D, data = case.interpolateFieldData_RBF(ccx, ccy, ccz, data, precisionX = precisionX, precisionY = precisionY, precisionZ = precisionZ, function = interpMethod)\n\n # If existing pickle results found for pickleName, load them\n else:\n print('\\nExisting pickle results found for ' + pickleName + ', loading them...')\n ccx = pickle.load(open(case.resultPath[time] + pickleName + '_ccx.p', 'rb'), encoding = 'latin1')\n ccy = pickle.load(open(case.resultPath[time] + pickleName + '_ccy.p', 'rb'), encoding = 'latin1')\n ccz = pickle.load(open(case.resultPath[time] + pickleName + '_ccz.p', 'rb'), encoding = 'latin1')\n eigVals3D = pickle.load(open(case.resultPath[time] + pickleName + '_' + fields + '_eigVals.p', 'rb'), encoding = 'latin1')\n eigVecs4D = pickle.load(open(case.resultPath[time] + pickleName + '_' + fields + '_eigVecs.p', 'rb'), encoding = 'latin1')\n\n\n \"\"\"\n Mayavi Quiver Visualization\n \"\"\"\n # Start engine, don't know why\n engine = Engine()\n engine.start()\n axes = Axes()\n mlab.figure(pickleName + '_quivers', engine = engine, size = (1000, 800), bgcolor = (1, 1, 1), fgcolor = (0.5, 0.5, 0.5))\n quiver = mlab.quiver3d(ccx, ccy, ccz, eigVecs4D[:, :, 0, 0].ravel(), eigVecs4D[:, :, 0, 1].ravel(), eigVecs4D[:, :, 0, 2].ravel(), scalars = eigVals3D[:, :, 0].ravel(), mask_points = 150, scale_mode = 'scalar', colormap = 'plasma', opacity = 1)\n # mlab.outline()\n # Added axis\n engine.add_filter(axes, quiver)\n quiver.glyph.color_mode = 'color_by_scalar'\n quiver.glyph.glyph_source.glyph_source.glyph_type = 'dash'\n scene = engine.scenes[0]\n scene.scene.jpeg_quality = 100\n scene.scene.anti_aliasing_frames = 20\n # Axis related settings\n axes.axes.x_label = 'x [m]'\n axes.axes.y_label = 'y [m]'\n axes.axes.z_label = 'z [m]'\n axes.title_text_property.bold, axes.label_text_property.bold = False, False\n axes.label_text_property.italic = False\n axes.title_text_property.font_family = 'times'\n # Axis texts scales to fit in the viewport?\n 
axes.axes.scaling = False\n # Text color\n axes.title_text_property.color, axes.label_text_property.color = (0, 0, 0), (89/255., 89/255., 89/255.)\n # Text size\n axes.title_text_property.font_size, axes.label_text_property.font_size = 14, 12\n axes.axes.font_factor = 1.0\n # Prevent corner axis label clash\n axes.axes.corner_offset = 0.05\n figW = 3.39\n figH = figW*(np.sqrt(5) - 1.0)/2.0\n # Compare strings with ==, not 'is' (identity), which is unreliable here\n if figView == 'iso':\n mlab.view(azimuth = 260, elevation = 60)\n # Move the figure left 20 pixels\n mlab.move(right = -20)\n elif figView == 'front':\n mlab.view(azimuth = 210, elevation = 90)\n mlab.move(forward = 500)\n elif figView == 'left':\n mlab.view(azimuth = 120, elevation = 90)\n mlab.move(up = 0, right = -40)\n elif figView == 'top':\n mlab.view(azimuth = 0, elevation = 0)\n mlab.move(right = -20)\n\n # [BUG] Magnification doesn't work on axis\n mlab.savefig(case.resultPath[time] + pickleName + '_quiver_' + figView + '.png', size = (figW, figH))\n # mlab.show()\n\n\n\n" ]
[ [ "numpy.sqrt" ] ]
oleksyoleksy/youtube
[ "e8ce5f73c31966e18197c40aa4dc1a0a1c056d53" ]
[ "thumbnail-rater/get_data2.py" ]
[ "import numpy as np\nimport os\nfrom matplotlib import pyplot as plt\nimport cv2\nimport random\nimport pickle\n\n\nfile_list = []\nclass_list = []\n\nDATADIR = \"data2\"\n\n# All the categories you want your neural network to detect\nCATEGORIES = [\"bad\", \"good\"]\n\n# The size of the images that your neural network will use\nIMG_SIZE_W = 212\nIMG_SIZE_H = 120\n\n# Checking or all images in the data folder\nfor category in CATEGORIES :\n\tpath = os.path.join(DATADIR, category)\n\tfor img in os.listdir(path):\n\t\timg_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n\ntraining_data = []\n\ndef create_training_data():\n\tfor category in CATEGORIES :\n\t\tpath = os.path.join(DATADIR, category)\n\t\tclass_num = CATEGORIES.index(category)\n\t\timages = os.listdir(path)\n\t\timages = images[:355]\n\t\tfor img in images:\n\t\t\ttry :\n\t\t\t\timg_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n\t\t\t\tnew_array = cv2.resize(img_array, (IMG_SIZE_H, IMG_SIZE_W))\n\t\t\t\ttraining_data.append([new_array, class_num])\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\t\t\n\ncreate_training_data()\n\nrandom.shuffle(training_data)\n\nX = [] #features\ny = [] #labels\n\nfor features, label in training_data:\n\tX.append(features)\n\ty.append(label)\n\nprint(y)\n\n# plt.clf()\n# plt.imshow(X[0])\n# plt.show()\nX = np.array(X).reshape(-1, IMG_SIZE_H, IMG_SIZE_W, 3)\ny = np.array(y)\n\n# Creating the files containing all the information about your model\npickle_out = open(\"X.pickle\", \"wb\")\npickle.dump(X, pickle_out)\npickle_out.close()\n\npickle_out = open(\"y.pickle\", \"wb\")\npickle.dump(y, pickle_out)\npickle_out.close()\n\npickle_in = open(\"X.pickle\", \"rb\")\nX = pickle.load(pickle_in)" ]
[ [ "numpy.array" ] ]
shiyegao/mmcv
[ "c567e04d9d8e87d4b9abc3405e06a218d2011b08" ]
[ "mmcv/runner/epoch_based_runner.py" ]
[ "# Copyright (c) Open-MMLab. All rights reserved.\nimport os.path as osp\nimport platform\nimport shutil\nimport time\nimport warnings\n\nimport torch\n\nimport mmcv\nfrom .base_runner import BaseRunner\nfrom .builder import RUNNERS\nfrom .checkpoint import save_checkpoint\nfrom .utils import get_host_info\n\n\[email protected]_module()\nclass EpochBasedRunner(BaseRunner):\n \"\"\"Epoch-based Runner.\n\n This runner train models epoch by epoch.\n \"\"\"\n\n def run_iter(self, data_batch, train_mode, **kwargs):\n if self.batch_processor is not None:\n outputs = self.batch_processor(\n self.model, data_batch, train_mode=train_mode, **kwargs)\n elif train_mode:\n outputs = self.model.train_step(data_batch, self.optimizer,\n **kwargs)\n else:\n outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)\n if not isinstance(outputs, dict):\n raise TypeError('\"batch_processor()\" or \"model.train_step()\"'\n 'and \"model.val_step()\" must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n\n def train(self, data_loader, **kwargs):\n self.model.train()\n self.mode = 'train'\n self.data_loader = data_loader\n self._max_iters = self._max_epochs * len(self.data_loader)\n self.call_hook('before_train_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.data_batch = data_batch\n self.runner_kwargs = kwargs\n self.train_mode = True\n self.call_hook('before_train_iter')\n self.run_iter(data_batch, train_mode=self.train_mode, **kwargs)\n self.call_hook('after_train_iter')\n self._iter += 1\n\n self.call_hook('after_train_epoch')\n self._epoch += 1\n\n @torch.no_grad()\n def val(self, data_loader, **kwargs):\n self.model.eval()\n self.mode = 'val'\n self.data_loader = data_loader\n self.call_hook('before_val_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.data_batch = data_batch\n self.call_hook('before_val_iter')\n self.run_iter(data_batch, train_mode=False)\n self.call_hook('after_val_iter')\n\n self.call_hook('after_val_epoch')\n\n def run(self, data_loaders, workflow, max_epochs=None, **kwargs):\n \"\"\"Start running.\n\n Args:\n data_loaders (list[:obj:`DataLoader`]): Dataloaders for training\n and validation.\n workflow (list[tuple]): A list of (phase, epochs) to specify the\n running order and epochs. 
E.g, [('train', 2), ('val', 1)] means\n running 2 epochs for training and 1 epoch for validation,\n iteratively.\n \"\"\"\n assert isinstance(data_loaders, list)\n assert mmcv.is_list_of(workflow, tuple)\n assert len(data_loaders) == len(workflow)\n if max_epochs is not None:\n warnings.warn(\n 'setting max_epochs in run is deprecated, '\n 'please set max_epochs in runner_config', DeprecationWarning)\n self._max_epochs = max_epochs\n\n assert self._max_epochs is not None, (\n 'max_epochs must be specified during instantiation')\n\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n if mode == 'train':\n self._max_iters = self._max_epochs * len(data_loaders[i])\n break\n\n work_dir = self.work_dir if self.work_dir is not None else 'NONE'\n self.logger.info('Start running, host: %s, work_dir: %s',\n get_host_info(), work_dir)\n self.logger.info('Hooks will be executed in the following order:\\n%s',\n self.get_hook_info())\n self.logger.info('workflow: %s, max: %d epochs', workflow,\n self._max_epochs)\n self.call_hook('before_run')\n\n while self.epoch < self._max_epochs:\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n if isinstance(mode, str): # self.train()\n if not hasattr(self, mode):\n raise ValueError(\n f'runner has no method named \"{mode}\" to run an '\n 'epoch')\n epoch_runner = getattr(self, mode)\n else:\n raise TypeError(\n 'mode in workflow must be a str, but got {}'.format(\n type(mode)))\n\n for _ in range(epochs):\n if mode == 'train' and self.epoch >= self._max_epochs:\n break\n epoch_runner(data_loaders[i], **kwargs)\n\n time.sleep(1) # wait for some hooks like loggers to finish\n self.call_hook('after_run')\n\n def save_checkpoint(self,\n out_dir,\n filename_tmpl='epoch_{}.pth',\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n \"\"\"Save the checkpoint.\n\n Args:\n out_dir (str): The directory that checkpoints are saved.\n filename_tmpl (str, optional): The checkpoint filename template,\n which contains a placeholder for the epoch number.\n Defaults to 'epoch_{}.pth'.\n save_optimizer (bool, optional): Whether to save the optimizer to\n the checkpoint. Defaults to True.\n meta (dict, optional): The meta information to be saved in the\n checkpoint. 
Defaults to None.\n create_symlink (bool, optional): Whether to create a symlink\n \"latest.pth\" to point to the latest checkpoint.\n Defaults to True.\n \"\"\"\n if meta is None:\n meta = {}\n elif not isinstance(meta, dict):\n raise TypeError(\n f'meta should be a dict or None, but got {type(meta)}')\n if self.meta is not None:\n meta.update(self.meta)\n # Note: meta.update(self.meta) should be done before\n # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise\n # there will be problems with resumed checkpoints.\n # More details in https://github.com/open-mmlab/mmcv/pull/1108\n meta.update(epoch=self.epoch + 1, iter=self.iter)\n\n filename = filename_tmpl.format(self.epoch + 1)\n filepath = osp.join(out_dir, filename)\n optimizer = self.optimizer if save_optimizer else None\n save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)\n # in some environments, `os.symlink` is not supported, you may need to\n # set `create_symlink` to False\n if create_symlink:\n dst_file = osp.join(out_dir, 'latest.pth')\n if platform.system() != 'Windows':\n mmcv.symlink(filename, dst_file)\n else:\n shutil.copy(filepath, dst_file)\n\n\[email protected]_module()\nclass Runner(EpochBasedRunner):\n \"\"\"Deprecated name of EpochBasedRunner.\"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\n 'Runner was deprecated, please use EpochBasedRunner instead')\n super().__init__(*args, **kwargs)\n" ]
[ [ "torch.no_grad" ] ]
neulab/guided_summarization
[ "ea4bbe91f189cdb51f7f6a827210f9adc5319b3c" ]
[ "bert/models/z_trainer.py" ]
[ "import os\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\n\nimport distributed\nfrom models.reporter import ReportMgr, Statistics\nfrom others.logging import logger\nfrom others.utils import test_rouge, rouge_results_to_str\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n return n_params\n\n\ndef build_trainer(args, device_id, model, optims,loss):\n \"\"\"\n Simplify `Trainer` creation based on user `opt`s*\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. \"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model\n \"\"\"\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n\n\n grad_accum_count = args.accum_count\n n_gpu = args.world_size\n\n if device_id >= 0:\n gpu_rank = int(args.gpu_ranks[device_id])\n else:\n gpu_rank = 0\n n_gpu = 0\n\n print('gpu_rank %d' % gpu_rank)\n\n report_manager = None\n if not args.debug:\n tensorboard_log_dir = args.model_path\n\n writer = SummaryWriter(tensorboard_log_dir, comment=\"Unmt\")\n\n report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)\n\n\n trainer = Trainer(args, model, optims, loss, grad_accum_count, n_gpu, gpu_rank, report_manager)\n\n if (model):\n n_params = _tally_parameters(model)\n logger.info('* number of parameters: %d' % n_params)\n\n return trainer\n\n\nclass Trainer(object):\n \"\"\"\n Class that controls the training process.\n\n Args:\n model(:py:class:`onmt.models.model.NMTModel`): translation model\n to train\n train_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n optim(:obj:`onmt.utils.optimizers.Optimizer`):\n the optimizer responsible for update\n trunc_size(int): length of truncated back propagation through time\n shard_size(int): compute loss in shards of this size for efficiency\n data_type(string): type of the source input: [text|img|audio]\n norm_method(string): normalization methods: [sents|tokens]\n grad_accum_count(int): accumulate gradients this many times.\n report_manager(:obj:`onmt.utils.ReportMgrBase`):\n the object that creates reports, or None\n model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is\n used to save a checkpoint.\n Thus nothing will be saved if this parameter is None\n \"\"\"\n\n def __init__(self, args, model, optims, loss,\n grad_accum_count=1, n_gpu=1, gpu_rank=1,\n report_manager=None):\n # Basic attributes.\n self.args = args\n self.save_checkpoint_steps = args.save_checkpoint_steps\n self.model = model\n self.optims = optims\n self.grad_accum_count = grad_accum_count\n self.n_gpu = n_gpu\n self.gpu_rank = gpu_rank\n self.report_manager = report_manager\n\n self.loss = loss\n\n assert grad_accum_count > 0\n # Set model in training mode.\n if (model):\n self.model.train()\n\n def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1):\n \"\"\"\n The main training loops.\n by iterating over training data (i.e. `train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`\n\n Args:\n train_iter_fct(function): a function that returns the train\n iterator. e.g. 
something like\n train_iter_fct = lambda: generator(*args, **kwargs)\n valid_iter_fct(function): same as train_iter_fct, for valid data\n train_steps(int):\n valid_steps(int):\n save_checkpoint_steps(int):\n\n Return:\n None\n \"\"\"\n logger.info('Start training...')\n\n # step = self.optim._step + 1\n step = self.optims[0]._step + 1\n\n true_batchs = []\n accum = 0\n normalization = 0\n train_iter = train_iter_fct()\n\n total_stats = Statistics()\n report_stats = Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n while step <= train_steps:\n\n reduce_counter = 0\n for i, batch in enumerate(train_iter):\n if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):\n\n true_batchs.append(batch)\n num_tokens = batch.tgt[:, 1:].ne(self.loss.padding_idx).sum()\n normalization += num_tokens.item()\n accum += 1\n if accum == self.grad_accum_count:\n reduce_counter += 1\n if self.n_gpu > 1:\n normalization = sum(distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n true_batchs, normalization, total_stats,\n report_stats)\n\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optims[0].learning_rate,\n report_stats)\n\n true_batchs = []\n accum = 0\n normalization = 0\n if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0):\n self._save(step)\n\n step += 1\n if step > train_steps:\n break\n train_iter = train_iter_fct()\n\n return total_stats\n\n def validate(self, valid_iter, step=0):\n \"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n # Set model in validating mode.\n self.model.eval()\n stats = Statistics()\n\n with torch.no_grad():\n for batch in valid_iter:\n src = batch.src\n tgt = batch.tgt\n segs = batch.segs\n clss = batch.clss\n mask_src = batch.mask_src\n mask_tgt = batch.mask_tgt\n mask_cls = batch.mask_cls\n\n z = batch.z\n mask_z = batch.mask_z\n z_segs = batch.z_segs\n\n outputs, _, copy_prob = self.model(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls, z, mask_z, z_segs)\n\n batch_stats = self.loss.monolithic_compute_loss(batch, outputs)\n stats.update(batch_stats)\n self._report_step(0, step, valid_stats=stats)\n return stats\n\n\n def _gradient_accumulation(self, true_batchs, normalization, total_stats,\n report_stats):\n if self.grad_accum_count > 1:\n self.model.zero_grad()\n\n for batch in true_batchs:\n if self.grad_accum_count == 1:\n self.model.zero_grad()\n\n src = batch.src\n tgt = batch.tgt\n segs = batch.segs\n clss = batch.clss\n mask_src = batch.mask_src\n mask_tgt = batch.mask_tgt\n mask_cls = batch.mask_cls\n z = batch.z\n mask_z = batch.mask_z\n z_segs = batch.z_segs\n\n outputs, scores, copy_prob = self.model(src, tgt,segs, clss, mask_src, mask_tgt, mask_cls, z, mask_z, z_segs)\n batch_stats = self.loss.sharded_compute_loss(batch, outputs, self.args.generator_shard_size, normalization)\n\n batch_stats.n_docs = int(src.size(0))\n\n total_stats.update(batch_stats)\n report_stats.update(batch_stats)\n\n # 4. 
Update the parameters and statistics.\n if self.grad_accum_count == 1:\n # Multi GPU gradient gather\n if self.n_gpu > 1:\n grads = [p.grad.data for p in self.model.parameters()\n if p.requires_grad\n and p.grad is not None]\n distributed.all_reduce_and_rescale_tensors(\n grads, float(1))\n\n for o in self.optims:\n o.step()\n\n # in case of multi step gradient accumulation,\n # update only after accum batches\n if self.grad_accum_count > 1:\n if self.n_gpu > 1:\n grads = [p.grad.data for p in self.model.parameters()\n if p.requires_grad\n and p.grad is not None]\n distributed.all_reduce_and_rescale_tensors(\n grads, float(1))\n for o in self.optims:\n o.step()\n\n\n def test(self, test_iter, step, cal_lead=False, cal_oracle=False):\n \"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n # Set model in validating mode.\n def _get_ngrams(n, text):\n ngram_set = set()\n text_length = len(text)\n max_index_ngram_start = text_length - n\n for i in range(max_index_ngram_start + 1):\n ngram_set.add(tuple(text[i:i + n]))\n return ngram_set\n\n def _block_tri(c, p):\n tri_c = _get_ngrams(3, c.split())\n for s in p:\n tri_s = _get_ngrams(3, s.split())\n if len(tri_c.intersection(tri_s))>0:\n return True\n return False\n\n if (not cal_lead and not cal_oracle):\n self.model.eval()\n stats = Statistics()\n\n can_path = '%s_step%d.candidate'%(self.args.result_path,step)\n gold_path = '%s_step%d.gold' % (self.args.result_path, step)\n with open(can_path, 'w') as save_pred:\n with open(gold_path, 'w') as save_gold:\n with torch.no_grad():\n for batch in test_iter:\n gold = []\n pred = []\n if (cal_lead):\n selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size\n for i, idx in enumerate(selected_ids):\n _pred = []\n if(len(batch.src_str[i])==0):\n continue\n for j in selected_ids[i][:len(batch.src_str[i])]:\n if(j>=len( batch.src_str[i])):\n continue\n candidate = batch.src_str[i][j].strip()\n _pred.append(candidate)\n\n if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3):\n break\n\n _pred = '<q>'.join(_pred)\n if(self.args.recall_eval):\n _pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])\n\n pred.append(_pred)\n gold.append(batch.tgt_str[i])\n\n for i in range(len(gold)):\n save_gold.write(gold[i].strip()+'\\n')\n for i in range(len(pred)):\n save_pred.write(pred[i].strip()+'\\n')\n if(step!=-1 and self.args.report_rouge):\n rouges = test_rouge(self.args.temp_dir, can_path, gold_path)\n logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n self._report_step(0, step, valid_stats=stats)\n\n return stats\n\n def _save(self, step):\n real_model = self.model\n # real_generator = (self.generator.module\n # if isinstance(self.generator, torch.nn.DataParallel)\n # else self.generator)\n\n model_state_dict = real_model.state_dict()\n # generator_state_dict = real_generator.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n # 'generator': generator_state_dict,\n 'opt': self.args,\n 'optims': self.optims,\n }\n if not self.args.debug:\n checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)\n logger.info(\"Saving checkpoint %s\" % checkpoint_path)\n # checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step)\n if (not os.path.exists(checkpoint_path)):\n torch.save(checkpoint, checkpoint_path)\n return checkpoint, checkpoint_path\n\n def _start_report_manager(self, start_time=None):\n \"\"\"\n Simple function to start report 
manager (if any)\n \"\"\"\n if self.report_manager is not None:\n if start_time is None:\n self.report_manager.start()\n else:\n self.report_manager.start_time = start_time\n\n def _maybe_gather_stats(self, stat):\n \"\"\"\n Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object\n \"\"\"\n if stat is not None and self.n_gpu > 1:\n return Statistics.all_gather_stats(stat)\n return stat\n\n def _maybe_report_training(self, step, num_steps, learning_rate,\n report_stats):\n \"\"\"\n Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_training(\n step, num_steps, learning_rate, report_stats,\n multigpu=self.n_gpu > 1)\n\n def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n \"\"\"\n Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)\n\n def _maybe_save(self, step):\n \"\"\"\n Save the model if a model saver is set\n \"\"\"\n if self.model_saver is not None:\n self.model_saver.maybe_save(step)\n" ]
[ [ "torch.no_grad", "torch.save" ] ]
anoidgit/zero
[ "e764ce09a4c7737c6399ac2deaf104bc211ec39e" ]
[ "rnns/cell.py" ]
[ "# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport tensorflow as tf\nfrom func import linear\nfrom utils import dtype\n\n\n# This is an abstract class that deals with\n# recurrent cells, e.g. GRU, LSTM, ATR\nclass Cell(object):\n def __init__(self,\n d, # hidden state dimension\n ln=False, # whether use layer normalization\n scope=None, # the name scope for this cell\n ):\n self.d = d\n self.scope = scope\n self.ln = ln\n\n def _get_init_state(self, d, shape=None, x=None, scope=None):\n # gen init state vector\n # if no evidence x is provided, use zero initialization\n if x is None:\n assert shape is not None, \"you should provide shape\"\n if not isinstance(shape, (tuple, list)):\n shape = [shape]\n shape = shape + [d]\n return dtype.tf_to_float(tf.zeros(shape))\n else:\n return linear(\n x, d, bias=True, ln=self.ln,\n scope=\"{}_init\".format(scope or self.scope)\n )\n\n def get_hidden(self, x):\n return x\n\n @abc.abstractmethod\n def get_init_state(self, shape=None, x=None, scope=None):\n raise NotImplementedError(\"Not Supported\")\n\n @abc.abstractmethod\n def __call__(self, h_, x):\n raise NotImplementedError(\"Not Supported\")\n\n @abc.abstractmethod\n def fetch_states(self, x):\n raise NotImplementedError(\"Not Supported\")\n" ]
[ [ "tensorflow.zeros" ] ]
tungedng2710/ArcFace_pytorch
[ "fa7d7f42b07af61ad595ef87a687a79e0690f771" ]
[ "utils/losses.py" ]
[ "import torch\nimport torch.nn as nn\n\n# DEVICE=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef l2_norm(input, axis = 1):\n norm = torch.norm(input, 2, axis, True)\n output = torch.div(input, norm)\n\n return output\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=0, eps=1e-7):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.eps = eps\n self.ce = torch.nn.CrossEntropyLoss()\n\n def forward(self, input, target):\n logp = self.ce(input, target)\n p = torch.exp(-logp)\n loss = (1 - p) ** self.gamma * logp\n return loss.mean()\n\nclass ArcFaceLoss(nn.Module):\n def __init__(self, s=30.0, m=0.50, is_cuda=True, base_loss = 'CrossEntropyLoss'):\n super(ArcFaceLoss, self).__init__()\n self.s = s\n self.m = m\n if base_loss == 'FocalLoss':\n self.criterion = FocalLoss()\n else:\n self.criterion = nn.CrossEntropyLoss()\n if is_cuda:\n self.criterion = self.criterion.cuda()\n\n def forward(self, input, label):\n theta = torch.acos(torch.clamp(input, -1.0 + 1e-7, 1.0 - 1e-7))\n target_logits = torch.cos(theta + self.m) \n one_hot = torch.zeros_like(input)\n one_hot.scatter_(1, label.view(-1, 1).long(), 1)\n output = input * (1 - one_hot) + target_logits * one_hot\n output = output * self.s\n return self.criterion(output, label)\n\nclass MLLoss(nn.Module):\n def __init__(self, s=64.0):\n super(MLLoss, self).__init__()\n self.s = s\n def forward(self, embbedings, label):\n embbedings = l2_norm(embbedings, axis=1)\n kernel_norm = l2_norm(self.kernel, axis=0)\n cos_theta = torch.mm(embbedings, kernel_norm)\n cos_theta = cos_theta.clamp(-1, 1) # for numerical stability\n cos_theta.mul_(self.s)\n return cos_theta\n\nclass ElasticArcFaceLoss(nn.Module):\n def __init__(self, in_features=512, out_features=1000, s=30.0, m=0.50,std=0.0125, is_cuda=True):\n super(ElasticArcFaceLoss, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.s = s\n self.m = m\n self.std=std\n self.criterion = torch.nn.CrossEntropyLoss()\n if is_cuda:\n self.criterion = self.criterion.cuda()\n\n def forward(self, input, label):\n cos_theta = input.clamp(-1.0 + 1e-7, 1.0 - 1e-7) # for numerical stability\n index = torch.where(label != -1)[0]\n m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)\n margin = torch.normal(mean=self.m, std=self.std, size=label[index, None].size(), device=cos_theta.device) # Fast converge .clamp(self.m-self.std, self.m+self.std)\n m_hot.scatter_(1, label[index, None], margin)\n cos_theta.acos_()\n cos_theta[index] += m_hot\n cos_theta.cos_().mul_(self.s)\n return self.criterion(cos_theta, label) \n\ndef get_loss(name: str = 'ArcFace'):\n if name == 'ArcFace':\n loss_function = ArcFaceLoss(base_loss='FocalLoss')\n elif name == 'ElasticArcFace':\n loss_function = ElasticArcFaceLoss()\n elif name == 'FocalLoss':\n loss_function = FocalLoss()\n else:\n loss_function = nn.CrossEntropyLoss()\n return loss_function" ]
[ [ "torch.div", "torch.nn.CrossEntropyLoss", "torch.norm", "torch.mm", "torch.zeros_like", "torch.exp", "torch.where", "torch.clamp", "torch.cos" ] ]
Srivathsav-max/armnn-clone
[ "e571cde8411803aec545b1070ed677e481f46f3f" ]
[ "python/pyarmnn/examples/speech_recognition/wav2letter_mfcc.py" ]
[ "# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.\n# SPDX-License-Identifier: MIT\n\nimport numpy as np\nimport os\nimport sys\n\nscript_dir = os.path.dirname(__file__)\nsys.path.insert(1, os.path.join(script_dir, '..', 'common'))\n\nfrom mfcc import MFCC, AudioPreprocessor\n\n\nclass Wav2LetterMFCC(MFCC):\n \"\"\"Extends base MFCC class to provide Wav2Letter-specific MFCC requirements.\"\"\"\n\n def __init__(self, mfcc_params):\n super().__init__(mfcc_params)\n\n def spectrum_calc(self, audio_data):\n return np.abs(np.fft.rfft(np.hanning(self.mfcc_params.frame_len + 1)[0:self.mfcc_params.frame_len] * audio_data,\n self.mfcc_params.n_fft)) ** 2\n\n def log_mel(self, mel_energy):\n mel_energy += 1e-10\n log_mel_energy = 10.0 * np.log10(mel_energy)\n top_db = 80.0\n return np.maximum(log_mel_energy, log_mel_energy.max() - top_db)\n\n def create_dct_matrix(self, num_fbank_bins, num_mfcc_feats):\n \"\"\"\n Creates the Discrete Cosine Transform matrix to be used in the compute function.\n\n Args:\n num_fbank_bins: The number of filter bank bins\n num_mfcc_feats: the number of MFCC features\n\n Returns:\n the DCT matrix\n \"\"\"\n dct_m = np.zeros(num_fbank_bins * num_mfcc_feats)\n for k in range(num_mfcc_feats):\n for n in range(num_fbank_bins):\n if k == 0:\n dct_m[(k * num_fbank_bins) + n] = 2 * np.sqrt(1 / (4 * num_fbank_bins)) * np.cos(\n (np.pi / num_fbank_bins) * (n + 0.5) * k)\n else:\n dct_m[(k * num_fbank_bins) + n] = 2 * np.sqrt(1 / (2 * num_fbank_bins)) * np.cos(\n (np.pi / num_fbank_bins) * (n + 0.5) * k)\n\n dct_m = np.reshape(dct_m, [self.mfcc_params.num_mfcc_feats, self.mfcc_params.num_fbank_bins])\n return dct_m\n\n def mel_norm(self, weight, right_mel, left_mel):\n \"\"\"Over-riding parent class with ASR specific weight normalisation.\"\"\"\n enorm = 2.0 / (self.inv_mel_scale(right_mel, False) - self.inv_mel_scale(left_mel, False))\n return weight * enorm\n\n\nclass W2LAudioPreprocessor(AudioPreprocessor):\n\n def __init__(self, mfcc, model_input_size, stride):\n self.model_input_size = model_input_size\n self.stride = stride\n\n super().__init__(self, model_input_size, stride)\n # Savitzky - Golay differential filters\n self.savgol_order1_coeffs = np.array([6.66666667e-02, 5.00000000e-02, 3.33333333e-02,\n 1.66666667e-02, -3.46944695e-18, -1.66666667e-02,\n -3.33333333e-02, -5.00000000e-02, -6.66666667e-02])\n\n self.savgol_order2_coeffs = np.array([0.06060606, 0.01515152, -0.01731602,\n -0.03679654, -0.04329004, -0.03679654,\n -0.01731602, 0.01515152, 0.06060606])\n self._mfcc_calc = mfcc\n\n def mfcc_delta_calc(self, features):\n \"\"\"Over-riding parent class with ASR specific MFCC derivative features\"\"\"\n mfcc_delta_np = np.zeros_like(features)\n mfcc_delta2_np = np.zeros_like(features)\n\n for i in range(features.shape[1]):\n idelta = np.convolve(features[:, i], self.savgol_order1_coeffs, 'same')\n mfcc_delta_np[:, i] = idelta\n ideltadelta = np.convolve(features[:, i], self.savgol_order2_coeffs, 'same')\n mfcc_delta2_np[:, i] = ideltadelta\n\n features = np.concatenate((self._normalize(features), self._normalize(mfcc_delta_np),\n self._normalize(mfcc_delta2_np)), axis=1)\n\n return features\n" ]
[ [ "numpy.convolve", "numpy.sqrt", "numpy.reshape", "numpy.cos", "numpy.log10", "numpy.zeros_like", "numpy.hanning", "numpy.array", "numpy.zeros" ] ]
96-Zachary/PGCD-for-ABSA
[ "3d02cc2fc987827d279e61d6af3a06e7bfe01caa" ]
[ "models/lstm.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom layers.dynamic_rnn import DynamicLSTM\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\n\n\nclass LSTM(nn.Module):\n def __init__(self, embedding_matrix, opt):\n super(LSTM, self).__init__()\n self.embed_dim = embedding_matrix.shape[-1]\n self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))\n self.lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)\n self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)\n\n def forward(self, inputs):\n text_raw_indices, aspect_indices = inputs[0], inputs[1]\n x = self.embed(text_raw_indices)\n x_len = torch.sum(text_raw_indices != 0, dim=-1)\n\n _, (h_n, _) = self.lstm(x, x_len)\n\n out = self.dense(h_n[0])\n\n return out\n\n" ]
[ [ "torch.nn.Linear", "torch.sum", "torch.tensor" ] ]
Embodimentgeniuslm3/datarobot-user-models
[ "9453482896c6cc27468d829ad037a6f164d3c5a3" ]
[ "tests/drum/test_fit.py" ]
[ "import os\nimport shutil\nfrom tempfile import NamedTemporaryFile\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom datarobot_drum.drum.common import ArgumentsOptions\nfrom datarobot_drum.drum.utils import handle_missing_colnames, unset_drum_supported_env_vars\nfrom datarobot_drum.resource.utils import (\n _cmd_add_class_labels,\n _create_custom_model_dir,\n _exec_shell_cmd,\n)\nfrom .constants import (\n ANOMALY,\n BINARY,\n BINARY_BOOL,\n BINARY_SPACES,\n BINARY_TEXT,\n DOCKER_PYTHON_SKLEARN,\n KERAS,\n MULTICLASS,\n MULTICLASS_BINARY,\n MULTICLASS_NUM_LABELS,\n PYTHON,\n PYTORCH,\n PYTORCH_MULTICLASS,\n R_FIT,\n RDS,\n REGRESSION,\n SIMPLE,\n SKLEARN,\n SKLEARN_ANOMALY,\n SKLEARN_BINARY,\n SKLEARN_MULTICLASS,\n SKLEARN_PRED_CONSISTENCY,\n SKLEARN_REGRESSION,\n SKLEARN_SPARSE,\n SKLEARN_TRANSFORM,\n SKLEARN_TRANSFORM_NO_HOOK,\n SKLEARN_TRANSFORM_NON_NUMERIC,\n SKLEARN_TRANSFORM_SPARSE_IN_OUT,\n SKLEARN_TRANSFORM_SPARSE_INPUT,\n SKLEARN_TRANSFORM_WITH_Y,\n SPARSE,\n SPARSE_COLUMNS,\n SPARSE_TARGET,\n TARGET_NAME_DUPLICATED_X,\n TARGET_NAME_DUPLICATED_Y,\n TESTS_ROOT_PATH,\n TRANSFORM,\n WEIGHTS_ARGS,\n WEIGHTS_CSV,\n XGB,\n SKLEARN_BINARY_PARAMETERS,\n SKLEARN_BINARY_HYPERPARAMETERS,\n SKLEARN_TRANSFORM_HYPERPARAMETERS,\n SKLEARN_TRANSFORM_PARAMETERS,\n RDS_HYPERPARAMETERS,\n RDS_PARAMETERS,\n SKLEARN_BINARY_SCHEMA_VALIDATION,\n)\n\n\nclass TestFit:\n @staticmethod\n def _add_weights_cmd(weights, input_csv, r_fit=False):\n df = pd.read_csv(input_csv)\n colname = \"some-colname\"\n weights_data = pd.Series(np.random.randint(1, 3, len(df)))\n __keep_this_around = NamedTemporaryFile(\"w\")\n if weights == WEIGHTS_ARGS:\n df[colname] = weights_data\n if r_fit:\n df = handle_missing_colnames(df)\n df.to_csv(__keep_this_around.name)\n return \" --row-weights \" + colname, __keep_this_around.name, __keep_this_around\n elif weights == WEIGHTS_CSV:\n weights_data.to_csv(__keep_this_around.name)\n return \" --row-weights-csv \" + __keep_this_around.name, input_csv, __keep_this_around\n\n return \"\", input_csv, __keep_this_around\n\n @pytest.mark.parametrize(\"framework\", [XGB, RDS])\n @pytest.mark.parametrize(\"problem\", [REGRESSION])\n @pytest.mark.parametrize(\"docker\", [DOCKER_PYTHON_SKLEARN, None])\n @pytest.mark.parametrize(\"weights\", [None])\n @pytest.mark.parametrize(\"use_output\", [True, False])\n @pytest.mark.parametrize(\"nested\", [True, False])\n def test_fit_for_use_output_and_nested(\n self, resources, framework, problem, docker, weights, use_output, tmp_path, nested,\n ):\n if docker and framework != SKLEARN:\n return\n if framework == RDS:\n language = R_FIT\n else:\n language = PYTHON\n\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, framework, problem, language, is_training=True, nested=nested\n )\n\n input_dataset = resources.datasets(framework, problem)\n\n weights_cmd, input_dataset, __keep_this_around = self._add_weights_cmd(\n weights, input_dataset, r_fit=language == R_FIT\n )\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --target-type {} --code-dir {} --input {} --verbose \".format(\n ArgumentsOptions.MAIN_COMMAND, problem, custom_model_dir, input_dataset\n )\n if problem != ANOMALY:\n cmd += \" --target {}\".format(resources.targets(problem))\n\n if use_output:\n cmd += \" --output {}\".format(output)\n if problem == BINARY:\n cmd = _cmd_add_class_labels(\n cmd, resources.class_labels(framework, problem), target_type=problem\n )\n if docker:\n cmd += \" --docker {} \".format(docker)\n\n cmd += 
weights_cmd\n\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n @pytest.mark.parametrize(\n \"framework, problem, docker\",\n [\n (RDS, BINARY_BOOL, None),\n (RDS, BINARY_TEXT, None),\n (RDS, REGRESSION, None),\n (RDS, MULTICLASS, None),\n (RDS, MULTICLASS_BINARY, None),\n (SKLEARN_BINARY, BINARY_TEXT, DOCKER_PYTHON_SKLEARN),\n (SKLEARN_REGRESSION, REGRESSION, DOCKER_PYTHON_SKLEARN),\n (SKLEARN_ANOMALY, ANOMALY, DOCKER_PYTHON_SKLEARN),\n (SKLEARN_MULTICLASS, MULTICLASS, DOCKER_PYTHON_SKLEARN),\n (SKLEARN_BINARY, BINARY_TEXT, None),\n (SKLEARN_BINARY, BINARY_SPACES, None),\n (SKLEARN_REGRESSION, REGRESSION, None),\n (SKLEARN_ANOMALY, ANOMALY, None),\n (SKLEARN_MULTICLASS, MULTICLASS, None),\n (SKLEARN_MULTICLASS, MULTICLASS_BINARY, None),\n (SKLEARN_MULTICLASS, MULTICLASS_NUM_LABELS, None),\n (XGB, BINARY_TEXT, None),\n (XGB, REGRESSION, None),\n (XGB, MULTICLASS, None),\n (XGB, MULTICLASS_BINARY, None),\n (KERAS, BINARY_TEXT, None),\n (KERAS, REGRESSION, None),\n (KERAS, MULTICLASS, None),\n (KERAS, MULTICLASS_BINARY, None),\n (PYTORCH, BINARY_TEXT, None),\n (PYTORCH, REGRESSION, None),\n (PYTORCH_MULTICLASS, MULTICLASS, None),\n (PYTORCH_MULTICLASS, MULTICLASS_BINARY, None),\n ],\n )\n @pytest.mark.parametrize(\"weights\", [WEIGHTS_CSV, WEIGHTS_ARGS, None])\n def test_fit(\n self, resources, framework, problem, docker, weights, tmp_path,\n ):\n if framework == RDS:\n language = R_FIT\n else:\n language = PYTHON\n\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, framework, problem, language, is_training=True,\n )\n\n input_dataset = resources.datasets(framework, problem)\n\n weights_cmd, input_dataset, __keep_this_around = self._add_weights_cmd(\n weights, input_dataset, r_fit=language == R_FIT\n )\n\n target_type = resources.target_types(problem)\n\n cmd = \"{} fit --target-type {} --code-dir {} --input {} --verbose \".format(\n ArgumentsOptions.MAIN_COMMAND, target_type, custom_model_dir, input_dataset\n )\n if problem != ANOMALY:\n cmd += \" --target {}\".format(resources.targets(problem))\n\n if problem in [BINARY, MULTICLASS]:\n cmd = _cmd_add_class_labels(\n cmd, resources.class_labels(framework, problem), target_type=target_type\n )\n if docker:\n cmd += \" --docker {} \".format(docker)\n\n cmd += weights_cmd\n\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n @pytest.mark.parametrize(\n \"framework, problem, docker, parameters\",\n [\n (SKLEARN_BINARY_HYPERPARAMETERS, BINARY_TEXT, None, SKLEARN_BINARY_PARAMETERS),\n (SKLEARN_BINARY_HYPERPARAMETERS, BINARY_SPACES, None, SKLEARN_BINARY_PARAMETERS),\n (SKLEARN_TRANSFORM_HYPERPARAMETERS, REGRESSION, None, SKLEARN_TRANSFORM_PARAMETERS),\n (RDS_HYPERPARAMETERS, BINARY_TEXT, None, RDS_PARAMETERS),\n ],\n )\n @pytest.mark.parametrize(\"weights\", [WEIGHTS_CSV, WEIGHTS_ARGS, None])\n def test_fit_hyperparameters(\n self, resources, framework, problem, docker, parameters, weights, tmp_path,\n ):\n if framework == RDS_HYPERPARAMETERS:\n language = R_FIT\n else:\n language = PYTHON\n\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, framework, problem, language, is_training=True,\n )\n\n input_dataset = resources.datasets(framework, problem)\n parameter_file = resources.datasets(framework, parameters)\n\n weights_cmd, input_dataset, __keep_this_around = self._add_weights_cmd(\n weights, input_dataset, r_fit=language == R_FIT\n )\n\n target_type = resources.target_types(problem) if \"transform\" not in framework else TRANSFORM\n\n cmd = \"{} fit --target-type {} --code-dir {} --input {} --parameter-file {} --verbose \".format(\n ArgumentsOptions.MAIN_COMMAND,\n target_type,\n custom_model_dir,\n input_dataset,\n parameter_file,\n )\n if problem != ANOMALY:\n cmd += \" --target {}\".format(resources.targets(problem))\n\n if problem in [BINARY, MULTICLASS]:\n cmd = _cmd_add_class_labels(\n cmd, resources.class_labels(framework, problem), target_type=target_type\n )\n if docker:\n cmd += \" --docker {} \".format(docker)\n\n cmd += weights_cmd\n\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n @pytest.mark.parametrize(\n \"framework\",\n [\n SKLEARN_TRANSFORM,\n SKLEARN_TRANSFORM_WITH_Y,\n SKLEARN_TRANSFORM_NO_HOOK,\n SKLEARN_TRANSFORM_NON_NUMERIC,\n ],\n )\n @pytest.mark.parametrize(\"problem\", [REGRESSION, BINARY, ANOMALY])\n @pytest.mark.parametrize(\"weights\", [WEIGHTS_CSV, WEIGHTS_ARGS, None])\n def test_transform_fit(\n self, resources, framework, problem, weights, tmp_path,\n ):\n language = PYTHON\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, framework, problem, language=framework,\n )\n\n input_dataset = resources.datasets(framework, problem)\n\n weights_cmd, input_dataset, __keep_this_around = self._add_weights_cmd(\n weights, input_dataset, r_fit=language == R_FIT\n )\n\n target_type = TRANSFORM\n\n cmd = \"{} fit --target-type {} --code-dir {} --input {} --verbose \".format(\n ArgumentsOptions.MAIN_COMMAND, target_type, custom_model_dir, input_dataset\n )\n if problem != ANOMALY:\n cmd += \" --target {}\".format(resources.targets(problem))\n\n if problem in [BINARY, MULTICLASS]:\n cmd = _cmd_add_class_labels(\n cmd, resources.class_labels(framework, problem), target_type=target_type\n )\n\n cmd += weights_cmd\n\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n @pytest.mark.parametrize(\n \"framework\", [SKLEARN_TRANSFORM_SPARSE_IN_OUT, SKLEARN_TRANSFORM_SPARSE_INPUT,],\n )\n def test_sparse_transform_fit(\n self, framework, resources, tmp_path,\n ):\n input_dataset = resources.datasets(None, SPARSE)\n target_dataset = resources.datasets(None, SPARSE_TARGET)\n\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, framework, REGRESSION, language=framework,\n )\n columns = resources.datasets(framework, SPARSE_COLUMNS)\n\n cmd = \"{} fit --target-type {} --code-dir {} --input {} --verbose --target-csv {} --sparse-column-file {}\".format(\n ArgumentsOptions.MAIN_COMMAND,\n TRANSFORM,\n custom_model_dir,\n input_dataset,\n target_dataset,\n columns,\n )\n\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n def _create_fit_input_data_dir(\n self, get_target, get_dataset_filename, input_dir, problem, weights, is_sparse=False\n ):\n input_dir.mkdir(parents=True, exist_ok=True)\n\n # Training data\n if is_sparse:\n X_file = os.path.join(input_dir, \"X.mtx\")\n input_dataset = get_dataset_filename(None, SPARSE)\n shutil.copyfile(input_dataset, X_file)\n else:\n X_file = os.path.join(input_dir, \"X.csv\")\n input_dataset = get_dataset_filename(None, problem)\n with open(X_file, \"w+\") as fp:\n df = pd.read_csv(input_dataset)\n if problem == ANOMALY or is_sparse:\n feature_df = df\n else:\n feature_df = df.loc[:, df.columns != get_target(problem)]\n feature_df.to_csv(fp, index=False)\n\n if problem != ANOMALY:\n # Target data\n target_file = os.path.join(input_dir, \"y.csv\")\n if not is_sparse:\n with open(target_file, \"w+\") as fp:\n target_series = df[get_target(problem)]\n target_series.to_csv(fp, index=False, header=\"Target\")\n if is_sparse:\n shutil.copyfile(get_dataset_filename(None, SPARSE_TARGET), target_file)\n\n if is_sparse:\n columns = get_dataset_filename(None, SPARSE_COLUMNS)\n shutil.copyfile(columns, input_dir / \"X.colnames\")\n\n # Weights data\n if weights:\n df = pd.read_csv(input_dataset)\n weights_data = pd.Series(np.random.randint(1, 3, len(df)))\n with open(os.path.join(input_dir, \"weights.csv\"), \"w+\") as fp:\n weights_data.to_csv(fp)\n\n @pytest.mark.parametrize(\n \"framework, problem, parameters\",\n [\n (SKLEARN_BINARY, BINARY_TEXT, None),\n (SKLEARN_BINARY, BINARY, None),\n (SKLEARN_BINARY_HYPERPARAMETERS, BINARY, SKLEARN_BINARY_PARAMETERS),\n (SKLEARN_ANOMALY, ANOMALY, None),\n (SKLEARN_MULTICLASS, MULTICLASS, None),\n (SKLEARN_SPARSE, REGRESSION, None),\n (XGB, BINARY_TEXT, None),\n (XGB, BINARY, None),\n (XGB, MULTICLASS, None),\n (KERAS, BINARY_TEXT, None),\n (KERAS, BINARY, None),\n (KERAS, MULTICLASS, None),\n ],\n )\n @pytest.mark.parametrize(\"weights\", [WEIGHTS_CSV, None])\n def test_fit_sh(\n self, resources, framework, problem, parameters, weights, tmp_path,\n ):\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, framework, problem, PYTHON, is_training=True,\n )\n\n env = os.environ\n fit_sh = os.path.join(\n TESTS_ROOT_PATH,\n \"..\",\n \"public_dropin_environments/{}_{}/fit.sh\".format(\n PYTHON,\n framework\n if framework\n not in [\n SKLEARN_ANOMALY,\n SKLEARN_BINARY,\n SKLEARN_MULTICLASS,\n SKLEARN_SPARSE,\n SKLEARN_BINARY_HYPERPARAMETERS,\n ]\n else SKLEARN,\n ),\n )\n\n input_dir = tmp_path / \"input_dir\"\n self._create_fit_input_data_dir(\n resources.targets,\n resources.datasets,\n input_dir,\n problem,\n weights,\n is_sparse=framework == 
SKLEARN_SPARSE,\n )\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n unset_drum_supported_env_vars()\n\n env[\"CODEPATH\"] = str(custom_model_dir)\n env[\"INPUT_DIRECTORY\"] = str(input_dir)\n env[\"ARTIFACT_DIRECTORY\"] = str(output)\n env[\"TARGET_TYPE\"] = problem if problem != BINARY_TEXT else BINARY\n if framework == SKLEARN_SPARSE:\n env[\"TRAINING_DATA_EXTENSION\"] = \".mtx\"\n else:\n env[\"TRAINING_DATA_EXTENSION\"] = \".csv\"\n\n if problem in [BINARY, BINARY_TEXT]:\n labels = resources.class_labels(framework, problem)\n env[\"NEGATIVE_CLASS_LABEL\"] = labels[0]\n env[\"POSITIVE_CLASS_LABEL\"] = labels[1]\n elif problem == MULTICLASS:\n labels = resources.class_labels(framework, problem)\n with open(os.path.join(tmp_path, \"class_labels.txt\"), mode=\"w\") as f:\n f.write(\"\\n\".join(labels))\n env[\"CLASS_LABELS_FILE\"] = f.name\n\n if parameters:\n parameter_file = resources.datasets(framework, parameters)\n parameter_input_file = os.path.join(input_dir, \"parameters.json\")\n shutil.copyfile(parameter_file, parameter_input_file)\n\n _exec_shell_cmd(fit_sh, \"Failed cmd {}\".format(fit_sh), env=env)\n\n # clear env vars as it may affect next test cases\n unset_drum_supported_env_vars()\n\n def test_fit_simple(\n self, resources, tmp_path,\n ):\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, SIMPLE, REGRESSION, PYTHON, is_training=True, nested=True,\n )\n\n input_dataset = resources.datasets(SKLEARN, REGRESSION)\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --target-type {} --code-dir {} --target {} --input {} --verbose\".format(\n ArgumentsOptions.MAIN_COMMAND,\n REGRESSION,\n custom_model_dir,\n resources.targets(REGRESSION),\n input_dataset,\n )\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n @pytest.mark.parametrize(\n \"framework\", [SKLEARN_SPARSE, PYTORCH, RDS,],\n )\n def test_fit_sparse(self, resources, tmp_path, framework):\n custom_model_dir = _create_custom_model_dir(\n resources,\n tmp_path,\n framework,\n SPARSE,\n language=R_FIT if framework == RDS else PYTHON,\n is_training=True,\n )\n\n input_dataset = resources.datasets(framework, SPARSE)\n target_dataset = resources.datasets(framework, SPARSE_TARGET)\n columns = resources.datasets(framework, SPARSE_COLUMNS)\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --code-dir {} --input {} --target-type {} --verbose --sparse-column-file {}\".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset, REGRESSION, columns\n )\n\n cmd += \" --target-csv \" + target_dataset\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n @pytest.mark.parametrize(\"framework, problem\", [(SKLEARN_PRED_CONSISTENCY, BINARY_BOOL)])\n def test_prediction_consistency(self, resources, tmp_path, framework, problem):\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, framework, SPARSE, language=PYTHON, is_training=True,\n )\n\n input_dataset = resources.datasets(framework, problem)\n\n if problem in [BINARY_TEXT, BINARY_BOOL]:\n target_type = BINARY\n else:\n target_type = problem\n\n cmd = \"{} fit --target-type {} --code-dir {} --input {} --verbose \".format(\n ArgumentsOptions.MAIN_COMMAND, target_type, custom_model_dir, input_dataset\n )\n cmd += \" --target {}\".format(resources.targets(problem))\n\n if target_type in [BINARY, MULTICLASS]:\n cmd = _cmd_add_class_labels(\n cmd, resources.class_labels(framework, problem), target_type\n )\n\n _, stdout, stderr = _exec_shell_cmd(\n cmd,\n \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd),\n assert_if_fail=True,\n )\n\n # we should throw a warning, not an error\n assert \"Your predictions were different when we tried to predict twice.\" in stderr\n # but don't error out\n assert (\n \"Your model can be fit to your data, and predictions can be made on the fit model!\"\n in stdout\n )\n # clean up\n sample_dir = stderr.split(\":\")[-1]\n if sample_dir.endswith(\"\\n\"):\n sample_dir = sample_dir[:-1]\n os.remove(sample_dir.strip())\n\n def test_duplicate_target_name(self, resources, tmp_path):\n custom_model_dir = _create_custom_model_dir(\n resources, tmp_path, SKLEARN_REGRESSION, SPARSE, language=PYTHON, is_training=True,\n )\n\n input_dataset = resources.datasets(SKLEARN_REGRESSION, TARGET_NAME_DUPLICATED_X)\n target_dataset = resources.datasets(SKLEARN_REGRESSION, TARGET_NAME_DUPLICATED_Y)\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --code-dir {} --input {} --target-type {} --verbose \".format(\n ArgumentsOptions.MAIN_COMMAND, custom_model_dir, input_dataset, REGRESSION\n )\n\n cmd += \" --target-csv \" + target_dataset\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n def test_fit_schema_validation(self, resources, tmp_path):\n custom_model_dir = _create_custom_model_dir(\n resources,\n tmp_path,\n SKLEARN_BINARY_SCHEMA_VALIDATION,\n BINARY,\n PYTHON,\n is_training=True,\n include_metadata=True,\n )\n\n input_dataset = resources.datasets(SKLEARN, BINARY)\n\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --target-type {} --code-dir {} --target {} --input {} --verbose\".format(\n ArgumentsOptions.MAIN_COMMAND,\n BINARY,\n custom_model_dir,\n resources.targets(BINARY),\n input_dataset,\n )\n _exec_shell_cmd(\n cmd, \"Failed in {} command line! {}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n\n def test_fit_schema_failure(self, resources, tmp_path):\n custom_model_dir = _create_custom_model_dir(\n resources,\n tmp_path,\n SKLEARN_BINARY_SCHEMA_VALIDATION,\n BINARY,\n PYTHON,\n is_training=True,\n include_metadata=True,\n )\n\n input_dataset = resources.datasets(SKLEARN, BINARY_TEXT)\n output = tmp_path / \"output\"\n output.mkdir()\n\n cmd = \"{} fit --target-type {} --code-dir {} --target {} --input {} --verbose\".format(\n ArgumentsOptions.MAIN_COMMAND,\n BINARY,\n custom_model_dir,\n resources.targets(BINARY_TEXT),\n input_dataset,\n )\n with pytest.raises(AssertionError):\n _, _, stderr = _exec_shell_cmd(\n cmd, \"Failed in {} command line! 
{}\".format(ArgumentsOptions.MAIN_COMMAND, cmd)\n )\n assert \"DrumSchemaValidationException\" in stderr\n" ]
[ [ "pandas.read_csv" ] ]
jonathanchu33/pix2pix_cyclegan_guess_noise
[ "c21634be42e246b562b1a1ebf26c953f351228dd" ]
[ "metrics/consolidated.py" ]
[ "import os\nimport numpy as np\nfrom PIL import Image\nimport argparse\n\nIMG_BATCH_SIZE = 14\n# palette_quantization = False # Palette quantization strategy - unused\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Calculate RH, Proposed RH, Seg/IoU Acc. Metrics.')\n parser.add_argument('--img_dir', type=str, required=True, help='Directory path containing images to analyze')\n parser.add_argument('--normalize', action='store_true', help='Normalize images before calculating metrics.')\n parser.add_argument('--print_every', type=int, default=25, help='Print progress every _ images processed')\n\n args = vars(parser.parse_args())\n\n IMG_DIR_PATH = args['img_dir']\n normalize = args['normalize']\n print_every = args['print_every']\n\n ## Metrics\n rh_scores = [] # RH\n rh_grayscale_scores = [] # RH (Grayscale)\n proposed_rh_scores = [] # Proposed RH\n proposed_rh_grayscale_scores = [] # Proposed RH (Grayscale)\n seg_scores = [] # Segmentation Accuracy\n seg_grayscale_scores = [] # Segmentation Accuracy (Grayscale)\n iou_scores = [] # IoU\n iou_grayscale_scores = [] # IoU (Grayscale)\n sn_scores = [] # SN(x) - x is the default sigma that SN photos were generated with\n sn_grayscale_scores = [] # SN(x) (Grayscale)\n\n # Collect image file names from IMG_DIR_PATH\n walk_dir_obj = os.walk(IMG_DIR_PATH)\n root, dirs, files = next(walk_dir_obj)\n image_files = sorted(files)\n if '.DS_Store' in image_files:\n image_files.remove('.DS_Store')\n\n # Calculate metrics for each image. Image sets come in batches of IMG_BATCH_SIZE files\n for i in range(0, len(image_files), IMG_BATCH_SIZE):\n if i / IMG_BATCH_SIZE % print_every == 0:\n print(i / IMG_BATCH_SIZE)\n ### (A) Prepare Images\n # Collect image names by A/B category\n imagesA, imagesB = [], []\n for j in range(0, IMG_BATCH_SIZE, 2):\n imagesA.append(image_files[i + j])\n for j in range(1, IMG_BATCH_SIZE, 2):\n imagesB.append(image_files[i + j])\n\n # Open images\n for i in range(len(imagesA)):\n imagesA[i] = Image.open(os.path.join(IMG_DIR_PATH, imagesA[i]))\n for i in range(len(imagesB)):\n imagesB[i] = Image.open(os.path.join(IMG_DIR_PATH, imagesB[i]))\n\n # Grayscale numpy arrays\n gimagesA, gimagesB = [], []\n for i in range(len(imagesA)):\n gimagesA.append(np.array(imagesA[i].convert(\"L\")))\n for i in range(len(imagesB)):\n gimagesB.append(np.array(imagesB[i].convert(\"L\")))\n\n # RGB numpy arrays\n for i in range(len(imagesA)):\n imagesA[i] = np.array(imagesA[i])\n for i in range(len(imagesB)):\n imagesB[i] = np.array(imagesB[i])\n\n # Normalize if specified\n if normalize:\n for i in range(len(imagesA)):\n imagesA[i] = imagesA[i]/255.0\n for i in range(len(imagesB)):\n imagesB[i] = imagesB[i]/255.0\n for i in range(len(gimagesA)):\n gimagesA[i] = gimagesA[i]/255.0\n for i in range(len(gimagesB)):\n gimagesB[i] = gimagesB[i]/255.0\n\n # Unpack images\n fakeA, qefakeA, qerealA, realA, recA, qerecA, snrecA = imagesA\n fakeB, qefakeB, qerealB, realB, recB, qerecB, snrecB = imagesB\n # fakeA, qefakeA, qerealA, qpfakeA, qprealA, realA, recA, qerecA, qprecA, snrecA = imagesA\n\n gfakeA, gqefakeA, gqerealA, grealA, grecA, gqerecA, gsnrecA = gimagesA\n gfakeB, gqefakeB, gqerealB, grealB, grecB, gqerecB, gsnrecB = gimagesB\n\n ### (B) Calculate metrics\n\n ## (B1) RH\n # RGB\n rec_loss = np.linalg.norm(recA - realA) # Normal reconstruction loss of real image (from richer domain)\n qrec_loss = np.linalg.norm(qerecA - realA) # Reconstruction loss using quantized intermediary\n rh_scores.append(qrec_loss - rec_loss)\n 
# Grayscale\n grec_loss = np.linalg.norm(grecA - grealA) # Normal reconstruction loss of real image (from richer domain)\n gqrec_loss = np.linalg.norm(gqerecA - grealA) # Reconstruction loss using quantized intermediary\n rh_grayscale_scores.append(gqrec_loss - grec_loss)\n\n ## (B2) Proposed RH\n # RGB\n trans_loss = np.linalg.norm(fakeA - realA) # Translation loss from one-to-many (input map from poorer domain)\n proposed_rh_scores.append(trans_loss - rec_loss)\n\n # Grayscale\n gtrans_loss = np.linalg.norm(gfakeA - grealA) # Translation loss from one-to-many (input map from poorer domain)\n proposed_rh_grayscale_scores.append(gtrans_loss - grec_loss)\n\n ## (B3) Segmentation Accuracy\n # RGB\n seg_acc = np.mean(qefakeB == qerealB)\n seg_scores.append(seg_acc)\n # Grayscale\n gseg_acc = np.mean(gqefakeB == gqerealB)\n seg_grayscale_scores.append(gseg_acc)\n\n ## (B4) IoU Accuracy\n # RGB\n ious = []\n segments = np.unique(qerealB)\n # if palette_quantization:\n # # Using Palette quantization\n # segments = np.array([[233, 233, 217],\n # [211, 201, 189],\n # [189, 181, 171],\n # [99, 161, 253],\n # [151, 191, 89],\n # [245, 59, -169],\n # [217, 163, 155],\n # [255, 255, 255]\n # ])\n for color in segments:\n intersection = np.sum(np.logical_and(qefakeB == color, qerealB == color))\n union = 256 * 256 * 3\n ious.append(intersection / union)\n iou_scores.append(np.array(ious).mean())\n\n # Grayscale\n gious = []\n gray_segments = np.unique(gqerealB)\n # if palette_quantization:\n # gray_segments = np.array([243, 228, 218, 203, 211, 171, 216, 255])\n for color in gray_segments:\n intersection = np.sum(np.logical_and(gqefakeB == color, gqerealB == color))\n union = 256 * 256\n gious.append(intersection / union)\n iou_grayscale_scores.append(np.array(gious).mean())\n\n ## (B5) SN\n # RGB\n sn_loss = np.linalg.norm(snrecA - recA)\n sn_scores.append(sn_loss)\n\n # Grayscale\n gsn_loss = np.linalg.norm(gsnrecA - grecA)\n sn_grayscale_scores.append(gsn_loss)\n\n ## Print scores\n print('Images were', 'not' if not normalize else '\\b', 'normalized.\\n')\n\n rh_scores = np.array(rh_scores)\n rh_grayscale_scores = np.array(rh_grayscale_scores)\n print('RH Score: ', rh_scores.mean(), \"+-\", rh_scores.std())\n print('RH (Grayscale) Score: ', rh_grayscale_scores.mean(), \"+-\", rh_grayscale_scores.std())\n\n proposed_rh_scores = np.array(proposed_rh_scores)\n proposed_rh_grayscale_scores = np.array(proposed_rh_grayscale_scores)\n print('Proposed RH Score: ', proposed_rh_scores.mean(), \"+-\", proposed_rh_scores.std())\n print('Proposed RH (Grayscale) Score: ', proposed_rh_grayscale_scores.mean(), \"+-\", proposed_rh_grayscale_scores.std())\n\n seg_scores = np.array(seg_scores)\n seg_grayscale_scores = np.array(seg_grayscale_scores)\n print('Segmentation Accuracy: ', seg_scores.mean(), \"+-\", seg_scores.std())\n print('Segmentation (Grayscale) Accuracy: ', seg_grayscale_scores.mean(), \"+-\", seg_grayscale_scores.std())\n\n iou_scores = np.array(iou_scores)\n iou_grayscale_scores = np.array(iou_grayscale_scores)\n print('IoU Accuracy: ', iou_scores.mean(), \"+-\", iou_scores.std())\n print('IoU (Grayscale) Accuracy: ', iou_grayscale_scores.mean(), \"+-\", iou_grayscale_scores.std())\n\n sn_scores = np.array(sn_scores)\n sn_grayscale_scores = np.array(sn_grayscale_scores)\n print('SN evaluated at default sigma:', sn_scores.mean(), \"+-\", sn_scores.std())\n print('SN (Grayscale) evaluated at default sigma:', sn_grayscale_scores.mean(), \"+-\", sn_grayscale_scores.std())\n" ]
[ [ "numpy.logical_and", "numpy.unique", "numpy.linalg.norm", "numpy.mean", "numpy.array" ] ]
chavdim/drl_lab
[ "5d976f11f7487d2c76eec030c15dd52e73d6a48b" ]
[ "tests/test_models.py" ]
[ "from copy import deepcopy\nimport unittest\n\nimport keras\nimport numpy as np\n\nfrom drl_lab.models import (\n build_QCNN,\n QCNN,\n dataset2XY,\n state2data,\n load_model,\n)\nfrom tests.common import (\n nn_hparams,\n get_test_model_path,\n weights_equal,\n)\n\nbatch_size = 10\nobs_shape = (5, 5, 3)\ndataset_num = 100\ndataset = [{\n 'input': np.random.randn(*obs_shape),\n 'output': np.random.permutation([1, 0, 0]),\n} for i in range(dataset_num)]\nstate_shape = obs_shape\nstate = np.random.randn(*state_shape)\nnum_actions = 3\n\n\nclass TestQCNN(unittest.TestCase):\n def setUp(self):\n self.qcnn = QCNN(state_shape, num_actions, nn_hparams)\n\n def test_init(self):\n qcnn = self.qcnn\n nn = qcnn.nn\n\n expected = 9\n self.assertEqual(expected, len(nn.layers))\n expected = [None, *state_shape]\n self.assertEqual(expected, nn.input.get_shape().as_list())\n expected = keras.layers.Conv2D\n self.assertEqual(expected, type(nn.layers[0]))\n expected = nn_hparams['layers'][0][1]\n self.assertEqual(expected, nn.layers[0].filters)\n expected = (nn_hparams['layers'][0][2],)*2\n self.assertEqual(expected, nn.layers[0].kernel_size)\n expected = (nn_hparams['layers'][0][3],)*2\n self.assertEqual(expected, nn.layers[0].strides)\n expected = 'same'\n self.assertEqual(expected, nn.layers[0].padding)\n expected = 'relu'\n self.assertEqual(expected, nn.layers[1].get_config()['activation'])\n expected = keras.layers.Conv2D\n self.assertEqual(expected, type(nn.layers[2]))\n expected = nn_hparams['layers'][1][1]\n self.assertEqual(expected, nn.layers[2].filters)\n expected = (nn_hparams['layers'][1][2],)*2\n self.assertEqual(expected, nn.layers[2].kernel_size)\n expected = (nn_hparams['layers'][1][3],)*2\n self.assertEqual(expected, nn.layers[2].strides)\n expected = 'same'\n self.assertEqual(expected, nn.layers[2].padding)\n expected = 'relu'\n self.assertEqual(expected, nn.layers[3].get_config()['activation'])\n expected = keras.layers.Conv2D\n self.assertEqual(expected, type(nn.layers[4]))\n expected = nn_hparams['layers'][2][1]\n self.assertEqual(expected, nn.layers[4].filters)\n expected = (nn_hparams['layers'][2][2],)*2\n self.assertEqual(expected, nn.layers[4].kernel_size)\n expected = (nn_hparams['layers'][2][3],)*2\n self.assertEqual(expected, nn.layers[4].strides)\n expected = 'same'\n self.assertEqual(expected, nn.layers[4].padding)\n expected = 'relu'\n self.assertEqual(expected, nn.layers[5].get_config()['activation'])\n expected = keras.layers.GlobalAveragePooling2D\n self.assertEqual(expected, type(nn.layers[6]))\n expected = 'dens'\n self.assertEqual(expected, nn.layers[7].name[:4])\n expected = nn_hparams['layers'][4][1]\n self.assertEqual(expected, nn.layers[7].units)\n expected = 'relu'\n self.assertEqual(expected, nn.layers[7].get_config()['activation'])\n expected = 'dens'\n self.assertEqual(expected, nn.layers[8].name[:4])\n expected = num_actions\n self.assertEqual(expected, nn.layers[8].units)\n expected = 'linear'\n self.assertEqual(expected, nn.layers[8].get_config()['activation'])\n expected = keras.optimizers.RMSprop\n self.assertEqual(expected, type(nn.optimizer))\n expected = keras.losses.mean_squared_error\n self.assertEqual(expected, nn.loss)\n expected = [None, num_actions]\n self.assertEqual(expected, nn.output.get_shape().as_list())\n\n def test_train(self):\n qcnn = self.qcnn\n train_X, train_Y = dataset2XY(dataset)\n\n nn_before = keras.models.clone_model(qcnn.nn)\n nn_before.set_weights(qcnn.nn.get_weights())\n qcnn.train(train_X, train_Y, batch_size, epochs=10, 
shuffle=True)\n nn_after = qcnn.nn\n self.assertFalse(weights_equal(nn_before, nn_after))\n\n def test_forward_prop(self):\n qcnn = self.qcnn\n train_X, _ = dataset2XY(dataset)\n\n retval = qcnn.forward_prop(train_X, batch_size)\n expected = len(train_X)\n self.assertEqual(expected, len(retval))\n\n X = train_X[:batch_size]\n retval = qcnn.forward_prop(X, batch_size)\n expected = batch_size\n\n self.assertEqual(expected, len(retval))\n expected = num_actions\n\n self.assertEqual(expected, len(retval[0]))\n\n def test_copy_model(self):\n qcnn = self.qcnn\n\n target = keras.models.clone_model(qcnn.nn)\n target.set_weights(qcnn.nn.get_weights())\n self.assertTrue(weights_equal(qcnn.nn, target))\n\n qcnn_weights = qcnn.nn.get_weights()\n qcnn_weights[0] = qcnn_weights[0]*0.99\n qcnn.nn.set_weights(qcnn_weights)\n self.assertFalse(weights_equal(qcnn.nn, target))\n\n\nclass TestModel(unittest.TestCase):\n def test_build_QCNN(self):\n model = build_QCNN(obs_shape, num_actions, nn_hparams['layers'],\n nn_hparams['learn_rate'], nn_hparams['optimizer'])\n\n expected = 9\n self.assertEqual(expected, len(model.layers))\n\n def test_dataset2XY(self):\n X, Y = dataset2XY(dataset)\n self.assertEqual(len(X), dataset_num)\n self.assertEqual(len(Y), dataset_num)\n self.assertEqual(str(type(X)), \"<class 'numpy.ndarray'>\")\n self.assertEqual(str(type(Y)), \"<class 'numpy.ndarray'>\")\n self.assertEqual(X.shape[1:], dataset[0]['input'].shape)\n self.assertEqual(Y.shape[1:], dataset[0]['output'].shape)\n\n def test_state2data(self):\n data = state2data(state)\n self.assertEqual(data.shape, (1, *state_shape))\n\n def test_load_model(self):\n test_model_path = get_test_model_path()\n\n nn = load_model(test_model_path)\n expected = \"<class 'keras.models.Sequential'>\"\n # assertTrue(expected, msg) always passes; compare the strings instead\n self.assertEqual(expected, str(type(nn)))\n\n _nn_hparams = deepcopy(nn_hparams)\n _nn_hparams['saved_model'] = test_model_path\n qcnn = QCNN(state_shape, num_actions, _nn_hparams)\n expected = \"<class 'keras.models.Sequential'>\"\n self.assertEqual(expected, str(type(qcnn.nn)))\n # TODO: add a more detailed test\n" ]
[ [ "numpy.random.permutation", "numpy.random.randn" ] ]
kperrynrel/rdtools
[ "4ca70e3e2cec85fead10cb8e6ef5e098eeb6f686" ]
[ "rdtools/test/interpolate_test.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom rdtools import interpolate\nimport pytest\n\n\[email protected]\ndef time_series():\n times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:15', freq='15T')\n time_series = pd.Series(data=[9, 6, 3, 3, 6, 9], index=times, name='foo')\n time_series = time_series.drop(times[4])\n return time_series\n\n\[email protected]\ndef target_index(time_series):\n return pd.date_range(time_series.index.min(), time_series.index.max(), freq='20T')\n\n\[email protected]\ndef expected_series(target_index, time_series):\n return pd.Series(data=[9.0, 5.0, 3.0, np.nan], index=target_index, name=time_series.name)\n\n\[email protected]\ndef test_df(time_series):\n time_series1 = time_series.copy()\n time_series2 = time_series.copy()\n\n time_series2.index = time_series2.index + pd.to_timedelta('30 minutes')\n time_series2.name = 'bar'\n\n test_df = pd.concat([time_series1, time_series2], axis=1)\n\n return test_df\n\n\[email protected]\ndef df_target_index(target_index):\n return target_index + pd.to_timedelta('15 minutes')\n\n\[email protected]\ndef df_expected_result(df_target_index, test_df):\n col0 = test_df.columns[0]\n col1 = test_df.columns[1]\n expected_df_result = pd.DataFrame({\n col0: [6.0, 3.0, np.nan, 9.0],\n col1: [np.nan, 8.0, 4.0, 3.0]\n }, index=df_target_index)\n\n expected_df_result = expected_df_result[test_df.columns]\n return expected_df_result\n\n\ndef test_interpolate_freq_specification(time_series, target_index, expected_series):\n # test the string specification\n interpolated = interpolate(time_series, target_index.freq.freqstr,\n pd.to_timedelta('15 minutes'), warning_threshold=0.21)\n pd.testing.assert_series_equal(interpolated, expected_series)\n\n # test the DateOffset specification\n interpolated = interpolate(time_series, target_index.freq, pd.to_timedelta('15 minutes'),\n warning_threshold=0.21)\n pd.testing.assert_series_equal(interpolated, expected_series)\n\n\ndef test_interpolate_calculation(time_series, target_index, expected_series):\n\n interpolated = interpolate(time_series, target_index, pd.to_timedelta('15 minutes'),\n warning_threshold=0.21)\n pd.testing.assert_series_equal(interpolated, expected_series)\n\n\ndef test_interpolate_two_argument(time_series, target_index, expected_series):\n\n expected_series.iloc[-1] = 6.0\n interpolated = interpolate(time_series, target_index)\n pd.testing.assert_series_equal(interpolated, expected_series)\n\n\ndef test_interpolate_tz_validation(time_series, target_index, expected_series):\n with pytest.raises(ValueError):\n interpolate(time_series, target_index.tz_localize('UTC'), pd.to_timedelta('15 minutes'))\n\n time_series = time_series.copy()\n time_series.index = time_series.index.tz_localize('UTC')\n\n with pytest.raises(ValueError):\n interpolate(time_series, target_index, pd.to_timedelta('15 minutes'))\n\n\ndef test_interpolate_same_tz(time_series, target_index, expected_series):\n time_series = time_series.copy()\n expected_series = expected_series.copy()\n\n time_series.index = time_series.index.tz_localize('America/Denver')\n target_index = target_index.tz_localize('America/Denver')\n expected_series.index = expected_series.index.tz_localize('America/Denver')\n\n interpolated = interpolate(time_series, target_index, pd.to_timedelta('15 minutes'),\n warning_threshold=0.21)\n pd.testing.assert_series_equal(interpolated, expected_series)\n\n\ndef test_interpolate_different_tz(time_series, target_index, expected_series):\n time_series = time_series.copy()\n expected_series = 
expected_series.copy()\n\n time_series.index = time_series.index.tz_localize('America/Denver').tz_convert('UTC')\n target_index = target_index.tz_localize('America/Denver')\n expected_series.index = expected_series.index.tz_localize('America/Denver')\n\n interpolated = interpolate(time_series, target_index, pd.to_timedelta('15 minutes'),\n warning_threshold=0.21)\n pd.testing.assert_series_equal(interpolated, expected_series)\n\n\ndef test_interpolate_dataframe(test_df, df_target_index, df_expected_result):\n interpolated = interpolate(test_df, df_target_index, pd.to_timedelta('15 minutes'),\n warning_threshold=0.21)\n pd.testing.assert_frame_equal(interpolated, df_expected_result)\n\n\ndef test_interpolate_warning(test_df, df_target_index, df_expected_result):\n N = len(test_df)\n all_idx = list(range(N))\n # drop every other value in the first third of the dataset\n index_with_gaps = all_idx[:N//3][::2] + all_idx[N//3:]\n test_df = test_df.iloc[index_with_gaps, :]\n with pytest.warns(UserWarning):\n interpolate(test_df, df_target_index, pd.to_timedelta('15 minutes'),\n warning_threshold=0.1)\n\n with pytest.warns(None) as record:\n interpolate(test_df, df_target_index, pd.to_timedelta('15 minutes'),\n warning_threshold=0.5)\n if record:\n pytest.fail(\"normalize.interpolate raised a warning about \"\n \"excluded data even though the threshold was high\")\n" ]
[ [ "pandas.concat", "pandas.testing.assert_series_equal", "pandas.Series", "pandas.DataFrame", "pandas.testing.assert_frame_equal", "pandas.date_range", "pandas.to_timedelta" ] ]
RICE-EIC/Early-Bird-GCN
[ "25a80b23f2ecfc46ffe00b1cf0e06052b32aad0f" ]
[ "run_threshold_jointEB.py" ]
[ "import os.path as osp\nimport argparse\nimport torch\nimport torch.nn.functional as F\nimport torch_geometric.utils.num_nodes as geo_num_nodes\nfrom torch_geometric.datasets import Planetoid\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import GCNConv # noga\nfrom utils import *\nfrom pytorch_train import *\nimport numpy as np\nimport logging\nimport os\nimport random\n\ndef random_label(data):\n # print(\"shuffling graph's label... \")\n labels = data.y.numpy()\n # print(\"label shape = \",labels.shape) #(3327,)\n # print(\"label max = \",np.max(labels)) #5\n # print(\"label min = \",np.min(labels)) #0\n node_num = labels.shape[0]\n labels_cnt = np.zeros((np.max(labels)+1))\n for i in range(np.min(labels),np.max(labels)+1):\n labels_cnt[i] = np.count_nonzero(labels==i)\n labels_cnt = labels_cnt.astype(np.int16)\n # print(labels)\n # print(\"labels shape\",labels.shape)\n # print(labels_cnt) #[264 590 668 701 596 508]\n randomed_labels = np.zeros((node_num)) #(3327)\n for i in range(np.min(labels)+1,np.max(labels)+1): #[1,5]\n for j in range(labels_cnt[i]):\n random_node_id = random.randint(0,node_num-1)\n while(randomed_labels[random_node_id]!=0):\n random_node_id = random.randint(0,node_num-1)\n randomed_labels[random_node_id]=i\n randomed_labels = randomed_labels.astype(np.int16)\n\n for i in range(np.min(randomed_labels),np.max(randomed_labels)+1):\n labels_cnt[i] = np.count_nonzero(randomed_labels==i)\n labels_cnt = labels_cnt.astype(np.int16)\n # print(randomed_labels)\n # print(\"randomed_labels shape\",randomed_labels.shape)\n # print(labels_cnt) #[264 590 668 701 596 508]\n data.y = torch.from_numpy(randomed_labels).long()\n # print(\"shuffling done! \")\n return data\ndef half_dataset(data):\n # print(\"half dataset...\")\n # print(\"data = \",data) # Data(edge_index=[2, 9104], test_mask=[3327], train_mask=[3327], val_mask=[3327], x=[3327, 3703], y=[3327])\n train_mask = data.train_mask.numpy() #120 [0,119]\n # test_mask = data.test_mask.numpy() #1000 [2312,3326]\n # val_mask = data.val_mask.numpy() #500 [120,619]\n train_num = np.count_nonzero(train_mask==True)\n train_mask = np.zeros((train_mask.shape[0]))\n for i in range(int(train_num)):\n if(i<int(train_num/2)):\n train_mask[i] = True\n else:\n train_mask[i] = False\n # print(np.count_nonzero(train_mask==True)) #60 [0,59]\n data.train_mask = torch.from_numpy(train_mask).bool()\n # print(\"half dataset done!\")\n return data\ndef nodeid_shuffled(data):\n # print(\"shuffle dataset...\")\n # print(\"data = \",data) # Data(edge_index=[2, 9104], test_mask=[3327], train_mask=[3327], val_mask=[3327], x=[3327, 3703], y=[3327])\n node_num = data.train_mask.shape[0]\n node_id_map = random.sample(range(node_num),node_num)\n # change node id edge \n edge_index = data.edge_index.numpy() #120 [0,119]\n for i in range(edge_index.shape[0]): \n for j in range(edge_index.shape[1]):\n edge_index[i][j] = node_id_map[edge_index[i][j]]\n data.edge_index = torch.from_numpy(edge_index).long()\n # change test mask\n test_mask = data.test_mask.numpy()\n new_test_mask = np.zeros(test_mask.shape)\n for i in range(test_mask.shape[0]):\n if (test_mask[i]==True):\n new_test_mask[node_id_map[i]] = True\n data.test_mask = torch.from_numpy(new_test_mask).bool()\n # change train mask\n train_mask = data.train_mask.numpy()\n new_train_mask = np.zeros(train_mask.shape)\n for i in range(train_mask.shape[0]):\n if (train_mask[i]==True):\n new_train_mask[node_id_map[i]] = True\n data.train_mask = torch.from_numpy(new_train_mask).bool()\n # 
change val mask\n val_mask = data.val_mask.numpy()\n new_val_mask = np.zeros(val_mask.shape)\n for i in range(val_mask.shape[0]):\n if (val_mask[i]==True):\n new_val_mask[node_id_map[i]] = True\n data.val_mask = torch.from_numpy(new_val_mask).bool()\n # change node feature\n features = data.x.numpy() # data.x: [node_num, node_feature]\n new_features = np.zeros(features.shape)\n for i in range(features.shape[0]):\n new_features[node_id_map[i]] = features[i]\n new_features = new_features.astype(np.float32)\n data.x = torch.from_numpy(new_features)\n # change node label\n labels = data.y.numpy()\n map_labels = np.zeros(labels.shape[0])\n for i in range(labels.shape[0]): # i is node id\n map_labels[node_id_map[i]] = labels[i]\n map_labels = map_labels.astype(np.int16)\n data.y = torch.from_numpy(map_labels).long()\n return data\ndef layerwise_rearrange(data): # m.weight.data\n new_data = data.cpu().numpy()\n np.random.shuffle(new_data)\n new_data = torch.from_numpy(new_data)\n return new_data\n\n# Update the gradient of the adjacency matrices\n# grads_vars: {name: torch.Tensor}\ndef update_gradients_adj(grads_vars, adj_mask):\n temp_grad_adj1 = 0\n var1 = None\n var2 = None\n temp_grad_adj2 = 0\n for key,var in grads_vars.items():\n grad = var.grad\n if key == \"support1\":\n temp_grad_adj = adj_mask * grad\n transposed_temp_grad_adj = torch.transpose(temp_grad_adj,1,0)\n temp_grad_adj1 = temp_grad_adj + transposed_temp_grad_adj\n var1 = var\n if key == \"support2\":\n temp_grad_adj = adj_mask * grad\n transposed_temp_grad_adj = torch.transpose(temp_grad_adj,1,0)\n temp_grad_adj2 = temp_grad_adj + transposed_temp_grad_adj\n var2 = var\n grad_adj = (temp_grad_adj1 + temp_grad_adj2) / 4 # Why are we doing this?\n var1.grad = grad_adj\n var2.grad = grad_adj\n return [var1,var2]\ndef prune_adj(oriadj:torch.Tensor, non_zero_idx:int, percent:int) -> torch.Tensor:\n original_prune_num = int(((non_zero_idx - oriadj.shape[0]) / 2) * (percent / 100))\n adj = np.copy(oriadj.detach().cpu().numpy())\n # print(f\"Pruning {percent}%\")\n low_adj = np.tril(adj, -1)\n non_zero_low_adj = low_adj[low_adj != 0]\n\n low_pcen = np.percentile(abs(non_zero_low_adj), percent)\n under_threshold = abs(low_adj) < low_pcen\n before = len(non_zero_low_adj)\n low_adj[under_threshold] = 0\n non_zero_low_adj = low_adj[low_adj != 0]\n after = len(non_zero_low_adj)\n\n rest_pruned = original_prune_num - (before - after)\n # print(adj.shape[0],original_prune_num,before,after, before-after)\n if rest_pruned > 0:\n mask_low_adj = (low_adj != 0)\n low_adj[low_adj == 0] = 2000000\n flat_indices = np.argpartition(low_adj.ravel(), rest_pruned - 1)[:rest_pruned]\n row_indices, col_indices = np.unravel_index(flat_indices, low_adj.shape)\n low_adj = np.multiply(low_adj, mask_low_adj)\n low_adj[row_indices, col_indices] = 0\n adj = low_adj + np.transpose(low_adj)\n adj = np.add(adj, np.identity(adj.shape[0]))\n return torch.from_numpy(adj).to(device)\ndef get_mask(oriadj:torch.Tensor, non_zero_idx:int, percent:int) -> torch.Tensor:\n original_prune_num = int(((non_zero_idx - oriadj.shape[0]) / 2) * (percent / 100))\n adj = np.copy(oriadj.detach().cpu().numpy())\n # print(f\"Pruning {percent}%\")\n low_adj = np.tril(adj, -1)\n non_zero_low_adj = low_adj[low_adj != 0]\n\n low_pcen = np.percentile(abs(non_zero_low_adj), percent)\n under_threshold = abs(low_adj) < low_pcen\n before = len(non_zero_low_adj)\n low_adj[under_threshold] = 0\n non_zero_low_adj = low_adj[low_adj != 0]\n after = len(non_zero_low_adj)\n\n rest_pruned = 
original_prune_num - (before - after)\n # print(adj.shape[0],original_prune_num,before,after, before-after)\n if rest_pruned > 0:\n mask_low_adj = (low_adj != 0)\n low_adj[low_adj == 0] = 2000000\n flat_indices = np.argpartition(low_adj.ravel(), rest_pruned - 1)[:rest_pruned]\n row_indices, col_indices = np.unravel_index(flat_indices, low_adj.shape)\n low_adj = np.multiply(low_adj, mask_low_adj)\n low_adj[row_indices, col_indices] = 0\n new_adj = low_adj + np.transpose(low_adj)\n new_adj = np.add(new_adj, np.identity(new_adj.shape[0]))\n return 1 - (new_adj != adj)\n\ndef calc_dist(m1,m2):\n return np.abs(m1 - m2).sum()\n\ndef post_processing():\n # print(\"here in post_processing\")\n adj1,adj2 = model.adj1, model.adj2\n adj1 = prune_adj(adj1 - id1, non_zero_idx, args.ratio_graph)\n adj2 = prune_adj(adj2 - id2, non_zero_idx, args.ratio_graph)\n model.adj1 = adj1\n model.adj2 = adj2\n\n # print(\"Optimization Finished!\")\n train_acc, val_acc, tmp_test_acc = test(model, data)\n log = 'After tune results: Ratio: {:d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'\n # print(log.format(args.ratio, train_acc, val_acc, tmp_test_acc))\n log_4_test = 'Tune Ratio: {:d}'\n # print(log_4_test.format(args.ratio))\n cur_adj1 = model.adj1.cpu().numpy()\n cur_adj2 = model.adj2.cpu().numpy()\n # torch.save({\"state_dict\":model.state_dict(),\"adj\":cur_adj1}, f\"./graph_pruned_eb_pytorch/{args.save_file}\")\nparser = argparse.ArgumentParser()\nparser.add_argument('--times', type=int, default=100)\nparser.add_argument('--epochs', type=int, default=1)\nparser.add_argument('--ratio_graph', type=int, default=0)\nparser.add_argument('--ratio_weight', type=int, default=90)\nparser.add_argument('--use_gdc', type=bool, default=False)\nparser.add_argument('--save_file', type=str, default=\"model.pth.tar\")\nparser.add_argument('--lookback', type=int, default=3)\nparser.add_argument(\"--thres\", type=float, default=0.1)\nparser.add_argument(\"--dataset\", type=str, default=\"Pubmed\")\nparser.add_argument(\"--log\", type=str, default=\"{:05d}\")\nparser.add_argument(\"--is_random_label\", type=int, default=0)\nparser.add_argument(\"--is_half_dataset\", type=int, default=0)\nparser.add_argument(\"--is_nodeid_shuffled\", type=int, default=0)\nparser.add_argument(\"--is_layerwise_rearrange\", type=int, default=0)\nparser.add_argument(\"--is_ticket\", type=int, default=-1) # -1 means GEBT. [0,20] means other tickets. 
Initial ticket = 0, partially trained = 49\nparser.add_argument(\"--is_random_prune\", type=int, default=0)\nparser.add_argument(\"--is_smart_ratio\", type=int, default=0)\nparser.add_argument(\"--is_need_thres\", type=int, default=0)\nparser.add_argument(\"--is_need_retrain_acc\", type=int, default=0)\n\nargs = parser.parse_args()\n\ng_ratio = args.ratio_graph\nw_ratio = args.ratio_weight\nmodels = [\"pruned_pytorch/model.pth.tar\",\"prune_weight_cotrain/model.pth.tar\",\"prune_weight_iterate/model.pth.tar\",\"prune_weight_first/model.pth.tar\"]\nres_list = []\ng_r_list = [20,40,60,80]\nw_r_list = [50,70,90]\n# test without pretrain, prune from scratch\npretrain_model_name = \"./pretrain_pytorch/\"+str(args.dataset)+\"_model.pth.tar\"\nos.system(\"rm \"+pretrain_model_name)\nos.system(\"python3 \"+\"pytorch_train.py\"+\" --epochs \"+str(1)+\" --dataset \"+str(args.dataset))\n# run coop and find joint EB\nexit_flag = 0\njEB = 100\n\ndataset = args.dataset\nlogging.basicConfig(filename=f\"test_{dataset}_mask_change_even.txt\",level=logging.DEBUG)\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)\ndataset = Planetoid(path, dataset, transform=T.NormalizeFeatures()) # CiteSeer()\n# print(f\"Number of graphs in {dataset} dataset:\", len(dataset)) # len=1\ndata = dataset[0] # CiteSeer: Data(edge_index=[2, 9104], test_mask=[3327], train_mask=[3327], val_mask=[3327], x=[3327, 3703], y=[3327])\nif(args.is_random_label==1):\n data = random_label(data)\nif(args.is_half_dataset==1):\n data = half_dataset(data)\nif(args.is_nodeid_shuffled==1):\n data = nodeid_shuffled(data)\n\n\n\nmodel, data = Net(dataset, data, args).to(device), data.to(device)\ncheckpoint = torch.load(pretrain_model_name)\nmodel.load_state_dict(checkpoint)\nloss = lambda m: F.nll_loss(m()[data.train_mask], data.y[data.train_mask])\n# print(\"construct admm training\")\nsupport1 = model.adj1\nsupport2 = model.adj2\npartial_adj_mask = support1.cpu().numpy()\n# print(\"num of edges * 2 + diag in adj:\", np.count_nonzero(partial_adj_mask))\nadj_variables = [support1,support2]\nrho = 1e-3\nnon_zero_idx = np.count_nonzero(support1.cpu().numpy())\nZ1 = U1 = Z2 = U2 = torch.from_numpy(np.zeros_like(partial_adj_mask)).to(device)\nmodel.adj1.requires_grad = True\nmodel.adj2.requires_grad = True\nadj_mask = torch.from_numpy(partial_adj_mask).to(device)\nid1 = torch.eye(support1.shape[0]).to(device)\nid2 = torch.eye(support2.shape[0]).to(device)\n# Define new loss function\nadmm_loss = lambda m: loss(m) + \\\n rho * (F.mse_loss(support1 + U1, Z1 + id1) +\n F.mse_loss(support2 + U2, Z2 + id2))\nadj_optimizer = torch.optim.Adam(adj_variables,lr=0.001)\nweight_optimizer = torch.optim.Adam([\n dict(params=model.conv1.parameters(), weight_decay=5e-4),\n dict(params=model.conv2.parameters(), weight_decay=0)\n], lr=0.01)\nadj_map = {\"support1\": support1, \"support2\": support2}\n\nbest_prune_acc = 0\nlookbacks = []\ncounter = 0\npre3_mask1 = np.zeros((3703, 16))\npre3_mask2 = np.zeros((16, 6))\npre2_mask1 = np.zeros((3703, 16))\npre2_mask2 = np.zeros((16, 6))\npre1_mask1 = np.zeros((3703, 16))\npre1_mask2 = np.zeros((16, 6))\nweight_norm_baseline = -1\ngraph_norm_baseline = -1\ntotal_dist = 0\ngraph_dist = 0\nprint('times:%3d epochs:%3d dataset:%10s graph ratio:%2d weight ratio:%2d'%(args.times,args.epochs,args.dataset,g_ratio,w_ratio))\n\n# start pruning iterately, since the times*epoch is uncertain, set epoch = 1, just change times, \n# which means graph will be pruned more frequently, the graph dist will change more 
dramatically. \n\n\nepoch_cnt = -1\nsaved_ticket_weights = np.zeros((1,1))\nfor time in range(args.times):\n for update_epoch in range(args.epochs):\n epoch_cnt += 1\n #STEP1: warm up & update weight optimizer\n model.train()\n weight_optimizer.zero_grad()\n # Calculate gradient\n admm_loss(model).backward(retain_graph=True)\n weight_optimizer.step()\n\n train_acc, val_acc, tmp_test_acc = test(model, data)\n if val_acc > best_prune_acc:\n best_prune_acc = val_acc\n test_acc = tmp_test_acc\n log = 'Pruning Time-Epoch: {:03d}-{:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'\n\n #STEP2: prune weight & compute total_dist\n total = 0\n total_layer1 = 0\n total_layer2 = 0\n for k, m in enumerate(model.modules()):\n if isinstance(m, GCNConv):\n total += m.weight.data.numel() # number of elements\n if(k==1):\n total_layer1 += m.weight.data.numel()\n elif(k==2):\n total_layer2 += m.weight.data.numel()\n\n conv_weights = torch.zeros(total)\n ticket_conv_weights = torch.zeros(total)\n layer1_conv_weights = torch.zeros(total_layer1)\n layer2_conv_weights = torch.zeros(total_layer2)\n index = 0\n for k, m in enumerate(model.modules()):\n if isinstance(m, GCNConv):\n if(k==1):\n layer1_conv_weights[0:total_layer1] = m.weight.data.view(-1).abs().clone()\n elif(k==2):\n layer2_conv_weights[0:total_layer2] = m.weight.data.view(-1).abs().clone()\n size = m.weight.data.numel()\n conv_weights[index:(index + size)] = m.weight.data.view(-1).abs().clone()\n ticket_conv_weights[index:(index + size)] = m.weight.data.view(-1).clone()\n index += size\n \n y, i = torch.sort(conv_weights) # get the weight's global prioroty. y:sorted matrix i:index\n y1, i1 = torch.sort(layer1_conv_weights)\n y2, i2 = torch.sort(layer2_conv_weights)\n thre_index = int(total * args.ratio_weight / 100) # number of weights to be pruned\n thre = y[thre_index] # get threshold value\n\n thre_index1 = int(total_layer1 * args.ratio_weight / 100 / 4) # number of weights to be pruned\n thre1 = y1[thre_index1] # get threshold value\n thre_index2 = int(total_layer2 * args.ratio_weight * 3 / 100 / 4) # number of weights to be pruned\n thre2 = y2[thre_index2] # get threshold value\n\n\n pruned = 0\n # print('Pruning threshold: {}'.format(thre))\n zero_flag = False\n # print(model.conv1.weight.data)\n\n # save ticket's weight\n if(args.is_ticket!=-1 and epoch_cnt==args.is_ticket):\n # print(\"saved ticket's weights at epoch \",epoch_cnt)\n saved_ticket_weights = ticket_conv_weights\n # print(\"saved_ticket_weights shape = \",saved_ticket_weights.shape)\n \n # if use random prune and smart ratio\n if(args.is_random_prune==1 and args.is_smart_ratio==1): # shuffle them and prune front p%\n for k, m in enumerate(model.modules()):\n if isinstance(m, GCNConv):\n # do layerwise weight rearrange\n weight_data = m.weight.data\n weight_data = layerwise_rearrange(weight_data)\n m.weight.data = weight_data\n\n weight_copy = m.weight.data.abs().clone()\n mask = np.zeros(weight_copy.cpu().numpy().shape)\n if(k==1):\n mask = mask.reshape(-1)\n for index in range(thre_index1):\n mask[index] = 1\n mask = mask.reshape(weight_copy.cpu().numpy().shape)\n mask = torch.from_numpy(mask).float().to(device) # get prune mask\n mask_np = mask.cpu().numpy()\n current_mask1 = mask_np\n ticket_final_mask1 = mask\n elif(k==2):\n mask = mask.reshape(-1)\n for index in range(thre_index2):\n mask[index] = 1\n mask = mask.reshape(weight_copy.cpu().numpy().shape)\n mask = torch.from_numpy(mask).float().to(device) # get prune mask\n mask_np = mask.cpu().numpy()\n current_mask2 = 
mask_np\n ticket_final_mask2 = mask\n m.weight.data.mul_(mask) # prune weight through mask\n elif(args.is_random_prune!=1 and args.is_smart_ratio==1):\n for k, m in enumerate(model.modules()):\n if isinstance(m, GCNConv):\n if(k==1):\n weight_copy = m.weight.data.abs().clone()\n mask = weight_copy.gt(thre1).float().to(device) # if smaller than thre, set 0\n mask_np = mask.cpu().numpy()\n current_mask1 = mask_np\n ticket_final_mask1 = mask\n elif(k==2):\n weight_copy = m.weight.data.abs().clone()\n mask = weight_copy.gt(thre2).float().to(device) # if smaller than thre, set 0\n mask_np = mask.cpu().numpy()\n current_mask2 = mask_np\n ticket_final_mask2 = mask\n m.weight.data.mul_(mask) # prune weight through mask\n else:\n for k, m in enumerate(model.modules()):\n if isinstance(m, GCNConv):\n # do layerwise weight rearrange\n if(args.is_layerwise_rearrange==1):\n weight_data = m.weight.data\n weight_data = layerwise_rearrange(weight_data)\n m.weight.data = weight_data\n weight_copy = m.weight.data.abs().clone()\n mask = weight_copy.gt(thre).float().to(device) # if smaller than thre, set 0\n mask_np = mask.cpu().numpy()\n if(k==1):\n current_mask1 = mask_np\n ticket_final_mask1 = mask\n elif(k==2):\n current_mask2 = mask_np\n ticket_final_mask2 = mask\n m.weight.data.mul_(mask) # prune weight through mask\n\n if (epoch_cnt==0):\n pre1_mask1 = current_mask1\n pre1_mask2 = current_mask2\n elif (epoch_cnt==1):\n pre2_mask1 = pre1_mask1\n pre2_mask2 = pre1_mask2\n pre1_mask1 = current_mask1\n pre1_mask2 = current_mask2\n elif (epoch_cnt==2):\n pre3_mask1 = pre2_mask1\n pre3_mask2 = pre2_mask2\n pre2_mask1 = pre1_mask1\n pre2_mask2 = pre1_mask2\n pre1_mask1 = current_mask1\n pre1_mask2 = current_mask2\n else:\n dist_pre1_mask1 = calc_dist(pre1_mask1,current_mask1)\n dist_pre1_mask2 = calc_dist(pre1_mask2,current_mask2)\n dist_pre2_mask1 = calc_dist(pre2_mask1,current_mask1)\n dist_pre2_mask2 = calc_dist(pre2_mask2,current_mask2)\n dist_pre3_mask1 = calc_dist(pre3_mask1,current_mask1)\n dist_pre3_mask2 = calc_dist(pre3_mask2,current_mask2)\n dist_mask1 = np.max([dist_pre1_mask1,dist_pre2_mask1,dist_pre3_mask1])\n dist_mask2 = np.max([dist_pre1_mask2,dist_pre2_mask2,dist_pre3_mask2])\n total_dist = dist_mask1 + dist_mask2\n # print('total_dist_before = ',total_dist)\n if (weight_norm_baseline==-1 or weight_norm_baseline==0):\n weight_norm_baseline = total_dist # set the first total_dist value to be norm 1\n # print('weight_norm_baseline = ',weight_norm_baseline)\n total_dist /= weight_norm_baseline\n pre3_mask1 = pre2_mask1\n pre3_mask2 = pre2_mask2\n pre2_mask1 = pre1_mask1\n pre2_mask2 = pre1_mask2\n pre1_mask1 = current_mask1\n pre1_mask2 = current_mask2\n\n #STEP3: update graph optimizer & compute graph dist\n model.train()\n adj_optimizer.zero_grad()\n # Calculate gradient\n admm_loss(model).backward(retain_graph=True)\n # Update to correct gradient\n update_gradients_adj(adj_map, adj_mask)\n # Use the optimizer to update adjacency matrix\n adj_optimizer.step()\n train_acc, val_acc, tmp_test_acc = test(model, data)\n if val_acc > best_prune_acc:\n best_prune_acc = val_acc\n test_acc = tmp_test_acc\n cur_mask = get_mask(model.adj1 - id1, non_zero_idx, args.ratio_graph) \n if len(lookbacks) < args.lookback:\n lookbacks.append(cur_mask)\n else:\n can_return = False \n total = 0\n for mask in lookbacks:\n dist = calc_dist(mask, cur_mask) / cur_mask.size \n total = max(calc_dist(mask, cur_mask),total)\n if dist > args.thres:\n can_return = False\n # bre\n logging.info(args.log.format(total)) # 
Here\n # print('total_before = ',total)\n if(graph_norm_baseline==-1 or graph_norm_baseline==0):\n graph_norm_baseline = total\n # print('graph_norm_baseline = ',graph_norm_baseline)\n total /= graph_norm_baseline\n graph_dist = total\n lookbacks = lookbacks[1:]\n lookbacks.append(cur_mask)\n torch.save(cur_mask, f\"./masks/{args.dataset}_{args.ratio_graph}_{counter}_mask\")\n counter += 1\n #STEP4: update U,Z\n adj1,adj2 = model.adj1, model.adj2\n Z1 = adj1 - id1 + U1\n Z1 = prune_adj(Z1,non_zero_idx,args.ratio_graph) - id1\n U1 = U1 + (adj1 - id1 - Z1)\n Z2 = adj2 - id2 + U2\n Z2 = prune_adj(Z2,non_zero_idx,args.ratio_graph) - id2\n U2 = U2 + (adj2 - id2 - Z2)\n #STEP5: compute joint value\n if(args.ratio_graph==0):\n joint_value = total_dist\n elif(args.ratio_weight==0):\n joint_dist = graph_dist\n else:\n joint_value = np.mean([total_dist,graph_dist])\n # print('epoch = %2d, w_dist = %.3f, g_dist = %.3f, joint_dist = %.3f'%(epoch_cnt,total_dist,graph_dist,joint_value))\n \n #STEP6: find jointEB\n if(args.is_need_thres==1 and epoch_cnt>5 and joint_value<args.thres):\n # recover to ticket's weight at final epoch\n if(args.is_ticket!=-1):\n index = 0\n for m in model.modules():\n if isinstance(m, GCNConv):\n size = m.weight.data.numel()\n m.weight.data = saved_ticket_weights[index:(index + size)].view(m.weight.data.shape).clone().to(device)\n index += size\n # apply previous mask\n for k, m in enumerate(model.modules()):\n if isinstance(m, GCNConv):\n # do layerwise weight rearrange\n if(args.is_layerwise_rearrange==1):\n weight_data = m.weight.data\n weight_data = layerwise_rearrange(weight_data)\n m.weight.data = weight_data\n if(k==1):\n m.weight.data.mul_(ticket_final_mask1) # prune weight through mask\n elif(k==2):\n m.weight.data.mul_(ticket_final_mask2) # prune weight through mask\n # print(\"recoverd saved ticket's weights at final epoch\")\n\n print('EB found! 
thres = %.2f, current epoch:%2d'%(args.thres, epoch_cnt))\n # prune graph\n adj1,adj2 = model.adj1, model.adj2\n adj1 = prune_adj(adj1 - id1, non_zero_idx, args.ratio_graph)\n adj2 = prune_adj(adj2 - id2, non_zero_idx, args.ratio_graph)\n model.adj1 = adj1\n model.adj2 = adj2\n # test acc\n train_acc, val_acc, tmp_test_acc = test(model, data)\n log = 'After tune results: Ratio: {:d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'\n # print(log.format(args.ratio, train_acc, val_acc, tmp_test_acc))\n log_4_test = 'Tune Ratio: {:d}'\n # print(log_4_test.format(args.ratio))\n cur_adj1 = model.adj1.cpu().numpy()\n cur_adj2 = model.adj2.cpu().numpy()\n jEB = epoch_cnt\n model_name = \"jointEB_Gr\"+str(g_ratio)+\"_Wr\"+str(w_ratio)+\"_E\"+str(jEB)+\"_model.pth.tar\"\n torch.save({\"state_dict\":model.state_dict(),\"adj\":cur_adj1}, f\"./jointEB_pruned_pytorch/\"+model_name)\n exit_flag = 1\n elif(args.is_need_thres==0):\n exit_flag = 0\n if(exit_flag==1):\n break\n if(exit_flag==1):\n break\n\nif(args.is_ticket!=-1 and exit_flag==0): # recover to ticket's weight at final epoch\n index = 0\n for m in model.modules():\n if isinstance(m, GCNConv):\n size = m.weight.data.numel()\n m.weight.data = saved_ticket_weights[index:(index + size)].view(m.weight.data.shape).clone().to(device)\n index += size\n # apply previous mask\n for k, m in enumerate(model.modules()):\n if isinstance(m, GCNConv):\n # do layerwise weight rearrange\n if(args.is_layerwise_rearrange==1):\n weight_data = m.weight.data\n weight_data = layerwise_rearrange(weight_data)\n m.weight.data = weight_data\n if(k==1):\n m.weight.data.mul_(ticket_final_mask1) # prune weight through mask\n elif(k==2):\n m.weight.data.mul_(ticket_final_mask2) # prune weight through mask\n # print(\"recoverd saved ticket's weights at final epoch\")\n jEB = 0\n cur_adj1 = model.adj1.cpu().detach().numpy()\n model_name = \"jointEB_Gr\"+str(g_ratio)+\"_Wr\"+str(w_ratio)+\"_E\"+str(jEB)+\"_model.pth.tar\"\n torch.save({\"state_dict\":model.state_dict(),\"adj\":cur_adj1}, f\"./jointEB_pruned_pytorch/\"+model_name)\n exit_flag = 1\n\nif(args.times==0 and args.epochs==0):\n jEB = 0\n cur_adj1 = model.adj1.cpu().detach().numpy()\n model_name = \"jointEB_Gr\"+str(g_ratio)+\"_Wr\"+str(w_ratio)+\"_E\"+str(jEB)+\"_model.pth.tar\"\n torch.save({\"state_dict\":model.state_dict(),\"adj\":cur_adj1}, f\"./jointEB_pruned_pytorch/\"+model_name)\n exit_flag = 1\nif(args.is_smart_ratio==1):\n jEB = 0\n cur_adj1 = model.adj1.cpu().detach().numpy()\n model_name = \"jointEB_Gr\"+str(g_ratio)+\"_Wr\"+str(w_ratio)+\"_E\"+str(jEB)+\"_model.pth.tar\"\n torch.save({\"state_dict\":model.state_dict(),\"adj\":cur_adj1}, f\"./jointEB_pruned_pytorch/\"+model_name)\n exit_flag = 1\n# retrain to test jointEB acc\nif(exit_flag==1 and args.is_need_retrain_acc==1):\n # print(\"test retrain acc#######\")\n model_name = \"jointEB_pruned_pytorch/\"+\"jointEB_Gr\"+str(g_ratio)+\"_Wr\"+str(w_ratio)+\"_E\"+str(jEB)+\"_model.pth.tar\"\n os.system(\"python3 \"+\"pytorch_retrain_with_graph.py\"+\" --load_path \"+model_name+\" --dataset \"+str(args.dataset))\n\n\n\n\n\n\n\n" ]
[ [ "torch.transpose", "torch.load", "torch.zeros", "numpy.max", "numpy.zeros_like", "numpy.mean", "numpy.tril", "torch.save", "torch.eye", "torch.from_numpy", "torch.sort", "numpy.count_nonzero", "numpy.unravel_index", "numpy.zeros", "torch.optim.Adam", "numpy.multiply", "numpy.min", "torch.nn.functional.mse_loss", "numpy.identity", "numpy.transpose", "numpy.abs", "numpy.random.shuffle" ] ]
dulacp/lifelines
[ "0770bbb6179bbe8f3ce4acec3c0834f3cc0834ee" ]
[ "lifelines/statistics.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom itertools import combinations\n\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\n\nfrom lifelines.utils import group_survival_table_from_events, significance_code\n\n\ndef sample_size_necessary_under_cph(power, ratio_of_participants, p_exp, p_con,\n postulated_hazard_ratio, alpha=0.05):\n \"\"\"\n This computes the sample size for needed power to compare two groups under a Cox\n Proportional Hazard model.\n\n References:\n https://cran.r-project.org/web/packages/powerSurvEpi/powerSurvEpi.pdf\n\n Parameters:\n power: power to detect the magnitude of the hazard ratio as small as that specified by postulated_hazard_ratio.\n ratio_of_participants: ratio of participants in experimental group over control group.\n p_exp: probability of failure in experimental group over period of study.\n p_con: probability of failure in control group over period of study\n postulated_hazard_ratio: the postulated hazard ratio\n alpha: type I error rate\n\n Returns:\n n_exp, n_con: the samples sizes need for the experiment and control group, respectively, to achieve desired power\n \"\"\"\n def z(p):\n return stats.norm.ppf(p)\n\n m = 1.0 / ratio_of_participants \\\n * ((ratio_of_participants * postulated_hazard_ratio + 1.0) / (postulated_hazard_ratio - 1.0)) ** 2 \\\n * (z(1. - alpha / 2.) + z(power)) ** 2\n\n n_exp = m * ratio_of_participants / (ratio_of_participants * p_exp + p_con)\n n_con = m / (ratio_of_participants * p_exp + p_con)\n\n return int(np.ceil(n_exp)), int(np.ceil(n_con))\n\n\ndef power_under_cph(n_exp, n_con, p_exp, p_con, postulated_hazard_ratio, alpha=0.05):\n \"\"\"\n This computes the power of the hypothesis test that the two groups, experiment and control,\n have different hazards (that is, the relative hazard ratio is different from 1.)\n\n References:\n https://cran.r-project.org/web/packages/powerSurvEpi/powerSurvEpi.pdf\n\n Parameters:\n n_exp: size of the experiment group.\n n_con: size of the control group.\n p_exp: probability of failure in experimental group over period of study.\n p_con: probability of failure in control group over period of study\n postulated_hazard_ratio: the postulated hazard ratio\n alpha: type I error rate\n\n Returns:\n power: power to detect the magnitude of the hazard ratio as small as that specified by postulated_hazard_ratio.\n \"\"\"\n def z(p):\n return stats.norm.ppf(p)\n\n m = n_exp * p_exp + n_con * p_con\n k = float(n_exp) / float(n_con)\n return stats.norm.cdf(np.sqrt(k * m) * abs(postulated_hazard_ratio - 1) / (k * postulated_hazard_ratio + 1) - z(1 - alpha / 2.))\n\n\ndef logrank_test(event_times_A, event_times_B, event_observed_A=None, event_observed_B=None,\n alpha=0.95, t_0=-1, **kwargs):\n \"\"\"\n Measures and reports on whether two intensity processes are different. That is, given two\n event series, determines whether the data generating processes are statistically different.\n The test-statistic is chi-squared under the null hypothesis.\n\n H_0: both event series are from the same generating processes\n H_A: the event series are from different generating processes.\n\n See Survival and Event Analysis, page 108. This implicitly uses the log-rank weights.\n\n Parameters:\n event_times_foo: a (nx1) array of event durations (birth to death,...) for the population.\n censorship_bar: a (nx1) array of censorship flags, 1 if observed, 0 if not. 
Default assumes all observed.\n t_0: the period under observation, -1 for all time.\n alpha: the level of signifiance\n kwargs: add keywords and meta-data to the experiment summary\n\n Returns:\n results: a StatisticalResult object with properties 'p_value', 'summary', 'test_statistic', 'test_result'\n\n \"\"\"\n\n event_times_A, event_times_B = np.array(event_times_A), np.array(event_times_B)\n if event_observed_A is None:\n event_observed_A = np.ones(event_times_A.shape[0])\n if event_observed_B is None:\n event_observed_B = np.ones(event_times_B.shape[0])\n\n event_times = np.r_[event_times_A, event_times_B]\n groups = np.r_[np.zeros(event_times_A.shape[0], dtype=int), np.ones(event_times_B.shape[0], dtype=int)]\n event_observed = np.r_[event_observed_A, event_observed_B]\n return multivariate_logrank_test(event_times, groups, event_observed,\n alpha=alpha, t_0=t_0, **kwargs)\n\n\ndef pairwise_logrank_test(event_durations, groups, event_observed=None,\n alpha=0.95, t_0=-1, bonferroni=True, **kwargs):\n \"\"\"\n Perform the logrank test pairwise for all n>2 unique groups (use the more appropriate logrank_test for n=2).\n We have to be careful here: if there are n groups, then there are n*(n-1)/2 pairs -- so many pairs increase\n the chance that here will exist a significantly different pair purely by chance. For this reason, we use the\n Bonferroni correction (rewight the alpha value higher to accomidate the multiple tests).\n\n\n Parameters:\n event_durations: a (n,) numpy array the (partial) lifetimes of all individuals\n groups: a (n,) numpy array of unique group labels for each individual.\n event_observed: a (n,) numpy array of event_observed events: 1 if observed death, 0 if censored. Defaults\n to all observed.\n alpha: the level of signifiance desired.\n t_0: the final time to compare the series' up to. Defaults to all.\n bonferroni: If true, uses the Bonferroni correction to compare the M=n(n-1)/2 pairs, i.e alpha = alpha/M\n See (here)[http://en.wikipedia.org/wiki/Bonferroni_correction].\n kwargs: add keywords and meta-data to the experiment summary.\n\n Returns:\n R: a (n,n) dataframe of StatisticalResults (None on the diagonal)\n\n \"\"\"\n\n if event_observed is None:\n event_observed = np.ones((event_durations.shape[0], 1))\n\n n = np.max(event_durations.shape)\n assert n == np.max(event_durations.shape) == np.max(event_observed.shape), \"inputs must be of the same length.\"\n groups, event_durations, event_observed = map(lambda x: pd.Series(np.asarray(x).reshape(n,)), [groups, event_durations, event_observed])\n\n unique_groups = np.unique(groups)\n\n n = unique_groups.shape[0]\n\n if bonferroni:\n m = 0.5 * n * (n - 1)\n alpha = 1 - (1 - alpha) / m\n\n R = np.zeros((n, n), dtype=object)\n\n np.fill_diagonal(R, None)\n\n for i1, i2 in combinations(np.arange(n), 2):\n g1, g2 = unique_groups[[i1, i2]]\n ix1, ix2 = (groups == g1), (groups == g2)\n test_name = str(g1) + \" vs. 
\" + str(g2)\n result = logrank_test(event_durations.loc[ix1], event_durations.loc[ix2],\n event_observed.loc[ix1], event_observed.loc[ix2],\n alpha=alpha, t_0=t_0, use_bonferroni=bonferroni,\n test_name=test_name, **kwargs)\n R[i1, i2], R[i2, i1] = result, result\n\n return pd.DataFrame(R, columns=unique_groups, index=unique_groups)\n\n\ndef multivariate_logrank_test(event_durations, groups, event_observed=None,\n alpha=0.95, t_0=-1, **kwargs):\n \"\"\"\n This test is a generalization of the logrank_test: it can deal with n>2 populations (and should\n be equal when n=2):\n\n H_0: all event series are from the same generating processes\n H_A: there exist atleast one group that differs from the other.\n\n Parameters:\n event_durations: a (n,) numpy array of the (partial) lifetimes of all individuals\n groups: a (n,) numpy array of unique group labels for each individual.\n event_observed: a (n,) numpy array of event observations: 1 if observed death, 0 if censored. Defaults\n to all observed.\n alpha: the level of significance desired.\n t_0: the final time to compare the series' up to. Defaults to all.\n kwargs: add keywords and meta-data to the experiment summary.\n\n Returns\n results: a StatisticalResult object with properties 'p_value', 'summary', 'test_statistic', 'test_result'\n\n Example:\n\n >> df = pd.DataFrame({\n 'durations': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],\n 'events': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],\n 'groups': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2]\n })\n >> result = multivariate_logrank_test(df['durations'], df['groups'], df['events'])\n >> result.test_statistic\n >> result.p_value\n\n\n >> # numpy example\n >> G = [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2]\n >> T = [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7]\n >> E = [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0]\n >> result = multivariate_logrank_test(T, G, E)\n >> result.test_statistic\n\n\n\n \"\"\"\n if not (0 < alpha <= 1.):\n raise ValueError('alpha parameter must be between 0 and 1.')\n\n event_durations, groups = np.asarray(event_durations), np.asarray(groups)\n if event_observed is None:\n event_observed = np.ones((event_durations.shape[0], 1))\n else:\n event_observed = np.asarray(event_observed)\n\n n = np.max(event_durations.shape)\n assert n == np.max(event_durations.shape) == np.max(event_observed.shape), \"inputs must be of the same length.\"\n groups, event_durations, event_observed = map(lambda x: pd.Series(np.asarray(x).reshape(n,)), [groups, event_durations, event_observed])\n\n unique_groups, rm, obs, _ = group_survival_table_from_events(groups, event_durations, event_observed, limit=t_0)\n n_groups = unique_groups.shape[0]\n\n # compute the factors needed\n N_j = obs.sum(0).values\n n_ij = (rm.sum(0).values - rm.cumsum(0).shift(1).fillna(0))\n d_i = obs.sum(1)\n n_i = rm.values.sum() - rm.sum(1).cumsum().shift(1).fillna(0)\n ev = n_ij.mul(d_i / n_i, axis='index').sum(0)\n\n # vector of observed minus expected\n Z_j = N_j - ev\n\n assert abs(Z_j.sum()) < 10e-8, \"Sum is not zero.\" # this should move to a test eventually.\n\n # compute covariance matrix\n factor = (((n_i - d_i) / (n_i - 1)).replace([np.inf, np.nan], 1)) * d_i / n_i ** 2\n n_ij['_'] = n_i.values\n V_ = n_ij.mul(np.sqrt(factor), axis='index').fillna(0)\n V = -np.dot(V_.T, V_)\n ix = np.arange(n_groups)\n V[ix, ix] = V[ix, ix] - V[-1, ix]\n V = V[:-1, :-1]\n\n # take the first n-1 groups\n U = Z_j.iloc[:-1].dot(np.linalg.pinv(V[:-1, :-1])).dot(Z_j.iloc[:-1]) # Z.T*inv(V)*Z\n\n # compute the p-values and tests\n test_result, p_value = chisq_test(U, 
n_groups - 1, alpha)\n\n return StatisticalResult(test_result, p_value, U, t_0=t_0,\n alpha=alpha, null_distribution='chi squared',\n df=n_groups - 1, **kwargs)\n\n\nclass StatisticalResult(object):\n\n def __init__(self, test_result, p_value, test_statistic, **kwargs):\n self.p_value = p_value\n self.test_statistic = test_statistic\n\n for kw, value in kwargs.items():\n setattr(self, kw, value)\n\n self._kwargs = kwargs\n\n def print_summary(self):\n print(self.__unicode__())\n\n @property\n def summary(self):\n cols = ['test_statistic', 'p']\n return pd.DataFrame([[self.test_statistic, self.p_value]], columns=cols)\n\n def __repr__(self):\n return \"<lifelines.StatisticalResult: \\n%s\\n>\" % self.__unicode__()\n\n def __unicode__(self):\n meta_data = self._pretty_print_meta_data(self._kwargs)\n df = self.summary\n df[''] = significance_code(self.p_value)\n\n s = \"\"\n s += \"\\n\" + meta_data + \"\\n\\n\"\n s += df.to_string(float_format=lambda f: '{:4.4f}'.format(f), index=False)\n\n s += '\\n---'\n s += \"\\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 \"\n return s\n\n def _pretty_print_meta_data(self, dictionary):\n return \", \".join([str(k) + \"=\" + str(v) for k, v in dictionary.items()])\n\n\ndef chisq_test(U, degrees_freedom, alpha):\n p_value = stats.chi2.sf(U, degrees_freedom)\n if p_value < 1 - alpha:\n return True, p_value\n else:\n return None, p_value\n\n\ndef two_sided_z_test(Z, alpha):\n # use builtin max: np.max(a, b) would treat b as the axis argument\n p_value = 1 - max(stats.norm.cdf(Z), 1 - stats.norm.cdf(Z))\n if p_value < 1 - alpha / 2.:\n return True, p_value\n else:\n return None, p_value\n" ]
[ [ "scipy.stats.norm.ppf", "numpy.dot", "scipy.stats.norm.cdf", "numpy.sqrt", "numpy.unique", "numpy.asarray", "numpy.arange", "pandas.DataFrame", "numpy.ones", "numpy.max", "numpy.ceil", "numpy.linalg.pinv", "numpy.fill_diagonal", "numpy.array", "numpy.zeros", "scipy.stats.chi2.sf" ] ]
observingClouds/xarray
[ "66acafa7f1f1477cfd6c5b7c3458859763433092" ]
[ "xarray/core/indexing.py" ]
[ "import enum\nimport functools\nimport operator\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom datetime import timedelta\nfrom typing import Any, Callable, Iterable, List, Sequence, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import duck_array_ops, nputils, utils\nfrom .npcompat import DTypeLike\nfrom .pycompat import (\n dask_array_type,\n integer_types,\n is_duck_dask_array,\n sparse_array_type,\n)\nfrom .utils import is_dict_like, maybe_cast_to_coords_dtype\n\n\ndef expanded_indexer(key, ndim):\n \"\"\"Given a key for indexing an ndarray, return an equivalent key which is a\n tuple with length equal to the number of dimensions.\n\n The expansion is done by replacing all `Ellipsis` items with the right\n number of full slices and then padding the key with full slices so that it\n reaches the appropriate dimensionality.\n \"\"\"\n if not isinstance(key, tuple):\n # numpy treats non-tuple keys equivalent to tuples of length 1\n key = (key,)\n new_key = []\n # handling Ellipsis right is a little tricky, see:\n # http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n found_ellipsis = False\n for k in key:\n if k is Ellipsis:\n if not found_ellipsis:\n new_key.extend((ndim + 1 - len(key)) * [slice(None)])\n found_ellipsis = True\n else:\n new_key.append(slice(None))\n else:\n new_key.append(k)\n if len(new_key) > ndim:\n raise IndexError(\"too many indices\")\n new_key.extend((ndim - len(new_key)) * [slice(None)])\n return tuple(new_key)\n\n\ndef _expand_slice(slice_, size):\n return np.arange(*slice_.indices(size))\n\n\ndef _sanitize_slice_element(x):\n from .dataarray import DataArray\n from .variable import Variable\n\n if isinstance(x, (Variable, DataArray)):\n x = x.values\n\n if isinstance(x, np.ndarray):\n if x.ndim != 0:\n raise ValueError(\n f\"cannot use non-scalar arrays in a slice for xarray indexing: {x}\"\n )\n x = x[()]\n\n return x\n\n\ndef _asarray_tuplesafe(values):\n \"\"\"\n Convert values into a numpy array of at most 1-dimension, while preserving\n tuples.\n\n Adapted from pandas.core.common._asarray_tuplesafe\n \"\"\"\n if isinstance(values, tuple):\n result = utils.to_0d_object_array(values)\n else:\n result = np.asarray(values)\n if result.ndim == 2:\n result = np.empty(len(values), dtype=object)\n result[:] = values\n\n return result\n\n\ndef _is_nested_tuple(possible_tuple):\n return isinstance(possible_tuple, tuple) and any(\n isinstance(value, (tuple, list, slice)) for value in possible_tuple\n )\n\n\ndef get_indexer_nd(index, labels, method=None, tolerance=None):\n \"\"\"Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional\n labels\n \"\"\"\n flat_labels = np.ravel(labels)\n flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance)\n indexer = flat_indexer.reshape(labels.shape)\n return indexer\n\n\ndef convert_label_indexer(index, label, index_name=\"\", method=None, tolerance=None):\n \"\"\"Given a pandas.Index and labels (e.g., from __getitem__) for one\n dimension, return an indexer suitable for indexing an ndarray along that\n dimension. 
If `index` is a pandas.MultiIndex and depending on `label`,\n return a new pandas.Index or pandas.MultiIndex (otherwise return None).\n \"\"\"\n new_index = None\n\n if isinstance(label, slice):\n if method is not None or tolerance is not None:\n raise NotImplementedError(\n \"cannot use ``method`` argument if any indexers are slice objects\"\n )\n indexer = index.slice_indexer(\n _sanitize_slice_element(label.start),\n _sanitize_slice_element(label.stop),\n _sanitize_slice_element(label.step),\n )\n if not isinstance(indexer, slice):\n # unlike pandas, in xarray we never want to silently convert a\n # slice indexer into an array indexer\n raise KeyError(\n \"cannot represent labeled-based slice indexer for dimension \"\n f\"{index_name!r} with a slice over integer positions; the index is \"\n \"unsorted or non-unique\"\n )\n\n elif is_dict_like(label):\n is_nested_vals = _is_nested_tuple(tuple(label.values()))\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\n \"cannot use a dict-like object for selection on \"\n \"a dimension that does not have a MultiIndex\"\n )\n elif len(label) == index.nlevels and not is_nested_vals:\n indexer = index.get_loc(tuple(label[k] for k in index.names))\n else:\n for k, v in label.items():\n # index should be an item (i.e. Hashable) not an array-like\n if isinstance(v, Sequence) and not isinstance(v, str):\n raise ValueError(\n \"Vectorized selection is not \"\n \"available along level variable: \" + k\n )\n indexer, new_index = index.get_loc_level(\n tuple(label.values()), level=tuple(label.keys())\n )\n\n # GH2619. Raise a KeyError if nothing is chosen\n if indexer.dtype.kind == \"b\" and indexer.sum() == 0:\n raise KeyError(f\"{label} not found\")\n\n elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):\n if _is_nested_tuple(label):\n indexer = index.get_locs(label)\n elif len(label) == index.nlevels:\n indexer = index.get_loc(label)\n else:\n indexer, new_index = index.get_loc_level(\n label, level=list(range(len(label)))\n )\n else:\n label = (\n label\n if getattr(label, \"ndim\", 1) > 1 # vectorized-indexing\n else _asarray_tuplesafe(label)\n )\n if label.ndim == 0:\n # see https://github.com/pydata/xarray/pull/4292 for details\n label_value = label[()] if label.dtype.kind in \"mM\" else label.item()\n if isinstance(index, pd.MultiIndex):\n indexer, new_index = index.get_loc_level(label_value, level=0)\n elif isinstance(index, pd.CategoricalIndex):\n if method is not None:\n raise ValueError(\n \"'method' is not a valid kwarg when indexing using a CategoricalIndex.\"\n )\n if tolerance is not None:\n raise ValueError(\n \"'tolerance' is not a valid kwarg when indexing using a CategoricalIndex.\"\n )\n indexer = index.get_loc(label_value)\n else:\n indexer = index.get_loc(label_value, method=method, tolerance=tolerance)\n elif label.dtype.kind == \"b\":\n indexer = label\n else:\n if isinstance(index, pd.MultiIndex) and label.ndim > 1:\n raise ValueError(\n \"Vectorized selection is not available along \"\n \"MultiIndex variable: \" + index_name\n )\n indexer = get_indexer_nd(index, label, method, tolerance)\n if np.any(indexer < 0):\n raise KeyError(f\"not all values found in index {index_name!r}\")\n return indexer, new_index\n\n\ndef get_dim_indexers(data_obj, indexers):\n \"\"\"Given a xarray data object and label based indexers, return a mapping\n of label indexers with only dimension names as keys.\n\n It groups multiple level indexers given on a multi-index dimension\n into a single, dictionary indexer for that 
dimension (Raise a ValueError\n if it is not possible).\n \"\"\"\n invalid = [\n k\n for k in indexers\n if k not in data_obj.dims and k not in data_obj._level_coords\n ]\n if invalid:\n raise ValueError(f\"dimensions or multi-index levels {invalid!r} do not exist\")\n\n level_indexers = defaultdict(dict)\n dim_indexers = {}\n for key, label in indexers.items():\n (dim,) = data_obj[key].dims\n if key != dim:\n # assume here multi-index level indexer\n level_indexers[dim][key] = label\n else:\n dim_indexers[key] = label\n\n for dim, level_labels in level_indexers.items():\n if dim_indexers.get(dim, False):\n raise ValueError(\n \"cannot combine multi-index level indexers with an indexer for \"\n f\"dimension {dim}\"\n )\n dim_indexers[dim] = level_labels\n\n return dim_indexers\n\n\ndef remap_label_indexers(data_obj, indexers, method=None, tolerance=None):\n \"\"\"Given an xarray data object and label based indexers, return a mapping\n of equivalent location based indexers. Also return a mapping of updated\n pandas index objects (in case of multi-index level drop).\n \"\"\"\n if method is not None and not isinstance(method, str):\n raise TypeError(\"``method`` must be a string\")\n\n pos_indexers = {}\n new_indexes = {}\n\n dim_indexers = get_dim_indexers(data_obj, indexers)\n for dim, label in dim_indexers.items():\n try:\n index = data_obj.indexes[dim]\n except KeyError:\n # no index for this dimension: reuse the provided labels\n if method is not None or tolerance is not None:\n raise ValueError(\n \"cannot supply ``method`` or ``tolerance`` \"\n \"when the indexed dimension does not have \"\n \"an associated coordinate.\"\n )\n pos_indexers[dim] = label\n else:\n coords_dtype = data_obj.coords[dim].dtype\n label = maybe_cast_to_coords_dtype(label, coords_dtype)\n idxr, new_idx = convert_label_indexer(index, label, dim, method, tolerance)\n pos_indexers[dim] = idxr\n if new_idx is not None:\n new_indexes[dim] = new_idx\n\n return pos_indexers, new_indexes\n\n\ndef _normalize_slice(sl, size):\n \"\"\"Ensure that given slice only contains positive start and stop values\n (stop can be -1 for full-size slices with negative steps, e.g. [-10::-1])\"\"\"\n return slice(*sl.indices(size))\n\n\ndef slice_slice(old_slice, applied_slice, size):\n \"\"\"Given a slice and the size of the dimension to which it will be applied,\n index it with another slice to return a new slice equivalent to applying\n the slices sequentially\n \"\"\"\n old_slice = _normalize_slice(old_slice, size)\n\n size_after_old_slice = len(range(old_slice.start, old_slice.stop, old_slice.step))\n if size_after_old_slice == 0:\n # nothing left after applying first slice\n return slice(0)\n\n applied_slice = _normalize_slice(applied_slice, size_after_old_slice)\n\n start = old_slice.start + applied_slice.start * old_slice.step\n if start < 0:\n # nothing left after applying second slice\n # (can only happen for old_slice.step < 0, e.g. 
[10::-1], [20:])\n return slice(0)\n\n stop = old_slice.start + applied_slice.stop * old_slice.step\n if stop < 0:\n stop = None\n\n step = old_slice.step * applied_slice.step\n\n return slice(start, stop, step)\n\n\ndef _index_indexer_1d(old_indexer, applied_indexer, size):\n assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))\n if isinstance(applied_indexer, slice) and applied_indexer == slice(None):\n # shortcut for the usual case\n return old_indexer\n if isinstance(old_indexer, slice):\n if isinstance(applied_indexer, slice):\n indexer = slice_slice(old_indexer, applied_indexer, size)\n else:\n indexer = _expand_slice(old_indexer, size)[applied_indexer]\n else:\n indexer = old_indexer[applied_indexer]\n return indexer\n\n\nclass ExplicitIndexer:\n \"\"\"Base class for explicit indexer objects.\n\n ExplicitIndexer objects wrap a tuple of values given by their ``tuple``\n property. These tuples should always have length equal to the number of\n dimensions on the indexed array.\n\n Do not instantiate BaseIndexer objects directly: instead, use one of the\n sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.\n \"\"\"\n\n __slots__ = (\"_key\",)\n\n def __init__(self, key):\n if type(self) is ExplicitIndexer:\n raise TypeError(\"cannot instantiate base ExplicitIndexer objects\")\n self._key = tuple(key)\n\n @property\n def tuple(self):\n return self._key\n\n def __repr__(self):\n return f\"{type(self).__name__}({self.tuple})\"\n\n\ndef as_integer_or_none(value):\n return None if value is None else operator.index(value)\n\n\ndef as_integer_slice(value):\n start = as_integer_or_none(value.start)\n stop = as_integer_or_none(value.stop)\n step = as_integer_or_none(value.step)\n return slice(start, stop, step)\n\n\nclass BasicIndexer(ExplicitIndexer):\n \"\"\"Tuple for basic indexing.\n\n All elements should be int or slice objects. Indexing follows NumPy's\n rules for basic indexing: each axis is independently sliced and axes\n indexed with an integer are dropped from the result.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(f\"key must be a tuple: {key!r}\")\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n else:\n raise TypeError(\n f\"unexpected indexer type for {type(self).__name__}: {k!r}\"\n )\n new_key.append(k)\n\n super().__init__(new_key)\n\n\nclass OuterIndexer(ExplicitIndexer):\n \"\"\"Tuple for outer/orthogonal indexing.\n\n All elements should be int, slice or 1-dimensional np.ndarray objects with\n an integer dtype. Indexing is applied independently along each axis, and\n axes indexed with an integer are dropped from the result. 
This type of\n indexing works like MATLAB/Fortran.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(f\"key must be a tuple: {key!r}\")\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n f\"invalid indexer array, does not have integer dtype: {k!r}\"\n )\n if k.ndim != 1:\n raise TypeError(\n f\"invalid indexer array for {type(self).__name__}; must have \"\n f\"exactly 1 dimension: {k!r}\"\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n f\"unexpected indexer type for {type(self).__name__}: {k!r}\"\n )\n new_key.append(k)\n\n super().__init__(new_key)\n\n\nclass VectorizedIndexer(ExplicitIndexer):\n \"\"\"Tuple for vectorized indexing.\n\n All elements should be slice or N-dimensional np.ndarray objects with an\n integer dtype and the same number of dimensions. Indexing follows proposed\n rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules\n (including broadcasting) except sliced axes are always moved to the end:\n https://github.com/numpy/numpy/pull/6256\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(f\"key must be a tuple: {key!r}\")\n\n new_key = []\n ndim = None\n for k in key:\n if isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n f\"invalid indexer array, does not have integer dtype: {k!r}\"\n )\n if ndim is None:\n ndim = k.ndim\n elif ndim != k.ndim:\n ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]\n raise ValueError(\n \"invalid indexer key: ndarray arguments \"\n f\"have different numbers of dimensions: {ndims}\"\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n f\"unexpected indexer type for {type(self).__name__}: {k!r}\"\n )\n new_key.append(k)\n\n super().__init__(new_key)\n\n\nclass ExplicitlyIndexed:\n \"\"\"Mixin to mark support for Indexer subclasses in indexing.\"\"\"\n\n __slots__ = ()\n\n\nclass ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):\n __slots__ = ()\n\n def __array__(self, dtype=None):\n key = BasicIndexer((slice(None),) * self.ndim)\n return np.asarray(self[key], dtype=dtype)\n\n\nclass ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):\n \"\"\"Wrap an array, converting tuples into the indicated explicit indexer.\"\"\"\n\n __slots__ = (\"array\", \"indexer_cls\")\n\n def __init__(self, array, indexer_cls=BasicIndexer):\n self.array = as_indexable(array)\n self.indexer_cls = indexer_cls\n\n def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n key = expanded_indexer(key, self.ndim)\n result = self.array[self.indexer_cls(key)]\n if isinstance(result, ExplicitlyIndexed):\n return type(self)(result, self.indexer_cls)\n else:\n # Sometimes explicitly indexed arrays return NumPy arrays or\n # scalars.\n return result\n\n\nclass LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap an array to make basic and outer indexing lazy.\"\"\"\n\n __slots__ = (\"array\", \"key\")\n\n def __init__(self, array, key=None):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : ExplicitIndexer, optional\n Array indexer. 
If provided, it is assumed to already be in\n canonical expanded form.\n \"\"\"\n if isinstance(array, type(self)) and key is None:\n # unwrap\n key = array.key\n array = array.array\n\n if key is None:\n key = BasicIndexer((slice(None),) * array.ndim)\n\n self.array = as_indexable(array)\n self.key = key\n\n def _updated_key(self, new_key):\n iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))\n full_key = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, integer_types):\n full_key.append(k)\n else:\n full_key.append(_index_indexer_1d(k, next(iter_new_key), size))\n full_key = tuple(full_key)\n\n if all(isinstance(k, integer_types + (slice,)) for k in full_key):\n return BasicIndexer(full_key)\n return OuterIndexer(full_key)\n\n @property\n def shape(self):\n shape = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, slice):\n shape.append(len(range(*k.indices(size))))\n elif isinstance(k, np.ndarray):\n shape.append(k.size)\n return tuple(shape)\n\n def __array__(self, dtype=None):\n array = as_indexable(self.array)\n return np.asarray(array[self.key], dtype=None)\n\n def transpose(self, order):\n return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order)\n\n def __getitem__(self, indexer):\n if isinstance(indexer, VectorizedIndexer):\n array = LazilyVectorizedIndexedArray(self.array, self.key)\n return array[indexer]\n return type(self)(self.array, self._updated_key(indexer))\n\n def __setitem__(self, key, value):\n if isinstance(key, VectorizedIndexer):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. Load your data first by .load() or compute().\"\n )\n full_key = self._updated_key(key)\n self.array[full_key] = value\n\n def __repr__(self):\n return f\"{type(self).__name__}(array={self.array!r}, key={self.key!r})\"\n\n\nclass LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap an array to make vectorized indexing lazy.\"\"\"\n\n __slots__ = (\"array\", \"key\")\n\n def __init__(self, array, key):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : VectorizedIndexer\n \"\"\"\n if isinstance(key, (BasicIndexer, OuterIndexer)):\n self.key = _outer_to_vectorized_indexer(key, array.shape)\n else:\n self.key = _arrayize_vectorized_indexer(key, array.shape)\n self.array = as_indexable(array)\n\n @property\n def shape(self):\n return np.broadcast(*self.key.tuple).shape\n\n def __array__(self, dtype=None):\n return np.asarray(self.array[self.key], dtype=None)\n\n def _updated_key(self, new_key):\n return _combine_indexers(self.key, self.shape, new_key)\n\n def __getitem__(self, indexer):\n # If the indexed array becomes a scalar, return LazilyOuterIndexedArray\n if all(isinstance(ind, integer_types) for ind in indexer.tuple):\n key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))\n return LazilyOuterIndexedArray(self.array, key)\n return type(self)(self.array, self._updated_key(indexer))\n\n def transpose(self, order):\n key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple))\n return type(self)(self.array, key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. 
Load your data first by .load() or compute().\"\n )\n\n def __repr__(self):\n return f\"{type(self).__name__}(array={self.array!r}, key={self.key!r})\"\n\n\ndef _wrap_numpy_scalars(array):\n \"\"\"Wrap NumPy scalars in 0d arrays.\"\"\"\n if np.isscalar(array):\n return np.array(array)\n else:\n return array\n\n\nclass CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):\n __slots__ = (\"array\", \"_copied\")\n\n def __init__(self, array):\n self.array = as_indexable(array)\n self._copied = False\n\n def _ensure_copied(self):\n if not self._copied:\n self.array = as_indexable(np.array(self.array))\n self._copied = True\n\n def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __setitem__(self, key, value):\n self._ensure_copied()\n self.array[key] = value\n\n def __deepcopy__(self, memo):\n # CopyOnWriteArray is used to wrap backend array objects, which might\n # point to files on disk, so we can't rely on the default deepcopy\n # implementation.\n return type(self)(self.array)\n\n\nclass MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = _wrap_numpy_scalars(as_indexable(array))\n\n def _ensure_cached(self):\n if not isinstance(self.array, NumpyIndexingAdapter):\n self.array = NumpyIndexingAdapter(np.asarray(self.array))\n\n def __array__(self, dtype=None):\n self._ensure_cached()\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __setitem__(self, key, value):\n self.array[key] = value\n\n\ndef as_indexable(array):\n \"\"\"\n This function always returns a ExplicitlyIndexed subclass,\n so that the vectorized indexing is always possible with the returned\n object.\n \"\"\"\n if isinstance(array, ExplicitlyIndexed):\n return array\n if isinstance(array, np.ndarray):\n return NumpyIndexingAdapter(array)\n if isinstance(array, pd.Index):\n return PandasIndexAdapter(array)\n if isinstance(array, dask_array_type):\n return DaskIndexingAdapter(array)\n if hasattr(array, \"__array_function__\"):\n return NdArrayLikeIndexingAdapter(array)\n\n raise TypeError(\"Invalid array type: {}\".format(type(array)))\n\n\ndef _outer_to_vectorized_indexer(key, shape):\n \"\"\"Convert an OuterIndexer into an vectorized indexer.\n\n Parameters\n ----------\n key : Outer/Basic Indexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n VectorizedIndexer\n Tuple suitable for use to index a NumPy array with vectorized indexing.\n Each element is an array: broadcasting them together gives the shape\n of the result.\n \"\"\"\n key = key.tuple\n\n n_dim = len([k for k in key if not isinstance(k, integer_types)])\n i_dim = 0\n new_key = []\n for k, size in zip(key, shape):\n if isinstance(k, integer_types):\n new_key.append(np.array(k).reshape((1,) * n_dim))\n else: # np.ndarray or slice\n if isinstance(k, slice):\n k = np.arange(*k.indices(size))\n assert k.dtype.kind in {\"i\", \"u\"}\n shape = [(1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)]\n new_key.append(k.reshape(*shape))\n i_dim += 1\n return VectorizedIndexer(tuple(new_key))\n\n\ndef _outer_to_numpy_indexer(key, shape):\n \"\"\"Convert an OuterIndexer into an indexer for NumPy.\n\n 
Parameters\n ----------\n key : Basic/OuterIndexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n tuple\n Tuple suitable for use to index a NumPy array.\n \"\"\"\n if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:\n # If there is only one vector and all others are slice,\n # it can be safely used in mixed basic/advanced indexing.\n # Boolean index should already be converted to integer array.\n return key.tuple\n else:\n return _outer_to_vectorized_indexer(key, shape).tuple\n\n\ndef _combine_indexers(old_key, shape, new_key):\n \"\"\"Combine two indexers.\n\n Parameters\n ----------\n old_key : ExplicitIndexer\n The first indexer for the original array\n shape : tuple of ints\n Shape of the original array to be indexed by old_key\n new_key\n The second indexer for indexing original[old_key]\n \"\"\"\n if not isinstance(old_key, VectorizedIndexer):\n old_key = _outer_to_vectorized_indexer(old_key, shape)\n if len(old_key.tuple) == 0:\n return new_key\n\n new_shape = np.broadcast(*old_key.tuple).shape\n if isinstance(new_key, VectorizedIndexer):\n new_key = _arrayize_vectorized_indexer(new_key, new_shape)\n else:\n new_key = _outer_to_vectorized_indexer(new_key, new_shape)\n\n return VectorizedIndexer(\n tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple))\n )\n\n\[email protected]\nclass IndexingSupport(enum.Enum):\n # for backends that support only basic indexer\n BASIC = 0\n # for backends that support basic / outer indexer\n OUTER = 1\n # for backends that support outer indexer including at most 1 vector.\n OUTER_1VECTOR = 2\n # for backends that support full vectorized indexer.\n VECTORIZED = 3\n\n\ndef explicit_indexing_adapter(\n key: ExplicitIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n raw_indexing_method: Callable,\n) -> Any:\n \"\"\"Support explicit indexing by delegating to a raw indexing method.\n\n Outer and/or vectorized indexers are supported by indexing a second time\n with a NumPy array.\n\n Parameters\n ----------\n key : ExplicitIndexer\n Explicit indexing object.\n shape : Tuple[int, ...]\n Shape of the indexed array.\n indexing_support : IndexingSupport enum\n Form of indexing supported by raw_indexing_method.\n raw_indexing_method : callable\n Function (like ndarray.__getitem__) that when called with indexing key\n in the form of a tuple returns an indexed array.\n\n Returns\n -------\n Indexing result, in the form of a duck numpy-array.\n \"\"\"\n raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)\n result = raw_indexing_method(raw_key.tuple)\n if numpy_indices.tuple:\n # index the loaded np.ndarray\n result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]\n return result\n\n\ndef decompose_indexer(\n indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n if isinstance(indexer, VectorizedIndexer):\n return _decompose_vectorized_indexer(indexer, shape, indexing_support)\n if isinstance(indexer, (BasicIndexer, OuterIndexer)):\n return _decompose_outer_indexer(indexer, shape, indexing_support)\n raise TypeError(f\"unexpected key type: {indexer}\")\n\n\ndef _decompose_slice(key, size):\n \"\"\"convert a slice to successive two slices. 
The first slice always has\n a positive step.\n \"\"\"\n start, stop, step = key.indices(size)\n if step > 0:\n # If key already has a positive step, use it as is in the backend\n return key, slice(None)\n else:\n # determine stop precisely for step > 1 case\n # e.g. [98:2:-2] -> [98:3:-2]\n stop = start + int((stop - start - 1) / step) * step + 1\n start, stop = stop + 1, start + 1\n return slice(start, stop, -step), slice(None, None, -1)\n\n\ndef _decompose_vectorized_indexer(\n indexer: VectorizedIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n \"\"\"\n Decompose vectorized indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer : VectorizedIndexer\n indexing_support : one of IndexerSupport entries\n\n Returns\n -------\n backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a vectorized indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports outer indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> array = np.arange(36).reshape(6, 6)\n >>> backend_indexer = OuterIndexer((np.array([0, 1, 3]), np.array([2, 3])))\n >>> # load subslice of the array\n ... array = NumpyIndexingAdapter(array)[backend_indexer]\n >>> np_indexer = VectorizedIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))\n >>> # vectorized indexing for on-memory np.ndarray.\n ... 
NumpyIndexingAdapter(array)[np_indexer]\n array([ 2, 21, 8])\n \"\"\"\n assert isinstance(indexer, VectorizedIndexer)\n\n if indexing_support is IndexingSupport.VECTORIZED:\n return indexer, BasicIndexer(())\n\n backend_indexer_elems = []\n np_indexer_elems = []\n # convert negative indices\n indexer_elems = [\n np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k\n for k, s in zip(indexer.tuple, shape)\n ]\n\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, slice):\n # If it is a slice, then we will slice it as-is\n # (but make its step positive) in the backend,\n # and then use all of it (slice(None)) for the in-memory portion.\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer_elems.append(bk_slice)\n np_indexer_elems.append(np_slice)\n else:\n # If it is a (multidimensional) np.ndarray, just pickup the used\n # keys without duplication and store them as a 1d-np.ndarray.\n oind, vind = np.unique(k, return_inverse=True)\n backend_indexer_elems.append(oind)\n np_indexer_elems.append(vind.reshape(*k.shape))\n\n backend_indexer = OuterIndexer(tuple(backend_indexer_elems))\n np_indexer = VectorizedIndexer(tuple(np_indexer_elems))\n\n if indexing_support is IndexingSupport.OUTER:\n return backend_indexer, np_indexer\n\n # If the backend does not support outer indexing,\n # backend_indexer (OuterIndexer) is also decomposed.\n backend_indexer1, np_indexer1 = _decompose_outer_indexer(\n backend_indexer, shape, indexing_support\n )\n np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)\n return backend_indexer1, np_indexer\n\n\ndef _decompose_outer_indexer(\n indexer: Union[BasicIndexer, OuterIndexer],\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n \"\"\"\n Decompose outer indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index the loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer : OuterIndexer or BasicIndexer\n indexing_support : One of the entries of IndexingSupport\n\n Returns\n -------\n backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a orthogonal indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports basic indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> array = np.arange(36).reshape(6, 6)\n >>> backend_indexer = BasicIndexer((slice(0, 3), slice(2, 4)))\n >>> # load subslice of the array\n ... array = NumpyIndexingAdapter(array)[backend_indexer]\n >>> np_indexer = OuterIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))\n >>> # outer indexing for on-memory np.ndarray.\n ... 
NumpyIndexingAdapter(array)[np_indexer]\n array([[ 2, 3, 2],\n [14, 15, 14],\n [ 8, 9, 8]])\n \"\"\"\n if indexing_support == IndexingSupport.VECTORIZED:\n return indexer, BasicIndexer(())\n assert isinstance(indexer, (OuterIndexer, BasicIndexer))\n\n backend_indexer: List[Any] = []\n np_indexer = []\n # make indexer positive\n pos_indexer = []\n for k, s in zip(indexer.tuple, shape):\n if isinstance(k, np.ndarray):\n pos_indexer.append(np.where(k < 0, k + s, k))\n elif isinstance(k, integer_types) and k < 0:\n pos_indexer.append(k + s)\n else:\n pos_indexer.append(k)\n indexer_elems = pos_indexer\n\n if indexing_support is IndexingSupport.OUTER_1VECTOR:\n # some backends such as h5py supports only 1 vector in indexers\n # We choose the most efficient axis\n gains = [\n (np.max(k) - np.min(k) + 1.0) / len(np.unique(k))\n if isinstance(k, np.ndarray)\n else 0\n for k in indexer_elems\n ]\n array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None\n\n for i, (k, s) in enumerate(zip(indexer_elems, shape)):\n if isinstance(k, np.ndarray) and i != array_index:\n # np.ndarray key is converted to slice that covers the entire\n # entries of this key.\n backend_indexer.append(slice(np.min(k), np.max(k) + 1))\n np_indexer.append(k - np.min(k))\n elif isinstance(k, np.ndarray):\n # Remove duplicates and sort them in the increasing order\n pkey, ekey = np.unique(k, return_inverse=True)\n backend_indexer.append(pkey)\n np_indexer.append(ekey)\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n else: # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n\n return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n if indexing_support == IndexingSupport.OUTER:\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, slice):\n # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():\n backend_indexer.append(k)\n np_indexer.append(slice(None))\n else:\n # Remove duplicates and sort them in the increasing order\n oind, vind = np.unique(k, return_inverse=True)\n backend_indexer.append(oind)\n np_indexer.append(vind.reshape(*k.shape))\n\n return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n # basic indexer\n assert indexing_support == IndexingSupport.BASIC\n\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, np.ndarray):\n # np.ndarray key is converted to slice that covers the entire\n # entries of this key.\n backend_indexer.append(slice(np.min(k), np.max(k) + 1))\n np_indexer.append(k - np.min(k))\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n else: # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n\n return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n\ndef _arrayize_vectorized_indexer(indexer, shape):\n \"\"\" Return an identical vindex but slices are replaced by arrays \"\"\"\n slices = [v for v in indexer.tuple if isinstance(v, slice)]\n if len(slices) == 0:\n return indexer\n\n arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]\n n_dim = arrays[0].ndim if len(arrays) > 0 else 0\n i_dim = 0\n new_key = []\n for 
v, size in zip(indexer.tuple, shape):\n if isinstance(v, np.ndarray):\n new_key.append(np.reshape(v, v.shape + (1,) * len(slices)))\n else: # slice\n shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1)\n new_key.append(np.arange(*v.indices(size)).reshape(shape))\n i_dim += 1\n return VectorizedIndexer(tuple(new_key))\n\n\ndef _dask_array_with_chunks_hint(array, chunks):\n \"\"\"Create a dask array using the chunks hint for dimensions of size > 1.\"\"\"\n import dask.array as da\n\n if len(chunks) < array.ndim:\n raise ValueError(\"not enough chunks in hint\")\n new_chunks = []\n for chunk, size in zip(chunks, array.shape):\n new_chunks.append(chunk if size > 1 else (1,))\n return da.from_array(array, new_chunks)\n\n\ndef _logical_any(args):\n return functools.reduce(operator.or_, args)\n\n\ndef _masked_result_drop_slice(key, data=None):\n\n key = (k for k in key if not isinstance(k, slice))\n chunks_hint = getattr(data, \"chunks\", None)\n\n new_keys = []\n for k in key:\n if isinstance(k, np.ndarray):\n if is_duck_dask_array(data):\n new_keys.append(_dask_array_with_chunks_hint(k, chunks_hint))\n elif isinstance(data, sparse_array_type):\n import sparse\n\n new_keys.append(sparse.COO.from_numpy(k))\n else:\n new_keys.append(k)\n else:\n new_keys.append(k)\n\n mask = _logical_any(k == -1 for k in new_keys)\n return mask\n\n\ndef create_mask(indexer, shape, data=None):\n \"\"\"Create a mask for indexing with a fill-value.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Indexer with -1 in integer or ndarray value to indicate locations in\n the result that should be masked.\n shape : tuple\n Shape of the array being indexed.\n data : optional\n Data for which mask is being created. If data is a dask arrays, its chunks\n are used as a hint for chunks on the resulting mask. If data is a sparse\n array, the returned mask is also a sparse array.\n\n Returns\n -------\n mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool\n Same type as data. 
Has the same shape as the indexing result.\n \"\"\"\n if isinstance(indexer, OuterIndexer):\n key = _outer_to_vectorized_indexer(indexer, shape).tuple\n assert not any(isinstance(k, slice) for k in key)\n mask = _masked_result_drop_slice(key, data)\n\n elif isinstance(indexer, VectorizedIndexer):\n key = indexer.tuple\n base_mask = _masked_result_drop_slice(key, data)\n slice_shape = tuple(\n np.arange(*k.indices(size)).size\n for k, size in zip(key, shape)\n if isinstance(k, slice)\n )\n expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)]\n mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape)\n\n elif isinstance(indexer, BasicIndexer):\n mask = any(k == -1 for k in indexer.tuple)\n\n else:\n raise TypeError(\"unexpected key type: {}\".format(type(indexer)))\n\n return mask\n\n\ndef _posify_mask_subindexer(index):\n \"\"\"Convert masked indices in a flat array to the nearest unmasked index.\n\n Parameters\n ----------\n index : np.ndarray\n One dimensional ndarray with dtype=int.\n\n Returns\n -------\n np.ndarray\n One dimensional ndarray with all values equal to -1 replaced by an\n adjacent non-masked element.\n \"\"\"\n masked = index == -1\n unmasked_locs = np.flatnonzero(~masked)\n if not unmasked_locs.size:\n # indexing unmasked_locs is invalid\n return np.zeros_like(index)\n masked_locs = np.flatnonzero(masked)\n prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)\n new_index = index.copy()\n new_index[masked_locs] = index[unmasked_locs[prev_value]]\n return new_index\n\n\ndef posify_mask_indexer(indexer):\n \"\"\"Convert masked values (-1) in an indexer to nearest unmasked values.\n\n This routine is useful for dask, where it can be much faster to index\n adjacent points than arbitrary points from the end of an array.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Input indexer.\n\n Returns\n -------\n ExplicitIndexer\n Same type of input, with all values in ndarray keys equal to -1\n replaced by an adjacent non-masked element.\n \"\"\"\n key = tuple(\n _posify_mask_subindexer(k.ravel()).reshape(k.shape)\n if isinstance(k, np.ndarray)\n else k\n for k in indexer.tuple\n )\n return type(indexer)(key)\n\n\ndef is_fancy_indexer(indexer: Any) -> bool:\n \"\"\"Return False if indexer is a int, slice, a 1-dimensional list, or a 0 or\n 1-dimensional ndarray; in all other cases return True\n \"\"\"\n if isinstance(indexer, (int, slice)):\n return False\n if isinstance(indexer, np.ndarray):\n return indexer.ndim > 1\n if isinstance(indexer, list):\n return bool(indexer) and not isinstance(indexer[0], int)\n return True\n\n\nclass NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a NumPy array to use explicit indexing.\"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n # In NumpyIndexingAdapter we only allow to store bare np.ndarray\n if not isinstance(array, np.ndarray):\n raise TypeError(\n \"NumpyIndexingAdapter only wraps np.ndarray. \"\n \"Trying to wrap {}\".format(type(array))\n )\n self.array = array\n\n def _indexing_array_and_key(self, key):\n if isinstance(key, OuterIndexer):\n array = self.array\n key = _outer_to_numpy_indexer(key, self.array.shape)\n elif isinstance(key, VectorizedIndexer):\n array = nputils.NumpyVIndexAdapter(self.array)\n key = key.tuple\n elif isinstance(key, BasicIndexer):\n array = self.array\n # We want 0d slices rather than scalars. 
This is achieved by\n # appending an ellipsis (see\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).\n key = key.tuple + (Ellipsis,)\n else:\n raise TypeError(\"unexpected key type: {}\".format(type(key)))\n\n return array, key\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __getitem__(self, key):\n array, key = self._indexing_array_and_key(key)\n return array[key]\n\n def __setitem__(self, key, value):\n array, key = self._indexing_array_and_key(key)\n try:\n array[key] = value\n except ValueError:\n # More informative exception if read-only view\n if not array.flags.writeable and not array.flags.owndata:\n raise ValueError(\n \"Assignment destination is a view. \"\n \"Do you want to .copy() array first?\"\n )\n else:\n raise\n\n\nclass NdArrayLikeIndexingAdapter(NumpyIndexingAdapter):\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n if not hasattr(array, \"__array_function__\"):\n raise TypeError(\n \"NdArrayLikeIndexingAdapter must wrap an object that \"\n \"implements the __array_function__ protocol\"\n )\n self.array = array\n\n\nclass DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a dask array to support explicit indexing.\"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n \"\"\"This adapter is created in Variable.__getitem__ in\n Variable._broadcast_indexes.\n \"\"\"\n self.array = array\n\n def __getitem__(self, key):\n\n if not isinstance(key, VectorizedIndexer):\n # if possible, short-circuit when keys are effectively slice(None)\n # This preserves dask name and passes lazy array equivalence checks\n # (see duck_array_ops.lazy_array_equiv)\n rewritten_indexer = False\n new_indexer = []\n for idim, k in enumerate(key.tuple):\n if isinstance(k, Iterable) and duck_array_ops.array_equiv(\n k, np.arange(self.array.shape[idim])\n ):\n new_indexer.append(slice(None))\n rewritten_indexer = True\n else:\n new_indexer.append(k)\n if rewritten_indexer:\n key = type(key)(tuple(new_indexer))\n\n if isinstance(key, BasicIndexer):\n return self.array[key.tuple]\n elif isinstance(key, VectorizedIndexer):\n return self.array.vindex[key.tuple]\n else:\n assert isinstance(key, OuterIndexer)\n key = key.tuple\n try:\n return self.array[key]\n except NotImplementedError:\n # manual orthogonal indexing.\n # TODO: port this upstream into dask in a saner way.\n value = self.array\n for axis, subkey in reversed(list(enumerate(key))):\n value = value[(slice(None),) * axis + (subkey,)]\n return value\n\n def __setitem__(self, key, value):\n raise TypeError(\n \"this variable's data is stored in a dask array, \"\n \"which does not support item assignment. 
To \"\n \"assign to this variable, you must first load it \"\n \"into memory explicitly using the .load() \"\n \"method or accessing its .values attribute.\"\n )\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n\nclass PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a pandas.Index to preserve dtypes and handle explicit indexing.\"\"\"\n\n __slots__ = (\"array\", \"_dtype\")\n\n def __init__(self, array: Any, dtype: DTypeLike = None):\n self.array = utils.safe_cast_to_index(array)\n if dtype is None:\n if isinstance(array, pd.PeriodIndex):\n dtype_ = np.dtype(\"O\")\n elif hasattr(array, \"categories\"):\n # category isn't a real numpy dtype\n dtype_ = array.categories.dtype\n elif not utils.is_valid_numpy_dtype(array.dtype):\n dtype_ = np.dtype(\"O\")\n else:\n dtype_ = array.dtype\n else:\n dtype_ = np.dtype(dtype)\n self._dtype = dtype_\n\n @property\n def dtype(self) -> np.dtype:\n return self._dtype\n\n def __array__(self, dtype: DTypeLike = None) -> np.ndarray:\n if dtype is None:\n dtype = self.dtype\n array = self.array\n if isinstance(array, pd.PeriodIndex):\n with suppress(AttributeError):\n # this might not be public API\n array = array.astype(\"object\")\n return np.asarray(array.values, dtype=dtype)\n\n @property\n def shape(self) -> Tuple[int]:\n return (len(self.array),)\n\n def __getitem__(\n self, indexer\n ) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]:\n key = indexer.tuple\n if isinstance(key, tuple) and len(key) == 1:\n # unpack key so it can index a pandas.Index object (pandas.Index\n # objects don't like tuples)\n (key,) = key\n\n if getattr(key, \"ndim\", 0) > 1: # Return np-array if multidimensional\n return NumpyIndexingAdapter(self.array.values)[indexer]\n\n result = self.array[key]\n\n if isinstance(result, pd.Index):\n result = PandasIndexAdapter(result, dtype=self.dtype)\n else:\n # result is a scalar\n if result is pd.NaT:\n # work around the impossibility of casting NaT with asarray\n # note: it probably would be better in general to return\n # pd.Timestamp rather np.than datetime64 but this is easier\n # (for now)\n result = np.datetime64(\"NaT\", \"ns\")\n elif isinstance(result, timedelta):\n result = np.timedelta64(getattr(result, \"value\", result), \"ns\")\n elif isinstance(result, pd.Timestamp):\n # Work around for GH: pydata/xarray#1932 and numpy/numpy#10668\n # numpy fails to convert pd.Timestamp to np.datetime64[ns]\n result = np.asarray(result.to_datetime64())\n elif self.dtype != object:\n result = np.asarray(result, dtype=self.dtype)\n\n # as for numpy.ndarray indexing, we always want the result to be\n # a NumPy array.\n result = utils.to_0d_array(result)\n\n return result\n\n def transpose(self, order) -> pd.Index:\n return self.array # self.array should be always one-dimensional\n\n def __repr__(self) -> str:\n return \"{}(array={!r}, dtype={!r})\".format(\n type(self).__name__, self.array, self.dtype\n )\n\n def copy(self, deep: bool = True) -> \"PandasIndexAdapter\":\n # Not the same as just writing `self.array.copy(deep=deep)`, as\n # shallow copies of the underlying numpy.ndarrays become deep ones\n # upon pickling\n # >>> len(pickle.dumps((self.array, self.array)))\n # 4000281\n # >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))\n # 8000341\n array = self.array.copy(deep=True) if deep else self.array\n return PandasIndexAdapter(array, self._dtype)\n" ]
[ [ "numpy.asarray", "numpy.issubdtype", "numpy.dtype", "numpy.broadcast", "numpy.max", "numpy.zeros_like", "numpy.any", "numpy.searchsorted", "numpy.where", "numpy.unique", "numpy.arange", "numpy.flatnonzero", "numpy.diff", "numpy.ravel", "numpy.min", "numpy.broadcast_arrays", "numpy.array", "numpy.datetime64", "numpy.isscalar" ] ]
Uglamator/WRSCRYP
[ "e0d68ac05c9fc19f1bdd238eb36c272de9610ba5" ]
[ "RedditScrape.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 1 17:17:59 2021\n\n@author: \n\"\"\"\n\nimport praw\nimport pandas as pd\nimport datetime as dt\nimport sqlite3\nconn = sqlite3.connect('TestDB1.db')\nc = conn.cursor()\n#c.execute('CREATE TABLE comments (id,body,created_datetime,score)')\nconn.commit()\npd.set_option(\"display.max_colwidth\", 100000)\nreddit = praw.Reddit(client_id='JKuwICzPpSy0Ow' , client_secret= '',user_agent='JFTESTEROO')\nstream = reddit.subreddit('Cryptocurrency').stream.comments(skip_existing=True)\n\nfor x in stream:\n c.execute('insert into comments(id,body,created_datetime,score) values (?,?,?,?)',(x.id,x.body,x.created_utc,x.score))\n conn.commit()\n print(x.id)\n" ]
[ [ "pandas.set_option" ] ]
dfreilich/machine-learning-workspace
[ "a1b6e5bd84a4f5708461f3827d64e2bf5a32dffa" ]
[ "18739-hws/hw2/hw2_utils.py" ]
[ "from builtins import range\nfrom six.moves import cPickle as pickle\nimport numpy as np\nimport os\nfrom scipy.misc import imread\nimport platform\nimport tensorflow as tf\nimport numpy as np\nimport math\nfrom matplotlib import pyplot as plt\n\ndef load_pickle(f):\n version = platform.python_version_tuple()\n if version[0] == '2':\n return pickle.load(f)\n elif version[0] == '3':\n return pickle.load(f, encoding='latin1')\n raise ValueError(\"invalid python version: {}\".format(version))\n\ndef load_CIFAR_batch(filename):\n \"\"\" load single batch of cifar \"\"\"\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y\n\ndef load_CIFAR10(ROOT):\n \"\"\" load all of cifar \"\"\"\n xs = []\n ys = []\n for b in range(1,6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for the two-layer neural net classifier. These are the same steps as\n we used for the SVM, but condensed to a single function. \n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\ndef run_model(session, predict, mean_loss,X,y,is_training,Xd, yd,\n epochs=1, batch_size=64, print_every=100,\n training=None, plot_losses=False):\n # have tensorflow compute accuracy\n correct_prediction = tf.equal(tf.argmax(predict,1), y)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n # shuffle indicies\n train_indicies = np.arange(Xd.shape[0])\n np.random.shuffle(train_indicies)\n training_now = training is not None\n \n # setting up variables we want to compute (and optimizing)\n # if we have a training function, add that to things we compute\n variables = [mean_loss,correct_prediction,accuracy]\n if training_now:\n variables[-1] = training\n \n # counter \n iter_cnt = 0\n for e in range(epochs):\n # keep track of losses and accuracy\n correct = 0\n losses = []\n # make sure we iterate over the dataset once\n for i in range(int(math.ceil(Xd.shape[0]/batch_size))):\n # generate indicies for the batch\n start_idx = (i*batch_size)%Xd.shape[0]\n idx = train_indicies[start_idx:start_idx+batch_size]\n \n # create a feed dictionary for this batch\n feed_dict = {X: Xd[idx,:],\n y: yd[idx],\n is_training: training_now }\n # get batch size\n actual_batch_size = yd[idx].shape[0]\n \n # have tensorflow compute loss and correct predictions\n # and (if given) perform a training step\n loss, corr, _ = session.run(variables,feed_dict=feed_dict)\n \n # aggregate performance stats\n 
losses.append(loss*actual_batch_size)\n correct += np.sum(corr)\n \n # print every now and then\n if training_now and (iter_cnt % print_every) == 0:\n print(\"Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}\"\\\n .format(iter_cnt,loss,np.sum(corr)/actual_batch_size))\n iter_cnt += 1\n total_correct = correct/Xd.shape[0]\n total_loss = np.sum(losses)/Xd.shape[0]\n print(\"Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.3g}\"\\\n .format(total_loss,total_correct,e+1))\n if plot_losses:\n plt.plot(losses)\n plt.grid(True)\n plt.title('Epoch {} Loss'.format(e+1))\n plt.xlabel('minibatch number')\n plt.ylabel('minibatch loss')\n plt.show()\n return total_loss,total_correct\n" ]
[ [ "numpy.arange", "tensorflow.cast", "numpy.random.shuffle", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "tensorflow.argmax", "numpy.array", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
BashkirovN/pytorch
[ "f720fd81dfa3f124fdd0747312496bdbfafb75a7" ]
[ "torch/ao/quantization/fx/prepare.py" ]
[ "import torch\nimport operator\nimport warnings\nfrom torch.fx import (\n GraphModule,\n)\nfrom torch.fx.graph import (\n Graph,\n Node,\n)\nfrom torch.fx.node import Argument\n\nfrom ..quantize import (\n propagate_qconfig_,\n)\nfrom ..observer import (\n ObserverBase,\n)\nfrom ..qconfig import QConfigAny\nfrom .qconfig_utils import (\n convert_dict_to_ordered_dict,\n generate_qconfig_map,\n get_flattened_qconfig_dict,\n update_qconfig_for_fusion,\n update_qconfig_for_qat,\n)\n\nfrom .quantization_patterns import (\n QuantizeHandler,\n CustomModuleQuantizeHandler,\n StandaloneModuleQuantizeHandler,\n)\n\nfrom .quantization_types import Pattern\n\nfrom ._equalize import (\n is_equalization_observer,\n node_supports_equalization,\n)\n\nfrom .graph_module import (\n ObservedGraphModule,\n ObservedStandaloneGraphModule,\n)\n\nfrom .pattern_utils import (\n MatchResult,\n get_default_quant_patterns,\n)\n\nfrom .match_utils import (\n find_matches,\n)\n\nfrom .utils import (\n _parent_name,\n get_custom_module_class_keys,\n all_node_args_have_no_tensors,\n assert_and_get_unique_device,\n node_bool_tensor_arg_indexes,\n get_new_attr_name_with_prefix,\n NON_QUANTIZABLE_WEIGHT_OPS,\n WEIGHT_INDEX_DICT,\n BIAS_INDEX_DICT,\n)\n\nfrom ..quantization_mappings import (\n get_default_qat_module_mappings,\n)\n\nfrom torch.ao.quantization.quantize import (\n is_activation_post_process,\n convert\n)\n\nfrom ..utils import (\n get_combined_dict,\n get_qconfig_dtypes,\n get_swapped_custom_module_class,\n activation_is_statically_quantized,\n activation_is_int8_quantized,\n)\n\nfrom .backend_config_dict.utils import (\n get_pattern_to_quantize_handlers,\n get_pattern_to_dtype_configs,\n get_pattern_to_input_type_to_index,\n)\n\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union, Set\nfrom collections import defaultdict\n\ndef is_activation_post_process_node(node: Node, modules: Dict[str, torch.nn.Module]) -> bool:\n return isinstance(node, torch.fx.Node) and node.op == \"call_module\" and \\\n is_activation_post_process(modules[str(node.target)])\n\ndef node_arg_is_weight(node: Node, arg: Any) -> bool:\n if isinstance(node, Node) and node.op == 'call_function' and \\\n node.target in WEIGHT_INDEX_DICT:\n for i, node_arg in enumerate(node.args):\n if arg is node_arg and i in \\\n WEIGHT_INDEX_DICT[node.target]: # type: ignore[index]\n return True\n for kwarg_name, kwarg_value in node.kwargs.items():\n if kwarg_name == 'weight' and arg is kwarg_value:\n return True\n return False\n\ndef node_arg_is_bias(node: Node, arg: Any) -> bool:\n if not isinstance(node, Node) or node.op != 'call_function' or \\\n node.target not in BIAS_INDEX_DICT:\n return False\n\n for i, node_arg in enumerate(node.args):\n if arg is node_arg and i in \\\n BIAS_INDEX_DICT[node.target]: # type: ignore[index]\n return True\n\n return node.kwargs.get('bias', None) is arg\n\ndef is_input_arg_dtype_supported_by_backend(\n arg: Argument,\n node: Node,\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n dtype_config: Dict[str, torch.dtype],\n) -> bool:\n \"\"\" Check if the configured qconfig for the argument\n is supported by the backend or not\n \"\"\"\n if isinstance(arg, (list, tuple)):\n return all(map(lambda a: is_input_arg_dtype_supported_by_backend(a, node, node_name_to_target_dtype, dtype_config), arg))\n if not isinstance(arg, Node):\n return True\n # TODO: support check for standalone module\n is_weight = node_arg_is_weight(node, arg)\n is_bias = node_arg_is_bias(node, arg)\n is_activation 
= not is_weight and not is_bias\n if is_activation:\n input_activation_dtype = dtype_config.get(\"input_activation_dtype\", None)\n return input_activation_dtype is None or \\\n node_name_to_target_dtype[node.name][\"input_activation_dtype\"] == input_activation_dtype\n elif is_weight:\n weight_dtype = dtype_config.get(\"weight_dtype\", None)\n return weight_dtype is None or node_name_to_target_dtype[node.name][\"weight_dtype\"] == weight_dtype\n else: # bias\n bias_dtype = dtype_config.get(\"bias_dtype\", None)\n return bias_dtype is None or node_name_to_target_dtype[node.name][\"bias_dtype\"] == bias_dtype\n\ndef is_output_dtype_supported_by_backend(\n node: Node,\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n dtype_config: Dict[str, torch.dtype],\n) -> bool:\n \"\"\" Check if the configured qconfig for the output\n is supported by the backend or not\n \"\"\"\n output_dtype = dtype_config.get(\"output_dtype\", None)\n return output_dtype is None or \\\n output_dtype == node_name_to_target_dtype[node.name][\"output_activation_dtype\"]\n\ndef is_pattern_dtype_config_supported_by_backend(\n pattern: Optional[Pattern],\n matched_nodes: Optional[List[Node]],\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n backend_config_dict: Optional[Dict[str, Any]]\n) -> bool:\n \"\"\" Check is the dtype configuration of a pattern is supported by\n the backend or not\n \"\"\"\n if backend_config_dict is None or pattern is None:\n return True\n assert matched_nodes is not None and len(matched_nodes) >= 1\n pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config_dict)\n dtype_configs: List[Dict[str, torch.dtype]] = pattern_to_dtype_configs.get(pattern, [])\n\n input_node = matched_nodes[0]\n output_node = matched_nodes[-1]\n for dtype_config in dtype_configs:\n # check if arg dtype are supported\n supported = True\n for arg in input_node.args:\n supported = supported and \\\n is_input_arg_dtype_supported_by_backend(\n arg, input_node, node_name_to_target_dtype, dtype_config)\n for k, arg in input_node.kwargs.items():\n supported = supported and \\\n is_input_arg_dtype_supported_by_backend(\n arg, input_node, node_name_to_target_dtype, dtype_config)\n # check if output dtype is supported\n supported = supported and is_output_dtype_supported_by_backend(\n output_node, node_name_to_target_dtype, dtype_config)\n if supported:\n return True\n return False\n\ndef get_standalone_module_configs(\n node: Node,\n modules: Dict[str, torch.nn.Module],\n prepare_custom_config_dict: Dict[str, Any],\n qconfig: QConfigAny,\n) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n Returns the standalone module qconfig_dict and prepare_config_dict\n for `node`, assuming that the module pointed to by `node` is\n a standalone modules.\n \"\"\"\n standalone_module = modules[node.target] # type: ignore[index]\n standalone_module_name_configs = \\\n prepare_custom_config_dict.get(\"standalone_module_name\", [])\n standalone_module_class_configs = \\\n prepare_custom_config_dict.get(\"standalone_module_class\", [])\n class_config_map = {x[0]: (x[1], x[2]) for x in standalone_module_class_configs}\n name_config_map = {x[0]: (x[1], x[2]) for x in standalone_module_name_configs}\n config = class_config_map.get(type(standalone_module), (None, None))\n config = name_config_map.get(node.target, config)\n sm_qconfig_dict = {\"\": qconfig} if config[0] is None else config[0]\n sm_prepare_config_dict = {} if config[1] is None else config[1]\n return sm_qconfig_dict, 
sm_prepare_config_dict\n\ndef qat_swap_modules(\n root: torch.nn.Module,\n additional_qat_module_mapping: Dict[Callable, Callable]) -> None:\n all_mappings = get_combined_dict(\n get_default_qat_module_mappings(), additional_qat_module_mapping)\n convert(root, mapping=all_mappings, inplace=True, remove_qconfig=False)\n\n# TODO: remove observed_op, looks like it's not used\ndef insert_observer(\n node: Node,\n observed_op: Node,\n observer: ObserverBase,\n model: torch.nn.Module,\n modules: Dict[str, torch.nn.Module],\n graph: Graph,\n) -> Node:\n \"\"\"\n Attaches `observer` to `model`, and creates a node which calls\n `observer` on the output of `node`.\n \"\"\"\n model_device = assert_and_get_unique_device(model)\n if model_device:\n observer.to(model_device)\n # add observer module as attribute\n if is_equalization_observer(observer):\n prefix = node.name + '_equalization_process_'\n else:\n prefix = 'activation_post_process_'\n get_new_observer_name = get_new_attr_name_with_prefix(prefix)\n observer_name = get_new_observer_name(model)\n setattr(model, observer_name, observer)\n modules[observer_name] = observer\n with graph.inserting_after(node):\n new_obs = graph.create_node(\n 'call_module', observer_name, (node,), {})\n return new_obs\n\ndef get_target_activation_dtype_for_node(\n node: Node,\n qconfig: QConfigAny,\n inputs_seen_counter: int,\n outputs_seen_counter: int,\n input_quantized_idxs: List[int],\n output_quantized_idxs: List[int],\n qhandler: Optional[QuantizeHandler],\n modules: Dict[str, torch.nn.Module],\n cache_for_no_tensor_check: Dict[Node, bool],\n) -> Dict[str, Optional[torch.dtype]]:\n \"\"\"\n Returns the expected dtype of the input and output of this node after\n convert. If the value is not None, it represents the dtype of the\n Tensor. 
If the value is None, it means the value is not a Tensor.\n\n Note: this is for activations only, weight dtypes are not handled here.\n\n TODO(future PR, if needed): explicitly spell out the non-Tensor\n dtypes.\n \"\"\"\n if node.op == 'placeholder':\n if inputs_seen_counter in input_quantized_idxs:\n return {\n \"input_activation_dtype\": torch.quint8,\n \"output_activation_dtype\": torch.quint8,\n }\n else:\n # if dtype is fp32 (default), do nothing\n # note: other dtypes are not supported\n return {\n \"input_activation_dtype\": torch.float,\n \"output_activation_dtype\": torch.float,\n }\n\n elif node.op in ('call_module', 'call_method', 'call_function'):\n args_have_no_tensors = \\\n all_node_args_have_no_tensors(\n node, modules, cache_for_no_tensor_check)\n if args_have_no_tensors:\n return {\n \"input_activation_dtype\": None,\n \"output_activation_dtype\": None,\n }\n\n # TODO(future PR): consider stopping matching getitem\n is_getitem = node.op == 'call_function' and \\\n node.target == operator.getitem\n if is_getitem:\n return {\n \"input_activation_dtype\": torch.float,\n \"output_activation_dtype\": torch.float,\n }\n\n # get qconfig to determine the eventual dtype of this node\n if qconfig is not None:\n if qhandler is not None and qhandler.input_output_observed() and qhandler.is_output_quantized(qconfig):\n act_dtype, weight_dtype, act_compute_dtype = \\\n get_qconfig_dtypes(qconfig)\n bias_dtype = torch.float16 \\\n if act_dtype == torch.float16 and weight_dtype == torch.float16 \\\n else torch.float\n return {\n \"input_activation_dtype\": act_dtype,\n \"weight_dtype\": weight_dtype,\n \"bias_dtype\": bias_dtype,\n \"output_activation_dtype\": act_dtype,\n }\n return {\n \"input_activation_dtype\": torch.float,\n \"output_activation_dtype\": torch.float,\n }\n\n elif node.op == 'get_attr':\n return {\n \"input_activation_dtype\": torch.float,\n \"output_activation_dtype\": torch.float,\n }\n\n elif node.op == 'output':\n if outputs_seen_counter in output_quantized_idxs:\n return {\n \"input_activation_dtype\": torch.quint8,\n \"output_activation_dtype\": torch.quint8\n }\n else:\n # if dtype is fp32 (default), do nothing\n # note: other dtypes are not supported\n return {\n \"input_activation_dtype\": torch.float,\n \"output_activation_dtype\": torch.float,\n }\n\n else:\n raise AssertionError(f'need to handle {node.format_node()}')\n\ndef get_arg_target_dtype_as_output(\n arg: Node,\n modules: Dict[str, torch.nn.Module],\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n) -> Optional[torch.dtype]:\n \"\"\" Get the target output activation dtype for\n the argumnet in the original graph, skipping inserted observers\n We are assuming that the observers are inserted correctly, and the dtype for\n argument in quantized graph will match what is specified by the qconfig\n \"\"\"\n assert isinstance(arg, Node)\n if is_activation_post_process_node(arg, modules):\n observed_arg = arg.args[0]\n assert isinstance(observed_arg, Node), \"Currently we only support observing Node\"\n return node_name_to_target_dtype[observed_arg.name][\"output_activation_dtype\"]\n else:\n return node_name_to_target_dtype[arg.name][\"output_activation_dtype\"]\n\ndef get_arg_target_dtype_as_input_to_node(\n arg: Node,\n node: Node,\n modules: Dict[str, torch.nn.Module],\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n) -> Optional[torch.dtype]:\n \"\"\" Get the target argument dtype for the argument `arg`, as input\n to node `node`\n \"\"\"\n assert 
isinstance(arg, Node)\n is_weight = node_arg_is_weight(node, arg)\n is_bias = node_arg_is_bias(node, arg)\n is_activation = not is_weight and not is_bias\n if is_activation:\n return node_name_to_target_dtype[node.name][\"input_activation_dtype\"]\n elif is_weight:\n if node.target in NON_QUANTIZABLE_WEIGHT_OPS:\n return None\n else:\n return node_name_to_target_dtype[node.name][\"weight_dtype\"]\n else:\n return node_name_to_target_dtype[node.name][\"bias_dtype\"]\n\n\ndef maybe_insert_input_observer_for_arg_or_kwarg(\n node: Union[Node, Any],\n arg: Argument,\n qconfig: QConfigAny,\n model: torch.nn.Module,\n modules: Dict[str, torch.nn.Module],\n graph: Graph,\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n qhandler: Optional[QuantizeHandler],\n prepare_custom_config_dict: Dict[str, Any],\n) -> Argument:\n \"\"\"\n Given a `node` and an `arg`, inserts an input observer between\n `node` and `arg` if necessary.\n \"\"\"\n # for ops such as torch.cat([x0, x1]),\n # traverse through the list\n if isinstance(arg, (list, tuple)):\n new_arg_to_return = []\n for inner_arg in arg:\n new_inner_arg = maybe_insert_input_observer_for_arg_or_kwarg(\n node, inner_arg, qconfig, model, modules,\n graph, node_name_to_target_dtype,\n qhandler, prepare_custom_config_dict)\n new_arg_to_return.append(new_inner_arg)\n return type(arg)(new_arg_to_return)\n\n if not isinstance(arg, Node):\n return arg\n assert isinstance(arg, Node)\n # default (no observer)\n new_arg = arg\n\n is_standalone_module = qhandler is not None and \\\n isinstance(qhandler, StandaloneModuleQuantizeHandler)\n if not is_standalone_module:\n # regular flow for most nodes, except standalone modules\n is_weight = node_arg_is_weight(node, arg)\n assert qconfig is not None\n act_post_process_ctr = qconfig.weight if is_weight else \\\n qconfig.activation\n\n arg_as_output_target_dtype = get_arg_target_dtype_as_output(arg, modules, node_name_to_target_dtype)\n arg_as_input_target_dtype = get_arg_target_dtype_as_input_to_node(arg, node, modules, node_name_to_target_dtype)\n needs_obs = (\n # if the dtypes are different, we need an observer\n (arg_as_output_target_dtype != arg_as_input_target_dtype) and\n # except if the second dtype is float, a dequant will be inserted\n # without an observer in convert\n # TODO(future PR): change this so a placeholder is inserted for\n # future dequants, to make the logic easier to understand\n (arg_as_input_target_dtype != torch.float) and\n # if arg is a bool tensor or not a tensor, do not insert observer\n (arg_as_output_target_dtype not in (torch.bool, None))\n )\n\n else:\n # custom flow for standalone modules\n _sm_qconfig_dict, sm_prepare_config_dict = \\\n get_standalone_module_configs(\n node, modules, prepare_custom_config_dict, qconfig)\n\n sm_input_quantized_idxs = \\\n sm_prepare_config_dict.get('input_quantized_idxs', [])\n # for args, this is set to the index of the current arg\n # for kwargs, this is left at None\n cur_input_idx = None\n for arg_idx, arg_to_check in enumerate(node.args):\n if arg_to_check is arg:\n cur_input_idx = arg_idx\n break\n\n if cur_input_idx is None:\n needs_obs = False\n else:\n arg_as_output_target_dtype = get_arg_target_dtype_as_output(arg, modules, node_name_to_target_dtype)\n arg_as_input_target_dtype = torch.quint8 if cur_input_idx in sm_input_quantized_idxs \\\n else torch.float\n needs_obs = (\n (arg_as_output_target_dtype != arg_as_input_target_dtype) and\n (arg_as_input_target_dtype != torch.float)\n )\n\n if needs_obs:\n\n 
new_obs_mod = act_post_process_ctr()\n existing_obs_node = None\n\n # Before using the new observer, check if an observer\n # of the correct type already exists. If it does, use it.\n # This prevents duplicate observer insertions if a node is\n # used by multiple nodes.\n # TODO: this is looking into how the value is used in the future\n # we should remove this\n # removing this means we insert one observer for each use, even if they\n # have the same dtype, we can have an extra pass that removes the extra observers\n for maybe_obs_node, _ in arg.users.items():\n if maybe_obs_node.op == 'call_module':\n maybe_obs_mod = modules[maybe_obs_node.target] # type: ignore[index]\n if (\n type(maybe_obs_mod) == type(new_obs_mod) and\n maybe_obs_mod.dtype == arg_as_input_target_dtype\n ):\n existing_obs_node = maybe_obs_node\n break\n\n if existing_obs_node is None:\n new_obs_node = insert_observer(\n arg, node, new_obs_mod, model, modules, graph)\n # override this arg to be the observed arg\n new_arg = new_obs_node\n else:\n new_arg = existing_obs_node\n\n return new_arg\n\n\ndef maybe_insert_input_observers_for_node(\n node: Node,\n qconfig: QConfigAny,\n model: torch.nn.Module,\n modules: Dict[str, torch.nn.Module],\n graph: Graph,\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n qhandler: Optional[QuantizeHandler],\n prepare_custom_config_dict: Dict[str, Any],\n) -> None:\n \"\"\"\n If needed, inserts observers to the input args and kwargs of `node`.\n Note: modifies `node` inplace.\n\n For example, if cur_node needs an observer after prev_node, we change from\n\n prev_node -> cur_node\n\n To\n\n prev_node -> obs -> cur_node\n \"\"\"\n if qconfig is None:\n # if quantization is turned off for this node, we do not need\n # to insert input observers\n return\n assert qconfig is not None\n\n # Look through every input arg. 
If that arg's target dtype does not\n    # match the current node's target dtype, insert an observer.\n    new_args = []\n    for arg in node.args:\n        new_arg = maybe_insert_input_observer_for_arg_or_kwarg(\n            node, arg, qconfig, model, modules, graph,\n            node_name_to_target_dtype,\n            qhandler, prepare_custom_config_dict)\n        new_args.append(new_arg)\n\n    new_kwargs = {}\n    for k, kwarg in node.kwargs.items():\n        new_kwarg = maybe_insert_input_observer_for_arg_or_kwarg(\n            node, kwarg, qconfig, model, modules, graph,\n            node_name_to_target_dtype,\n            qhandler, prepare_custom_config_dict)\n        new_kwargs[k] = new_kwarg\n\n    # assign the new args and kwargs to the node, inplace\n    node.args = tuple(new_args)\n    node.kwargs = new_kwargs\n\ndef maybe_insert_input_equalization_observers_for_node(\n    node: Node,\n    equalization_qconfig: Any,\n    model: torch.nn.Module,\n    modules: Dict[str, torch.nn.Module],\n    graph: Graph,\n    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n    is_branch: bool,\n) -> None:\n    \"\"\"\n    If `node` needs to be equalized, finds the input/weight observers it needs in\n    `equalization_qconfig`, creates them, and inserts them into `graph`.\n\n    If `node` does not need an equalization observer, returns None.\n    \"\"\"\n    if equalization_qconfig is None or not node_supports_equalization(node, modules):\n        return\n\n    if is_branch:\n        warnings.warn(\n            f\"Cannot equalize {node} because it is part of a branch.\"\n        )\n        return\n\n    new_args = []\n    for arg in node.args:\n        if not isinstance(arg, Node) or node_arg_is_bias(node, arg):\n            new_args.append(arg)\n            continue\n\n        is_weight = node_arg_is_weight(node, arg)\n\n        act_eq_process_ctr = equalization_qconfig.weight if is_weight else \\\n            equalization_qconfig.input_activation\n\n        new_eq_obs_mod = act_eq_process_ctr()\n        new_eq_obs_node = insert_observer(\n            arg, node, new_eq_obs_mod, model, modules, graph)\n\n        new_args.append(new_eq_obs_node)\n\n    # assign the new args to the node, inplace\n    node.args = tuple(new_args)\n\ndef maybe_insert_output_observer_for_node(\n    node: Node,\n    model: torch.nn.Module,\n    modules: Dict[str, torch.nn.Module],\n    graph: Graph,\n    matches: Dict[str, MatchResult],\n    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n    matched_pattern: Any,\n    qhandler: Optional[QuantizeHandler],\n) -> Optional[Node]:\n    \"\"\"\n    If `node` needs an output observer, creates it, inserts it into `graph`\n    and returns it.\n\n    If `node` does not need an output observer, returns None.\n    \"\"\"\n    root_node, matched_nodes, pattern, qhandler, qconfig = matches.get(\n        node.name, (None, None, None, None, None))\n\n    if qhandler is None:\n        return None\n\n    assert qconfig is not None\n    assert node.op != 'output', 'observer insertion for outputs is handled elsewhere'\n\n    is_standalone_module = qhandler is not None and \\\n        isinstance(qhandler, StandaloneModuleQuantizeHandler)\n\n    dtype = node_name_to_target_dtype[node.name][\"output_activation_dtype\"]\n    should_insert_observer = \\\n        qhandler.should_insert_observer_for_output(\n            qconfig, model.training) and dtype not in (torch.bool, None, torch.float)\n    # TODO(future PR): move the following logic to\n    # should_insert_observer_for_output\n    should_insert_observer = should_insert_observer and \\\n        activation_is_statically_quantized(qconfig)\n\n    # we never insert observers to output of standalone module, we assume\n    # if needed, they are inserted inside the standalone module\n    should_insert_observer = should_insert_observer and \\\n        (not is_standalone_module)\n\n    if 
should_insert_observer:\n act_post_process_ctr = qconfig.activation\n if activation_is_int8_quantized(qconfig):\n act_post_process_ctr = qhandler.get_activation_ctr(\n qconfig,\n matched_pattern)\n observer = act_post_process_ctr()\n new_obs = insert_observer(node, node, observer, model, modules, graph)\n return new_obs\n else:\n return None\n\ndef maybe_insert_observers_before_graph_output(\n graph_output_node: Node,\n output_quantized_idxs: List[int],\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n qconfig_map: Dict[str, QConfigAny],\n model: torch.nn.Module,\n modules: Dict[str, torch.nn.Module],\n graph: Graph,\n) -> None:\n \"\"\"\n If the output needs to be quantized and there are any nodes\n in the output which are not already observed, inserts observers\n for those nodes.\n \"\"\"\n\n # TODO(future PR): update the output_quantized_idxs API to match\n # arbitrary data structures. There is always a single output, and\n # that output can have arbitrary nesting of values. List[int] is\n # not the right data type for this.\n assert output_quantized_idxs == [0] or output_quantized_idxs == [], \\\n 'unrecognized format of output_quantized_idxs'\n\n # Currently dequants are inserted in the convert step. So, we only\n # have to do anything if the output is hardcoded to be quantized\n if output_quantized_idxs == []:\n return\n # TODO(future PR): support more dtypes in model outputs, if necessary\n output_target_dtype = torch.quint8\n\n def _recursive_maybe_replace_node_with_obs(\n maybe_node: Argument,\n target_dtype: torch.dtype,\n node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n qconfig_map: Dict[str, QConfigAny],\n model: torch.nn.Module,\n modules: Dict[str, torch.nn.Module],\n graph: Graph,\n ) -> Argument:\n \"\"\"\n Navigate an arbitrary data structure of lists, tuples, dicts.\n For each container type, recurse on all inputs. 
Once any Node\n        is found, insert an observer if needed and do not recurse further.\n\n        For example, given a structure of\n\n          {'foo1': [[bar1]], 'foo2': {'foo3': [[[bar3]]]}}\n\n        we recurse down to bar1 and bar3, observe them if necessary,\n        and if we inserted an observer then replace the original node\n        with its observer.\n\n        Returns the data structure with all nodes needing observation being\n        replaced by their observers.\n        \"\"\"\n        if isinstance(maybe_node, Node):\n            # check dtype of this node\n            this_node_dtype = get_arg_target_dtype_as_output(\n                maybe_node, modules, node_name_to_target_dtype)\n            if this_node_dtype != target_dtype:\n                # insert observer\n                qconfig = qconfig_map.get(maybe_node.name)\n                # TODO(future PR): see if we need to allow specifying qconfig\n                # on output nodes, to remove the restriction below.\n                assert qconfig is not None, \\\n                    'Quantizing the output node without a qconfig is not supported'\n                observer_mod = qconfig.activation()\n                observer_node = insert_observer(\n                    maybe_node, maybe_node, observer_mod, model, modules, graph)\n                return observer_node\n            else:\n                return maybe_node\n        elif isinstance(maybe_node, (list, tuple)):\n            results = []\n            for inner_node in maybe_node:\n                results.append(_recursive_maybe_replace_node_with_obs(\n                    inner_node, target_dtype, node_name_to_target_dtype,\n                    qconfig_map, model, modules, graph))\n            if isinstance(maybe_node, list):\n                return results\n            else:\n                return tuple(results)\n        elif isinstance(maybe_node, dict):\n            results_dict = {}\n            for k, inner_v in maybe_node.items():\n                results_dict[k] = _recursive_maybe_replace_node_with_obs(\n                    inner_v, target_dtype, node_name_to_target_dtype,\n                    qconfig_map, model, modules, graph)\n            return results_dict\n        else:\n            # not a Node or a container we know how to recurse into: return\n            # the value unchanged (returning the undefined `results` here was a bug)\n            return maybe_node\n\n    new_args = []\n    for old_arg in graph_output_node.args:\n        new_args.append(\n            _recursive_maybe_replace_node_with_obs(\n                old_arg, output_target_dtype, node_name_to_target_dtype,\n                qconfig_map, model, modules, graph))\n\n    graph_output_node.args = new_args  # type: ignore[assignment]\n\n\ndef maybe_propagate_dtype_for_node(\n    node: Node,\n    target_dtype: torch.dtype,\n    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n    matches: Dict[str, MatchResult],\n) -> None:\n    \"\"\"\n    Assigns `target_dtype` to `node`. If `node` is a general tensor shape op\n    (see GeneralTensorShapeOpQuantizeHandler in quantization_patterns.py for more details)\n    also calls this function recursively on\n    the first argument, to propagate the dtype to the caller.\n    \"\"\"\n    node_name_to_target_dtype[node.name][\"input_activation_dtype\"] = target_dtype\n    node_name_to_target_dtype[node.name][\"output_activation_dtype\"] = target_dtype\n    # if this is a copy node, propagate to first arg\n    root_node, matched_nodes, pattern, qhandler, qconfig = matches.get(\n        node.name, (None, None, None, None, None))\n    if qhandler is not None and qhandler.is_general_tensor_shape_op():\n        prev_node = node.args[0]\n        if isinstance(prev_node, Node):\n            maybe_propagate_dtype_for_node(\n                prev_node, target_dtype, node_name_to_target_dtype, matches)\n\ndef propagate_dtypes_for_known_nodes(\n    graph: Graph,\n    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]],\n    matches: Dict[str, MatchResult],\n) -> None:\n    \"\"\"\n    Currently we assume that inputs to the graph are either `torch.float` or\n    `torch.quint8`, which is not always correct. For ops such as\n    `x.masked_fill(mask, value)`, we know that the dtype of `mask` is a\n    `BoolTensor`. 
Propagate this information throughout the graph.\n\n    Note: not all dtypes in the graph will be correct after this pass, but a\n    higher percentage of them will be correct. Hopefully in the future we can\n    replace this with a better way to reason about dtypes of tensors.\n    \"\"\"\n    for node in graph.nodes:\n        bool_arg_idxs = node_bool_tensor_arg_indexes(node)\n        for bool_arg_idx in bool_arg_idxs:\n            cur_node = node.args[bool_arg_idx]\n            maybe_propagate_dtype_for_node(\n                cur_node, torch.bool, node_name_to_target_dtype, matches)\n\ndef maybe_make_input_output_share_observers(\n    node: Node,\n    model: torch.nn.Module,\n    modules: Dict[str, torch.nn.Module],\n) -> bool:\n    \"\"\"\n    Ensures that we share an observer\n    for all input arguments as well as the output argument. In detail, given\n    a graph of\n\n      x0 -> obs0 -> op -> x2\n                  /\n      x1 -> obs1 /\n\n    where node obs0 points to observer instance observer0,\n    obs1 points to observer1 and obs2 points to observer2, we make nodes obs1\n    and obs2 point to observer0.\n    Returns: whether the operation succeeded or not\n    \"\"\"\n    first_arg = None\n    # find the first non-Tensor arg\n    for i in range(len(node.args)):\n        if isinstance(node.args[i], (Node, list, tuple)):\n            first_arg = node.args[i]\n            break\n\n    # if there is no non-Tensor arg, return directly\n    if first_arg is None:\n        return False\n\n    if isinstance(first_arg, (list, tuple)):\n        first_arg_arg = first_arg[0]\n    elif isinstance(first_arg, Node):\n        first_arg_arg = first_arg\n    else:\n        return False\n\n    # if we have a graph such as\n    # observed_node -> non_observed_node -> cat\n    # we need to navigate up to the first observer\n    iteration_guard = 0\n    while not is_activation_post_process_node(first_arg_arg, modules):\n        if not isinstance(first_arg_arg, Node):\n            return False\n        # did not find an activation_post_process for the op\n        if first_arg_arg.op == \"placeholder\":\n            return False\n        # trace back the args until we find the first Tensor/Node\n        trace_back_node = None\n        for i in range(len(first_arg_arg.args)):\n            trace_back_node = first_arg_arg.args[i]\n            if isinstance(trace_back_node, Node):\n                break\n        if trace_back_node is None:\n            return False\n        first_arg_arg = trace_back_node\n\n        iteration_guard += 1\n        if iteration_guard > 10000:\n            raise AssertionError('Unable to find observer of previous node')\n\n    assert isinstance(first_arg_arg, Node)\n    target_to_use = first_arg_arg.target\n    assert isinstance(target_to_use, str)\n    obs_mod_to_use = modules[target_to_use]\n\n    if isinstance(first_arg, (list, tuple)):\n        # set all other input observer nodes to use that module\n        for input_idx, input_arg in enumerate(first_arg):\n            if input_idx == 0:\n                continue\n            iteration_guard = 0\n            while not is_activation_post_process_node(input_arg, modules):\n                input_arg = input_arg.args[0]\n                iteration_guard += 1\n                if iteration_guard > 10000:\n                    raise AssertionError('Unable to find observer of previous node')\n\n            parent_name, name = _parent_name(input_arg.target)\n            setattr(modules[parent_name], name, obs_mod_to_use)\n\n    # set the output observer node to use that module\n    for output_obs_node, _ in node.users.items():\n        assert is_activation_post_process_node(output_obs_node, modules)\n        parent_name, name = _parent_name(output_obs_node.target)\n        setattr(modules[parent_name], name, obs_mod_to_use)\n\n    # TODO(future PR): delete the orphaned observer modules\n    return True\n\ndef remove_output_observer(\n        node: Node,\n        model: torch.nn.Module,\n        modules: Dict[str, torch.nn.Module]):\n    items = list(node.users.items())\n    for output_obs_node, _ in items:\n        assert 
is_activation_post_process_node(output_obs_node, modules)\n        output_obs_node.replace_all_uses_with(node)\n        model.graph.erase_node(output_obs_node)  # type: ignore[union-attr, operator]\n\ndef swap_custom_module_to_observed(\n        node: Node,\n        qconfig: QConfigAny,\n        modules: Dict[str, torch.nn.Module],\n        prepare_custom_config_dict: Dict[str, Any]):\n    custom_module = modules[node.target]  # type: ignore[index]\n    custom_module_class_mapping = prepare_custom_config_dict.get(\n        \"float_to_observed_custom_module_class\", {})\n    observed_custom_module_class = \\\n        get_swapped_custom_module_class(\n            custom_module, custom_module_class_mapping, qconfig)\n    observed_custom_module = \\\n        observed_custom_module_class.from_float(custom_module)\n    parent_name, name = _parent_name(node.target)\n    setattr(modules[parent_name], name, observed_custom_module)\n\ndef insert_observers_for_model(\n    model: GraphModule,\n    modules: Dict[str, torch.nn.Module],\n    matches: Dict[str, MatchResult],\n    qconfig_map: Dict[str, QConfigAny],\n    graph: Graph,\n    prepare_custom_config_dict: Dict[str, Any],\n    equalization_config_map: Dict[str, Any],\n    input_quantized_idxs: List[int],\n    output_quantized_idxs: List[int],\n    backend_config_dict: Optional[Dict[str, Any]],\n    observed_node_names: Set[str],\n) -> Optional[Node]:\n    \"\"\"\n    Inserts observers, using the following high level algorithm:\n\n    For each node in the graph:\n      1. determine the target dtype of this node in the quantized graph, and save\n           it for future steps\n      2. determine the target dtype of all args and kwargs of this node\n      3. if any arg or kwarg's target dtype does not match the current node's\n           dtype, insert an observer\n      4. if the current node needs an output observer, insert it\n\n    For example:\n\n    - starting graph:\n        x0 -> linear -> x1\n\n    - observed graph after processing x0:\n        x0(fp32)\n\n    - observed graph after processing linear:\n        x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8)\n\n    - observed graph after processing x1:\n        x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8) -> x1\n\n    After a node is processed, the naive observer placement is guaranteed to be\n    complete for that node and all of its predecessors. 
There can be future\n    passes which optimize the graph by deduplicating observers, etc.\n    \"\"\"\n\n    # name of Node in original FX Graph to the target dtype information\n    # that's derived from qconfig for the Node, for example, if we have\n    # a conv2d node that has a qconfig\n    # {\n    #   # information for input and bias node omitted\n    #   # for getattr node\n    #   # weight = getattr(self, 'weight')\n    #   'weight': {\n    #      'output_activation_dtype': torch.float,\n    #   }\n    #   # for conv2d node\n    #   # conv2d = call_function[target=torch.nn.functional.conv2d](\n    #   #            args=(input, weight, bias))\n    #   'conv2d': {\n    #       'input_activation_dtype': torch.quint8,\n    #       'weight_dtype': torch.qint8,\n    #       'bias_dtype': torch.float,\n    #       'output_activation_dtype': torch.quint8,\n    #   }\n    # }\n    #\n    # TODO: rename this to node_name_to_target_dtype_info\n    node_name_to_target_dtype: Dict[str, Dict[str, Optional[torch.dtype]]] = defaultdict(dict)\n    cache_for_no_tensor_check: Dict[Node, bool] = dict()\n\n    inputs_seen_counter = 0\n    outputs_seen_counter = 0\n    results_node = None\n\n    # first, populate the dtype map based only on qconfig and qhandler\n    # this assumes:\n    # graph inputs are fp32 by default, and int8 where overridden\n    # other nodes' output dtype is specified by the qconfig\n    modules = dict(model.named_modules(remove_duplicate=False))\n    for node in model.graph.nodes:\n        root_node, matched_nodes, pattern, qhandler, qconfig = matches.get(\n            node.name, (None, None, None, None, None))\n        node_name_to_target_dtype[node.name] = get_target_activation_dtype_for_node(\n            node, qconfig, inputs_seen_counter, outputs_seen_counter,\n            input_quantized_idxs, output_quantized_idxs, qhandler,\n            modules, cache_for_no_tensor_check)\n\n    # second, for nodes with known input dtypes, propagate them throughout the\n    # graph. For example, if there is a call such as\n    #   x1 = x0.masked_fill(mask, 1)\n    # we propagate the type of mask to be torch.bool\n    propagate_dtypes_for_known_nodes(\n        model.graph, node_name_to_target_dtype, matches)\n\n    # After this point, the current node and all of its arguments\n    # have a dtype assigned. 
Now, we insert observers for inputs\n # of this node (if needed for this node), and the output of this node\n # (if needed for this node).\n\n # Since we are mutating the graph as we go, we iterate over the original\n # nodes before observer insertion, instead of model.graph.nodes.\n nodes_before_observation = list(model.graph.nodes)\n\n for node in nodes_before_observation:\n\n if node.op == 'placeholder':\n # if a graph input is in fp32, it does not need observation\n # if a graph input is in int8, we assume the observation happens\n # outside of the graph, and no additional observation is needed\n pass\n\n elif node.op in ('call_module', 'call_method', 'call_function', 'output'):\n # check for matches\n root_node, matched_nodes, pattern, qhandler, qconfig = matches.get(\n node.name, (None, None, None, None, None))\n equalization_qconfig = equalization_config_map.get(node.name, None)\n\n this_node_dtype = node_name_to_target_dtype[node.name]\n output_not_a_tensor = this_node_dtype is None\n # TODO(future PR): consider stopping matching getitem\n is_getitem = node.op == 'call_function' and \\\n node.target == operator.getitem\n\n skip_inserting_observers = (\n (qconfig is None) or\n output_not_a_tensor or\n is_getitem\n ) and (\n not node.op == 'output'\n )\n\n is_supported_by_backend = is_pattern_dtype_config_supported_by_backend(\n pattern, matched_nodes, node_name_to_target_dtype, backend_config_dict)\n\n if not skip_inserting_observers and is_supported_by_backend:\n modules = dict(model.named_modules(remove_duplicate=False))\n if node.op != 'output':\n assert matched_nodes is not None\n # add matched nodes to the observed node name set\n for n in matched_nodes:\n observed_node_names.add(n.name)\n\n # This is currently only used for equalization.\n # Checks if the current node is in a branch in which the two\n # first layers are both being quantized.\n #\n # ex. conv2\n # /\n # x -> conv1\n #\n # If this is the case, we will not apply equalization to the\n # initial two layers.\n is_quantized_branch = False\n if (\n len(node.args) > 0 and\n isinstance(node.args[0], Node) and\n len(node.args[0].users) > 1\n ):\n for user in node.args[0].users:\n # Checks if there exists another user being quantized\n is_user_quantized = (\n qconfig_map.get(user.name, None) is not None or\n (user.op == 'call_module' and isinstance(modules[str(user.target)], ObserverBase))\n )\n if user != node and is_user_quantized:\n is_quantized_branch = True\n\n # this modifies node inplace\n maybe_insert_input_observers_for_node(\n node, qconfig, model, modules, graph,\n node_name_to_target_dtype,\n qhandler, prepare_custom_config_dict)\n\n # Insert equalization input observers if needed\n maybe_insert_input_equalization_observers_for_node(\n node, equalization_qconfig, model, modules, graph,\n node_name_to_target_dtype, is_quantized_branch)\n\n is_last_node_of_pattern = root_node is node\n is_general_tensor_value_op = \\\n (qhandler is not None and qhandler.is_general_tensor_value_op())\n\n is_general_tensor_shape_op = \\\n (qhandler is not None and qhandler.is_general_tensor_shape_op())\n\n if is_last_node_of_pattern:\n # this returns the new observer node if it was needed\n maybe_output_obs_node = maybe_insert_output_observer_for_node(\n node, model, modules, graph, matches,\n node_name_to_target_dtype, pattern, qhandler)\n if maybe_output_obs_node is not None:\n # Update users of original node to use the output observer\n # instead. 
For example, change\n                            #\n                            #   cur_node -> next_node\n                            #\n                            # to\n                            #\n                            #   cur_node -> obs -> next_node\n                            #\n                            # so that next_node reads its input from the observer.\n                            # We need to save orig users before updating uses because\n                            # the list of users will change as we update uses\n                            orig_users = list(node.users.keys())\n                            for user_node in orig_users:\n                                if user_node is maybe_output_obs_node:\n                                    continue\n                                user_node.replace_input_with(node, maybe_output_obs_node)\n\n                            # for general tensor value ops, we modify the graph\n                            # to make all inputs and outputs use the first input's\n                            # observer\n                            if is_general_tensor_value_op or is_general_tensor_shape_op:\n                                if not maybe_make_input_output_share_observers(node, model, modules):\n                                    remove_output_observer(node, model, modules)\n\n                            if isinstance(qhandler, CustomModuleQuantizeHandler):\n                                swap_custom_module_to_observed(node, qconfig, modules, prepare_custom_config_dict)\n\n                else:  # output\n                    maybe_insert_observers_before_graph_output(\n                        node, output_quantized_idxs,\n                        node_name_to_target_dtype, qconfig_map,\n                        model, modules, graph)\n\n        #\n        # After this point, the current node has input and output observers\n        # that it needs for itself inserted.\n        #\n\n        # increment the counters, so future inputs and outputs are assigned\n        # correct dtypes\n        if node.op == 'placeholder':\n            inputs_seen_counter += 1\n        elif node.op == 'output':\n            outputs_seen_counter += 1\n            results_node = node\n\n    return results_node\n\ndef run_prepare_fx_on_standalone_modules(\n    model: torch.nn.Module,\n    modules: Dict[str, torch.nn.Module],\n    matches: Any,\n    prepare_custom_config_dict: Dict[str, Any],\n) -> None:\n    \"\"\"\n    Runs prepare_fx on each standalone module. Note: this does\n    not modify the graph, it just replaces the unobserved modules with\n    their observed versions.\n    \"\"\"\n    for (\n        node_name,\n        (root_node, matched_nodes, pattern, qhandler, qconfig),\n    ) in matches.items():\n        if qhandler is None:\n            continue\n        elif not isinstance(qhandler, StandaloneModuleQuantizeHandler):\n            continue\n\n        sm_qconfig_dict, sm_prepare_config_dict = \\\n            get_standalone_module_configs(\n                root_node, modules, prepare_custom_config_dict, qconfig)\n\n        standalone_module = modules[root_node.target]\n        prepare = \\\n            torch.ao.quantization.quantize_fx._prepare_standalone_module_fx  # type: ignore[attr-defined]\n        observed_standalone_module = \\\n            prepare(standalone_module, sm_qconfig_dict, sm_prepare_config_dict)\n        preserved_attributes = \\\n            set(sm_prepare_config_dict.get(\"preserved_attributes\", []))\n        observed_standalone_module = ObservedStandaloneGraphModule(\n            observed_standalone_module, observed_standalone_module.graph,\n            preserved_attributes)\n        parent_name, name = _parent_name(root_node.target)\n        setattr(modules[parent_name], name,\n                observed_standalone_module)\n        modules[root_node.target] = observed_standalone_module\n\ndef save_state(\n    observed: GraphModule,\n    qconfig_map: Dict[str, QConfigAny],\n    node_name_to_scope: Dict[str, Tuple[str, type]],\n    patterns: Dict[Pattern, QuantizeHandler],\n    prepare_custom_config_dict: Dict[str, Any],\n    equalization_qconfig_map: Dict[str, Any],\n    qconfig_dict: Dict[str, Dict[Any, Any]],\n    is_training: bool,\n    observed_node_names: Set[str],\n) -> None:\n    observed._patterns = patterns  # type: ignore[assignment]\n    observed._qconfig_map = qconfig_map  # type: ignore[assignment]\n    observed._prepare_custom_config_dict = \\\n        prepare_custom_config_dict  # type: ignore[assignment]\n    observed._node_name_to_scope = node_name_to_scope  # type: ignore[assignment]\n    observed._equalization_qconfig_map = equalization_qconfig_map  # type: 
ignore[assignment]\n    observed._qconfig_dict = qconfig_dict  # type: ignore[assignment]\n    observed._is_training = is_training  # type: ignore[assignment]\n    observed._observed_node_names = observed_node_names  # type: ignore[assignment]\n\ndef prepare(\n        model: GraphModule,\n        qconfig_dict: Any,\n        node_name_to_scope: Dict[str, Tuple[str, type]],\n        prepare_custom_config_dict: Optional[Dict[str, Any]] = None,\n        equalization_qconfig_dict: Optional[Dict[str, Any]] = None,\n        backend_config_dict: Optional[Dict[str, Any]] = None,\n        is_standalone_module: bool = False) -> ObservedGraphModule:\n    \"\"\" standalone_module means it is a submodule that is not inlined in the\n    parent module, and will be quantized separately as one unit.\n\n    How the standalone module is observed is specified by `input_quantized_idxs` and\n    `output_quantized_idxs` in the prepare_custom_config for the standalone module.\n    Args:\n        node_name_to_scope: mapping from node name to the scope of the module which contains the node.\n        The scope is a tuple of fully qualified path of the module and the type of the module\n    Returns:\n        model(GraphModule): prepared standalone module\n        attributes:\n            _standalone_module_input_quantized_idxs(List[Int]): a list of\n                indexes for the graph input that is expected to be quantized,\n                same as input_quantized_idxs configuration provided\n                for the standalone module\n            _standalone_module_output_quantized_idxs(List[Int]): a list of\n                indexes for the graph output that is quantized,\n                same as output_quantized_idxs configuration provided\n                for the standalone module\n    \"\"\"\n    if prepare_custom_config_dict is None:\n        prepare_custom_config_dict = {}\n    if equalization_qconfig_dict is None:\n        equalization_qconfig_dict = {}\n\n    additional_quant_patterns = \\\n        prepare_custom_config_dict.get(\"additional_quant_pattern\", {})\n    # mapping from a tuple of nodes in reverse order to uninitialized\n    # QuantizeHandler subclass. 
For example,\n # {\n # # match a single node\n # (<class 'torch.nn.modules.conv.Conv3d'>:\n # <class 'torch.ao.quantization.fx.quantize.ConvRelu'>),\n # # match multiple nodes in reverse order\n # ((<function relu at 0x7f766a7360d0>, <built-in function add>):\n # <class 'torch.ao.quantization.fx.quantize.Add'>),\n # }\n patterns: Dict[Pattern, QuantizeHandler] = {}\n if backend_config_dict is None:\n quant_patterns = get_default_quant_patterns()\n patterns = get_combined_dict(\n quant_patterns, additional_quant_patterns)\n else:\n patterns = get_pattern_to_quantize_handlers(backend_config_dict)\n\n # TODO: make WEIGHT_INDEX_DICT and BIAS_INDEX_DICT an argument to the functions that needs them\n # TODO: refactor this part to return WEIGHT_INDEX_DICT and BIAS_INDEX_DICT\n pattern_to_input_type_to_index = get_pattern_to_input_type_to_index(backend_config_dict)\n for pattern, input_type_to_index in pattern_to_input_type_to_index.items():\n for input_type, index in input_type_to_index.items():\n index_dicts = {\n \"weight\": WEIGHT_INDEX_DICT,\n \"bias\": BIAS_INDEX_DICT,\n \"input\": {} # not used right now\n }\n assert input_type in index_dicts.keys(), \\\n f\"input type must be one of {index_dicts.keys()} but got: {input_type}\"\n index_dict = index_dicts[input_type]\n if pattern in index_dict: # type: ignore[operator]\n index_dict[pattern].append(index) # type: ignore[index]\n else:\n index_dict[pattern] = [index] # type: ignore[index]\n\n convert_dict_to_ordered_dict(qconfig_dict)\n convert_dict_to_ordered_dict(equalization_qconfig_dict)\n flattened_qconfig_dict = get_flattened_qconfig_dict(qconfig_dict)\n # TODO: support regex as well\n propagate_qconfig_(model, flattened_qconfig_dict)\n\n if model.training:\n additional_qat_module_mapping = prepare_custom_config_dict.get(\n \"additional_qat_module_mapping\", {})\n qat_swap_modules(model, additional_qat_module_mapping)\n qconfig_dict = update_qconfig_for_qat(qconfig_dict, additional_qat_module_mapping)\n\n qconfig_dict = update_qconfig_for_fusion(model, qconfig_dict)\n equalization_qconfig_dict = update_qconfig_for_fusion(model, equalization_qconfig_dict)\n\n # mapping from fully qualified module name to module instance\n # for example,\n # {\n # '': Model(...),\n # 'linear': Linear(...),\n # 'linear.weight_fake_quant': PerChannelMinMaxObserver(...),\n # }\n modules = dict(model.named_modules())\n\n # fill qconfig_map, a map from node name to qconfig, used in find_matches\n equalization_qconfig_map = generate_qconfig_map(model, modules, model.graph, equalization_qconfig_dict, node_name_to_scope)\n qconfig_map = generate_qconfig_map(model, modules, model.graph, qconfig_dict, node_name_to_scope)\n\n # match the patterns that will get quantized\n standalone_module_name_configs = prepare_custom_config_dict.get(\n \"standalone_module_name\", [])\n standalone_module_class_configs = prepare_custom_config_dict.get(\n \"standalone_module_class\", [])\n\n standalone_module_names = [config[0] for config in standalone_module_name_configs]\n standalone_module_classes = [config[0] for config in standalone_module_class_configs]\n custom_module_classes = get_custom_module_class_keys(\n prepare_custom_config_dict, \"float_to_observed_custom_module_class\")\n matches = find_matches(\n model.graph, modules, patterns, qconfig_map, standalone_module_names,\n standalone_module_classes, custom_module_classes)\n\n input_quantized_idxs: List[int] = prepare_custom_config_dict.get(\n \"input_quantized_idxs\", [])\n output_quantized_idxs: List[int] = 
prepare_custom_config_dict.get(\n        \"output_quantized_idxs\", [])\n\n    run_prepare_fx_on_standalone_modules(\n        model, modules, matches, prepare_custom_config_dict)\n\n    # record names for the set of observed nodes, so that in the convert step\n    # we know whether we need to convert a floating point module to a reference\n    # quantized module or not\n    observed_node_names: Set[str] = set()\n\n    result_node = insert_observers_for_model(\n        model, modules, matches, qconfig_map,\n        model.graph, prepare_custom_config_dict,\n        equalization_qconfig_map,\n        input_quantized_idxs,\n        output_quantized_idxs,\n        backend_config_dict,\n        observed_node_names)\n\n    save_state(model, qconfig_map, node_name_to_scope, patterns,\n               prepare_custom_config_dict, equalization_qconfig_map, qconfig_dict, model.training, observed_node_names)\n\n    preserved_attributes = set(prepare_custom_config_dict.get(\"preserved_attributes\", []))\n    model = ObservedGraphModule(model, model.graph, preserved_attributes)\n    if is_standalone_module:\n        assert result_node is not None\n        assert isinstance(result_node.args[0], Node), \\\n            \"standalone module only supports returning simple value currently\"\\\n            \"(not tuple, dict etc.)\"\n        # these inputs are observed in parent\n        # converting List[int] to Tensor since module attribute is\n        # Union[Tensor, Module]\n        model._standalone_module_input_quantized_idxs = \\\n            torch.tensor(input_quantized_idxs)\n        model._standalone_module_output_quantized_idxs = torch.tensor(output_quantized_idxs)\n    return model\n" ]
[ [ "torch.ao.quantization.quantize.convert", "torch.tensor" ] ]
NTAvanHoeffelen/DAIF_CarRacing
[ "1ec51fb0d2de1541df9ca1b1d9d8a7d1130fcf8b" ]
[ "daif_CarRacing.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport torchvision.transforms as T\nimport car_racing as cr\nimport replay_memory as rm\nimport data_collector as dc\nimport random\n\nclass VAE(nn.Module):\n # In part taken from:\n # https://github.com/pytorch/examples/blob/master/vae/main.py\n\n def __init__(self, n_screens, n_latent_states, lr=1e-5, device='cpu'):\n super(VAE, self).__init__()\n \n self.device = device\n \n self.n_screens = n_screens\n self.n_latent_states = n_latent_states\n \n # The convolutional encoder\n self.encoder = nn.Sequential( \n nn.Conv2d(self.n_screens, 32, 4, 2), # (1, 8, 42, 42) --> (1, 32, 20, 20)\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n \n nn.Conv2d(32, 64, 4, 2), # (1, 32, 20, 20) --> (1, 64, 9, 9)\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n \n nn.Conv2d(64, 128, 5, 2), # (1, 64, 9, 9) --> (1, 128, 3, 3)\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n \n nn.Conv2d(128, 256, 3, 2), # (1, 128, 3, 3) --> (1, 256, 1, 1)\n nn.ReLU(inplace=True),\n \n ).to(self.device)\n \n # The size of the encoder output\n self.encoder_output_shape = (256, 1, 1)\n self.encoder_output_size = np.prod(self.encoder_output_shape)\n \n # The convolutional decoder\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(256, 128, 3, 2), # (1, 256, 1, 1) --> (1, 128, 3, 3)\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n \n nn.ConvTranspose2d(128, 64, 5, 2), # (1, 128, 3, 3) --> (1, 64, 9, 9)\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n \n nn.ConvTranspose2d(64, 32, 4, 2), # (1, 64, 9, 9) --> (1, 32, 20, 20)\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n \n nn.ConvTranspose2d(32, self.n_screens, 4, 2), # (1, 32, 20, 20) --> (1, n_screens, 42, 42)\n nn.BatchNorm2d(self.n_screens),\n nn.ReLU(inplace=True),\n \n nn.Sigmoid()\n ).to(self.device)\n \n # Fully connected layers connected to encoder\n self.fc1 = nn.Linear(self.encoder_output_size, self.encoder_output_size // 2) # 1024 --> 512\n self.fc2_mu = nn.Linear(self.encoder_output_size // 2, self.n_latent_states) # 512 --> 128\n self.fc2_logvar = nn.Linear(self.encoder_output_size // 2, self.n_latent_states) # 512 --> 128\n \n # Fully connected layers connected to decoder\n self.fc3 = nn.Linear(self.n_latent_states, self.encoder_output_size // 2) # 128 --> 512\n self.fc4 = nn.Linear(self.encoder_output_size // 2, self.encoder_output_size) # 512 --> 1024\n \n self.optimizer = optim.Adam(self.parameters(), lr)\n \n self.to(self.device)\n\n def encode(self, x):\n # Deconstruct input x into a distribution over latent states\n conv = self.encoder(x)\n h1 = F.relu(self.fc1(conv.view(conv.size(0), -1)))\n mu, logvar = self.fc2_mu(h1), self.fc2_logvar(h1)\n return mu, logvar\n\n def reparameterize(self, mu, logvar):\n # Apply reparameterization trick\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def decode(self, z, batch_size=1):\n # Reconstruct original input x from the (reparameterized) latent states\n h3 = F.relu(self.fc3(z))\n deconv_input = self.fc4(h3)\n deconv_input = deconv_input.view([batch_size] + [dim for dim in self.encoder_output_shape])\n y = self.decoder(deconv_input)\n return y\n\n def forward(self, x, batch_size=1):\n # Deconstruct and then reconstruct input x\n mu, logvar = self.encode(x)\n z = self.reparameterize(mu, logvar)\n recon = self.decode(z, batch_size)\n return recon, mu, logvar\n\n # Reconstruction + KL divergence losses summed over 
all elements and batch\n def loss_function(self, recon_x, x, mu, logvar, batch=True):\n if batch:\n BCE = F.binary_cross_entropy(recon_x, x, reduction='none')\n BCE = torch.sum(BCE, dim=(1, 2, 3))\n \n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)\n else:\n BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n \n return BCE + KLD\n\nclass Model(nn.Module):\n \n def __init__(self, n_inputs, n_outputs, n_hidden=64, lr=1e-3, softmax = False, device='cpu'):\n super(Model, self).__init__()\n \n self.n_inputs = n_inputs # Number of inputs\n self.n_hidden = n_hidden # Number of hidden units\n self.n_outputs = n_outputs # Number of outputs\n self.softmax = softmax\n \n self.fc1 = nn.Linear(self.n_inputs, self.n_hidden) # Hidden layer\n self.fc2 = nn.Linear(self.n_hidden, self.n_outputs) # Output layer\n \n self.optimizer = optim.Adam(self.parameters(), lr) # Adam optimizer\n \n self.device = device\n self.to(self.device)\n \n def forward(self, x):\n # Define the forward pass:\n x = x.to(self.device)\n h1 = F.relu(self.fc1(x))\n \n if self.softmax: # If true apply a softmax function to the output\n y = F.softmax(self.fc2(h1), dim=-1).clamp(min=1e-9, max=1-1e-9) # This is used to get a proper distribution over all actions with a sum of 1.\n else:\n y = self.fc2(h1)\n \n return y\n \nclass Agent():\n \n def __init__(self, device = 'cuda'):\n \n self.run_id = 1 \n self.device = device\n self.env = cr.CarRacing()\n self.render_view = False # Set to True if you want to see what it is doing\n self.print_timer = 10 # Print average result of Agent every '...' 
episodes\n        \n        self.height = self.width = 42 # observation size (height and width)\n        self.color = 1 # number of colors\n        self.n_screens = 8 # number of observations stacked\n        \n        self.obs_shape = (self.height, self.width) \n        self.obs_size = int(np.prod(self.obs_shape)) # The size of the observation\n        self.linear = False # True if the input is a vector\n\n        # Initialize last observations array\n        self.obs_batch = np.array([np.zeros((self.height, self.width), dtype = 'float32') for i in range(self.n_screens)])\n        \n        # Discretization of continuous action space for CarRacing-v0\n        # [0] = steering, [1] = accelerating, [2] = braking\n        self.discrete_actions = {0 : np.array([0,0,0]), # do nothing\n                                 1 : np.array([-1,0,0]), # steer sharp left\n                                 2 : np.array([1,0,0]), # steer sharp right\n                                 3 : np.array([-0.5,0,0]), # steer left\n                                 4 : np.array([0.5,0,0]), # steer right\n                                 5 : np.array([0,1,0]), # accelerate 100%\n                                 6 : np.array([0,0.5,0]), # accelerate 50%\n                                 7 : np.array([0,0.25,0]), # accelerate 25%\n                                 8 : np.array([0,0,1]), # brake 100%\n                                 9 : np.array([0,0,0.5]), # brake 50%\n                                 10 : np.array([0,0,0.25])} # brake 25%\n        \n        # The number of actions available to the agent\n        self.n_actions = len(self.discrete_actions) \n        \n        \n        self.freeze_cntr = 0 # Keeps track of when to (un)freeze the target network\n        self.freeze_period = 50 # How long the network is frozen\n        self.batch_size = 250\n        self.freeze_vae = True\n        \n        self.memory_capacity = 100000 # memory size\n        self.VAE_memory_capacity = 10000 # VAE pre-train memory size\n        self.n_episodes = 1000 # number of episodes\n        self.n_play_episodes = 150 # number of episodes used for average reward test\n        self.max_length_episode = 1000 # max number of steps an episode of the CarRacing environment lasts\n        \n        self.gamma = 12 # Precision parameter\n        self.Beta = 0.99 # Discount factor\n        self.alpha = 18000 # VAE loss scaler\n\n        self.n_hidden_trans = 512 # number of hidden units transition network\n        self.lr_trans = 1e-3 # learning rate transition network\n        self.n_hidden_pol = 512 # number of hidden units policy network\n        self.lr_pol = 1e-4 # learning rate policy network\n        self.n_hidden_val = 512 # number of hidden units value network\n        self.lr_val = 1e-5 # learning rate value network\n        \n        self.n_latent_states = 128 # size latent space\n        self.lr_vae = 5e-6 # learning rate VAE\n        self.vae_data = 'pre_train_data/vae_data_10000.pt'\n        self.vae_plot = False\n        self.pre_train_vae = True # if True pre-trains the VAE\n        \n        self.load_pre_trained_vae = True\n        self.pt_vae_load_path = \"networks/pre_trained_vae/vae_daif_CarRacing_{}_end.pth\".format(self.n_latent_states)\n        \n        self.load_network = True\n        self.network_load_path = \"networks/daif/daif_CarRacing_{}net_r{}.pth\".format(\"{}\", self.run_id)\n        \n        # Initialize the networks:\n        self.vae = VAE(self.n_screens, self.n_latent_states, lr = self.lr_vae, device=self.device)\n        self.transition_net = Model(self.n_latent_states*2 + 1, self.n_latent_states, self.n_hidden_trans, lr=self.lr_trans, device=self.device) # + 1, for 1 action\n        self.policy_net = Model(self.n_latent_states*2, self.n_actions, self.n_hidden_pol, lr=self.lr_pol, softmax=True, device=self.device)\n        self.value_net = Model(self.n_latent_states*2, self.n_actions, self.n_hidden_val, lr=self.lr_val, device=self.device)\n        self.target_net = Model(self.n_latent_states*2, self.n_actions, self.n_hidden_val, lr=self.lr_val, device=self.device)\n        self.target_net.load_state_dict(self.value_net.state_dict())\n        \n        if self.load_pre_trained_vae: # If true: load a pre-trained VAE\n            
self.vae.load_state_dict(torch.load(self.pt_vae_load_path, map_location=self.device))\n            self.vae.eval()\n            print(\"Successfully loaded a pre-trained VAE\")\n            \n        if self.load_network: # If true: load the networks given paths\n            self.vae.load_state_dict(torch.load(self.network_load_path.format(\"vae\"), map_location=self.device))\n            self.vae.eval()\n            self.transition_net.load_state_dict(torch.load(self.network_load_path.format(\"trans\"), map_location=self.device))\n            self.transition_net.eval()\n            self.policy_net.load_state_dict(torch.load(self.network_load_path.format(\"pol\"), map_location=self.device))\n            self.policy_net.eval()\n            self.value_net.load_state_dict(torch.load(self.network_load_path.format(\"val\"), map_location=self.device))\n            self.value_net.eval()\n            print(\"Successfully loaded networks\")\n        \n        \n        # Initialize the replay memory\n        self.memory = rm.ReplayMemory(self.memory_capacity, self.obs_shape, self.obs_size, self.linear, device=self.device)\n        if self.pre_train_vae:\n            self.VAE_memory = rm.ReplayMemory(self.VAE_memory_capacity, self.obs_shape, self.obs_size, self.linear, device=self.device)\n        \n        # When sampling from memory at index i, obs_indices indicates that we want observations with indices i-obs_indices, works the same for the others\n        self.obs_indices = [(self.n_screens+1)-i for i in range(self.n_screens+2)]\n        self.action_indices = [2, 1]\n        self.reward_indices = [1]\n        self.done_indices = [0]\n        self.max_n_indices = max(max(self.obs_indices, self.action_indices, self.reward_indices, self.done_indices)) + 1\n        \n        # Used to pre-process the observations (screens) \n        self.preprocess = T.Compose([T.ToPILImage(),\n                    T.Grayscale(num_output_channels=1),\n                    T.Resize((self.height, self.width)),\n                    T.ToTensor()])\n        \n        self.save_results = True\n        self.save_network = True\n        self.results_path = \"results/daif/daif_CarRacing_results_r{}.npz\".format(self.run_id)\n        self.network_save_path = \"networks/daif/daif_CarRacing_{}net_r{}.pth\".format(\"{}\",self.run_id)\n        \n        \n        self.log_path = \"logs/daif_CarRacing_log_r{}.txt\".format(self.run_id)\n        self.record = open(self.log_path, \"a\")\n        self.record.write(\"\\n\\n-----------------------------------------------------------------\\n\")\n        self.record.write(\"File opened at {}\\n\".format(datetime.datetime.now())) \n        \n        \n    def get_screen(self, device='cuda'):\n        # Get the observation, reshape it and put the axes in the right order.\n        screen = self.env.render(mode='state_pixels') \n        screen = screen.reshape(96, 96, 3)\n        screen = screen.transpose((2, 1, 0))\n        \n        # strips off the bottom part of the image, which contains a black bar with the accumulated reward and control value bars, and makes the width equal to the height\n        screen = screen[:, 6:90, int(96*0):int(96 * 0.875)]\n        \n        # Convert to float and normalize\n        screen = np.ascontiguousarray(screen, dtype=np.float32) / 255\n        \n        # Add resize\n        screen = self.preprocess(torch.from_numpy(np.flip(screen, axis=0).copy()))\n        \n        return screen\n\n    \"\"\" stack the X latest observations into one batch \"\"\"\n    def get_obs_batch(self, obs):\n        # add new observation to obs_batch, remove oldest.\n        self.obs_batch = np.concatenate((obs.numpy(), self.obs_batch[0:self.n_screens-1]), axis = 0)\n        \n        # resize to (1, self.n_screens, 42, 42) and convert to torch\n        obs_batch2 = torch.from_numpy(np.flip(self.obs_batch, axis=0).copy()).unsqueeze(0).to(self.device)\n        \n        return obs_batch2\n\n    def select_action(self, obs):\n        with torch.no_grad():\n            action_index = 0\n            \n            if self.memory.push_count < self.batch_size + self.n_screens:\n                action_index = 
random.randint(0, self.n_actions - 1)\n            else:\n                # Derive a distribution over states from the last n observations (screens):\n                prev_n_obs = self.get_obs_batch(obs)\n                state_mu, state_logvar = self.vae.encode(prev_n_obs)\n                x = torch.cat((state_mu, torch.exp(state_logvar)), dim=1) # does not work?\n                policy = self.policy_net(x)\n                action_index = torch.multinomial(policy, 1).item()\n            \n        return action_index\n    \n    def get_mini_batches(self):\n        # Retrieve transition data in mini batches\n        all_obs_batch, all_actions_batch, reward_batch_t1, done_batch_t2 = self.memory.sample(\n                self.obs_indices, self.action_indices, self.reward_indices,\n                self.done_indices, self.max_n_indices, self.batch_size)\n        \n        # Retrieve a batch of observations for 3 consecutive points in time\n        obs_batch_t0 = all_obs_batch[:, 0:self.n_screens, :, :]\n        obs_batch_t1 = all_obs_batch[:, 1:self.n_screens+1, :, :]\n        obs_batch_t2 = all_obs_batch[:, 2:self.n_screens+2, :, :]\n        \n        # Retrieve a batch of distributions over states for 3 consecutive points in time\n        state_mu_batch_t0, state_logvar_batch_t0 = self.vae.encode(obs_batch_t0)\n        state_mu_batch_t1, state_logvar_batch_t1 = self.vae.encode(obs_batch_t1)\n        state_mu_batch_t2, state_logvar_batch_t2 = self.vae.encode(obs_batch_t2)\n        \n        # Combine the sufficient statistics (mean and variance) into a single vector\n        state_batch_t0 = torch.cat((state_mu_batch_t0, torch.exp(state_logvar_batch_t0)), dim=1)\n        state_batch_t1 = torch.cat((state_mu_batch_t1, torch.exp(state_logvar_batch_t1)), dim=1)\n        state_batch_t2 = torch.cat((state_mu_batch_t2, torch.exp(state_logvar_batch_t2)), dim=1)\n        \n        # Reparameterize the distribution over states for time t1\n        z_batch_t1 = self.vae.reparameterize(state_mu_batch_t1, state_logvar_batch_t1)\n        \n        # Retrieve the agent's action history for time t0 and time t1\n        action_batch_t0 = all_actions_batch[:, 0].unsqueeze(1)\n        action_batch_t1 = all_actions_batch[:, 1].unsqueeze(1)\n        \n        # At time t0 predict the state at time t1:\n        X = torch.cat((state_batch_t0.detach(), action_batch_t0.float()), dim=1)\n        pred_batch_t0t1 = self.transition_net(X)\n\n        # Determine the prediction error wrt time t0-t1:\n        pred_error_batch_t0t1 = torch.mean(F.mse_loss(\n                pred_batch_t0t1, state_mu_batch_t1, reduction='none'), dim=1).unsqueeze(1)\n        \n        return (state_batch_t1, state_batch_t2, action_batch_t1,\n                reward_batch_t1, done_batch_t2, pred_error_batch_t0t1,\n                obs_batch_t1, state_mu_batch_t1,\n                state_logvar_batch_t1, z_batch_t1)\n    \n    def compute_value_net_loss(self, state_batch_t1, state_batch_t2,\n                               action_batch_t1, reward_batch_t1,\n                               done_batch_t2, pred_error_batch_t0t1):\n        \n        with torch.no_grad():\n            # Determine the action distribution for time t2:\n            policy_batch_t2 = self.policy_net(state_batch_t2)\n            \n            # Determine the target EFEs for time t2:\n            target_EFEs_batch_t2 = self.target_net(state_batch_t2)\n            \n            # Weigh the target EFEs according to the action distribution:\n            weighted_targets = ((1-done_batch_t2) * policy_batch_t2 *\n                                target_EFEs_batch_t2).sum(-1).unsqueeze(1)\n            \n            # Determine the batch of bootstrapped estimates of the EFEs:\n            EFE_estimate_batch = -reward_batch_t1 + pred_error_batch_t0t1 + self.Beta * weighted_targets\n        \n        # Determine the EFE at time t1 according to the value network:\n        EFE_batch_t1 = self.value_net(state_batch_t1).gather(1, action_batch_t1)\n\n        # Determine the MSE loss between the EFE estimates and the value network output:\n        value_net_loss = F.mse_loss(EFE_estimate_batch, EFE_batch_t1)\n        \n        return value_net_loss\n    \n    def compute_VFE(self, vae_loss, 
state_batch_t1, pred_error_batch_t0t1):\n # Determine the action distribution for time t1:\n policy_batch_t1 = self.policy_net(state_batch_t1)\n \n # Determine the EFEs for time t1:\n EFEs_batch_t1 = self.value_net(state_batch_t1)\n\n # Take a gamma-weighted Boltzmann distribution over the EFEs:\n boltzmann_EFEs_batch_t1 = torch.softmax(-self.gamma * EFEs_batch_t1, dim=1).clamp(min=1e-9, max=1-1e-9)\n \n # Weigh them according to the action distribution:\n energy_term_batch = -(policy_batch_t1 * torch.log(boltzmann_EFEs_batch_t1)).sum(-1).unsqueeze(1)\n \n # Determine the entropy of the action distribution\n entropy_batch = -(policy_batch_t1 * torch.log(policy_batch_t1)).sum(-1).unsqueeze(1)\n \n # Determine the VFE, then take the mean over all batch samples:\n VFE_batch = vae_loss + pred_error_batch_t0t1 + (energy_term_batch - entropy_batch)\n VFE = torch.mean(VFE_batch)\n \n return VFE\n \n def learn(self, ith_episode):\n \n # If there are not enough transitions stored in memory, return\n if self.memory.push_count - self.max_n_indices*2 < self.batch_size:\n return\n \n # After every freeze_period time steps, update the target network\n if self.freeze_cntr % self.freeze_period == 0:\n self.target_net.load_state_dict(self.value_net.state_dict())\n self.freeze_cntr += 1\n \n # Retrieve mini-batches of data from memory\n (state_batch_t1, state_batch_t2, action_batch_t1,\n reward_batch_t1, done_batch_t2, pred_error_batch_t0t1,\n obs_batch_t1, state_mu_batch_t1,\n state_logvar_batch_t1, z_batch_t1) = self.get_mini_batches()\n \n # Determine the reconstruction loss for time t1 \n recon_batch = self.vae.decode(z_batch_t1, self.batch_size)\n \n # Determine the VAE loss for time t1\n vae_loss = self.vae.loss_function(recon_batch, obs_batch_t1, state_mu_batch_t1, state_logvar_batch_t1, batch=True) / self.alpha\n \n # Compute the value network loss:\n value_net_loss = self.compute_value_net_loss(state_batch_t1, state_batch_t2,\n action_batch_t1, reward_batch_t1,\n done_batch_t2, pred_error_batch_t0t1)\n \n # Compute the variational free energy:\n VFE = self.compute_VFE(vae_loss, state_batch_t1.detach(), pred_error_batch_t0t1)\n\n # Reset the gradients:\n if not self.freeze_vae:\n self.vae.optimizer.zero_grad()\n self.policy_net.optimizer.zero_grad()\n self.transition_net.optimizer.zero_grad()\n self.value_net.optimizer.zero_grad()\n \n # Compute the gradients:\n VFE.backward(retain_graph=True)\n value_net_loss.backward()\n \n # Perform gradient descent:\n if not self.freeze_vae:\n self.vae.optimizer.step()\n self.policy_net.optimizer.step()\n self.transition_net.optimizer.step()\n self.value_net.optimizer.step()\n \n ''' Run a trained model without it learning. 
'''\n    def play(self):\n        \n        rewards = []\n        \n        self.memory.push_count = self.memory_capacity - 1\n        \n        for ith_episode in range(self.n_play_episodes):\n            \n            total_reward = 0\n            nr_steps = 0\n            obs = self.env.reset()\n            obs = self.get_screen(self.device)\n            done = False\n            \n            while not done and nr_steps <= self.max_length_episode:\n                \n                # get action\n                action = self.select_action(obs)\n                \n                # get actual action from discrete actions dictionary\n                action_todo = self.discrete_actions.get(int(action))\n                \n                # take step\n                obs, reward, done, _ = self.env.step([action_todo[0], action_todo[1], action_todo[2]])\n                nr_steps = nr_steps + 1\n                obs = self.get_screen(self.device)\n                \n                # render in visible window if True\n                if self.render_view:\n                    self.env.render('human')\n                \n                # add reward to total\n                total_reward += reward\n\n            rewards.append(total_reward)\n            print(\"Reward for this episode:\", total_reward)\n            total_reward = 0 \n\n        self.env.close()\n        \n        np.savez(\"rewards/daif_CarRacing_rewards\", np.array(rewards))\n        \n        \n    def train_vae(self):\n        \"\"\" Train the VAE using data collected via user play. \"\"\"\n        vae_batch_size = 256\n        vae_obs_indices = [self.n_screens-i for i in range(self.n_screens)]\n        \n        self.VAE_memory.push_count = vae_batch_size + self.n_screens*2\n        \n        try:\n            # Load the pre-collected data into device\n            self.VAE_memory.obs_mem = torch.load(self.vae_data, map_location = torch.device(self.device))\n        except FileNotFoundError:\n            # Generate data to train VAE on \n            print(\"No data found to train the vae on.\")\n            data_collector = dc.DataCollector(self.VAE_memory_capacity, self.n_screens, self.height, self.width, device = self.device)\n            data_collector.generate_data()\n            self.VAE_memory.obs_mem = torch.load(data_collector.obs_data_path, map_location = torch.device(self.device))\n        \n        losses = []\n        \n        for data_point in range(0, len(self.VAE_memory.obs_mem)):\n            self.VAE_memory.push_count = self.VAE_memory.push_count + 1\n            \n            obs_batch, _, _, _ = self.VAE_memory.sample(vae_obs_indices, [], [], [], len(vae_obs_indices), vae_batch_size)\n            obs_batch = obs_batch.view(vae_batch_size, self.n_screens, self.height, self.width)\n            \n            recon, mu, logvar = self.vae.forward(obs_batch, vae_batch_size)\n            loss = torch.mean(self.vae.loss_function(recon, obs_batch, mu, logvar))\n            \n            self.vae.optimizer.zero_grad()\n            loss.backward()\n            self.vae.optimizer.step()\n            \n            losses.append(loss)\n            if data_point % 50 == 0:\n                print(\"obs: %2f vae loss=%5.2f\"%(data_point, loss.item()))\n            \n            if data_point % 1000 == 0 and data_point > 0 and self.vae_plot:\n                plt.plot(losses)\n                plt.show()\n                plt.plot(losses[-1000:])\n                plt.show()\n                \n                for i in range(self.n_screens): \n                    plt.imsave(\"vae_images/obs/vae_obs_CarRacing_ep_{}_{}.png\".format(data_point, i), obs_batch[0, i, :, :].detach().cpu().squeeze(0).permute(1, 0).numpy(), cmap='gray')\n                    plt.imsave(\"vae_images/recon/vae_recon_CarRacing_ep_{}_{}.png\".format(data_point, i), recon[0, i, :, :].detach().cpu().squeeze(0).permute(1, 0).numpy(), cmap='gray')\n        \n        self.VAE_memory.push_count = 0\n        torch.save(self.vae.state_dict(), \"networks/pre_trained_vae/vae_daif_CarRacing_{}_test_end.pth\".format(self.n_latent_states))\n        \n        \n    def train(self):\n\n        if self.pre_train_vae and not self.load_pre_trained_vae: # If True: pre-train the VAE\n            msg = \"Environment is: {}\\nPre-training vae. 
Starting at {}\".format(\"CarRacing-v0\", datetime.datetime.now())\n            print(msg)\n            self.record.write(msg+\"\\n\")\n            self.train_vae()\n        \n        \n        msg = \"Environment is: {}\\nTraining started at {}\".format(\"CarRacing-v0\", datetime.datetime.now())\n        print(msg)\n        self.record.write(msg+\"\\n\")\n        \n        results = []\n        for ith_episode in range(self.n_episodes):\n            \n            total_reward = 0\n            self.env.reset()\n            obs = self.get_screen(self.device)\n            done = False\n            reward = 0\n            nr_steps = 0\n            \n            self.prev_screen = self.env.render('rgb_array')\n            \n            while not done and nr_steps <= self.max_length_episode:\n                \n                # get action\n                action = self.select_action(obs)\n                \n                # push to memory\n                self.memory.push(obs, action, reward, done)\n                \n                # get actual action input\n                action_real = self.discrete_actions.get(int(action))\n                \n                # take step\n                obs, reward, done, _ = self.env.step([action_real[0], action_real[1], action_real[2]])\n                obs = self.get_screen(self.device)\n                nr_steps = nr_steps + 1\n                \n                # render in visible window if True\n                if self.render_view:\n                    self.env.render('human')\n                \n                # add reward to total\n                total_reward += reward\n                \n                # have the networks learn\n                self.learn(ith_episode)\n            \n            if done or nr_steps == self.max_length_episode:\n                self.memory.push(obs, -99, -99, True)\n            \n            results.append(total_reward)\n            \n            # Print and keep a (.txt) record of stuff\n            if ith_episode > 0 and ith_episode % self.print_timer == 0:\n                avg_reward = np.mean(results)\n                last_x = np.mean(results[-self.print_timer:])\n                msg = \"Episodes: {:4d}, avg score: {:3.2f}, over last {:d}: {:3.2f}\".format(ith_episode, avg_reward, self.print_timer, last_x)\n                print(msg)\n                \n                # write to log\n                self.record.write(msg+\"\\n\")\n                \n                # save log\n                self.record.close()\n                self.record = open(self.log_path, \"a\")\n        \n        self.env.close()\n        \n        # If enabled, save the results and the network (state_dict)\n        if self.save_results:\n            np.savez(self.results_path, np.array(results))\n        if self.save_network:\n            torch.save(self.transition_net.state_dict(), self.network_save_path.format(\"trans\"))\n            torch.save(self.policy_net.state_dict(), self.network_save_path.format(\"pol\"))\n            torch.save(self.value_net.state_dict(), self.network_save_path.format(\"val\"))\n            torch.save(self.vae.state_dict(), self.network_save_path.format(\"vae\"))\n        \n        # Print and keep a (.txt) record of stuff\n        msg = \"Training finished at {}\".format(datetime.datetime.now())\n        print(msg)\n        self.record.write(msg)\n        self.record.close()\n        \nif __name__ == \"__main__\":\n    agent = Agent()\n    agent.train()\n" ]
[ [ "torch.randn_like", "torch.mean", "torch.load", "torch.sum", "torch.multinomial", "matplotlib.pyplot.plot", "torch.no_grad", "numpy.mean", "torch.device", "torch.softmax", "torch.nn.Sigmoid", "numpy.zeros", "torch.nn.ConvTranspose2d", "numpy.ascontiguousarray", "torch.nn.Conv2d", "torch.exp", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.log", "torch.nn.BatchNorm2d", "numpy.array", "matplotlib.pyplot.show", "numpy.flip", "torch.nn.functional.binary_cross_entropy", "numpy.prod", "torch.nn.ReLU" ] ]
RSEnergyGroup/incubator-airflow
[ "215b8c8170bd63f4c449614945bb4b6d90f6a860" ]
[ "tests/hooks/test_hive_hook.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport datetime\nimport itertools\nimport os\nimport random\nimport unittest\nfrom collections import OrderedDict\n\nimport mock\nimport pandas as pd\nfrom hmsclient import HMSClient\n\nfrom airflow import DAG, configuration\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook, HiveServer2Hook\nfrom airflow.operators.hive_operator import HiveOperator\nfrom airflow.utils import timezone\nfrom airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING\nfrom airflow.utils.tests import assertEqualIgnoreMultipleSpaces\n\nconfiguration.load_test_config()\n\n\nDEFAULT_DATE = timezone.datetime(2015, 1, 1)\nDEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()\nDEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]\n\n\nclass HiveEnvironmentTest(unittest.TestCase):\n\n def setUp(self):\n configuration.load_test_config()\n args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n self.dag = DAG('test_dag_id', default_args=args)\n self.next_day = (DEFAULT_DATE +\n datetime.timedelta(days=1)).isoformat()[:10]\n self.database = 'airflow'\n self.partition_by = 'ds'\n self.table = 'static_babynames_partitioned'\n self.hql = \"\"\"\n CREATE DATABASE IF NOT EXISTS {{ params.database }};\n USE {{ params.database }};\n DROP TABLE IF EXISTS {{ params.table }};\n CREATE TABLE IF NOT EXISTS {{ params.table }} (\n state string,\n year string,\n name string,\n gender string,\n num int)\n PARTITIONED BY ({{ params.partition_by }} string);\n ALTER TABLE {{ params.table }}\n ADD PARTITION({{ params.partition_by }}='{{ ds }}');\n \"\"\"\n self.hook = HiveMetastoreHook()\n t = HiveOperator(\n task_id='HiveHook_' + str(random.randint(1, 10000)),\n params={\n 'database': self.database,\n 'table': self.table,\n 'partition_by': self.partition_by\n },\n hive_cli_conn_id='beeline_default',\n hql=self.hql, dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,\n ignore_ti_state=True)\n\n def tearDown(self):\n hook = HiveMetastoreHook()\n with hook.get_conn() as metastore:\n metastore.drop_table(self.database, self.table, deleteData=True)\n\n\nclass TestHiveCliHook(unittest.TestCase):\n\n def test_run_cli(self):\n hook = HiveCliHook()\n hook.run_cli(\"SHOW DATABASES\")\n\n def test_run_cli_with_hive_conf(self):\n hql = \"set key;\\n\" \\\n \"set airflow.ctx.dag_id;\\nset airflow.ctx.dag_run_id;\\n\" \\\n \"set airflow.ctx.task_id;\\nset airflow.ctx.execution_date;\\n\"\n\n dag_id_ctx_var_name = \\\n AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']\n task_id_ctx_var_name = \\\n AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']\n execution_date_ctx_var_name = \\\n 
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][\n 'env_var_format']\n dag_run_id_ctx_var_name = \\\n AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][\n 'env_var_format']\n os.environ[dag_id_ctx_var_name] = 'test_dag_id'\n os.environ[task_id_ctx_var_name] = 'test_task_id'\n os.environ[execution_date_ctx_var_name] = 'test_execution_date'\n os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'\n\n hook = HiveCliHook()\n output = hook.run_cli(hql=hql, hive_conf={'key': 'value'})\n self.assertIn('value', output)\n self.assertIn('test_dag_id', output)\n self.assertIn('test_task_id', output)\n self.assertIn('test_execution_date', output)\n self.assertIn('test_dag_run_id', output)\n\n del os.environ[dag_id_ctx_var_name]\n del os.environ[task_id_ctx_var_name]\n del os.environ[execution_date_ctx_var_name]\n del os.environ[dag_run_id_ctx_var_name]\n\n @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')\n def test_load_file(self, mock_run_cli):\n filepath = \"/path/to/input/file\"\n table = \"output_table\"\n\n hook = HiveCliHook()\n hook.load_file(filepath=filepath, table=table, create=False)\n\n query = (\n \"LOAD DATA LOCAL INPATH '{filepath}' \"\n \"OVERWRITE INTO TABLE {table} \\n\"\n .format(filepath=filepath, table=table)\n )\n mock_run_cli.assert_called_with(query)\n\n @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')\n @mock.patch('pandas.DataFrame.to_csv')\n def test_load_df(self, mock_to_csv, mock_load_file):\n df = pd.DataFrame({\"c\": [\"foo\", \"bar\", \"baz\"]})\n table = \"t\"\n delimiter = \",\"\n encoding = \"utf-8\"\n\n hook = HiveCliHook()\n hook.load_df(df=df,\n table=table,\n delimiter=delimiter,\n encoding=encoding)\n\n mock_to_csv.assert_called_once()\n kwargs = mock_to_csv.call_args[1]\n self.assertEqual(kwargs[\"header\"], False)\n self.assertEqual(kwargs[\"index\"], False)\n self.assertEqual(kwargs[\"sep\"], delimiter)\n\n mock_load_file.assert_called_once()\n kwargs = mock_load_file.call_args[1]\n self.assertEqual(kwargs[\"delimiter\"], delimiter)\n self.assertEqual(kwargs[\"field_dict\"], {\"c\": u\"STRING\"})\n self.assertTrue(isinstance(kwargs[\"field_dict\"], OrderedDict))\n self.assertEqual(kwargs[\"table\"], table)\n\n @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')\n @mock.patch('pandas.DataFrame.to_csv')\n def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file):\n hook = HiveCliHook()\n b = (True, False)\n for create, recreate in itertools.product(b, b):\n mock_load_file.reset_mock()\n hook.load_df(df=pd.DataFrame({\"c\": range(0, 10)}),\n table=\"t\",\n create=create,\n recreate=recreate)\n\n mock_load_file.assert_called_once()\n kwargs = mock_load_file.call_args[1]\n self.assertEqual(kwargs[\"create\"], create)\n self.assertEqual(kwargs[\"recreate\"], recreate)\n\n @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')\n def test_load_df_with_data_types(self, mock_run_cli):\n d = OrderedDict()\n d['b'] = [True]\n d['i'] = [-1]\n d['t'] = [1]\n d['f'] = [0.0]\n d['c'] = ['c']\n d['M'] = [datetime.datetime(2018, 1, 1)]\n d['O'] = [object()]\n d['S'] = ['STRING'.encode('utf-8')]\n d['U'] = ['STRING']\n d['V'] = [None]\n df = pd.DataFrame(d)\n\n hook = HiveCliHook()\n hook.load_df(df, 't')\n\n query = \"\"\"\n CREATE TABLE IF NOT EXISTS t (\n b BOOLEAN,\n i BIGINT,\n t BIGINT,\n f DOUBLE,\n c STRING,\n M TIMESTAMP,\n O STRING,\n S STRING,\n U STRING,\n V STRING)\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY ','\n STORED AS textfile\n ;\n \"\"\"\n 
assertEqualIgnoreMultipleSpaces(self, mock_run_cli.call_args_list[0][0][0], query)\n\n\nclass TestHiveMetastoreHook(HiveEnvironmentTest):\n VALID_FILTER_MAP = {'key2': 'value2'}\n\n def test_get_max_partition_from_empty_part_specs(self):\n max_partition = \\\n HiveMetastoreHook._get_max_partition_from_part_specs([],\n 'key1',\n self.VALID_FILTER_MAP)\n self.assertIsNone(max_partition)\n\n def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self):\n with self.assertRaises(AirflowException):\n HiveMetastoreHook._get_max_partition_from_part_specs(\n [{'key1': 'value1', 'key2': 'value2'},\n {'key1': 'value3', 'key2': 'value4'}],\n 'key1',\n {'key3': 'value5'})\n\n def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self):\n with self.assertRaises(AirflowException):\n HiveMetastoreHook._get_max_partition_from_part_specs(\n [{'key1': 'value1', 'key2': 'value2'},\n {'key1': 'value3', 'key2': 'value4'}],\n 'key3',\n self.VALID_FILTER_MAP)\n\n def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self):\n with self.assertRaises(AirflowException):\n HiveMetastoreHook._get_max_partition_from_part_specs(\n [{'key1': 'value1', 'key2': 'value2'},\n {'key1': 'value3', 'key2': 'value4'}],\n None,\n self.VALID_FILTER_MAP)\n\n def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self):\n max_partition = \\\n HiveMetastoreHook._get_max_partition_from_part_specs(\n [{'key1': 'value1', 'key2': 'value2'},\n {'key1': 'value3', 'key2': 'value4'}],\n 'key1',\n None)\n\n # No partition will be filtered out.\n self.assertEqual(max_partition, b'value3')\n\n def test_get_max_partition_from_valid_part_specs(self):\n max_partition = \\\n HiveMetastoreHook._get_max_partition_from_part_specs(\n [{'key1': 'value1', 'key2': 'value2'},\n {'key1': 'value3', 'key2': 'value4'}],\n 'key1',\n self.VALID_FILTER_MAP)\n self.assertEqual(max_partition, b'value1')\n\n def test_get_metastore_client(self):\n self.assertIsInstance(self.hook.get_metastore_client(), HMSClient)\n\n def test_get_conn(self):\n self.assertIsInstance(self.hook.get_conn(), HMSClient)\n\n def test_check_for_partition(self):\n partition = \"{p_by}='{date}'\".format(date=DEFAULT_DATE_DS,\n p_by=self.partition_by)\n missing_partition = \"{p_by}='{date}'\".format(date=self.next_day,\n p_by=self.partition_by)\n self.assertTrue(\n self.hook.check_for_partition(self.database, self.table,\n partition)\n )\n self.assertFalse(\n self.hook.check_for_partition(self.database, self.table,\n missing_partition)\n )\n\n def test_check_for_named_partition(self):\n partition = \"{p_by}={date}\".format(date=DEFAULT_DATE_DS,\n p_by=self.partition_by)\n missing_partition = \"{p_by}={date}\".format(date=self.next_day,\n p_by=self.partition_by)\n self.assertTrue(\n self.hook.check_for_named_partition(self.database,\n self.table,\n partition)\n )\n self.assertFalse(\n self.hook.check_for_named_partition(self.database,\n self.table,\n missing_partition)\n )\n\n def test_get_table(self):\n table_info = self.hook.get_table(db=self.database,\n table_name=self.table)\n self.assertEqual(table_info.tableName, self.table)\n columns = ['state', 'year', 'name', 'gender', 'num']\n self.assertEqual([col.name for col in table_info.sd.cols], columns)\n\n def test_get_tables(self):\n tables = self.hook.get_tables(db=self.database,\n pattern=self.table + \"*\")\n self.assertIn(self.table, {table.tableName for table in tables})\n\n def test_get_databases(self):\n databases = self.hook.get_databases(pattern='*')\n 
self.assertIn(self.database, databases)\n\n def test_get_partitions(self):\n partitions = self.hook.get_partitions(schema=self.database,\n table_name=self.table)\n self.assertEqual(len(partitions), 1)\n self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}])\n\n def test_max_partition(self):\n filter_map = {self.partition_by: DEFAULT_DATE_DS}\n partition = self.hook.max_partition(schema=self.database,\n table_name=self.table,\n field=self.partition_by,\n filter_map=filter_map)\n self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8'))\n\n def test_table_exists(self):\n self.assertTrue(self.hook.table_exists(self.table, db=self.database))\n self.assertFalse(\n self.hook.table_exists(str(random.randint(1, 10000)))\n )\n\n\nclass TestHiveServer2Hook(unittest.TestCase):\n\n def _upload_dataframe(self):\n df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]})\n self.local_path = '/tmp/TestHiveServer2Hook.csv'\n df.to_csv(self.local_path, header=False, index=False)\n\n def setUp(self):\n configuration.load_test_config()\n self._upload_dataframe()\n args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n self.dag = DAG('test_dag_id', default_args=args)\n self.database = 'airflow'\n self.table = 'hive_server_hook'\n self.hql = \"\"\"\n CREATE DATABASE IF NOT EXISTS {{ params.database }};\n USE {{ params.database }};\n DROP TABLE IF EXISTS {{ params.table }};\n CREATE TABLE IF NOT EXISTS {{ params.table }} (\n a int,\n b int)\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY ',';\n LOAD DATA LOCAL INPATH '{{ params.csv_path }}'\n OVERWRITE INTO TABLE {{ params.table }};\n \"\"\"\n self.columns = ['{}.a'.format(self.table),\n '{}.b'.format(self.table)]\n self.hook = HiveMetastoreHook()\n t = HiveOperator(\n task_id='HiveHook_' + str(random.randint(1, 10000)),\n params={\n 'database': self.database,\n 'table': self.table,\n 'csv_path': self.local_path\n },\n hive_cli_conn_id='beeline_default',\n hql=self.hql, dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,\n ignore_ti_state=True)\n\n def tearDown(self):\n hook = HiveMetastoreHook()\n with hook.get_conn() as metastore:\n metastore.drop_table(self.database, self.table, deleteData=True)\n os.remove(self.local_path)\n\n def test_get_conn(self):\n hook = HiveServer2Hook()\n hook.get_conn()\n\n def test_get_records(self):\n hook = HiveServer2Hook()\n query = \"SELECT * FROM {}\".format(self.table)\n results = hook.get_records(query, schema=self.database)\n self.assertListEqual(results, [(1, 1), (2, 2)])\n\n def test_get_pandas_df(self):\n hook = HiveServer2Hook()\n query = \"SELECT * FROM {}\".format(self.table)\n df = hook.get_pandas_df(query, schema=self.database)\n self.assertEqual(len(df), 2)\n self.assertListEqual(df.columns.tolist(), self.columns)\n self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])\n\n def test_get_results_header(self):\n hook = HiveServer2Hook()\n query = \"SELECT * FROM {}\".format(self.table)\n results = hook.get_results(query, schema=self.database)\n self.assertListEqual([col[0] for col in results['header']],\n self.columns)\n\n def test_get_results_data(self):\n hook = HiveServer2Hook()\n query = \"SELECT * FROM {}\".format(self.table)\n results = hook.get_results(query, schema=self.database)\n self.assertListEqual(results['data'], [(1, 1), (2, 2)])\n\n def test_to_csv(self):\n hook = HiveServer2Hook()\n query = \"SELECT * FROM {}\".format(self.table)\n csv_filepath = 'query_results.csv'\n hook.to_csv(query, csv_filepath, schema=self.database,\n delimiter=',', lineterminator='\\n', 
output_header=True)\n df = pd.read_csv(csv_filepath, sep=',')\n self.assertListEqual(df.columns.tolist(), self.columns)\n self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])\n self.assertEqual(len(df), 2)\n\n def test_multi_statements(self):\n sqls = [\n \"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)\",\n \"SELECT * FROM {}\".format(self.table),\n \"DROP TABLE test_multi_statements\",\n ]\n hook = HiveServer2Hook()\n results = hook.get_records(sqls, schema=self.database)\n self.assertListEqual(results, [(1, 1), (2, 2)])\n\n def test_get_results_with_hive_conf(self):\n hql = [\"set key\",\n \"set airflow.ctx.dag_id\",\n \"set airflow.ctx.dag_run_id\",\n \"set airflow.ctx.task_id\",\n \"set airflow.ctx.execution_date\"]\n\n dag_id_ctx_var_name = \\\n AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']\n task_id_ctx_var_name = \\\n AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']\n execution_date_ctx_var_name = \\\n AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][\n 'env_var_format']\n dag_run_id_ctx_var_name = \\\n AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][\n 'env_var_format']\n os.environ[dag_id_ctx_var_name] = 'test_dag_id'\n os.environ[task_id_ctx_var_name] = 'test_task_id'\n os.environ[execution_date_ctx_var_name] = 'test_execution_date'\n os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'\n\n hook = HiveServer2Hook()\n output = '\\n'.join(res_tuple[0]\n for res_tuple\n in hook.get_results(hql=hql,\n hive_conf={'key': 'value'})['data'])\n self.assertIn('value', output)\n self.assertIn('test_dag_id', output)\n self.assertIn('test_task_id', output)\n self.assertIn('test_execution_date', output)\n self.assertIn('test_dag_run_id', output)\n\n del os.environ[dag_id_ctx_var_name]\n del os.environ[task_id_ctx_var_name]\n del os.environ[execution_date_ctx_var_name]\n del os.environ[dag_run_id_ctx_var_name]\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
BIMAU/fvm
[ "fef6e8c577848b105e04273e1357a5e279d26aba" ]
[ "fvm/CylindricalDiscretization.py" ]
[ "import numpy\n\nfrom fvm import utils\nfrom fvm import BoundaryConditions\nfrom fvm import Discretization\n\nclass CylindricalDiscretization(Discretization):\n '''Finite volume discretization of the incompressible Navier-Stokes\n equations on a (possibly non-uniform) Arakawa C-grid in a\n cylindrical coordinate system. For details on the implementation\n and the ordering of the variables, see the Discretization class.\n\n '''\n\n def __init__(self, parameters, nr, ntheta, nz, dim, dof, r=None, theta=None, z=None):\n self.parameters = parameters\n\n if self.parameters.get('Grid Stretching', False) or 'Grid Stretching Factor' in self.parameters.keys():\n r = utils.create_stretched_coordinate_vector(\n self.parameters.get('R-min', 0.0), self.parameters.get('R-max', 1.0), nr,\n self.parameters.get('Grid Stretching Factor', 1.5)) if r is None else r\n else:\n r = utils.create_uniform_coordinate_vector(\n self.parameters.get('R-min', 0.0), self.parameters.get('R-max', 1.0), nr) if r is None else r\n\n theta = utils.create_uniform_coordinate_vector(\n self.parameters.get('Theta-min', 0.0), self.parameters.get('Theta-max', 2 * numpy.pi), ntheta) \\\n if theta is None else theta\n\n Discretization.__init__(self, parameters, nr, ntheta, nz, dim, dof, r, theta, z)\n\n self.y_periodic = True\n if self.parameters.get('Z-periodic', False):\n self.z_periodic = True\n\n def _linear_part_2D(self):\n '''Compute the linear part of the equation in case the domain is 2D.\n In case Re = 0 we instead compute the linear part for the Stokes\n problem.'''\n\n Re = self.get_parameter('Reynolds Number')\n\n if Re == 0:\n Re = 1\n\n return 1 / Re * (self.iruscale(self.u_rr()) + self.iru2scale(self.u_tt() - self.value_u() - 2 * self.v_t_u())\n + self.irvscale(self.v_rr()) + self.irv2scale(self.v_tt() - self.value_v() + 2 * self.u_t_v())) \\\n - (self.p_r() + self.irvscale(self.p_t())) \\\n + self.div()\n\n def _linear_part_3D(self):\n '''Compute the linear part of the equation in case the domain is 3D.\n In case Re = 0 we instead compute the linear part for the Stokes\n problem.'''\n\n Re = self.get_parameter('Reynolds Number')\n\n if Re == 0:\n Re = 1\n\n return 1 / Re * (self.iruscale(self.u_rr()) + self.iru2scale(- self.value_u())\n + self.u_zz()\n + self.irvscale(self.v_rr()) + self.irv2scale(- self.value_v())\n + self.v_zz()\n + self.irvscale(self.w_rr()) + self.w_zz()) \\\n - (self.p_r() + self.irvscale(self.p_t()) + self.p_z()) \\\n + self.div()\n\n def nonlinear_part(self, state):\n '''Compute the nonlinear part of the equation. 
In case Re = 0 this\n does nothing.'''\n\n state_mtx = utils.create_padded_state_mtx(state, self.nx, self.ny, self.nz, self.dof,\n self.x_periodic, self.y_periodic, self.z_periodic)\n\n atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n\n Re = self.get_parameter('Reynolds Number')\n if Re == 0:\n return (atomJ, atomF)\n\n self.u_u_r(atomJ, atomF, state_mtx)\n self.u_v_r(atomJ, atomF, state_mtx)\n self.v_u_t(atomJ, atomF, state_mtx)\n self.v_v_t(atomJ, atomF, state_mtx)\n\n self.v_v(atomJ, atomF, state_mtx)\n self.u_v(atomJ, atomF, state_mtx)\n\n if self.dim > 2:\n self.u_w_r(atomJ, atomF, state_mtx)\n self.v_w_t(atomJ, atomF, state_mtx)\n self.w_u_z(atomJ, atomF, state_mtx)\n self.w_v_z(atomJ, atomF, state_mtx)\n self.w_w_z(atomJ, atomF, state_mtx)\n\n atomJ += atomF\n\n return (atomJ, atomF)\n\n def boundaries(self, atom):\n '''Compute boundary conditions for the currently defined problem type.'''\n\n # TODO: Make it possible to interface this from the outside.\n\n boundary_conditions = BoundaryConditions(self.nx, self.ny, self.nz, self.dim, self.dof, self.x, self.y, self.z)\n\n frc = numpy.zeros(self.nx * self.ny * self.nz * self.dof)\n\n if self.problem_type_equals('Taylor-Couette'):\n vo = self.get_parameter('Outer Angular Velocity', 2)\n vi = self.get_parameter('Inner Angular Velocity', 1)\n frc += boundary_conditions.moving_lid_east(atom, vo * self.x[self.nx-1])\n frc += boundary_conditions.moving_lid_west(atom, vi * self.x[-1])\n\n if self.dim <= 2 or self.nz <= 1:\n return frc\n\n asym = self.get_parameter('Asymmetry Parameter')\n frc2 = numpy.zeros([self.nx, self.ny, self.nz, self.dof])\n frc2[self.nx-1, 0, :, 2] = asym * numpy.cos(self.z[0:self.nz] / self.z[self.nz-1] * numpy.pi)\n frc += utils.create_state_vec(frc2, self.nx, self.ny, self.nz, self.dof)\n\n if not self.z_periodic:\n boundary_conditions.no_slip_top(atom)\n boundary_conditions.no_slip_bottom(atom)\n\n else:\n raise Exception('Invalid problem type %s' % self.get_parameter('Problem Type'))\n\n return frc\n\n # Below are all of the discretizations of separate parts of\n # equations that we can solve using FVM. This takes into account\n # non-uniform grids. 
New discretizations such as derivatives have\n # to be implemented in a similar way.\n\n def iruscale(self, atom):\n '''Scale atom by 1/r at the location of u'''\n for i in range(self.nx):\n atom[i, :, :, :, :, :, :, :] /= self.x[i]\n return atom\n\n def irvscale(self, atom):\n '''Scale atom by 1/r at the location of v'''\n for i in range(self.nx):\n atom[i, :, :, :, :, :, :, :] /= (self.x[i] + self.x[i-1]) / 2\n return atom\n\n def iru2scale(self, atom):\n '''Scale atom by 1/r^2 at the location of u'''\n for i in range(self.nx):\n atom[i, :, :, :, :, :, :, :] /= self.x[i] * self.x[i]\n return atom\n\n def irv2scale(self, atom):\n '''Scale atom by 1/r^2 at the location of v'''\n for i in range(self.nx):\n atom[i, :, :, :, :, :, :, :] /= (self.x[i] + self.x[i-1]) * (self.x[i] + self.x[i-1]) / 4\n return atom\n\n @staticmethod\n def _u_rr(atom, i, j, k, x, y, z):\n # distance between u[i] and u[i-1]\n dx = x[i] - x[i-1]\n rv = x[i-1] + dx / 2\n # distance between u[i+1] and u[i]\n dxp1 = x[i+1] - x[i]\n rvp1 = x[i] + dxp1 / 2\n # volume size in the y direction\n dy = y[j] - y[j-1]\n # volume size in the z direction\n dz = z[k] - z[k-1]\n\n # second order finite difference\n atom[0] = rv / dx * dy * dz\n atom[2] = rvp1 / dxp1 * dy * dz\n atom[1] = -atom[0] - atom[2]\n\n def u_rr(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n CylindricalDiscretization._u_rr(atom[i, j, k, 0, 0, :, 1, 1], i, j, k, self.x, self.y, self.z)\n return atom\n\n def v_tt(self):\n return self.v_yy()\n\n @staticmethod\n def _v_rr(atom, i, j, k, x, y, z):\n # distance between v[i] and v[i-1]\n dx = (x[i] - x[i-2]) / 2\n # distance between v[i+1] and v[i]\n dxp1 = (x[i+1] - x[i-1]) / 2\n # volume size in the y direction\n dy = (y[j+1] - y[j-1]) / 2\n # volume size in the z direction\n dz = z[k] - z[k-1]\n\n # second order finite difference\n atom[0] = x[i-1] / dx * dy * dz\n atom[2] = x[i] / dxp1 * dy * dz\n atom[1] = -atom[0] - atom[2]\n\n def u_tt(self):\n return self.u_yy()\n\n def v_rr(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n CylindricalDiscretization._v_rr(atom[i, j, k, 1, 1, :, 1, 1], i, j, k, self.x, self.y, self.z)\n return atom\n\n def w_tt(self):\n return self.w_yy()\n\n def w_rr(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n CylindricalDiscretization._v_rr(atom[i, j, k, 2, 2, :, 1, 1], i, k, j, self.x, self.z, self.y)\n return atom\n\n def p_r(self):\n return self.p_x()\n\n def p_t(self):\n return self.p_y()\n\n def v_t_u(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n Discretization._backward_u_y(atom[i, j, k, 0, 1, 1, :, 1], i, j, k, self.x, self.y, self.z)\n Discretization._backward_u_y(atom[i, j, k, 0, 1, 2, :, 1], i, j, k, self.x, self.y, self.z)\n atom[i, j, k, 0, 1, :, :, :] /= 2\n return atom\n\n def u_t_v(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n Discretization._forward_u_x(atom[i, j, k, 1, 0, 0, :, 1], j, i, k, self.y, self.x, self.z)\n Discretization._forward_u_x(atom[i, j, k, 1, 0, 1, :, 1], 
j, i, k, self.y, self.x, self.z)\n atom[i, j, k, 1, 0, :, :, :] /= 2\n return atom\n\n @staticmethod\n def _value_u(atom, i, j, k, x, y, z):\n # volume size in the x direction\n dx = (x[i+1] - x[i-1]) / 2\n # volume size in the y direction\n dy = y[j] - y[j-1]\n # volume size in the z direction\n dz = z[k] - z[k-1]\n\n atom[1] = dx * dy * dz\n\n def value_u(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n CylindricalDiscretization._value_u(atom[i, j, k, 0, 0, :, 1, 1], i, j, k, self.x, self.y, self.z)\n return atom\n\n def value_v(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n CylindricalDiscretization._value_u(atom[i, j, k, 1, 1, :, 1, 1], j, i, k, self.y, self.x, self.z)\n return atom\n\n @staticmethod\n def _backward_u_r(atom, i, j, k, x, y, z):\n # volume size in the y direction\n dy = y[j] - y[j-1]\n # volume size in the z direction\n dz = z[k] - z[k-1]\n\n # backward difference\n atom[1] = x[i] * dy * dz\n atom[0] = -x[i-1] * dy * dz\n\n def u_r(self):\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n CylindricalDiscretization._backward_u_r(atom[i, j, k, self.dim, 0, :, 1, 1], i, j, k,\n self.x, self.y, self.z)\n return atom\n\n def div(self):\n if self.dim == 2:\n return self.irvscale(self.u_r() + self.v_y())\n return self.irvscale(self.u_r() + self.v_y()) + self.w_z()\n\n def u_u_r(self, atomJ, atomF, state):\n Discretization.u_u_x(self, atomJ, atomF, state)\n\n def u_v_r(self, atomJ, atomF, state):\n Discretization.u_v_x(self, atomJ, atomF, state)\n\n def u_w_r(self, atomJ, atomF, state):\n Discretization.u_w_x(self, atomJ, atomF, state)\n\n def v_u_t(self, atomJ_in, atomF_in, state):\n atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n\n Discretization.v_u_y(self, atomJ, atomF, state)\n self.iruscale(atomJ)\n self.iruscale(atomF)\n\n atomJ_in += atomJ\n atomF_in += atomF\n\n def v_v_t(self, atomJ_in, atomF_in, state):\n atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n\n Discretization.v_v_y(self, atomJ, atomF, state)\n self.irvscale(atomJ)\n self.irvscale(atomF)\n\n atomJ_in += atomJ\n atomF_in += atomF\n\n def v_w_t(self, atomJ_in, atomF_in, state):\n atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n\n Discretization.v_w_y(self, atomJ, atomF, state)\n self.irvscale(atomJ)\n self.irvscale(atomF)\n\n atomJ_in += atomJ\n atomF_in += atomF\n\n def v_v(self, atomJ_in, atomF_in, state):\n averages_v = self.weighted_average_x(state[:, :, :, 1])\n averages_v = (averages_v[:, 0:self.ny, :] + averages_v[:, 1:self.ny+1, :]) / 2\n\n atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n\n atom_value = numpy.zeros(1)\n atom_average = numpy.zeros(2)\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n Discretization._mass_x(atom_value, i, j, k, self.x, self.y, self.z)\n Discretization._weighted_average(atom_average, i, self.x)\n atom[i, j, k, 0, 1, 1:3, 0, 1] += 
atom_value * atom_average * averages_v[i, j, k+1] * 1 / 2\n atom[i, j, k, 0, 1, 1:3, 1, 1] += atom_value * atom_average * averages_v[i, j, k+1] * 1 / 2\n\n self.iruscale(atom)\n\n atomJ_in += atom\n atomF_in += atom\n\n def u_v(self, atomJ_in, atomF_in, state):\n averages_u = self.weighted_average_y(state[:, :, :, 0])\n averages_u = (averages_u[0:self.nx, :, :] + averages_u[1:self.nx+1, :, :]) / 2\n averages_v = state[1:self.nx+1, 1:self.ny+1, 1:self.nz+1, 1]\n\n atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n\n atom_value = numpy.zeros(1)\n atom_average = numpy.zeros(2)\n for i in range(self.nx):\n for j in range(self.ny):\n for k in range(self.nz):\n Discretization._mass_x(atom_value, j, i, k, self.y, self.x, self.z)\n atomF[i, j, k, 1, 1, 1, 1, 1] -= atom_value * averages_u[i, j, k+1]\n\n Discretization._weighted_average(atom_average, j, self.y)\n atomJ[i, j, k, 1, 0, 0, 1:3, 1] -= atom_value * atom_average * averages_v[i, j, k] * 1 / 2\n atomJ[i, j, k, 1, 0, 1, 1:3, 1] -= atom_value * atom_average * averages_v[i, j, k] * 1 / 2\n\n self.irvscale(atomF)\n self.irvscale(atomJ)\n\n atomJ_in += atomJ\n atomF_in += atomF\n" ]
[ [ "numpy.zeros", "numpy.cos" ] ]
HakujouRyu/Lambdata
[ "c9c6881997d635dd09ff42dc8b5277de3e56ca14" ]
[ "lambdata_hakujouryu/df_utils_test.py" ]
[ "\"\"\" Testing functions from the df_utils module\"\"\"\n\n\nimport unittest\n\nimport pandas as pd\n\nfrom df_utils import list_to_col\n\n\nTEST_DF = pd.DataFrame(\n {'one': [1, 1, 1, 1], \n 'two': [2, 2, 2, 2],\n 'dates': ['09/09/1988', '04/02/1992', '01/10/2009', '04/20/1920']})\n\nVER_DF = pd.DataFrame(\n {'one': [1, 1, 1, 1], \n 'two': [2, 2, 2, 2],\n 'dates': ['09/09/1988', '04/02/1992', '01/10/2009', '04/20/1920'],\n 'old_list': [9, 9, 9, 9]})\n\n\nTEST_LIST = [9, 9, 9, 9]\n\nclass DateFunctionsTest(unittest.TestCase):\n \"\"\" Tests for the DateFunctions class\"\"\"\n def test_test(self):\n assert True\n\n def test_split_dates(self):\n self.assertEqual(list_to_col(TEST_LIST, TEST_DF).shape, VER_DF.shape)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.DataFrame" ] ]
wojtekwalczak/kaggle_titanic
[ "f13258dcb9e964bcad61609fdcc374e3db47824e" ]
[ "titanic/scripts/plotter.py" ]
[ "\"\"\"\n PassengerId Survived Pclass Age SibSp \\\ncount 891.000000 891.000000 891.000000 714.000000 891.000000\nmean 446.000000 0.383838 2.308642 29.699118 0.523008\nstd 257.353842 0.486592 0.836071 14.526497 1.102743\nmin 1.000000 0.000000 1.000000 0.420000 0.000000\n25% 223.500000 0.000000 2.000000 20.125000 0.000000\n50% 446.000000 0.000000 3.000000 28.000000 0.000000\n75% 668.500000 1.000000 3.000000 38.000000 1.000000\nmax 891.000000 1.000000 3.000000 80.000000 8.000000\n\n Parch Fare\ncount 891.000000 891.000000\nmean 0.381594 32.204208\nstd 0.806057 49.693429\nmin 0.000000 0.000000\n25% 0.000000 7.910400\n50% 0.000000 14.454200\n75% 0.000000 31.000000\nmax 6.000000 512.329200\n\n[8 rows x 7 columns]\n\"\"\"\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nfrom titanic.db.loader import data\n\n# u'PassengerId', u'Survived', u'Pclass', u'Name', u'Sex', u'Age', u'SibSp', u'Parch', u'Ticket', u'Fare', u'Cabin', u'Embarked'\n#print(data.describe())\n\nplt.scatter(data.Pclass, data.Survived)\n# #['Age'], data['Survived'])\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ] ]
themattinthehatt/dreamscape
[ "3ae2a4fd0fc19bc69b705aa309f3643fb739997f" ]
[ "utils/DataReader.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\n@author: Matt Whiteway, June 2017\nDataReader class that handles mnist and cifar10 datasets\n\"\"\"\n\n\nclass DataReaderMNIST(object):\n \"\"\"DataReader class for mnist\"\"\"\n\n def __init__(self, data_dir, one_hot=True):\n\n from tensorflow.examples.tutorials.mnist import input_data\n\n mnist = input_data.read_data_sets(data_dir, one_hot=one_hot)\n self.train = mnist.train\n self.test = mnist.test\n self.validation = mnist.validation\n\n\nclass DataReaderCIFAR(object):\n \"\"\"DataReader class for cifar-10\"\"\"\n\n def __init__(self, data_dir, one_hot=True):\n\n import cifar10_input_data\n\n cifar = cifar10_input_data.read_data_sets(data_dir, one_hot=one_hot)\n self.train = cifar.train\n self.test = cifar.test\n self.validation = cifar.validation\n" ]
[ [ "tensorflow.examples.tutorials.mnist.input_data.read_data_sets" ] ]
yellowstarhx/person_search
[ "e36a3d9db5d4b21ff29a9618b4e5f818c8f35300" ]
[ "lib/fast_rcnn/test_utils.py" ]
[ "import numpy as np\nimport cv2\n\nfrom fast_rcnn.config import cfg\nfrom utils.blob import im_list_to_blob\n\n\ndef get_image_blob(im):\n \"\"\"Converts an image into a network input.\n\n Arguments:\n im (ndarray): a color image in BGR order\n\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale_factors (list): list of image scales (relative to im) used\n in the image pyramid\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, np.array(im_scale_factors)\n\n\ndef get_rois_blob(im_rois, im_scale_factors):\n \"\"\"Converts RoIs into network inputs.\n\n Arguments:\n im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates\n im_scale_factors (list): scale factors as returned by _get_image_blob\n\n Returns:\n blob (ndarray): R x 5 matrix of RoIs in the image pyramid\n \"\"\"\n rois, levels = _project_im_rois(im_rois, im_scale_factors)\n rois_blob = np.hstack((levels, rois))\n return rois_blob.astype(np.float32, copy=False)\n\n\ndef get_gt_boxes_blob(boxes, clss, pids, im_scale_factors):\n assert boxes.shape[0] == pids.shape[0]\n gt_boxes = boxes * im_scale_factors[0]\n gt_boxes = np.hstack([gt_boxes, clss[:, np.newaxis], pids[:, np.newaxis]])\n return gt_boxes.astype(np.float32, copy=False)\n\n\ndef _project_im_rois(im_rois, scales):\n \"\"\"Project image RoIs into the image pyramid built by _get_image_blob.\n\n Arguments:\n im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates\n scales (list): scale factors as returned by _get_image_blob\n\n Returns:\n rois (ndarray): R x 4 matrix of projected RoI coordinates\n levels (list): image pyramid levels used by each projected RoI\n \"\"\"\n im_rois = im_rois.astype(np.float, copy=False)\n\n if len(scales) > 1:\n widths = im_rois[:, 2] - im_rois[:, 0] + 1\n heights = im_rois[:, 3] - im_rois[:, 1] + 1\n\n areas = widths * heights\n scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)\n diff_areas = np.abs(scaled_areas - 224 * 224)\n levels = diff_areas.argmin(axis=1)[:, np.newaxis]\n else:\n levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)\n\n rois = im_rois * scales[levels]\n\n return rois, levels" ]
[ [ "numpy.hstack", "numpy.abs", "numpy.min", "numpy.round", "numpy.max", "numpy.array", "numpy.zeros" ] ]
s-weigand/pandas
[ "ae71dc1c5a694ce35e345030c3b75650f8b0a175" ]
[ "pandas/core/array_algos/replace.py" ]
[ "\"\"\"\nMethods used by Block.replace and related methods.\n\"\"\"\nimport operator\nimport re\nfrom typing import Optional, Pattern, Union\n\nimport numpy as np\n\nfrom pandas._typing import ArrayLike, Scalar\n\nfrom pandas.core.dtypes.common import (\n is_datetimelike_v_numeric,\n is_numeric_v_string_like,\n is_re,\n is_scalar,\n)\nfrom pandas.core.dtypes.missing import isna\n\n\ndef compare_or_regex_search(\n a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike\n) -> Union[ArrayLike, bool]:\n \"\"\"\n Compare two array_like inputs of the same shape or two scalar values\n\n Calls operator.eq or re.search, depending on regex argument. If regex is\n True, perform an element-wise regex matching.\n\n Parameters\n ----------\n a : array_like\n b : scalar or regex pattern\n regex : bool\n mask : array_like\n\n Returns\n -------\n mask : array_like of bool\n \"\"\"\n\n def _check_comparison_types(\n result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern]\n ):\n \"\"\"\n Raises an error if the two arrays (a,b) cannot be compared.\n Otherwise, returns the comparison result as expected.\n \"\"\"\n if is_scalar(result) and isinstance(a, np.ndarray):\n type_names = [type(a).__name__, type(b).__name__]\n\n type_names[0] = f\"ndarray(dtype={a.dtype})\"\n\n raise TypeError(\n f\"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}\"\n )\n\n if not regex:\n op = lambda x: operator.eq(x, b)\n else:\n op = np.vectorize(\n lambda x: bool(re.search(b, x))\n if isinstance(x, str) and isinstance(b, (str, Pattern))\n else False\n )\n\n # GH#32621 use mask to avoid comparing to NAs\n if isinstance(a, np.ndarray):\n a = a[mask]\n\n if is_numeric_v_string_like(a, b):\n # GH#29553 avoid deprecation warnings from numpy\n return np.zeros(a.shape, dtype=bool)\n\n elif is_datetimelike_v_numeric(a, b):\n # GH#29553 avoid deprecation warnings from numpy\n _check_comparison_types(False, a, b)\n return False\n\n result = op(a)\n\n if isinstance(result, np.ndarray) and mask is not None:\n # The shape of the mask can differ to that of the result\n # since we may compare only a subset of a's or b's elements\n tmp = np.zeros(mask.shape, dtype=np.bool_)\n tmp[mask] = result\n result = tmp\n\n _check_comparison_types(result, a, b)\n return result\n\n\ndef replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: Optional[np.ndarray]):\n \"\"\"\n Parameters\n ----------\n values : ArrayLike\n Object dtype.\n rx : re.Pattern\n value : Any\n mask : np.ndarray[bool], optional\n\n Notes\n -----\n Alters values in-place.\n \"\"\"\n\n # deal with replacing values with objects (strings) that match but\n # whose replacement is not a string (numeric, nan, object)\n if isna(value) or not isinstance(value, str):\n\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return value if rx.search(s) is not None else s\n else:\n return s\n\n else:\n # value is guaranteed to be a string here, s can be either a string\n # or null if it's null it gets returned\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return rx.sub(value, s)\n else:\n return s\n\n f = np.vectorize(re_replacer, otypes=[values.dtype])\n\n if mask is None:\n values[:] = f(values)\n else:\n values[mask] = f(values[mask])\n" ]
[ [ "pandas.core.dtypes.common.is_numeric_v_string_like", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common.is_re", "numpy.vectorize", "pandas.core.dtypes.missing.isna", "numpy.zeros", "pandas.core.dtypes.common.is_datetimelike_v_numeric" ] ]
Pennycook/performance-portability
[ "3a6a2d524747bba0d5934b3d5838cb6029485079" ]
[ "metrics/scripts/box_plot.py" ]
[ "#!/usr/local/bin/python3\n# Copyright (c) 2020 Performance Portability authors\n# SPDX-License-Identifier: MIT\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nimport argparse\n\n# Set up argument parsing\nparser = argparse.ArgumentParser(description=\"Produce histogram of efficiency\")\nparser.add_argument('input_file', help=\"CSV file containing performance data\")\nparser.add_argument('output_file', help=\"Output PDF file\")\nparser.add_argument('--calc-efficiency', action=\"store_true\", help=\"Calculate application efficiency\")\nparser.add_argument('--input-is-throughput', action=\"store_true\", help=\"If calculating application efficiency, then treat the data as throughput (higher is better)\")\n\nargs = parser.parse_args()\n\n\nprint('Performance portability metrics')\nprint()\nprint('Input file: {}'.format(args.input_file))\nprint()\n\n# Read in the CSV file as a Pandas DataFrame\ndata = pd.read_csv(args.input_file, skipinitialspace=True, sep=',\\s+', delimiter=',', na_values='X')\n\n# In the case of trailing whitespace, the X don't get converted.\n# This replaces anything starting with an X to a NaN\ndata = data.replace(r'^X', np.nan, regex=True)\n\n# Make sure the data is all floating point\ndata[list(data.columns[1:])] = data[list(data.columns[1:])].apply(pd.to_numeric)\n\n\nprint(data)\n\n# Save a version where NaN is set to 0\ndata_nona = data.fillna(float(0.0))\n\nif (args.calc_efficiency):\n print(\"Calculating application efficiency...\")\n\n # Calculate application efficiency\n if (not args.input_is_throughput):\n minimums = data.min(axis=1, skipna=True)\n for col in list(data.columns[1:]):\n data_nona[col] = 100.0 * minimums[:] / data_nona[col]\n data_nona = data_nona.replace([np.inf, -np.inf, np.nan], 0.0)\n\n else:\n maximums = data.max(axis=1, skipna=True)\n for col in list(data.columns[1:]):\n data_nona[col] = data_nona[col] / maximums[:] * 100.0\n data_nona = data_nona.replace([np.inf, -np.inf, np.nan], 0.0)\n\nelse:\n print(\"Warning: using input data as efficiencies\")\n\n# Display data information\nprint('Number of data items:')\nprint(data.count())\nprint()\n\nprint(data_nona)\n\n# Box and whisker plot\nax = data_nona.boxplot()\nax.set(ylabel='% efficiency')\nplt.xticks(rotation=45)\nplt.title(Path(args.input_file).stem)\nplt.savefig(args.output_file, bbox_inches='tight')\nplt.close()\n\nprint(80*'-')\nprint()\nprint()\n\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.xticks", "pandas.read_csv", "matplotlib.pyplot.savefig" ] ]
VDIGPKU/OPANAS
[ "873ff09a65d3253ce8351e54880a642517f7e8b5" ]
[ "mmdet/models/necks/info_paths.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, xavier_init, normal_init\nimport torch\nfrom mmdet.core import auto_fp16\nfrom ..builder import NECKS\nfrom mmcv.ops import DeformConv2d, deform_conv2d\nfrom torch.nn.modules.utils import _pair\nfrom torch.nn import init as init\nOPS = {\n 'none': lambda in_channels, out_channels: None_(),\n 'skip_connect' :lambda in_channels, out_channels: Skip_(),\n 'TD' : lambda in_channels, out_channels: Top_down(in_channels, out_channels),\n 'BU' : lambda in_channels, out_channels: Bottom_up(in_channels, out_channels),\n 'FS' : lambda in_channels, out_channels: Fuse_split(in_channels, out_channels),\n 'SE': lambda in_channels, out_channels: Scale_equalize(in_channels, out_channels),\n\n}\n\n\nclass Top_down(nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n ):\n super(Top_down, self).__init__()\n self.tdm_convs = nn.ModuleList()\n for i in range(4):\n tdm_conv = deform_conv(\n in_channels,\n out_channels,\n 3,\n padding=1)\n self.tdm_convs.append(tdm_conv)\n\n\n def forward(self, inputs):\n # build top-down path\n\n topdown = []\n topdownconv = self.tdm_convs[-1](1, inputs[-1])\n if topdownconv.shape[2:] != inputs[-1].shape:\n topdownconv = F.interpolate(topdownconv, size=inputs[-1].shape[2:], mode='nearest')\n\n topdown.append(topdownconv)\n for i in range(3, 0, -1):\n temp = self.tdm_convs[i - 1](i - 1, inputs[i - 1] + F.interpolate(\n topdownconv.clone(), size=inputs[i - 1].shape[2:], mode='nearest'))\n topdown.insert(0, temp)\n topdownconv = temp\n return topdown\n\n\n\n\n\nclass Bottom_up(nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n ):\n super(Bottom_up, self).__init__()\n self.bun_convs = nn.ModuleList()\n for i in range(4):\n bun_conv = deform_conv(\n in_channels,\n out_channels,\n 3,\n padding=1,\n )\n self.bun_convs.append(bun_conv)\n\n\n def forward(self, inputs):\n # build bottom-up path\n\n botomup = []\n for i in range(4):\n if i == 0:\n bum = inputs[0]\n elif i == 3:\n bb = F.max_pool2d(botomup[-1].clone(), 2, stride=2)\n if bb.shape[2:] != inputs[-1].shape[2:]:\n bb = F.interpolate(\n bb, size=inputs[-1].shape[2:], mode='nearest')\n bum = bb + inputs[-1]\n else:\n bum = inputs[i] + F.max_pool2d(botomup[i - 1].clone(), 2, stride=2)\n\n botomup.append(self.bun_convs[i](i, bum))\n\n return botomup\n\n\n\n\nclass Fuse_split(nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n ):\n super(Fuse_split, self).__init__()\n self.fuse = nn.ModuleList([deform_conv(\n out_channels * 2,\n out_channels,\n 3,\n padding=1\n )] * 2)\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n\n\n def forward(self, inputs):\n # build fusing-splitting path\n\n fussplit = []\n fuse1 = inputs[1] + F.max_pool2d(inputs[0], 2, stride=2)\n fuse2 = F.interpolate(\n inputs[-1], size=inputs[2].shape[2:], mode='nearest') + inputs[2]\n fuseconv1 = self.fuse[0](1, torch.cat([fuse1.clone(), F.interpolate(\n fuse2.clone(), size=fuse1.shape[2:], mode='nearest')], 1))\n fuseconv2 = self.fuse[1](1, torch.cat([F.max_pool2d(fuse1.clone(), 2, stride=2), fuse2.clone()], 1))\n\n fussplit.append(F.interpolate(\n fuseconv1.clone(), size=inputs[0].shape[2:], mode='nearest'))\n fussplit.append(fuseconv1)\n fussplit.append(fuseconv2)\n fussplit.append(F.max_pool2d(fuseconv2.clone(), 2, stride=2, ceil_mode=False))\n if fussplit[-1].shape[2:] != inputs[-1].shape[2:]:\n fussplit[-1] = F.interpolate(fussplit[-1].clone(), size=inputs[-1].shape[2:], mode='nearest')\n return 
fussplit\n\n\n\n\nclass None_(nn.Module):\n def __init__(self,\n ):\n super(None_, self).__init__()\n\n self.size =0\n self.fp = 0\n def forward(self, inputs):\n\n outs = []\n for x in inputs:\n outs.append(x.new_zeros(x.shape))\n return outs\n\nclass Skip_(nn.Module):\n def __init__(self):\n super(Skip_, self).__init__()\n\n self.size = 0\n self.fp = 0\n def forward(self, inputs):\n return inputs\n\nclass Scale_equalize(nn.Module):\n def __init__(\n self,\n in_channels=256,\n out_channels=256,\n kernel_size=[3, 3, 3],\n dilation=[1, 1, 1],\n groups=[1, 1, 1],\n\n ):\n super(Scale_equalize, self).__init__()\n\n self.Pconv = nn.ModuleList()\n self.Pconv.append(\n deform_conv(in_channels,\n out_channels,\n kernel_size=kernel_size[0],\n dilation=dilation[0],\n groups=groups[0],\n padding=(kernel_size[0] + (dilation[0] - 1) * 2) // 2))\n self.Pconv.append(\n deform_conv(in_channels,\n out_channels,\n kernel_size=kernel_size[1],\n dilation=dilation[1],\n groups=groups[1],\n padding=(kernel_size[1] + (dilation[1] - 1) * 2) // 2))\n\n self.Pconv.append(\n deform_conv(in_channels,\n out_channels,\n kernel_size=kernel_size[2],\n dilation=dilation[2],\n groups=groups[2],\n padding=(kernel_size[2] + (dilation[2] - 1) * 2) // 2,\n stride=2))\n\n self.relu = nn.ReLU()\n self.init_weights()\n\n\n\n def init_weights(self):\n for m in self.Pconv:\n init.normal_(m.weight.data, 0, 0.01)\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x):\n next_x = []\n for level, feature in enumerate(x):\n\n temp_fea = self.Pconv[1](level, feature)\n if level > 0:\n temp_fea += self.Pconv[2](level, x[level - 1])\n if level < len(x) - 1:\n temp_fea += F.interpolate(\n self.Pconv[0](level, x[level + 1]),\n size=temp_fea.size()[2:], mode='nearest'\n )\n next_x.append(temp_fea)\n\n next_x = [self.relu(item) for item in next_x]\n return next_x\n\n\n\n\nclass deform_conv(DeformConv2d):\n def __init__(self, *args, **kwargs,):\n super(deform_conv, self).__init__( *args, **kwargs)\n self.conv_offset = nn.Conv2d(\n self.in_channels,\n self.deform_groups * 2 * self.kernel_size[0] *\n self.kernel_size[1],\n kernel_size=self.kernel_size,\n stride=_pair(self.stride),\n padding=_pair(self.padding),\n bias=True)\n self.init_offset()\n\n self.bias = nn.Parameter(torch.zeros(self.out_channels))\n self.start_level = 1\n\n def init_offset(self):\n\n self.conv_offset.weight.data.zero_()\n self.conv_offset.bias.data.zero_()\n\n def forward(self, i, x):\n\n if i < self.start_level:\n return torch.nn.functional.conv2d(x, self.weight, bias=self.bias, stride=self.stride,\n padding=self.padding,\n dilation=self.dilation, groups=self.groups)\n\n offset = self.conv_offset(x)\n return deform_conv2d(x, offset, self.weight, self.stride, self.padding,\n self.dilation, self.groups, self.deform_groups) + self.bias.unsqueeze(0).unsqueeze(\n -1).unsqueeze(-1)\n\n\n\n" ]
[ [ "torch.zeros", "torch.nn.ModuleList", "torch.nn.functional.conv2d", "torch.nn.init.normal_", "torch.nn.functional.interpolate", "torch.nn.modules.utils._pair", "torch.nn.ReLU", "torch.nn.functional.max_pool2d" ] ]
Yuzz1020/ViT-pytorch
[ "702b4eedd51a14f2194ebbd84b3e43242d30c229" ]
[ "train.py" ]
[ "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport argparse\nimport os\nimport random\nimport numpy as np\n\nfrom datetime import timedelta\n\nimport torch\nimport torch.distributed as dist\n\nfrom tqdm import tqdm\nfrom torch.utils.tensorboard import SummaryWriter\nfrom apex import amp\nfrom apex.parallel import DistributedDataParallel as DDP\n\nfrom models.modeling import VisionTransformer, CONFIGS\nfrom utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule\nfrom utils.data_utils import get_loader\nfrom utils.dist_util import get_world_size\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\ndef save_model(args, model):\n model_to_save = model.module if hasattr(model, 'module') else model\n model_checkpoint = os.path.join(args.output_dir, \"%s_checkpoint.bin\" % args.name)\n torch.save(model_to_save.state_dict(), model_checkpoint)\n logger.info(\"Saved model checkpoint to [DIR: %s]\", args.output_dir)\n\n\ndef setup(args):\n # Prepare model\n config = CONFIGS[args.model_type]\n\n num_classes = 10 if args.dataset == \"cifar10\" else 100\n\n model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes, target_flops=config.target_flops_ratio)\n model.load_from(np.load(args.pretrained_dir))\n model.to(args.device)\n num_params = count_parameters(model)\n\n logger.info(\"{}\".format(config))\n logger.info(\"Training parameters %s\", args)\n logger.info(\"Total Parameter: \\t%2.1fM\" % num_params)\n print(num_params)\n return args, model, config\n\n\ndef count_parameters(model):\n params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n return params/1000000\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef valid(args, model, writer, test_loader, global_step):\n # Validation!\n eval_losses = AverageMeter()\n\n logger.info(\"***** Running Validation *****\")\n logger.info(\" Num steps = %d\", len(test_loader))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n model.eval()\n all_preds, all_label = [], []\n epoch_iterator = tqdm(test_loader,\n desc=\"Validating... (loss=X.X)\",\n bar_format=\"{l_bar}{r_bar}\",\n dynamic_ncols=True,\n disable=args.local_rank not in [-1, 0])\n loss_fct = torch.nn.CrossEntropyLoss()\n for step, batch in enumerate(epoch_iterator):\n batch = tuple(t.to(args.device) for t in batch)\n x, y = batch\n with torch.no_grad():\n logits = model(x)[0]\n\n eval_loss = loss_fct(logits, y)\n eval_losses.update(eval_loss.item())\n\n preds = torch.argmax(logits, dim=-1)\n\n if len(all_preds) == 0:\n all_preds.append(preds.detach().cpu().numpy())\n all_label.append(y.detach().cpu().numpy())\n else:\n all_preds[0] = np.append(\n all_preds[0], preds.detach().cpu().numpy(), axis=0\n )\n all_label[0] = np.append(\n all_label[0], y.detach().cpu().numpy(), axis=0\n )\n epoch_iterator.set_description(\"Validating... 
(loss=%2.5f)\" % eval_losses.val)\n\n all_preds, all_label = all_preds[0], all_label[0]\n accuracy = simple_accuracy(all_preds, all_label)\n\n logger.info(\"\\n\")\n logger.info(\"Validation Results\")\n logger.info(\"Global Steps: %d\" % global_step)\n logger.info(\"Valid Loss: %2.5f\" % eval_losses.avg)\n logger.info(\"Valid Accuracy: %2.5f\" % accuracy)\n\n writer.add_scalar(\"test/accuracy\", scalar_value=accuracy, global_step=global_step)\n return accuracy\n\n\ndef train(args, model, config):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir, exist_ok=True)\n writer = SummaryWriter(log_dir=os.path.join(\"logs\", args.name))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n # Prepare dataset\n train_loader, test_loader = get_loader(args)\n\n # Prepare optimizer and scheduler\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.learning_rate,\n momentum=0.9,\n weight_decay=args.weight_decay)\n t_total = args.num_steps\n if args.decay_type == \"cosine\":\n scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n else:\n scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n\n if args.fp16:\n model, optimizer = amp.initialize(models=model,\n optimizers=optimizer,\n opt_level=args.fp16_opt_level)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Distributed training\n if args.local_rank != -1:\n model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Total optimization steps = %d\", args.num_steps)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (\n torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n\n model.zero_grad()\n set_seed(args) # Added here for reproducibility (even between python 2 and 3)\n losses = AverageMeter()\n global_step, best_acc = 0, 0\n while True:\n model.train()\n epoch_iterator = tqdm(train_loader,\n desc=\"Training (X / X Steps) (loss=X.X)\",\n bar_format=\"{l_bar}{r_bar}\",\n dynamic_ncols=True,\n disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n batch = tuple(t.to(args.device) for t in batch)\n x, y = batch\n loss = model(x, y, fix_bit=config.fix_bit)\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if (step + 1) % args.gradient_accumulation_steps == 0:\n losses.update(loss.item()*args.gradient_accumulation_steps)\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n scheduler.step()\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n epoch_iterator.set_description(\n \"Training (%d / %d Steps) (loss=%2.5f)\" % (global_step, t_total, losses.val)\n )\n if args.local_rank in [-1, 0]:\n writer.add_scalar(\"train/loss\", scalar_value=losses.val, global_step=global_step)\n writer.add_scalar(\"train/lr\", scalar_value=scheduler.get_lr()[0], global_step=global_step)\n if global_step % args.eval_every == 0 and args.local_rank in [-1, 0]:\n accuracy = valid(args, model, writer, test_loader, global_step)\n if best_acc < accuracy:\n save_model(args, model)\n best_acc = accuracy\n model.train()\n\n if global_step % t_total == 0:\n break\n losses.reset()\n if global_step % t_total == 0:\n break\n\n if args.local_rank in [-1, 0]:\n writer.close()\n logger.info(\"Best Accuracy: \\t%f\" % best_acc)\n logger.info(\"End Training!\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n # Required parameters\n parser.add_argument(\"--name\", required=True,\n help=\"Name of this run. 
Used for monitoring.\")\n parser.add_argument(\"--dataset\", choices=[\"cifar10\", \"cifar100\"], default=\"cifar10\",\n help=\"Which downstream task.\")\n parser.add_argument(\"--model_type\", choices=[\"ViT-B_16\", \"ViT-B_32\", \"ViT-L_16\",\n \"ViT-L_32\", \"ViT-H_14\", \"R50-ViT-B_16\",\n \"ViT-L_32_8B\",\"ViT-B_16_8B\"],\n default=\"ViT-B_16_8B\",\n help=\"Which variant to use.\")\n parser.add_argument(\"--pretrained_dir\", type=str, default=\"checkpoint/ViT-B_16.npz\",\n help=\"Where to search for pretrained ViT models.\")\n parser.add_argument(\"--output_dir\", default=\"output\", type=str,\n help=\"The output directory where checkpoints will be written.\")\n\n parser.add_argument(\"--img_size\", default=224, type=int,\n help=\"Resolution size\")\n parser.add_argument(\"--train_batch_size\", default=512, type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\", default=64, type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--eval_every\", default=100, type=int,\n help=\"Run prediction on validation set every so many steps.\"\n \"Will always run one evaluation at the end of training.\")\n\n parser.add_argument(\"--learning_rate\", default=3e-2, type=float,\n help=\"The initial learning rate for SGD.\")\n parser.add_argument(\"--weight_decay\", default=0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--num_steps\", default=10000, type=int,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--decay_type\", choices=[\"cosine\", \"linear\"], default=\"cosine\",\n help=\"How to decay the learning rate.\")\n parser.add_argument(\"--warmup_steps\", default=500, type=int,\n help=\"Step of training to perform learning rate warmup for.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--fp16_opt_level', type=str, default='O2',\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument('--loss_scale', type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n args = parser.parse_args()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl',\n timeout=timedelta(minutes=60))\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\" %\n (args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))\n\n # Set seed\n set_seed(args)\n\n # Model & Tokenizer Setup\n args, model, config = setup(args)\n\n # Training\n train(args, model, config)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "numpy.load", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.argmax" ] ]
leiyangleon/RAiDER
[ "40c083a23ded02470939318daba36f3c2a25e52b" ]
[ "tools/RAiDER/models/weatherModel.py" ]
[ "import datetime\nimport os\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\nimport netCDF4\nimport rasterio\nfrom shapely.geometry import box\n\nfrom RAiDER.constants import _ZREF, _ZMIN, _g0\nfrom RAiDER import utilFcns as util\nfrom RAiDER.interpolate import interpolate_along_axis\nfrom RAiDER.interpolator import fillna3D\nfrom RAiDER.logger import logger\nfrom RAiDER.models import plotWeather as plots, weatherModel\nfrom RAiDER.utilFcns import robmax, robmin, write2NETCDF4core\n\n\nclass WeatherModel(ABC):\n '''\n Implement a generic weather model for getting estimated SAR delays\n '''\n\n def __init__(self):\n # Initialize model-specific constants/parameters\n self._k1 = None\n self._k2 = None\n self._k3 = None\n self._humidityType = 'q'\n self._a = []\n self._b = []\n\n self.files = None\n\n self._lon_res = None\n self._lat_res = None\n self._x_res = None\n self._y_res = None\n\n self._classname = None\n self._dataset = None\n self._model_level_type = 'ml'\n self._valid_range = (\n datetime.date(1900, 1, 1),\n ) # Tuple of min/max years where data is available.\n self._lag_time = datetime.timedelta(days=30) # Availability lag time in days\n self._time = None\n self._bbox = None\n\n # Define fixed constants\n self._R_v = 461.524\n self._R_d = 287.053\n self._g0 = _g0 # gravity constant\n self._zmin = _ZMIN # minimum integration height\n self._zmax = _ZREF # max integration height\n self._proj = None\n\n # setup data structures\n self._levels = []\n self._xs = np.empty((1, 1, 1)) # Use generic x/y/z instead of lon/lat/height\n self._ys = np.empty((1, 1, 1))\n self._zs = np.empty((1, 1, 1))\n\n self._lats = None\n self._lons = None\n self._ll_bounds = None\n\n self._p = None\n self._q = None\n self._rh = None\n self._t = None\n self._e = None\n self._wet_refractivity = None\n self._hydrostatic_refractivity = None\n self._wet_ztd = None\n self._hydrostatic_ztd = None\n self._svp = None\n\n def __str__(self):\n string = '\\n'\n string += '======Weather Model class object=====\\n'\n string += 'Weather model time: {}\\n'.format(self._time)\n string += 'Latitude resolution: {}\\n'.format(self._lat_res)\n string += 'Longitude resolution: {}\\n'.format(self._lon_res)\n string += 'Native projection: {}\\n'.format(self._proj)\n string += 'ZMIN: {}\\n'.format(self._zmin)\n string += 'ZMAX: {}\\n'.format(self._zmax)\n string += 'k1 = {}\\n'.format(self._k1)\n string += 'k2 = {}\\n'.format(self._k2)\n string += 'k3 = {}\\n'.format(self._k3)\n string += 'Humidity type = {}\\n'.format(self._humidityType)\n string += '=====================================\\n'\n string += 'Class name: {}\\n'.format(self._classname)\n string += 'Dataset: {}\\n'.format(self._dataset)\n string += '=====================================\\n'\n string += 'A: {}\\n'.format(self._a)\n string += 'B: {}\\n'.format(self._b)\n if self._p is not None:\n string += 'Number of points in Lon/Lat = {}/{}\\n'.format(*self._p.shape[:2])\n string += 'Total number of grid points (3D): {}\\n'.format(np.prod(self._p.shape))\n if self._xs.size == 0:\n string += 'Minimum/Maximum y: {: 4.2f}/{: 4.2f}\\n'\\\n .format(robmin(self._ys), robmax(self._ys))\n string += 'Minimum/Maximum x: {: 4.2f}/{: 4.2f}\\n'\\\n .format(robmin(self._xs), robmax(self._xs))\n string += 'Minimum/Maximum zs/heights: {: 10.2f}/{: 10.2f}\\n'\\\n .format(robmin(self._zs), robmax(self._zs))\n string += '=====================================\\n'\n return str(string)\n\n def Model(self):\n return self._Name\n\n def fetch(self, out, lats, lons, time):\n '''\n 
Checks the input datetime against the valid date range for the model and then\n        calls the model _fetch routine\n        '''\n        self.checkTime(time)\n        lats, lons = self.checkLL(lats, lons)\n\n        self._time = time\n        self._fetch(lats, lons, time, out)\n\n    @abstractmethod\n    def _fetch(self, lats, lons, time, out):\n        '''\n        Placeholder method. Should be implemented in each weather model type class\n        '''\n        pass\n\n    def setTime(self, time, fmt='%Y-%m-%dT%H:%M:%S'):\n        ''' Set the time for a weather model '''\n        if isinstance(time, str):\n            self._time = datetime.datetime.strptime(time, fmt)\n        elif isinstance(time, datetime.datetime):\n            self._time = time\n        else:\n            raise ValueError('\"time\" must be a string or a datetime object')\n\n    def checkLL(self, lats, lons, Nextra=2):\n        '''\n        Need to correct lat/lon bounds because not all of the weather models have valid\n        data exactly bounded by -90/90 (lats) and -180/180 (lons); for GMAO and MERRA2,\n        need to adjust the longitude higher end with an extra buffer; for other models,\n        the exact bounds are close to -90/90 (lats) and -180/180 (lons) and thus can be\n        rounded to the above regions (either in the downloading-file API or subsetting-\n        data API) without problems.\n        '''\n        if self._Name == 'GMAO' or self._Name == 'MERRA2':\n            ex_buffer_lon_max = self._lon_res\n        else:\n            ex_buffer_lon_max = 0.0\n\n        # These are generalized for potential extra buffer in future models\n        ex_buffer_lat_min = 0.0\n        ex_buffer_lat_max = 0.0\n        ex_buffer_lon_min = 0.0\n\n        # At boundary lats and lons, need to modify Nextra buffer so that the lats and lons do not exceed the boundary\n        lats[lats < (-90.0 + Nextra * self._lat_res + ex_buffer_lat_min)] = (-90.0 + Nextra * self._lat_res + ex_buffer_lat_min)\n        lats[lats > (90.0 - Nextra * self._lat_res - ex_buffer_lat_max)] = (90.0 - Nextra * self._lat_res - ex_buffer_lat_max)\n        lons[lons < (-180.0 + Nextra * self._lon_res + ex_buffer_lon_min)] = (-180.0 + Nextra * self._lon_res + ex_buffer_lon_min)\n        lons[lons > (180.0 - Nextra * self._lon_res - ex_buffer_lon_max)] = (180.0 - Nextra * self._lon_res - ex_buffer_lon_max)\n\n        return lats, lons\n\n    def load(\n        self,\n        outLoc,\n        *args,\n        outLats=None,\n        outLons=None,\n        _zlevels=None,\n        zref=_ZREF,\n        **kwargs\n    ):\n        '''\n        Calls the load_weather method. Each model class should define a load_weather\n        method appropriate for that class. 'args' should be one or more filenames.\n        '''\n        # If the weather file has already been processed, do nothing\n        self._out_name = self.out_file(outLoc, lats=outLats, lons=outLons)\n        if self.checkWeatherExists(self._out_name):\n            return self._out_name\n        else:\n            exists_flag = False\n\n        # Compute the bounds of the query points\n        self._ll_bounds = self._get_ll_bounds(\n            lats=outLats,\n            lons=outLons,\n            Nextra=2\n        )\n\n        # Load the weather just for the query points\n        self.load_weather(*args, **kwargs)\n\n        # Process the weather model data\n        self._find_e()\n        self._checkNotMaskedArrays()\n        self._uniform_in_z(_zlevels=_zlevels)\n        self._checkForNans()\n        self._get_wet_refractivity()\n        self._get_hydro_refractivity()\n        self._adjust_grid(lats=outLats, lons=outLons)\n\n        # Compute Zenith delays at the weather model grid nodes\n        self._getZTD(zref)\n        return None\n\n    @abstractmethod\n    def load_weather(self, *args, **kwargs):\n        '''\n        Placeholder method. 
Should be implemented in each weather model type class\n        '''\n        pass\n\n    def checkWeatherExists(self, pathname):\n        ''' Check whether or not the weather model has already been processed '''\n        return os.path.exists(pathname)\n\n    def _get_time(self, filename=None):\n        if filename is None:\n            filename = self.files[0]\n        with netCDF4.Dataset(filename, mode='r') as f:\n            time = f.getncattr('datetime')\n        self._time = datetime.datetime.strptime(time, \"%Y_%m_%dT%H_%M_%S\")\n\n    def plot(self, plotType='pqt', savefig=True):\n        '''\n        Plotting method. Valid plot types are 'pqt' and 'wh'\n        '''\n        if plotType == 'pqt':\n            plot = plots.plot_pqt(self, savefig)\n        elif plotType == 'wh':\n            plot = plots.plot_wh(self, savefig)\n        else:\n            raise RuntimeError('WeatherModel.plot: No plotType named {}'.format(plotType))\n        return plot\n\n    def checkTime(self, time):\n        '''\n        Checks the time against the lag time and valid date range for the given model type\n        '''\n        logger.info(\n            'Weather model %s is available from %s-%s',\n            self.Model(), self._valid_range[0], self._valid_range[1]\n        )\n        if time < self._valid_range[0]:\n            raise RuntimeError(\"Weather model {} is not available at {}\".format(self.Model(), time))\n        if self._valid_range[1] is not None:\n            if self._valid_range[1] == 'Present':\n                pass\n            elif self._valid_range[1] < time:\n                raise RuntimeError(\"Weather model {} is not available at {}\".format(self.Model(), time))\n        if time > datetime.datetime.utcnow() - self._lag_time:\n            raise RuntimeError(\"Weather model {} is not available at {}\".format(self.Model(), time))\n\n    def _convertmb2Pa(self, pres):\n        '''\n        Convert pressure in millibars to Pascals\n        '''\n        return 100 * pres\n\n    def _get_heights(self, lats, geo_hgt, geo_ht_fill=np.nan):\n        '''\n        Transform geo heights to actual heights\n        '''\n        geo_ht_fix = np.where(geo_hgt != geo_ht_fill, geo_hgt, np.nan)\n        self._zs = util._geo_to_ht(lats, geo_ht_fix, self._g0)\n\n    def _find_e(self):\n        \"\"\"Check the type of e-calculation needed\"\"\"\n        if self._humidityType == 'rh':\n            self._find_e_from_rh()\n        elif self._humidityType == 'q':\n            self._find_e_from_q()\n        else:\n            raise RuntimeError('Not a valid humidity type')\n        self._rh = None\n        self._q = None\n\n    def _find_e_from_q(self):\n        \"\"\"Calculate e, partial pressure of water vapor.\"\"\"\n        self._find_svp()\n        # We have q = w/(w + 1), so w = q/(1 - q)\n        w = self._q / (1 - self._q)\n        self._e = w * self._R_v * (self._p - self._svp) / self._R_d\n\n    def _find_e_from_rh(self):\n        \"\"\"Calculate partial pressure of water vapor.\"\"\"\n        self._find_svp()\n        self._e = self._rh / 100 * self._svp\n\n    def _get_wet_refractivity(self):\n        '''\n        Calculate the wet delay from pressure, temperature, and e\n        '''\n        self._wet_refractivity = self._k2 * self._e / self._t + self._k3 * self._e / self._t**2\n\n    def _get_hydro_refractivity(self):\n        '''\n        Calculate the hydrostatic delay from pressure and temperature\n        '''\n        self._hydrostatic_refractivity = self._k1 * self._p / self._t\n\n    def getWetRefractivity(self):\n        return self._wet_refractivity\n\n    def getHydroRefractivity(self):\n        return self._hydrostatic_refractivity\n\n    def _adjust_grid(self, lats=None, lons=None):\n        '''\n        This function pads the weather grid with a level at self._zmin, if\n        it does not already go that low.\n        Note: the step that also removed levels above self._zmax has been removed.\n        '''\n\n        if self._zmin < np.nanmin(self._zs):\n            # first add in a new layer at zmin\n            self._zs = np.insert(self._zs, 0, self._zmin)\n\n            
self._lons = np.concatenate((self._lons[:, :, 0][..., np.newaxis], self._lons), axis=2)\n            self._lats = np.concatenate((self._lats[:, :, 0][..., np.newaxis], self._lats), axis=2)\n\n            self._p = util.padLower(self._p)\n            self._t = util.padLower(self._t)\n            self._e = util.padLower(self._e)\n            self._wet_refractivity = util.padLower(self._wet_refractivity)\n            self._hydrostatic_refractivity = util.padLower(self._hydrostatic_refractivity)\n        if lats is not None:\n            in_extent = self._getExtent(lats, lons)\n            self._trimExtent(in_extent)\n\n    def _getZTD(self, zref=None):\n        '''\n        Compute the zenith tropospheric delay for each weather model grid node, using the reference\n        height zref\n        '''\n        if zref is None:\n            zref = self._zmax\n\n        hgts = np.tile(self._zs.copy(), self._lats.shape[:2] + (1,))\n        wet = self.getWetRefractivity()\n        hydro = self.getHydroRefractivity()\n\n        # Get the integrated ZTD\n        wet_total, hydro_total = np.zeros(wet.shape), np.zeros(hydro.shape)\n        for level in range(wet.shape[2]):\n            wet_total[..., level] = 1e-6 * np.trapz(\n                wet[..., level:], x=self._zs[level:], axis=2\n            )\n            hydro_total[..., level] = 1e-6 * np.trapz(\n                hydro[..., level:], x=self._zs[level:], axis=2\n            )\n        self._hydrostatic_ztd = hydro_total\n        self._wet_ztd = wet_total\n\n    def _getExtent(self, lats, lons):\n        '''\n        get the bounding box around a set of lats/lons\n        '''\n        if (lats.size == 1) & (lons.size == 1):\n            return [lats - self._lat_res, lats + self._lat_res, lons - self._lon_res, lons + self._lon_res]\n        elif (lats.size > 1) & (lons.size > 1):\n            return [np.nanmin(lats), np.nanmax(lats), np.nanmin(lons), np.nanmax(lons)]\n        elif lats.size == 1:\n            return [lats - self._lat_res, lats + self._lat_res, np.nanmin(lons), np.nanmax(lons)]\n        elif lons.size == 1:\n            return [np.nanmin(lats), np.nanmax(lats), lons - self._lon_res, lons + self._lon_res]\n        else:\n            raise RuntimeError('Not a valid lat/lon shape')\n\n    @property\n    def bbox(self) -> list:\n        \"\"\"\n        Obtains the bounding box of the weather model in lat/lon CRS.\n\n        Returns\n        -------\n        list\n            xmin, ymin, xmax, ymax\n\n        Raises\n        ------\n        ValueError\n            When `self.files` is None.\n        \"\"\"\n        if self._bbox is None:\n            if self.files is None:\n                raise ValueError('Need to save weather model as netcdf')\n            weather_model_path = self.files[0]\n            with rasterio.open(f'netcdf:{weather_model_path}') as ds:\n                datasets = ds.subdatasets\n\n            with rasterio.open(datasets[0]) as ds:\n                bounds = ds.bounds\n\n            xmin, ymin, xmax, ymax = tuple(bounds)\n            self._bbox = [xmin, ymin, xmax, ymax]\n\n        return self._bbox\n\n    def checkContainment(self: weatherModel,\n                         outLats: np.ndarray,\n                         outLons: np.ndarray) -> bool:\n        \"\"\"\n        Checks whether the weather model bbox contains the bounding box of the\n        provided outLats and outLons.\n\n        Parameters\n        ----------\n        outLats : np.ndarray\n            An array of latitude points\n        outLons : np.ndarray\n            An array of longitude points\n\n        Returns\n        -------\n        bool\n            True if the weather model contains the bounding box of outLats and outLons\n            and False otherwise.\n        \"\"\"\n        xmin_input, xmax_input = np.min(outLons), np.max(outLons)\n        ymin_input, ymax_input = np.min(outLats), np.max(outLats)\n        input_box = box(xmin_input, ymin_input, xmax_input, ymax_input)\n\n        xmin, ymin, xmax, ymax = self.bbox\n        weather_model_box = box(xmin, ymin, xmax, ymax)\n\n        # Logger\n        input_box_str = [f'{x:1.2f}' for x in [xmin_input, ymin_input,\n                                               xmax_input, ymax_input]]\n        weath_box_str = [f'{x:1.2f}' for x in [xmin, ymin, xmax, ymax]]\n\n        weath_box_str = ', '.join(weath_box_str)\n        input_box_str = ', 
'.join(input_box_str)\n\n        logger.info(f'Extent of the weather model lats/lons is: '\n                    f'{weath_box_str}')\n        logger.info(f'Extent of the input lats/lons is: '\n                    f'{input_box_str}')\n\n        return weather_model_box.contains(input_box)\n\n    def _isOutside(self, extent1, extent2):\n        '''\n        Determine whether any of extent1 lies outside extent2\n        extent1/2 should be a list containing [lower_lat, upper_lat, left_lon, right_lon]\n        '''\n        t1 = extent1[0] < extent2[0]\n        t2 = extent1[1] > extent2[1]\n        t3 = extent1[2] < extent2[2]\n        t4 = extent1[3] > extent2[3]\n        if np.any([t1, t2, t3, t4]):\n            return True\n        return False\n\n    def _trimExtent(self, extent):\n        '''\n        Trim the weather model grid to the given extent [lat_min, lat_max, lon_min, lon_max]\n        '''\n        mask = (self._lats[:, :, 0] > extent[0]) & (self._lats[:, :, 0] < extent[1]) & \\\n               (self._lons[:, :, 0] > extent[2]) & (self._lons[:, :, 0] < extent[3])\n        ma1 = np.sum(mask, axis=1).astype('bool')\n        ma2 = np.sum(mask, axis=0).astype('bool')\n        if np.sum(ma1) == 0 and np.sum(ma2) == 0:\n            # Don't need to remove any points\n            return\n\n        # indices of the part of the grid to keep\n        ny, nx, nz = self._p.shape\n        index1 = max(np.arange(len(ma1))[ma1][0] - 2, 0)\n        index2 = min(np.arange(len(ma1))[ma1][-1] + 2, ny)\n        index3 = max(np.arange(len(ma2))[ma2][0] - 2, 0)\n        index4 = min(np.arange(len(ma2))[ma2][-1] + 2, nx)\n\n        # subset around points of interest\n        self._lons = self._lons[index1:index2, index3:index4, :]\n        self._lats = self._lats[index1:index2, index3:index4, ...]\n        self._xs = self._xs[index3:index4]\n        self._ys = self._ys[index1:index2]\n        self._p = self._p[index1:index2, index3:index4, ...]\n        self._t = self._t[index1:index2, index3:index4, ...]\n        self._e = self._e[index1:index2, index3:index4, ...]\n\n        self._wet_refractivity = self._wet_refractivity[index1:index2, index3:index4, ...]\n        self._hydrostatic_refractivity = self._hydrostatic_refractivity[index1:index2, index3:index4, :]\n\n    def _find_svp(self):\n        \"\"\"\n        Calculate saturation vapor pressure. Should be model-specific\n        \"\"\"\n        # From TRAIN:\n        # Could not find the wrf used equation as they appear to be\n        # mixed with latent heat etc. Instead I used the equations used\n        # in ERA-I (see IFS documentation part 2: Data assimilation\n        # (CY25R1)). Calculate saturated water vapour pressure (svp) for\n        # water (svpw) using Buck 1981 and for ice (svpi) from Alduchov\n        # and Eskridge (1996) equation AERKi\n\n        # TODO: figure out the sources of all these magic numbers and move\n        # them somewhere more visible.\n        # TODO: (Jeremy) - Need to fix/get the equation for the other\n        # weather model types. 
Right now this will be used for all models,\n        # except WRF, which is yet to be implemented in my new structure.\n        t1 = 273.15  # 0 Celsius\n        t2 = 250.15  # -23 Celsius\n\n        tref = self._t - t1\n        wgt = (self._t - t2) / (t1 - t2)\n        svpw = (6.1121 * np.exp((17.502 * tref) / (240.97 + tref)))\n        svpi = (6.1121 * np.exp((22.587 * tref) / (273.86 + tref)))\n\n        svp = svpi + (svpw - svpi) * wgt**2\n        ix_bound1 = self._t > t1\n        svp[ix_bound1] = svpw[ix_bound1]\n        ix_bound2 = self._t < t2\n        svp[ix_bound2] = svpi[ix_bound2]\n\n        self._svp = svp * 100\n\n    def _calculategeoh(self, z, lnsp):\n        '''\n        Function to calculate pressure, geopotential, and geopotential height\n        from the surface pressure and model levels provided by a weather model.\n        The model levels are numbered from the highest elevation to the lowest.\n        Inputs:\n            self - weather model object with parameters a, b defined\n            z - 3-D array of surface heights for the location(s) of interest\n            lnsp - log of the surface pressure\n        Outputs:\n            geopotential - The geopotential in units of height times acceleration\n            pressurelvs - The pressure at each of the model levels for each of\n                          the input points\n            geoheight - The geopotential heights\n        '''\n        geopotential = np.zeros_like(self._t)\n        pressurelvs = np.zeros_like(geopotential)\n        geoheight = np.zeros_like(geopotential)\n\n        # surface pressure: pressure at the surface!\n        # Note that we integrate from the ground up, so from the largest model level to 0\n        sp = np.exp(lnsp)\n\n        # t should be structured [z, y, x]\n        levelSize = self._levels\n\n        if len(self._a) != levelSize + 1 or len(self._b) != levelSize + 1:\n            raise ValueError(\n                'I have here a model with {} levels, but parameters a '.format(levelSize) +\n                'and b have lengths {} and {} respectively. Both '.format(len(self._a), len(self._b)) +\n                'lengths should equal the number of levels plus one.')\n\n        Ph_levplusone = self._a[levelSize] + (self._b[levelSize] * sp)\n\n        # Integrate up into the atmosphere from *lowest level*\n        z_h = 0  # initial value\n        for lev, t_level, q_level in zip(\n                range(levelSize, 0, -1), self._t[::-1], self._q[::-1]):\n\n            # lev is the level number 1-60, we need a corresponding index\n            # into ts and qs\n            # ilevel = levelSize - lev  # << this was Ray's original, but is a typo\n            # because indexing like that results in pressure and height arrays that\n            # are in the opposite orientation to the t/q arrays.\n            ilevel = lev - 1\n\n            # compute moist temperature\n            t_level = t_level * (1 + 0.609133 * q_level)\n\n            # compute the pressures (on half-levels)\n            Ph_lev = self._a[lev - 1] + (self._b[lev - 1] * sp)\n\n            pressurelvs[ilevel] = Ph_lev\n\n            if lev == 1:\n                dlogP = np.log(Ph_levplusone / 0.1)\n                alpha = np.log(2)\n            else:\n                dlogP = np.log(Ph_levplusone / Ph_lev)\n                dP = Ph_levplusone - Ph_lev\n                alpha = 1 - ((Ph_lev / dP) * dlogP)\n\n            TRd = t_level * self._R_d\n\n            # z_f is the geopotential of this full level\n            # integrate from previous (lower) half-level z_h to the full level\n            z_f = z_h + TRd * alpha\n            # geoheight[ilevel] = z_f/self._g0\n\n            # Geopotential (add in surface geopotential)\n            geopotential[ilevel] = z_f + z\n            geoheight[ilevel] = geopotential[ilevel] / self._g0\n\n            # z_h is the geopotential of 'half-levels'\n            # integrate z_h to next half level\n            z_h += TRd * dlogP\n\n            Ph_levplusone = Ph_lev\n\n        return geopotential, pressurelvs, geoheight\n\n    def _get_ll_bounds(self, lats=None, lons=None, Nextra=2):\n        '''\n        returns the extents of lat/lon plus a buffer\n        '''\n        if lats is None:\n            lats = self._lats\n            lons = self._lons\n\n        lat_min = np.nanmin(lats) - Nextra * 
self._lat_res\n lat_max = np.nanmax(lats) + Nextra * self._lat_res\n lon_min = np.nanmin(lons) - Nextra * self._lon_res\n lon_max = np.nanmax(lons) + Nextra * self._lon_res\n\n return lat_min, lat_max, lon_min, lon_max\n\n def getProjection(self):\n '''\n Returns the native weather projection, which should be a pyproj object\n '''\n return self._proj\n\n def getPoints(self):\n return self._xs.copy(), self._ys.copy(), self._zs.copy()\n\n def getXY_gdal(self, filename):\n '''\n Pull the grid info (x,y) from a gdal-readable file\n '''\n from osgeo import gdal\n ds = gdal.Open(filename, gdal.GA_ReadOnly)\n xSize, ySize = ds.RasterXSize, ds.RasterYSize\n trans = ds.GetGeoTransform()\n del ds\n\n # make regular point grid\n pixelSizeX = trans[1]\n pixelSizeY = trans[5]\n eastOrigin = trans[0] + 0.5 * pixelSizeX\n northOrigin = trans[3] + 0.5 * pixelSizeY\n xArray = np.arange(eastOrigin, eastOrigin + pixelSizeX * xSize, pixelSizeX)\n yArray = np.arange(northOrigin, northOrigin + pixelSizeY * ySize, pixelSizeY)\n\n return xArray, yArray\n\n def _uniform_in_z(self, _zlevels=None):\n '''\n Interpolate all variables to a regular grid in z\n '''\n nx, ny = self._p.shape[:2]\n\n # new regular z-spacing\n if _zlevels is None:\n _zlevels = np.nanmean(self._zs, axis=(0, 1))\n new_zs = np.tile(_zlevels, (nx, ny, 1))\n\n # re-assign values to the uniform z\n # new variables\n self._t = interpolate_along_axis(self._zs, self._t, new_zs, axis=2, fill_value=np.nan)\n self._p = interpolate_along_axis(self._zs, self._p, new_zs, axis=2, fill_value=np.nan)\n self._e = interpolate_along_axis(self._zs, self._e, new_zs, axis=2, fill_value=np.nan)\n self._zs = _zlevels\n self._xs = np.unique(self._xs)\n self._ys = np.unique(self._ys)\n\n def _checkNotMaskedArrays(self):\n try:\n self._p = self._p.filled(fill_value=np.nan)\n except:\n pass\n try:\n self._t = self._t.filled(fill_value=np.nan)\n except:\n pass\n try:\n self._e = self._e.filled(fill_value=np.nan)\n except:\n pass\n try:\n self._wet_refractivity = self._wet_refractivity.filled(fill_value=np.nan)\n except:\n pass\n try:\n self._hydrostatic_refractivity = self._hydrostatic_refractivity.filled(fill_value=np.nan)\n except:\n pass\n\n def _checkForNans(self):\n '''\n Fill in NaN-values\n '''\n self._p = fillna3D(self._p)\n self._t = fillna3D(self._t)\n self._e = fillna3D(self._e)\n\n def out_file(self, outLoc, lats=None, lons=None):\n if lats is None:\n lats = self._lats\n if lons is None:\n lons = self._lons\n f = make_weather_model_filename(\n self._Name,\n self._time,\n self._get_ll_bounds(lats=lats, lons=lons)\n )\n return os.path.join(outLoc, f)\n\n def filename(self, time=None, outLoc='weather_files'):\n '''\n Create a filename to store the weather model\n '''\n os.makedirs(outLoc, exist_ok=True)\n\n if time is None:\n if self._time is None:\n raise ValueError('Time must be specified before the file can be written')\n else:\n time = self._time\n\n f = make_raw_weather_data_filename(\n outLoc,\n self._Name,\n time,\n )\n\n self.files = [f]\n\n def write(\n self,\n NoDataValue=-3.4028234e+38,\n chunk=(1, 128, 128),\n mapping_name='WGS84'\n ):\n '''\n By calling the abstract/modular netcdf writer\n (RAiDER.utilFcns.write2NETCDF4core), write the weather model data\n and refractivity to an NETCDF4 file that can be accessed by external programs.\n '''\n # Generate the filename\n f = self._out_name\n\n dimidY, dimidX, dimidZ = self._t.shape\n chunk_lines_Y = np.min([chunk[1], dimidY])\n chunk_lines_X = np.min([chunk[2], dimidX])\n ChunkSize = [1, chunk_lines_Y, 
chunk_lines_X]\n\n nc_outfile = netCDF4.Dataset(f, 'w', clobber=True, format='NETCDF4')\n nc_outfile.setncattr('Conventions', 'CF-1.6')\n nc_outfile.setncattr('datetime', datetime.datetime.strftime(self._time, \"%Y_%m_%dT%H_%M_%S\"))\n nc_outfile.setncattr('date_created', datetime.datetime.now().strftime(\"%Y_%m_%dT%H_%M_%S\"))\n title = 'Weather model data and delay calculations'\n nc_outfile.setncattr('title', title)\n\n tran = [self._xs[0], self._xs[1] - self._xs[0], 0.0, self._ys[0], 0.0, self._ys[1] - self._ys[0]]\n\n dimension_dict = {\n 'x': {'varname': 'x',\n 'datatype': np.dtype('float64'),\n 'dimensions': ('x'),\n 'length': dimidX,\n 'FillValue': None,\n 'standard_name': 'projection_x_coordinate',\n 'description': 'weather model native x',\n 'dataset': self._xs,\n 'units': 'degrees_east'},\n 'y': {'varname': 'y',\n 'datatype': np.dtype('float64'),\n 'dimensions': ('y'),\n 'length': dimidY,\n 'FillValue': None,\n 'standard_name': 'projection_y_coordinate',\n 'description': 'weather model native y',\n 'dataset': self._ys,\n 'units': 'degrees_north'},\n 'z': {'varname': 'z',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z'),\n 'length': dimidZ,\n 'FillValue': None,\n 'standard_name': 'projection_z_coordinate',\n 'description': 'vertical coordinate',\n 'dataset': self._zs,\n 'units': 'm'}\n }\n\n dataset_dict = {\n 'latitude': {'varname': 'latitude',\n 'datatype': np.dtype('float64'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'latitude',\n 'description': 'latitude',\n 'dataset': self._lats.swapaxes(0, 2).swapaxes(1, 2),\n 'units': 'degrees_north'},\n 'longitude': {'varname': 'longitude',\n 'datatype': np.dtype('float64'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'longitude',\n 'description': 'longitude',\n 'dataset': self._lons.swapaxes(0, 2).swapaxes(1, 2),\n 'units': 'degrees_east'},\n 't': {'varname': 't',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'temperature',\n 'description': 'temperature',\n 'dataset': self._t.swapaxes(0, 2).swapaxes(1, 2),\n 'units': 'K'},\n 'p': {'varname': 'p',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'pressure',\n 'description': 'pressure',\n 'dataset': self._p.swapaxes(0, 2).swapaxes(1, 2),\n 'units': 'Pa'},\n 'e': {'varname': 'e',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'humidity',\n 'description': 'humidity',\n 'dataset': self._e.swapaxes(0, 2).swapaxes(1, 2),\n 'units': 'Pa'},\n 'wet': {'varname': 'wet',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'wet_refractivity',\n 'description': 'wet_refractivity',\n 'dataset': self._wet_refractivity.swapaxes(0, 2).swapaxes(1, 2)},\n 'hydro': {'varname': 'hydro',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'hydrostatic_refractivity',\n 'description': 
'hydrostatic_refractivity',\n 'dataset': self._hydrostatic_refractivity.swapaxes(0, 2).swapaxes(1, 2)},\n 'wet_total': {'varname': 'wet_total',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'total_wet_refractivity',\n 'description': 'total_wet_refractivity',\n 'dataset': self._wet_ztd.swapaxes(0, 2).swapaxes(1, 2)},\n 'hydro_total': {'varname': 'hydro_total',\n 'datatype': np.dtype('float32'),\n 'dimensions': ('z', 'y', 'x'),\n 'grid_mapping': mapping_name,\n 'FillValue': NoDataValue,\n 'ChunkSize': ChunkSize,\n 'standard_name': 'total_hydrostatic_refractivity',\n 'description': 'total_hydrostatic_refractivity',\n 'dataset': self._hydrostatic_ztd.swapaxes(0, 2).swapaxes(1, 2)}\n }\n\n nc_outfile = write2NETCDF4core(\n nc_outfile,\n dimension_dict,\n dataset_dict,\n tran,\n mapping_name='WGS84'\n )\n\n nc_outfile.sync() # flush data to disk\n nc_outfile.close()\n return f\n\n\ndef make_weather_model_filename(name, time, ll_bounds):\n if ll_bounds[0] < 0:\n S = 'S'\n else:\n S = 'N'\n if ll_bounds[1] < 0:\n N = 'S'\n else:\n N = 'N'\n if ll_bounds[2] < 0:\n W = 'W'\n else:\n W = 'E'\n if ll_bounds[3] < 0:\n E = 'W'\n else:\n E = 'E'\n return '{}_{}_{:.0f}{}_{:.0f}{}_{:.0f}{}_{:.0f}{}.nc'.format(\n name,\n time.strftime(\"%Y_%m_%d_T%H_%M_%S\"),\n np.ceil(np.abs(ll_bounds[0])),\n S,\n np.ceil(np.abs(ll_bounds[1])),\n N,\n np.ceil(np.abs(ll_bounds[2])),\n W,\n np.ceil(np.abs(ll_bounds[3])),\n E\n )\n\n\ndef make_raw_weather_data_filename(outLoc, name, time):\n ''' Filename generator for the raw downloaded weather model data '''\n f = os.path.join(\n outLoc,\n '{}_{}.{}'.format(\n name,\n datetime.datetime.strftime(time, '%Y_%m_%d_T%H_%M_%S'),\n 'nc'\n )\n )\n return f\n" ]
[ [ "numpy.nanmax", "numpy.nanmin", "numpy.dtype", "numpy.concatenate", "numpy.max", "numpy.zeros_like", "numpy.any", "numpy.nanmean", "numpy.exp", "numpy.where", "numpy.trapz", "numpy.unique", "numpy.arange", "numpy.insert", "numpy.zeros", "numpy.log", "numpy.min", "numpy.sum", "numpy.abs", "numpy.tile", "numpy.prod", "numpy.empty" ] ]
bluemixgarage/Mask_RCNN
[ "c8b42b0f582e0d6e6cc40e54b895de6b2e71c72d" ]
[ "mrcnn/model.py" ]
[ "\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n    - feature_maps: List of feature maps from different levels of the pyramid.\n                    Each is [batch, height, width, channels]\n\n    Output:\n    Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n    The width and height are those specified in the pool_shape in the layer\n    constructor.\n    \"\"\"\n\n    def __init__(self, pool_shape, **kwargs):\n        super(PyramidROIAlign, self).__init__(**kwargs)\n        self.pool_shape = tuple(pool_shape)\n\n    def call(self, inputs):\n        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n        boxes = inputs[0]\n\n        # Image meta\n        # Holds details about the image. See compose_image_meta()\n        image_meta = inputs[1]\n\n        # Feature Maps. List of feature maps from different levels of the\n        # feature pyramid. Each is [batch, height, width, channels]\n        feature_maps = inputs[2:]\n\n        # Assign each ROI to a level in the pyramid based on the ROI area.\n        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n        h = y2 - y1\n        w = x2 - x1\n        # Use shape of first image. Images in a batch must have the same size.\n        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n        # Equation 1 in the Feature Pyramid Networks paper. Account for\n        # the fact that our coordinates are normalized here.\n        # e.g. a 224x224 ROI (in pixels) maps to P4\n        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n        roi_level = tf.minimum(5, tf.maximum(\n            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n        roi_level = tf.squeeze(roi_level, 2)\n\n        # Loop through levels and apply ROI pooling to each. P2 to P5.\n        pooled = []\n        box_to_level = []\n        for i, level in enumerate(range(2, 6)):\n            ix = tf.where(tf.equal(roi_level, level))\n            level_boxes = tf.gather_nd(boxes, ix)\n\n            # Box indices for crop_and_resize.\n            box_indices = tf.cast(ix[:, 0], tf.int32)\n\n            # Keep track of which box is mapped to which level\n            box_to_level.append(ix)\n\n            # Stop gradient propagation to ROI proposals\n            level_boxes = tf.stop_gradient(level_boxes)\n            box_indices = tf.stop_gradient(box_indices)\n\n            # Crop and Resize\n            # From Mask R-CNN paper: \"We sample four regular locations, so\n            # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
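        # (Illustrative note, not from the original: for a 1024x1024 input,
        # a pixel window (y1, x1, y2, x2) = (100, 0, 924, 1024) maps to
        # roughly (0.098, 0.0, 0.902, 1.0) in normalized coordinates.)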
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
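    A minimal usage sketch (illustrative; it mirrors how build() calls this
    function further below, with the default config values filled in):

        rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
        rpn_class_logits, rpn_probs, rpn_bbox = rpn([feature_map])

    The same model instance is applied to every pyramid level, which is what
    shares the weights across levels.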
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from 
different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
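    # (Illustrative note, not in the original: e.g. rpn_match values
    # [1, -1, 0, 1] become anchor_class [1, 0, 0, 1] below, and the neutral
    # anchor at index 2 is dropped by the tf.where() filtering that follows.)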
# Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    loss = smooth_l1_loss(target_bbox, rpn_bbox)

    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss


def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. 
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). 
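    As a rough sense of scale (illustrative figure, not from the original):
        a boolean mask stack of that size is about 100 MB per image
        (1024*1024*100 bytes), which is the motivation for mini masks.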
Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. 
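    # (Clarifying note, not in the original: boxes are recomputed from the
    # possibly augmented masks rather than read from the dataset, so they
    # stay consistent with any resizing and augmentation applied above.)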
# Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask


def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    # Assign ROIs to GT boxes
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
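    # (Illustrative note, not in the original: with TRAIN_ROIS_PER_IMAGE=200
    # and ROI_POSITIVE_RATIO=0.33, the subsampling above keeps at most 66
    # foreground ROIs and fills the remaining slots with background ROIs.)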
# [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini mask in the placeholder
            class_mask = placeholder

        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = utils.resize(m, config.MASK_SHAPE)
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks


def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)

    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)

    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens).
    # Instead, match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # TODO: If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argmax(overlaps, axis=0)
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox


def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    # Generate random ROIs around GT boxes (90% of count)
    rois_per_box = int(0.9 * count / gt_boxes.shape[0])
    for i in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
        h = gt_y2 - gt_y1
        w = gt_x2 - gt_x1
        # random boundaries
        r_y1 = max(gt_y1 - h, 0)
        r_y2 = min(gt_y2 + h, image_shape[0])
        r_x1 = max(gt_x1 - w, 0)
        r_x2 = min(gt_x2 + w, image_shape[1])

        # To avoid generating boxes with zero area, we generate double what
        # we need and filter out the extra. 
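        # (Note, not in the original: sampling 2x candidates and filtering
        # keeps this vectorized in NumPy instead of drawing boxes one at a
        # time in a Python loop.)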
        # If we get fewer valid boxes than we need, we loop and try again.
        while True:
            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
            # Filter out zero area boxes
            threshold = 1
            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                        threshold][:rois_per_box]
            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                        threshold][:rois_per_box]
            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                break

        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
        # into x1, y1, x2, y2 order
        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
        box_rois = np.hstack([y1, x1, y2, x2])
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois

    # Generate random ROIs anywhere in the image (10% of count)
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break

    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into x1, y1, x2, y2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois


def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False,
                   no_augmentation_sources=None):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
    no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is a string that identifies a dataset and is
        defined in the Dataset class.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
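                # (Clarifying note, not in the original: after several
                # consecutive failures, assume the error is systematic and
                # re-raise instead of silently skipping images forever.)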
                raise


############################################################
# MaskRCNN Class
############################################################

class MaskRCNN():
    """Encapsulates the Mask RCNN model functionality.

    The actual Keras model is in the keras_model property.
    """

    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)

    def build(self, mode, config):
        """Build Mask R-CNN architecture.
        input_shape: The shape of the input image.
        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        """
        assert mode in ['training', 'inference']

        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)

            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")

        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # stage5=True, so the stage 5 output (C5) is created as well and
        # feeds the top of the feature pyramid below.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]

        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors

        # RPN Model
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]

        rpn_class_logits, rpn_class, rpn_bbox = outputs

        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(
            proposal_count=proposal_count,
            nms_threshold=config.RPN_NMS_THRESHOLD,
            name="ROI",
            config=config)([rpn_class, rpn_bbox, anchors])

        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            active_class_ids = KL.Lambda(
                lambda x: parse_image_meta_graph(x)["active_class_ids"]
                )(input_image_meta)

            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x, K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois

            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])

            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)

            # TODO: clean up (use tf.identity if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)

            # Losses
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])

            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
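            # (Clarifying note, not in the original: in inference mode the
            # heads consume rpn_rois directly; there is no
            # DetectionTargetLayer, and masks are computed only for the final
            # refined detections below.)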
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
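        # (Clarifying note, not in the original: ParallelModel keeps the
        # original single-GPU model in an inner_model attribute, hence the
        # hasattr() check below.)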
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keep_dims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keep_dims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from where we left off. Get epoch and date from the file name\n # A sample model path might look like:\n # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done already, so this actually determines\n the epochs to train in total rather than in this particular\n call.\n layers: Allows selecting which layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. 
You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n custom_callbacks: Optional. Add custom callbacks to be called\n with the keras fit_generator method. Must be a list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is a string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE,\n no_augmentation_sources=no_augmentation_sources)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name == 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expects inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if tuple(image_shape) not in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebooks are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a subset of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image coordinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes an image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellaneous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks a different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n" ]
[ [ "numpy.amax", "numpy.expand_dims", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.cast", "tensorflow.image.non_max_suppression", "tensorflow.equal", "tensorflow.image.crop_and_resize", "numpy.concatenate", "tensorflow.abs", "tensorflow.map_fn", "numpy.any", "tensorflow.pad", "tensorflow.where", "tensorflow.random_shuffle", "numpy.where", "tensorflow.add_n", "numpy.divide", "numpy.random.randint", "tensorflow.boolean_mask", "numpy.hstack", "tensorflow.Variable", "numpy.reshape", "numpy.fliplr", "numpy.arange", "tensorflow.squeeze", "numpy.stack", "tensorflow.divide", "tensorflow.stop_gradient", "tensorflow.gather", "numpy.copy", "numpy.argmax", "tensorflow.nn.top_k", "tensorflow.argmax", "numpy.zeros", "numpy.log", "tensorflow.gather_nd", "tensorflow.unique", "tensorflow.shape", "numpy.random.choice", "tensorflow.identity", "tensorflow.exp", "tensorflow.sparse_tensor_to_dense", "numpy.delete", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.split", "tensorflow.round", "numpy.array", "numpy.sum", "tensorflow.size", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "numpy.abs", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.expand_dims", "numpy.sort", "numpy.ones", "numpy.random.shuffle", "tensorflow.log", "numpy.broadcast_to", "tensorflow.sqrt", "numpy.empty", "tensorflow.logical_and" ] ]
oOXpycTOo/FullStackDeepLearning
[ "f53a2307d5a0472320651f202294b608bff4946c" ]
[ "lab5/text_recognizer/models/cnn.py" ]
[ "from typing import Any, Dict, Tuple\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nCONV_DIM = 64\nFC_DIM = 128\nIMAGE_SIZE = 28\nDEFAULT_BACKBONE = 'resnet'\nDEFAULT_BLOCK = 'residual'\nDEFAULT_POOL_TYPE = 'max_pool'\nSE_REDUCTION_RATE = 4\nN_BLOCKS = 2\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU.\n \"\"\"\n\n def __init__(self, input_channels: int, output_channels: int, **kwargs: nn.Module) -> None:\n super().__init__()\n self.conv = nn.Conv2d(input_channels, output_channels, **kwargs)\n self.relu = nn.ReLU()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n of dimensions (B, C_in, H, W)\n\n Returns\n -------\n torch.Tensor\n of dimensions (B, C_out, H, W)\n \"\"\"\n c = self.conv(x)\n r = self.relu(c)\n return r\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"\n A ResNet-like block, with 3x3 conv and padding of size 1, followed by ReLU and residual connection.\n \"\"\"\n def __init__(self, input_channels: int, output_channels: int, **kwargs: nn.Module) -> None:\n super().__init__()\n downsample = kwargs.get('downsample', False)\n res_stride = 1\n if downsample:\n res_stride = 2\n self.conv1 = nn.Conv2d(input_channels,\n output_channels,\n kernel_size=3,\n stride=res_stride,\n padding=1,\n bias=False)\n self.relu = nn.ReLU()\n self.bn1 = nn.BatchNorm2d(num_features=output_channels) \n self.conv2 = nn.Conv2d(output_channels,\n output_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False)\n self.bn2 = nn.BatchNorm2d(num_features=output_channels)\n self.downsample = None\n if downsample:\n self.downsample = nn.Conv2d(input_channels,\n output_channels,\n kernel_size=1,\n stride=res_stride,\n bias=False)\n self.bn_downsample = nn.BatchNorm2d(num_features=output_channels)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n of dimensions (B, C_in, H, W)\n\n Returns\n -------\n torch.Tensor\n of dimensions (B, C_out, H, W)\n \"\"\"\n identity = x\n if self.downsample is not None:\n identity = self.downsample(identity)\n identity = self.bn_downsample(identity)\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out + identity)\n return out\n\n\nclass SqueezeExcitationBlock(nn.Module):\n \"\"\"\n A SE block with ResNet block as backbone, with 3x3 conv and padding of size 1, followed by ReLU and residual connection.\n \"\"\"\n def __init__(self, input_channels: int, output_channels: int, **kwargs: nn.Module) -> None:\n super().__init__()\n downsample = kwargs.get('downsample', False)\n reduction_rate = kwargs.get('reduction_rate', SE_REDUCTION_RATE)\n res_stride = 1\n if downsample:\n res_stride = 2\n self.conv1 = nn.Conv2d(input_channels,\n output_channels,\n kernel_size=3,\n stride=res_stride,\n padding=1,\n bias=False)\n self.conv2 = nn.Conv2d(output_channels,\n output_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(num_features=output_channels)\n self.bn2 = nn.BatchNorm2d(num_features=output_channels)\n self.downsample = None\n if downsample:\n self.downsample = nn.Conv2d(input_channels,\n output_channels,\n kernel_size=1,\n stride=res_stride,\n bias=False)\n self.bn_downsample = nn.BatchNorm2d(num_features=output_channels)\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc_se_1 = nn.Linear(output_channels, output_channels // 
reduction_rate)\n self.fc_se_2 = nn.Linear(output_channels // reduction_rate, output_channels)\n\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n of dimensions (B, C_in, H, W)\n\n Returns\n -------\n torch.Tensor\n of dimensions (B, C_out, H, W)\n \"\"\"\n identity = x\n if self.downsample is not None:\n identity = self.downsample(identity)\n identity = self.bn_downsample(identity)\n residual = self.conv1(x)\n residual = self.bn1(residual)\n residual = self.relu(residual)\n residual = self.conv2(residual)\n residual = self.bn2(residual)\n\n weights = self.avg_pool(residual).permute(0, 2, 3, 1)\n weights = self.fc_se_1(weights)\n weights = self.relu(weights)\n weights = self.fc_se_2(weights)\n weights = self.sigmoid(weights).permute(0, 3, 1, 2)\n\n out = residual * weights\n out = self.relu(out + identity)\n return out\n\n\nclass ResNetBackbone(nn.Module):\n DOWNSAMPLE_EVERY = 2\n BLOCKS = {'residual': ResidualBlock, 'squeeze_excitation': SqueezeExcitationBlock}\n def __init__(self, input_size: int,\n conv_dim: int,\n n_blocks: int,\n fc_dim: int,\n block_type: str) -> None:\n super().__init__()\n self.__block_type = block_type\n self.__conv_dim = conv_dim\n # _get_blocks doubles the width at blocks 2, 2 + DOWNSAMPLE_EVERY, ..., i.e. ceil((n_blocks - 2) / DOWNSAMPLE_EVERY) times\n conv_output_dim = conv_dim * 2 ** max(0, -(-(n_blocks - 2) // self.DOWNSAMPLE_EVERY))\n self.init_conv = nn.Conv2d(input_size,\n conv_dim,\n kernel_size=7,\n padding=3,\n stride=2,\n bias=False)\n self.init_bn = nn.BatchNorm2d(conv_dim)\n self.init_pool = nn.MaxPool2d(kernel_size=3,\n stride=2,\n padding=1)\n self.blocks = self._get_blocks(n_blocks)\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.linear = nn.Linear(conv_output_dim, fc_dim)\n self.relu = nn.ReLU()\n\n def _get_blocks(self, n_blocks: int) -> nn.Sequential:\n blocks = []\n input_dim = self.__conv_dim\n output_dim = self.__conv_dim\n block_builder = self.BLOCKS[self.__block_type]\n for i in range(n_blocks):\n if i >= 2 and (i - 2) % self.DOWNSAMPLE_EVERY == 0:\n downsample = True\n output_dim *= 2\n else:\n downsample = False\n blocks.append(block_builder(input_dim, output_dim, downsample=downsample))\n input_dim = output_dim\n return nn.Sequential(*blocks)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.init_conv(x)\n x = self.init_bn(x)\n x = self.relu(x)\n x = self.init_pool(x)\n x = self.blocks(x)\n x = self.avg_pool(x)\n x = torch.flatten(x, 1)\n x = self.linear(x)\n x = self.relu(x)\n return x\n\n\nclass StupidSimpleBackbone(nn.Module):\n def __init__(self, input_size: int,\n conv_dim: int,\n n_blocks: int,\n fc_dim: int,\n block_type: str) -> None:\n super().__init__()\n self.conv_1_1 = ConvBlock(input_channels=input_size, output_channels=conv_dim,\n kernel_size=5, padding=2)\n self.conv_1_2 = ConvBlock(input_channels=conv_dim, output_channels=conv_dim,\n kernel_size=5, padding=2)\n self.max_pool_1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv_2_1 = ConvBlock(input_channels=conv_dim, output_channels=conv_dim // 2,\n kernel_size=3, padding=1)\n self.conv_2_2 = ConvBlock(input_channels=conv_dim // 2, output_channels=conv_dim // 2,\n kernel_size=3, padding=1)\n self.max_pool_2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.fc = nn.Linear(conv_dim // 2 * 49, fc_dim)\n self.relu = nn.ReLU()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv_1_1(x)\n x = self.conv_1_2(x)\n x = self.max_pool_1(x)\n x = self.conv_2_1(x)\n x = self.conv_2_2(x)\n x = self.max_pool_2(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n x = self.relu(x)\n 
return x\n\n\nclass CNN(nn.Module):\n \"\"\"Simple CNN for recognizing characters in a square image.\"\"\"\n BACKBONES = {'lenet': StupidSimpleBackbone, 'resnet': ResNetBackbone}\n\n def __init__(self, data_config: Dict[str, Any], args: argparse.Namespace = None) -> None:\n super().__init__()\n self.args = vars(args) if args is not None else {}\n\n input_dims = data_config[\"input_dims\"]\n num_classes = len(data_config[\"mapping\"])\n\n conv_dim = self.args.get(\"conv_dim\", CONV_DIM)\n fc_dim = self.args.get(\"fc_dim\", FC_DIM)\n\n backbone = self.args.get(\"backbone\", DEFAULT_BACKBONE)\n n_blocks = self.args.get(\"n_blocks\", N_BLOCKS)\n block_type = self.args.get(\"block_type\", DEFAULT_BLOCK)\n self.backbone = self.BACKBONES[backbone](input_dims[0], conv_dim, n_blocks, fc_dim, block_type)\n\n use_dropout = self.args.get(\"use_dropout\")\n if use_dropout:\n self.dropout = nn.Dropout(0.25)\n else:\n self.dropout = nn.Identity()\n\n self.out_fc = nn.Linear(fc_dim, num_classes)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n (B, C, H, W) tensor, where H and W must equal IMAGE_SIZE\n\n Returns\n -------\n torch.Tensor\n (B, num_classes) tensor of logits\n \"\"\"\n _B, _C, H, W = x.shape\n assert H == W == IMAGE_SIZE\n x = self.backbone(x)\n x = self.dropout(x)\n x = self.out_fc(x)\n return x\n\n @staticmethod\n def add_to_argparse(parser):\n parser.add_argument(\"--conv_dim\", type=int, default=CONV_DIM)\n parser.add_argument(\"--fc_dim\", type=int, default=FC_DIM)\n parser.add_argument(\"--backbone\", type=str, default=DEFAULT_BACKBONE)\n parser.add_argument(\"--block_type\", type=str, default=DEFAULT_BLOCK)\n parser.add_argument(\"--n_blocks\", type=int, default=N_BLOCKS)\n parser.add_argument(\"--use_dropout\", action='store_true')\n return parser\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.flatten", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Identity", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
arthursoprano/pyJHTDB
[ "0de6e851540b49683c3c9af52a84fe94054956b5" ]
[ "pyJHTDB/test.py" ]
[ "########################################################################\n#\n# Copyright 2014 Johns Hopkins University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Contact: [email protected]\n# Website: http://turbulence.pha.jhu.edu/\n#\n########################################################################\n\nimport os\nimport sys\nimport ctypes\nimport math\nimport numpy\n\nimport pyJHTDB\nimport pyJHTDB.dbinfo\nimport pyJHTDB.interpolator\n\nif pyJHTDB.found_matplotlib:\n import matplotlib.pyplot as plt\n import matplotlib.cm as cm\nelse:\n print('matplotlib is needed for contour plots.'\n + 'You should be able to find installation instructions at http://matplotlib.sourceforge.net')\n\nif pyJHTDB.found_h5py:\n import h5py\n import pyJHTDB.cutout\nelse:\n print('h5py is needed for working with cutouts.')\n\ndef test_plain(N=10):\n #time = 0.364\n #turbc.c has a fixed time, but it makes more sense to have a random one\n time = 0.002 * numpy.random.randint(1024)\n # points must be created with the single precision data type\n points = numpy.empty((N, 3), dtype = 'float32')\n # [:,:] is there to force the conversion from the return type of random_sample to single precision\n points[:,:] = 2*math.pi*numpy.random.random_sample(size = (N, 3))[:,:]\n spatialInterp = 6 # 6 point Lagrange\n temporalInterp = 0 # no time interpolation\n FD4Lag4 = 40 # 4 point Lagrange interp for derivatives\n\n # mhdc has starttime .364 and endtime .376\n startTime = 0.002 * numpy.random.randint(1024)\n endTime = startTime + 0.012\n lag_dt = 0.0004\n\n print('Coordinates of {0} points where variables are requested:'.format(N))\n for p in range(N):\n print('{0}: {1}'.format(p, points[p]))\n print('Data is requested at time {0}'.format(time))\n\n # load shared library\n lTDB = pyJHTDB.libJHTDB()\n #initialize webservices\n lTDB.initialize()\n\n print('Requesting velocity at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = spatialInterp, tinterp = temporalInterp,\n getFunction = 'getVelocity')\n for p in range(N):\n print('{0}: {1}'.format(p, result[p]))\n print('Requesting forcing at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = spatialInterp, tinterp = temporalInterp,\n getFunction = 'getForce')\n for p in range(N):\n print('{0}: {1}'.format(p, result[p]))\n print('Requesting velocity and pressure at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = spatialInterp, tinterp = temporalInterp,\n getFunction = 'getVelocityAndPressure')\n for p in range(N):\n print('{0}: v = {1}, p = {2:+}'.format(p, result[p][0:3], result[p][3]))\n print('Requesting velocity gradient at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = FD4Lag4, tinterp = temporalInterp,\n getFunction = 'getVelocityGradient')\n for p in range(N):\n print('{0}: '.format(p) +\n 'duxdx = {0:+e}, duxdy = {1:+e}, duxdz = {2:+e}\\n '.format(result[p][0], result[p][1], result[p][2]) +\n 'duydx = {0:+e}, duydy = {1:+e}, duydz = {2:+e}\\n 
'.format(result[p][3], result[p][4], result[p][5]) +\n 'duzdx = {0:+e}, duzdy = {1:+e}, duzdz = {2:+e}'.format(result[p][6], result[p][7], result[p][8]))\n print('Requesting velocity hessian at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = FD4Lag4, tinterp = temporalInterp,\n getFunction = 'getVelocityHessian')\n for p in range(N):\n print('{0}: '.format(p) +\n 'd2uxdxdx = {0:+e}, d2uxdxdy = {1:+e}, d2uxdxdz = {2:+e}\\n '.format(result[p][ 0], result[p][ 1], result[p][ 2])\n + 'd2uxdydy = {0:+e}, d2uxdydz = {1:+e}, d2uxdzdz = {2:+e}\\n '.format(result[p][ 3], result[p][ 4], result[p][ 5])\n + 'd2uydxdx = {0:+e}, d2uydxdy = {1:+e}, d2uydxdz = {2:+e}\\n '.format(result[p][ 6], result[p][ 7], result[p][ 8])\n + 'd2uydydy = {0:+e}, d2uydydz = {1:+e}, d2uydzdz = {2:+e}\\n '.format(result[p][ 9], result[p][10], result[p][11])\n + 'd2uzdxdx = {0:+e}, d2uzdxdy = {1:+e}, d2uzdxdz = {2:+e}\\n '.format(result[p][12], result[p][13], result[p][14])\n + 'd2uzdydy = {0:+e}, d2uzdydz = {1:+e}, d2uzdzdz = {2:+e}'.format(result[p][15], result[p][16], result[p][17]))\n print('Requesting velocity laplacian at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = FD4Lag4, tinterp = temporalInterp,\n getFunction = 'getVelocityLaplacian')\n for p in range(N):\n print('{0}: '.format(p) +\n 'grad2ux = {0:+e}, grad2uy = {1:+e}, grad2uz = {2:+e}, '.format(result[p][0], result[p][1], result[p][2]))\n print('Requesting pressure gradient at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = FD4Lag4, tinterp = temporalInterp,\n getFunction = 'getPressureGradient')\n for p in range(N):\n print('{0}: '.format(p)\n + 'dpdx = {0:+e}, dpdy = {1:+e}, dpdz = {2:+e}, '.format(result[p][0], result[p][1], result[p][2]))\n print('Requesting pressure hessian at {0} points...'.format(N))\n result = lTDB.getData(time, points,\n sinterp = FD4Lag4, tinterp = temporalInterp,\n getFunction = 'getPressureHessian')\n for p in range(N):\n print('{0}: '.format(p) +\n 'd2pdxdx = {0:+e}, d2pdxdy = {1:+e}, d2pdxdz = {2:+e}\\n '.format(result[p][0], result[p][1], result[p][2])\n + 'd2pdydy = {0:+e}, d2pdydz = {1:+e}, d2pdzdz = {2:+e}'.format(result[p][3], result[p][4], result[p][5]))\n\n# print 'Requesting position at {0} points, starting at time {1} and ending at time {2}...'.format(N, startTime, endTime)\n# result = pyJHTDB.getPosition(startTime, endTime, lag_dt, points, sinterp = spatialInterp)\n# print 'Coordinates of {0} points at startTime:'.format(N)\n# for p in range(N):\n# print p, points[p]\n# print 'Coordinates of {0} points at endTime:'.format(N)\n# for p in range(N):\n# print p, result[p]\n\n ## only if matplotlib is present\n if pyJHTDB.found_matplotlib:\n ken_contours(\n 'kin_en_contours',\n lTDB,\n spatialInterp = spatialInterp,\n temporalInterp = temporalInterp,\n time = 0.002 * numpy.random.randint(1024),\n spacing = 2 * math.pi * 2.**(-10),\n nx = 64, ny = 64,\n xoff = 2*math.pi * numpy.random.rand(),\n yoff = 2*math.pi * numpy.random.rand(),\n zoff = 2*math.pi * numpy.random.rand())\n\n #finalize webservices\n lTDB.finalize()\n return None\n\ndef ken_contours(\n figname,\n lTDB,\n levels = 30,\n spatialInterp = 4, temporalInterp = 0,\n time = 0.002 * 512,\n spacing = math.pi * 2.**(-9),\n nx = 64, ny = 64,\n xoff = .0, yoff = .0, zoff = .0):\n \"\"\"\n Generate a simple contour plot\n see http://matplotlib.sourceforge.net/examples/pylab_examples/contour_demo.html\n for information on how to make prettier plots.\n\n This function assumes the 
webservices have already been initialized,\n so call pyJHTDB.init() before calling it, and pyJHTDB.finalize() afterwards\n \"\"\"\n x = spacing * numpy.arange(0, nx, 1, dtype = 'float32') + xoff\n y = spacing * numpy.arange(0, ny, 1, dtype = 'float32') + yoff\n points = numpy.empty((nx, ny, 3), dtype = 'float32')\n points[:, :, 0] = x[:, numpy.newaxis]\n points[:, :, 1] = y[numpy.newaxis, :]\n points[:, :, 2] = zoff\n\n result = lTDB.getData(time, points,\n sinterp = spatialInterp, tinterp = temporalInterp,\n getFunction = 'getVelocity')\n\n energy = .5*(numpy.sqrt(result[:,:,0]**2 + result[:,:,1]**2 + result[:,:,2]**2)).transpose()\n fig = plt.figure(figsize=(6.,6.))\n ax = fig.add_axes([.0, .0, 1., 1.])\n contour = ax.contour(x, y, energy, levels)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n plt.clabel(contour, inline=1, fontsize=10)\n plt.title('Energy contours, t = {0:.5}, z = {1:.3}'.format(time, zoff))\n fig.savefig(figname + '.eps', format = 'eps', bbox_inches = 'tight')\n return None\n\nif pyJHTDB.found_matplotlib:\n\n def spectra_check(\n info = pyJHTDB.dbinfo.isotropic1024coarse,\n lJHTDB = None):\n if not lJHTDB:\n print ('no library given')\n return None\n nlines = 32\n print(nlines, info['nx'], 3)\n print(nlines, info['ny'], 3)\n print(nlines, info['nz'], 3)\n lines = [numpy.zeros((nlines, info['nx'], 3), dtype = numpy.float32),\n numpy.zeros((nlines, info['ny'], 3), dtype = numpy.float32),\n numpy.zeros((nlines, info['nz'], 3), dtype = numpy.float32)]\n lines[0][:, :, 0] = info['dx']*numpy.arange(.0, info['nx'], 1)\n lines[0][:, :, 1] = info['dy']*numpy.random.randint(0, info['ny'], size = (nlines))[:, numpy.newaxis]\n lines[0][:, :, 2] = info['dz']*numpy.random.randint(0, info['nz'], size = (nlines))[:, numpy.newaxis]\n lines[1][:, :, 0] = info['dx']*numpy.random.randint(0, info['nx'], size = (nlines))[:, numpy.newaxis]\n lines[1][:, :, 1] = info['dy']*numpy.arange(.0, info['ny'], 1)\n lines[1][:, :, 2] = info['dz']*numpy.random.randint(0, info['nz'], size = (nlines))[:, numpy.newaxis]\n lines[2][:, :, 0] = info['dx']*numpy.random.randint(0, info['nx'], size = (nlines))[:, numpy.newaxis]\n lines[2][:, :, 1] = info['dy']*numpy.random.randint(0, info['ny'], size = (nlines))[:, numpy.newaxis]\n lines[2][:, :, 2] = info['dz']*numpy.arange(.0, info['nz'], 1)\n fig = plt.figure(figsize=(12.,6.))\n axu = fig.add_axes([.05, .1, .4, .8])\n axp = fig.add_axes([.55, .1, .4, .8])\n coordname = ['x', 'y', 'z']\n for i in range(3):\n result = lJHTDB.getData(.0, lines[i],\n sinterp = 0, tinterp = 0,\n data_set = info['name'], getFunction = 'getVelocityAndPressure')\n spec = numpy.fft.rfft(\n numpy.sum(result[:, :, :3]**2, axis = 2), axis = 1)\n axu.plot(numpy.average(numpy.abs(spec), axis = 0), label = '$i = ' + coordname[i] + '$')\n spec = numpy.fft.rfft(\n result[:, :, 3]**2, axis = 1)\n axp.plot(numpy.average(numpy.abs(spec), axis = 0), label = '$i = ' + coordname[i] + '$')\n axu.set_ylabel('$\\\\langle u_x^2 + u_y^2 + u_z^2 \\\\rangle$')\n axp.set_ylabel('$\\\\langle p^2 \\\\rangle$')\n for ax in [axu, axp]:\n ax.set_xlabel('$k_i$')\n ax.legend(loc = 'best')\n ax.set_xscale('log')\n ax.set_yscale('log')\n fig.savefig('spec.pdf', format = 'pdf')\n return None\n\n def contour_check(\n info = pyJHTDB.dbinfo.isotropic1024coarse,\n lJHTDB = None):\n if not lJHTDB:\n print ('no library given')\n return None\n nplanes = 1\n planes = [numpy.zeros((nplanes, info['ny'], info['nz'], 3), dtype = numpy.float32),\n numpy.zeros((nplanes, info['nz'], info['nx'], 3), dtype = numpy.float32),\n 
numpy.zeros((nplanes, info['nx'], info['ny'], 3), dtype = numpy.float32)]\n planes[0][:, :, :, 0] = info['xnodes'][::(info['nx']//nplanes), numpy.newaxis, numpy.newaxis]\n planes[0][:, :, :, 1] = info['ynodes'][ numpy.newaxis, :, numpy.newaxis]\n planes[0][:, :, :, 2] = info['znodes'][ numpy.newaxis, numpy.newaxis, :]\n #planes[1][:, :, :, 0] = info['dx']*numpy.arange(.0, info['nx'], 1)\n #planes[1][:, :, :, 1] = info['dy']*numpy.random.randint(0, info['ny'], size = (nplanes))[:, numpy.newaxis, numpy.newaxis]\n #planes[1][:, :, :, 2] = info['dz']*numpy.arange(.0, info['nz'], 1)\n #planes[2][:, :, :, 0] = info['dx']*numpy.arange(.0, info['nx'], 1)\n #planes[2][:, :, :, 1] = info['dy']*numpy.arange(.0, info['ny'], 1)\n #planes[2][:, :, :, 2] = info['dz']*numpy.random.randint(0, info['nz'], size = (nplanes))[:, numpy.newaxis, numpy.newaxis]\n coordname = ['yz', 'zx', 'xy']\n for i in range(1):\n result = lJHTDB.getData(.1, planes[i],\n sinterp = 0, tinterp = 0,\n data_set = info['name'], getFunction = 'getVelocityAndPressure')\n fig = plt.figure(figsize = (10.24,10.24))\n ax = fig.add_axes([0, 0, 1, 1], frameon = False)\n ax.set_axis_off()\n ax.imshow(result[0, :, :, 0])\n fig.savefig('plane_' + coordname[i] + '_0.png', format = 'png', dpi = 100)\n return None\n\n def clean_2D_field(\n field_2D,\n dpi = 100,\n figname = 'tst',\n cmap = cm.jet,\n img_type = 'pdf'):\n fig = plt.figure(\n figsize=(field_2D.shape[1]*1./dpi,\n field_2D.shape[0]*1./dpi))\n ax = fig.add_axes([.0, .0, 1., 1.], frameon=False)\n ax.set_axis_off()\n im = ax.imshow(field_2D,\n interpolation='none',\n cmap = cmap)\n fig.savefig(\n figname + '.' + img_type,\n dpi = dpi,\n format = img_type)\n return None\n\n def test_misc():\n # load shared library\n lJHTDB = pyJHTDB.libJHTDB()\n #initialize webservices\n lJHTDB.initialize()\n spectra_check(lJHTDB = lJHTDB)\n contour_check(lJHTDB = lJHTDB,\n info = pyJHTDB.dbinfo.channel)\n #finalize webservices\n lJHTDB.finalize()\n return None\n\nif pyJHTDB.found_h5py:\n\n def test_cutout():\n pyJHTDB.cutout.get_big_cutout(\n t0 = 0, tl = 2,\n x0 = 243, xl = 32,\n y0 = 48, yl = 30,\n z0 = 48, zl = 26,\n chunk_xdim = 16,\n chunk_ydim = 15,\n chunk_zdim = 13,\n data_set = 'mhd1024',\n data_type = 'ub',\n filename = 'tmp',\n base_website = 'turbulence.pha.jhu.edu')\n data = h5py.File('tmp.h5', mode = 'r')\n energy = (data['u00000'][0, :, :, 0]**2\n + data['u00000'][0, :, :, 1]**2\n + data['u00000'][0, :, :, 2]**2)\n clean_2D_field(energy, figname = 'tst_0yx')\n energy = (data['u00000'][:, 0, :, 0]**2\n + data['u00000'][:, 0, :, 1]**2\n + data['u00000'][:, 0, :, 2]**2)\n clean_2D_field(energy, figname = 'tst_z0x')\n energy = (data['u00000'][:, :, 0, 0]**2\n + data['u00000'][:, :, 0, 1]**2\n + data['u00000'][:, :, 0, 2]**2)\n clean_2D_field(energy, figname = 'tst_zy0')\n return None\n\ndef test_rawData(\n info = pyJHTDB.dbinfo.channel,\n npoints = 256):\n\n start = numpy.array([0, 0, 0], dtype = int)\n width = numpy.array([npoints, npoints, 1], dtype = int)\n\n xg = info['xnodes'][0:width[0]]\n yg = info['ynodes'][0:width[1]]\n zg = info['znodes'][0:width[2]]\n x = numpy.zeros((npoints, npoints, 3), numpy.float32)\n x[:, :, 0] = xg[None, :]\n x[:, :, 1] = yg[:, None]\n x[:, :, 2] = zg[0]\n\n lJHTDB = pyJHTDB.libJHTDB()\n lJHTDB.initialize()\n res4 = lJHTDB.getData(\n 0,\n x,\n sinterp = 4,\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'Velocity')\n res0 = lJHTDB.getRawData(\n 0,\n start = start,\n size = width,\n data_set = info['name'],\n getFunction = 'Velocity')\n 
lJHTDB.finalize()\n\n fig = plt.figure(figsize=(12,6))\n ax = fig.add_subplot(121)\n c = ax.contour(res4[:, :, 0])\n ax.clabel(c)\n ax.set_title('Lag4 result from database')\n ax = fig.add_subplot(122)\n c = ax.contour(res0[:, :, 0, 0])\n ax.clabel(c)\n ax.set_title('rawData result from database')\n fig.savefig('tst.pdf', format = 'pdf')\n return None\n\ndef test_interp_1D(\n info = pyJHTDB.dbinfo.channel,\n m = 1,\n q = 4,\n npoints = 256):\n\n start = numpy.array([0, 0, 0], dtype = int)\n width = numpy.array([51+q, 37+q, 17+q], dtype = int)\n\n i = pyJHTDB.interpolator.spline_interpolator(\n info = info,\n n = (q - 2)//2,\n m = m)\n i.generate_clib()\n\n xg = numpy.linspace(info['xnodes'][i.nx+1],\n info['xnodes'][width[0] - i.nx - 1],\n npoints)\n if info['yperiodic']:\n yg = numpy.linspace(info['ynodes'][i.ny+1],\n info['ynodes'][width[1] - i.ny - 1],\n npoints)\n else:\n yg = numpy.linspace(info['ynodes'][0],\n info['ynodes'][width[1] - i.ny - 1],\n npoints)\n zg = numpy.linspace(info['znodes'][i.nz+1],\n info['znodes'][width[2] - i.nz - 1],\n npoints)\n x = numpy.zeros((npoints, 3), numpy.float32)\n x[:, 0] = xg[0]\n x[:, 1] = yg[:]\n x[:, 2] = zg[0]\n\n lJHTDB = pyJHTDB.libJHTDB()\n lJHTDB.initialize()\n # get raw data to interpolate\n test_field = lJHTDB.getRawData(\n 0,\n start = start,\n size = width,\n data_set = info['name'],\n getFunction = 'Velocity')\n # get Lag8 velocity\n res0 = lJHTDB.getData(\n 0,\n x,\n sinterp = 8,\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'getVelocity')\n # get locally interpolated values\n res1 = i.cinterpolate(\n x, test_field)\n # get Lag4 gradient\n resd0 = lJHTDB.getData(\n 0,\n x,\n sinterp = 44,\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'getVelocityGradient')\n # get locally interpolated gradient\n resdx1 = i.cinterpolate(\n x,\n test_field,\n diff = [1, 0, 0])\n resdy1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 1, 0])\n resdz1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 0, 1])\n resd1 = resd0.copy()\n resd1[..., 0] = resdx1[..., 0]\n resd1[..., 1] = resdy1[..., 0]\n resd1[..., 2] = resdz1[..., 0]\n resd1[..., 3] = resdx1[..., 1]\n resd1[..., 4] = resdy1[..., 1]\n resd1[..., 5] = resdz1[..., 1]\n resd1[..., 6] = resdx1[..., 2]\n resd1[..., 7] = resdy1[..., 2]\n resd1[..., 8] = resdz1[..., 2]\n del resdx1, resdy1, resdz1\n lJHTDB.finalize()\n\n if pyJHTDB.found_matplotlib:\n def compare_results(\n fld0,\n fld1,\n figname = 'tst'):\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(111)\n ax.plot(fld0, color = 'blue')\n ax.plot(fld1, color = 'red')\n fig.savefig(figname + '.pdf', format = 'pdf')\n\n compare_results(\n res0,\n res1,\n figname = 'tst')\n\n compare_results(\n resd0,\n resd1,\n figname = 'dtst')\n dist = (numpy.average(numpy.sqrt(numpy.sum((res0 - res1)**2, axis = 1))) /\n numpy.average(numpy.sqrt(numpy.sum((res0)**2, axis = 1))))\n print ('average distance for result {0}'.format(dist))\n ddist = (numpy.average(numpy.sqrt(numpy.sum((resd0 - resd1)**2, axis = 1))) /\n numpy.average(numpy.sqrt(numpy.sum((resd0)**2, axis = 1))))\n print ('average distance for dresult {0}'.format(ddist))\n return res0, res1, resd0, resd1\n\ndef test_interp_2D(\n info = pyJHTDB.dbinfo.channel,\n m = 1,\n q = 4,\n npoints = 256):\n\n start = numpy.array([0, 0, 0], dtype = int)\n width = numpy.array([91, 67, 11], dtype = int)\n\n i = pyJHTDB.interpolator.spline_interpolator(\n info = info,\n n = (q - 2)//2,\n m = m)\n i.generate_clib()\n\n xg = numpy.linspace(info['xnodes'][i.n+1], 
info['xnodes'][width[0] - i.n - 1], npoints)\n if info['yperiodic']:\n yg = numpy.linspace(info['ynodes'][i.n+1], info['ynodes'][width[1] - i.n - 1], npoints)\n else:\n yg = numpy.linspace(info['ynodes'][0], info['ynodes'][width[1] - i.n - 1], npoints)\n zg = numpy.linspace(info['znodes'][i.n+1], info['znodes'][width[2] - i.n - 1], npoints)\n x = numpy.zeros((npoints, npoints, 3), numpy.float32)\n x[:, :, 0] = xg[0] #None, :]\n x[:, :, 1] = yg[:, None]\n x[:, :, 2] = zg[0] #zg[:, None]\n\n lJHTDB = pyJHTDB.libJHTDB()\n lJHTDB.initialize()\n # get raw data to interpolate\n test_field = lJHTDB.getRawData(\n 0,\n start = start,\n size = width,\n data_set = info['name'],\n getFunction = 'Velocity')\n # get Lag8 velocity\n res0 = lJHTDB.getData(\n 0,\n x,\n sinterp = 8,\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'getVelocity')\n # get locally interpolated values\n res1 = i.cinterpolate(\n x, test_field)\n # get Lag4 gradient\n resd0 = lJHTDB.getData(\n 0,\n x,\n sinterp = 44,\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'getVelocityGradient')\n # get locally interpolated gradient\n resdx1 = i.cinterpolate(\n x,\n test_field,\n diff = [1, 0, 0])\n resdy1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 1, 0])\n resdz1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 0, 1])\n resd1 = resd0.copy()\n resd1[..., 0] = resdx1[..., 0]\n resd1[..., 1] = resdy1[..., 0]\n resd1[..., 2] = resdz1[..., 0]\n resd1[..., 3] = resdx1[..., 1]\n resd1[..., 4] = resdy1[..., 1]\n resd1[..., 5] = resdz1[..., 1]\n resd1[..., 6] = resdx1[..., 2]\n resd1[..., 7] = resdy1[..., 2]\n resd1[..., 8] = resdz1[..., 2]\n del resdx1, resdy1, resdz1\n lJHTDB.finalize()\n\n def compare_results(\n fld0,\n fld1,\n figname = 'tst'):\n fig = plt.figure(figsize=(12,6))\n ax = fig.add_subplot(121)\n c = ax.contour(\n #x[:, :, 0],\n #x[:, :, 1],\n fld0)\n ax.clabel(c)\n ax.set_title('result from database')\n ax = fig.add_subplot(122)\n c = ax.contour(\n #x[:, :, 0],\n #x[:, :, 1],\n fld1, c.levels)\n ax.clabel(c)\n ax.set_title('local M{0}Q{1:0>2} result'.format(i.m, i.n*2+2))\n fig.savefig(figname + '.pdf', format = 'pdf')\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(fld0[:, 0], color = 'blue')\n ax.plot(fld1[:, 0], color = 'red')\n fig.savefig(figname + '_1D.pdf', format = 'pdf')\n\n compare_results(\n res0[:, :, 1],\n res1[:, :, 1],\n figname = 'tst')\n\n compare_results(\n resd0[:, :, 1],\n resd1[:, :, 1],\n figname = 'dtst')\n ddist = (numpy.average(numpy.sqrt(numpy.sum((resd0 - resd1)**2, axis = 1))) /\n numpy.average(numpy.sqrt(numpy.sum((resd0)**2, axis = 1))))\n print (ddist)\n\n return res0, res1, resd0, resd1\n\ndef test_divfree(\n info = pyJHTDB.dbinfo.channel,\n m = 1,\n q = 4,\n npoints = 256,\n dbinterp = 44):\n\n start = numpy.array([0, 0, 0], dtype = int)\n width = numpy.array([91, 67, 31], dtype = int)\n\n i = pyJHTDB.interpolator.spline_interpolator(\n info = info,\n n = (q - 2)//2,\n m = m)\n i.generate_clib()\n\n xg = [info['xnodes'][i.n+1], info['xnodes'][width[0] - i.n - 1]]\n if info['yperiodic']:\n yg = [info['ynodes'][i.n+1], info['ynodes'][width[1] - i.n - 1]]\n else:\n yg = [info['ynodes'][0], info['ynodes'][width[1] - i.n - 1]]\n zg = [info['znodes'][i.n+1], info['znodes'][width[2] - i.n - 1]]\n x = numpy.random.random(size = (npoints, 3)).astype(numpy.float32)\n x[:, 0] = xg[0] + x[:, 0]*(xg[1] - xg[0])\n x[:, 1] = yg[0] + x[:, 1]*(yg[1] - yg[0])\n x[:, 2] = zg[0] + x[:, 2]*(zg[1] - zg[0])\n\n lJHTDB = pyJHTDB.libJHTDB()\n lJHTDB.initialize()\n # get raw 
data to interpolate\n test_field = lJHTDB.getRawData(\n 0,\n start = start,\n size = width,\n data_set = info['name'],\n getFunction = 'Velocity')\n # get Lag4 gradient\n resd0 = lJHTDB.getData(\n 0,\n x,\n sinterp = dbinterp,\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'getVelocityGradient')\n # get locally interpolated gradient\n resdx1 = i.cinterpolate(\n x,\n test_field,\n diff = [1, 0, 0])\n resdy1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 1, 0])\n resdz1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 0, 1])\n resd1 = resd0.copy()\n resd1[..., 0] = resdx1[..., 0]\n resd1[..., 1] = resdy1[..., 0]\n resd1[..., 2] = resdz1[..., 0]\n resd1[..., 3] = resdx1[..., 1]\n resd1[..., 4] = resdy1[..., 1]\n resd1[..., 5] = resdz1[..., 1]\n resd1[..., 6] = resdx1[..., 2]\n resd1[..., 7] = resdy1[..., 2]\n resd1[..., 8] = resdz1[..., 2]\n del resdx1, resdy1, resdz1\n lJHTDB.finalize()\n\n dmagnitude = numpy.average(numpy.sqrt(numpy.sum((resd0)**2, axis = 1)))\n ddist = numpy.average(numpy.sqrt(numpy.sum((resd0 - resd1)**2, axis = 1))) / dmagnitude\n print ('average relative distance between dbinterp and M{0}Q{1} is {2}'.format(m, q, ddist))\n\n div0 = resd0[:, 0] + resd0[:, 4] + resd0[:, 8]\n div1 = resd1[:, 0] + resd1[:, 4] + resd1[:, 8]\n print('average divergence for dbinterp is {0}'.format(numpy.average(div0**2) / dmagnitude))\n print('average divergence for M{0}Q{1} is {2}'.format(m, q, numpy.average(div1**2) / dmagnitude))\n return None\n\ndef test_local_vs_db_interp(\n info = pyJHTDB.dbinfo.channel,\n time = 0.0,\n m = 1,\n q = 4,\n npoints = 256,\n dbinterp = [8, 44],\n start = numpy.array([0, 0, 0], dtype = int),\n width = numpy.array([91, 67, 31], dtype = int),\n messages_on = False):\n\n i = pyJHTDB.interpolator.spline_interpolator(\n info = info,\n n = (q - 2)//2,\n m = m)\n i.generate_clib()\n\n # build point array\n xg = [info['xnodes'][start[0] + i.n+1], info['xnodes'][start[0] + width[0] - i.n - 1]]\n if info['yperiodic']:\n yg = [info['ynodes'][start[1] + i.n+1], info['ynodes'][start[1] + width[1] - i.n - 1]]\n else:\n yg = [info['ynodes'][start[1] ], info['ynodes'][start[1] + width[1] - i.n - 1]]\n zg = [info['znodes'][start[2] + i.n+1], info['znodes'][start[2] + width[2] - i.n - 1]]\n x = numpy.random.random(size = (npoints, 3)).astype(numpy.float32)\n x[:, 0] = xg[0] + x[:, 0]*(xg[1] - xg[0])\n x[:, 1] = yg[0] + x[:, 1]*(yg[1] - yg[0])\n x[:, 2] = zg[0] + x[:, 2]*(zg[1] - zg[0])\n\n lJHTDB = pyJHTDB.libJHTDB()\n lJHTDB.initialize()\n # get raw data to interpolate\n test_field = lJHTDB.getRawData(\n time,\n start = start,\n size = width,\n data_set = info['name'],\n getFunction = 'Velocity')\n # get DB field\n res0 = lJHTDB.getData(\n time,\n x,\n sinterp = dbinterp[0],\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'getVelocity')\n # get DB gradient\n resd0 = lJHTDB.getData(\n time,\n x,\n sinterp = dbinterp[1],\n tinterp = 0,\n data_set = info['name'],\n getFunction = 'getVelocityGradient')\n # get locally interpolated field\n res1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 0, 0],\n field_offset = start)\n # get locally interpolated gradient\n resdx1 = i.cinterpolate(\n x,\n test_field,\n diff = [1, 0, 0],\n field_offset = start)\n resdy1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 1, 0],\n field_offset = start)\n resdz1 = i.cinterpolate(\n x,\n test_field,\n diff = [0, 0, 1],\n field_offset = start)\n resd1 = resd0.copy()\n resd1[..., 0] = resdx1[..., 0]\n resd1[..., 1] = resdy1[..., 0]\n resd1[..., 2] = resdz1[..., 
0]\n resd1[..., 3] = resdx1[..., 1]\n resd1[..., 4] = resdy1[..., 1]\n resd1[..., 5] = resdz1[..., 1]\n resd1[..., 6] = resdx1[..., 2]\n resd1[..., 7] = resdy1[..., 2]\n resd1[..., 8] = resdz1[..., 2]\n del resdx1, resdy1, resdz1\n lJHTDB.finalize()\n\n if messages_on:\n comp0 = ['ux', 'uy', 'uz']\n comp1 = ['dxux', 'dyux', 'dzux',\n 'dxuy', 'dyuy', 'dzuy',\n 'dxuz', 'dyuz', 'dzuz']\n\n print ('printing average relative distance between DB and local')\n print ('example point is {0}'.format(x[0]))\n print ('for direct interpolation using (DB) {0} and (local) M{1}Q{2}'.format(dbinterp[0], m, q))\n print ('printing average((DB) - (local)) / average(DB), (DB) at example point, abs((DB) - (local)) at example point ')\n for i in range(3):\n magnitude = numpy.average(numpy.abs(res0[:, i]))\n distance = numpy.average(numpy.abs(res0[:, i] - res1[:, i])) / magnitude\n print (comp0[i] + ' ' +\n '{0}, {1:+}, {2}'.format(distance, res0[0, i], numpy.abs(res0[0, i] - res1[0, i])))\n print ('for gradient interpolation using (DB) {0} and (local) M{1}Q{2}'.format(dbinterp[1], m, q))\n for i in range(9):\n magnitude = numpy.average(numpy.abs(resd0[:, i]))\n distance = numpy.average(numpy.abs(resd0[:, i] - resd1[:, i])) / magnitude\n print (comp1[i] + ' ' +\n '{0}, {1:+}, {2}'.format(distance, resd0[0, i], numpy.abs(resd0[0, i] - resd1[0, i])))\n return res0, res1, resd0, resd1\n\nclass LocalInterpTest:\n def __init__(\n self,\n info):\n self.info = info\n return None\n def set_up_field(\n self,\n xnodes = [0, 64],\n ynodes = [0, 128],\n znodes = [0, 64],\n buffer_size = 16):\n self.buffer_size = buffer_size\n full_frame = h5py.File(\n '/stuff/data/{0}/{0}_t0000.h5'.format(self.info['name']),\n 'r')\n self.xnodes = xnodes\n self.ynodes = ynodes\n self.znodes = znodes\n self.test_field = full_frame['u00000'][\n self.znodes[0]:self.znodes[1],\n self.ynodes[0]:self.ynodes[1],\n self.xnodes[0]:self.xnodes[1]].copy()\n self.y0buffer = min(self.buffer_size, self.ynodes[0])\n full_frame.close()\n return None\n def set_up_points(\n self,\n npoints = 2**5,\n yval = None):\n self.npoints = npoints\n if yval is None:\n if self.info['yperiodic']:\n self.yindices = numpy.arange(self.ynodes[0] + self.buffer_size,\n self.ynodes[1] - self.buffer_size)\n else:\n self.yindices = numpy.arange(self.ynodes[0] + self.y0buffer,\n self.ynodes[1] - self.buffer_size)\n self.p = numpy.random.random(\n size = (npoints, self.yindices.shape[0], 3)).astype(numpy.float32)\n if self.info['yperiodic']:\n self.p[..., 1] *= self.info['dy']\n else:\n self.p[..., 1] *= self.info['dy'][None, self.yindices]\n self.p[..., 1] += self.info['ynodes'][None, self.yindices]\n else:\n self.p = numpy.random.random(\n size = (npoints, 3)).astype(numpy.float32)\n if yval == 'random':\n if self.info['yperiodic']:\n self.p[..., 1] = (\n self.info['ynodes'][self.ynodes[0] + self.buffer_size] +\n self.p[..., 1]*(\n self.info['ynodes'][self.ynodes[1] - self.buffer_size] -\n self.info['ynodes'][self.ynodes[0] + self.buffer_size]))\n else:\n self.p[..., 1] = (\n self.info['ynodes'][self.ynodes[0] + self.y0buffer] +\n self.p[..., 1]*(\n self.info['ynodes'][self.ynodes[1] - self.buffer_size] -\n self.info['ynodes'][self.ynodes[0] + self.y0buffer]))\n else:\n self.p[..., 1] = yval\n self.p[..., 0] = (\n self.info['xnodes'][self.xnodes[0] + self.buffer_size] +\n self.p[..., 0]*(\n self.info['xnodes'][self.xnodes[1] - self.buffer_size] -\n self.info['xnodes'][self.xnodes[0] + self.buffer_size]))\n self.p[..., 2] = (\n self.info['znodes'][self.znodes[0] + self.buffer_size] +\n 
self.p[..., 2]*(\n self.info['znodes'][self.znodes[1] - self.buffer_size] -\n self.info['znodes'][self.znodes[0] + self.buffer_size]))\n return None\n def set_up_interpolators(\n self,\n pars = [[12, 3, 3, 2],\n [12, 4, 4, 2],\n [12, 5, 5, 2],\n [12, 6, 6, 2],\n [12, 7, 7, 2],\n [12, 8, 8, 2],\n [12, 9, 9, 2],\n [12,10,10, 2]]):\n self.interp = []\n self.keys = []\n for par in pars:\n self.keys.append(\n 'nx{0:0>2}_ny{1:0>2}_nz{2:0>2}_m{3}'.format(\n par[0], par[1], par[2], par[3]))\n self.interp.append(\n pyJHTDB.interpolator.spline_interpolator(\n info = self.info,\n nx = par[0],\n ny = par[1],\n nz = par[2],\n m = par[3],\n initialize = False,\n cformula_unroll = False))\n return None\n def interpolate(\n self):\n self.uval = []\n self.gradu = []\n for k in range(len(self.keys)):\n self.uval.append(self.interp[k].cinterpolate(\n self.p,\n self.test_field,\n diff = [0, 0, 0],\n field_offset = [self.xnodes[0],\n self.ynodes[0],\n self.znodes[0]]))\n dxvel = self.interp[k].cinterpolate(\n self.p,\n self.test_field,\n diff = [1, 0, 0],\n field_offset = [self.xnodes[0],\n self.ynodes[0],\n self.znodes[0]])\n dyvel = self.interp[k].cinterpolate(\n self.p,\n self.test_field,\n diff = [0, 1, 0],\n field_offset = [self.xnodes[0],\n self.ynodes[0],\n self.znodes[0]])\n dzvel = self.interp[k].cinterpolate(\n self.p,\n self.test_field,\n diff = [0, 0, 1],\n field_offset = [self.xnodes[0],\n self.ynodes[0],\n self.znodes[0]])\n self.gradu.append(numpy.zeros(\n dxvel.shape[:-1] + (9,),\n dtype = dxvel.dtype))\n self.gradu[k][..., 0] = dxvel[..., 0]\n self.gradu[k][..., 1] = dyvel[..., 0]\n self.gradu[k][..., 2] = dzvel[..., 0]\n self.gradu[k][..., 3] = dxvel[..., 1]\n self.gradu[k][..., 4] = dyvel[..., 1]\n self.gradu[k][..., 5] = dzvel[..., 1]\n self.gradu[k][..., 6] = dxvel[..., 2]\n self.gradu[k][..., 7] = dyvel[..., 2]\n self.gradu[k][..., 8] = dzvel[..., 2]\n self.uval = numpy.array(self.uval)\n self.gradu = numpy.array(self.gradu)\n return None\n def get_divergence(\n self):\n self.divu = []\n self.divu_upper = []\n self.divu_lower = []\n for k in range(len(self.keys)):\n factor = (numpy.sqrt(3) /\n numpy.average(numpy.sqrt(\n self.gradu[k][..., 0]**2 +\n self.gradu[k][..., 4]**2 +\n self.gradu[k][..., 8]**2), axis = 0))\n self.divu.append(factor*numpy.average(numpy.abs(\n self.gradu[k][..., 0] +\n self.gradu[k][..., 4] +\n self.gradu[k][..., 8]), axis = 0))\n self.divu_upper.append(factor*numpy.percentile(numpy.abs(\n self.gradu[k][..., 0] +\n self.gradu[k][..., 4] +\n self.gradu[k][..., 8]), 90, axis = 0))\n self.divu_lower.append(factor*numpy.percentile(numpy.abs(\n self.gradu[k][..., 0] +\n self.gradu[k][..., 4] +\n self.gradu[k][..., 8]), 10, axis = 0))\n self.divu = numpy.array(self.divu)\n self.divu_upper = numpy.array(self.divu_upper)\n self.divu_lower = numpy.array(self.divu_lower)\n return None\n\nif __name__ == '__main__':\n test_plain()\n\n" ]
[ [ "matplotlib.pyplot.clabel", "numpy.random.random", "numpy.linspace", "numpy.fft.rfft", "numpy.sqrt", "numpy.arange", "numpy.abs", "numpy.random.random_sample", "numpy.random.randint", "numpy.random.rand", "numpy.average", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.empty", "matplotlib.pyplot.figure" ] ]
awesome-archive/rlpyt
[ "47176abebc3a19791e34564e4fa6b1b267a68a61" ]
[ "rlpyt/models/pg/mujoco_lstm_model.py" ]
[ "\nimport numpy as np\nimport torch\n\nfrom rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims\nfrom rlpyt.models.mlp import MlpModel\nfrom rlpyt.utils.collections import namedarraytuple\n\nRnnState = namedarraytuple(\"RnnState\", [\"h\", \"c\"])\n\n\nclass MujocoLstmModel(torch.nn.Module):\n\n def __init__(\n self,\n observation_shape,\n action_size,\n hidden_sizes=None, # None for default (see below).\n lstm_size=256,\n nonlinearity=torch.nn.ReLU,\n ):\n super().__init__()\n self._obs_n_dim = len(observation_shape)\n self._action_size = action_size\n hidden_sizes = hidden_sizes or [256, 256]\n mlp_input_size = int(np.prod(observation_shape))\n self.mlp = MlpModel(\n input_size=mlp_input_size,\n hidden_sizes=hidden_sizes,\n output_size=None,\n nonlinearity=nonlinearity,\n )\n mlp_output_size = hidden_sizes[-1] if hidden_sizes else mlp_input_size\n self.lstm = torch.nn.LSTM(mlp_output_size + action_size + 1, lstm_size)\n self.head = torch.nn.Linear(lstm_size, action_size * 2 + 1)\n\n def forward(self, observation, prev_action, prev_reward, init_rnn_state):\n \"\"\"Feedforward layers process as [T*B,H]. Return same leading dims as\n input, can be [T,B], [B], or [].\"\"\"\n\n # Infer (presence of) leading dimensions: [T,B], [B], or [].\n lead_dim, T, B, _ = infer_leading_dims(observation, self._obs_n_dim)\n\n mlp_out = self.mlp(observation.view(T * B, -1))\n lstm_input = torch.cat([\n mlp_out.view(T, B, -1),\n prev_action.view(T, B, -1),\n prev_reward.view(T, B, 1),\n ], dim=2)\n init_rnn_state = None if init_rnn_state is None else tuple(init_rnn_state)\n lstm_out, (hn, cn) = self.lstm(lstm_input, init_rnn_state)\n outputs = self.head(lstm_out)\n mu = outputs[:, :self._action_size]\n log_std = outputs[:, self._action_size:-1]\n v = outputs[:, -1].squeeze(-1)\n\n # Restore leading dimensions: [T,B], [B], or [], as input.\n mu, log_std, v = restore_leading_dims((mu, log_std, v), lead_dim, T, B)\n # Model should always leave B-dimension in rnn state: [N,B,H]\n next_rnn_state = RnnState(h=hn, c=cn)\n\n return mu, log_std, v, next_rnn_state\n" ]
[ [ "torch.nn.Linear", "numpy.prod", "torch.nn.LSTM" ] ]
Arshin/flaskML
[ "eedaa7d9e33f33b962da84772e9b4220829da4ad" ]
[ "app.py" ]
[ "from flask import Flask, request, url_for, redirect, render_template, jsonify\r\nimport pickle\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\napp = Flask(__name__)\r\n\r\nfilename = 'finalized_model.sav'\r\nmodel = pickle.load(open(filename, 'rb'))\r\ncols = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age']\r\n\r\[email protected]('/')\r\ndef home():\r\n return render_template('home.html')\r\n\r\[email protected]('/predict', methods=['POST'])\r\ndef predict():\r\n int_features = [np.float(x) for x in request.form.values()]\r\n print('int_features are ',int_features)\r\n final = np.array(int_features)\r\n # final_df = pd.DataFrame([final], columns=cols)\r\n prediction = model.predict([final])\r\n return render_template('home.html', pred='Expected prediction will be {}'.format(prediction))\r\n\r\[email protected]('/predict_api',methods=['POST'])\r\ndef predict_api():\r\n data = request.get_json(force=True)\r\n # data_unseen = pd.DataFrame([data])\r\n prediction = model.predict([data])\r\n return jsonify(prediction)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)" ]
[ [ "numpy.array", "numpy.float" ] ]
LXP-Never/Speech-signal-processing
[ "50646ee4c4174d68c03613e7385519a330f71940" ]
[ "语音识别/freq_transform.py" ]
[ "import numpy as np\nfrom scipy.io import wavfile\nimport matplotlib.pyplot as plt\n\nsampling_freq, audio = wavfile.read('input_freq.wav') # 读取文件\n\naudio = audio / np.max(audio) # 归一化,标准化\n\nlen_audio = len(audio) # 3251\n\n# 应用傅里叶变换\ntransformed_signal = np.fft.fft(audio)\nprint(transformed_signal)\n# [-0.04022912+0.j -0.04068997-0.00052721j -0.03933007-0.00448355j\n# ... -0.03947908+0.00298096j -0.03933007+0.00448355j -0.04068997+0.00052721j]\nhalf_length = int(np.ceil((len_audio + 1) / 2.0)) # np.ceil向上取整(向大的方向取整)\ntransformed_signal = abs(transformed_signal[0:half_length])\nprint(transformed_signal)\n# [0.04022912 0.04069339 0.0395848 ... 0.08001755 0.09203427 0.12889393]\ntransformed_signal /= float(len_audio)\ntransformed_signal **= 2\n\n# 提取转换信号的长度\nlen_ts = len(transformed_signal) # 1626\n\n# 将部分信号乘以2\nif len_audio % 2: # 奇数\n transformed_signal[1:len_ts] *= 2\nelse: # 偶数\n transformed_signal[1:len_ts-1] *= 2\n\n# 获取功率信号\npower = 10 * np.log10(transformed_signal)\n\n# 建立时间轴\nx_values = np.arange(0, half_length, 1) * (sampling_freq / len_audio) / 1000.0\n\n# 绘制语音信号的\nplt.figure()\nplt.plot(x_values, power, color='blue')\nplt.xlabel('Freq (in kHz)')\nplt.ylabel('Power (in dB)')\nplt.show()\n\n" ]
[ [ "numpy.fft.fft", "numpy.arange", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.ylabel", "numpy.ceil", "numpy.log10", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "scipy.io.wavfile.read", "matplotlib.pyplot.figure" ] ]
phemmer/tensorflow
[ "10dfd3256852bc85cc70f7672d945f307fcec145" ]
[ "tensorflow/python/keras/engine/base_layer.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Contains the base Layer class, from which all layers inherit.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport itertools\nimport threading\nimport weakref\n\nimport numpy as np\nimport six\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom google.protobuf import json_format\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.python.autograph.core import ag_ctx\nfrom tensorflow.python.autograph.impl import api as autograph\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import auto_control_deps\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.keras.engine import node as node_module\nfrom tensorflow.python.keras.mixed_precision.experimental import autocast_variable\nfrom tensorflow.python.keras.mixed_precision.experimental import policy\nfrom tensorflow.python.keras.saving.saved_model import layer_serialization\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.keras.utils import version_utils\n# A module that only depends on `keras.layers` import these from here.\nfrom tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import\nfrom tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import tf_logging\nfrom 
tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import layer_utils as trackable_layer_utils\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.tools.docs import doc_controls\n\n# Prefix that is added to the TF op layer names.\n_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'\n\n_keras_layers_gauge = monitoring.BoolGauge('/tensorflow/api/keras/layers',\n 'keras layers usage', 'method')\n_keras_model_gauge = monitoring.BoolGauge(\n '/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type')\n\n\n@keras_export('keras.layers.Layer')\nclass Layer(module.Module, version_utils.LayerVersionSelector):\n \"\"\"This is the class from which all layers inherit.\n\n A layer is a callable object that takes as input one or more tensors and\n that outputs one or more tensors. It involves *computation*, defined\n in the `call()` method, and a *state* (weight variables), defined\n either in the constructor `__init__()` or in the `build()` method.\n\n Users will just instantiate a layer and then treat it as a callable.\n\n We recommend that descendants of `Layer` implement the following methods:\n\n * `__init__()`: Defines custom layer attributes, and creates layer state\n variables that do not depend on input shapes, using `add_weight()`.\n * `build(self, input_shape)`: This method can be used to create weights that\n depend on the shape(s) of the input(s), using `add_weight()`. `__call__()`\n will automatically build the layer (if it has not been built yet) by\n calling `build()`.\n * `call(self, *args, **kwargs)`: Called in `__call__` after making sure\n `build()` has been called. `call()` performs the logic of applying the\n layer to the input tensors (which should be passed in as argument).\n Two reserved keyword arguments you can optionally use in `call()` are:\n - `training` (boolean, whether the call is in\n inference mode or training mode)\n - `mask` (boolean tensor encoding masked timesteps in the input, used\n in RNN layers)\n * `get_config(self)`: Returns a dictionary containing the configuration used\n to initialize this layer. If the keys differ from the arguments\n in `__init__`, then override `from_config(self)` as well.\n This method is used when saving\n the layer or a model that contains this layer.\n\n Examples:\n\n Here's a basic example: a layer with two variables, `w` and `b`,\n that returns `y = w . 
x + b`.\n It shows how to implement `build()` and `call()`.\n Variables set as attributes of a layer are tracked as weights\n of the layers (in `layer.weights`).\n\n ```python\n class SimpleDense(Layer):\n\n def __init__(self, units=32):\n super(SimpleDense, self).__init__()\n self.units = units\n\n def build(self, input_shape): # Create the state of the layer (weights)\n w_init = tf.random_normal_initializer()\n self.w = tf.Variable(\n initial_value=w_init(shape=(input_shape[-1], self.units),\n dtype='float32'),\n trainable=True)\n b_init = tf.zeros_initializer()\n self.b = tf.Variable(\n initial_value=b_init(shape=(self.units,), dtype='float32'),\n trainable=True)\n\n def call(self, inputs): # Defines the computation from inputs to outputs\n return tf.matmul(inputs, self.w) + self.b\n\n # Instantiates the layer.\n linear_layer = SimpleDense(4)\n\n # This will also call `build(input_shape)` and create the weights.\n y = linear_layer(tf.ones((2, 2)))\n assert len(linear_layer.weights) == 2\n\n # These weights are trainable, so they're listed in `trainable_weights`:\n assert len(linear_layer.trainable_weights) == 2\n ```\n\n Note that the method `add_weight()` offers a shortcut to create weights:\n\n ```python\n class SimpleDense(Layer):\n\n def __init__(self, units=32):\n super(SimpleDense, self).__init__()\n self.units = units\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)\n\n def call(self, inputs):\n return tf.matmul(inputs, self.w) + self.b\n ```\n\n Besides trainable weights, updated via backpropagation during training,\n layers can also have non-trainable weights. These weights are meant to\n be updated manually during `call()`. Here's a example layer that computes\n the running sum of its inputs:\n\n ```python\n class ComputeSum(Layer):\n\n def __init__(self, input_dim):\n super(ComputeSum, self).__init__()\n # Create a non-trainable weight.\n self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),\n trainable=False)\n\n def call(self, inputs):\n self.total.assign_add(tf.reduce_sum(inputs, axis=0))\n return self.total\n\n my_sum = ComputeSum(2)\n x = tf.ones((2, 2))\n\n y = my_sum(x)\n print(y.numpy()) # [2. 2.]\n\n y = my_sum(x)\n print(y.numpy()) # [4. 4.]\n\n assert my_sum.weights == [my_sum.total]\n assert my_sum.non_trainable_weights == [my_sum.total]\n assert my_sum.trainable_weights == []\n ```\n\n For more information about creating layers, see the guide\n [Writing custom layers and models with Keras](\n https://www.tensorflow.org/guide/keras/custom_layers_and_models)\n\n Arguments:\n trainable: Boolean, whether the layer's variables should be trainable.\n name: String name of the layer.\n dtype: The dtype of the layer's computations and weights (default of\n `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type\n of the first input in TensorFlow 1).\n dynamic: Set this to `True` if your layer should only be run eagerly, and\n should not be used to generate a static computation graph.\n This would be the case for a Tree-RNN or a recursive network,\n for example, or generally for any layer that manipulates tensors\n using Python control flow. If `False`, we assume that the layer can\n safely be used to generate a static computation graph.\n\n Attributes:\n name: The name of the layer (string).\n dtype: The dtype of the layer's computations and weights. 
If mixed\n precision is used with a `tf.keras.mixed_precision.experimental.Policy`,\n this is instead just the dtype of the layer's weights, as the computations\n are done in a different dtype.\n updates: List of update ops of this layer.\n losses: List of losses added by this layer.\n trainable_weights: List of variables to be included in backprop.\n non_trainable_weights: List of variables that should not be\n included in backprop.\n weights: The concatenation of the lists trainable_weights and\n non_trainable_weights (in this order).\n trainable: Whether the layer should be trained (boolean).\n input_spec: Optional (list of) `InputSpec` object(s) specifying the\n constraints on inputs that can be accepted by the layer.\n\n Each layer has a dtype, which is typically the dtype of the layer's\n computations and variables. A layer's dtype can be queried via the\n `Layer.dtype` property. The dtype is specified with the `dtype` constructor\n argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`\n if no dtype is passed. `floatx()` itself defaults to \"float32\". Additionally,\n layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed\n precision is used, layers may have different computation and variable dtypes.\n See `tf.keras.mixed_precision.experimental.Policy` for details on layer\n dtypes.\n \"\"\"\n\n # See tf.Module for the usage of this property.\n # The key for _obj_reference_counts_dict is a Trackable, which could be a\n # variable or layer etc. tf.Module._flatten will fail to flatten the key\n # since it is trying to convert Trackable to a string. This attribute can be\n # ignored even after the fix of nest lib, since the trackable object should\n # already been available as individual attributes. 
_obj_reference_counts_dict\n # just contains a copy of them.\n _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(\n ('_obj_reference_counts_dict',),\n module.Module._TF_MODULE_IGNORED_PROPERTIES\n ))\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,\n **kwargs):\n # These properties should be set by the user via keyword arguments.\n # note that 'dtype', 'input_shape' and 'batch_input_shape'\n # are only applicable to input layers: do not pass these keywords\n # to non-input layers.\n allowed_kwargs = {\n 'input_shape',\n 'batch_input_shape',\n 'batch_size',\n 'weights',\n 'activity_regularizer',\n 'autocast'\n }\n # Validate optional keyword arguments.\n generic_utils.validate_kwargs(kwargs, allowed_kwargs)\n\n # Mutable properties\n # Indicates whether the layer's weights are updated during training\n # and whether the layer's updates are run during training.\n self._trainable = trainable\n # A stateful layer is a layer whose updates are run during inference too,\n # for instance stateful RNNs.\n self._stateful = False\n # Indicates whether `build` needs to be called upon layer call, to create\n # the layer's weights.\n self.built = False\n # Record the build input shape for loading purposes.\n # TODO(kathywu): Move this to Layer._set_save_spec once cl/290121460 is\n # submitted.\n self._build_input_shape = None\n # Provides information about which inputs are compatible with the layer.\n self._input_spec = None\n self.supports_masking = False\n self._supports_ragged_inputs = False\n\n self._init_set_name(name)\n self._activity_regularizer = kwargs.pop('activity_regularizer', None)\n self._maybe_create_attribute('_trainable_weights', [])\n self._maybe_create_attribute('_non_trainable_weights', [])\n self._updates = []\n # Object to store all thread local layer properties.\n self._thread_local = threading.local()\n # A list of zero-argument lambdas which return Tensors, used for variable\n # regularizers.\n self._callable_losses = []\n # A list of symbolic Tensors containing activity regularizers and losses\n # manually added through `add_loss` in graph-building mode.\n self._losses = []\n # A list of metric instances corresponding to the symbolic metric tensors\n # added using the `add_metric` API.\n self._metrics = []\n # Ensures the same metric is not added multiple times in `MirroredStrategy`.\n self._metrics_lock = threading.Lock()\n\n # Both graph and subclassed networks have a dtype policy. For graph\n # networks, the policy's compute and variable dtypes are ignored, but other\n # fields, like the loss scale, are used by Models. For subclassed networks,\n # the compute and variable dtypes are used as like any ordinary layer.\n self._set_dtype_policy(dtype)\n # Boolean indicating whether the layer automatically casts its inputs to the\n # layer's compute_dtype.\n self._autocast = kwargs.get('autocast',\n base_layer_utils.v2_dtype_behavior_enabled())\n\n # Dependencies tracked via attribute assignment.\n # All layers in order of horizontal graph traversal.\n # Entries are unique. 
For models includes input and output layers.\n self._maybe_create_attribute('_layers', [])\n\n # These lists will be filled via successive calls\n # to self._add_inbound_node().\n # Used in symbolic mode only, only in conjunction with graph-networks\n self._inbound_nodes = []\n self._outbound_nodes = []\n\n self._init_call_fn_args()\n\n # Whether the `call` method can be used to build a TF graph without issues.\n # This attribute has no effect if the model is created using the Functional\n # API. Instead, `model.dynamic` is determined based on the internal layers.\n self._dynamic = dynamic\n\n # Manage input shape information if passed.\n if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:\n # In this case we will later create an input layer\n # to insert before the current layer\n if 'batch_input_shape' in kwargs:\n batch_input_shape = tuple(kwargs['batch_input_shape'])\n elif 'input_shape' in kwargs:\n if 'batch_size' in kwargs:\n batch_size = kwargs['batch_size']\n else:\n batch_size = None\n batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])\n self._batch_input_shape = batch_input_shape\n\n # Manage initial weight values if passed.\n self._initial_weights = kwargs.get('weights', None)\n\n # Whether the layer will track any layers that is set as attribute on itself\n # as sub-layers, the weights from the sub-layers will be included in the\n # parent layer's variables() as well.\n # Default to True, which means auto tracking is turned on. Certain subclass\n # might want to turn it off, like Sequential model.\n self._auto_track_sub_layers = True\n\n @trackable.no_automatic_dependency_tracking\n def build(self, input_shape):\n \"\"\"Creates the variables of the layer (optional, for subclass implementers).\n\n This is a method that implementers of subclasses of `Layer` or `Model`\n can override if they need a state-creation step in-between\n layer instantiation and layer call.\n\n This is typically used to create the weights of `Layer` subclasses.\n\n Arguments:\n input_shape: Instance of `TensorShape`, or list of instances of\n `TensorShape` if the layer expects a list of inputs\n (one instance per input).\n \"\"\"\n # Only record the build input shapes of overridden the build methods.\n if not hasattr(self.build, '_is_default'):\n self._build_input_shape = input_shape\n self.built = True\n\n @doc_controls.for_subclass_implementers\n def call(self, inputs, **kwargs): # pylint: disable=unused-argument\n \"\"\"This is where the layer's logic lives.\n\n Arguments:\n inputs: Input tensor, or list/tuple of input tensors.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A tensor or list/tuple of tensors.\n \"\"\"\n return inputs\n\n @doc_controls.for_subclass_implementers\n def _add_trackable(self, trackable_object, trainable):\n \"\"\"Adds a Trackable object to this layer's state.\n\n Arguments:\n trackable_object: The tf.tracking.Trackable object to add.\n trainable: Boolean, whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases) or\n \"non_trainable_variables\" (e.g. 
BatchNorm mean and variance).\n\n Returns:\n The TrackableWeightHandler used to track this object.\n \"\"\"\n handler = base_layer_utils.TrackableWeightHandler(trackable_object)\n if trainable:\n self._trainable_weights.append(handler)\n else:\n self._non_trainable_weights.append(handler)\n return handler\n\n @doc_controls.for_subclass_implementers\n def add_weight(self,\n name=None,\n shape=None,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=None,\n constraint=None,\n partitioner=None,\n use_resource=None,\n synchronization=tf_variables.VariableSynchronization.AUTO,\n aggregation=tf_variables.VariableAggregation.NONE,\n **kwargs):\n \"\"\"Adds a new variable to the layer.\n\n Arguments:\n name: Variable name.\n shape: Variable shape. Defaults to scalar if unspecified.\n dtype: The type of the variable. Defaults to `self.dtype` or `float32`.\n initializer: Initializer instance (callable).\n regularizer: Regularizer instance (callable).\n trainable: Boolean, whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases)\n or \"non_trainable_variables\" (e.g. BatchNorm mean and variance).\n Note that `trainable` cannot be `True` if `synchronization`\n is set to `ON_READ`.\n constraint: Constraint instance (callable).\n partitioner: Partitioner to be passed to the `Trackable` API.\n use_resource: Whether to use `ResourceVariable`.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize. If `synchronization` is set to `ON_READ`,\n `trainable` must not be set to `True`.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n **kwargs: Additional keyword arguments. Accepted values are `getter`,\n `collections`, `experimental_autocast` and `caching_device`.\n\n Returns:\n The created variable. Usually either a `Variable` or `ResourceVariable`\n instance. 
If `partitioner` is not `None`, a `PartitionedVariable`\n instance is returned.\n\n Raises:\n RuntimeError: If called with partitioned variable regularization and\n eager execution is enabled.\n ValueError: When giving unsupported dtype and no initializer or when\n trainable has been set to True with synchronization set as `ON_READ`.\n \"\"\"\n if shape is None:\n shape = ()\n # Validate optional keyword arguments.\n for kwarg in kwargs:\n if kwarg not in ['getter', 'collections', 'experimental_autocast',\n 'caching_device']:\n raise TypeError('Unknown keyword argument:', kwarg)\n getter = kwargs.pop('getter', base_layer_utils.make_variable)\n collections_arg = kwargs.pop('collections', None)\n # 'experimental_autocast' can be set to False by the caller to indicate an\n # AutoCastVariable should never be created.\n autocast = kwargs.pop('experimental_autocast', True)\n # See the docstring for tf.Variable about the details for caching_device.\n caching_device = kwargs.pop('caching_device', None)\n\n if dtype is None:\n dtype = self.dtype or backend.floatx()\n dtype = dtypes.as_dtype(dtype)\n if self._dtype_policy.variable_dtype is None:\n # The policy is \"infer\", so we infer the policy from the variable dtype.\n self._dtype_policy = policy.Policy(dtype.base_dtype.name)\n initializer = initializers.get(initializer)\n regularizer = regularizers.get(regularizer)\n constraint = constraints.get(constraint)\n\n if synchronization == tf_variables.VariableSynchronization.ON_READ:\n if trainable:\n raise ValueError(\n 'Synchronization value can be set to '\n 'VariableSynchronization.ON_READ only for non-trainable variables. '\n 'You have specified trainable=True and '\n 'synchronization=VariableSynchronization.ON_READ.')\n else:\n # Set trainable to be false when variable is to be synced on read.\n trainable = False\n elif trainable is None:\n trainable = True\n\n # Initialize variable when no initializer provided\n if initializer is None:\n # If dtype is DT_FLOAT, provide a uniform unit scaling initializer\n if dtype.is_floating:\n initializer = initializers.glorot_uniform()\n # If dtype is DT_INT/DT_UINT, provide a default value `zero`\n # If dtype is DT_BOOL, provide a default value `FALSE`\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:\n initializer = initializers.zeros()\n # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?\n else:\n raise ValueError('An initializer for variable %s of type %s is required'\n ' for layer %s' % (name, dtype.base_dtype, self.name))\n\n if (autocast and self._dtype_policy.should_cast_variables and\n dtype.is_floating):\n # Wrap 'getter' with a version that returns an AutoCastVariable.\n old_getter = getter\n def getter(*args, **kwargs): # pylint: disable=function-redefined\n variable = old_getter(*args, **kwargs)\n return autocast_variable.create_autocast_variable(variable)\n # Also the caching_device does not work with the mixed precision API,\n # disable it if it is specified.\n # TODO(b/142020079): Reenable it once the bug is fixed.\n if caching_device is not None:\n tf_logging.warn('`caching_device` does not work with mixed precision '\n 'API. 
Ignoring user specified `caching_device`.')\n caching_device = None\n\n variable = self._add_variable_with_custom_getter(\n name=name,\n shape=shape,\n # TODO(allenl): a `make_variable` equivalent should be added as a\n # `Trackable` method.\n getter=getter,\n # Manage errors in Layer rather than Trackable.\n overwrite=True,\n initializer=initializer,\n dtype=dtype,\n constraint=constraint,\n trainable=trainable,\n partitioner=partitioner,\n use_resource=use_resource,\n collections=collections_arg,\n synchronization=synchronization,\n aggregation=aggregation,\n caching_device=caching_device)\n if regularizer is not None:\n # TODO(fchollet): in the future, this should be handled at the\n # level of variable creation, and weight regularization losses\n # should be variable attributes.\n name_in_scope = variable.name[:variable.name.find(':')]\n self._handle_weight_regularization(name_in_scope,\n variable,\n regularizer)\n if isinstance(variable, tf_variables.PartitionedVariable):\n for v in variable:\n backend.track_variable(v)\n if trainable:\n self._trainable_weights.append(v)\n else:\n self._non_trainable_weights.append(v)\n else:\n backend.track_variable(variable)\n if trainable:\n self._trainable_weights.append(variable)\n else:\n self._non_trainable_weights.append(variable)\n return variable\n\n @base_layer_utils.default\n def get_config(self):\n \"\"\"Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable)\n containing the configuration of a layer.\n The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n The config of a layer does not include connectivity\n information, nor the layer class name. These are handled\n by `Network` (one layer of abstraction above).\n\n Returns:\n Python dictionary.\n \"\"\"\n all_args = tf_inspect.getfullargspec(self.__init__).args\n config = {'name': self.name, 'trainable': self.trainable}\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n config['dtype'] = policy.serialize(self._dtype_policy)\n if hasattr(self, 'dynamic'):\n # Only include `dynamic` in the `config` if it is `True`\n if self.dynamic:\n config['dynamic'] = self.dynamic\n elif 'dynamic' in all_args:\n all_args.remove('dynamic')\n expected_args = config.keys()\n # Finds all arguments in the `__init__` that are not in the config:\n extra_args = [arg for arg in all_args if arg not in expected_args]\n # Check that either the only argument in the `__init__` is `self`,\n # or that `get_config` has been overridden:\n if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n raise NotImplementedError('Layer %s has arguments in `__init__` and '\n 'therefore must override `get_config`.' %\n self.__class__.__name__)\n return config\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates a layer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same layer from the config\n dictionary. It does not handle layer connectivity\n (handled by Network), nor weights (handled by `set_weights`).\n\n Arguments:\n config: A Python dictionary, typically the\n output of get_config.\n\n Returns:\n A layer instance.\n \"\"\"\n return cls(**config)\n\n def compute_output_shape(self, input_shape):\n \"\"\"Computes the output shape of the layer.\n\n If the layer has not been built, this method will call `build` on the\n layer. 
This assumes that the layer will later be used with inputs that\n match the input shape provided here.\n\n Arguments:\n input_shape: Shape tuple (tuple of integers)\n or list of shape tuples (one per output tensor of the layer).\n Shape tuples can include None for free dimensions,\n instead of an integer.\n\n Returns:\n An input shape tuple.\n \"\"\"\n if context.executing_eagerly():\n # In this case we build the model first in order to do shape inference.\n # This is acceptable because the framework only calls\n # `compute_output_shape` on shape values that the layer would later be\n # built for. It would however cause issues in case a user attempts to\n # use `compute_output_shape` manually with shapes that are incompatible\n # with the shape the Layer will be called on (these users will have to\n # implement `compute_output_shape` themselves).\n self._maybe_build(input_shape)\n with func_graph.FuncGraph('graph').as_default():\n input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n def _make_placeholder_like(shape):\n ph = backend.placeholder(shape=shape, dtype=self.dtype)\n ph._keras_mask = None\n return ph\n inputs = nest.map_structure(_make_placeholder_like, input_shape)\n try:\n outputs = self(inputs, training=False)\n except TypeError as e:\n six.raise_from(\n NotImplementedError(\n 'We could not automatically infer the static shape of the '\n 'layer\\'s output. Please implement the '\n '`compute_output_shape` method on your layer (%s).' %\n self.__class__.__name__), e)\n return nest.map_structure(lambda t: t.shape, outputs)\n raise NotImplementedError\n\n @doc_controls.for_subclass_implementers\n def compute_output_signature(self, input_signature):\n \"\"\"Compute the output tensor signature of the layer based on the inputs.\n\n Unlike a TensorShape object, a TensorSpec object contains both shape\n and dtype information for a tensor. 
This method allows layers to provide\n output dtype information if it is different from the input dtype.\n For any layer that doesn't implement this function,\n the framework will fall back to use `compute_output_shape`, and will\n assume that the output dtype matches the input dtype.\n\n Args:\n input_signature: Single TensorSpec or nested structure of TensorSpec\n objects, describing a candidate input for the layer.\n\n Returns:\n Single TensorSpec or nested structure of TensorSpec objects, describing\n how the layer would transform the provided input.\n\n Raises:\n TypeError: If input_signature contains a non-TensorSpec object.\n \"\"\"\n def check_type_return_shape(s):\n if not isinstance(s, tensor_spec.TensorSpec):\n raise TypeError(\n 'Only TensorSpec signature types are supported, '\n 'but saw signature signature entry: {}.'.format(s))\n return s.shape\n input_shape = nest.map_structure(check_type_return_shape, input_signature)\n output_shape = self.compute_output_shape(input_shape)\n dtype = self._compute_dtype\n if dtype is None:\n input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n # Default behavior when self.dtype is None, is to use the first input's\n # dtype.\n dtype = input_dtypes[0]\n return nest.map_structure(\n lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),\n output_shape)\n\n @base_layer_utils.default\n def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n \"\"\"Computes an output mask tensor.\n\n Arguments:\n inputs: Tensor or list of tensors.\n mask: Tensor or list of tensors.\n\n Returns:\n None or a tensor (or list of tensors,\n one per output tensor of the layer).\n \"\"\"\n if not self.supports_masking:\n if any(m is not None for m in nest.flatten(mask)):\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n # masking not explicitly supported: return None as mask.\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask\n\n def __call__(self, *args, **kwargs):\n \"\"\"Wraps `call`, applying pre- and post-processing steps.\n\n Arguments:\n *args: Positional arguments to be passed to `self.call`.\n **kwargs: Keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n\n Note:\n - The following optional keyword arguments are reserved for specific uses:\n * `training`: Boolean scalar tensor of Python boolean indicating\n whether the `call` is meant for training or inference.\n * `mask`: Boolean input mask.\n - If the layer's `call` method takes a `mask` argument (as some Keras\n layers do), its default value will be set to the mask generated\n for `inputs` by the previous layer (if `input` did come from\n a layer that generated a corresponding mask, i.e. 
if it came from\n a Keras layer with masking support.\n\n Raises:\n ValueError: if the layer's `call` method returns None (an invalid value).\n RuntimeError: if `super().__init__()` was not called in the constructor.\n \"\"\"\n if not hasattr(self, '_thread_local'):\n raise RuntimeError(\n 'You must call `super().__init__()` in the layer constructor.')\n\n # Grab the first positional or keyword argument.\n if args:\n inputs = args[0]\n args = args[1:]\n elif self._call_fn_args[0] in kwargs:\n inputs = kwargs.pop(self._call_fn_args[0])\n else:\n raise ValueError(\n 'The first argument to `Layer.call` must always be passed.')\n\n call_context = base_layer_utils.call_context()\n input_list = nest.flatten(inputs)\n\n # We will attempt to build a TF graph if & only if all inputs are symbolic.\n # This is always the case in graph mode. It can also be the case in eager\n # mode when all inputs can be traced back to `keras.Input()` (when building\n # models using the functional API).\n build_graph = tf_utils.are_all_symbolic_tensors(input_list)\n\n # Accept NumPy and scalar inputs by converting to Tensors.\n if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):\n def _convert_non_tensor(x):\n # Don't call `ops.convert_to_tensor_v2` on all `inputs` because\n # `SparseTensors` can't be converted to `Tensor`.\n if isinstance(x, (np.ndarray, float, int)):\n return ops.convert_to_tensor_v2(x)\n return x\n inputs = nest.map_structure(_convert_non_tensor, inputs)\n input_list = nest.flatten(inputs)\n\n # Handle `mask` propagation from previous layer to current layer. Masks can\n # be propagated explicitly via the `mask` argument, or implicitly via\n # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed\n # explicitly take priority.\n mask_arg_passed_by_framework = False\n input_masks = self._collect_input_masks(inputs, args, kwargs)\n if (self._expects_mask_arg and input_masks is not None and\n not self._call_arg_was_passed('mask', args, kwargs)):\n mask_arg_passed_by_framework = True\n kwargs['mask'] = input_masks\n\n # If `training` argument was not explicitly passed, propagate `training`\n # value from this layer's calling layer.\n training_arg_passed_by_framework = False\n # Priority 1: `training` was explicitly passed.\n if self._call_arg_was_passed('training', args, kwargs):\n training_value = self._get_call_arg_value('training', args, kwargs)\n if not self._expects_training_arg:\n kwargs.pop('training')\n else:\n training_value = None\n # Priority 2: `training` was passed to a parent layer.\n if call_context.training is not None:\n training_value = call_context.training\n # Priority 3a: `learning_phase()` has been set.\n elif backend.global_learning_phase_is_set():\n training_value = backend.learning_phase()\n # Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.\n elif build_graph:\n with backend.get_graph().as_default():\n if base_layer_utils.is_in_keras_graph():\n training_value = backend.learning_phase()\n\n if self._expects_training_arg and training_value is not None:\n # Force the training_value to be bool type which matches to the contract\n # for layer/model call args.\n if tensor_util.is_tensor(training_value):\n training_value = math_ops.cast(training_value, dtypes.bool)\n else:\n training_value = bool(training_value)\n kwargs['training'] = training_value\n training_arg_passed_by_framework = True\n\n # Only create Keras history if at least one tensor originates from a\n # `keras.Input`. 
Otherwise this Layer may be being used outside the Keras\n # framework.\n if build_graph and base_layer_utils.needs_keras_history(inputs):\n base_layer_utils.create_keras_history(inputs)\n\n # Clear eager losses on top level model call.\n # We are clearing the losses only on the top level model call and not on\n # every layer/model call because layer/model may be reused.\n if (base_layer_utils.is_in_eager_or_tf_function() and\n not call_context.in_call):\n self._clear_losses()\n\n with call_context.enter(self, inputs, build_graph, training_value):\n # Check input assumptions set after layer building, e.g. input shape.\n if build_graph:\n # Symbolic execution on symbolic tensors. We will attempt to build\n # the corresponding TF subgraph inside `backend.get_graph()`\n # TODO(reedwm): We should assert input compatibility after the inputs\n # are casted, not before.\n input_spec.assert_input_compatibility(self.input_spec, inputs,\n self.name)\n if (any(isinstance(x, ragged_tensor.RaggedTensor) for x in input_list)\n and self._supports_ragged_inputs is False): # pylint: disable=g-bool-id-comparison\n raise ValueError('Layer %s does not support RaggedTensors as input. '\n 'Inputs received: %s. You can try converting your '\n 'input to an uniform tensor.' % (self.name, inputs))\n\n graph = backend.get_graph()\n with graph.as_default(), backend.name_scope(self._name_scope()):\n # Build layer if applicable (if the `build` method has been\n # overridden).\n self._maybe_build(inputs)\n cast_inputs = self._maybe_cast_inputs(inputs)\n\n if not self.dynamic:\n # Wrapping `call` function in autograph to allow for dynamic control\n # flow and control dependencies in call. We are limiting this to\n # subclassed layers as autograph is strictly needed only for\n # subclassed layers and models.\n # tf_convert will respect the value of autograph setting in the\n # enclosing tf.function, if any.\n if (base_layer_utils.is_subclassed(self) and\n not base_layer_utils.from_saved_model(self)):\n call_fn = autograph.tf_convert(\n self.call, ag_ctx.control_status_ctx())\n else:\n call_fn = self.call\n\n try:\n with base_layer_utils.autocast_context_manager(\n self._compute_dtype):\n # Add auto_control_deps in V2 when they are not already added by\n # a `tf.function`.\n if (ops.executing_eagerly_outside_functions() and\n not base_layer_utils.is_in_eager_or_tf_function()):\n with auto_control_deps.AutomaticControlDependencies() as acd:\n outputs = call_fn(cast_inputs, *args, **kwargs)\n # Wrap Tensors in `outputs` in `tf.identity` to avoid\n # circular dependencies.\n outputs = base_layer_utils.mark_as_return(outputs, acd)\n else:\n outputs = call_fn(cast_inputs, *args, **kwargs)\n\n except errors.OperatorNotAllowedInGraphError as e:\n raise TypeError('You are attempting to use Python control '\n 'flow in a layer that was not declared to be '\n 'dynamic. 
Pass `dynamic=True` to the class '\n 'constructor.\\nEncountered error:\\n\"\"\"\\n' +\n str(e) + '\\n\"\"\"')\n else:\n # We will use static shape inference to return symbolic tensors\n # matching the specifications of the layer outputs.\n # Since `self.dynamic` is True, we will never attempt to\n # run the underlying TF graph (which is disconnected).\n # TODO(fchollet): consider py_func as an alternative, which\n # would enable us to run the underlying graph if needed.\n outputs = self._symbolic_call(inputs)\n\n if outputs is None:\n raise ValueError('A layer\\'s `call` method should return a '\n 'Tensor or a list of Tensors, not None '\n '(layer: ' + self.name + ').')\n if base_layer_utils.have_all_keras_metadata(inputs):\n if training_arg_passed_by_framework:\n kwargs.pop('training')\n if mask_arg_passed_by_framework:\n kwargs.pop('mask')\n inputs, outputs = self._set_connectivity_metadata_(\n inputs, outputs, args, kwargs)\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks)\n if hasattr(self, '_set_inputs') and not self.inputs:\n # Subclassed network: explicitly set metadata normally set by\n # a call to self._set_inputs().\n self._set_inputs(cast_inputs, outputs)\n else:\n # Eager execution on data tensors.\n with backend.name_scope(self._name_scope()):\n self._maybe_build(inputs)\n cast_inputs = self._maybe_cast_inputs(inputs)\n with base_layer_utils.autocast_context_manager(\n self._compute_dtype):\n outputs = self.call(cast_inputs, *args, **kwargs)\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks)\n if hasattr(self, '_set_save_spec'):\n self._set_save_spec(cast_inputs)\n\n return outputs\n\n @property\n def dtype(self):\n return self._dtype_policy.variable_dtype\n\n @property\n def name(self):\n return self._name\n\n @property\n @trackable_layer_utils.cache_recursive_attribute('dynamic')\n def dynamic(self):\n # NOTE(taylorrobie): Currently self._dynamic is read-only. If that changes\n # then this cache logic must be updated.\n return self._dynamic\n\n @property\n @doc_controls.do_not_generate_docs\n @trackable_layer_utils.cache_recursive_attribute('stateful')\n def stateful(self):\n return self._stateful\n\n @stateful.setter\n @trackable_layer_utils.invalidate_recursive_cache('stateful')\n def stateful(self, value):\n self._stateful = value\n\n @property\n def trainable(self):\n return self._trainable\n\n @trainable.setter\n def trainable(self, value):\n self._trainable = value\n for layer in getattr(self, '_layers', []):\n layer.trainable = value\n\n @property\n def activity_regularizer(self):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n return self._activity_regularizer\n\n @activity_regularizer.setter\n def activity_regularizer(self, regularizer):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n self._activity_regularizer = regularizer\n\n @property\n def input_spec(self):\n return self._input_spec\n\n @input_spec.setter\n # Must be decorated to prevent tracking, since the input_spec can be nested\n # InputSpec objects.\n @trackable.no_automatic_dependency_tracking\n def input_spec(self, value):\n for v in nest.flatten(value):\n if v is not None and not isinstance(v, InputSpec):\n raise TypeError('Layer input_spec must be an instance of InputSpec. 
'\n 'Got: {}'.format(v))\n self._input_spec = value\n\n @property\n def trainable_weights(self):\n if self.trainable:\n children_weights = self._gather_children_attribute('trainable_weights')\n return self._dedup_weights(self._trainable_weights + children_weights)\n else:\n return []\n\n @property\n def non_trainable_weights(self):\n if self.trainable:\n children_weights = self._gather_children_attribute(\n 'non_trainable_weights')\n non_trainable_weights = self._non_trainable_weights + children_weights\n else:\n children_weights = self._gather_children_attribute('weights')\n non_trainable_weights = (\n self._trainable_weights + self._non_trainable_weights +\n children_weights)\n return self._dedup_weights(non_trainable_weights)\n\n @property\n def weights(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.trainable_weights + self.non_trainable_weights\n\n @property\n def updates(self):\n collected_updates = []\n all_layers = self._gather_unique_layers()\n with backend.get_graph().as_default():\n for layer in all_layers:\n if not layer.trainable and not layer.stateful:\n continue\n for u in layer._updates:\n if callable(u):\n try:\n u = u()\n except errors.InaccessibleTensorError:\n base_layer_utils.check_graph_consistency(\n method='add_update', force_raise=True)\n raise # check_graph_consistency may not always raise.\n base_layer_utils.check_graph_consistency(u, method='add_update')\n collected_updates.append(u)\n return collected_updates\n\n @property\n def losses(self):\n \"\"\"Losses which are associated with this `Layer`.\n\n Variable regularization tensors are created when this property is accessed,\n so it is eager safe: accessing `losses` under a `tf.GradientTape` will\n propagate gradients back to the corresponding variables.\n\n Returns:\n A list of tensors.\n \"\"\"\n collected_losses = []\n all_layers = self._gather_unique_layers()\n for layer in all_layers:\n # If any eager losses are present, we assume the model to be part of an\n # eager training loop (either a custom one or the one used when\n # `run_eagerly=True`) and so we always return just the eager losses.\n if layer._eager_losses:\n # Filter placeholder losses that may have been added by revived layers.\n # (see base_layer_utils for details).\n if (layer._eager_losses[0] is\n not base_layer_utils.REVIVED_LOSS_PLACEHOLDER):\n collected_losses.extend(layer._eager_losses)\n else:\n collected_losses.extend(layer._losses)\n for regularizer in layer._callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n collected_losses.append(loss_tensor)\n return collected_losses\n\n @doc_controls.for_subclass_implementers\n def add_loss(self, losses, inputs=None):\n \"\"\"Add loss tensor(s), potentially dependent on layer inputs.\n\n Some losses (for instance, activity regularization losses) may be dependent\n on the inputs passed when calling a layer. Hence, when reusing the same\n layer on different inputs `a` and `b`, some entries in `layer.losses` may\n be dependent on `a` and some on `b`. 
This method automatically keeps track\n of dependencies.\n\n This method can be used inside a subclassed layer or model's `call`\n function, in which case `losses` should be a Tensor or list of Tensors.\n\n Example:\n\n ```python\n class MyLayer(tf.keras.layers.Layer):\n def call(inputs, self):\n self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)\n return inputs\n ```\n\n This method can also be called directly on a Functional Model during\n construction. In this case, any loss Tensors passed to this Model must\n be symbolic and be able to be traced back to the model's `Input`s. These\n losses become part of the model's topology and are tracked in `get_config`.\n\n Example:\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n x = tf.keras.layers.Dense(10)(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n # Activity regularization.\n model.add_loss(tf.abs(tf.reduce_mean(x)))\n ```\n\n If this is not the case for your loss (if, for example, your loss references\n a `Variable` of one of the model's layers), you can wrap your loss in a\n zero-argument lambda. These losses are not tracked as part of the model's\n topology since they can't be serialized.\n\n Example:\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n x = tf.keras.layers.Dense(10)(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n # Weight regularization.\n model.add_loss(lambda: tf.reduce_mean(x.kernel))\n ```\n\n The `get_losses_for` method allows to retrieve the losses relevant to a\n specific set of inputs.\n\n Arguments:\n losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses\n may also be zero-argument callables which create a loss tensor.\n inputs: Ignored when executing eagerly. If anything other than None is\n passed, it signals the losses are conditional on some of the layer's\n inputs, and thus they should only be run where these inputs are\n available. This is the case for activity regularization losses, for\n instance. If `None` is passed, the losses are assumed\n to be unconditional, and will apply across all dataflows of the layer\n (e.g. 
weight regularization losses).\n \"\"\"\n def _tag_unconditional(loss):\n \"\"\"Process the loss and tag it by setting loss._unconditional_loss.\"\"\"\n if callable(loss):\n # We run the loss without autocasting, as regularizers are often\n # numerically unstable in float16.\n with base_layer_utils.autocast_context_manager(None):\n loss = loss()\n if loss is None:\n return None # Will be filtered out when computing the .losses property\n if not tensor_util.is_tensor(loss):\n loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())\n loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access\n return loss\n\n losses = nest.flatten(losses)\n\n callable_losses = []\n eager_losses = []\n symbolic_losses = []\n for loss in losses:\n if callable(loss):\n callable_losses.append(functools.partial(_tag_unconditional, loss))\n continue\n if loss is None:\n continue\n if not tensor_util.is_tensor(loss):\n loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())\n # TF Functions should take the eager path.\n if (tf_utils.is_symbolic_tensor(loss) and\n not base_layer_utils.is_in_tf_function()):\n symbolic_losses.append(_tag_unconditional(loss))\n base_layer_utils.check_graph_consistency(loss, method='add_loss')\n elif tensor_util.is_tensor(loss):\n eager_losses.append(_tag_unconditional(loss))\n\n self._callable_losses.extend(callable_losses)\n\n in_call_context = base_layer_utils.call_context().in_call\n if eager_losses and not in_call_context:\n raise ValueError(\n 'Expected a symbolic Tensor or a callable for the loss value. '\n 'Please wrap your loss computation in a zero-argument `lambda`.')\n\n self._eager_losses.extend(eager_losses)\n\n if in_call_context:\n for symbolic_loss in symbolic_losses:\n self._losses.append(symbolic_loss)\n else:\n for symbolic_loss in symbolic_losses:\n if getattr(self, '_is_graph_network', False):\n self._graph_network_add_loss(symbolic_loss)\n else:\n # Possibly a loss was added in a Layer's `build`.\n self._losses.append(symbolic_loss)\n\n @trackable.no_automatic_dependency_tracking\n def _clear_losses(self):\n \"\"\"Used every step in eager to reset losses.\"\"\"\n self._eager_losses = []\n if hasattr(self, '_layers'):\n for layer in trackable_layer_utils.filter_empty_layer_containers(\n self._layers):\n layer._clear_losses()\n\n @property\n def metrics(self):\n collected_metrics = []\n all_layers = self._gather_unique_layers()\n for layer in all_layers:\n with layer._metrics_lock:\n collected_metrics.extend(layer._metrics)\n return collected_metrics\n\n @doc_controls.for_subclass_implementers\n def add_metric(self, value, aggregation=None, name=None):\n \"\"\"Adds metric tensor to the layer.\n\n Args:\n value: Metric tensor.\n aggregation: Sample-wise metric reduction function. If `aggregation=None`,\n it indicates that the metric tensor provided has been aggregated\n already. E.g., `bin_acc = BinaryAccuracy(name='acc')` followed by\n `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the\n given metric tensor will be sample-wise reduced using `mean` function.\n E.g., `model.add_metric(tf.reduce_sum(outputs), name='output_mean',\n aggregation='mean')`.\n name: String metric name.\n\n Raises:\n ValueError: If `aggregation` is anything other than None or `mean`.\n \"\"\"\n if aggregation is not None and aggregation != 'mean':\n raise ValueError(\n 'We currently support only `mean` sample-wise metric aggregation. 
'\n 'You provided aggregation=`%s`' % aggregation)\n\n from_metric_obj = hasattr(value, '_metric_obj')\n is_symbolic = tf_utils.is_symbolic_tensor(value)\n in_call_context = base_layer_utils.call_context().in_call\n\n if name is None and not from_metric_obj:\n # E.g. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`\n # In eager mode, we use the metric name to look up a metric. Without a\n # name, a new Mean metric wrapper will be created on every model/layer\n # call. So, we raise an error when no name is provided.\n # We will do the same for symbolic mode for consistency although a name\n # will be generated if no name is provided.\n\n # We will not raise this error in the following use case for the sake of\n # consistency, as the name is provided in the metric constructor.\n # mean = metrics.Mean(name='my_metric')\n # model.add_metric(mean(outputs))\n raise ValueError('Please provide a name for your metric like '\n '`self.add_metric(tf.reduce_sum(inputs), '\n 'name=\'mean_activation\', aggregation=\'mean\')`')\n elif from_metric_obj:\n name = value._metric_obj.name\n\n if in_call_context:\n # TF Function path should take the eager path.\n if is_symbolic and not base_layer_utils.is_in_tf_function():\n self._symbolic_add_metric(value, aggregation, name)\n else:\n self._eager_add_metric(value, aggregation, name)\n else:\n if not is_symbolic:\n raise ValueError('Expected a symbolic Tensor for the metric value, '\n 'received: ' + str(value))\n\n # Possibly a metric was added in a Layer's `build`.\n if not getattr(self, '_is_graph_network', False):\n with backend.get_graph().as_default():\n self._symbolic_add_metric(value, aggregation, name)\n return\n\n if from_metric_obj:\n raise ValueError('Using the result of calling a `Metric` object '\n 'when calling `add_metric` on a Functional '\n 'Model is not supported. Please pass the '\n 'Tensor to monitor directly.')\n\n # Insert layers into the Keras Graph Network.\n self._graph_network_add_metric(value, aggregation, name)\n\n @deprecation.deprecated_args(None, '`inputs` is now automatically inferred',\n 'inputs')\n @doc_controls.for_subclass_implementers\n def add_update(self, updates, inputs=None):\n \"\"\"Add update op(s), potentially dependent on layer inputs.\n\n Weight updates (for instance, the updates of the moving mean and variance\n in a BatchNormalization layer) may be dependent on the inputs passed\n when calling a layer. Hence, when reusing the same layer on\n different inputs `a` and `b`, some entries in `layer.updates` may be\n dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n The `get_updates_for` method allows you to retrieve the updates relevant to a\n specific set of inputs.\n\n This call is ignored when eager execution is enabled (in that case, variable\n updates are run on the fly and thus do not need to be tracked for later\n execution).\n\n Arguments:\n updates: Update op, or list/tuple of update ops, or zero-arg callable\n that returns an update op. 
A zero-arg callable should be passed so\n that the updates can be disabled by setting `trainable=False`\n on this Layer when executing in Eager mode.\n inputs: Deprecated, will be automatically inferred.\n \"\"\"\n call_context = base_layer_utils.call_context()\n\n if (ds_context.has_strategy() and\n ds_context.in_cross_replica_context() and\n # When saving the model, the distribution strategy context should be\n # ignored, following the default path for adding updates.\n not call_context.saving):\n # Updates don't need to be run in a cross-replica context.\n return\n\n updates = generic_utils.to_list(updates)\n\n # All updates can be run immediately in Eager or in a tf.function.\n if base_layer_utils.is_in_eager_or_tf_function():\n if not call_context.frozen:\n for update in updates:\n if callable(update):\n update()\n return\n\n if call_context.in_call:\n relevant_inputs = call_context.inputs\n else:\n inbound_nodes = getattr(self, '_inbound_nodes', [])\n relevant_inputs = [node.input_tensors for node in inbound_nodes]\n\n def process_update(x):\n \"\"\"Standardize update ops.\n\n Arguments:\n x: Tensor, op, or callable.\n\n Returns:\n An update op.\n \"\"\"\n if callable(x):\n update = lambda: process_update(x())\n if not ops.executing_eagerly_outside_functions():\n # In V1 mode, call the callable right away and process. This is needed\n # for TPU strategy.\n return update()\n elif isinstance(x, ops.Operation):\n update = x\n elif hasattr(x, 'op'):\n update = x.op\n else:\n update = ops.convert_to_tensor_v2(x)\n\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n update._unconditional_update = update not in reachable\n return update\n\n updates = [process_update(x) for x in updates]\n # Non-callable Updates are run automatically inside `call` in V2, so\n # they do not need to be tracked later.\n if ops.executing_eagerly_outside_functions() and call_context.in_call:\n updates = [u for u in updates if callable(u)]\n self._updates.extend(updates)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the layer, from Numpy arrays.\n\n The weights of a layer represent the state of the layer. This function\n sets the weight values from numpy arrays. The weight values should be\n passed in the order they are created by the layer. Note that the layer's\n weights must be instantiated before calling this function, by calling\n the layer.\n\n For example, a Dense layer returns a list of two values-- per-output\n weights and the bias value. These can be used to set the weights of another\n Dense layer:\n\n >>> a = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(1.))\n >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))\n >>> a.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n >>> b = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(2.))\n >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))\n >>> b.get_weights()\n [array([[2.],\n [2.],\n [2.]], dtype=float32), array([0.], dtype=float32)]\n >>> b.set_weights(a.get_weights())\n >>> b.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n\n Arguments:\n weights: a list of Numpy arrays. The number\n of arrays and their shapes must match the\n number and shapes of the layer's weights\n (i.e. 
it should match the\n output of `get_weights`).\n\n Raises:\n ValueError: If the provided weights list does not match the\n layer's specifications.\n \"\"\"\n params = self.weights\n\n expected_num_weights = 0\n for param in params:\n if isinstance(param, base_layer_utils.TrackableWeightHandler):\n expected_num_weights += param.num_tensors\n else:\n expected_num_weights += 1\n\n if expected_num_weights != len(weights):\n raise ValueError(\n 'You called `set_weights(weights)` on layer \"%s\" '\n 'with a weight list of length %s, but the layer was '\n 'expecting %s weights. Provided weights: %s...' %\n (self.name, len(weights), expected_num_weights, str(weights)[:50]))\n\n weight_index = 0\n weight_value_tuples = []\n for param in params:\n if isinstance(param, base_layer_utils.TrackableWeightHandler):\n num_tensors = param.num_tensors\n tensors = weights[weight_index:weight_index + num_tensors]\n param.set_weights(tensors)\n weight_index += num_tensors\n else:\n weight = weights[weight_index]\n ref_shape = param.shape\n if not ref_shape.is_compatible_with(weight.shape):\n raise ValueError(\n 'Layer weight shape %s not compatible with provided weight '\n 'shape %s' % (ref_shape, weight.shape))\n weight_value_tuples.append((param, weight))\n weight_index += 1\n\n backend.batch_set_value(weight_value_tuples)\n\n def get_weights(self):\n \"\"\"Returns the current weights of the layer.\n\n The weights of a layer represent the state of the layer. This function\n returns both trainable and non-trainable weight values associated with this\n layer as a list of Numpy arrays, which can in turn be used to load state\n into similarly parameterized layers.\n\n For example, a Dense layer returns a list of two values-- per-output\n weights and the bias value. These can be used to set the weights of another\n Dense layer:\n\n >>> a = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(1.))\n >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))\n >>> a.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n >>> b = tf.keras.layers.Dense(1,\n ... 
kernel_initializer=tf.constant_initializer(2.))\n >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))\n >>> b.get_weights()\n [array([[2.],\n [2.],\n [2.]], dtype=float32), array([0.], dtype=float32)]\n >>> b.set_weights(a.get_weights())\n >>> b.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n\n Returns:\n Weights values as a list of numpy arrays.\n \"\"\"\n weights = self.weights\n output_weights = []\n for weight in weights:\n if isinstance(weight, base_layer_utils.TrackableWeightHandler):\n output_weights.extend(weight.get_tensors())\n else:\n output_weights.append(weight)\n return backend.batch_get_value(output_weights)\n\n def get_updates_for(self, inputs):\n \"\"\"Retrieves updates relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of update ops of the layer that depend on `inputs`.\n \"\"\"\n if inputs is None:\n # Requesting unconditional updates.\n return [u for u in self.updates if u._unconditional_update]\n\n # Requesting input-conditional updates.\n updates = [u for u in self.updates if not u._unconditional_update]\n inputs = nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, updates)\n return [u for u in updates if u in reachable]\n\n def get_losses_for(self, inputs):\n \"\"\"Retrieves losses relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of loss tensors of the layer that depend on `inputs`.\n \"\"\"\n if inputs is None:\n # Requesting unconditional losses.\n return [l for l in self.losses if l._unconditional_loss]\n\n # Requesting input-conditional losses.\n losses = [l for l in self.losses if not l._unconditional_loss]\n inputs = nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, losses)\n return [l for l in losses if l in reachable]\n\n def get_input_mask_at(self, node_index):\n \"\"\"Retrieves the input mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple inputs).\n \"\"\"\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n def get_output_mask_at(self, node_index):\n \"\"\"Retrieves the output mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple outputs).\n \"\"\"\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n @property\n def input_mask(self):\n \"\"\"Retrieves the input mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. 
if it is connected to one incoming layer.\n\n Returns:\n Input mask tensor (potentially None) or list of input\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n @property\n def output_mask(self):\n \"\"\"Retrieves the output mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output mask tensor (potentially None) or list of output\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n def get_input_shape_at(self, node_index):\n \"\"\"Retrieves the input shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_shapes',\n 'input shape')\n\n def get_output_shape_at(self, node_index):\n \"\"\"Retrieves the output shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_shapes',\n 'output shape')\n\n def get_input_at(self, node_index):\n \"\"\"Retrieves the input tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_tensors',\n 'input')\n\n def get_output_at(self, node_index):\n \"\"\"Retrieves the output tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_tensors',\n 'output')\n\n @property\n def input(self):\n \"\"\"Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. 
if it is connected to one incoming layer.\n\n Returns:\n Input tensor or list of input tensors.\n\n Raises:\n RuntimeError: If called in Eager mode.\n AttributeError: If no inbound nodes are found.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name +\n ' is not connected, no input to return.')\n return self._get_node_attribute_at_index(0, 'input_tensors', 'input')\n\n @property\n def output(self):\n \"\"\"Retrieves the output tensor(s) of a layer.\n\n Only applicable if the layer has exactly one output,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output tensor or list of output tensors.\n\n Raises:\n AttributeError: if the layer is connected to more than one incoming\n layer.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')\n return self._get_node_attribute_at_index(0, 'output_tensors', 'output')\n\n @property\n def input_shape(self):\n \"\"\"Retrieves the input shape(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer, or if all inputs\n have the same shape.\n\n Returns:\n Input shape, as an integer shape tuple\n (or list of shape tuples, one tuple per input tensor).\n\n Raises:\n AttributeError: if the layer has no defined input_shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined input shape.')\n all_input_shapes = set(\n [str(node.input_shapes) for node in self._inbound_nodes])\n if len(all_input_shapes) == 1:\n return self._inbound_nodes[0].input_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) +\n '\" has multiple inbound nodes, '\n 'with different input shapes. Hence '\n 'the notion of \"input shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_input_shape_at(node_index)` '\n 'instead.')\n\n def count_params(self):\n \"\"\"Count the total number of scalars composing the weights.\n\n Returns:\n An integer count.\n\n Raises:\n ValueError: if the layer isn't yet built\n (in which case its weights aren't yet defined).\n \"\"\"\n if not self.built:\n if getattr(self, '_is_graph_network', False):\n with tf_utils.maybe_init_scope(self):\n self._maybe_build(self.inputs)\n else:\n raise ValueError('You tried to call `count_params` on ' + self.name +\n ', but the layer isn\'t built. '\n 'You can build it manually via: `' + self.name +\n '.build(batch_input_shape)`.')\n return layer_utils.count_params(self.weights)\n\n @property\n def output_shape(self):\n \"\"\"Retrieves the output shape(s) of a layer.\n\n Only applicable if the layer has one output,\n or if all outputs have the same shape.\n\n Returns:\n Output shape, as an integer shape tuple\n (or list of shape tuples, one tuple per output tensor).\n\n Raises:\n AttributeError: if the layer has no defined output shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n return self._inbound_nodes[0].output_shapes\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. 
'\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)\n\n @property\n @doc_controls.do_not_doc_inheritable\n def inbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._inbound_nodes\n\n @property\n @doc_controls.do_not_doc_inheritable\n def outbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._outbound_nodes\n\n ##############################################################################\n # Methods & attributes below are public aliases of other methods. #\n ##############################################################################\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.__call__` method instead.')\n @doc_controls.do_not_doc_inheritable\n def apply(self, inputs, *args, **kwargs):\n \"\"\"Deprecated, do NOT use!\n\n This is an alias of `self.__call__`.\n\n Arguments:\n inputs: Input tensor(s).\n *args: additional positional arguments to be passed to `self.call`.\n **kwargs: additional keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n \"\"\"\n return self.__call__(inputs, *args, **kwargs)\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.add_weight` method instead.')\n @doc_controls.do_not_doc_inheritable\n def add_variable(self, *args, **kwargs):\n \"\"\"Deprecated, do NOT use! Alias for `add_weight`.\"\"\"\n return self.add_weight(*args, **kwargs)\n\n @property\n def variables(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Alias of `self.weights`.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.weights\n\n @property\n def trainable_variables(self):\n return self.trainable_weights\n\n @property\n def non_trainable_variables(self):\n return self.non_trainable_weights\n\n ##############################################################################\n # Methods & attributes below are all private and only used by the framework. 
#\n ##############################################################################\n\n def _set_dtype_policy(self, dtype):\n \"\"\"Sets self._dtype_policy.\"\"\"\n if isinstance(dtype, policy.Policy):\n self._dtype_policy = dtype\n elif isinstance(dtype, dict):\n self._dtype_policy = policy.deserialize(dtype)\n elif dtype:\n self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)\n else:\n self._dtype_policy = policy.global_policy()\n\n # This has no impact on the layer behavior, and is only used for printing\n # warnings.\n self._dtype_defaulted_to_floatx = (not dtype and\n policy.policy_defaults_to_floatx())\n\n # TODO(reedwm): Expose this property?\n @property\n def _compute_dtype(self):\n \"\"\"The layer's compute dtype.\n\n Unless mixed-precision is used, this is the same as `Layer.dtype`.\n\n If self._autocast is True, the layer will cast floating-point inputs to this.\n\n Returns:\n The layer's compute dtype.\n \"\"\"\n return self._dtype_policy.compute_dtype\n\n def _maybe_cast_inputs(self, inputs):\n \"\"\"Maybe casts the inputs to the compute dtype.\n\n If self._compute_dtype is floating-point and self._autocast is True,\n floating-point inputs are cast to self._compute_dtype.\n\n Args:\n inputs: Input tensor, or structure of input tensors.\n\n Returns:\n `inputs`, but tensors may have been cast to self._compute_dtype\n \"\"\"\n compute_dtype = self._compute_dtype\n if (self._autocast and compute_dtype and\n dtypes.as_dtype(compute_dtype).is_floating):\n def f(x):\n \"\"\"Cast a single Tensor or TensorSpec to the compute dtype.\"\"\"\n cast_types = (ops.Tensor, sparse_tensor.SparseTensor,\n ragged_tensor.RaggedTensor)\n if (isinstance(x, cast_types) and x.dtype.is_floating and\n x.dtype.base_dtype.name != compute_dtype):\n if self._dtype_defaulted_to_floatx:\n self._warn_about_input_casting(x.dtype.base_dtype)\n return math_ops.cast(x, compute_dtype)\n elif isinstance(x, tensor_spec.TensorSpec) and x.dtype.is_floating:\n # Inputs may be TensorSpecs when this function is called from\n # model._set_inputs.\n return tensor_spec.TensorSpec(x.shape, compute_dtype, x.name)\n else:\n return x\n return nest.map_structure(f, inputs)\n else:\n return inputs\n\n def _warn_about_input_casting(self, input_dtype):\n # self._already_warned_about_input_casting is only retrieved or set in this\n # function.\n already_warned = getattr(self, '_already_warned_about_input_casting', False)\n if not already_warned:\n tf_logging.warn(\n \"Layer {self.name} is casting an input tensor from dtype \"\n \"{input_dtype} to the layer's dtype of {layer_dtype}, which is new \"\n \"behavior in TensorFlow 2. The layer has dtype {layer_dtype} \"\n \"because its dtype defaults to floatx.\\n\\n\"\n \"\"\n \"If you intended to run this layer in {layer_dtype}, you can safely \"\n \"ignore this warning. If in doubt, this warning is likely only an \"\n \"issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\\n\\n\"\n \"\"\n \"To change all layers to have dtype {input_dtype} by default, call \"\n \"`tf.keras.backend.set_floatx('{input_dtype}')`. To change just this \"\n \"layer, pass dtype='{input_dtype}' to the layer constructor. If you \"\n \"are the author of this layer, you can disable autocasting by \"\n \"passing autocast=False to the base Layer constructor.\\n\".format(\n self=self,\n input_dtype=input_dtype.name,\n layer_dtype=self._compute_dtype))\n self._already_warned_about_input_casting = True\n\n # _dtype used to be an attribute set in the constructor. 
We still expose it\n # because some clients still use it.\n # TODO(reedwm): Deprecate, then remove the _dtype property.\n @property\n def _dtype(self):\n # This is equivalent to returning self.dtype . We do not return self.dtype\n # as it would cause infinite recursion in a few subclasses, which override\n # \"dtype\" to return self._dtype.\n return self._dtype_policy.variable_dtype\n\n @_dtype.setter\n def _dtype(self, value):\n value = dtypes.as_dtype(value).name\n self._dtype_policy = policy.Policy(value)\n\n def _name_scope(self):\n return self.name\n\n def _init_set_name(self, name, zero_based=True):\n if not name:\n self._name = backend.unique_object_name(\n generic_utils.to_snake_case(self.__class__.__name__),\n zero_based=zero_based)\n else:\n self._name = name\n\n def _get_existing_metric(self, name=None):\n match = [m for m in self._metrics if m.name == name]\n if not match:\n return\n if len(match) > 1:\n raise ValueError(\n 'Please provide different names for the metrics you have added. '\n 'We found {} metrics with the name: \"{}\"'.format(len(match), name))\n return match[0]\n\n def _eager_add_metric(self, value, aggregation=None, name=None):\n # If the given metric is available in `metrics` list we just update state\n # on it, otherwise we create a new metric instance and\n # add it to the `metrics` list.\n metric_obj = getattr(value, '_metric_obj', None)\n # Tensors that come from a Metric object already updated the Metric state.\n should_update_state = not metric_obj\n name = metric_obj.name if metric_obj else name\n\n with self._metrics_lock:\n match = self._get_existing_metric(name)\n if match:\n metric_obj = match\n elif metric_obj:\n self._metrics.append(metric_obj)\n else:\n from tensorflow.python.keras import metrics as metrics_mod # pylint:disable=g-import-not-at-top\n if aggregation is None:\n raise ValueError(\n '`aggregation` must be specified when passing a `Tensor` '\n 'to `add_metric`.')\n assert aggregation is not None\n metric_obj = metrics_mod.Mean(name=name, dtype=value.dtype)\n self._metrics.append(metric_obj)\n\n if should_update_state:\n metric_obj(value)\n return\n\n def _symbolic_add_metric(self, value, aggregation=None, name=None):\n base_layer_utils.check_graph_consistency(value, method='add_metric')\n match = self._get_existing_metric(name)\n if aggregation is None:\n # Iterate over the metrics and check if the given metric exists already.\n # This can happen when a metric instance is created in subclassed model\n # layer `__init__` and we have tracked that instance already in\n # model.__setattr__.\n if match:\n result_tensor = value\n metric_obj = match\n elif hasattr(value, '_metric_obj'):\n # We track the instance using the metadata on the result tensor.\n result_tensor = value\n metric_obj = result_tensor._metric_obj\n self._metrics.append(metric_obj)\n else:\n raise ValueError(\n 'We do not support adding an aggregated metric result tensor that '\n 'is not the output of a `tf.keras.metrics.Metric` metric instance. '\n 'Without having access to the metric instance we cannot reset the '\n 'state of a metric after every epoch during training. You can '\n 'create a `tf.keras.metrics.Metric` instance and pass the result '\n 'here or pass an un-aggregated result with `aggregation` parameter '\n 'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'\n ', name=\\'mean_activation\\', aggregation=\\'mean\\')`')\n else:\n # If a non-aggregated tensor is given as input (ie. 
`aggregation` is\n # explicitly set to `mean`), we wrap the tensor in `Mean` metric.\n if match:\n result_tensor = match(value)\n metric_obj = match\n else:\n metric_obj, result_tensor = base_layer_utils.create_mean_metric(\n value, name)\n self._metrics.append(metric_obj)\n\n def _handle_weight_regularization(self, name, variable, regularizer):\n \"\"\"Create lambdas which compute regularization losses.\"\"\"\n\n def _loss_for_variable(v):\n \"\"\"Creates a regularization loss `Tensor` for variable `v`.\"\"\"\n with backend.name_scope(name + '/Regularizer'):\n regularization = regularizer(v)\n return regularization\n\n if isinstance(variable, tf_variables.PartitionedVariable):\n for v in variable:\n self.add_loss(functools.partial(_loss_for_variable, v))\n else:\n self.add_loss(functools.partial(_loss_for_variable, variable))\n\n def _handle_activity_regularization(self, inputs, outputs):\n # Apply activity regularization.\n # Note that it should be applied every time the layer creates a new\n # output, since it is output-specific.\n if self._activity_regularizer:\n output_list = nest.flatten(outputs)\n with backend.name_scope('ActivityRegularizer'):\n for output in output_list:\n activity_loss = self._activity_regularizer(output)\n batch_size = math_ops.cast(\n array_ops.shape(output)[0], activity_loss.dtype)\n # Make activity regularization strength batch-agnostic.\n mean_activity_loss = activity_loss / batch_size\n base_layer_utils.check_graph_consistency(\n mean_activity_loss, method='activity_regularizer')\n self.add_loss(mean_activity_loss, inputs=inputs)\n\n def _set_mask_metadata(self, inputs, outputs, previous_mask):\n flat_outputs = nest.flatten(outputs)\n\n mask_already_computed = (\n getattr(self, '_compute_output_and_mask_jointly', False) or\n all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))\n\n # Only compute the mask if the Layer explicitly supports masking or has\n # overridden `compute_mask`.\n should_compute_mask = (\n hasattr(self, 'compute_mask') and\n (self.supports_masking or\n not getattr(self.compute_mask, '_is_default', False)))\n\n if mask_already_computed:\n flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]\n elif not should_compute_mask:\n flat_masks = [None for _ in flat_outputs]\n else:\n output_masks = self.compute_mask(inputs, previous_mask)\n # `compute_mask` can return a single `None` even when a Layer\n # has multiple outputs.\n if output_masks is None:\n flat_masks = [None for _ in flat_outputs]\n else:\n flat_masks = nest.flatten(output_masks)\n\n for output, mask in zip(flat_outputs, flat_masks):\n try:\n output._keras_mask = mask\n except AttributeError:\n # C Type such as np.ndarray.\n pass\n\n if tf_utils.are_all_symbolic_tensors(flat_outputs):\n for output in flat_outputs:\n if getattr(output, '_keras_mask', None) is not None:\n # Do not track masks for `TensorFlowOpLayer` construction.\n output._keras_mask._keras_history_checked = True\n\n def _collect_input_masks(self, inputs, args, kwargs):\n \"\"\"Checks if `mask` argument was passed, else gathers mask from inputs.\"\"\"\n if self._call_arg_was_passed('mask', args, kwargs):\n return self._get_call_arg_value('mask', args, kwargs)\n\n if not self._should_compute_mask:\n return None\n\n input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None),\n inputs)\n if generic_utils.is_all_none(input_masks):\n return None\n return input_masks\n\n def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):\n if arg_name in 
kwargs:\n return True\n call_fn_args = self._call_fn_args\n if not inputs_in_args:\n # Ignore `inputs` arg.\n call_fn_args = call_fn_args[1:]\n if arg_name in dict(zip(call_fn_args, args)):\n return True\n return False\n\n def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):\n if arg_name in kwargs:\n return kwargs[arg_name]\n call_fn_args = self._call_fn_args\n if not inputs_in_args:\n # Ignore `inputs` arg.\n call_fn_args = call_fn_args[1:]\n args_dict = dict(zip(call_fn_args, args))\n return args_dict[arg_name]\n\n def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs):\n\n # If the layer returns tensors from its inputs, unmodified,\n # we copy them to avoid loss of tensor metadata.\n output_ls = nest.flatten(outputs)\n inputs_ls = object_identity.ObjectIdentitySet(nest.flatten(inputs))\n output_ls_copy = []\n for x in output_ls:\n if x in inputs_ls:\n with backend.name_scope(self.name):\n x = array_ops.identity(x)\n output_ls_copy.append(x)\n outputs = nest.pack_sequence_as(outputs, output_ls_copy)\n\n # Ignore `inputs` arg.\n arguments = dict(zip(self._call_fn_args[1:], args))\n arguments.update(kwargs)\n\n # Add an inbound node to the layer, so it can keep track of this call.\n # This updates the layer history of the output tensor(s).\n self._add_inbound_node(\n input_tensors=inputs, output_tensors=outputs, arguments=arguments)\n return inputs, outputs\n\n def _add_inbound_node(self,\n input_tensors,\n output_tensors,\n arguments=None):\n \"\"\"Internal method to create an inbound node for the layer.\n\n Arguments:\n input_tensors: list of input tensors.\n output_tensors: list of output tensors.\n arguments: dictionary of keyword arguments that were passed to the\n `call` method of the layer at the call that created the node.\n \"\"\"\n inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,\n input_tensors)\n node_indices = nest.map_structure(lambda t: t._keras_history.node_index,\n input_tensors)\n tensor_indices = nest.map_structure(lambda t: t._keras_history.tensor_index,\n input_tensors)\n\n # Create node, add it to inbound nodes.\n node_module.Node(\n self,\n inbound_layers=inbound_layers,\n node_indices=node_indices,\n tensor_indices=tensor_indices,\n input_tensors=input_tensors,\n output_tensors=output_tensors,\n arguments=arguments)\n\n # Update tensor history metadata.\n # The metadata attribute consists of\n # 1) a layer instance\n # 2) a node index for the layer\n # 3) a tensor index for the node.\n # This allows layer reuse (multiple nodes per layer) and multi-output\n # or multi-input layers (e.g. a layer can return multiple tensors,\n # and each can be sent to a different layer).\n for i, tensor in enumerate(nest.flatten(output_tensors)):\n tensor._keras_history = KerasHistory(self,\n len(self._inbound_nodes) - 1, i) # pylint: disable=protected-access\n\n def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n \"\"\"Private utility to retrieve an attribute (e.g. 
inputs) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n Arguments:\n node_index: Integer index of the node from which\n to retrieve the attribute.\n attr: Exact node attribute name.\n attr_name: Human-readable attribute name, for error messages.\n\n Returns:\n The layer's attribute `attr` at the node of index `node_index`.\n\n Raises:\n RuntimeError: If the layer has no inbound nodes, or if called in Eager\n mode.\n ValueError: If the index provided does not match any node.\n \"\"\"\n if not self._inbound_nodes:\n raise RuntimeError('The layer has never been called '\n 'and thus has no defined ' + attr_name + '.')\n if not len(self._inbound_nodes) > node_index:\n raise ValueError('Asked to get ' + attr_name + ' at node ' +\n str(node_index) + ', but the layer has only ' +\n str(len(self._inbound_nodes)) + ' inbound nodes.')\n values = getattr(self._inbound_nodes[node_index], attr)\n if isinstance(values, list) and len(values) == 1:\n return values[0]\n else:\n return values\n\n def _maybe_build(self, inputs):\n # Check input assumptions set before layer building, e.g. input rank.\n if not self.built:\n input_spec.assert_input_compatibility(\n self.input_spec, inputs, self.name)\n input_list = nest.flatten(inputs)\n if input_list and self._dtype_policy.compute_dtype is None:\n try:\n dtype = input_list[0].dtype.base_dtype.name\n except AttributeError:\n pass\n else:\n self._dtype_policy = policy.Policy(dtype)\n input_shapes = None\n if all(hasattr(x, 'shape') for x in input_list):\n input_shapes = nest.map_structure(lambda x: x.shape, inputs)\n # Only call `build` if the user has manually overridden the build method.\n if not hasattr(self.build, '_is_default'):\n # Any setup work performed only once should happen in an `init_scope`\n # to avoid creating symbolic Tensors that will later pollute any eager\n # operations.\n with tf_utils.maybe_init_scope(self):\n self.build(input_shapes) # pylint:disable=not-callable\n # We must also ensure that the layer is marked as built, and that the build\n # shape is stored, since user-defined build functions may not call\n # `super().build()`\n Layer.build(self, input_shapes)\n\n # Optionally load weight values specified at layer instantiation.\n if self._initial_weights is not None:\n if ops.executing_eagerly_outside_functions():\n with ops.init_scope():\n # Using `init_scope` since we want variable assignment in\n # `set_weights` to be treated like variable initialization.\n self.set_weights(self._initial_weights)\n else:\n self.set_weights(self._initial_weights)\n self._initial_weights = None\n\n def _symbolic_call(self, inputs):\n input_shapes = nest.map_structure(lambda x: x.shape, inputs)\n output_shapes = self.compute_output_shape(input_shapes)\n # Convert to TensorShape so that nest.map_structure will not map into\n # individual dim of the shape.\n output_shapes = tf_utils.convert_shapes(output_shapes, to_tuples=False)\n\n def _make_placeholder_like(shape):\n ph = backend.placeholder(shape=shape, dtype=self.dtype)\n ph._keras_mask = None\n return ph\n return nest.map_structure(_make_placeholder_like, output_shapes)\n\n def _get_trainable_state(self):\n \"\"\"Get the `trainable` state of each sublayer.\n\n Returns:\n A dict mapping all sublayers to their `trainable` value.\n \"\"\"\n layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)\n # Keep track of each top-level layer's `trainable` as well as the\n # state of all of its sublayers.\n 
trainable_state = weakref.WeakKeyDictionary()\n trainable_state[self] = self.trainable\n for layer in layers:\n trainable_state.update(layer._get_trainable_state())\n return trainable_state\n\n def _set_trainable_state(self, trainable_state):\n \"\"\"Set `trainable` state for each sublayer.\"\"\"\n layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)\n if self in trainable_state:\n self.trainable = trainable_state[self]\n for layer in layers:\n layer._set_trainable_state(trainable_state)\n\n @property\n def _obj_reference_counts(self):\n \"\"\"A dictionary counting the number of attributes referencing an object.\"\"\"\n self._maybe_create_attribute('_obj_reference_counts_dict',\n object_identity.ObjectIdentityDictionary())\n return self._obj_reference_counts_dict\n\n @trackable.no_automatic_dependency_tracking\n def _maybe_create_attribute(self, name, default_value):\n \"\"\"Create the attribute with the default value if it hasn't been created.\n\n This is useful for fields that are used for tracking purposes, such as\n _trainable_weights or _layers. Note that a user could create a layer\n subclass and assign an internal field before invoking Layer.__init__(), so\n __setattr__() needs to create the tracking fields and __init__() must not\n override them.\n\n Args:\n name: String, the name of the attribute.\n default_value: Object, the default value of the attribute.\n \"\"\"\n if not hasattr(self, name):\n super(Layer, self).__setattr__(name, default_value)\n\n def __delattr__(self, name):\n # For any super.__delattr__() call, we will directly use the implementation\n # in Trackable and skip the behavior in AutoTrackable. The Layer\n # originally used Trackable as its base class; the change to using Module as\n # the base class forced us to have AutoTrackable in the class hierarchy.\n # Skipping the __delattr__ and __setattr__ in AutoTrackable keeps the\n # status quo.\n existing_value = getattr(self, name, None)\n\n # If this value is replacing an existing object assigned to an attribute, we\n # should clean it out to avoid leaking memory. First we check if there are\n # other attributes referencing it.\n reference_counts = self._obj_reference_counts\n if existing_value not in reference_counts:\n super(tracking.AutoTrackable, self).__delattr__(name)\n return\n\n reference_count = reference_counts[existing_value]\n if reference_count > 1:\n # There are other remaining references. 
We can't remove this object from\n # _layers etc.\n reference_counts[existing_value] = reference_count - 1\n super(tracking.AutoTrackable, self).__delattr__(name)\n return\n else:\n # This is the last remaining reference.\n del reference_counts[existing_value]\n\n super(tracking.AutoTrackable, self).__delattr__(name)\n\n if (isinstance(existing_value, Layer)\n or trackable_layer_utils.has_weights(existing_value)):\n super(tracking.AutoTrackable, self).__setattr__(\n '_layers',\n [l for l in self._layers if l is not existing_value])\n self._attribute_sentinel.invalidate_all()\n if isinstance(existing_value, tf_variables.Variable):\n super(tracking.AutoTrackable, self).__setattr__(\n '_trainable_weights',\n [w for w in self._trainable_weights if w is not existing_value])\n super(tracking.AutoTrackable, self).__setattr__(\n '_non_trainable_weights',\n [w for w in self._non_trainable_weights if w is not existing_value])\n\n # Any time we change `_layers` (either by deleting the attribute or by\n # reassigning it which will call __delattr__ from __setattr__) the topology\n # of the subgraph of Layers may change. In that case we will need to\n # recompute any attribute which depends on that subgraph.\n if name == '_layers':\n self._attribute_sentinel.invalidate_all()\n\n def __setattr__(self, name, value):\n if (name == '_self_setattr_tracking' or\n not getattr(self, '_self_setattr_tracking', True) or\n # Exclude @property.setters from tracking\n hasattr(self.__class__, name)):\n try:\n super(tracking.AutoTrackable, self).__setattr__(name, value)\n except AttributeError:\n raise AttributeError(\n ('Can\\'t set the attribute \"{}\", likely because it conflicts with '\n 'an existing read-only @property of the object. Please choose a '\n 'different name.').format(name))\n return\n\n # Keep track of trackable objects, for the needs of `Network.save_weights`.\n value = data_structures.sticky_attribute_assignment(\n trackable=self, value=value, name=name)\n\n reference_counts = self._obj_reference_counts\n reference_counts[value] = reference_counts.get(value, 0) + 1\n\n # Clean out the old attribute, which clears _layers and _trainable_weights\n # if necessary.\n try:\n self.__delattr__(name)\n except AttributeError:\n pass\n\n # TODO(scottzhu): Need to track Module object as well for weight tracking.\n # Be careful about metric if it becomes a Module in future.\n # Append value to self._layers if relevant\n if (getattr(self, '_auto_track_sub_layers', True) and\n (isinstance(value, Layer) or trackable_layer_utils.has_weights(value))):\n self._maybe_create_attribute('_layers', [])\n # We need to check object identity to avoid de-duplicating empty\n # container types which compare equal.\n if not any((layer is value for layer in self._layers)):\n self._layers.append(value)\n if hasattr(value, '_attribute_sentinel'):\n value._attribute_sentinel.add_parent(self._attribute_sentinel)\n if hasattr(value, '_use_resource_variables'):\n # Legacy layers (V1 tf.layers) must always use\n # resource variables.\n value._use_resource_variables = True\n\n # Append value to list of trainable / non-trainable weights if relevant\n # TODO(b/125122625): This won't pick up on any variables added to a\n # list/dict after creation.\n for val in nest.flatten(value):\n # TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops\n # no longer return True for isinstance Variable checks.\n if not isinstance(val, tf_variables.Variable):\n continue\n if isinstance(val, resource_variable_ops._UnreadVariable): # pylint: 
disable=protected-access\n continue\n\n # Users may add extra weights/variables\n # simply by assigning them to attributes (invalid for graph networks)\n self._maybe_create_attribute('_trainable_weights', [])\n self._maybe_create_attribute('_non_trainable_weights', [])\n if val.trainable:\n if any(val is w for w in self._trainable_weights):\n continue\n self._trainable_weights.append(val)\n else:\n if any(val is w for w in self._non_trainable_weights):\n continue\n self._non_trainable_weights.append(val)\n\n backend.track_variable(val)\n\n # Skip the auto trackable from tf.Module to keep status quo. See the comment\n # at __delattr__.\n super(tracking.AutoTrackable, self).__setattr__(name, value)\n\n def _gather_children_attribute(self, attribute):\n assert attribute in {\n 'weights', 'trainable_weights', 'non_trainable_weights'\n }\n if hasattr(self, '_layers'):\n nested_layers = trackable_layer_utils.filter_empty_layer_containers(\n self._layers)\n return list(\n itertools.chain.from_iterable(\n getattr(layer, attribute) for layer in nested_layers))\n return []\n\n def _gather_unique_layers(self):\n \"\"\"Returns the current layer and all its children depth first deduped.\n\n We are deduping after getting the layers to maintain the order.\n \"\"\"\n all_layers = self._gather_layers()\n unique_layers, seen_layers = [], object_identity.ObjectIdentitySet()\n for layer in all_layers:\n if layer not in seen_layers:\n unique_layers.append(layer)\n # Track the Variable's identity to avoid __eq__ issues.\n seen_layers.add(layer)\n return unique_layers\n\n def _gather_layers(self):\n \"\"\"Returns the current layer and all its children depth first.\"\"\"\n all_layers = [self]\n if hasattr(self, '_layers'):\n child_layers = trackable_layer_utils.filter_empty_layer_containers(\n self._layers)\n for child_layer in child_layers:\n all_layers.extend(child_layer._gather_layers())\n return all_layers\n\n @property\n @tracking.cached_per_instance\n def _attribute_sentinel(self):\n return trackable_layer_utils.AttributeSentinel()\n\n # This is a hack so that the is_layer (within\n # training/trackable/layer_utils.py) check doesn't get the weights attr.\n # TODO(b/110718070): Remove when fixed.\n def _is_layer(self):\n return True\n\n def _init_call_fn_args(self):\n # Clear cached call function arguments.\n self.__class__._call_full_argspec.fget.cache.pop(self, None)\n self.__class__._call_fn_args.fget.cache.pop(self, None)\n self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)\n\n call_fn_args = self._call_fn_args\n self._expects_training_arg = ('training' in call_fn_args or\n self._call_accepts_kwargs)\n self._expects_mask_arg = ('mask' in call_fn_args or\n self._call_accepts_kwargs)\n\n @property\n @tracking.cached_per_instance\n def _call_full_argspec(self):\n # Argspec inspection is expensive and the call spec is used often, so it\n # makes sense to cache the result.\n return tf_inspect.getfullargspec(self.call)\n\n @property\n @tracking.cached_per_instance\n def _call_fn_args(self):\n all_args = self._call_full_argspec.args\n # Scrub `self` that appears if a decorator was applied.\n if all_args and all_args[0] == 'self':\n return all_args[1:]\n return all_args\n\n @property\n @tracking.cached_per_instance\n def _call_accepts_kwargs(self):\n return self._call_full_argspec.varkw is not None\n\n @property\n @tracking.cached_per_instance\n def _should_compute_mask(self):\n return ('mask' in self._call_fn_args or\n getattr(self, 'compute_mask', None) is not None)\n\n @property\n def 
_eager_losses(self):\n # A list of loss values containing activity regularizers and losses\n # manually added through `add_loss` during eager execution. It is cleared\n # after every batch.\n # Because we plan on eventually allowing a same model instance to be trained\n # in eager mode or graph mode alternatively, we need to keep track of\n # eager losses and symbolic losses via separate attributes.\n if not hasattr(self._thread_local, '_eager_losses'):\n self._thread_local._eager_losses = []\n return self._thread_local._eager_losses\n\n @_eager_losses.setter\n def _eager_losses(self, losses):\n self._thread_local._eager_losses = losses\n\n def _dedup_weights(self, weights):\n \"\"\"Dedupe weights while maintaining order as much as possible.\"\"\"\n output, seen_weights = [], object_identity.ObjectIdentitySet()\n for w in weights:\n if w not in seen_weights:\n output.append(w)\n # Track the Variable's identity to avoid __eq__ issues.\n seen_weights.add(w)\n return output\n\n # SavedModel properties. Please see keras/saving/saved_model for details.\n\n @property\n def _trackable_saved_model_saver(self):\n return layer_serialization.LayerSavedModelSaver(self)\n\n @property\n def _object_identifier(self):\n return self._trackable_saved_model_saver.object_identifier\n\n @property\n def _tracking_metadata(self):\n return self._trackable_saved_model_saver.tracking_metadata\n\n def _list_extra_dependencies_for_serialization(self, serialization_cache):\n return (self._trackable_saved_model_saver\n .list_extra_dependencies_for_serialization(serialization_cache))\n\n def _list_functions_for_serialization(self, serialization_cache):\n return (self._trackable_saved_model_saver\n .list_functions_for_serialization(serialization_cache))\n\n def __getstate__(self):\n # Override to support `copy.deepcopy` and pickling.\n # Thread-local objects cannot be copied in Python 3, so pop these.\n # Thread-local objects are used to cache losses in MirroredStrategy, and\n # so shouldn't be copied.\n state = self.__dict__.copy()\n state.pop('_thread_local', None)\n state.pop('_metrics_lock', None)\n return state\n\n def __setstate__(self, state):\n state['_thread_local'] = threading.local()\n state['_metrics_lock'] = threading.Lock()\n # Bypass Trackable logic as `__dict__` already contains this info.\n object.__setattr__(self, '__dict__', state)\n\n\nclass TensorFlowOpLayer(Layer):\n \"\"\"Wraps a TensorFlow Operation in a Layer.\n\n This class is used internally by the Functional API. When a user\n uses a raw TensorFlow Operation on symbolic tensors originating\n from an `Input` Layer, the resultant operation will be wrapped\n with this Layer object in order to make the operation compatible\n with the Keras API.\n\n This Layer will create a new, identical operation (except for inputs\n and outputs) every time it is called. If `run_eagerly` is `True`,\n the op creation and calculation will happen inside an Eager function.\n\n Instances of this Layer are created when `autolambda` is called, which\n is whenever a Layer's `__call__` encounters symbolic inputs that do\n not have Keras metadata, or when a Network's `__init__` encounters\n outputs that do not have Keras metadata.\n\n Attributes:\n node_def: String, the serialized NodeDef of the Op this layer will wrap.\n name: String, the name of the Layer.\n constants: Dict of NumPy arrays, the values of any Tensors needed for this\n Operation that do not originate from a Keras `Input` Layer. 
Since all\n placeholders must come from Keras `Input` Layers, these Tensors must be\n treated as constant in the Functional API.\n trainable: Bool, whether this Layer is trainable. Currently Variables are\n not supported, and so this parameter has no effect.\n dtype: The default dtype of this Layer. Inherited from `Layer` and has no\n effect on this class, however is used in `get_config`.\n \"\"\"\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self,\n node_def,\n name,\n constants=None,\n trainable=True,\n dtype=None):\n # Pass autocast=False, as if inputs are cast, input types might not match\n # Operation type.\n super(TensorFlowOpLayer, self).__init__(\n name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype,\n autocast=False)\n _keras_layers_gauge.get_cell('TensorflowOpLayer').set(True)\n if isinstance(node_def, dict):\n self.node_def = json_format.ParseDict(node_def, node_def_pb2.NodeDef())\n else:\n if not isinstance(node_def, bytes):\n node_def = node_def.encode('utf-8')\n self.node_def = node_def_pb2.NodeDef.FromString(node_def)\n # JSON serialization stringifies keys which are integer input indices.\n self.constants = ({\n int(index): constant for index, constant in constants.items()\n } if constants is not None else {})\n # Layer uses original op unless it is called on new inputs.\n # This means `built` is not set in `__call__`.\n self.built = True\n\n def call(self, inputs):\n if context.executing_eagerly():\n return self._defun_call(inputs)\n return self._make_op(inputs)\n\n def _make_node_def(self, graph):\n node_def = node_def_pb2.NodeDef()\n node_def.CopyFrom(self.node_def)\n # Used in TPUReplicateContext to indicate whether this node has been cloned\n # and to not add TPU attributes.\n node_def.attr['_cloned'].b = True\n node_def.name = graph.unique_name(node_def.name)\n return node_def\n\n def _make_op(self, inputs):\n inputs = nest.flatten(inputs)\n graph = inputs[0].graph\n node_def = self._make_node_def(graph)\n with graph.as_default():\n for index, constant in self.constants.items():\n # Recreate constant in graph to add distribution context.\n value = tensor_util.constant_value(constant)\n if value is not None:\n constant = constant_op.constant(value, name=node_def.input[index])\n inputs.insert(index, constant)\n c_op = ops._create_c_op(graph, node_def, inputs, control_inputs=[])\n op = graph._create_op_from_tf_operation(c_op)\n op._control_flow_post_processing()\n\n # Record the gradient because custom-made ops don't go through the\n # code-gen'd eager call path\n op_type = compat.as_str(op.op_def.name)\n attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr]\n attrs = []\n for attr_name in attr_names:\n attrs.append(attr_name)\n attrs.append(op.get_attr(attr_name))\n attrs = tuple(attrs)\n execute.record_gradient(op_type, op.inputs, attrs, op.outputs)\n\n if len(op.outputs) == 1:\n return op.outputs[0]\n return op.outputs\n\n @function.defun\n def _defun_call(self, inputs):\n \"\"\"Wraps the op creation method in an Eager function for `run_eagerly`.\"\"\"\n return self._make_op(inputs)\n\n def get_config(self):\n config = super(TensorFlowOpLayer, self).get_config()\n config.update({\n # `__init__` prefixes the name. 
Revert to the constructor argument.\n 'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):],\n 'node_def': json_format.MessageToDict(self.node_def),\n 'constants': {\n i: backend.get_value(c) for i, c in self.constants.items()\n }\n })\n return config\n\n\nclass AddLoss(Layer):\n \"\"\"Adds its inputs as a loss.\n\n Attributes:\n unconditional: Whether or not the loss should be conditioned on the inputs.\n \"\"\"\n\n def __init__(self, unconditional, **kwargs):\n # Pass autocast=False, as there is no reason to cast loss to a different\n # dtype.\n kwargs['autocast'] = False\n super(AddLoss, self).__init__(**kwargs)\n self.unconditional = unconditional\n\n def call(self, inputs):\n self.add_loss(inputs, inputs=(not self.unconditional))\n return inputs\n\n def get_config(self):\n config = super(AddLoss, self).get_config()\n config.update({'unconditional': self.unconditional})\n return config\n\n\nclass AddMetric(Layer):\n \"\"\"Adds its inputs as a metric.\n\n Attributes:\n aggregation: 'mean' or None. How the inputs should be aggregated.\n metric_name: The name to use for this metric.\n \"\"\"\n\n def __init__(self, aggregation=None, metric_name=None, **kwargs):\n super(AddMetric, self).__init__(**kwargs)\n self.aggregation = aggregation\n self.metric_name = metric_name\n\n def call(self, inputs):\n self.add_metric(inputs, self.aggregation, self.metric_name)\n return inputs\n\n def get_config(self):\n config = super(AddMetric, self).get_config()\n config.update({\n 'aggregation': self.aggregation,\n 'metric_name': self.metric_name\n })\n return config\n\n\nclass KerasHistory(\n collections.namedtuple('KerasHistory',\n ['layer', 'node_index', 'tensor_index'])):\n \"\"\"Tracks the Layer call that created a Tensor, for Keras Graph Networks.\n\n During construction of Keras Graph Networks, this metadata is added to\n each Tensor produced as the output of a Layer, starting with an\n `InputLayer`. This allows Keras to track how each Tensor was produced, and\n this information is later retraced by the `keras.engine.Network` class to\n reconstruct the Keras Graph Network.\n\n Attributes:\n layer: The Layer that produced the Tensor.\n node_index: The specific call to the Layer that produced this Tensor. Layers\n can be called multiple times in order to share weights. A new node is\n created every time a Tensor is called.\n tensor_index: The output index for this Tensor. Always zero if the Layer\n that produced this Tensor only has one output. Nested structures of\n Tensors are deterministically assigned an index via `nest.flatten`.\n \"\"\"\n # Added to maintain memory and performance characteristics of `namedtuple`\n # while subclassing.\n __slots__ = ()\n\n\n# Avoid breaking users who directly import this symbol from this file.\n# TODO(fchollet): remove this.\nInputSpec = input_spec.InputSpec # pylint:disable=invalid-name\n" ]
[ [ "tensorflow.python.keras.mixed_precision.experimental.policy.serialize", "tensorflow.python.keras.backend.batch_get_value", "tensorflow.python.keras.mixed_precision.experimental.policy.policy_defaults_to_floatx", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.training.tracking.layer_utils.has_weights", "tensorflow.python.keras.initializers.zeros", "tensorflow.python.keras.backend.track_variable", "tensorflow.python.keras.engine.input_spec.assert_input_compatibility", "tensorflow.python.training.tracking.layer_utils.AttributeSentinel", "tensorflow.python.keras.engine.base_layer_utils.is_in_tf_function", "tensorflow.python.keras.constraints.get", "tensorflow.python.keras.utils.tf_utils.get_reachable_from_inputs", "tensorflow.python.training.tracking.layer_utils.cache_recursive_attribute", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.keras.engine.base_layer_utils.is_in_eager_or_tf_function", "tensorflow.python.keras.engine.node.Node", "tensorflow.python.autograph.core.ag_ctx.control_status_ctx", "tensorflow.python.keras.utils.generic_utils.validate_kwargs", "tensorflow.python.keras.backend.get_graph", "tensorflow.python.keras.engine.base_layer_utils.TrackableWeightHandler", "tensorflow.python.util.nest.flatten", "tensorflow.python.keras.utils.generic_utils.to_list", "tensorflow.python.keras.backend.global_learning_phase_is_set", "tensorflow.python.framework.ops.convert_to_tensor_v2", "tensorflow.python.keras.engine.base_layer_utils.create_mean_metric", "tensorflow.python.keras.mixed_precision.experimental.policy.global_policy", "tensorflow.python.eager.execute.record_gradient", "tensorflow.python.training.tracking.layer_utils.invalidate_recursive_cache", "tensorflow.python.keras.mixed_precision.experimental.policy.Policy", "tensorflow.python.keras.utils.tf_utils.maybe_init_scope", "tensorflow.python.distribute.distribution_strategy_context.in_cross_replica_context", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.keras.backend.get_value", "tensorflow.python.keras.engine.base_layer_utils.have_all_keras_metadata", "tensorflow.python.keras.backend.batch_set_value", "tensorflow.python.keras.utils.generic_utils.to_snake_case", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.keras.engine.base_layer_utils.from_saved_model", "tensorflow.python.keras.engine.base_layer_utils.v2_dtype_behavior_enabled", "tensorflow.python.keras.utils.tf_utils.are_all_symbolic_tensors", "tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor", "tensorflow.core.framework.node_def_pb2.NodeDef.FromString", "tensorflow.python.keras.engine.base_layer_utils.is_subclassed", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.util.tf_inspect.getfullargspec", "tensorflow.python.keras.backend.placeholder", "tensorflow.python.keras.metrics.Mean", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.framework.auto_control_deps.AutomaticControlDependencies", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.utils.tf_utils.convert_shapes", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.keras.backend.floatx", "tensorflow.python.util.compat.as_str", "tensorflow.python.keras.saving.saved_model.layer_serialization.LayerSavedModelSaver", "tensorflow.python.framework.func_graph.FuncGraph", "tensorflow.python.keras.engine.base_layer_utils.call_context", 
"tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.keras.engine.base_layer_utils.check_graph_consistency", "tensorflow.python.keras.mixed_precision.experimental.policy.deserialize", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.util.object_identity.ObjectIdentitySet", "tensorflow.python.keras.engine.base_layer_utils.create_keras_history", "tensorflow.core.framework.node_def_pb2.NodeDef", "tensorflow.python.keras.backend.learning_phase", "tensorflow.python.training.tracking.layer_utils.filter_empty_layer_containers", "tensorflow.python.keras.backend.name_scope", "tensorflow.python.keras.engine.base_layer_utils.autocast_context_manager", "tensorflow.python.keras.regularizers.get", "tensorflow.python.distribute.distribution_strategy_context.has_strategy", "tensorflow.python.keras.engine.base_layer_utils.needs_keras_history", "tensorflow.python.eager.monitoring.BoolGauge", "tensorflow.python.keras.engine.base_layer_utils.is_in_keras_graph", "tensorflow.python.keras.mixed_precision.experimental.autocast_variable.create_autocast_variable", "tensorflow.python.keras.engine.base_layer_utils.mark_as_return", "tensorflow.python.util.nest.map_structure", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.framework.ops._create_c_op", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.training.tracking.data_structures.sticky_attribute_assignment", "tensorflow.python.keras.utils.layer_utils.count_params", "tensorflow.python.util.object_identity.ObjectIdentityDictionary", "tensorflow.python.keras.utils.generic_utils.is_all_none", "tensorflow.python.keras.initializers.get", "tensorflow.python.keras.initializers.glorot_uniform", "tensorflow.python.framework.constant_op.constant" ] ]
nateagr/tf-yarn
[ "1f958256291a4cacc3c122900c86831b7882f1e3" ]
[ "tf_yarn/pytorch/model_ckpt.py" ]
[ "import os\nimport re\nimport logging\nfrom typing import Optional, Union, Dict, Any\n\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom cluster_pack import filesystem\n\nfrom tf_yarn.pytorch.tasks.worker import PYTORCH_DPP_RANK\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef find_latest_ckpt(model_dir: str) -> Optional[str]:\n latest_ckpt = None\n latest_epoch = -1\n pattern = r\".*model_(\\d+).pt\"\n resolved_fs, _ = filesystem.resolve_filesystem_and_path(model_dir)\n if resolved_fs.exists(model_dir):\n for p in resolved_fs.ls(model_dir):\n groups = re.match(pattern, p)\n if groups:\n epoch = int(groups.group(1))\n if epoch > latest_epoch:\n latest_ckpt = groups.group(0)\n latest_epoch = epoch\n return latest_ckpt\n\n\ndef load_latest_ckpt(\n model_dir: str, model: Union[DDP, torch.nn.Module],\n optimizer: torch.optim.Optimizer, device: Union[int, str]\n) -> Optional[Dict[Any, Any]]:\n latest_ckpt = find_latest_ckpt(model_dir)\n if not latest_ckpt:\n _logger.info(\"No checkpoint to load\")\n return None\n return load_ckpt(latest_ckpt, model, optimizer, device)\n\n\ndef load_ckpt(\n model_ckpt_path: str, model: Union[DDP, torch.nn.Module],\n optimizer: torch.optim.Optimizer, device: Union[int, str]\n) -> Dict[Any, Any]:\n resolved_fs, _ = filesystem.resolve_filesystem_and_path(model_ckpt_path)\n _logger.info(f\"Loading model checkpoint {model_ckpt_path}\")\n with resolved_fs.open(model_ckpt_path, \"rb\") as fd:\n checkpoint = torch.load(fd, map_location=torch.device(device))\n _unwrap_model(model).load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n return checkpoint\n\n\ndef save_ckpt(\n model_dir: str, model: Union[DDP, torch.nn.Module], optimizer: torch.optim.Optimizer,\n epoch: int, **kwargs: Dict[Any, Any]\n) -> Optional[str]:\n if int(os.environ[PYTORCH_DPP_RANK]) != 0:\n return None\n\n state = {\n 'model': _unwrap_model(model).state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n **kwargs\n }\n resolved_fs, _ = filesystem.resolve_filesystem_and_path(model_dir)\n if not resolved_fs.exists(model_dir):\n resolved_fs.mkdir(model_dir)\n model_ckpt_path = os.path.join(model_dir, f\"model_{epoch}.pt\")\n with resolved_fs.open(model_ckpt_path, \"wb\") as fd:\n torch.save(state, fd)\n return model_ckpt_path\n\n\ndef _unwrap_model(model: Union[DDP, torch.nn.Module]) -> torch.nn.Module:\n return model.module if isinstance(model, DDP) else model\n" ]
[ [ "torch.device", "torch.save" ] ]
leonardowei/pandas
[ "e0edc9912001e74f714958a49a79e8b62d1f0e5e" ]
[ "pandas/tests/groupby/test_grouping.py" ]
[ "\"\"\" test where we are determining what we are grouping, or getting groups \"\"\"\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.groupby.grouper import Grouping\n\n# selection\n# --------------------------------\n\n\nclass TestSelection:\n def test_select_bad_cols(self):\n df = DataFrame([[1, 2]], columns=[\"A\", \"B\"])\n g = df.groupby(\"A\")\n with pytest.raises(KeyError, match=\"\\\"Columns not found: 'C'\\\"\"):\n g[[\"C\"]]\n\n with pytest.raises(KeyError, match=\"^[^A]+$\"):\n # A should not be referenced as a bad column...\n # will have to rethink regex if you change message!\n g[[\"A\", \"C\"]]\n\n def test_groupby_duplicated_column_errormsg(self):\n # GH7511\n df = DataFrame(\n columns=[\"A\", \"B\", \"A\", \"C\"], data=[range(4), range(2, 6), range(0, 8, 2)]\n )\n\n msg = \"Grouper for 'A' not 1-dimensional\"\n with pytest.raises(ValueError, match=msg):\n df.groupby(\"A\")\n with pytest.raises(ValueError, match=msg):\n df.groupby([\"A\", \"B\"])\n\n grouped = df.groupby(\"B\")\n c = grouped.count()\n assert c.columns.nlevels == 1\n assert c.columns.size == 3\n\n def test_column_select_via_attr(self, df):\n result = df.groupby(\"A\").C.sum()\n expected = df.groupby(\"A\")[\"C\"].sum()\n tm.assert_series_equal(result, expected)\n\n df[\"mean\"] = 1.5\n result = df.groupby(\"A\").mean()\n expected = df.groupby(\"A\").agg(np.mean)\n tm.assert_frame_equal(result, expected)\n\n def test_getitem_list_of_columns(self):\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"three\", \"two\", \"two\", \"one\", \"three\"],\n \"C\": np.random.randn(8),\n \"D\": np.random.randn(8),\n \"E\": np.random.randn(8),\n }\n )\n\n result = df.groupby(\"A\")[[\"C\", \"D\"]].mean()\n result2 = df.groupby(\"A\")[df.columns[2:4]].mean()\n\n expected = df.loc[:, [\"A\", \"C\", \"D\"]].groupby(\"A\").mean()\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n def test_getitem_numeric_column_names(self):\n # GH #13731\n df = DataFrame(\n {\n 0: list(\"abcd\") * 2,\n 2: np.random.randn(8),\n 4: np.random.randn(8),\n 6: np.random.randn(8),\n }\n )\n result = df.groupby(0)[df.columns[1:3]].mean()\n result2 = df.groupby(0)[[2, 4]].mean()\n\n expected = df.loc[:, [0, 2, 4]].groupby(0).mean()\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # per GH 23566 this should raise a FutureWarning\n with tm.assert_produces_warning(FutureWarning):\n df.groupby(0)[2, 4].mean()\n\n def test_getitem_single_list_of_columns(self, df):\n # per GH 23566 this should raise a FutureWarning\n with tm.assert_produces_warning(FutureWarning):\n df.groupby(\"A\")[\"C\", \"D\"].mean()\n\n def test_getitem_single_column(self):\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"three\", \"two\", \"two\", \"one\", \"three\"],\n \"C\": np.random.randn(8),\n \"D\": np.random.randn(8),\n \"E\": np.random.randn(8),\n }\n )\n\n result = df.groupby(\"A\")[\"C\"].mean()\n\n as_frame = df.loc[:, [\"A\", \"C\"]].groupby(\"A\").mean()\n as_series = as_frame.iloc[:, 0]\n expected = as_series\n\n tm.assert_series_equal(result, expected)\n\n\n# grouping\n# --------------------------------\n\n\nclass TestGrouping:\n 
def test_grouper_index_types(self):\n # related GH5375\n # groupby misbehaving when using a Floatlike index\n df = DataFrame(np.arange(10).reshape(5, 2), columns=list(\"AB\"))\n for index in [\n tm.makeFloatIndex,\n tm.makeStringIndex,\n tm.makeUnicodeIndex,\n tm.makeIntIndex,\n tm.makeDateIndex,\n tm.makePeriodIndex,\n ]:\n\n df.index = index(len(df))\n df.groupby(list(\"abcde\")).apply(lambda x: x)\n\n df.index = list(reversed(df.index.tolist()))\n df.groupby(list(\"abcde\")).apply(lambda x: x)\n\n def test_grouper_multilevel_freq(self):\n\n # GH 7885\n # with level and freq specified in a pd.Grouper\n from datetime import date, timedelta\n\n d0 = date.today() - timedelta(days=14)\n dates = date_range(d0, date.today())\n date_index = MultiIndex.from_product([dates, dates], names=[\"foo\", \"bar\"])\n df = DataFrame(np.random.randint(0, 100, 225), index=date_index)\n\n # Check string level\n expected = (\n df.reset_index()\n .groupby([pd.Grouper(key=\"foo\", freq=\"W\"), pd.Grouper(key=\"bar\", freq=\"W\")])\n .sum()\n )\n # reset index changes columns dtype to object\n expected.columns = Index([0], dtype=\"int64\")\n\n result = df.groupby(\n [pd.Grouper(level=\"foo\", freq=\"W\"), pd.Grouper(level=\"bar\", freq=\"W\")]\n ).sum()\n tm.assert_frame_equal(result, expected)\n\n # Check integer level\n result = df.groupby(\n [pd.Grouper(level=0, freq=\"W\"), pd.Grouper(level=1, freq=\"W\")]\n ).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_creation_bug(self):\n\n # GH 8795\n df = DataFrame({\"A\": [0, 0, 1, 1, 2, 2], \"B\": [1, 2, 3, 4, 5, 6]})\n g = df.groupby(\"A\")\n expected = g.sum()\n\n g = df.groupby(pd.Grouper(key=\"A\"))\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n g = df.groupby(pd.Grouper(key=\"A\", axis=0))\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n result = g.apply(lambda x: x.sum())\n expected[\"A\"] = [0, 2, 4]\n expected = expected.loc[:, [\"A\", \"B\"]]\n tm.assert_frame_equal(result, expected)\n\n # GH14334\n # pd.Grouper(key=...) 
may be passed in a list\n df = DataFrame(\n {\"A\": [0, 0, 0, 1, 1, 1], \"B\": [1, 1, 2, 2, 3, 3], \"C\": [1, 2, 3, 4, 5, 6]}\n )\n # Group by single column\n expected = df.groupby(\"A\").sum()\n g = df.groupby([pd.Grouper(key=\"A\")])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n # Group by two columns\n # using a combination of strings and Grouper objects\n expected = df.groupby([\"A\", \"B\"]).sum()\n\n # Group with two Grouper objects\n g = df.groupby([pd.Grouper(key=\"A\"), pd.Grouper(key=\"B\")])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n # Group with a string and a Grouper object\n g = df.groupby([\"A\", pd.Grouper(key=\"B\")])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n # Group with a Grouper object and a string\n g = df.groupby([pd.Grouper(key=\"A\"), \"B\"])\n result = g.sum()\n tm.assert_frame_equal(result, expected)\n\n # GH8866\n s = Series(\n np.arange(8, dtype=\"int64\"),\n index=MultiIndex.from_product(\n [list(\"ab\"), range(2), date_range(\"20130101\", periods=2)],\n names=[\"one\", \"two\", \"three\"],\n ),\n )\n result = s.groupby(pd.Grouper(level=\"three\", freq=\"M\")).sum()\n expected = Series(\n [28], index=Index([Timestamp(\"2013-01-31\")], freq=\"M\", name=\"three\")\n )\n tm.assert_series_equal(result, expected)\n\n # just specifying a level breaks\n result = s.groupby(pd.Grouper(level=\"one\")).sum()\n expected = s.groupby(level=\"one\").sum()\n tm.assert_series_equal(result, expected)\n\n def test_grouper_column_and_index(self):\n # GH 14327\n\n # Grouping a multi-index frame by a column and an index level should\n # be equivalent to resetting the index and grouping by two columns\n idx = MultiIndex.from_tuples(\n [(\"a\", 1), (\"a\", 2), (\"a\", 3), (\"b\", 1), (\"b\", 2), (\"b\", 3)]\n )\n idx.names = [\"outer\", \"inner\"]\n df_multi = DataFrame(\n {\"A\": np.arange(6), \"B\": [\"one\", \"one\", \"two\", \"two\", \"one\", \"one\"]},\n index=idx,\n )\n result = df_multi.groupby([\"B\", pd.Grouper(level=\"inner\")]).mean()\n expected = df_multi.reset_index().groupby([\"B\", \"inner\"]).mean()\n tm.assert_frame_equal(result, expected)\n\n # Test the reverse grouping order\n result = df_multi.groupby([pd.Grouper(level=\"inner\"), \"B\"]).mean()\n expected = df_multi.reset_index().groupby([\"inner\", \"B\"]).mean()\n tm.assert_frame_equal(result, expected)\n\n # Grouping a single-index frame by a column and the index should\n # be equivalent to resetting the index and grouping by two columns\n df_single = df_multi.reset_index(\"outer\")\n result = df_single.groupby([\"B\", pd.Grouper(level=\"inner\")]).mean()\n expected = df_single.reset_index().groupby([\"B\", \"inner\"]).mean()\n tm.assert_frame_equal(result, expected)\n\n # Test the reverse grouping order\n result = df_single.groupby([pd.Grouper(level=\"inner\"), \"B\"]).mean()\n expected = df_single.reset_index().groupby([\"inner\", \"B\"]).mean()\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_levels_and_columns(self):\n # GH9344, GH9049\n idx_names = [\"x\", \"y\"]\n idx = MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)\n df = DataFrame(np.arange(12).reshape(-1, 3), index=idx)\n\n by_levels = df.groupby(level=idx_names).mean()\n # reset_index changes columns dtype to object\n by_columns = df.reset_index().groupby(idx_names).mean()\n\n tm.assert_frame_equal(by_levels, by_columns, check_column_type=False)\n\n by_columns.columns = Index(by_columns.columns, dtype=np.int64)\n tm.assert_frame_equal(by_levels, 
by_columns)\n\n def test_groupby_categorical_index_and_columns(self, observed):\n # GH18432, adapted for GH25871\n columns = [\"A\", \"B\", \"A\", \"B\"]\n categories = [\"B\", \"A\"]\n data = np.array(\n [[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]], int\n )\n cat_columns = CategoricalIndex(columns, categories=categories, ordered=True)\n df = DataFrame(data=data, columns=cat_columns)\n result = df.groupby(axis=1, level=0, observed=observed).sum()\n expected_data = np.array([[4, 2], [4, 2], [4, 2], [4, 2], [4, 2]], int)\n expected_columns = CategoricalIndex(\n categories, categories=categories, ordered=True\n )\n expected = DataFrame(data=expected_data, columns=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n # test transposed version\n df = DataFrame(data.T, index=cat_columns)\n result = df.groupby(axis=0, level=0, observed=observed).sum()\n expected = DataFrame(data=expected_data.T, index=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_getting_correct_binner(self):\n\n # GH 10063\n # using a non-time-based grouper and a time-based grouper\n # and specifying levels\n df = DataFrame(\n {\"A\": 1},\n index=MultiIndex.from_product(\n [list(\"ab\"), date_range(\"20130101\", periods=80)], names=[\"one\", \"two\"]\n ),\n )\n result = df.groupby(\n [pd.Grouper(level=\"one\"), pd.Grouper(level=\"two\", freq=\"M\")]\n ).sum()\n expected = DataFrame(\n {\"A\": [31, 28, 21, 31, 28, 21]},\n index=MultiIndex.from_product(\n [list(\"ab\"), date_range(\"20130101\", freq=\"M\", periods=3)],\n names=[\"one\", \"two\"],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_grouper_iter(self, df):\n assert sorted(df.groupby(\"A\").grouper) == [\"bar\", \"foo\"]\n\n def test_empty_groups(self, df):\n # see gh-1048\n with pytest.raises(ValueError, match=\"No group keys passed!\"):\n df.groupby([])\n\n def test_groupby_grouper(self, df):\n grouped = df.groupby(\"A\")\n\n result = df.groupby(grouped.grouper).mean()\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_dict_mapping(self):\n # GH #679\n from pandas import Series\n\n s = Series({\"T1\": 5})\n result = s.groupby({\"T1\": \"T2\"}).agg(sum)\n expected = s.groupby([\"T2\"]).agg(sum)\n tm.assert_series_equal(result, expected)\n\n s = Series([1.0, 2.0, 3.0, 4.0], index=list(\"abcd\"))\n mapping = {\"a\": 0, \"b\": 0, \"c\": 1, \"d\": 1}\n\n result = s.groupby(mapping).mean()\n result2 = s.groupby(mapping).agg(np.mean)\n expected = s.groupby([0, 0, 1, 1]).mean()\n expected2 = s.groupby([0, 0, 1, 1]).mean()\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result, result2)\n tm.assert_series_equal(result, expected2)\n\n def test_groupby_grouper_f_sanity_checked(self):\n dates = date_range(\"01-Jan-2013\", periods=12, freq=\"MS\")\n ts = Series(np.random.randn(12), index=dates)\n\n # GH3035\n # index.map is used to apply grouper to the index\n # if it fails on the elements, map tries it on the entire index as\n # a sequence. 
That can yield invalid results that cause trouble\n # down the line.\n # the surprise comes from using key[0:6] rather than str(key)[0:6]\n # when the elements are Timestamp.\n # the result is Index[0:6], very confusing.\n\n msg = r\"Grouper result violates len\\(labels\\) == len\\(data\\)\"\n with pytest.raises(AssertionError, match=msg):\n ts.groupby(lambda key: key[0:6])\n\n def test_grouping_error_on_multidim_input(self, df):\n msg = \"Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional\"\n with pytest.raises(ValueError, match=msg):\n Grouping(df.index, df[[\"A\", \"A\"]])\n\n def test_multiindex_passthru(self):\n\n # GH 7997\n # regression from 0.14.1\n df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n df.columns = MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)])\n\n result = df.groupby(axis=1, level=[0, 1]).first()\n tm.assert_frame_equal(result, df)\n\n def test_multiindex_negative_level(self, mframe):\n # GH 13901\n result = mframe.groupby(level=-1).sum()\n expected = mframe.groupby(level=\"second\").sum()\n tm.assert_frame_equal(result, expected)\n\n result = mframe.groupby(level=-2).sum()\n expected = mframe.groupby(level=\"first\").sum()\n tm.assert_frame_equal(result, expected)\n\n result = mframe.groupby(level=[-2, -1]).sum()\n expected = mframe\n tm.assert_frame_equal(result, expected)\n\n result = mframe.groupby(level=[-1, \"first\"]).sum()\n expected = mframe.groupby(level=[\"second\", \"first\"]).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_multifunc_select_col_integer_cols(self, df):\n df.columns = np.arange(len(df.columns))\n\n # it works!\n df.groupby(1, as_index=False)[2].agg({\"Q\": np.mean})\n\n def test_multiindex_columns_empty_level(self):\n lst = [[\"count\", \"values\"], [\"to filter\", \"\"]]\n midx = MultiIndex.from_tuples(lst)\n\n df = DataFrame([[1, \"A\"]], columns=midx)\n\n grouped = df.groupby(\"to filter\").groups\n assert grouped[\"A\"] == [0]\n\n grouped = df.groupby([(\"to filter\", \"\")]).groups\n assert grouped[\"A\"] == [0]\n\n df = DataFrame([[1, \"A\"], [2, \"B\"]], columns=midx)\n\n expected = df.groupby(\"to filter\").groups\n result = df.groupby([(\"to filter\", \"\")]).groups\n assert result == expected\n\n df = DataFrame([[1, \"A\"], [2, \"A\"]], columns=midx)\n\n expected = df.groupby(\"to filter\").groups\n result = df.groupby([(\"to filter\", \"\")]).groups\n tm.assert_dict_equal(result, expected)\n\n def test_groupby_multiindex_tuple(self):\n # GH 17979\n df = DataFrame(\n [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],\n columns=MultiIndex.from_arrays([[\"a\", \"b\", \"b\", \"c\"], [1, 1, 2, 2]]),\n )\n expected = df.groupby([(\"b\", 1)]).groups\n result = df.groupby((\"b\", 1)).groups\n tm.assert_dict_equal(expected, result)\n\n df2 = DataFrame(\n df.values,\n columns=MultiIndex.from_arrays(\n [[\"a\", \"b\", \"b\", \"c\"], [\"d\", \"d\", \"e\", \"e\"]]\n ),\n )\n expected = df2.groupby([(\"b\", \"d\")]).groups\n result = df.groupby((\"b\", 1)).groups\n tm.assert_dict_equal(expected, result)\n\n df3 = DataFrame(df.values, columns=[(\"a\", \"d\"), (\"b\", \"d\"), (\"b\", \"e\"), \"c\"])\n expected = df3.groupby([(\"b\", \"d\")]).groups\n result = df.groupby((\"b\", 1)).groups\n tm.assert_dict_equal(expected, result)\n\n @pytest.mark.parametrize(\"sort\", [True, False])\n def test_groupby_level(self, sort, mframe, df):\n # GH 17537\n frame = mframe\n deleveled = frame.reset_index()\n\n result0 = frame.groupby(level=0, sort=sort).sum()\n result1 = frame.groupby(level=1, sort=sort).sum()\n\n expected0 = 
frame.groupby(deleveled[\"first\"].values, sort=sort).sum()\n expected1 = frame.groupby(deleveled[\"second\"].values, sort=sort).sum()\n\n expected0.index.name = \"first\"\n expected1.index.name = \"second\"\n\n assert result0.index.name == \"first\"\n assert result1.index.name == \"second\"\n\n tm.assert_frame_equal(result0, expected0)\n tm.assert_frame_equal(result1, expected1)\n assert result0.index.name == frame.index.names[0]\n assert result1.index.name == frame.index.names[1]\n\n # groupby level name\n result0 = frame.groupby(level=\"first\", sort=sort).sum()\n result1 = frame.groupby(level=\"second\", sort=sort).sum()\n tm.assert_frame_equal(result0, expected0)\n tm.assert_frame_equal(result1, expected1)\n\n # axis=1\n\n result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum()\n result1 = frame.T.groupby(level=1, axis=1, sort=sort).sum()\n tm.assert_frame_equal(result0, expected0.T)\n tm.assert_frame_equal(result1, expected1.T)\n\n # raise exception for non-MultiIndex\n msg = \"level > 0 or level < -1 only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n df.groupby(level=1)\n\n def test_groupby_level_index_names(self, axis):\n # GH4014 this used to raise ValueError since 'exp'>1 (in py2)\n df = DataFrame({\"exp\": [\"A\"] * 3 + [\"B\"] * 3, \"var1\": range(6)}).set_index(\n \"exp\"\n )\n if axis in (1, \"columns\"):\n df = df.T\n df.groupby(level=\"exp\", axis=axis)\n msg = f\"level name foo is not the name of the {df._get_axis_name(axis)}\"\n with pytest.raises(ValueError, match=msg):\n df.groupby(level=\"foo\", axis=axis)\n\n @pytest.mark.parametrize(\"sort\", [True, False])\n def test_groupby_level_with_nas(self, sort):\n # GH 17537\n index = MultiIndex(\n levels=[[1, 0], [0, 1, 2, 3]],\n codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],\n )\n\n # factorizing doesn't confuse things\n s = Series(np.arange(8.0), index=index)\n result = s.groupby(level=0, sort=sort).sum()\n expected = Series([6.0, 22.0], index=[0, 1])\n tm.assert_series_equal(result, expected)\n\n index = MultiIndex(\n levels=[[1, 0], [0, 1, 2, 3]],\n codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],\n )\n\n # factorizing doesn't confuse things\n s = Series(np.arange(8.0), index=index)\n result = s.groupby(level=0, sort=sort).sum()\n expected = Series([6.0, 18.0], index=[0.0, 1.0])\n tm.assert_series_equal(result, expected)\n\n def test_groupby_args(self, mframe):\n # PR8618 and issue 8015\n frame = mframe\n\n msg = \"You have to supply one of 'by' and 'level'\"\n with pytest.raises(TypeError, match=msg):\n frame.groupby()\n\n msg = \"You have to supply one of 'by' and 'level'\"\n with pytest.raises(TypeError, match=msg):\n frame.groupby(by=None, level=None)\n\n @pytest.mark.parametrize(\n \"sort,labels\",\n [\n [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],\n [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]],\n ],\n )\n def test_level_preserve_order(self, sort, labels, mframe):\n # GH 17537\n grouped = mframe.groupby(level=0, sort=sort)\n exp_labels = np.array(labels, np.intp)\n tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels)\n\n def test_grouping_labels(self, mframe):\n grouped = mframe.groupby(mframe.index.get_level_values(0))\n exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)\n tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels)\n\n def test_list_grouper_with_nat(self):\n # GH 14715\n df = DataFrame({\"date\": pd.date_range(\"1/1/2011\", periods=365, freq=\"D\")})\n df.iloc[-1] = pd.NaT\n grouper = pd.Grouper(key=\"date\", freq=\"AS\")\n\n # 
Grouper in a list grouping\n result = df.groupby([grouper])\n expected = {pd.Timestamp(\"2011-01-01\"): Index(list(range(364)))}\n tm.assert_dict_equal(result.groups, expected)\n\n # Test case without a list\n result = df.groupby(grouper)\n expected = {pd.Timestamp(\"2011-01-01\"): 365}\n tm.assert_dict_equal(result.groups, expected)\n\n @pytest.mark.parametrize(\n \"func,expected\",\n [\n (\n \"transform\",\n Series(name=2, dtype=np.float64, index=pd.RangeIndex(0, 0, 1)),\n ),\n (\n \"agg\",\n Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),\n ),\n (\n \"apply\",\n Series(name=2, dtype=np.float64, index=pd.Float64Index([], name=1)),\n ),\n ],\n )\n def test_evaluate_with_empty_groups(self, func, expected):\n # 26208\n # test transform'ing empty groups\n # (not testing other agg fns, because they return\n # different index objects.\n df = DataFrame({1: [], 2: []})\n g = df.groupby(1)\n result = getattr(g[2], func)(lambda x: x)\n tm.assert_series_equal(result, expected)\n\n def test_groupby_empty(self):\n # https://github.com/pandas-dev/pandas/issues/27190\n s = Series([], name=\"name\", dtype=\"float64\")\n gr = s.groupby([])\n\n result = gr.mean()\n tm.assert_series_equal(result, s)\n\n # check group properties\n assert len(gr.grouper.groupings) == 1\n tm.assert_numpy_array_equal(\n gr.grouper.group_info[0], np.array([], dtype=np.dtype(\"int64\"))\n )\n\n tm.assert_numpy_array_equal(\n gr.grouper.group_info[1], np.array([], dtype=np.dtype(\"int\"))\n )\n\n assert gr.grouper.group_info[2] == 0\n\n # check name\n assert s.groupby(s).grouper.names == [\"name\"]\n\n def test_groupby_level_index_value_all_na(self):\n # issue 20519\n df = DataFrame(\n [[\"x\", np.nan, 10], [None, np.nan, 20]], columns=[\"A\", \"B\", \"C\"]\n ).set_index([\"A\", \"B\"])\n result = df.groupby(level=[\"A\", \"B\"]).sum()\n expected = DataFrame(\n data=[],\n index=MultiIndex(\n levels=[Index([\"x\"], dtype=\"object\"), Index([], dtype=\"float64\")],\n codes=[[], []],\n names=[\"A\", \"B\"],\n ),\n columns=[\"C\"],\n dtype=\"int64\",\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_multiindex_level_empty(self):\n # https://github.com/pandas-dev/pandas/issues/31670\n df = DataFrame(\n [[123, \"a\", 1.0], [123, \"b\", 2.0]], columns=[\"id\", \"category\", \"value\"]\n )\n df = df.set_index([\"id\", \"category\"])\n empty = df[df.value < 0]\n result = empty.groupby(\"id\").sum()\n expected = DataFrame(\n dtype=\"float64\", columns=[\"value\"], index=pd.Int64Index([], name=\"id\")\n )\n tm.assert_frame_equal(result, expected)\n\n\n# get_group\n# --------------------------------\n\n\nclass TestGetGroup:\n def test_get_group(self):\n # GH 5267\n # be datelike friendly\n df = DataFrame(\n {\n \"DATE\": pd.to_datetime(\n [\n \"10-Oct-2013\",\n \"10-Oct-2013\",\n \"10-Oct-2013\",\n \"11-Oct-2013\",\n \"11-Oct-2013\",\n \"11-Oct-2013\",\n ]\n ),\n \"label\": [\"foo\", \"foo\", \"bar\", \"foo\", \"foo\", \"bar\"],\n \"VAL\": [1, 2, 3, 4, 5, 6],\n }\n )\n\n g = df.groupby(\"DATE\")\n key = list(g.groups)[0]\n result1 = g.get_group(key)\n result2 = g.get_group(Timestamp(key).to_pydatetime())\n result3 = g.get_group(str(Timestamp(key)))\n tm.assert_frame_equal(result1, result2)\n tm.assert_frame_equal(result1, result3)\n\n g = df.groupby([\"DATE\", \"label\"])\n\n key = list(g.groups)[0]\n result1 = g.get_group(key)\n result2 = g.get_group((Timestamp(key[0]).to_pydatetime(), key[1]))\n result3 = g.get_group((str(Timestamp(key[0])), key[1]))\n tm.assert_frame_equal(result1, result2)\n 
tm.assert_frame_equal(result1, result3)\n\n # must pass a same-length tuple with multiple keys\n msg = \"must supply a tuple to get_group with multiple grouping keys\"\n with pytest.raises(ValueError, match=msg):\n g.get_group(\"foo\")\n with pytest.raises(ValueError, match=msg):\n g.get_group(\"foo\")\n msg = \"must supply a same-length tuple to get_group with multiple grouping keys\"\n with pytest.raises(ValueError, match=msg):\n g.get_group((\"foo\", \"bar\", \"baz\"))\n\n def test_get_group_empty_bins(self, observed):\n\n d = DataFrame([3, 1, 7, 6])\n bins = [0, 5, 10, 15]\n g = d.groupby(pd.cut(d[0], bins), observed=observed)\n\n # TODO: should prob allow a str of Interval work as well\n # IOW '(0, 5]'\n result = g.get_group(pd.Interval(0, 5))\n expected = DataFrame([3, 1], index=[0, 1])\n tm.assert_frame_equal(result, expected)\n\n msg = r\"Interval\\(10, 15, closed='right'\\)\"\n with pytest.raises(KeyError, match=msg):\n g.get_group(pd.Interval(10, 15))\n\n def test_get_group_grouped_by_tuple(self):\n # GH 8121\n df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]], index=[\"ids\"]).T\n gr = df.groupby(\"ids\")\n expected = DataFrame({\"ids\": [(1,), (1,)]}, index=[0, 2])\n result = gr.get_group((1,))\n tm.assert_frame_equal(result, expected)\n\n dt = pd.to_datetime([\"2010-01-01\", \"2010-01-02\", \"2010-01-01\", \"2010-01-02\"])\n df = DataFrame({\"ids\": [(x,) for x in dt]})\n gr = df.groupby(\"ids\")\n result = gr.get_group((\"2010-01-01\",))\n expected = DataFrame({\"ids\": [(dt[0],), (dt[0],)]}, index=[0, 2])\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_with_empty(self):\n index = pd.DatetimeIndex(())\n data = ()\n series = Series(data, index, dtype=object)\n grouper = pd.Grouper(freq=\"D\")\n grouped = series.groupby(grouper)\n assert next(iter(grouped), None) is None\n\n def test_groupby_with_single_column(self):\n df = DataFrame({\"a\": list(\"abssbab\")})\n tm.assert_frame_equal(df.groupby(\"a\").get_group(\"a\"), df.iloc[[0, 5]])\n # GH 13530\n exp = DataFrame(index=Index([\"a\", \"b\", \"s\"], name=\"a\"))\n tm.assert_frame_equal(df.groupby(\"a\").count(), exp)\n tm.assert_frame_equal(df.groupby(\"a\").sum(), exp)\n tm.assert_frame_equal(df.groupby(\"a\").nth(1), exp)\n\n def test_gb_key_len_equal_axis_len(self):\n # GH16843\n # test ensures that index and column keys are recognized correctly\n # when number of keys equals axis length of groupby\n df = DataFrame(\n [[\"foo\", \"bar\", \"B\", 1], [\"foo\", \"bar\", \"B\", 2], [\"foo\", \"baz\", \"C\", 3]],\n columns=[\"first\", \"second\", \"third\", \"one\"],\n )\n df = df.set_index([\"first\", \"second\"])\n df = df.groupby([\"first\", \"second\", \"third\"]).size()\n assert df.loc[(\"foo\", \"bar\", \"B\")] == 2\n assert df.loc[(\"foo\", \"baz\", \"C\")] == 1\n\n\n# groups & iteration\n# --------------------------------\n\n\nclass TestIteration:\n def test_groups(self, df):\n grouped = df.groupby([\"A\"])\n groups = grouped.groups\n assert groups is grouped.groups # caching works\n\n for k, v in grouped.groups.items():\n assert (df.loc[v][\"A\"] == k).all()\n\n grouped = df.groupby([\"A\", \"B\"])\n groups = grouped.groups\n assert groups is grouped.groups # caching works\n\n for k, v in grouped.groups.items():\n assert (df.loc[v][\"A\"] == k[0]).all()\n assert (df.loc[v][\"B\"] == k[1]).all()\n\n def test_grouping_is_iterable(self, tsframe):\n # this code path isn't used anywhere else\n # not sure it's useful\n grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year])\n\n # test it works\n for g 
in grouped.grouper.groupings[0]:\n pass\n\n def test_multi_iter(self):\n s = Series(np.arange(6))\n k1 = np.array([\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"])\n k2 = np.array([\"1\", \"2\", \"1\", \"2\", \"1\", \"2\"])\n\n grouped = s.groupby([k1, k2])\n\n iterated = list(grouped)\n expected = [\n (\"a\", \"1\", s[[0, 2]]),\n (\"a\", \"2\", s[[1]]),\n (\"b\", \"1\", s[[4]]),\n (\"b\", \"2\", s[[3, 5]]),\n ]\n for i, ((one, two), three) in enumerate(iterated):\n e1, e2, e3 = expected[i]\n assert e1 == one\n assert e2 == two\n tm.assert_series_equal(three, e3)\n\n def test_multi_iter_frame(self, three_group):\n k1 = np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"])\n k2 = np.array([\"1\", \"2\", \"1\", \"2\", \"1\", \"2\"])\n df = DataFrame(\n {\"v1\": np.random.randn(6), \"v2\": np.random.randn(6), \"k1\": k1, \"k2\": k2},\n index=[\"one\", \"two\", \"three\", \"four\", \"five\", \"six\"],\n )\n\n grouped = df.groupby([\"k1\", \"k2\"])\n\n # things get sorted!\n iterated = list(grouped)\n idx = df.index\n expected = [\n (\"a\", \"1\", df.loc[idx[[4]]]),\n (\"a\", \"2\", df.loc[idx[[3, 5]]]),\n (\"b\", \"1\", df.loc[idx[[0, 2]]]),\n (\"b\", \"2\", df.loc[idx[[1]]]),\n ]\n for i, ((one, two), three) in enumerate(iterated):\n e1, e2, e3 = expected[i]\n assert e1 == one\n assert e2 == two\n tm.assert_frame_equal(three, e3)\n\n # don't iterate through groups with no data\n df[\"k1\"] = np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"])\n df[\"k2\"] = np.array([\"1\", \"1\", \"1\", \"2\", \"2\", \"2\"])\n grouped = df.groupby([\"k1\", \"k2\"])\n groups = {key: gp for key, gp in grouped}\n assert len(groups) == 2\n\n # axis = 1\n three_levels = three_group.groupby([\"A\", \"B\", \"C\"]).mean()\n grouped = three_levels.T.groupby(axis=1, level=(1, 2))\n for key, group in grouped:\n pass\n\n def test_dictify(self, df):\n dict(iter(df.groupby(\"A\")))\n dict(iter(df.groupby([\"A\", \"B\"])))\n dict(iter(df[\"C\"].groupby(df[\"A\"])))\n dict(iter(df[\"C\"].groupby([df[\"A\"], df[\"B\"]])))\n dict(iter(df.groupby(\"A\")[\"C\"]))\n dict(iter(df.groupby([\"A\", \"B\"])[\"C\"]))\n\n def test_groupby_with_small_elem(self):\n # GH 8542\n # length=2\n df = DataFrame(\n {\"event\": [\"start\", \"start\"], \"change\": [1234, 5678]},\n index=pd.DatetimeIndex([\"2014-09-10\", \"2013-10-10\"]),\n )\n grouped = df.groupby([pd.Grouper(freq=\"M\"), \"event\"])\n assert len(grouped.groups) == 2\n assert grouped.ngroups == 2\n assert (pd.Timestamp(\"2014-09-30\"), \"start\") in grouped.groups\n assert (pd.Timestamp(\"2013-10-31\"), \"start\") in grouped.groups\n\n res = grouped.get_group((pd.Timestamp(\"2014-09-30\"), \"start\"))\n tm.assert_frame_equal(res, df.iloc[[0], :])\n res = grouped.get_group((pd.Timestamp(\"2013-10-31\"), \"start\"))\n tm.assert_frame_equal(res, df.iloc[[1], :])\n\n df = DataFrame(\n {\"event\": [\"start\", \"start\", \"start\"], \"change\": [1234, 5678, 9123]},\n index=pd.DatetimeIndex([\"2014-09-10\", \"2013-10-10\", \"2014-09-15\"]),\n )\n grouped = df.groupby([pd.Grouper(freq=\"M\"), \"event\"])\n assert len(grouped.groups) == 2\n assert grouped.ngroups == 2\n assert (pd.Timestamp(\"2014-09-30\"), \"start\") in grouped.groups\n assert (pd.Timestamp(\"2013-10-31\"), \"start\") in grouped.groups\n\n res = grouped.get_group((pd.Timestamp(\"2014-09-30\"), \"start\"))\n tm.assert_frame_equal(res, df.iloc[[0, 2], :])\n res = grouped.get_group((pd.Timestamp(\"2013-10-31\"), \"start\"))\n tm.assert_frame_equal(res, df.iloc[[1], :])\n\n # length=3\n df = DataFrame(\n {\"event\": [\"start\", 
\"start\", \"start\"], \"change\": [1234, 5678, 9123]},\n index=pd.DatetimeIndex([\"2014-09-10\", \"2013-10-10\", \"2014-08-05\"]),\n )\n grouped = df.groupby([pd.Grouper(freq=\"M\"), \"event\"])\n assert len(grouped.groups) == 3\n assert grouped.ngroups == 3\n assert (pd.Timestamp(\"2014-09-30\"), \"start\") in grouped.groups\n assert (pd.Timestamp(\"2013-10-31\"), \"start\") in grouped.groups\n assert (pd.Timestamp(\"2014-08-31\"), \"start\") in grouped.groups\n\n res = grouped.get_group((pd.Timestamp(\"2014-09-30\"), \"start\"))\n tm.assert_frame_equal(res, df.iloc[[0], :])\n res = grouped.get_group((pd.Timestamp(\"2013-10-31\"), \"start\"))\n tm.assert_frame_equal(res, df.iloc[[1], :])\n res = grouped.get_group((pd.Timestamp(\"2014-08-31\"), \"start\"))\n tm.assert_frame_equal(res, df.iloc[[2], :])\n\n def test_grouping_string_repr(self):\n # GH 13394\n mi = MultiIndex.from_arrays([list(\"AAB\"), list(\"aba\")])\n df = DataFrame([[1, 2, 3]], columns=mi)\n gr = df.groupby(df[(\"A\", \"a\")])\n\n result = gr.grouper.groupings[0].__repr__()\n expected = \"Grouping(('A', 'a'))\"\n assert result == expected\n" ]
[ [ "pandas._testing.assert_almost_equal", "pandas.to_datetime", "pandas.Series", "pandas.RangeIndex", "pandas._testing.assert_dict_equal", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.dtype", "numpy.random.randn", "pandas._testing.assert_frame_equal", "numpy.random.randint", "numpy.arange", "pandas.Index", "pandas.DatetimeIndex", "pandas.cut", "pandas.Int64Index", "pandas._testing.assert_series_equal", "pandas.core.groupby.grouper.Grouping", "pandas._testing.assert_produces_warning", "pandas.MultiIndex", "pandas.Float64Index", "pandas.MultiIndex.from_product", "pandas.Interval", "pandas.date_range", "numpy.array", "pandas.CategoricalIndex", "pandas.Grouper", "pandas.MultiIndex.from_arrays", "pandas.Timestamp" ] ]
fun-math/Autumn-of-Automation
[ "08c04510f3500ac335f5c830ce3fbabb9c3fa05c" ]
[ "OpenCV/Q1,3.py" ]
[ "import cv2\nimport numpy as np\n\nimg=cv2.imread(\"meme.jpg\",1)\nrows,cols,channels=img.shape\n\nimg_hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\nmask=cv2.inRange(img_hsv,(-5,50,70),(5,255,255))\nmask_inv=cv2.bitwise_not(mask)\n\ndst_bg=cv2.bitwise_and(img,img,mask=mask_inv)\nblue=np.zeros([rows,cols,channels])\nblue[:,:,0]=255*np.ones([rows,cols])\nblue=blue.astype(np.uint8)\n\ndst_fg=cv2.bitwise_and(blue,blue,mask=mask)\ndst=cv2.add(dst_bg,dst_fg)\n\ncv2.imshow('mask',mask)\ncv2.imshow('result',dst)\ncv2.imshow('original',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()" ]
[ [ "numpy.zeros", "numpy.ones" ] ]
GalDude33/nmt2
[ "de602d04f9bea96ee099b0c23906211bb436ad90" ]
[ "nmt/utils/nmt_utils.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utility functions specifically for NMT.\"\"\"\nfrom __future__ import print_function\n\nimport codecs\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom ..utils import evaluation_utils\nfrom ..utils import misc_utils as utils\n\n__all__ = [\"decode_and_evaluate\", \"get_translation\"]\n\n\ndef decode_and_evaluate(name,\n model,\n sess,\n trans_file,\n src_ref_file,\n tgt_ref_file,\n metrics,\n subword_option,\n beam_width,\n tgt_eos,\n src_eos,\n hparams,\n num_translations_per_input=1,\n decode=True):\n \"\"\"Decode a test set and compute a score according to the evaluation task.\"\"\"\n # Decode\n if decode:\n utils.print_out(\" decoding to output %s.\" % trans_file)\n\n start_time = time.time()\n num_sentences = 0\n with codecs.getwriter(\"utf-8\")(\n tf.gfile.GFile(trans_file+'.'+hparams.src, mode=\"wb\")) as src_trans_f:\n with codecs.getwriter(\"utf-8\")(\n tf.gfile.GFile(trans_file+'.'+hparams.tgt, mode=\"wb\")) as tgt_trans_f:\n src_trans_f.write(\"\") # Write empty string to ensure file is created.\n tgt_trans_f.write(\"\") # Write empty string to ensure file is created.\n\n num_translations_per_input = max(\n min(num_translations_per_input, beam_width), 1)\n while True:\n try:\n (nmt_outputs_src, _), (nmt_outputs_tgt, _) = model.decode_cross(sess)\n if beam_width == 0:\n nmt_outputs_src = np.expand_dims(nmt_outputs_src, 0)\n\n batch_size = nmt_outputs_src.shape[1]\n num_sentences += batch_size\n\n for sent_id in range(batch_size):\n for beam_id in range(num_translations_per_input):\n src_translation = get_translation(\n nmt_outputs_src[beam_id],\n sent_id,\n tgt_eos=src_eos,\n subword_option=subword_option)\n src_trans_f.write((src_translation + b\"\\n\").decode(\"utf-8\"))\n tgt_translation = get_translation(\n nmt_outputs_tgt[beam_id],\n sent_id,\n tgt_eos=tgt_eos,\n subword_option=subword_option)\n tgt_trans_f.write((tgt_translation + b\"\\n\").decode(\"utf-8\"))\n except tf.errors.OutOfRangeError:\n utils.print_time(\n \" done, num sentences %d, num translations per input %d\" %\n (num_sentences, num_translations_per_input), start_time)\n break\n\n # Evaluation\n evaluation_scores = {}\n if src_ref_file and tf.gfile.Exists(trans_file):\n for metric in metrics:\n score = evaluation_utils.evaluate(\n src_ref_file,\n src_trans_file,\n metric,\n subword_option=subword_option)\n evaluation_scores[metric] = score\n utils.print_out(\" %s %s: %.1f\" % (metric, name, score))\n\n return evaluation_scores\n\n\ndef get_translation(nmt_outputs, sent_id, tgt_eos, subword_option):\n \"\"\"Given batch decoding outputs, select a sentence and turn to text.\"\"\"\n if tgt_eos: tgt_eos = tgt_eos.encode(\"utf-8\")\n # Select a sentence\n output = nmt_outputs[sent_id, :].tolist()\n\n # If there is an eos symbol in outputs, cut them at that point.\n if tgt_eos and tgt_eos in output:\n output = 
output[:output.index(tgt_eos)]\n\n if subword_option == \"bpe\": # BPE\n translation = utils.format_bpe_text(output)\n elif subword_option == \"spm\": # SPM\n translation = utils.format_spm_text(output)\n else:\n translation = utils.format_text(output)\n\n return translation\n" ]
[ [ "tensorflow.gfile.Exists", "tensorflow.gfile.GFile", "numpy.expand_dims" ] ]
g-guichard/IT-Blog
[ "f4ee2c215ff1a5536178e6e3c90acce3fc646935" ]
[ "samples/DataScience/link-prediction-in-large-scale-networks/data_processing.py" ]
[ "\"\"\" Data processing module. \"\"\"\r\n\r\n# Imports\r\n\r\nimport networkx as nx\r\nfrom networkit import linkprediction as lp, nxadapter\r\nfrom functools import partial\r\nimport pandas as pd\r\n\r\n\r\n# Define local functions\r\n\r\ndef assign_label(pair, graph):\r\n u, v = pair[0], pair[1]\r\n return (int(graph.hasEdge(u, v)))\r\n\r\n\r\ndef concatenate(node_set, label):\r\n dataset = pd.DataFrame({'nodes': node_set, 'label': label})\r\n return dataset\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Create training and testing graphs, compute feature engineering\r\n and save datasets.\r\n \"\"\"\r\n\r\n # Graph import\r\n G = nx.read_edgelist('data/graph.txt', comments='#')\r\n valid_graph = nxadapter.nx2nk(G)\r\n\r\n # Training and test graphs creation\r\n test_graph = lp.RandomLinkSampler.byPercentage(valid_graph, 0.9)\r\n train_graph = lp.RandomLinkSampler.byPercentage(test_graph, 0.7)\r\n\r\n # Training and testing sets creation\r\n testing_set = lp.MissingLinksFinder(test_graph).findAtDistance(2)\r\n training_set = lp.MissingLinksFinder(train_graph).findAtDistance(2)\r\n\r\n # Label creation\r\n y_train = list(map(partial(assign_label, graph=test_graph), training_set))\r\n y_test = list(map(partial(assign_label, graph=valid_graph), testing_set))\r\n\r\n # Concatenation of labels with samples\r\n train = concatenate(training_set, y_train)\r\n test = concatenate(testing_set, y_test)\r\n trainingSet = train.nodes.values\r\n testingSet = test.nodes.values\r\n\r\n # Feature engineering\r\n trainLPs = [\r\n lp.CommonNeighborsIndex(train_graph), lp.JaccardIndex(train_graph),\r\n lp.AdamicAdarIndex(train_graph), lp.ResourceAllocationIndex(train_graph),\r\n lp.PreferentialAttachmentIndex(train_graph), lp.AdjustedRandIndex(train_graph),\r\n lp.NeighborhoodDistanceIndex(train_graph), lp.TotalNeighborsIndex(train_graph),\r\n lp.SameCommunityIndex(train_graph), lp.UDegreeIndex(train_graph),\r\n lp.VDegreeIndex(train_graph)\r\n ]\r\n\r\n testLPs = [\r\n lp.CommonNeighborsIndex(test_graph), lp.JaccardIndex(test_graph),\r\n lp.AdamicAdarIndex(test_graph), lp.ResourceAllocationIndex(test_graph),\r\n lp.PreferentialAttachmentIndex(test_graph), lp.AdjustedRandIndex(test_graph),\r\n lp.NeighborhoodDistanceIndex(test_graph), lp.TotalNeighborsIndex(test_graph),\r\n lp.SameCommunityIndex(test_graph), lp.UDegreeIndex(test_graph), lp.VDegreeIndex(test_graph)\r\n ]\r\n\r\n X_train = lp.getFeatures(trainingSet, *trainLPs)\r\n X_test = lp.getFeatures(testingSet, *testLPs)\r\n\r\n # Concatenate features with samples and labels\r\n features = ['CN', 'JC', 'AA', 'RA', 'PA', 'AR', 'ND', 'TN', 'SC', 'UD', 'VD']\r\n train_features = pd.DataFrame(X_train, columns=features)\r\n test_features = pd.DataFrame(X_test, columns=features)\r\n train = pd.concat([train, train_features], axis=1)\r\n test = pd.concat([test, test_features], axis=1)\r\n\r\n # Export files as csv\r\n train.to_csv('data/train.csv', sep=';', header=True, decimal='.', encoding='utf-8', index=False)\r\n test.to_csv('data/test.csv', sep=';', header=True, decimal='.', encoding='utf-8', index=False)\r\n\r\n\r\nif __name__ == \"main\":\r\n main()\r\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
Eshikamahajan/Airlines-Sentiment-Analysis-
[ "9790b4311c717cf3d223649b78403748415e771a" ]
[ "sentiment.py" ]
[ "import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nfrom wordcloud import WordCloud, STOPWORDS\nimport matplotlib.pyplot as plt\n\n#If required to read a data online , replace the existing URL with the URL of the online dataset\nDATA_URL = (\n \"E:/python coding/coursera/dashboard/my/Tweets.csv\"\n)\n\nst.title(\"Sentiment Analysis of Tweets about US Airlines\")\nst.sidebar.title(\"Sentiment Analysis of Tweets\")\nst.markdown(\"This application is a Streamlit dashboard used \"\n \"to analyze sentiments of tweets 🐦\")\nst.sidebar.markdown(\"This application is a Streamlit dashboard used \"\n \"to analyze sentiments of tweets 🐦\")\n\[email protected](persist=True) #To use cache data and prevent the data reloading every time slight changes are made in the code\ndef load_data():\n data = pd.read_csv(DATA_URL)\n data['tweet_created'] = pd.to_datetime(data['tweet_created']) #converting the date in the dataset to proper dat time format\n return data\n\ndata = load_data()\n#st.write(data) #Display whole dataset in the main window\nst.sidebar.subheader(\"Show random tweet\")\nrandom_tweet = st.sidebar.radio('Sentiment', ('positive', 'neutral', 'negative')) #creating radio buttons in the sidebar \nst.sidebar.markdown(data.query(\"airline_sentiment == @random_tweet\")[[\"text\"]].sample(n=1).iat[0, 0]) #displaying one feedback with sentiment = the one selected as radio button\n\n\n#plotting no of sentiments by counts using bar graph and pie chart\nst.sidebar.markdown(\"### Number of tweets by sentiment\")\nselect = st.sidebar.selectbox('Visualization type', ['Bar plot', 'Pie chart'], key='1')\nsentiment_count = data['airline_sentiment'].value_counts()\nsentiment_count = pd.DataFrame({'Sentiment':sentiment_count.index, 'Tweets':sentiment_count.values})\n#a checkbox to hide and show the created visualisations\nif not st.sidebar.checkbox(\"Hide\", True):\n st.markdown(\"### Number of tweets by sentiment\")\n if select == 'Bar plot':\n fig = px.bar(sentiment_count, x='Sentiment', y='Tweets', color='Tweets', height=500)\n st.plotly_chart(fig)\n else:\n fig = px.pie(sentiment_count, values='Tweets', names='Sentiment')\n st.plotly_chart(fig)\n\n#Plotting the feedback by their location\t\t\nst.sidebar.subheader(\"When and where are users tweeting from?\")\nhour = st.sidebar.slider(\"Hour to look at\", 0, 23)\nmodified_data = data[data['tweet_created'].dt.hour == hour]\n#a checkbox to hide and show the created visualisations\n\nif not st.sidebar.checkbox(\"Close\", True, key='1'):\n st.markdown(\"### Tweet locations based on time of day\")\n st.markdown(\"%i tweets between %i:00 and %i:00\" % (len(modified_data), hour, (hour + 1) % 24))\n st.map(modified_data)\n if st.sidebar.checkbox(\"Show raw data\", False):\n st.write(modified_data)\n\n#Plotting the feedback count of each airline \nst.sidebar.subheader(\"Total number of tweets for each airline\")\neach_airline = st.sidebar.selectbox('Visualization type', ['Bar plot', 'Pie chart'], key='2')\nairline_sentiment_count = data.groupby('airline')['airline_sentiment'].count().sort_values(ascending=False)\nairline_sentiment_count = pd.DataFrame({'Airline':airline_sentiment_count.index, 'Tweets':airline_sentiment_count.values.flatten()})\n#creating a checkbox to hide and show the created visualisations\nif not st.sidebar.checkbox(\"Close\", True, key='2'):\n if each_airline == 'Bar plot':\n st.subheader(\"Total number of tweets for each 
airline\")\n fig_1 = px.bar(airline_sentiment_count, x='Airline', y='Tweets', color='Tweets', height=500)\n st.plotly_chart(fig_1)\n if each_airline == 'Pie chart':\n st.subheader(\"Total number of tweets for each airline\")\n fig_2 = px.pie(airline_sentiment_count, values='Tweets', names='Airline')\n st.plotly_chart(fig_2)\n\n\n\[email protected](persist=True)\n\n#Function for plotting the feedback count of airline selected in a the multiselect box using streamlit as a bar plot or a pie chart\n\ndef plot_sentiment(airline):\n df = data[data['airline']==airline]\n count = df['airline_sentiment'].value_counts()\n count = pd.DataFrame({'Sentiment':count.index, 'Tweets':count.values.flatten()})\n return count\n\n\nst.sidebar.subheader(\"Breakdown airline by sentiment\")\nchoice = st.sidebar.multiselect('Pick airlines', ('US Airways','United','American','Southwest','Delta','Virgin America'))\nif len(choice) > 0:\n st.subheader(\"Breakdown airline by sentiment\")\n breakdown_type = st.sidebar.selectbox('Visualization type', ['Pie chart', 'Bar plot', ], key='3')\n fig_3 = make_subplots(rows=1, cols=len(choice), subplot_titles=choice)\n if breakdown_type == 'Bar plot':\n for i in range(1):\n for j in range(len(choice)):\n fig_3.add_trace(\n go.Bar(x=plot_sentiment(choice[j]).Sentiment, y=plot_sentiment(choice[j]).Tweets, showlegend=False),\n row=i+1, col=j+1\n )\n fig_3.update_layout(height=600, width=800)\n st.plotly_chart(fig_3)\n else:\n fig_3 = make_subplots(rows=1, cols=len(choice), specs=[[{'type':'domain'}]*len(choice)], subplot_titles=choice)\n for i in range(1):\n for j in range(len(choice)):\n fig_3.add_trace(\n go.Pie(labels=plot_sentiment(choice[j]).Sentiment, values=plot_sentiment(choice[j]).Tweets, showlegend=True),\n i+1, j+1\n )\n fig_3.update_layout(height=600, width=800)\n st.plotly_chart(fig_3)\n\n\n\n#Plotting a histogram \nst.sidebar.subheader(\"Breakdown airline by sentiment\")\nchoice = st.sidebar.multiselect('Pick airlines', ('US Airways','United','American','Southwest','Delta','Virgin America'), key=0)\nif len(choice) > 0:\n choice_data = data[data.airline.isin(choice)]\n fig_0 = px.histogram(\n choice_data, x='airline', y='airline_sentiment',\n histfunc='count', color='airline_sentiment',\n facet_col='airline_sentiment', labels={'airline_sentiment':'tweets'},\n height=600, width=800)\n st.plotly_chart(fig_0)\n\n\n#Creating a Wordcloud of the feedback by sentiment type selected by the user\nst.sidebar.header(\"Word Cloud\")\nword_sentiment = st.sidebar.radio('Display word cloud for what sentiment?', ('positive', 'neutral', 'negative'))\nif not st.sidebar.checkbox(\"Close\", True, key='3'):\n st.subheader('Word cloud for %s sentiment' % (word_sentiment))\n df = data[data['airline_sentiment']==word_sentiment]\n words = ' '.join(df['text'])\n processed_words = ' '.join([word for word in words.split() if 'http' not in word and not word.startswith('@') and word != 'RT'])\n wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=800, height=640).generate(processed_words)\n plt.imshow(wordcloud)\n plt.xticks([])\n plt.yticks([])\n st.pyplot()\n" ]
[ [ "matplotlib.pyplot.imshow", "pandas.read_csv", "pandas.to_datetime", "matplotlib.pyplot.yticks", "pandas.DataFrame", "matplotlib.pyplot.xticks" ] ]
Daniel1586/Initiative_RNN_tutorials
[ "a72f3da670e2c89581059334d4f593258dde240b" ]
[ "keras_working_with_text/001_reuters_mlp.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Trains and evaluate a simple MLP on the Reuters newswire topic classification task.\n# 训练并评估一个简单的MLP(对路透社新闻主题分类)\n\nimport keras\nimport numpy as np\nfrom keras.datasets import reuters\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.preprocessing.text import Tokenizer\n\nmax_words = 1000 # vocab大小\nbatch_size = 32 # min-batch size\nepochs = 5 # 循环次数\n\n# 数据集来源路透社新闻专线,共11228条新闻,标记46个类别\n# 每条数据被编码为一条索引序列(索引数字越小,代表单词出现次数越多)\n# num_words: 选取的每条数据里的索引值不能超过num_words\n# test_split: test data所占数据集比例\nprint('========== 1.Loading data...')\n(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words, test_split=0.2)\nprint('----- train sequences', len(x_train))\nprint('----- test sequences', len(x_test))\nnum_classes = np.max(y_train) + 1\nprint('----- classes num', num_classes)\n\n# 对每条词索引组成的数据(train/test)转换为词典长度的0/1值序列(one-hot),若单词出现则为1,否则为0\nprint('========== 2.Vectorizing sequence data...')\ntokenizer = Tokenizer(num_words=max_words) # 只记录max_words数量的单词信息\nx_train = tokenizer.sequences_to_matrix(x_train, mode='binary')\nx_test = tokenizer.sequences_to_matrix(x_test, mode='binary')\nprint('----- x_train shape:', x_train.shape)\nprint('----- x_test shape:', x_test.shape)\n\n# 对每条数据的类别标签(train/test)转换为类别数目的0/1值序列(one-hot)\nprint('Convert class vector to binary class matrix ''(for use with categorical_crossentropy)')\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\nprint('----- y_train shape:', y_train.shape)\nprint('----- y_test shape:', y_test.shape)\n\n# 搭建神经网络模型\nprint('========== 3.Building model...')\nmodel = Sequential()\n# 第一层\nmodel.add(Dense(512, input_shape=(max_words,))) # 输入(*,max_words), 输出(*,512)\nmodel.add(Activation('relu')) # 输出(*,512)\nmodel.add(Dropout(0.5)) # 输出(*,512)\n# 第二层\nmodel.add(Dense(num_classes)) # 输出(*,num_classes)\nmodel.add(Activation('softmax')) # 输出(*,num_classes)\n\n# 损失函数设置,优化函数设置,模型评估性能指标\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()\n\n# 神经网络训练和交叉验证模型性能\nhistory = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1)\n# 测试集性能测试\nscore = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)\nprint('----- Test loss:', score[0])\nprint('----- Test accuracy:', score[1])\n" ]
[ [ "numpy.max" ] ]
MiaoBao/MatchZoo-py
[ "752aa448b6df4a9d62eb7e8461481d9bb1c7cc30", "752aa448b6df4a9d62eb7e8461481d9bb1c7cc30" ]
[ "matchzoo/models/model_versions/bertknrm_ex_v6.py", "matchzoo/models/bertknrm_ex_v5.py" ]
[ "\"\"\"An implementation of Bert Model with knrm output layers with explanable structure \nv6\n- same as v5, but using shared bert for query and document\n- change kernel weight activation to relu\n\"\"\"\nimport typing\n\nimport torch\nimport torch.nn as nn\nfrom pytorch_transformers import BertModel\nimport torch.nn.functional as F\n\nfrom matchzoo import preprocessors\nfrom matchzoo.engine.param_table import ParamTable\nfrom matchzoo.engine.param import Param\nfrom matchzoo.engine.base_model import BaseModel\nfrom matchzoo.engine.base_preprocessor import BasePreprocessor\nfrom matchzoo.engine import hyper_spaces\nfrom matchzoo.dataloader import callbacks\nfrom matchzoo.modules import BertModule\nfrom matchzoo.modules import GaussianKernel\n\n\n\n\nclass BertKNRMex(BaseModel):\n \"\"\"\n Bert Model with knrm output layers and explanable output structure \n\n \"\"\"\n\n @classmethod\n def get_default_params(cls) -> ParamTable:\n \"\"\":return: model default parameters.\"\"\"\n params = super().get_default_params(with_embedding=True)\n params.add(Param(name='mode', value='bert-base-uncased',\n desc=\"Pretrained Bert model.\"))\n params.add(Param(\n 'dropout_rate', 0.0,\n hyper_space=hyper_spaces.quniform(\n low=0.0, high=0.8, q=0.01),\n desc=\"The dropout rate.\"\n )) \n params.add(Param(\n name='kernel_num',\n value=11,\n hyper_space=hyper_spaces.quniform(low=5, high=20),\n desc=\"The number of RBF kernels.\"\n ))\n params.add(Param(\n name='sigma',\n value=0.1,\n hyper_space=hyper_spaces.quniform(\n low=0.01, high=0.2, q=0.01),\n desc=\"The `sigma` defines the kernel width.\"\n ))\n params.add(Param(\n name='exact_sigma', value=0.001,\n desc=\"The `exact_sigma` denotes the `sigma` \"\n \"for exact match.\"\n ))\n \n params.add(Param(\n name='token_dim', value=512,\n desc=\"The maximum number of tokens for BERT.\"\n ))\n \n return params\n @classmethod\n def get_default_preprocessor(\n cls,\n mode: str = 'bert-base-uncased'\n ) -> BasePreprocessor:\n \"\"\":return: Default preprocessor.\"\"\"\n return preprocessors.BertPreprocessor(mode=mode)\n\n @classmethod\n def get_default_padding_callback(\n cls,\n fixed_length_left: int = None,\n fixed_length_right: int = None,\n pad_value: typing.Union[int, str] = 0,\n pad_mode: str = 'pre'\n ):\n \"\"\":return: Default padding callback.\"\"\"\n return callbacks.BertPaddingSingle(\n fixed_length_left=fixed_length_left,\n fixed_length_right=fixed_length_right,\n pad_value=pad_value,\n pad_mode=pad_mode)\n \n def build(self):\n \"\"\"Build model structure.\"\"\"\n self.bert = BertModule(mode=self._params['mode'])\n self.dropout = nn.Dropout(p=self._params['dropout_rate'])\n self.q_w = nn.Parameter(torch.tensor(1.1, requires_grad=True))\n self.q_b = nn.Parameter(torch.tensor(0.1, requires_grad=True))\n \n if 'base' in self._params['mode']:\n dim = 768\n elif 'large' in self._params['mode']:\n dim = 1024\n self.kernels = nn.ModuleList()\n for i in range(self._params['kernel_num']):\n mu = 1. / (self._params['kernel_num'] - 1) + (2. 
* i) / (self._params['kernel_num'] - 1) - 1.0\n sigma = 0.1\n if mu > 1.0:\n sigma = 0.01\n mu = 1.0\n self.kernels.append(GaussianKernel(mu=mu, sigma=sigma))\n \n self.weighted_kernel = self._make_perceptron_layer(self._params['kernel_num'], 1, nn.ReLU())\n self.out = self._make_output_layer(self._params['token_dim'])\n\n def forward(self, inputs):\n \"\"\"Forward.\"\"\"\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # D = embedding size\n # L = `input_left` sequence length\n # R = `input_right` sequence length\n # K = number of kernels\n\n # Left input and right input.\n # shape = [B, L]\n # shape = [B, R]\n \n query, doc = inputs['text_left'], inputs['text_right']\n bert_q = self.bert(query)[0]\n bert_d = self.bert(doc)[0]\n \n # shape = [B, (L+1), (R+1)]\n matching_matrix = torch.einsum('bld,brd->blr',\n F.normalize(bert_q, p=2, dim=-1),\n F.normalize(bert_d, p=2, dim=-1)\n )\n # sim(query token, query CLS) dim = [B,L]\n query_token_d_weight = nn.ReLU()(self.q_b + self.q_w * torch.squeeze(torch.einsum('bid,bld->bil',\n F.normalize(bert_q[:,0:1], p=2, dim=-1),\n F.normalize(bert_q, p=2, dim=-1)\n ), 1))[:,1:]\n \n \n # sim(doc token, doc CLS) dim = [B,R]\n doc_token_d_weight = nn.ReLU()(self.q_b + self.q_w * torch.squeeze(torch.einsum('bid,brd->bir',\n F.normalize(bert_d[:,0:1], p=2, dim=-1),\n F.normalize(bert_d, p=2, dim=-1)\n ), 1))[:,1:]\n \n KM = []\n \n for kernel in self.kernels:\n # shape = [B, L]\n K_q = torch.log1p(torch.einsum('blr,br -> bl', \n kernel(matching_matrix[:,1:,1:]),\n doc_token_d_weight)) #add weight here in the future\n KM.append(K_q)\n\n # KM shape K of [B, L]\n \n # shape = [B,L,K]\n phi = torch.stack(KM, dim=2)\n \n # shape = [B,L], activation is tanh\n word_score = torch.squeeze(self.weighted_kernel(phi), -1)\n #query token weight = sim(query_token, doc_CLS), dim = [B, L]\n \n weighted_word_score = query_token_d_weight * word_score\n target = torch.zeros(word_score.shape[0], self._params['token_dim'])\n target[:, :word_score.shape[1]] = word_score\n out = self.out(target)\n return out\n\n'''\nEpoch 1/5: 100%\n64/64 [55:32<00:00, 52.08s/it, loss=0.694]\n[Iter-128 Loss-0.692]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.3204 - normalized_discounted_cumulative_gain@5(0.0): 0.3979 - mean_average_precision(0.0): 0.3669\n\nEpoch 2/5: 100%\n64/64 [44:55<00:00, 42.12s/it, loss=0.687]\n[Iter-192 Loss-0.692]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.3468 - normalized_discounted_cumulative_gain@5(0.0): 0.4181 - mean_average_precision(0.0): 0.3826\n\nEpoch 3/5: 100%\n64/64 [42:53<00:00, 40.22s/it, loss=0.678]\n[Iter-256 Loss-0.686]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.509 - normalized_discounted_cumulative_gain@5(0.0): 0.569 - mean_average_precision(0.0): 0.5099\n\nEpoch 4/5: 100%\n64/64 [43:12<00:00, 40.51s/it, loss=0.690]\n[Iter-320 Loss-0.681]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5554 - normalized_discounted_cumulative_gain@5(0.0): 0.6279 - mean_average_precision(0.0): 0.5665\n\nEpoch 5/5: 100%\n64/64 [42:35<00:00, 39.93s/it, loss=0.683]\n[Iter-384 Loss-0.671]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.6346 - normalized_discounted_cumulative_gain@5(0.0): 0.6973 - mean_average_precision(0.0): 0.6465\n\nCost time: 13750.721867084503s\n'''\n\n\n\n\n", "\"\"\"An implementation of Bert Model with knrm output layers with explanable structure \nv5\n- add similarity between query token and query CLS as weight\n- add similarity 
between doc token and doc CLS as weight\n\"\"\"\nimport typing\n\nimport torch\nimport torch.nn as nn\nfrom pytorch_transformers import BertModel\nimport torch.nn.functional as F\n\nfrom matchzoo import preprocessors\nfrom matchzoo.engine.param_table import ParamTable\nfrom matchzoo.engine.param import Param\nfrom matchzoo.engine.base_model import BaseModel\nfrom matchzoo.engine.base_preprocessor import BasePreprocessor\nfrom matchzoo.engine import hyper_spaces\nfrom matchzoo.dataloader import callbacks\nfrom matchzoo.modules import BertModule\nfrom matchzoo.modules import GaussianKernel\n\n\n\n\nclass BertKNRMex(BaseModel):\n \"\"\"\n Bert Model with knrm output layers and explanable output structure \n\n \"\"\"\n\n @classmethod\n def get_default_params(cls) -> ParamTable:\n \"\"\":return: model default parameters.\"\"\"\n params = super().get_default_params(with_embedding=True)\n params.add(Param(name='mode', value='bert-base-uncased',\n desc=\"Pretrained Bert model.\"))\n params.add(Param(\n 'dropout_rate', 0.0,\n hyper_space=hyper_spaces.quniform(\n low=0.0, high=0.8, q=0.01),\n desc=\"The dropout rate.\"\n )) \n params.add(Param(\n name='kernel_num',\n value=11,\n hyper_space=hyper_spaces.quniform(low=5, high=20),\n desc=\"The number of RBF kernels.\"\n ))\n params.add(Param(\n name='sigma',\n value=0.1,\n hyper_space=hyper_spaces.quniform(\n low=0.01, high=0.2, q=0.01),\n desc=\"The `sigma` defines the kernel width.\"\n ))\n params.add(Param(\n name='exact_sigma', value=0.001,\n desc=\"The `exact_sigma` denotes the `sigma` \"\n \"for exact match.\"\n ))\n \n params.add(Param(\n name='token_dim', value=512,\n desc=\"The maximum number of tokens for BERT.\"\n ))\n \n return params\n @classmethod\n def get_default_preprocessor(\n cls,\n mode: str = 'bert-base-uncased'\n ) -> BasePreprocessor:\n \"\"\":return: Default preprocessor.\"\"\"\n return preprocessors.BertPreprocessor(mode=mode)\n\n @classmethod\n def get_default_padding_callback(\n cls,\n fixed_length_left: int = None,\n fixed_length_right: int = None,\n pad_value: typing.Union[int, str] = 0,\n pad_mode: str = 'pre'\n ):\n \"\"\":return: Default padding callback.\"\"\"\n return callbacks.BertPaddingSingle(\n fixed_length_left=fixed_length_left,\n fixed_length_right=fixed_length_right,\n pad_value=pad_value,\n pad_mode=pad_mode)\n \n def build(self):\n \"\"\"Build model structure.\"\"\"\n self.bert_q = BertModule(mode=self._params['mode'])\n self.bert_d = BertModule(mode=self._params['mode'])\n self.dropout = nn.Dropout(p=self._params['dropout_rate'])\n self.q_w = nn.Parameter(torch.tensor(1.1, requires_grad=True))\n self.q_b = nn.Parameter(torch.tensor(0.1, requires_grad=True))\n \n if 'base' in self._params['mode']:\n dim = 768\n elif 'large' in self._params['mode']:\n dim = 1024\n self.kernels = nn.ModuleList()\n for i in range(self._params['kernel_num']):\n mu = 1. / (self._params['kernel_num'] - 1) + (2. 
* i) / (self._params['kernel_num'] - 1) - 1.0\n sigma = 0.1\n if mu > 1.0:\n sigma = 0.01\n mu = 1.0\n self.kernels.append(GaussianKernel(mu=mu, sigma=sigma))\n \n self.weighted_kernel = self._make_perceptron_layer(self._params['kernel_num'], 1, nn.Tanh())\n self.out = self._make_output_layer(self._params['token_dim'])\n\n def forward(self, inputs):\n \"\"\"Forward.\"\"\"\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # D = embedding size\n # L = `input_left` sequence length\n # R = `input_right` sequence length\n # K = number of kernels\n\n # Left input and right input.\n # shape = [B, L]\n # shape = [B, R]\n \n query, doc = inputs['text_left'], inputs['text_right']\n bert_q = self.bert_q(query)[0]\n bert_d = self.bert_d(doc)[0]\n \n # shape = [B, (L+1), (R+1)]\n matching_matrix = torch.einsum('bld,brd->blr',\n F.normalize(bert_q, p=2, dim=-1),\n F.normalize(bert_d, p=2, dim=-1)\n )\n # sim(query token, query CLS) dim = [B,L]\n query_token_d_weight = nn.ReLU()(self.q_b + self.q_w * torch.squeeze(torch.einsum('bid,bld->bil',\n F.normalize(bert_q[:,0:1], p=2, dim=-1),\n F.normalize(bert_q, p=2, dim=-1)\n ), 1))[:,1:]\n \n # sim(doc token, doc CLS) dim = [B,R]\n doc_token_d_weight = nn.ReLU()(self.q_b + self.q_w * torch.squeeze(torch.einsum('bid,brd->bir',\n F.normalize(bert_d[:,0:1], p=2, dim=-1),\n F.normalize(bert_d, p=2, dim=-1)\n ), 1))[:,1:]\n \n KM = []\n for kernel in self.kernels:\n # shape = [B, L]\n K_q = torch.log1p(torch.einsum('blr,br -> bl', \n kernel(matching_matrix[:,1:,1:]),\n doc_token_d_weight)) #add weight here in the future\n KM.append(K_q)\n\n # KM shape K of [B, L]\n \n # shape = [B,L,K]\n phi = torch.stack(KM, dim=2)\n \n # shape = [B,L], activation is tanh\n word_score = torch.squeeze(self.weighted_kernel(phi), -1)\n #query token weight = sim(query_token, doc_CLS), dim = [B, L]\n \n weighted_word_score = query_token_d_weight * word_score\n target = torch.zeros(word_score.shape[0], self._params['token_dim'])\n target[:, :word_score.shape[1]] = word_score\n out = self.out(target)\n return out\n\n'''\n[Iter-64 Loss-0.567]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5874 - normalized_discounted_cumulative_gain@5(0.0): 0.6398 - mean_average_precision(0.0): 0.5999\n[Iter-128 Loss-0.562]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.6049 - normalized_discounted_cumulative_gain@5(0.0): 0.6585 - mean_average_precision(0.0): 0.6095\n\n[Iter-192 Loss-0.564]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.6039 - normalized_discounted_cumulative_gain@5(0.0): 0.6607 - mean_average_precision(0.0): 0.6161\n\n[Iter-256 Loss-0.564]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5981 - normalized_discounted_cumulative_gain@5(0.0): 0.6581 - mean_average_precision(0.0): 0.6166\n\n[Iter-320 Loss-0.561]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5791 - normalized_discounted_cumulative_gain@5(0.0): 0.6382 - mean_average_precision(0.0): 0.6021\n\n[Iter-384 Loss-0.562]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.6112 - normalized_discounted_cumulative_gain@5(0.0): 0.6566 - mean_average_precision(0.0): 0.6124\n\n[Iter-448 Loss-0.561]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.5965 - normalized_discounted_cumulative_gain@5(0.0): 0.6538 - mean_average_precision(0.0): 0.6072\n\n[Iter-512 Loss-0.562]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.583 - normalized_discounted_cumulative_gain@5(0.0): 0.6415 - 
mean_average_precision(0.0): 0.5963\n\n[Iter-576 Loss-0.562]:\n Validation: normalized_discounted_cumulative_gain@3(0.0): 0.6076 - normalized_discounted_cumulative_gain@5(0.0): 0.6615 - mean_average_precision(0.0): 0.6222\n'''\n\n\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.Dropout", "torch.zeros", "torch.nn.ModuleList", "torch.tensor", "torch.stack", "torch.nn.ReLU" ], [ "torch.nn.functional.normalize", "torch.nn.Dropout", "torch.zeros", "torch.nn.ModuleList", "torch.tensor", "torch.nn.Tanh", "torch.stack", "torch.nn.ReLU" ] ]
ic/depthai-python
[ "fe6424277641dbff0f0fe705ddacdbb04a7bf06d" ]
[ "examples/rgb_mobilenet.py" ]
[ "#!/usr/bin/env python3\n\nfrom pathlib import Path\nimport cv2\nimport depthai as dai\nimport numpy as np\nimport time\nimport argparse\n\nnnPathDefault = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute())\nparser = argparse.ArgumentParser()\nparser.add_argument('nnPath', nargs='?', help=\"Path to mobilenet detection network blob\", default=nnPathDefault)\nparser.add_argument('-s', '--sync', action=\"store_true\", help=\"Sync RGB output with NN output\", default=False)\nargs = parser.parse_args()\n\nif not Path(nnPathDefault).exists():\n import sys\n raise FileNotFoundError(f'Required file/s not found, please run \"{sys.executable} install_requirements.py\"')\n\n# MobilenetSSD label texts\nlabelMap = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\",\n \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\n# Create pipeline\npipeline = dai.Pipeline()\npipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_2)\n\n# Define sources and outputs\ncamRgb = pipeline.createColorCamera()\nnn = pipeline.createMobileNetDetectionNetwork()\nxoutRgb = pipeline.createXLinkOut()\nnnOut = pipeline.createXLinkOut()\n\nxoutRgb.setStreamName(\"rgb\")\nnnOut.setStreamName(\"nn\")\n\n# Properties\ncamRgb.setPreviewSize(300, 300)\ncamRgb.setInterleaved(False)\ncamRgb.setFps(40)\n# Define a neural network that will make predictions based on the source frames\nnn.setConfidenceThreshold(0.5)\nnn.setBlobPath(args.nnPath)\nnn.setNumInferenceThreads(2)\nnn.input.setBlocking(False)\n\n# Linking\nif args.sync:\n nn.passthrough.link(xoutRgb.input)\nelse:\n camRgb.preview.link(xoutRgb.input)\n\ncamRgb.preview.link(nn.input)\nnn.out.link(nnOut.input)\n\n# Connect to device and start pipeline\nwith dai.Device(pipeline) as device:\n\n # Output queues will be used to get the rgb frames and nn data from the outputs defined above\n qRgb = device.getOutputQueue(name=\"rgb\", maxSize=4, blocking=False)\n qDet = device.getOutputQueue(name=\"nn\", maxSize=4, blocking=False)\n\n frame = None\n detections = []\n startTime = time.monotonic()\n counter = 0\n color2 = (255, 255, 255)\n\n # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height\n def frameNorm(frame, bbox):\n normVals = np.full(len(bbox), frame.shape[0])\n normVals[::2] = frame.shape[1]\n return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)\n\n def displayFrame(name, frame):\n color = (255, 0, 0)\n for detection in detections:\n bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))\n cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)\n cv2.putText(frame, f\"{int(detection.confidence * 100)}%\", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)\n # Show the frame\n cv2.imshow(name, frame)\n\n while True:\n if args.sync:\n # Use blocking get() call to catch frame and inference result synced\n inRgb = qRgb.get()\n inDet = qDet.get()\n else:\n # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise\n inRgb = qRgb.tryGet()\n inDet = qDet.tryGet()\n\n if inRgb is not None:\n frame = inRgb.getCvFrame()\n cv2.putText(frame, \"NN fps: {:.2f}\".format(counter / 
(time.monotonic() - startTime)),\n (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color2)\n\n if inDet is not None:\n detections = inDet.detections\n counter += 1\n\n # If the frame is available, draw bounding boxes on it and show the frame\n if frame is not None:\n displayFrame(\"rgb\", frame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n" ]
[ [ "numpy.array" ] ]
NegriAndrea/soxs
[ "bb60eda228923fecbfdaec3c47c8d2d237c8a058" ]
[ "soxs/tests/test_spectra.py" ]
[ "from soxs.spectra import Spectrum, ConvolvedSpectrum\nfrom soxs.response import AuxiliaryResponseFile\nfrom numpy.testing import assert_allclose, assert_array_equal\nimport os\nimport tempfile\nimport shutil\n\n\ndef test_arithmetic():\n spec1 = Spectrum.from_powerlaw(1.0, 0.05, 1.0e-4, 0.1, 10.0, 10000)\n spec2 = Spectrum.from_powerlaw(2.0, 0.01, 1.0e-3, 0.1, 10.0, 10000)\n spec3 = spec1+spec2\n flux3 = spec1.flux+spec2.flux\n\n assert_allclose(spec3.flux, flux3)\n\n spec4 = spec3*3.0\n spec5 = 3.0*spec3\n\n flux4 = spec3.flux*3.0\n\n assert_allclose(spec4.flux, spec5.flux)\n assert_allclose(spec4.flux, flux4)\n\n spec6 = spec3/2.5\n flux6 = spec3.flux/2.5\n\n assert_allclose(spec6.flux, flux6)\n\n spec7 = Spectrum.from_constant(1.0e-4, 0.1, 10.0, 10000)\n spec8 = spec1+spec7\n\n assert_allclose(spec8.flux.value, spec1.flux.value+1.0e-4)\n\n\ndef test_read_write():\n\n tmpdir = tempfile.mkdtemp()\n curdir = os.getcwd()\n os.chdir(tmpdir)\n\n spec1 = Spectrum.from_powerlaw(1.0, 0.05, 1.0e-4, 0.1, 10.0, 10000)\n spec1.write_file(\"test_spec.dat\", overwrite=True)\n spec2 = Spectrum.from_file(\"test_spec.dat\")\n\n assert_allclose(spec1.flux, spec2.flux)\n assert_allclose(spec1.emid, spec2.emid)\n assert_allclose(spec1.ebins, spec2.ebins)\n assert_allclose(spec1.cumspec, spec2.cumspec)\n\n os.chdir(curdir)\n shutil.rmtree(tmpdir)\n\n\ndef test_rescale_flux():\n spec = Spectrum.from_powerlaw(2.0, 0.01, 1.0, 0.1, 10.0, 10000)\n\n spec.rescale_flux(1.0e-4, emin=0.5, emax=7.0, flux_type=\"photons\")\n f = spec.get_flux_in_band(0.5, 7.0)[0]\n assert_allclose(1.0e-4, f.value)\n\n spec.rescale_flux(1.0e-12, emin=0.4, emax=1.0, flux_type=\"energy\")\n f = spec.get_flux_in_band(0.4, 1.0)[1]\n assert_allclose(1.0e-12, f.value)\n\n\ndef test_convolved_spectra():\n arf = AuxiliaryResponseFile(\"xrs_hdxi_3x10.arf\")\n spec1 = Spectrum.from_powerlaw(2.0, 0.01, 1.0, 0.1, 10.0, 1000)\n cspec1 = ConvolvedSpectrum.convolve(spec1, arf)\n cspec2 = spec1*arf\n spec2 = cspec1.deconvolve()\n assert_array_equal(cspec1.ebins.value, cspec2.ebins.value)\n assert_array_equal(spec1.ebins.value, spec2.ebins.value)\n assert_array_equal(cspec1.flux.value, cspec2.flux.value)\n assert_allclose(spec1.flux.value, spec2.flux.value)" ]
[ [ "numpy.testing.assert_array_equal", "numpy.testing.assert_allclose" ] ]
eecshope/optuna
[ "b2daf0d9d032db03b7be49fc4856c78ac7d406e1" ]
[ "optuna/samplers/tpe/sampler.py" ]
[ "import math\n\nimport numpy as np\nimport scipy.special\nfrom scipy.stats import truncnorm\n\nfrom optuna import distributions\nfrom optuna.pruners import HyperbandPruner\nfrom optuna.samplers import base\nfrom optuna.samplers import random\nfrom optuna.samplers.tpe.parzen_estimator import _ParzenEstimator\nfrom optuna.samplers.tpe.parzen_estimator import _ParzenEstimatorParameters\nfrom optuna.study import StudyDirection\nfrom optuna.trial import TrialState\nfrom optuna import type_checking\n\nif type_checking.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Callable # NOQA\n from typing import Dict # NOQA\n from typing import List # NOQA\n from typing import Optional # NOQA\n from typing import Tuple # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.study import Study # NOQA\n from optuna.trial import FrozenTrial # NOQA\n\nEPS = 1e-12\n\n\ndef default_gamma(x):\n # type: (int) -> int\n\n return min(int(np.ceil(0.1 * x)), 25)\n\n\ndef hyperopt_default_gamma(x):\n # type: (int) -> int\n\n return min(int(np.ceil(0.25 * np.sqrt(x))), 25)\n\n\ndef default_weights(x):\n # type: (int) -> np.ndarray\n\n if x == 0:\n return np.asarray([])\n elif x < 25:\n return np.ones(x)\n else:\n ramp = np.linspace(1.0 / x, 1.0, num=x - 25)\n flat = np.ones(25)\n return np.concatenate([ramp, flat], axis=0)\n\n\nclass TPESampler(base.BaseSampler):\n \"\"\"Sampler using TPE (Tree-structured Parzen Estimator) algorithm.\n\n This sampler is based on *independent sampling*.\n See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.\n\n On each trial, for each parameter, TPE fits one Gaussian Mixture Model (GMM) ``l(x)`` to\n the set of parameter values associated with the best objective values, and another GMM\n ``g(x)`` to the remaining parameter values. It chooses the parameter value ``x`` that\n maximizes the ratio ``l(x)/g(x)``.\n\n For further information about TPE algorithm, please refer to the following papers:\n\n - `Algorithms for Hyper-Parameter Optimization\n <https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf>`_\n - `Making a Science of Model Search: Hyperparameter Optimization in Hundreds of\n Dimensions for Vision Architectures <http://proceedings.mlr.press/v28/bergstra13.pdf>`_\n\n Example:\n\n .. testcode::\n\n import optuna\n from optuna.samplers import TPESampler\n\n def objective(trial):\n x = trial.suggest_uniform('x', -10, 10)\n return x**2\n\n study = optuna.create_study(sampler=TPESampler())\n study.optimize(objective, n_trials=10)\n\n Args:\n consider_prior:\n Enhance the stability of Parzen estimator by imposing a Gaussian prior when\n :obj:`True`. The prior is only effective if the sampling distribution is\n either :class:`~optuna.distributions.UniformDistribution`,\n :class:`~optuna.distributions.DiscreteUniformDistribution`,\n :class:`~optuna.distributions.LogUniformDistribution`,\n or :class:`~optuna.distributions.IntUniformDistribution`.\n prior_weight:\n The weight of the prior. 
This argument is used in\n :class:`~optuna.distributions.UniformDistribution`,\n :class:`~optuna.distributions.DiscreteUniformDistribution`,\n :class:`~optuna.distributions.LogUniformDistribution`,\n :class:`~optuna.distributions.IntUniformDistribution` and\n :class:`~optuna.distributions.CategoricalDistribution`.\n consider_magic_clip:\n Enable a heuristic to limit the smallest variances of Gaussians used in\n the Parzen estimator.\n consider_endpoints:\n Take endpoints of domains into account when calculating variances of Gaussians\n in Parzen estimator. See the original paper for details on the heuristics\n to calculate the variances.\n n_startup_trials:\n The random sampling is used instead of the TPE algorithm until the given number\n of trials finish in the same study.\n n_ei_candidate:\n Number of candidate samples used to calculate the expected improvement.\n gamma:\n A function that takes the number of finished trials and returns the number\n of trials to form a density function for samples with low grains.\n See the original paper for more details.\n weights:\n A function that takes the number of finished trials and returns a weight for them.\n See `Making a Science of Model Search: Hyperparameter Optimization in Hundreds of\n Dimensions for Vision Architectures <http://proceedings.mlr.press/v28/bergstra13.pdf>`_\n for more details.\n seed:\n Seed for random number generator.\n \"\"\"\n\n def __init__(\n self,\n consider_prior=True, # type: bool\n prior_weight=1.0, # type: float\n consider_magic_clip=True, # type: bool\n consider_endpoints=False, # type: bool\n n_startup_trials=10, # type: int\n n_ei_candidates=24, # type: int\n gamma=default_gamma, # type: Callable[[int], int]\n weights=default_weights, # type: Callable[[int], np.ndarray]\n seed=None, # type: Optional[int]\n ):\n # type: (...) 
-> None\n\n self._parzen_estimator_parameters = _ParzenEstimatorParameters(\n consider_prior, prior_weight, consider_magic_clip, consider_endpoints, weights\n )\n self._prior_weight = prior_weight\n self._n_startup_trials = n_startup_trials\n self._n_ei_candidates = n_ei_candidates\n self._gamma = gamma\n self._weights = weights\n\n self._rng = np.random.RandomState(seed)\n self._random_sampler = random.RandomSampler(seed=seed)\n\n def reseed_rng(self) -> None:\n\n self._rng = np.random.RandomState()\n self._random_sampler.reseed_rng()\n\n def infer_relative_search_space(self, study, trial):\n # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (Study, FrozenTrial, str, BaseDistribution) -> Any\n\n values, scores = _get_observation_pairs(study, param_name, trial)\n\n n = len(values)\n\n if n < self._n_startup_trials:\n return self._random_sampler.sample_independent(\n study, trial, param_name, param_distribution\n )\n\n below_param_values, above_param_values = self._split_observation_pairs(values, scores)\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._sample_uniform(param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n return self._sample_loguniform(\n param_distribution, below_param_values, above_param_values\n )\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n return self._sample_discrete_uniform(\n param_distribution, below_param_values, above_param_values\n )\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n return self._sample_int(param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n index = self._sample_categorical_index(\n param_distribution, below_param_values, above_param_values\n )\n return param_distribution.choices[index]\n else:\n distribution_list = [\n distributions.UniformDistribution.__name__,\n distributions.LogUniformDistribution.__name__,\n distributions.DiscreteUniformDistribution.__name__,\n distributions.IntUniformDistribution.__name__,\n distributions.CategoricalDistribution.__name__,\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. \"\n \"The parameter distribution should be one of the {}\".format(\n param_distribution, distribution_list\n )\n )\n\n def _split_observation_pairs(\n self,\n config_vals, # type: List[float]\n loss_vals, # type: List[Tuple[float, float]]\n ):\n # type: (...) 
-> Tuple[np.ndarray, np.ndarray]\n\n config_vals = np.asarray(config_vals)\n loss_vals = np.asarray(loss_vals, dtype=[(\"step\", float), (\"score\", float)])\n\n n_below = self._gamma(len(config_vals))\n loss_ascending = np.argsort(loss_vals)\n below = config_vals[np.sort(loss_ascending[:n_below])]\n above = config_vals[np.sort(loss_ascending[n_below:])]\n return below, above\n\n def _sample_uniform(self, distribution, below, above):\n # type: (distributions.UniformDistribution, np.ndarray, np.ndarray) -> float\n\n low = distribution.low\n high = distribution.high\n return self._sample_numerical(low, high, below, above)\n\n def _sample_loguniform(self, distribution, below, above):\n # type: (distributions.LogUniformDistribution, np.ndarray, np.ndarray) -> float\n\n low = distribution.low\n high = distribution.high\n return self._sample_numerical(low, high, below, above, is_log=True)\n\n def _sample_discrete_uniform(self, distribution, below, above):\n # type:(distributions.DiscreteUniformDistribution, np.ndarray, np.ndarray) -> float\n\n q = distribution.q\n r = distribution.high - distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n\n # Shift below and above to [0, r]\n above -= distribution.low\n below -= distribution.low\n\n best_sample = self._sample_numerical(low, high, below, above, q=q) + distribution.low\n return min(max(best_sample, distribution.low), distribution.high)\n\n def _sample_int(self, distribution, below, above):\n # type: (distributions.IntUniformDistribution, np.ndarray, np.ndarray) -> int\n\n d = distributions.DiscreteUniformDistribution(\n low=distribution.low, high=distribution.high, q=distribution.step\n )\n return int(self._sample_discrete_uniform(d, below, above))\n\n def _sample_numerical(\n self,\n low, # type: float\n high, # type: float\n below, # type: np.ndarray\n above, # type: np.ndarray\n q=None, # type: Optional[float]\n is_log=False, # type: bool\n ):\n # type: (...) 
-> float\n\n if is_log:\n low = np.log(low)\n high = np.log(high)\n below = np.log(below)\n above = np.log(above)\n\n size = (self._n_ei_candidates,)\n\n parzen_estimator_below = _ParzenEstimator(\n mus=below, low=low, high=high, parameters=self._parzen_estimator_parameters\n )\n samples_below = self._sample_from_gmm(\n parzen_estimator=parzen_estimator_below, low=low, high=high, q=q, size=size,\n )\n log_likelihoods_below = self._gmm_log_pdf(\n samples=samples_below,\n parzen_estimator=parzen_estimator_below,\n low=low,\n high=high,\n q=q,\n )\n\n parzen_estimator_above = _ParzenEstimator(\n mus=above, low=low, high=high, parameters=self._parzen_estimator_parameters\n )\n\n log_likelihoods_above = self._gmm_log_pdf(\n samples=samples_below,\n parzen_estimator=parzen_estimator_above,\n low=low,\n high=high,\n q=q,\n )\n\n ret = float(\n TPESampler._compare(\n samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above\n )[0]\n )\n return math.exp(ret) if is_log else ret\n\n def _sample_categorical_index(self, distribution, below, above):\n # type: (distributions.CategoricalDistribution, np.ndarray, np.ndarray) -> int\n\n choices = distribution.choices\n below = list(map(int, below))\n above = list(map(int, above))\n upper = len(choices)\n size = (self._n_ei_candidates,)\n\n weights_below = self._weights(len(below))\n counts_below = np.bincount(below, minlength=upper, weights=weights_below)\n weighted_below = counts_below + self._prior_weight\n weighted_below /= weighted_below.sum()\n samples_below = self._sample_from_categorical_dist(weighted_below, size)\n log_likelihoods_below = TPESampler._categorical_log_pdf(samples_below, weighted_below)\n\n weights_above = self._weights(len(above))\n counts_above = np.bincount(above, minlength=upper, weights=weights_above)\n weighted_above = counts_above + self._prior_weight\n weighted_above /= weighted_above.sum()\n log_likelihoods_above = TPESampler._categorical_log_pdf(samples_below, weighted_above)\n\n return int(\n TPESampler._compare(\n samples=samples_below, log_l=log_likelihoods_below, log_g=log_likelihoods_above\n )[0]\n )\n\n def _sample_from_gmm(\n self,\n parzen_estimator, # type: _ParzenEstimator\n low, # type: float\n high, # type: float\n q=None, # type: Optional[float]\n size=(), # type: Tuple\n ):\n # type: (...) -> np.ndarray\n\n weights = parzen_estimator.weights\n mus = parzen_estimator.mus\n sigmas = parzen_estimator.sigmas\n weights, mus, sigmas = map(np.asarray, (weights, mus, sigmas))\n\n if low >= high:\n raise ValueError(\n \"The 'low' should be lower than the 'high'. \"\n \"But (low, high) = ({}, {}).\".format(low, high)\n )\n\n active = np.argmax(self._rng.multinomial(1, weights, size=size), axis=-1)\n trunc_low = (low - mus[active]) / sigmas[active]\n trunc_high = (high - mus[active]) / sigmas[active]\n while True:\n samples = truncnorm.rvs(\n trunc_low,\n trunc_high,\n size=size,\n loc=mus[active],\n scale=sigmas[active],\n random_state=self._rng,\n )\n if (samples < high).all():\n break\n\n if q is None:\n return samples\n else:\n return np.round(samples / q) * q\n\n def _gmm_log_pdf(\n self,\n samples, # type: np.ndarray\n parzen_estimator, # type: _ParzenEstimator\n low, # type: float\n high, # type: float\n q=None, # type: Optional[float]\n ):\n # type: (...) 
-> np.ndarray\n\n weights = parzen_estimator.weights\n mus = parzen_estimator.mus\n sigmas = parzen_estimator.sigmas\n samples, weights, mus, sigmas = map(np.asarray, (samples, weights, mus, sigmas))\n if samples.size == 0:\n return np.asarray([], dtype=float)\n if weights.ndim != 1:\n raise ValueError(\n \"The 'weights' should be 2-dimension. \"\n \"But weights.shape = {}\".format(weights.shape)\n )\n if mus.ndim != 1:\n raise ValueError(\n \"The 'mus' should be 2-dimension. \" \"But mus.shape = {}\".format(mus.shape)\n )\n if sigmas.ndim != 1:\n raise ValueError(\n \"The 'sigmas' should be 2-dimension. \" \"But sigmas.shape = {}\".format(sigmas.shape)\n )\n\n p_accept = np.sum(\n weights\n * (\n TPESampler._normal_cdf(high, mus, sigmas)\n - TPESampler._normal_cdf(low, mus, sigmas)\n )\n )\n\n if q is None:\n distance = samples[..., None] - mus\n mahalanobis = (distance / np.maximum(sigmas, EPS)) ** 2\n Z = np.sqrt(2 * np.pi) * sigmas\n coefficient = weights / Z / p_accept\n return TPESampler._logsum_rows(-0.5 * mahalanobis + np.log(coefficient))\n else:\n cdf_func = TPESampler._normal_cdf\n upper_bound = np.minimum(samples + q / 2.0, high)\n lower_bound = np.maximum(samples - q / 2.0, low)\n probabilities = np.sum(\n weights[..., None]\n * (\n cdf_func(upper_bound[None], mus[..., None], sigmas[..., None])\n - cdf_func(lower_bound[None], mus[..., None], sigmas[..., None])\n ),\n axis=0,\n )\n return np.log(probabilities + EPS) - np.log(p_accept + EPS)\n\n def _sample_from_categorical_dist(self, probabilities, size):\n # type: (np.ndarray, Tuple[int]) -> np.ndarray\n\n if probabilities.size == 1 and isinstance(probabilities[0], np.ndarray):\n probabilities = probabilities[0]\n probabilities = np.asarray(probabilities)\n\n if size == (0,):\n return np.asarray([], dtype=float)\n assert len(size)\n assert probabilities.ndim == 1\n\n n_draws = int(np.prod(size))\n sample = self._rng.multinomial(n=1, pvals=probabilities, size=int(n_draws))\n assert sample.shape == size + (probabilities.size,)\n return_val = np.dot(sample, np.arange(probabilities.size))\n return_val.shape = size\n return return_val\n\n @classmethod\n def _categorical_log_pdf(\n cls,\n sample, # type: np.ndarray\n p, # type: np.ndarray\n ):\n # type: (...) -> np.ndarray\n\n if sample.size:\n return np.log(np.asarray(p)[sample])\n else:\n return np.asarray([])\n\n @classmethod\n def _compare(cls, samples, log_l, log_g):\n # type: (np.ndarray, np.ndarray, np.ndarray) -> np.ndarray\n\n samples, log_l, log_g = map(np.asarray, (samples, log_l, log_g))\n if samples.size:\n score = log_l - log_g\n if samples.size != score.size:\n raise ValueError(\n \"The size of the 'samples' and that of the 'score' \"\n \"should be same. 
\"\n \"But (samples.size, score.size) = ({}, {})\".format(samples.size, score.size)\n )\n\n best = np.argmax(score)\n return np.asarray([samples[best]] * samples.size)\n else:\n return np.asarray([])\n\n @classmethod\n def _logsum_rows(cls, x):\n # type: (np.ndarray) -> np.ndarray\n\n x = np.asarray(x)\n m = x.max(axis=1)\n return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m\n\n @classmethod\n def _normal_cdf(cls, x, mu, sigma):\n # type: (float, np.ndarray, np.ndarray) -> np.ndarray\n\n mu, sigma = map(np.asarray, (mu, sigma))\n denominator = x - mu\n numerator = np.maximum(np.sqrt(2) * sigma, EPS)\n z = denominator / numerator\n return 0.5 * (1 + scipy.special.erf(z))\n\n @classmethod\n def _log_normal_cdf(cls, x, mu, sigma):\n # type: (float, np.ndarray, np.ndarray) -> np.ndarray\n\n mu, sigma = map(np.asarray, (mu, sigma))\n if x < 0:\n raise ValueError(\"Negative argument is given to _lognormal_cdf. x: {}\".format(x))\n denominator = np.log(np.maximum(x, EPS)) - mu\n numerator = np.maximum(np.sqrt(2) * sigma, EPS)\n z = denominator / numerator\n return 0.5 + 0.5 * scipy.special.erf(z)\n\n @staticmethod\n def hyperopt_parameters():\n # type: () -> Dict[str, Any]\n \"\"\"Return the the default parameters of hyperopt (v0.1.2).\n\n :class:`~optuna.samplers.TPESampler` can be instantiated with the parameters returned\n by this method.\n\n Example:\n\n Create a :class:`~optuna.samplers.TPESampler` instance with the default\n parameters of `hyperopt <https://github.com/hyperopt/hyperopt/tree/0.1.2>`_.\n\n .. testcode::\n\n import optuna\n from optuna.samplers import TPESampler\n\n def objective(trial):\n x = trial.suggest_uniform('x', -10, 10)\n return x**2\n\n sampler = TPESampler(**TPESampler.hyperopt_parameters())\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=10)\n\n Returns:\n A dictionary containing the default parameters of hyperopt.\n\n \"\"\"\n\n return {\n \"consider_prior\": True,\n \"prior_weight\": 1.0,\n \"consider_magic_clip\": True,\n \"consider_endpoints\": False,\n \"n_startup_trials\": 20,\n \"n_ei_candidates\": 24,\n \"gamma\": hyperopt_default_gamma,\n \"weights\": default_weights,\n }\n\n\ndef _get_observation_pairs(study, param_name, trial):\n # type: (Study, str, FrozenTrial) -> Tuple[List[float], List[Tuple[float, float]]]\n \"\"\"Get observation pairs from the study.\n\n This function collects observation pairs from the complete or pruned trials of the study.\n The trials that don't contain the parameter named ``param_name`` are excluded\n from the result.\n\n An observation pair fundamentally consists of a parameter value and an objective value.\n However, due to the pruning mechanism of Optuna, final objective values are not always\n available. 
Therefore, this function uses intermediate values in addition to the final\n ones, and reports the value with its step count as ``(-step, value)``.\n Consequently, the structure of the observation pair is as follows:\n ``(param_value, (-step, value))``.\n\n The second element of an observation pair is used to rank observations in\n ``_split_observation_pairs`` method (i.e., observations are sorted lexicographically by\n ``(-step, value)``).\n \"\"\"\n\n sign = 1\n if study.direction == StudyDirection.MAXIMIZE:\n sign = -1\n\n if isinstance(study.pruner, HyperbandPruner):\n # Create `_BracketStudy` to use trials that have the same bracket id.\n pruner = study.pruner # type: HyperbandPruner\n study = pruner._create_bracket_study(study, pruner._get_bracket_id(study, trial))\n\n values = []\n scores = []\n for trial in study.get_trials(deepcopy=False):\n if param_name not in trial.params:\n continue\n\n if trial.state is TrialState.COMPLETE and trial.value is not None:\n score = (-float(\"inf\"), sign * trial.value)\n elif trial.state is TrialState.PRUNED:\n if len(trial.intermediate_values) > 0:\n step, intermediate_value = max(trial.intermediate_values.items())\n if math.isnan(intermediate_value):\n score = (-step, float(\"inf\"))\n else:\n score = (-step, sign * intermediate_value)\n else:\n score = (float(\"inf\"), 0.0)\n else:\n continue\n\n distribution = trial.distributions[param_name]\n param_value = distribution.to_internal_repr(trial.params[param_name])\n values.append(param_value)\n scores.append(score)\n\n return values, scores\n" ]
[ [ "numpy.minimum", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.concatenate", "numpy.round", "numpy.exp", "numpy.arange", "numpy.ceil", "numpy.argmax", "numpy.log", "numpy.argsort", "numpy.random.RandomState", "numpy.maximum", "numpy.sort", "numpy.ones", "numpy.bincount", "numpy.prod", "scipy.stats.truncnorm.rvs" ] ]
prashravoor/reid-strong-baseline
[ "9f964250a4418ae2389c7eac87e6132aeb5dbce7" ]
[ "tools/train.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: sherlock\n@contact: [email protected]\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport torch\n\nfrom torch.backends import cudnn\n\nsys.path.append('.')\nfrom config import cfg\nfrom data import make_data_loader\nfrom engine.trainer import do_train, do_train_with_center\nfrom modeling import build_model\nfrom layers import make_loss, make_loss_with_center\nfrom solver import make_optimizer, make_optimizer_with_center, WarmupMultiStepLR\n\nfrom utils.logger import setup_logger\nimport random\nimport numpy as np\n\ndef train(cfg, logger):\n # prepare dataset\n train_loader, val_loader, num_query, num_classes = make_data_loader(cfg, logger)\n\n # prepare model\n model = build_model(cfg, num_classes)\n\n if cfg.MODEL.IF_WITH_CENTER == 'no':\n print('Train without center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)\n optimizer = make_optimizer(cfg, model)\n # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n # cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n\n loss_func = make_loss(cfg, num_classes) # modified by gu\n\n # Add for using self trained model\n if cfg.MODEL.PRETRAIN_CHOICE == 'self':\n start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])\n print('Start epoch:', start_epoch)\n path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')\n print('Path to the checkpoint of optimizer:', path_to_optimizer)\n model = torch.load(cfg.MODEL.PRETRAIN_PATH)\n optimizer = torch.load(path_to_optimizer)\n scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, start_epoch)\n elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':\n start_epoch = 0\n scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n else:\n print('Only support pretrain_choice for imagenet and self, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))\n\n arguments = {}\n\n do_train(\n cfg,\n model,\n train_loader,\n val_loader,\n optimizer,\n scheduler, # modify for using self trained model\n loss_func,\n num_query,\n start_epoch # add for using self trained model\n )\n elif cfg.MODEL.IF_WITH_CENTER == 'yes':\n print('Train with center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)\n loss_func, center_criterion = make_loss_with_center(cfg, num_classes) # modified by gu\n optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)\n # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n # cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n\n arguments = {}\n\n # Add for using self trained model\n if cfg.MODEL.PRETRAIN_CHOICE == 'self':\n start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])\n print('Start epoch:', start_epoch)\n path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')\n print('Path to the checkpoint of optimizer:', path_to_optimizer)\n path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace('model', 'center_param')\n print('Path to the checkpoint of center_param:', path_to_center_param)\n path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer_center')\n print('Path to the checkpoint of optimizer_center:', path_to_optimizer_center)\n model = torch.load(cfg.MODEL.PRETRAIN_PATH)\n optimizer = 
torch.load(path_to_optimizer)\n center_criterion = torch.load(path_to_center_param)\n optimizer_center = torch.load(path_to_optimizer_center)\n scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, start_epoch)\n elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':\n start_epoch = 0\n scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,\n cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n else:\n print('Only support pretrain_choice for imagenet and self, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))\n\n do_train_with_center(\n cfg,\n model,\n center_criterion,\n train_loader,\n val_loader,\n optimizer,\n optimizer_center,\n scheduler, # modify for using self trained model\n loss_func,\n num_query,\n start_epoch # add for using self trained model\n )\n else:\n print(\"Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\\n\".format(cfg.MODEL.IF_WITH_CENTER))\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"ReID Baseline Training\")\n parser.add_argument(\n \"--config_file\", default=\"\", help=\"path to config file\", type=str\n )\n parser.add_argument(\"opts\", help=\"Modify config options using the command-line\", default=None,\n nargs=argparse.REMAINDER)\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n\n if args.config_file != \"\":\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n logger = setup_logger(\"reid_baseline\", output_dir, 0)\n logger.info(\"Using {} GPUS\".format(num_gpus))\n logger.info(args)\n\n if args.config_file != \"\":\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, 'r') as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n # logger.info(\"Running with config:\\n{}\".format(cfg))\n\n if cfg.MODEL.DEVICE == \"cuda\":\n os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID # new add by gu\n # cudnn.benchmark = True\n train(cfg, logger)\n\n\nif __name__ == '__main__':\n manualSeed = 42\n\n np.random.seed(manualSeed)\n random.seed(manualSeed)\n torch.manual_seed(manualSeed)\n cudnn.deterministic = True\n cudnn.benchmark = False\n \n main()\n" ]
[ [ "torch.manual_seed", "numpy.random.seed", "torch.load" ] ]
Umasangavi/Thyroid_Disorder_Prediction
[ "6ef2b0a3f52e945b78dd756067cae01478e078a9" ]
[ "src/imbalance.py" ]
[ "import pandas as pd\r\nimport yaml\r\nimport os\r\nimport argparse\r\nfrom imblearn.over_sampling import RandomOverSampler\r\nfrom logger import App_Logger\r\n\r\nfile_object=open(\"application_logging/Loggings.txt\", 'a+')\r\nlogger_object=App_Logger()\r\n\r\ndef read_params(config_path):\r\n with open(config_path) as yaml_file:\r\n config = yaml.safe_load(yaml_file)\r\n return config\r\n \r\ndef balance(config_path):\r\n config = read_params(config_path)\r\n train_class_path=config[\"balanced_data\"][\"train_class\"]\r\n train_label_path=config[\"balanced_data\"][\"train_label\"]\r\n test_class_path=config[\"balanced_data\"][\"test_class\"]\r\n test_label_path=config[\"balanced_data\"][\"test_label\"]\r\n train_processed_path =config[\"processed\"][\"train_path\"]\r\n test_processed_path =config[\"processed\"][\"test_path\"]\r\n\r\n \"\"\"\r\n Method Name: balance\r\n Description: This method drops the Class in train and test data and perform the RandomOverSampler in train data.\r\n Output: A pandas DataFrame .csv.\r\n On Failure: Raise Exception\r\n \"\"\"\r\n logger_object.log(file_object,'Entered the balance')\r\n\r\n\r\n try:\r\n train_data=pd.read_csv(train_processed_path)\r\n test_data=pd.read_csv(test_processed_path)\r\n\r\n train_class=train_data[\"Class\"].copy()\r\n train_label=train_data.drop('Class',axis=1).copy()\r\n\r\n test_class=test_data[\"Class\"].copy()\r\n test_label=test_data.drop('Class',axis=1).copy()\r\n test_label.to_csv(test_label_path,index=False)\r\n test_class.to_csv(test_class_path,index=False)\r\n\r\n ros=RandomOverSampler(sampling_strategy='all')\r\n X_train_res,y_train_res=ros.fit_resample(train_label,train_class)\r\n \r\n X_train_res.to_csv(train_label_path,index=False)\r\n y_train_res.to_csv(train_class_path,index=False)\r\n logger_object.log(file_object,'balance done Successful and Exited')\r\n \r\n except Exception as e:\r\n logger_object.log(file_object,'Exception occured in balance. Exception message: '+str(e))\r\n logger_object.log(file_object,'balance Unsuccessful')\r\n raise Exception() \r\n\r\n\r\nif __name__==\"__main__\":\r\n args = argparse.ArgumentParser()\r\n args.add_argument(\"--config\", default=\"params.yaml\")\r\n parsed_args = args.parse_args()\r\n data = balance(config_path=parsed_args.config)" ]
[ [ "pandas.read_csv" ] ]
SuryaThiru/mlgauge
[ "fcdab7dcccaeea5c48972754dcbbbe289c51e775" ]
[ "mlgauge/analysis.py" ]
[ "import os\nfrom copy import deepcopy\nfrom datetime import datetime\n\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport seaborn as sns\nimport pmlb\nimport openml\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import check_random_state\n\nfrom mlgauge.method import Method\nfrom mlgauge.utils import redirect_stdout, colors\n\n\nclass Analysis:\n \"\"\"The analysis class to run the method comparisons.\n\n The class gathers datasets, methods and runs the given methods across\n different datasets and compiles the results.\n\n Attributes:\n results (xr.DataArray): Named array containing resulting metrics of the analysis.\n\n The dimensions are named \"datasets\", \"methods\", \"metrics\", \"splits\".\n You can index on each dimension using the name of the dataset, method, metrics and split (\"train\"/\"test\" if method uses test, \"fold_1\", \"fold_2\", ... otherwise) using the ``loc`` attribute similar to pandas.\n\n For example to identify the `test` `mse` score of your `linear` model on the `houses` dataset:\n\n .. code-block:: python\n\n result.loc['houses', 'linear', 'mse', 'test']\n\n\n .. note::\n\n When integer IDs are specified for openml datasets, the ``results`` attribute's dataset key will be set as string.\n\n Refer the documentation of `xarray <https://xarray.pydata.org/en/stable/quick-overview.html>`_ for a more detailed usage.\n \"\"\"\n\n def __init__(\n self,\n methods,\n metric_names=None,\n datasets=\"all\",\n n_datasets=20,\n data_source=\"pmlb\",\n drop_na=False,\n use_test_set=True,\n test_size=0.25,\n random_state=None,\n output_dir=None,\n local_cache_dir=None,\n disable_progress=False,\n ):\n \"\"\"Initialize analysis.\n\n Args:\n methods (list): List of tuple containing the method name and a method object.\n metric_names (list): List of strings representing the names of the metric. The names are only used to represent the metrics output by the method objects. If `None` will not collect metrics from methods.\n\n The size of the list should be the same as that returned by the `Method`'s instance train and test methods.\n datasets (str or list): One of the following options:\n\n **\"all\"**: randomly select `n_datasets` from all available datasets in pmlb.\n\n **\"classification\"**: randomly select `n_datasets` from all available classification datasets in pmlb.\n\n **\"regression\"**: randomly select `n_datasets` from all available regression datasets in pmlb.\n\n **list of strings**: a list of valid pmlb/openml dataset names.\n **list of ints**: a list of valid openml dataset IDs. This is recommended for openml to avoid issues with versions.\n\n **list of ('dataset_name', (X, y)) tuples**: Use the method to pass a custom dataset in the X y format.\n\n **list of ('dataset_name', (X_train, y_train), (X_test, y_test)) tuples**: Use the method to pass a custom training and testing set in the X y format.\n\n Here, X y could be a numpy array or a pandas DataFrame, using a DataFrame will allow the input feature names to be passed to the methods.\n\n n_datasets (int): Number of datasets to randomly sample from the available pmlb datasets. Ignored if `datasets` is not a string.\n data_source (str): Source to fetch from when dataset names/IDs are passed. 
'pmlb' or 'openml'\n drop_na (bool): If True will drop all rows in the dataset with null values.\n random_state (None, int or RandomState instance): seed for the PRNG.\n use_test_set (bool): If the methods use a testing set.\n test_size (float): The size of the test set. Ignored if `use_test_set` is False.\n output_dir (str): Path of the output directory where method artifacts will be stored. A separate directory for each method will be created inside the directory. Defaults to an \"output\" directory in the current working directory.\n local_cache_dir (str): Local cache to use for pmlb datasets. If None will not use cached data.\n \"\"\"\n self.random_state = check_random_state(random_state)\n self.seed = self.random_state.get_state()[1][\n 0\n ] # will be used with train-test split to ensure reproducibility outside class\n\n self.__methods = self._precheck_methods(methods)\n self.metric_names = metric_names\n\n if data_source != \"openml\" and data_source != \"pmlb\":\n raise TypeError(\"Data source must be 'openml' or 'pmlb'\")\n if data_source == \"openml\" and not isinstance(datasets, list):\n raise TypeError(\"Provide list of dataset IDs/names for openml\")\n\n self.data_source = data_source\n self.datasets = self._precheck_dataset(datasets)\n if isinstance(\n self.datasets, str\n ): # expand \"all\", \"classification\" or \"regression\"\n self.datasets = self._expand_dataset_str(self.datasets, n_datasets)\n\n # display collected datasets\n print(f\"{colors.GREEN}Collected datasets{colors.ENDC}\")\n print(\n \"\\n\".join(\n f\"{colors.CYAN}{{0: >2}}{colors.ENDC}: {{1}}\".format(*k)\n for k in enumerate(self.datasets, 1)\n )\n )\n self.drop_na = drop_na\n self.local_cache_dir = local_cache_dir\n\n self.use_test_set = use_test_set\n self.test_size = test_size\n\n # create output directory\n self.output_dir = (\n output_dir if output_dir else os.path.join(os.getcwd(), \"output\")\n )\n\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n self.output_dir = os.path.join(self.output_dir, timestamp)\n os.makedirs(self.output_dir, exist_ok=True)\n\n self.results = self._initialize_results()\n self.disable_progress = disable_progress\n\n def run(self):\n \"\"\"Load the datasets, run the methods and collect the results.\"\"\"\n # redirect stdout\n with redirect_stdout() as stdout:\n\n # linespacing logic (18 additional chars for title etc.)\n _datasets = map(\n lambda x: x[0] if isinstance(x, tuple) else x, self.datasets\n ) # get dataset names\n maxl = min(max([len(str(x)) for x in _datasets]) + 18, 80)\n\n # iterate datasets\n datasets = tqdm(\n self.datasets,\n file=stdout,\n dynamic_ncols=True,\n disable=self.disable_progress,\n )\n for dataset in datasets:\n _dataset_name = dataset[0] if isinstance(dataset, tuple) else dataset\n datasets.set_description(\n f\"{{0: <{maxl}}}\".format(\n f\"{colors.GREEN}Datasets [{_dataset_name}]{colors.ENDC}\"\n )\n )\n if self.use_test_set:\n (\n dataset_name,\n feature_names,\n category_indicator,\n (X_train, y_train),\n (X_test, y_test),\n ) = self._get_dataset(dataset)\n else:\n (\n dataset_name,\n feature_names,\n category_indicator,\n (X_train, y_train),\n ) = self._get_dataset(dataset)\n\n # iterate methods\n methods = tqdm(\n self.__methods,\n leave=False,\n file=stdout,\n dynamic_ncols=True,\n disable=self.disable_progress,\n )\n for method_name, method in methods:\n methods.set_description(\n f\"{{0: <{maxl}}}\".format(\n f\"{colors.CYAN}Models [{method_name}]{colors.ENDC}\"\n )\n )\n method = deepcopy(method)\n # set attributes for 
the dataset and method\n method.set_test_set(self.use_test_set)\n # create output directory\n output_dir = os.path.join(\n self.output_dir, str(dataset_name), method_name\n )\n os.makedirs(output_dir, exist_ok=True)\n method.set_output_dir(output_dir)\n\n # get training scores\n train_scores = method.train(\n X_train, y_train, feature_names, category_indicator\n )\n\n # change keys for result to string\n if isinstance(dataset_name, int):\n dataset_name = str(dataset_name)\n\n # get optional testing scores\n if self.use_test_set:\n test_scores = method.test(\n X_test, y_test, feature_names, category_indicator\n )\n if self.metric_names:\n self.results.loc[\n dataset_name, method_name, :, \"train\"\n ] = np.array(train_scores)\n self.results.loc[\n dataset_name, method_name, :, \"test\"\n ] = np.array(test_scores)\n else:\n if self.metric_names:\n self.results.loc[dataset_name, method_name] = np.array(\n train_scores\n )\n\n # TODO recursively remove empty directories\n\n def _precheck_dataset(self, datasets):\n \"\"\"Check if the passed value for the datasets input argument is correct.\n\n Raises:\n TypeError: raised if the methods argument of incorrect type is passed.\n ValueError: raised if an invalid value is passed.\n \"\"\"\n if isinstance(datasets, str):\n if datasets not in [\"all\", \"classification\", \"regression\"]:\n raise ValueError(\n \"String input for datasets should be one of 'all', 'classification' or 'regression'.\"\n )\n elif isinstance(datasets, list):\n for d in datasets:\n if isinstance(d, str):\n # should be a valid pmlb dataset name\n if self.data_source == \"pmlb\":\n if d not in pmlb.dataset_names:\n raise ValueError(f\"Dataset {d} not in pmlb\")\n\n elif isinstance(d, int):\n if self.data_source != \"openml\":\n raise ValueError(\"Integer data IDs are only valid for OpenML\")\n\n elif isinstance(d, tuple):\n if not isinstance(d[0], str):\n raise ValueError(\n \"First element of the tuple must be the name of the dataset\"\n )\n if len(d) not in [2, 3]:\n raise ValueError(\n \"Custom dataset input should be a tuple of length 2 or 3\"\n )\n\n else:\n raise TypeError(f\"Invalid type {type(d)} for dataset.\")\n else:\n raise TypeError(f\"Invalid type {type(datasets)} for datasets.\")\n return datasets\n\n def _precheck_methods(self, methods):\n \"\"\"Check if the passed value for the methods is a list of `Method` instances\n\n Raises:\n TypeError: raised if the dataset argument of incorrect type is passed\n \"\"\"\n for _, m in methods:\n if not isinstance(m, Method):\n raise TypeError(\n f\"Input methods should be an instance of Method class, found {type(m)} instead\"\n )\n return methods\n\n def _expand_dataset_str(self, dataset_str, n_datasets):\n \"\"\"Convert the dataset string to list of pmlb dataset names\"\"\"\n if dataset_str == \"all\":\n datasets = self.random_state.choice(\n pmlb.dataset_names, n_datasets, replace=False\n )\n elif dataset_str == \"classification\":\n datasets = self.random_state.choice(\n pmlb.classification_dataset_names, n_datasets, replace=False\n )\n elif dataset_str == \"regression\":\n datasets = self.random_state.choice(\n pmlb.regression_dataset_names, n_datasets, replace=False\n )\n return datasets\n\n def _initialize_results(self):\n \"\"\"Define a results object to store results generated during analysis.\"\"\"\n if not self.metric_names:\n return None\n\n dims = [\"datasets\", \"methods\", \"metrics\", \"splits\"]\n\n # co-ords\n dataset_names = [self._get_dataset_name(data) for data in self.datasets]\n method_names = [name for 
(name, _) in self.__methods]\n metric_names = self.metric_names\n coords = {\n \"datasets\": dataset_names,\n \"methods\": method_names,\n \"metrics\": metric_names,\n }\n\n if self.use_test_set:\n coords[\"splits\"] = [\"train\", \"test\"]\n else:\n n_folds = self.__methods[0][1].cv # get the number of folds from a method\n coords[\"splits\"] = [\"fold_\" + str(i) for i in range(1, n_folds + 1)]\n\n return xr.DataArray(np.nan, coords=coords, dims=dims)\n\n def _get_dataset_name(self, dataset):\n \"\"\"Get the supplied name of the dataset\"\"\"\n if isinstance(dataset, str):\n return dataset\n elif isinstance(dataset, int):\n return str(dataset)\n elif isinstance(dataset, tuple):\n return dataset[0]\n\n def _get_dataset(self, dataset):\n \"\"\"Load and return the dataset as X, y numpy arrays\"\"\"\n category_indicator = None # list indicating categorical columns\n\n if isinstance(dataset, str): # Use pmlb or openml\n if self.data_source == \"pmlb\":\n data = pmlb.fetch_data(\n dataset, local_cache_dir=self.local_cache_dir, dropna=False\n )\n\n # Get feature names and get X,y numpy arrays\n X = data.drop(\"target\", axis=1)\n y = data[\"target\"]\n elif self.data_source == \"openml\":\n X, y, category_indicator = self._fetch_openml_data(dataset)\n\n elif isinstance(dataset, int):\n X, y, category_indicator = self._fetch_openml_data(dataset)\n\n elif isinstance(dataset, tuple):\n if len(dataset) == 2:\n dataset, (X, y) = dataset\n\n else: # Test set present in the inputs, will directly return\n dataset, (X_train, y_train), (X_test, y_test) = dataset\n feature_names = self._get_feature_names(X_train)\n X_train, y_train = self._format_na(X_train, y_train)\n\n if self.use_test_set:\n X_test, y_test = self._format_na(X_test, y_test)\n return (\n dataset,\n feature_names,\n category_indicator,\n (X_train, y_train),\n (X_test, y_test),\n )\n else:\n return (\n dataset,\n feature_names,\n category_indicator,\n (X_train, y_train),\n )\n\n if self.use_test_set: # Perform train-test splits\n X_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=self.test_size,\n shuffle=True,\n random_state=self.seed,\n )\n feature_names = self._get_feature_names(X_train)\n X_train, y_train = self._format_na(X_train, y_train)\n X_test, y_test = self._format_na(X_test, y_test)\n return (\n dataset,\n feature_names,\n category_indicator,\n (X_train, y_train),\n (X_test, y_test),\n )\n else: # Directly format and return train set\n feature_names = self._get_feature_names(X)\n X_train, y_train = self._format_na(X, y)\n return dataset, feature_names, category_indicator, (X_train, y_train)\n\n def _fetch_openml_data(self, dataset_id):\n \"\"\"Get the openml dataset with the category indicator\"\"\"\n data = openml.datasets.get_dataset(dataset_id)\n X, y, category_indicator, attribute_names = data.get_data(\n dataset_format=\"dataframe\", target=data.default_target_attribute\n )\n return X, y, category_indicator\n\n def _get_feature_names(self, X):\n \"\"\"Get the list of feature names from input data\"\"\"\n if hasattr(X, \"columns\"):\n return X.columns.tolist() # Dataframe column names\n else:\n return list(map(str, range(X.shape[1]))) # Array indices as strings\n\n def _format_na(self, X, y):\n \"\"\"Convert data to numpy arrays and drop null valued rows\"\"\"\n # convert to numpy arrays\n if isinstance(X, pd.DataFrame):\n X = X.to_numpy()\n if isinstance(y, pd.Series) or isinstance(y, pd.DataFrame):\n y = y.to_numpy()\n\n # remove rows with NaNs\n if self.drop_na:\n idx = ~(pd.isnull(X).any(axis=1) 
| pd.isnull(y))\n X, y = X[idx], y[idx]\n\n return X, y\n\n def get_result(self):\n \"\"\"get result of the analysis.\n\n Returns:\n (xr.DataArray): A 4d named array containing the result metrics.\n \"\"\"\n return self.results\n\n def get_result_as_df(self, metric=None, train=False, mean_folds=True):\n \"\"\"Get results as a pandas dataframe.\n\n Args:\n metric (str): Enter the metric string for which the result should\n be displayed. Defaults to the first name in `metric_names`.\n train (bool): If true, will also return the train scores. Ignored if `use_test_set` is False.\n mean_folds (bool): If true, will return mean and std deviation of the k-fold results, otherwise returns all folds.\n Ignored if `use_test_set` is True.\n\n Returns:\n (pd.DataFrame): Pandas dataframe with datasets for rows.\n When `use_test_set` is True, the columns contain the train and test results\n otherwise the mean and standard deviation of the k-fold validation is returned.\n If `mean_folds` is set to False, all folds scores are returned.\n \"\"\"\n if not self.metric_names:\n raise AttributeError(\"No results available to show.\")\n\n if not metric:\n metric = self.metric_names[-1]\n\n dataset_names = [self._get_dataset_name(data) for data in self.datasets]\n method_names = [name for (name, _) in self.__methods]\n index = dataset_names\n\n if self.use_test_set:\n if train: # train & test split scores\n columns = pd.MultiIndex.from_product([method_names, [\"train\", \"test\"]])\n\n df = pd.DataFrame(columns=columns, index=index)\n df.loc[:, (slice(None), \"train\")] = self.results.loc[\n :, :, metric, \"train\"\n ].values\n df.loc[:, (slice(None), \"test\")] = self.results.loc[\n :, :, metric, \"test\"\n ].values\n else: # only test split scores\n columns = method_names\n\n df = pd.DataFrame(\n self.results.loc[:, :, metric, \"test\"].values,\n columns=columns,\n index=index,\n )\n\n else: # return mean & standard deviation across folds\n if mean_folds:\n columns = pd.MultiIndex.from_product([method_names, [\"mean\", \"std\"]])\n\n df = pd.DataFrame(columns=columns, index=index)\n df.loc[:, (slice(None), \"mean\")] = (\n self.results.loc[:, :, metric].mean(\"splits\").values\n )\n df.loc[:, (slice(None), \"std\")] = (\n self.results.loc[:, :, metric].std(\"splits\").values\n )\n else:\n n_folds = self.results.shape[-1]\n fold_cols = [\"fold_\" + str(i + 1) for i in range(n_folds)]\n columns = pd.MultiIndex.from_product([method_names, fold_cols])\n\n df = pd.DataFrame(columns=columns, index=index)\n for col in fold_cols:\n df.loc[:, (slice(None), col)] = self.results.loc[:, :, metric, col]\n\n df = df.rename_axis(index=\"datasets\")\n return df\n\n def plot_results(self, metric=None, ax=None):\n \"\"\"Plot results as a bar plot.\n\n Args:\n metric (str): Enter the metric string for which the result should\n be displayed.\n ax (matplotlib Axes): Axes in which to draw the plot, otherwise use the currently-active Axes.\n\n Returns:\n (matplotlib Axes): Axes containing the plot.\n \"\"\"\n # if ax is None:\n # ax = plt.gca()\n if not self.metric_names:\n raise AttributeError(\"No results available to show.\")\n\n metric = metric if metric else self.metric_names[0]\n\n if self.use_test_set: # only test set\n df = self.get_result_as_df(metric)\n df_bar = df.reset_index().melt(\n id_vars=[\"datasets\"], var_name=\"methods\", value_name=metric\n )\n return sns.barplot(\n data=df_bar, x=\"datasets\", y=metric, hue=\"methods\", ax=ax\n )\n\n else: # k-fold validation\n df = self.get_result_as_df(metric, 
mean_folds=False)\n df_bar = (\n df.stack([0, 1])\n .rename_axis(index=[\"datasets\", \"methods\", \"folds\"])\n .reset_index()\n .rename(columns={0: metric})\n )\n\n return sns.barplot(\n data=df_bar, x=\"datasets\", y=metric, capsize=0.1, hue=\"methods\", ax=ax\n )\n" ]
[ [ "pandas.isnull", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "pandas.MultiIndex.from_product", "numpy.array", "sklearn.utils.check_random_state" ] ]
text-machine-lab/HierarchicalTransformer
[ "639e620484f8b6e8b8b87fd9424130aeeaa74f65" ]
[ "transformer/Translator.py" ]
[ "''' This module will handle the text generation with beam search. '''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom transformer.Models import Transformer\nfrom transformer.Beam import Beam\nimport transformer.Constants as Constants\n\nclass Translator(object):\n ''' Load with trained model and handle the beam search '''\n\n def __init__(self, opt=None, model=None, beam_size=None, max_seq_len=None, n_best=1):\n assert opt is not None or model is not None\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n if beam_size is None:\n self.beam_size = opt.beam_size\n else:\n self.beam_size = beam_size\n\n self.max_seq_len = max_seq_len\n self.n_best = n_best\n\n if opt is not None:\n self.n_best = opt.n_best\n\n if model is None:\n checkpoint = torch.load(opt.model)\n model_opt = checkpoint['settings']\n self.model_opt = model_opt\n\n if self.max_seq_len is None:\n self.max_seq_len = self.model_opt.max_token_seq_len\n\n model = Transformer(\n model_opt.src_vocab_size,\n model_opt.tgt_vocab_size,\n model_opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=model_opt.proj_share_weight,\n emb_src_tgt_weight_sharing=model_opt.embs_share_weight,\n d_k=model_opt.d_k,\n d_v=model_opt.d_v,\n d_model=model_opt.d_model,\n d_word_vec=model_opt.d_word_vec,\n d_inner=model_opt.d_inner_hid,\n n_layers=model_opt.n_layers,\n n_head=model_opt.n_head,\n dropout=model_opt.dropout)\n\n model.load_state_dict(checkpoint['model'])\n print('[Info] Trained model state loaded.')\n\n model.word_prob_prj = nn.LogSoftmax(dim=1)\n\n model = model.to(self.device)\n\n self.model = model\n\n def sample_topk_batch(self, src_seq, src_pos, src_segs=None, k=20):\n\n def predict_word(dec_seq, dec_pos, src_seq, enc_output):\n dec_output, *_ = self.model.decoder(dec_seq, dec_pos, src_seq, enc_output)\n dec_output = dec_output[:, -1, :] # Pick the last step: (bh * bm) * d_h\n word_prob = F.softmax(self.model.tgt_word_prj(dec_output), dim=1)\n\n return word_prob\n\n with torch.no_grad():\n #-- Encode\n src_seq, src_pos = src_seq.to(self.device), src_pos.to(self.device)\n\n if src_segs is not None:\n src_segs.to(self.device)\n\n src_enc, *_ = self.model.encoder(src_seq, src_pos, src_segs=src_segs)\n\n batch_size = src_seq.shape[0]\n\n dec_seq= torch.LongTensor([Constants.BOS] * batch_size).view(batch_size, 1).to(src_seq.device)\n dec_pos = torch.LongTensor([[i for i in range(self.max_seq_len)] for _ in range(batch_size)]).to(src_seq.device)\n\n for i in range(self.max_seq_len):\n # assume word_prob shape [batch_size, vocab_size]\n word_prob = predict_word(dec_seq, dec_pos[:, :i+1], src_seq, src_enc)\n\n if k is None:\n k = word_prob.shape[-1]\n # [batch_size, k]\n topk, _ = torch.topk(word_prob, k, dim=1)\n # [batch_size,]\n kth_value = topk[:, -1].unsqueeze(1)\n word_topk = word_prob * (word_prob >= kth_value).float()\n # normalize\n word_prob_topk = word_topk / word_topk.norm(dim=1).unsqueeze(1)\n next_word = word_prob_topk.multinomial(1)\n dec_seq = torch.cat([dec_seq, next_word], 1)\n\n # remove go token before returning sampled sequence\n return dec_seq[:, 1:]\n\n def translate_batch(self, src_seq, src_pos, src_segs=None):\n ''' Translation work in one batch '''\n\n self.model.eval()\n\n def get_inst_idx_to_tensor_position_map(inst_idx_list):\n ''' Indicate the position of an instance in a tensor. 
'''\n return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}\n\n def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):\n ''' Collect tensor parts associated to active instances. '''\n\n _, *d_hs = beamed_tensor.size()\n n_curr_active_inst = len(curr_active_inst_idx)\n new_shape = (n_curr_active_inst * n_bm, *d_hs)\n\n beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)\n beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)\n beamed_tensor = beamed_tensor.view(*new_shape)\n\n return beamed_tensor\n\n def collate_active_info(\n src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list):\n # Sentences which are still active are collected,\n # so the decoder will not run on completed sentences.\n n_prev_active_inst = len(inst_idx_to_position_map)\n active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]\n active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)\n\n active_src_seq = collect_active_part(src_seq, active_inst_idx, n_prev_active_inst, n_bm)\n active_src_enc = collect_active_part(src_enc, active_inst_idx, n_prev_active_inst, n_bm)\n active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)\n\n return active_src_seq, active_src_enc, active_inst_idx_to_position_map\n\n def beam_decode_step(\n inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):\n ''' Decode and update beam status, and then return active beam idx '''\n\n def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):\n dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]\n dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)\n dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)\n return dec_partial_seq\n\n def prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm):\n dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=self.device)\n dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)\n return dec_partial_pos\n\n def predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm):\n dec_output, *_ = self.model.decoder(dec_seq, dec_pos, src_seq, enc_output)\n dec_output = dec_output[:, -1, :] # Pick the last step: (bh * bm) * d_h\n word_prob = F.log_softmax(self.model.tgt_word_prj(dec_output), dim=1)\n word_prob = word_prob.view(n_active_inst, n_bm, -1)\n\n return word_prob\n\n def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):\n active_inst_idx_list = []\n for inst_idx, inst_position in inst_idx_to_position_map.items():\n is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])\n if not is_inst_complete:\n active_inst_idx_list += [inst_idx]\n\n return active_inst_idx_list\n\n n_active_inst = len(inst_idx_to_position_map)\n\n dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)\n dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)\n word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm)\n\n # Update the beam with predicted word prob information and collect incomplete instances\n active_inst_idx_list = collect_active_inst_idx_list(\n inst_dec_beams, word_prob, inst_idx_to_position_map)\n\n return active_inst_idx_list\n\n def collect_hypothesis_and_scores(inst_dec_beams, n_best):\n all_hyp, all_scores = [], []\n for inst_idx in range(len(inst_dec_beams)):\n scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()\n all_scores += 
[scores[:n_best]]\n\n hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]\n all_hyp += [hyps]\n return all_hyp, all_scores\n\n with torch.no_grad():\n #-- Encode\n src_seq, src_pos = src_seq.to(self.device), src_pos.to(self.device)\n\n if src_segs is not None:\n src_segs.to(self.device)\n\n src_enc, *_ = self.model.encoder(src_seq, src_pos, src_segs=src_segs)\n\n #-- Repeat data for beam search\n n_bm = self.beam_size\n n_inst, len_s, d_h = src_enc.size()\n src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)\n src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)\n\n #-- Prepare beams\n inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]\n\n #-- Bookkeeping for active or not\n active_inst_idx_list = list(range(n_inst))\n inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)\n\n #-- Decode\n for len_dec_seq in range(1, self.max_seq_len + 1):\n\n active_inst_idx_list = beam_decode_step(\n inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)\n\n if not active_inst_idx_list:\n break # all instances have finished their path to <EOS>\n\n src_seq, src_enc, inst_idx_to_position_map = collate_active_info(\n src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)\n\n batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, self.n_best)\n\n return batch_hyp, batch_scores\n" ]
[ [ "torch.LongTensor", "torch.nn.LogSoftmax", "torch.load", "torch.cat", "torch.no_grad", "torch.cuda.is_available", "torch.arange", "torch.topk", "torch.stack" ] ]
bstellato/mlopt
[ "b72930ab9e219a94895b731a306e3023e33d862e" ]
[ "mlopt/tests/test_parallel.py" ]
[ "import unittest\nimport numpy as np\nimport numpy.testing as npt\nfrom mlopt.optimizer import Optimizer\nfrom mlopt.settings import PYTORCH\nfrom mlopt.problem import Problem\nfrom mlopt.tests.settings import TEST_TOL as TOL\nfrom mlopt.sampling import uniform_sphere_sample\nimport pandas as pd\nimport cvxpy as cp\n\n\nclass TestParallel(unittest.TestCase):\n\n def test_parallel_vs_serial_learning(self):\n \"\"\"Test parallel VS serial learning\"\"\"\n\n # Generate data\n np.random.seed(1)\n T = 5\n M = 2.\n h = 1.\n c = 1.\n p = 1.\n x_init = 2.\n radius = 2.\n n_train = 1000 # Number of samples\n\n # Define problem\n x = cp.Variable(T+1)\n u = cp.Variable(T)\n\n # Define parameter and sampling points\n d = cp.Parameter(T, nonneg=True, name=\"d\")\n d_bar = 3. * np.ones(T)\n X_d = uniform_sphere_sample(d_bar, radius, n=n_train)\n df = pd.DataFrame({'d': list(X_d)})\n\n # Constaints\n constraints = [x[0] == x_init]\n for t in range(T):\n constraints += [x[t+1] == x[t] + u[t] - d[t]]\n constraints += [u >= 0, u <= M]\n\n # Objective\n cost = cp.sum(cp.maximum(h * x, -p * x)) + c * cp.sum(u)\n\n # Define problem\n cvxpy_problem = cp.Problem(cp.Minimize(cost), constraints)\n problem = Problem(cvxpy_problem)\n\n # Solve for all theta in serial\n results_serial = problem.solve_parametric(df,\n parallel=False)\n\n # Solve for all theta in parallel\n results_parallel = problem.solve_parametric(df,\n parallel=True)\n\n # Assert all results match\n for i in range(n_train):\n serial = results_serial[i]\n parallel = results_parallel[i]\n\n # Compare x\n npt.assert_array_almost_equal(serial['x'],\n parallel['x'],\n decimal=TOL)\n # Compare cost\n npt.assert_array_almost_equal(serial['cost'],\n parallel['cost'],\n decimal=TOL)\n\n # Compare strategy\n self.assertTrue(serial['strategy'] == parallel['strategy'])\n\n def test_parallel_resolve(self):\n \"\"\"Test parallel resolve (to avoid hanging)\"\"\"\n\n np.random.seed(1)\n # This needs to work for different\n p = 10\n n = p * 10\n F = np.random.randn(n, p)\n D = np.diag(np.random.rand(n)*np.sqrt(p))\n Sigma = F.dot(F.T) + D\n gamma = 1.0\n mu = cp.Parameter(n, name='mu')\n x = cp.Variable(n)\n cost = - mu @ x + gamma * cp.quad_form(x, Sigma)\n constraints = [cp.sum(x) == 1, x >= 0]\n\n # Define optimizer\n problem = cp.Problem(cp.Minimize(cost), constraints)\n m = Optimizer(problem, name=\"portfolio\")\n\n '''\n Sample points\n '''\n theta_bar = np.random.randn(n)\n radius = 1.0\n\n '''\n Train and solve\n '''\n\n # Training and testing data\n n_train = 1000\n n_test = 1000\n # Sample points from multivariate ball\n X_d = uniform_sphere_sample(theta_bar, radius, n=n_train)\n X_d_test = uniform_sphere_sample(theta_bar, radius, n=n_test)\n df = pd.DataFrame({'mu': list(X_d)})\n df_test = pd.DataFrame({'mu': list(X_d_test)})\n\n # Train and test using pytorch\n m.train(df,\n parallel=True,\n filter_strategies=True,\n n_train_trials=10,\n learner=PYTORCH)\n m.performance(df_test, parallel=True)\n\n # Run parallel loop again to enforce instability\n # in multiprocessing\n m.performance(df_test, parallel=True)\n\n # DOES NOT WORK YET BECAUSE IT CANNOT PICKLE pardiso objects\n # def test_parallel_strategy_selection(self):\n # \"\"\"Choose best strategy in parallel\"\"\"\n # np.random.seed(1)\n # # This needs to work for different\n # p = 10\n # n = p * 10\n # F = np.random.randn(n, p)\n # D = np.diag(np.random.rand(n)*np.sqrt(p))\n # Sigma = F.dot(F.T) + D\n # gamma = 1.0\n # mu = cp.Parameter(n, name='mu')\n # x = cp.Variable(n)\n # cost = - mu * x + 
gamma * cp.quad_form(x, Sigma)\n # constraints = [cp.sum(x) == 1, x >= 0]\n #\n # # Define optimizer\n # # Force mosek to be single threaded\n # m = Optimizer(cp.Minimize(cost), constraints)\n #\n # '''\n # Sample points\n # '''\n # theta_bar = np.random.randn(n)\n # radius = 0.6\n #\n # '''\n # Train and solve\n # '''\n #\n # # Training and testing data\n # n_train = 100\n # n_test = 10\n #\n # # Sample points from multivariate ball\n # X_d = uniform_sphere_sample(theta_bar, radius, n=n_train)\n # df = pd.DataFrame({'mu': list(X_d)})\n # X_d_test = uniform_sphere_sample(theta_bar, radius, n=n_test)\n # df_test = pd.DataFrame({'mu': list(X_d_test)})\n #\n # # Train and test using pytorch\n # params = {\n # 'learning_rate': [0.01],\n # 'batch_size': [32],\n # 'n_epochs': [200]\n # }\n #\n # m.train(df, parallel=True, learner=PYTORCH, params=params)\n #\n # # Test\n # serial = m.solve(df_test, parallel=False)\n # parallel = m.solve(df_test, parallel=True)\n #\n # for i in range(n_test):\n # npt.assert_array_almost_equal(serial[i]['x'],\n # parallel[i]['x'],\n # decimal=TOL)\n #\n # npt.assert_array_almost_equal(serial[i]['cost'],\n # parallel[i]['cost'],\n # decimal=TOL)\n #\n" ]
[ [ "numpy.sqrt", "numpy.random.seed", "numpy.ones", "numpy.random.randn", "numpy.random.rand", "numpy.testing.assert_array_almost_equal" ] ]
pl8787/DeepRank_PyTorch
[ "ec24f83168aeadad7f89fd9fae5992abd0da89b1" ]
[ "deeprank/dataset.py" ]
[ "\"\"\"This is the Data Utils for Letor source code.\n\nThis module is used to read data from letor dataset.\n\"\"\"\n\n__version__ = '0.2'\n__author__ = 'Liang Pang'\n\nimport json\nimport random\nimport sys\n\nimport numpy as np\nimport torch\n\nfrom deeprank import utils\n\n\nclass DataLoader():\n\n def __init__(self, config_file):\n self.config_file = config_file\n self.config = json.loads( open(config_file).read() )\n\n self.Letor07Path = self.config['data_dir'] #'/home/pangliang/matching/data/letor/r5w/'\n\n self.word_dict, self.iword_dict = utils.read_word_dict(\n filename=self.Letor07Path + '/word_dict.txt')\n self.query_data = utils.read_data(\n filename=self.Letor07Path + '/qid_query.txt')\n self.doc_data = utils.read_data(\n filename=self.Letor07Path + '/docid_doc.txt')\n self.embed_dict = utils.read_embedding(\n filename=self.Letor07Path + '/embed_wiki-pdc_d50_norm')\n self.idf_dict = utils.read_embedding(\n filename=self.Letor07Path + '/embed.idf')\n\n self.feat_size = self.config['feat_size']\n\n self._PAD_ = len(self.word_dict)\n self.word_dict[self._PAD_] = '[PAD]'\n self.iword_dict['[PAD]'] = self._PAD_\n\n self.embed_dict[self._PAD_] = np.zeros((50, ), dtype=np.float32)\n self.W_init_embed = np.float32(\n np.random.uniform(-0.02, 0.02, [len(self.word_dict), 50]))\n self.embedding = utils.convert_embed_2_numpy(\n self.embed_dict, embed = self.W_init_embed)\n\n self.W_init_idf = np.float32(\n np.zeros([len(self.word_dict), 1]))\n self.idf_embedding = utils.convert_embed_2_numpy(\n self.idf_dict, embed = self.W_init_idf)\n\nclass PairGenerator():\n def __init__(self, rel_file, config):\n rel = utils.read_relation(filename=rel_file)\n self.pair_list = self.make_pair(rel)\n self.config = config\n\n def make_pair(self, rel):\n rel_set = {}\n pair_list = []\n for label, d1, d2 in rel:\n if d1 not in rel_set:\n rel_set[d1] = {}\n if label not in rel_set[d1]:\n rel_set[d1][label] = []\n rel_set[d1][label].append(d2)\n for d1 in rel_set:\n label_list = sorted(rel_set[d1].keys(), reverse = True)\n for hidx, high_label in enumerate(label_list[:-1]):\n for low_label in label_list[hidx+1:]:\n for high_d2 in rel_set[d1][high_label]:\n for low_d2 in rel_set[d1][low_label]:\n pair_list.append( (d1, high_d2, low_d2) )\n print('Pair Instance Count:', len(pair_list))\n return pair_list\n\n def get_batch(self, data1, data2):\n config = self.config\n X1 = np.zeros(\n (config['batch_size']*2, config['data1_maxlen']), dtype=np.int64)\n X1_len = np.zeros((config['batch_size']*2,), dtype=np.int64)\n X1_id = [''] * (config['batch_size']*2)\n X2 = np.zeros(\n (config['batch_size']*2, config['data2_maxlen']), dtype=np.int64)\n X2_len = np.zeros((config['batch_size']*2,), dtype=np.int64)\n X2_id = [''] * (config['batch_size']*2)\n Y = np.zeros((config['batch_size']*2,), dtype=np.int64)\n F = np.zeros(\n (config['batch_size']*2, config['feat_size']), dtype=np.float32)\n\n Y[::2] = 1\n X1[:] = config['fill_word']\n X2[:] = config['fill_word']\n for i in range(config['batch_size']):\n d1, d2p, d2n = random.choice(self.pair_list)\n\n d1_len = min(config['data1_maxlen'], len(data1[d1]))\n d2p_len = min(config['data2_maxlen'], len(data2[d2p]))\n d2n_len = min(config['data2_maxlen'], len(data2[d2n]))\n\n X1[i*2, :d1_len], X1_len[i*2] = data1[d1][:d1_len], d1_len\n X2[i*2, :d2p_len], X2_len[i*2] = data2[d2p][:d2p_len], d2p_len\n X1[i*2+1, :d1_len], X1_len[i*2+1] = data1[d1][:d1_len], d1_len\n X2[i*2+1, :d2n_len], X2_len[i*2+1] = data2[d2n][:d2n_len], d2n_len\n\n X1_id[i * 2], X2_id[i * 2] = d1, d2p\n 
X1_id[i * 2 + 1], X2_id[i * 2 + 1] = d1, d2n\n #F[i*2] = features[(d1, d2p)]\n #F[i*2+1] = features[(d1, d2n)]\n\n return X1, X1_len, X1_id, X2, X2_len, X2_id, Y, F\n\n\nclass ListGenerator():\n def __init__(self, rel_file, config):\n rel = utils.read_relation(filename=rel_file)\n self.list_list = self.make_list(rel)\n self.config = config\n\n def make_list(self, rel):\n list_list = {}\n for label, d1, d2 in rel:\n if d1 not in list_list:\n list_list[d1] = []\n list_list[d1].append( (label, d2) )\n for d1 in list_list:\n list_list[d1] = sorted(list_list[d1], reverse = True)\n print('List Instance Count:', len(list_list))\n return list_list.items()\n\n def get_batch(self, data1, data2):\n config = self.config\n for i, (d1, d2_list) in enumerate(self.list_list):\n X1 = np.zeros(\n (len(d2_list), config['data1_maxlen']), dtype=np.int64)\n X1_len = np.zeros((len(d2_list),), dtype=np.int64)\n X1_id = [''] * len(d2_list)\n X2 = np.zeros(\n (len(d2_list), config['data2_maxlen']), dtype=np.int64)\n X2_len = np.zeros((len(d2_list),), dtype=np.int64)\n X2_id = [''] * len(d2_list)\n Y = np.zeros((len(d2_list),), dtype= np.int64)\n F = np.zeros((len(d2_list), config['feat_size']), dtype=np.float32)\n X1[:] = config['fill_word']\n X2[:] = config['fill_word']\n d1_len = min(config['data1_maxlen'], len(data1[d1]))\n for j, (l, d2) in enumerate(d2_list):\n d2_len = min(config['data2_maxlen'], len(data2[d2]))\n X1[j, :d1_len], X1_len[j] = data1[d1][:d1_len], d1_len\n X2[j, :d2_len], X2_len[j] = data2[d2][:d2_len], d2_len\n Y[j] = l\n X1_id[j], X2_id[j] = d1, d2\n #F[j] = features[(d1, d2)]\n yield X1, X1_len, X1_id, X2, X2_len, X2_id, Y, F\n\n\nif __name__ == '__main__':\n loader = DataLoader('./config/letor07_mp_fold1.model')\n" ]
[ [ "numpy.zeros" ] ]
solstag/isgc-congress
[ "c1ea92cec81f14d8cdc355730d3d85cd248b75b6" ]
[ "src/sashimi/__init__.py" ]
[ "#! /usr/bin/env python\n\nimport pandas as pd\nfrom .clean import clean_text\n\n\"\"\" NOTES\n\n- Data columns:\n ['abstract_text',\n 'abstract_title',\n 'bibliography',\n 'cancelled',\n 'code',\n 'figure_legend_1',\n 'figure_legend_2',\n 'figure_title_1',\n 'figure_title_2',\n 'final_status',\n 'id',\n 'is_complete',\n 'keyword_1',\n 'keyword_2',\n 'keyword_3',\n 'keyword_4',\n 'keywords',\n 'legend_1',\n 'legend_2',\n 'not_to_remind',\n 'program_day',\n 'program_session',\n 'publish_onsite',\n 'relance_register',\n 'topic_1',\n 'topic_2',\n 'topic_3',\n 'user_id',\n 'validate',\n 'year']\n\"\"\"\n\n\ndef get_data(file_paths, clean=True):\n df = pd.concat(\n [\n pd.read_csv(file, sep=\"\\t\", dtype=str)\n for file in file_paths\n ],\n ignore_index=True,\n )\n print(f\"Found {len(df)} entries.\")\n df = df.dropna(subset=[\"abstract_text\"])\n print(f\" Kept {len(df)} entries containing an abstract.\")\n \n if clean:\n df[\"abstract_text\"] = clean_text(df)\n df = df.dropna(subset=[\"abstract_text\"])\n print(f\" Kept {len(df)} entries after cleaning.\")\n\n df.index.name = \"index\"\n return df\n" ]
[ [ "pandas.read_csv" ] ]
HakobJak/ml-mipt
[ "ab0cbd5d553e9da309bda54d35b4e93a8eb99696" ]
[ "homeworks_advanced/extra_lab_qa/util.py" ]
[ "\"\"\"Utility classes and methods.\n\nAuthor:\n Chris Chute ([email protected])\n\"\"\"\nimport logging\nimport os\nimport queue\nimport re\nimport shutil\nimport string\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport tqdm\nimport numpy as np\nimport ujson as json\n\nfrom collections import Counter\n\n\nclass SQuAD(data.Dataset):\n \"\"\"Stanford Question Answering Dataset (SQuAD).\n\n Each item in the dataset is a tuple with the following entries (in order):\n - context_idxs: Indices of the words in the context.\n Shape (context_len,).\n - context_char_idxs: Indices of the characters in the context.\n Shape (context_len, max_word_len).\n - question_idxs: Indices of the words in the question.\n Shape (question_len,).\n - question_char_idxs: Indices of the characters in the question.\n Shape (question_len, max_word_len).\n - y1: Index of word in the context where the answer begins.\n -1 if no answer.\n - y2: Index of word in the context where the answer ends.\n -1 if no answer.\n - id: ID of the example.\n\n Args:\n data_path (str): Path to .npz file containing pre-processed dataset.\n use_v2 (bool): Whether to use SQuAD 2.0 questions. Otherwise only use SQuAD 1.1.\n \"\"\"\n def __init__(self, data_path, use_v2=True):\n super(SQuAD, self).__init__()\n\n dataset = np.load(data_path)\n self.context_idxs = torch.from_numpy(dataset['context_idxs']).long()\n self.context_char_idxs = torch.from_numpy(dataset['context_char_idxs']).long()\n self.question_idxs = torch.from_numpy(dataset['ques_idxs']).long()\n self.question_char_idxs = torch.from_numpy(dataset['ques_char_idxs']).long()\n self.y1s = torch.from_numpy(dataset['y1s']).long()\n self.y2s = torch.from_numpy(dataset['y2s']).long()\n\n if use_v2:\n # SQuAD 2.0: Use index 0 for no-answer token (token 1 = OOV)\n batch_size, c_len, w_len = self.context_char_idxs.size()\n ones = torch.ones((batch_size, 1), dtype=torch.int64)\n self.context_idxs = torch.cat((ones, self.context_idxs), dim=1)\n self.question_idxs = torch.cat((ones, self.question_idxs), dim=1)\n\n ones = torch.ones((batch_size, 1, w_len), dtype=torch.int64)\n self.context_char_idxs = torch.cat((ones, self.context_char_idxs), dim=1)\n self.question_char_idxs = torch.cat((ones, self.question_char_idxs), dim=1)\n\n self.y1s += 1\n self.y2s += 1\n\n # SQuAD 1.1: Ignore no-answer examples\n self.ids = torch.from_numpy(dataset['ids']).long()\n self.valid_idxs = [idx for idx in range(len(self.ids))\n if use_v2 or self.y1s[idx].item() >= 0]\n\n def __getitem__(self, idx):\n idx = self.valid_idxs[idx]\n example = (self.context_idxs[idx],\n self.context_char_idxs[idx],\n self.question_idxs[idx],\n self.question_char_idxs[idx],\n self.y1s[idx],\n self.y2s[idx],\n self.ids[idx])\n\n return example\n\n def __len__(self):\n return len(self.valid_idxs)\n\n\ndef collate_fn(examples):\n \"\"\"Create batch tensors from a list of individual examples returned\n by `SQuAD.__getitem__`. Merge examples of different length by padding\n all examples to the maximum length in the batch.\n\n Args:\n examples (list): List of tuples of the form (context_idxs, context_char_idxs,\n question_idxs, question_char_idxs, y1s, y2s, ids).\n\n Returns:\n examples (tuple): Tuple of tensors (context_idxs, context_char_idxs, question_idxs,\n question_char_idxs, y1s, y2s, ids). 
All of shape (batch_size, ...), where\n the remaining dimensions are the maximum length of examples in the input.\n\n Adapted from:\n https://github.com/yunjey/seq2seq-dataloader\n \"\"\"\n def merge_0d(scalars, dtype=torch.int64):\n return torch.tensor(scalars, dtype=dtype)\n\n def merge_1d(arrays, dtype=torch.int64, pad_value=0):\n lengths = [(a != pad_value).sum() for a in arrays]\n padded = torch.zeros(len(arrays), max(lengths), dtype=dtype)\n for i, seq in enumerate(arrays):\n end = lengths[i]\n padded[i, :end] = seq[:end]\n return padded\n\n def merge_2d(matrices, dtype=torch.int64, pad_value=0):\n heights = [(m.sum(1) != pad_value).sum() for m in matrices]\n widths = [(m.sum(0) != pad_value).sum() for m in matrices]\n padded = torch.zeros(len(matrices), max(heights), max(widths), dtype=dtype)\n for i, seq in enumerate(matrices):\n height, width = heights[i], widths[i]\n padded[i, :height, :width] = seq[:height, :width]\n return padded\n\n # Group by tensor type\n context_idxs, context_char_idxs, \\\n question_idxs, question_char_idxs, \\\n y1s, y2s, ids = zip(*examples)\n\n # Merge into batch tensors\n context_idxs = merge_1d(context_idxs)\n context_char_idxs = merge_2d(context_char_idxs)\n question_idxs = merge_1d(question_idxs)\n question_char_idxs = merge_2d(question_char_idxs)\n y1s = merge_0d(y1s)\n y2s = merge_0d(y2s)\n ids = merge_0d(ids)\n\n return (context_idxs, context_char_idxs,\n question_idxs, question_char_idxs,\n y1s, y2s, ids)\n\n\nclass AverageMeter:\n \"\"\"Keep track of average values over time.\n\n Adapted from:\n > https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n def __init__(self):\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n \"\"\"Reset meter.\"\"\"\n self.__init__()\n\n def update(self, val, num_samples=1):\n \"\"\"Update meter with new value `val`, the average of `num` samples.\n\n Args:\n val (float): Average value to update the meter with.\n num_samples (int): Number of samples that were averaged to\n produce `val`.\n \"\"\"\n self.count += num_samples\n self.sum += val * num_samples\n self.avg = self.sum / self.count\n\n\nclass EMA:\n \"\"\"Exponential moving average of model parameters.\n Args:\n model (torch.nn.Module): Model with parameters whose EMA will be kept.\n decay (float): Decay rate for exponential moving average.\n \"\"\"\n def __init__(self, model, decay):\n self.decay = decay\n self.shadow = {}\n self.original = {}\n\n # Register model parameters\n for name, param in model.named_parameters():\n if param.requires_grad:\n self.shadow[name] = param.data.clone()\n\n def __call__(self, model, num_updates):\n decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))\n for name, param in model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n new_average = \\\n (1.0 - decay) * param.data + decay * self.shadow[name]\n self.shadow[name] = new_average.clone()\n\n def assign(self, model):\n \"\"\"Assign exponential moving average of parameter values to the\n respective parameters.\n Args:\n model (torch.nn.Module): Model to assign parameter values.\n \"\"\"\n for name, param in model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n self.original[name] = param.data.clone()\n param.data = self.shadow[name]\n\n def resume(self, model):\n \"\"\"Restore original parameters to a model. 
That is, put back\n the values that were in each parameter at the last call to `assign`.\n Args:\n model (torch.nn.Module): Model to assign parameter values.\n \"\"\"\n for name, param in model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n param.data = self.original[name]\n\n\nclass CheckpointSaver:\n \"\"\"Class to save and load model checkpoints.\n\n Save the best checkpoints as measured by a metric value passed into the\n `save` method. Overwrite checkpoints with better checkpoints once\n `max_checkpoints` have been saved.\n\n Args:\n save_dir (str): Directory to save checkpoints.\n max_checkpoints (int): Maximum number of checkpoints to keep before\n overwriting old ones.\n metric_name (str): Name of metric used to determine best model.\n maximize_metric (bool): If true, best checkpoint is that which maximizes\n the metric value passed in via `save`. Otherwise, best checkpoint\n minimizes the metric.\n log (logging.Logger): Optional logger for printing information.\n \"\"\"\n def __init__(self, save_dir, max_checkpoints, metric_name,\n maximize_metric=False, log=None):\n super(CheckpointSaver, self).__init__()\n\n self.save_dir = save_dir\n self.max_checkpoints = max_checkpoints\n self.metric_name = metric_name\n self.maximize_metric = maximize_metric\n self.best_val = None\n self.ckpt_paths = queue.PriorityQueue()\n self.log = log\n self._print(f\"Saver will {'max' if maximize_metric else 'min'}imize {metric_name}...\")\n\n def is_best(self, metric_val):\n \"\"\"Check whether `metric_val` is the best seen so far.\n\n Args:\n metric_val (float): Metric value to compare to prior checkpoints.\n \"\"\"\n if metric_val is None:\n # No metric reported\n return False\n\n if self.best_val is None:\n # No checkpoint saved yet\n return True\n\n return ((self.maximize_metric and self.best_val < metric_val)\n or (not self.maximize_metric and self.best_val > metric_val))\n\n def _print(self, message):\n \"\"\"Print a message if logging is enabled.\"\"\"\n if self.log is not None:\n self.log.info(message)\n\n def save(self, step, model, metric_val, device):\n \"\"\"Save model parameters to disk.\n\n Args:\n step (int): Total number of examples seen during training so far.\n model (torch.nn.DataParallel): Model to save.\n metric_val (float): Determines whether checkpoint is best so far.\n device (torch.device): Device where model resides.\n \"\"\"\n ckpt_dict = {\n 'model_name': model.__class__.__name__,\n 'model_state': model.cpu().state_dict(),\n 'step': step\n }\n model.to(device)\n\n checkpoint_path = os.path.join(self.save_dir,\n f'step_{step}.pth.tar')\n torch.save(ckpt_dict, checkpoint_path)\n self._print(f'Saved checkpoint: {checkpoint_path}')\n\n if self.is_best(metric_val):\n # Save the best model\n self.best_val = metric_val\n best_path = os.path.join(self.save_dir, 'best.pth.tar')\n shutil.copy(checkpoint_path, best_path)\n self._print(f'New best checkpoint at step {step}...')\n\n # Add checkpoint path to priority queue (lowest priority removed first)\n if self.maximize_metric:\n priority_order = metric_val\n else:\n priority_order = -metric_val\n\n self.ckpt_paths.put((priority_order, checkpoint_path))\n\n # Remove a checkpoint if more than max_checkpoints have been saved\n if self.ckpt_paths.qsize() > self.max_checkpoints:\n _, worst_ckpt = self.ckpt_paths.get()\n try:\n os.remove(worst_ckpt)\n self._print(f'Removed checkpoint: {worst_ckpt}')\n except OSError:\n # Avoid crashing if checkpoint has been removed or protected\n pass\n\n\ndef load_model(model, 
checkpoint_path, gpu_ids, return_step=True):\n \"\"\"Load model parameters from disk.\n\n Args:\n model (torch.nn.DataParallel): Load parameters into this model.\n checkpoint_path (str): Path to checkpoint to load.\n gpu_ids (list): GPU IDs for DataParallel.\n return_step (bool): Also return the step at which checkpoint was saved.\n\n Returns:\n model (torch.nn.DataParallel): Model loaded from checkpoint.\n step (int): Step at which checkpoint was saved. Only if `return_step`.\n \"\"\"\n device = f\"cuda:{gpu_ids[0] if gpu_ids else 'cpu'}\"\n ckpt_dict = torch.load(checkpoint_path, map_location=device)\n\n # Build model, load parameters\n model.load_state_dict(ckpt_dict['model_state'])\n\n if return_step:\n step = ckpt_dict['step']\n return model, step\n\n return model\n\n\ndef get_available_devices():\n \"\"\"Get IDs of all available GPUs.\n\n Returns:\n device (torch.device): Main device (GPU 0 or CPU).\n gpu_ids (list): List of IDs of all GPUs that are available.\n \"\"\"\n gpu_ids = []\n if torch.cuda.is_available():\n gpu_ids += [gpu_id for gpu_id in range(torch.cuda.device_count())]\n device = torch.device(f'cuda:{gpu_ids[0]}')\n torch.cuda.set_device(device)\n else:\n device = torch.device('cpu')\n\n return device, gpu_ids\n\n\ndef masked_softmax(logits, mask, dim=-1, log_softmax=False):\n \"\"\"Take the softmax of `logits` over given dimension, and set\n entries to 0 wherever `mask` is 0.\n\n Args:\n logits (torch.Tensor): Inputs to the softmax function.\n mask (torch.Tensor): Same shape as `logits`, with 0 indicating\n positions that should be assigned 0 probability in the output.\n dim (int): Dimension over which to take softmax.\n log_softmax (bool): Take log-softmax rather than regular softmax.\n E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.\n\n Returns:\n probs (torch.Tensor): Result of taking masked softmax over the logits.\n \"\"\"\n mask = mask.type(torch.float32)\n masked_logits = mask * logits + (1 - mask) * -1e30\n softmax_fn = F.log_softmax if log_softmax else F.softmax\n probs = softmax_fn(masked_logits, dim)\n\n return probs\n\n\ndef visualize(tbx, pred_dict, eval_path, step, split, num_visuals):\n \"\"\"Visualize text examples to TensorBoard.\n\n Args:\n tbx (tensorboardX.SummaryWriter): Summary writer.\n pred_dict (dict): dict of predictions of the form id -> pred.\n eval_path (str): Path to eval JSON file.\n step (int): Number of examples seen so far during training.\n split (str): Name of data split being visualized.\n num_visuals (int): Number of visuals to select at random from preds.\n \"\"\"\n if num_visuals <= 0:\n return\n if num_visuals > len(pred_dict):\n num_visuals = len(pred_dict)\n\n visual_ids = np.random.choice(list(pred_dict), size=num_visuals, replace=False)\n\n with open(eval_path, 'r') as eval_file:\n eval_dict = json.load(eval_file)\n for i, id_ in enumerate(visual_ids):\n pred = pred_dict[id_] or 'N/A'\n example = eval_dict[str(id_)]\n question = example['question']\n context = example['context']\n answers = example['answers']\n\n gold = answers[0] if answers else 'N/A'\n tbl_fmt = (f'- **Question:** {question}\\n'\n + f'- **Context:** {context}\\n'\n + f'- **Answer:** {gold}\\n'\n + f'- **Prediction:** {pred}')\n tbx.add_text(tag=f'{split}/{i+1}_of_{num_visuals}',\n text_string=tbl_fmt,\n global_step=step)\n\n\ndef save_preds(preds, save_dir, file_name='predictions.csv'):\n \"\"\"Save predictions `preds` to a CSV file named `file_name` in `save_dir`.\n\n Args:\n preds (list): List of predictions each of the form (id, 
start, end),\n where id is an example ID, and start/end are indices in the context.\n save_dir (str): Directory in which to save the predictions file.\n file_name (str): File name for the CSV file.\n\n Returns:\n save_path (str): Path where CSV file was saved.\n \"\"\"\n # Validate format\n if (not isinstance(preds, list)\n or any(not isinstance(p, tuple) or len(p) != 3 for p in preds)):\n raise ValueError('preds must be a list of tuples (id, start, end)')\n\n # Make sure predictions are sorted by ID\n preds = sorted(preds, key=lambda p: p[0])\n\n # Save to a CSV file\n save_path = os.path.join(save_dir, file_name)\n np.savetxt(save_path, np.array(preds), delimiter=',', fmt='%d')\n\n return save_path\n\n\ndef get_save_dir(base_dir, name, training, id_max=100):\n \"\"\"Get a unique save directory by appending the smallest positive integer\n `id < id_max` that is not already taken (i.e., no dir exists with that id).\n\n Args:\n base_dir (str): Base directory in which to make save directories.\n name (str): Name to identify this training run. Need not be unique.\n training (bool): Save dir. is for training (determines subdirectory).\n id_max (int): Maximum ID number before raising an exception.\n\n Returns:\n save_dir (str): Path to a new directory with a unique name.\n \"\"\"\n for uid in range(1, id_max):\n subdir = 'train' if training else 'test'\n save_dir = os.path.join(base_dir, subdir, f'{name}-{uid:02d}')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n return save_dir\n\n raise RuntimeError('Too many save directories created with the same name. \\\n Delete old save directories or use another name.')\n\n\ndef get_logger(log_dir, name):\n \"\"\"Get a `logging.Logger` instance that prints to the console\n and an auxiliary file.\n\n Args:\n log_dir (str): Directory in which to create the log file.\n name (str): Name to identify the logs.\n\n Returns:\n logger (logging.Logger): Logger instance for logging events.\n \"\"\"\n class StreamHandlerWithTQDM(logging.Handler):\n \"\"\"Let `logging` print without breaking `tqdm` progress bars.\n\n See Also:\n > https://stackoverflow.com/questions/38543506\n \"\"\"\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n # Create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Log everything (i.e., DEBUG level and above) to a file\n log_path = os.path.join(log_dir, 'log.txt')\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n\n # Log everything except DEBUG level (i.e., INFO level and above) to console\n console_handler = StreamHandlerWithTQDM()\n console_handler.setLevel(logging.INFO)\n\n # Create format for the logs\n file_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n file_handler.setFormatter(file_formatter)\n console_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n console_handler.setFormatter(console_formatter)\n\n # add the handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n return logger\n\n\ndef torch_from_json(path, dtype=torch.float32):\n \"\"\"Load a PyTorch Tensor from a JSON file.\n\n Args:\n path (str): Path to the JSON file to load.\n dtype (torch.dtype): Data type of loaded array.\n\n Returns:\n tensor (torch.Tensor): Tensor loaded from JSON file.\n \"\"\"\n with open(path, 'r') as fh:\n 
array = np.array(json.load(fh))\n\n tensor = torch.from_numpy(array).type(dtype)\n\n return tensor\n\n\ndef discretize(p_start, p_end, max_len=15, no_answer=False):\n \"\"\"Discretize soft predictions to get start and end indices.\n\n Choose the pair `(i, j)` of indices that maximizes `p1[i] * p2[j]`\n subject to `i <= j` and `j - i + 1 <= max_len`.\n\n Args:\n p_start (torch.Tensor): Soft predictions for start index.\n Shape (batch_size, context_len).\n p_end (torch.Tensor): Soft predictions for end index.\n Shape (batch_size, context_len).\n max_len (int): Maximum length of the discretized prediction.\n I.e., enforce that `preds[i, 1] - preds[i, 0] + 1 <= max_len`.\n no_answer (bool): Treat 0-index as the no-answer prediction. Consider\n a prediction no-answer if `preds[0, 0] * preds[0, 1]` is greater\n than the probability assigned to the max-probability span.\n\n Returns:\n start_idxs (torch.Tensor): Hard predictions for start index.\n Shape (batch_size,)\n end_idxs (torch.Tensor): Hard predictions for end index.\n Shape (batch_size,)\n \"\"\"\n if p_start.min() < 0 or p_start.max() > 1 \\\n or p_end.min() < 0 or p_end.max() > 1:\n raise ValueError('Expected p_start and p_end to have values in [0, 1]')\n\n # Compute pairwise probabilities\n p_start = p_start.unsqueeze(dim=2)\n p_end = p_end.unsqueeze(dim=1)\n p_joint = torch.matmul(p_start, p_end) # (batch_size, c_len, c_len)\n\n # Restrict to pairs (i, j) such that i <= j <= i + max_len - 1\n c_len, device = p_start.size(1), p_start.device\n is_legal_pair = torch.triu(torch.ones((c_len, c_len), device=device))\n is_legal_pair -= torch.triu(torch.ones((c_len, c_len), device=device),\n diagonal=max_len)\n if no_answer:\n # Index 0 is no-answer\n p_no_answer = p_joint[:, 0, 0].clone()\n is_legal_pair[0, :] = 0\n is_legal_pair[:, 0] = 0\n else:\n p_no_answer = None\n p_joint *= is_legal_pair\n\n # Take pair (i, j) that maximizes p_joint\n max_in_row, _ = torch.max(p_joint, dim=2)\n max_in_col, _ = torch.max(p_joint, dim=1)\n start_idxs = torch.argmax(max_in_row, dim=-1)\n end_idxs = torch.argmax(max_in_col, dim=-1)\n\n if no_answer:\n # Predict no-answer whenever p_no_answer > max_prob\n max_prob, _ = torch.max(max_in_col, dim=-1)\n start_idxs[p_no_answer > max_prob] = 0\n end_idxs[p_no_answer > max_prob] = 0\n\n return start_idxs, end_idxs\n\n\ndef convert_tokens(eval_dict, qa_id, y_start_list, y_end_list, no_answer):\n \"\"\"Convert predictions to tokens from the context.\n\n Args:\n eval_dict (dict): Dictionary with eval info for the dataset. This is\n used to perform the mapping from IDs and indices to actual text.\n qa_id (int): List of QA example IDs.\n y_start_list (list): List of start predictions.\n y_end_list (list): List of end predictions.\n no_answer (bool): Questions can have no answer. 
E.g., SQuAD 2.0.\n\n Returns:\n pred_dict (dict): Dictionary index IDs -> predicted answer text.\n sub_dict (dict): Dictionary UUIDs -> predicted answer text (submission).\n \"\"\"\n pred_dict = {}\n sub_dict = {}\n for qid, y_start, y_end in zip(qa_id, y_start_list, y_end_list):\n context = eval_dict[str(qid)][\"context\"]\n spans = eval_dict[str(qid)][\"spans\"]\n uuid = eval_dict[str(qid)][\"uuid\"]\n if no_answer and (y_start == 0 or y_end == 0):\n pred_dict[str(qid)] = ''\n sub_dict[uuid] = ''\n else:\n if no_answer:\n y_start, y_end = y_start - 1, y_end - 1\n start_idx = spans[y_start][0]\n end_idx = spans[y_end][1]\n pred_dict[str(qid)] = context[start_idx: end_idx]\n sub_dict[uuid] = context[start_idx: end_idx]\n return pred_dict, sub_dict\n\n\ndef metric_max_over_ground_truths(metric_fn, prediction, ground_truths):\n if not ground_truths:\n return metric_fn(prediction, '')\n scores_for_ground_truths = []\n for ground_truth in ground_truths:\n score = metric_fn(prediction, ground_truth)\n scores_for_ground_truths.append(score)\n return max(scores_for_ground_truths)\n\n\ndef eval_dicts(gold_dict, pred_dict, no_answer):\n avna = f1 = em = total = 0\n for key, value in pred_dict.items():\n total += 1\n ground_truths = gold_dict[key]['answers']\n prediction = value\n em += metric_max_over_ground_truths(compute_em, prediction, ground_truths)\n f1 += metric_max_over_ground_truths(compute_f1, prediction, ground_truths)\n if no_answer:\n avna += compute_avna(prediction, ground_truths)\n\n eval_dict = {'EM': 100. * em / total,\n 'F1': 100. * f1 / total}\n\n if no_answer:\n eval_dict['AvNA'] = 100. * avna / total\n\n return eval_dict\n\n\ndef compute_avna(prediction, ground_truths):\n \"\"\"Compute answer vs. no-answer accuracy.\"\"\"\n return float(bool(prediction) == bool(ground_truths))\n\n\n# All methods below this line are from the official SQuAD 2.0 eval script\n# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/\ndef normalize_answer(s):\n \"\"\"Convert to lowercase and remove punctuation, articles and extra whitespace.\"\"\"\n\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef get_tokens(s):\n if not s:\n return []\n return normalize_answer(s).split()\n\n\ndef compute_em(a_gold, a_pred):\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\n\n\ndef compute_f1(a_gold, a_pred):\n gold_toks = get_tokens(a_gold)\n pred_toks = get_tokens(a_pred)\n common = Counter(gold_toks) & Counter(pred_toks)\n num_same = sum(common.values())\n if len(gold_toks) == 0 or len(pred_toks) == 0:\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks)\n recall = 1.0 * num_same / len(gold_toks)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n" ]
[ [ "torch.ones", "torch.max", "torch.cuda.set_device", "torch.load", "torch.cat", "torch.from_numpy", "torch.tensor", "torch.matmul", "torch.save", "torch.cuda.is_available", "torch.device", "numpy.load", "torch.cuda.device_count", "numpy.array", "torch.argmax" ] ]
tonghe90/MinkowskiEngine-1
[ "0f1bc41a2c7ce85a25e575663fcc5582996ffbff" ]
[ "MinkowskiEngine/SparseTensor.py" ]
[ "# Copyright (c) Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport os\nimport warnings\nimport torch\nimport copy\nfrom enum import Enum\nfrom typing import Union\nfrom collections import Sequence\nimport numpy as np\n\nfrom Common import convert_to_int_list\nfrom MinkowskiCoords import CoordsKey, CoordsManager\nimport MinkowskiEngineBackend as MEB\n\n\nclass SparseTensorOperationMode(Enum):\n \"\"\"\n SEPARATE_COORDS_MANAGER: always create a new coordinate manager.\n SHARE_COORDS_MANAGER: always use the globally defined coordinate manager.\n \"\"\"\n SEPARATE_COORDS_MANAGER = 0\n SHARE_COORDS_MANAGER = 1\n\n\nclass SparseTensorQuantizationMode(Enum):\n \"\"\"\n RANDOM_SUBSAMPLE: Subsample one coordinate per each quantization block randomly.\n UNWEIGHTED_AVERAGE: average all features within a quantization block equally.\n \"\"\"\n RANDOM_SUBSAMPLE = 0\n UNWEIGHTED_AVERAGE = 1\n\n\n_sparse_tensor_operation_mode = SparseTensorOperationMode.SEPARATE_COORDS_MANAGER\n_global_coords_man = None\nCOORDS_MAN_DIFFERENT_ERROR = \"SparseTensors must share the same coordinate manager for this operation. Please refer to the SparseTensor creation API (https://stanfordvl.github.io/MinkowskiEngine/sparse_tensor.html) to share the coordinate manager, or set the sparse tensor operation mode with `set_sparse_tensor_operation_mode` to share it by default.\"\nCOORDS_KEY_DIFFERENT_ERROR = \"SparseTensors must have the same coords_key.\"\n\n\ndef set_sparse_tensor_operation_mode(operation_mode: SparseTensorOperationMode):\n assert isinstance(operation_mode, SparseTensorOperationMode), \\\n f\"Input must be an instance of SparseTensorOperationMode not {operation_mode}\"\n global _sparse_tensor_operation_mode\n _sparse_tensor_operation_mode = operation_mode\n\n\ndef sparse_tensor_operation_mode():\n global _sparse_tensor_operation_mode\n return copy.deepcopy(_sparse_tensor_operation_mode)\n\n\ndef clear_global_coords_man():\n r\"\"\"\n When using the operation mode:\n `SparseTensorOperationMode.SHARE_COORDS_MANAGER`, you must explicitly clear\n the coordinate manager when done using it.\n \"\"\"\n global _global_coords_man\n _global_coords_man = None\n\n\nclass SparseTensor():\n r\"\"\"A sparse tensor class. 
Can be accessed via\n :attr:`MinkowskiEngine.SparseTensor`.\n\n The :attr:`SparseTensor` class is the basic tensor in MinkowskiEngine. For\n the definition of a sparse tensor, please visit `the terminology page\n <https://stanfordvl.github.io/MinkowskiEngine/terminology.html#sparse-tensor>`_.\n We use the COOrdinate (COO) format to save a sparse tensor `[1]\n <http://groups.csail.mit.edu/commit/papers/2016/parker-thesis.pdf>`_. This\n representation is simply a concatenation of coordinates in a matrix\n :math:`C` and associated features :math:`F`.\n\n .. math::\n\n \\mathbf{C} = \\begin{bmatrix}\n b_1 & x_1^1 & x_1^2 & \\cdots & x_1^D \\\\\n \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\\n b_N & x_N^1 & x_N^2 & \\cdots & x_N^D\n \\end{bmatrix}, \\; \\mathbf{F} = \\begin{bmatrix}\n \\mathbf{f}_1^T\\\\\n \\vdots\\\\\n \\mathbf{f}_N^T\n \\end{bmatrix}\n\n where :math:`\\mathbf{x}_i \\in \\mathcal{Z}^D` is a :math:`D`-dimensional\n coordinate and :math:`b_i \\in \\mathcal{Z}_+` denotes the corresponding\n batch index. :math:`N` is the number of non-zero elements in the sparse\n tensor, each with the coordinate :math:`(b_i, x_i^1, x_i^2, \\cdots,\n x_i^D)`, and the associated feature :math:`\\mathbf{f}_i`. Internally, we\n handle the batch index as an additional spatial dimension.\n\n .. warning::\n\n Before MinkowskiEngine version 0.4, we put the batch indices on the last\n column. Thus, direct manipulation of coordinates will be incompatible\n with the latest versions. Instead, please use\n :attr:`MinkowskiEngine.utils.batched_coordinates` or\n :attr:`MinkowskiEngine.utils.sparse_collate` to create batched\n coordinates.\n\n Also, to access coordinates or features batch-wise, use the functions\n :attr:`coordinates_at(batch_index : int)`, :attr:`features_at(batch_index : int)` of\n a sparse tensor. Or to access all batch-wise coordinates and features,\n `decomposed_coordinates`, `decomposed_features`,\n `decomposed_coordinates_and_features` of a sparse tensor.\n\n Example::\n\n >>> coords, feats = ME.utils.sparse_collate([coords_batch0, coords_batch1], [feats_batch0, feats_batch1])\n >>> A = ME.SparseTensor(feats=feats, coords=coords)\n >>> coords_batch0 = A.coordinates_at(batch_index=0)\n >>> feats_batch1 = A.features_at(batch_index=1)\n >>> list_of_coords, list_of_features = A.decomposed_coordinates_and_features\n\n \"\"\"\n\n def __init__(\n self,\n feats,\n coords=None,\n coords_key=None,\n coords_manager=None,\n force_creation=False,\n allow_duplicate_coords=False,\n quantization_mode=SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,\n tensor_stride=1):\n r\"\"\"\n\n Args:\n :attr:`feats` (:attr:`torch.FloatTensor`,\n :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or\n :attr:`torch.cuda.DoubleTensor`): The features of the sparse\n tensor.\n\n :attr:`coords` (:attr:`torch.IntTensor`): The coordinates\n associated to the features. If not provided, :attr:`coords_key`\n must be provided.\n\n :attr:`coords_key` (:attr:`MinkowskiEngine.CoordsKey`): When the\n coordinates are already cached in the MinkowskiEngine, we could\n reuse the same coordinates by simply providing the coordinate hash\n key. In most cases, this process is done automatically. When you\n provide a `coords_key`, all other arguments will be ignored.\n\n :attr:`coords_manager` (:attr:`MinkowskiEngine.CoordsManager`): The\n MinkowskiEngine creates a dynamic computation graph and all\n coordinates inside the same computation graph are managed by a\n CoordsManager object. 
If not provided, the MinkowskiEngine will\n create a new computation graph. In most cases, this process is\n handled automatically and you do not need to use this. When you use\n it, make sure you understand what you are doing.\n\n :attr:`force_creation` (:attr:`bool`): Force creation of the\n coordinates. This allows generating a new set of coordinates even\n when there exists another set of coordinates with the same\n tensor stride. This could happen when you manually feed the same\n :attr:`coords_manager`.\n\n :attr:`allow_duplicate_coords` (:attr:`bool`): Allow duplicate\n coordinates when creating the sparse tensor. Internally, it will\n generate a new unique set of coordinates and use the features at the\n corresponding unique coordinates. In general, setting\n `allow_duplicate_coords=True` is not recommended as it could hide\n obvious errors in your data loading and preprocessing steps. Please\n refer to the quantization and data loading tutorial `here\n <https://stanfordvl.github.io/MinkowskiEngine/demo/training.html>`_\n for more details.\n\n :attr:`quantization_mode`\n (:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines the\n quantization method and how to define features of a sparse tensor.\n Please refer to :attr:`SparseTensorQuantizationMode` for details.\n\n :attr:`tensor_stride` (:attr:`int`, :attr:`list`,\n :attr:`numpy.array`, or :attr:`torch.Tensor`): The tensor stride\n of the current sparse tensor. By default, it is 1.\n\n \"\"\"\n assert isinstance(feats,\n torch.Tensor), \"Features must be a torch.Tensor\"\n assert isinstance(quantization_mode, SparseTensorQuantizationMode)\n self.quantization_mode = quantization_mode\n\n if coords is None and coords_key is None:\n raise ValueError('Either coords or coords_key must be provided')\n\n if coords_key is None:\n assert coords_manager is not None or coords is not None\n D = -1\n if coords_manager is None:\n D = coords.size(1) - 1\n else:\n D = coords_manager.D\n coords_key = CoordsKey(D)\n coords_key.setTensorStride(convert_to_int_list(tensor_stride, D))\n else:\n assert isinstance(coords_key, CoordsKey)\n\n if coords is not None:\n assert isinstance(coords, torch.Tensor), \\\n \"Coordinate must be of type torch.Tensor\"\n\n if not isinstance(coords, torch.IntTensor):\n warnings.warn(\n 'Coords implicitly converted to torch.IntTensor. ' +\n 'To remove this warning, use `.int()` to convert the ' +\n 'coords into a torch.IntTensor')\n coords = torch.floor(coords).int()\n\n if coords.device.type != 'cpu':\n warnings.warn(\n 'Coords implicitly converted to CPU type. 
' +\n 'To remove this warning, use `.cpu()` to convert the ' +\n 'coords into a CPU type')\n coords = coords.cpu()\n\n assert feats.shape[0] == coords.shape[0], \\\n \"The number of rows in features and coordinates do not match.\"\n\n coords = coords.contiguous()\n\n ##########################\n # Setup CoordsManager\n ##########################\n if coords_manager is None:\n # If set to share the coords man, use the global coords man\n global _sparse_tensor_operation_mode, _global_coords_man\n if _sparse_tensor_operation_mode == SparseTensorOperationMode.SHARE_COORDS_MANAGER:\n if _global_coords_man is None:\n _global_coords_man = CoordsManager(D=coords.size(1) - 1)\n coords_manager = _global_coords_man\n else:\n assert coords is not None, \"Initial coordinates must be given\"\n coords_manager = CoordsManager(D=coords.size(1) - 1)\n\n else:\n assert isinstance(coords_manager, CoordsManager)\n\n ##########################\n # Initialize coords\n ##########################\n if not coords_key.isKeySet() and coords is not None and len(coords) > 0:\n if quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:\n force_remap = True\n return_inverse = False\n elif quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:\n force_remap = True\n return_inverse = True\n\n self.unique_index, self.inverse_mapping = coords_manager.initialize(\n coords,\n coords_key,\n force_creation=force_creation,\n force_remap=force_remap,\n allow_duplicate_coords=allow_duplicate_coords,\n return_inverse=return_inverse)\n\n if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:\n self._CF = feats\n self._CC = coords\n feats = MEB.quantization_average_features(\n feats, torch.arange(len(feats)), self.inverse_mapping,\n len(self.unique_index), 0)\n coords = coords[self.unique_index]\n elif force_remap:\n assert len(self.unique_index) > 0\n self._CC = coords\n self._CF = feats\n coords = coords[self.unique_index]\n feats = feats[self.unique_index]\n\n elif coords is not None: # empty / invalid coords\n assert isinstance(coords, torch.IntTensor)\n assert coords.ndim == 2\n coords_manager.initialize(\n coords,\n coords_key,\n force_creation=force_creation,\n force_remap=False,\n allow_duplicate_coords=False,\n return_inverse=False)\n elif coords_key is not None:\n assert coords_key.isKeySet()\n\n self._F = feats.contiguous()\n self._C = coords\n self.coords_key = coords_key\n self.coords_man = coords_manager\n\n @property\n def tensor_stride(self):\n return self.coords_key.getTensorStride()\n\n @tensor_stride.setter\n def tensor_stride(self, p):\n r\"\"\"\n This function is not recommended to be used directly.\n \"\"\"\n p = convert_to_int_list(p, self.D)\n self.coords_key.setTensorStride(p)\n\n def _get_coords(self):\n return self.coords_man.get_coords(self.coords_key)\n\n @property\n def C(self):\n r\"\"\"The alias of :attr:`coords`.\n \"\"\"\n return self.coords\n\n @property\n def coords(self):\n r\"\"\"\n The coordinates of the current sparse tensor. The coordinates are\n represented as a :math:`N \\times (D + 1)` dimensional matrix where\n :math:`N` is the number of points in the space and :math:`D` is the\n dimension of the space (e.g. 3 for 3D, 4 for 3D + Time). 
Additional\n dimension of the column of the matrix C is for batch indices which is\n internally treated as an additional spatial dimension to disassociate\n different instances in a batch.\n \"\"\"\n if self._C is None:\n self._C = self._get_coords()\n return self._C\n\n @property\n def decomposed_coordinates(self):\n r\"\"\"Returns a list of coordinates per batch.\n\n Returns a list of torch.IntTensor :math:`C \\in \\mathcal{R}^{N_i\n \\times D}` coordinates per batch where :math:`N_i` is the number of non\n zero elements in the :math:`i`th batch index in :math:`D` dimensional\n space.\n \"\"\"\n row_inds_list = self.coords_man.get_row_indices_per_batch(\n self.coords_key)\n return [self.C[row_inds, 1:] for row_inds in row_inds_list]\n\n def coordinates_at(self, batch_index):\n r\"\"\"Return coordinates at the specified batch index.\n\n Returns a torch.IntTensor :math:`C \\in \\mathcal{R}^{N_i\n \\times D}` coordinates at the specified batch index where :math:`N_i`\n is the number of non zero elements in the :math:`i`th batch index in\n :math:`D` dimensional space.\n \"\"\"\n row_inds = self.coords_man.get_row_indices_at(self.coords_key,\n batch_index)\n return self.C[row_inds, 1:]\n\n @property\n def F(self):\n r\"\"\"The alias of :attr:`feats`.\n \"\"\"\n return self._F\n\n @property\n def feats(self):\n r\"\"\"\n The features of the current sparse tensor. The features are :math:`N\n \\times D_F` where :math:`N` is the number of points in the space and\n :math:`D_F` is the dimension of each feature vector. Please refer to\n :attr:`coords` to access the associated coordinates.\n \"\"\"\n return self._F\n\n @property\n def decomposed_features(self):\n r\"\"\"Returns a list of features per batch.\n\n Returns a list of torch.Tensor :math:`C \\in \\mathcal{R}^{N_i\n \\times N_F}` features per batch where :math:`N_i` is the number of non\n zero elements in the :math:`i`th batch index in :math:`D` dimensional\n space.\n \"\"\"\n row_inds_list = self.coords_man.get_row_indices_per_batch(\n self.coords_key)\n return [self._F[row_inds] for row_inds in row_inds_list]\n\n def features_at(self, batch_index):\n r\"\"\"Returns a feature matrix at the specified batch index.\n\n Returns a torch.Tensor :math:`C \\in \\mathcal{R}^{N\n \\times N_F}` feature matrix :math:`N` is the number of non\n zero elements in the specified batch index and :math:`N_F` is the\n number of channels.\n \"\"\"\n row_inds = self.coords_man.get_row_indices_at(self.coords_key,\n batch_index)\n return self._F[row_inds]\n\n def coordinates_and_features_at(self, batch_index):\n r\"\"\"Returns a coordinate and feature matrix at the specified batch index.\n\n Returns a coordinate and feature matrix at the specified `batch_index`.\n The coordinate matrix is a torch.IntTensor :math:`C \\in \\mathcal{R}^{N\n \\times D}` where :math:`N` is the number of non zero elements in the\n specified batch index in :math:`D` dimensional space. 
The feature\n matrix is a torch.Tensor :math:`C \\in \\mathcal{R}^{N \\times N_F}`\n matrix where :math:`N` is the number of non zero elements in the specified\n batch index and :math:`N_F` is the number of channels.\n \"\"\"\n row_inds = self.coords_man.get_row_indices_at(self.coords_key,\n batch_index)\n return self.C[row_inds, 1:], self._F[row_inds]\n\n @property\n def decomposed_coordinates_and_features(self):\n r\"\"\"Returns a list of coordinates and a list of features per batch.\n\n \"\"\"\n row_inds_list = self.coords_man.get_row_indices_per_batch(\n self.coords_key)\n return [self.C[row_inds, 1:] for row_inds in row_inds_list], \\\n [self._F[row_inds] for row_inds in row_inds_list]\n\n @property\n def D(self):\n r\"\"\"\n The spatial dimension of the sparse tensor. This is equal to the number\n of columns of :attr:`C` minus 1.\n \"\"\"\n return self.coords_key.D\n\n @property\n def dimension(self):\n r\"\"\"Alias of :attr:`D`\n \"\"\"\n return self.D\n\n @property\n def requires_grad(self):\n return self._F.requires_grad\n\n def requires_grad_(self, requires_grad: bool = True):\n self._F.requires_grad_(requires_grad)\n\n def float(self):\n self._F = self._F.float()\n\n def double(self):\n self._F = self._F.double()\n\n def set_tensor_stride(self, s):\n ss = convert_to_int_list(s, self.D)\n self.coords_key.setTensorStride(ss)\n\n def __repr__(self):\n return self.__class__.__name__ + '(' + os.linesep \\\n + ' Coords=' + str(self.C) + os.linesep \\\n + ' Feats=' + str(self.F) + os.linesep \\\n + ' coords_key=' + str(self.coords_key) \\\n + ' tensor_stride=' + str(self.coords_key.getTensorStride()) + os.linesep \\\n + ' coords_man=' + str(self.coords_man) \\\n + ' spatial dimension=' + str(self.D) + ')'\n\n def __len__(self):\n return len(self._F)\n\n def size(self):\n return self._F.size()\n\n @property\n def shape(self):\n return self._F.shape\n\n def to(self, device):\n self._F = self._F.to(device)\n return self\n\n def cpu(self):\n self._F = self._F.cpu()\n return self\n\n @property\n def device(self):\n return self._F.device\n\n @property\n def dtype(self):\n return self._F.dtype\n\n def get_device(self):\n return self._F.get_device()\n\n # Operation overloading\n def __iadd__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F += other.F\n return self\n\n def __isub__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F -= other.F\n return self\n\n def __imul__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F *= other.F\n return self\n\n def __idiv__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F /= other.F\n return self\n\n def __add__(self, other):\n r\"\"\"\n Add its features with the corresponding features of the other\n :attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`\n element-wise. 
For coordinates that exist on one sparse tensor but not\n on the other, features of the counterpart that do not exist will be set\n to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F + other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] += other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F + other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __sub__(self, other):\n r\"\"\"\n Subtract the feature of the other :attr:`MinkowskiEngine.SparseTensor`\n or a :attr:`torch.Tensor` from its corresponding feature element-wise.\n For coordinates that exist on one sparse tensor but not on the other,\n features of the counterpart that do not exist will be set to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F - other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] -= other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F - other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __mul__(self, other):\n r\"\"\"\n Multiply its feature of with the corresponding feature of the other\n :attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`\n element-wise. 
For coordinates that exist on one sparse tensor but not\n on the other, features of the counterpart that do not exist will be set\n to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F * other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] *= other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F * other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __truediv__(self, other):\n r\"\"\"\n Divide its feature by the corresponding feature of the other\n :attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`\n element-wise. For coordinates that exist on one sparse tensor but not\n on the other, features of the counterpart that do not exist will be set\n to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F / other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] /= other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F / other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __pow__(self, power):\n return SparseTensor(\n self._F**power,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n # Conversion functions\n def sparse(self, min_coords=None, max_coords=None, contract_coords=True):\n r\"\"\"Convert the :attr:`MinkowskiEngine.SparseTensor` to a torch sparse\n tensor.\n\n Args:\n :attr:`min_coords` (torch.IntTensor, optional): The min\n coordinates of the output sparse tensor. Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`max_coords` (torch.IntTensor, optional): The max coordinates\n of the output sparse tensor (inclusive). Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`contract_coords` (bool, optional): Given True, the output\n coordinates will be divided by the tensor stride to make features\n contiguous.\n\n Returns:\n :attr:`sparse_tensor` (torch.sparse.Tensor): the torch sparse tensor\n representation of the self in `[Batch Dim, Spatial Dims..., Feature\n Dim]`. 
The coordinate of each feature can be accessed via\n `min_coord + tensor_stride * [the coordinate of the dense tensor]`.\n\n :attr:`min_coords` (torch.IntTensor): the D-dimensional vector\n defining the minimum coordinate of the output sparse tensor. If\n :attr:`contract_coords` is True, the :attr:`min_coords` will also\n be contracted.\n\n :attr:`tensor_stride` (torch.IntTensor): the D-dimensional vector\n defining the stride between tensor elements.\n\n \"\"\"\n\n if min_coords is not None:\n assert isinstance(min_coords, torch.IntTensor)\n assert min_coords.numel() == self.D\n if max_coords is not None:\n assert isinstance(max_coords, torch.IntTensor)\n assert max_coords.numel() == self.D\n\n def torch_sparse_Tensor(coords, feats, size=None):\n if size is None:\n if feats.dtype == torch.float64:\n return torch.sparse.DoubleTensor(coords, feats)\n elif feats.dtype == torch.float32:\n return torch.sparse.FloatTensor(coords, feats)\n else:\n raise ValueError('Feature type not supported.')\n else:\n if feats.dtype == torch.float64:\n return torch.sparse.DoubleTensor(coords, feats, size)\n elif feats.dtype == torch.float32:\n return torch.sparse.FloatTensor(coords, feats, size)\n else:\n raise ValueError('Feature type not supported.')\n\n # Use int tensor for all operations\n tensor_stride = torch.IntTensor(self.tensor_stride)\n\n # New coordinates\n coords = self.C\n coords, batch_indices = coords[:, 1:], coords[:, 0]\n\n # TODO, batch first\n if min_coords is None:\n min_coords, _ = coords.min(0, keepdim=True)\n elif min_coords.ndim == 1:\n min_coords = min_coords.unsqueeze(0)\n\n assert (min_coords % tensor_stride).sum() == 0, \\\n \"The minimum coordinates must be divisible by the tensor stride.\"\n\n if max_coords is not None:\n if max_coords.ndim == 1:\n max_coords = max_coords.unsqueeze(0)\n assert (max_coords % tensor_stride).sum() == 0, \\\n \"The maximum coordinates must be divisible by the tensor stride.\"\n\n coords -= min_coords\n\n if coords.ndim == 1:\n coords = coords.unsqueeze(1)\n if batch_indices.ndim == 1:\n batch_indices = batch_indices.unsqueeze(1)\n\n # return the contracted tensor\n if contract_coords:\n coords = coords // tensor_stride\n if max_coords is not None:\n max_coords = max_coords // tensor_stride\n min_coords = min_coords // tensor_stride\n\n new_coords = torch.cat((batch_indices, coords), dim=1).long()\n\n size = None\n if max_coords is not None:\n size = max_coords - min_coords + 1 # inclusive\n # Squeeze to make the size one-dimensional\n size = size.squeeze()\n\n max_batch = max(self.coords_man.get_batch_indices())\n size = torch.Size([max_batch + 1, *size, self.F.size(1)])\n\n sparse_tensor = torch_sparse_Tensor(new_coords.t().to(self.F.device),\n self.F, size)\n tensor_stride = torch.IntTensor(self.tensor_stride)\n return sparse_tensor, min_coords, tensor_stride\n\n def dense(self, min_coords=None, max_coords=None, contract_coords=True):\n r\"\"\"Convert the :attr:`MinkowskiEngine.SparseTensor` to a torch dense\n tensor.\n\n Args:\n :attr:`min_coords` (torch.IntTensor, optional): The min\n coordinates of the output sparse tensor. Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`max_coords` (torch.IntTensor, optional): The max coordinates\n of the output sparse tensor (inclusive). 
Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`contract_coords` (bool, optional): Given True, the output\n coordinates will be divided by the tensor stride to make features\n contiguous.\n\n Returns:\n :attr:`dense_tensor` (torch.Tensor): the torch dense tensor\n representation of the self in `[Batch Dim, Spatial Dims..., Feature\n Dim]`. The coordinate of each feature can be accessed via\n `min_coord + tensor_stride * [the coordinate of the dense tensor]`.\n\n :attr:`min_coords` (torch.IntTensor): the D-dimensional vector\n defining the minimum coordinate of the output sparse tensor. If\n :attr:`contract_coords` is True, the :attr:`min_coords` will also\n be contracted.\n\n :attr:`tensor_stride` (torch.IntTensor): the D-dimensional vector\n defining the stride between tensor elements.\n\n \"\"\"\n if min_coords is not None:\n assert isinstance(min_coords, torch.IntTensor)\n assert min_coords.numel() == self.D\n if max_coords is not None:\n assert isinstance(max_coords, torch.IntTensor)\n assert max_coords.numel() == self.D\n\n # Use int tensor for all operations\n tensor_stride = torch.IntTensor(self.tensor_stride)\n\n # New coordinates\n coords = self.C\n coords, batch_indices = coords[:, 1:], coords[:, 0]\n\n # TODO, batch first\n if min_coords is None:\n min_coords, _ = coords.min(0, keepdim=True)\n elif min_coords.ndim == 1:\n min_coords = min_coords.unsqueeze(0)\n\n assert (min_coords % tensor_stride).sum() == 0, \\\n \"The minimum coordinates must be divisible by the tensor stride.\"\n\n if max_coords is not None:\n if max_coords.ndim == 1:\n max_coords = max_coords.unsqueeze(0)\n assert (max_coords % tensor_stride).sum() == 0, \\\n \"The maximum coordinates must be divisible by the tensor stride.\"\n\n coords -= min_coords\n\n if coords.ndim == 1:\n coords = coords.unsqueeze(1)\n\n # return the contracted tensor\n if contract_coords:\n coords = coords // tensor_stride\n if max_coords is not None:\n max_coords = max_coords // tensor_stride\n min_coords = min_coords // tensor_stride\n\n size = None\n nchannels = self.F.size(1)\n max_batch = max(self.coords_man.get_batch_indices())\n if max_coords is not None:\n size = max_coords - min_coords + 1 # inclusive\n # Squeeze to make the size one-dimensional\n size = size.squeeze()\n size = torch.Size([max_batch + 1, nchannels, *size])\n else:\n size = coords.max(0)[0] + 1\n size = torch.Size([max_batch + 1, nchannels, *size.numpy()])\n\n dense_F = torch.zeros(size, dtype=self.F.dtype, device=self.F.device)\n\n tcoords = coords.t().long()\n batch_indices = batch_indices.long()\n exec(\"dense_F[batch_indices, :, \" +\n \", \".join([f\"tcoords[{i}]\" for i in range(len(tcoords))]) +\n \"] = self.F\")\n\n tensor_stride = torch.IntTensor(self.tensor_stride)\n return dense_F, min_coords, tensor_stride\n\n def slice(self, X, slicing_mode=0):\n r\"\"\"\n\n Args:\n :attr:`X` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor\n that discretized the original input.\n\n :attr:`slicing_mode`: For future updates.\n\n Returns:\n :attr:`sliced_feats` (:attr:`torch.Tensor`): the resulting feature\n matrix that slices features on the discretized coordinates to the\n original continuous coordinates that generated the input X.\n\n Example::\n\n >>> # coords, feats from a data loader\n >>> print(len(coords)) # 227742\n >>> sinput = ME.SparseTensor(coords=coords, feats=feats, quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)\n >>> print(len(sinput)) # 161890 quantization results in fewer voxels\n >>> soutput = 
network(sinput)\n >>> print(len(soutput)) # 161890 Output with the same resolution\n >>> outputs = soutput.slice(sinput)\n >>> assert(outputs, torch.Tensor) # regular differentiable pytorch tensor\n >>> len(outputs) == len(coords) # recovers the original ordering and length\n \"\"\"\n # Currently only supports unweighted slice.\n return self.feats[X.inverse_mapping]\n\n\ndef _get_coords_key(\n input: SparseTensor,\n coords: Union[torch.IntTensor, CoordsKey, SparseTensor] = None,\n tensor_stride: Union[Sequence, np.ndarray, torch.IntTensor] = 1):\n r\"\"\"Process coords according to its type.\n \"\"\"\n if coords is not None:\n assert isinstance(coords, (CoordsKey, torch.IntTensor, SparseTensor))\n if isinstance(coords, torch.IntTensor):\n coords_key = input.coords_man.create_coords_key(\n coords,\n tensor_stride=tensor_stride,\n force_creation=True,\n force_remap=True,\n allow_duplicate_coords=True)\n elif isinstance(coords, SparseTensor):\n coords_key = coords.coords_key\n else: # CoordsKey type due to the previous assertion\n coords_key = coords\n else:\n coords_key = CoordsKey(input.D)\n return coords_key\n" ]
[ [ "torch.Size", "torch.floor", "torch.zeros", "torch.cat", "torch.sparse.DoubleTensor", "torch.IntTensor", "torch.sparse.FloatTensor" ] ]
team-oss/scrape-cran
[ "e46e4c4dfb0079c954112adaa9d230480c2970ae" ]
[ "src/dspg17/ckelling/ScrapingCode/source_forge/LastYear/cleaningJH.py" ]
[ "import pandas as pd\nimport datetime\n\ncategories = ['Audio and Video', 'Business and Enterprise', 'Communications', 'Development', 'Games', 'Graphics', 'Home and Education', 'Science and Engineering', 'Security and Utilities', 'System Administration']\ntimes = [datetime.datetime(year = 2016, month = 10, day = 12, hour = 23, minute = 18), \n datetime.datetime(year = 2016, month = 10, day = 12, hour = 22, minute = 56), \n datetime.datetime(year = 2016, month = 10, day = 13, hour = 8, minute = 37), \n datetime.datetime(year = 2016, month = 10, day = 13, hour = 8, minute = 52), \n datetime.datetime(year = 2016, month = 10, day = 13, hour = 9, minute = 36), \n datetime.datetime(year = 2016, month = 10, day = 13, hour = 13, minute = 28), \n datetime.datetime(year = 2016, month = 10, day = 13, hour = 9, minute = 10), \n datetime.datetime(year = 2016, month = 10, day = 12, hour = 8, minute = 47), \n datetime.datetime(year = 2016, month = 10, day = 13, hour = 13, minute = 42), \n datetime.datetime(year = 2016, month = 10, day = 14, hour = 8, minute = 13)]\n\ndf = pd.read_csv('SourceForge Data.csv')\ndf['last_update_days_ago'] = -1\ndf['last_update_hours_ago'] = -1\ndf['last_update_date'] = -1\nfor i in range(len(df)):\n time = times[categories.index(df['Category'][i])]\n if 'day' in df.last_update[i]:\n df.last_update_days_ago[i] = int(df.last_update[i].split()[0])\n df.last_update_hours_ago[i] = int(time.hour)+(df.last_update_days_ago[i]-1)*24\n temptime = time - datetime.timedelta(days = df.last_update_days_ago[i])\n df.last_update_date[i] = str(temptime.year)+'-'+str(temptime.month)+'-'+str(temptime.day)\n else:\n if 'hour' in df.last_update[i]:\n df.last_update_hours_ago[i] = int(df.last_update[i].split()[0])\n last_update_days_ago = int(time.hour)-(df.last_update_hours_ago[i])\n if last_update_days_ago > 0:\n df.last_update_days_ago[i] = 0\n else:\n df.last_update_days_ago[i] = 1\n temptime = time - datetime.timedelta(hours = df.last_update_hours_ago[i])\n df.last_update_date[i] = str(temptime.year)+'-'+str(temptime.month)+'-'+str(temptime.day)\n else:\n if 'minute' in df.last_update[i]: \n df.last_update_hours_ago[i] = 1\n df.last_update_days_ago[i] = 0\n df.last_update_date[i] = str(time.year)+'-'+str(time.month)+'-'+str(time.day)\n else:\n if 'decade' in df.last_update[i]: \n df.last_update_days_ago[i] = int(df.last_update[i].split()[0])*3650\n df.last_update_hours_ago[i] = df.last_update_days_ago[i]*24\n df.last_update_date[i] = str(int(time.year)-int(df.last_update[i].split()[0])*10)+'-'+str(time.month)+'-'+str(time.day)\n else:\n df.last_update_date[i] = df.last_update[i]\n dt = datetime.datetime(year = int(df.last_update[i].split('-')[0]), month = int(df.last_update[i].split('-')[1]), day = int(df.last_update[i].split('-')[2]))\n df.last_update_days_ago[i] = int((time - dt).days)\n df.last_update_hours_ago[i] = df.last_update_days_ago[i]*24\n print(float(i*100)/float(len(df)))\n\ndf.to_csv('SourceForge Data Clean.csv')\n\n" ]
[ [ "pandas.read_csv" ] ]
davidoj/RL_Aggregation
[ "d1f7fa01016660963e87dd4bcdb475a7b4aed466" ]
[ "Agents.py" ]
[ "'''\nReinforcement learning agents.\n\n\n\nDavid Johnston 2015\n'''\n\n\nimport numpy as np\nimport collections\nimport numbers\nimport random\n\nrandom.seed(1)\n\nclass OnlineAgent:\n \"\"\"\n Generic online agent class; executes e-greedy policy, looks up values\n \"\"\"\n \n def __init__(self,problem,epsilon=1e-1,tiles=False):\n self.epsilon = epsilon\n self.problem = problem\n self.qValues = problem.getZeroQTable()\n self.reset = self.problem.reset\n if tiles:\n self.getQValue = self.getQTile\n else:\n self.getQValue = self.getQDisc\n \n \n def executePolicy(self, state ,tiebreak='first'):\n\n qs = self.getQArray(state)\n \n test = random.random()\n if test < self.epsilon:\n return random.choice(range(len(qs)))\n elif tiebreak == 'first':\n return np.where(qs==max(qs))[0][0]\n elif tiebreak == 'random':\n return random.choice(np.where(qs==max(qs))[0]) \n\n \n def episode(self,deltaMin=1e-3,timeout=int(1e5),decayAlpha=True):\n '''\n Runs an episode, updates q-values and returns the length of the episode.\n '''\n \n for i in range(timeout):\n\n currentState = self.problem.getAgentState()\n \n action = self.executePolicy(currentState)\n\n self.preUpdate(currentState,action)\n \n if self.problem.isEpisodic:\n terminal, nextState, reward = self.problem.result(action)\n \n if terminal:\n\n self.update(currentState,nextState,action,reward,decayAlpha,\n terminal=1)\n self.problem.reset()\n return i\n else:\n \n nextState, reward = self.problem.result(action)\n \n\n self.update(currentState,nextState,action,reward,decayAlpha)\n\n return i\n \n def run_n_episodes(self,n,decayAlpha=False,timeout=int(1e5)):\n e_lengths = []\n e_avgs = np.zeros(int(np.log2(n)))\n j = 1\n\n for i in range(n):\n l = self.episode(timeout=timeout,decayAlpha=decayAlpha)\n if l<timeout:\n e_lengths.append(l)\n if i == 2**j:\n s = min(1000,(len(e_lengths)+1)/2)\n e_avgs[j-1]= np.average(e_lengths[-s:-1])\n print(np.average(e_lengths[-s:-1]))\n j += 1\n\n else:\n e_lengths.append(timeout)\n self.reset()\n print(\"Episode timed out {}\".format(l))\n return e_avgs\n\n def getQDisc(self,state,action):\n return self.qValues[state,action]\n \n def getQTile(self,state,action):\n return sum(self.qValues[state,action])\n\n \n def getValue(self,state):\n qValues = self.getQArray(state)\n return max(qValues)\n\n def getQArray(self,state):\n return np.array([self.getQValue(state,a) for a in self.problem.actions])\n \nclass QAgent(OnlineAgent):\n \"\"\"\n Q-learning agent \n \"\"\"\n def __init__(self,problem,alpha=1e-1,\n epsilon=1e-1):\n OnlineAgent.__init__(self,problem,epsilon=epsilon)\n self.alpha = problem.setAlpha(alpha)\n self.counter = problem.getZeroQTable()\n \n def update(self,state,nextState,action,reward,decayAlpha,terminal=0):\n '''\n Q-learning update. 
State is either an integer or list(array) of integers\n        '''\n        if terminal:\n            nextV = 0\n        else:\n            nextV = self.getValue(nextState)\n\n        currentQV = self.getQValue(state,action)\n\n        delta = reward - currentQV + self.problem.gamma*nextV\n\n        if decayAlpha:\n            alpha = self.alpha/(self.counter[state,action]+1)\n        else:\n            alpha = self.alpha\n\n        self.qValues[state,action] += alpha * delta\n        self.counter[state,action] += 1\n\n    def preUpdate(self,state,action):\n        return\n\n    \nclass SarsaLambda(OnlineAgent):\n    \"\"\"\n    SARSA with eligibility traces\n    \"\"\"\n    def __init__(self,problem,alpha,lamda=0.5,policy='e-greedy',\n                 epsilon=1e-1,debug=False):\n        OnlineAgent.__init__(self,problem,epsilon=epsilon)\n        self.alpha = problem.setAlpha(alpha)\n        self.e = problem.getZeroQTable()\n        self.counter = problem.getZeroQTable()\n        self.lamda = lamda\n\n    def reset(self):\n        self.problem.reset()\n        self.e = self.problem.getZeroQTable()\n\n    def preUpdate(self,state,action):\n        self.e *= self.problem.gamma*self.lamda\n\n        for a in self.problem.actions:\n            if a == action:\n                self.e[state,a] = 1\n            else:\n                self.e[state,a] = 0\n\n    def update(self,state,nextState,action,reward,decayAlpha,terminal=0):\n        '''\n        Sarsa(Lambda) update\n        '''\n        nextAction = self.executePolicy(nextState)\n        if terminal:\n            nextV=0\n        else:\n            nextV = self.getQValue(nextState,nextAction)\n        \n        delta = reward - self.getQValue(state,action)\n        delta += self.problem.gamma*nextV\n\n\n        if decayAlpha:\n            alpha = self.alpha*((self.counter[state,action]+1)**(-1))\n        else:\n            alpha = self.alpha\n\n        \n        self.counter[state,action] += 1\n        self.qValues += delta*alpha*self.e\n\n\n\n    \n\nclass VIAgent(): \n    \"\"\"\n    Offline value iteration agent\n    \"\"\"\n\n    def __init__(self,problem, policy=\"e-greedy\",epsilon=1e-1,timeout=int(1e6)):\n        '''\n        Must be initialised with a problem with known transition and reward matrices\n        '''\n\n        self.problem = problem\n        self.epsilon = epsilon\n        self.qValues = problem.getZeroQTable()\n        self.transitionMatrix = problem.transitions\n        self.rewardMatrix = problem.rewards\n        self.timeout = timeout\n        #if policy == \"e-greedy\":\n        self.policyMatrix = np.zeros(self.qValues.shape) + 1/self.qValues.shape[0]\n\n    def executePolicy(self, state, epsilon=1e-1,tiebreak='random'):\n\n        qs = self.getQArray(state)\n        \n        test = random.random()\n        if test < epsilon:\n            return random.choice(range(len(qs)))\n        elif tiebreak == 'first':\n            return np.where(qs==max(qs))[0][0]\n        elif tiebreak == 'random':\n            return random.choice(np.where(qs==max(qs))[0]) \n\n    \n    def getQValue(self,state,action):\n        '''\n        Get Q(s,a). 
S may be either an integer or a list of ints if \n        function approximation is used.\n        '''\n\n        if isinstance(state,collections.Container):\n            state=np.array(state)\n            return sum(self.qValues[state,action])\n        return self.qValues[state,action]\n\n    \n    def getValue(self,state):\n        qValues = self.getQArray(state)\n        return max(qValues)\n\n    def getQArray(self,state):\n        return np.array([self.getQValue(state,a) for a in self.problem.actions])\n\n    \n    def greedifyPolicy(self,epsilon=1e-1):\n        \n        old_policy = self.policyMatrix\n\n        self.policyMatrix = np.full_like(self.policyMatrix,epsilon/self.qValues.shape[0])\n\n        for state, policy in enumerate(self.policyMatrix):\n            policy_choice = self.executePolicy(state,epsilon=0)\n            policy[policy_choice] += 1-epsilon\n        \n        if (self.policyMatrix == old_policy).all():\n            return 1\n        else:\n            return 0\n\n    def VISweep(self):\n        \n        while True:\n            self.evalPolicy()\n\n            if self.greedifyPolicy(): \n                break\n\n    def evalPolicy(self, deltaMin=1e-5):\n        \n        delta = float('inf')\n        counter = 0\n\n        while delta>deltaMin and counter<self.timeout:\n            delta = 0\n            for state, aValues in enumerate(self.qValues):\n                for action, action_value in enumerate(aValues):\n                    temp = action_value\n                    states = range(len(self.qValues))\n                    new_values = [self.transitionMatrix[action,state,nstate]* \n                                  (self.rewardMatrix[action,state,nstate]+\n                                   self.problem.gamma*self.getValue(nstate))\n                                  for nstate in states ]\n                    new_action_value = sum(new_values)\n                    \n                    self.qValues[state,action] = new_action_value\n                    delta = max(delta, abs(temp-new_action_value))\n            counter += 1\n        if counter >= self.timeout-1:\n            print(\"Value iteration did not converge, delta = {}\".format(delta))\n\n" ]
[ [ "numpy.log2", "numpy.full_like", "numpy.average", "numpy.array", "numpy.zeros" ] ]
lu791019/iii_HA_Image_Recognition_DL
[ "d5f56d62af6d3aac1c216ca4ff309db08a8c9072" ]
[ "src/keras/keras/datasets/reuters.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"Reuters topic classification dataset.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom ..utils.data_utils import get_file\r\nfrom ..preprocessing.sequence import _remove_long_seq\r\nimport numpy as np\r\nimport json\r\nimport warnings\r\n\r\n\r\ndef load_data(path='reuters.npz', num_words=None, skip_top=0,\r\n maxlen=None, test_split=0.2, seed=113,\r\n start_char=1, oov_char=2, index_from=3, **kwargs):\r\n \"\"\"Loads the Reuters newswire classification dataset.\r\n\r\n # Arguments\r\n path: where to cache the data (relative to `~/.keras/dataset`).\r\n num_words: max number of words to include. Words are ranked\r\n by how often they occur (in the training set) and only\r\n the most frequent words are kept\r\n skip_top: skip the top N most frequently occurring words\r\n (which may not be informative).\r\n maxlen: truncate sequences after this length.\r\n test_split: Fraction of the dataset to be used as test data.\r\n seed: random seed for sample shuffling.\r\n start_char: The start of a sequence will be marked with this character.\r\n Set to 1 because 0 is usually the padding character.\r\n oov_char: words that were cut out because of the `num_words`\r\n or `skip_top` limit will be replaced with this character.\r\n index_from: index actual words with this index and higher.\r\n\r\n # Returns\r\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\r\n\r\n Note that the 'out of vocabulary' character is only used for\r\n words that were present in the training set but are not included\r\n because they're not making the `num_words` cut here.\r\n Words that were not seen in the training set but are in the test set\r\n have simply been skipped.\r\n \"\"\"\r\n # Legacy support\r\n if 'nb_words' in kwargs:\r\n warnings.warn('The `nb_words` argument in `load_data` '\r\n 'has been renamed `num_words`.')\r\n num_words = kwargs.pop('nb_words')\r\n if kwargs:\r\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\r\n\r\n path = get_file(path,\r\n origin='https://s3.amazonaws.com/text-datasets/reuters.npz',\r\n file_hash='87aedbeb0cb229e378797a632c1997b6')\r\n with np.load(path, allow_pickle=True) as f:\r\n xs, labels = f['x'], f['y']\r\n\r\n rng = np.random.RandomState(seed)\r\n indices = np.arange(len(xs))\r\n rng.shuffle(indices)\r\n xs = xs[indices]\r\n labels = labels[indices]\r\n\r\n if start_char is not None:\r\n xs = [[start_char] + [w + index_from for w in x] for x in xs]\r\n elif index_from:\r\n xs = [[w + index_from for w in x] for x in xs]\r\n\r\n if maxlen:\r\n xs, labels = _remove_long_seq(maxlen, xs, labels)\r\n\r\n if not num_words:\r\n num_words = max([max(x) for x in xs])\r\n\r\n # by convention, use 2 as OOV word\r\n # reserve 'index_from' (=3 by default) characters:\r\n # 0 (padding), 1 (start), 2 (OOV)\r\n if oov_char is not None:\r\n xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs]\r\n else:\r\n xs = [[w for w in x if skip_top <= w < num_words] for x in xs]\r\n\r\n idx = int(len(xs) * (1 - test_split))\r\n x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])\r\n x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])\r\n\r\n return (x_train, y_train), (x_test, y_test)\r\n\r\n\r\ndef get_word_index(path='reuters_word_index.json'):\r\n \"\"\"Retrieves the dictionary mapping words to word indices.\r\n\r\n # Arguments\r\n path: where to cache the data (relative to `~/.keras/dataset`).\r\n\r\n # 
Returns\r\n The word index dictionary.\r\n \"\"\"\r\n path = get_file(\r\n path,\r\n origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json',\r\n file_hash='4d44cc38712099c9e383dc6e5f11a921')\r\n with open(path) as f:\r\n return json.load(f)\r\n" ]
[ [ "numpy.load", "numpy.array", "numpy.random.RandomState" ] ]
jmettes/PyRate
[ "4b61cefca9522f4546cd45e1691eb00d3e10ec34" ]
[ "tests/test_refpixel.py" ]
[ "# This Python module is part of the PyRate software package.\n#\n# Copyright 2020 Geoscience Australia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis Python module contains tests for the refpixel.py PyRate module.\n\"\"\"\nimport os\nimport copy\nimport shutil\nfrom subprocess import run, PIPE\nfrom pathlib import Path\nimport pytest\nimport itertools\nimport numpy as np\nfrom numpy import nan, mean, std, isnan\n\nimport pyrate.configuration\nimport pyrate.core.refpixel\nfrom pyrate.core import config as cf\nfrom pyrate.core.refpixel import ref_pixel, _step, RefPixelError, ref_pixel_calc_wrapper, \\\n convert_geographic_coordinate_to_pixel_value, convert_pixel_value_to_geographic_coordinate\nfrom pyrate.core import shared, ifgconstants as ifc\nfrom pyrate import correct, conv2tif, prepifg\nfrom pyrate.configuration import Configuration\nfrom tests.common import TEST_CONF_ROIPAC, TEST_CONF_GAMMA, SML_TEST_DEM_TIF\nfrom tests.common import small_data_setup, MockIfg, copy_small_ifg_file_list, \\\n copy_and_setup_small_data, manipulate_test_conf, assert_two_dirs_equal, PYTHON3P6\n\n\n# TODO: figure out how editing resource.setrlimit fixes the error\n# to fix the open to many files error\n# https://stackoverflow.com/questions/18280612/ioerror-errno-24-too-many-open-files\n\n# default testing values\nREFNX = 5\nREFNY = 7\nMIN_FRAC = 0.7\nCHIPSIZE = 3\nPARALLEL = False\n\n\nclass TestReferencePixelInputTests:\n '''\n Verifies error checking capabilities of the reference pixel function\n '''\n\n @classmethod\n def setup_method(cls):\n cls.ifgs = small_data_setup()\n cls.params = cf.get_config_params(TEST_CONF_ROIPAC)\n cls.params[cf.REFNX] = REFNX\n cls.params[cf.REFNY] = REFNY\n cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE\n cls.params[cf.REF_MIN_FRAC] = MIN_FRAC\n cls.params[cf.PARALLEL] = PARALLEL\n\n def test_missing_chipsize(self):\n self.params[cf.REF_CHIP_SIZE] = None\n with pytest.raises(cf.ConfigException):\n ref_pixel(self.ifgs, self.params)\n\n def test_chipsize_valid(self):\n for illegal in [0, -1, -15, 1, 2, self.ifgs[0].ncols+1, 4, 6, 10, 20]:\n self.params[cf.REF_CHIP_SIZE] = illegal\n with pytest.raises(RefPixelError):\n ref_pixel(self.ifgs, self.params)\n\n def test_minimum_fraction_missing(self):\n self.params[cf.REF_MIN_FRAC] = None\n with pytest.raises(cf.ConfigException):\n ref_pixel(self.ifgs, self.params)\n\n def test_minimum_fraction_threshold(self):\n for illegal in [-0.1, 1.1, 1.000001, -0.0000001]:\n self.params[cf.REF_MIN_FRAC] = illegal\n with pytest.raises(RefPixelError):\n ref_pixel(self.ifgs, self.params)\n\n def test_search_windows(self):\n # 45 is max # cells a width 3 sliding window can iterate over\n for illegal in [-5, -1, 0, 46, 50, 100]:\n self.params[cf.REFNX] = illegal\n with pytest.raises(RefPixelError):\n ref_pixel(self.ifgs, self.params)\n\n # 40 is max # cells a width 3 sliding window can iterate over\n for illegal in [-5, -1, 0, 71, 85, 100]:\n self.params[cf.REFNY] = illegal\n with pytest.raises(RefPixelError):\n ref_pixel(self.ifgs, 
self.params)\n\n def test_missing_search_windows(self):\n self.params[cf.REFNX] = None\n with pytest.raises(cf.ConfigException):\n ref_pixel(self.ifgs, self.params)\n\n self.params[cf.REFNX] = REFNX\n self.params[cf.REFNY] = None\n\n with pytest.raises(cf.ConfigException):\n ref_pixel(self.ifgs, self.params)\n\n\nclass TestReferencePixelTests:\n \"\"\"\n Tests reference pixel search\n \"\"\"\n\n @classmethod\n def setup_method(cls):\n cls.params = cf.get_config_params(TEST_CONF_ROIPAC)\n cls.params[cf.OUT_DIR], cls.ifgs = copy_and_setup_small_data()\n cls.params[cf.REFNX] = REFNX\n cls.params[cf.REFNY] = REFNY\n cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE\n cls.params[cf.REF_MIN_FRAC] = MIN_FRAC\n cls.params[cf.PARALLEL] = PARALLEL\n\n def test_all_below_threshold_exception(self):\n # test failure when no valid stacks in dataset\n\n # rig mock data to be below threshold\n mock_ifgs = [MockIfg(i, 6, 7) for i in self.ifgs]\n for m in mock_ifgs:\n m.phase_data[:1] = nan\n m.phase_data[1:5] = 0.1\n m.phase_data[5:] = nan\n\n self.params[cf.REFNX] = 2\n self.params[cf.REFNY] = 2\n self.params[cf.REF_CHIP_SIZE] = CHIPSIZE\n self.params[cf.REF_MIN_FRAC] = MIN_FRAC\n self.params[cf.PARALLEL] = PARALLEL\n with pytest.raises(ValueError):\n ref_pixel(mock_ifgs, self.params)\n\n def test_refnxy_step_1(self):\n # test step of 1 for refnx|y gets the reference pixel for axis centre\n mock_ifgs = [MockIfg(i, 47, 72) for i in self.ifgs]\n for m in mock_ifgs:\n m.phase_data[:1] = 0.2\n m.phase_data[1:5] = 0.1\n m.phase_data[5:] = 0.3\n exp_refpx = (1, 1)\n self.params[cf.REFNX] = 1\n self.params[cf.REFNY] = 1\n self.params[cf.REF_CHIP_SIZE] = CHIPSIZE\n self.params[cf.REF_MIN_FRAC] = MIN_FRAC\n self.params[cf.PARALLEL] = PARALLEL\n res = ref_pixel(mock_ifgs, self.params)\n assert exp_refpx == res\n\n def test_large_window(self):\n # 5x5 view over a 5x5 ifg with 1 window/ref pix search\n chps = 5\n mockifgs = [MockIfg(i, chps, chps) for i in self.ifgs]\n self.params[cf.REFNX] = 1\n self.params[cf.REFNY] = 1\n self.params[cf.REF_CHIP_SIZE] = chps\n self.params[cf.REF_MIN_FRAC] = MIN_FRAC\n self.params[cf.PARALLEL] = PARALLEL\n res = ref_pixel(mockifgs, self.params)\n assert (2, 2) == res\n\n def test_step(self):\n # test different search windows to verify x/y step calculation\n\n # convenience testing function\n def assert_equal(actual, expected):\n for a, e in zip(actual, expected):\n assert a == e\n\n # start with simple corner only test\n width = 47\n radius = 2\n refnx = 2\n exp = [2, 25, 44]\n act = _step(width, refnx, radius)\n assert_equal(act, exp)\n\n # test with 3 windows\n refnx = 3\n exp = [2, 17, 32]\n act = _step(width, refnx, radius)\n assert_equal(act, exp)\n\n # test 4 search windows\n refnx = 4\n exp = [2, 13, 24, 35]\n act = _step(width, refnx, radius)\n assert_equal(act, exp)\n\n def test_ref_pixel(self):\n exp_refpx = (2, 25)\n self.params[cf.REFNX] = 2\n self.params[cf.REFNY] = 2\n self.params[cf.REF_CHIP_SIZE] = 5\n self.params[cf.REF_MIN_FRAC] = MIN_FRAC\n self.params[cf.PARALLEL] = PARALLEL\n res = ref_pixel(self.ifgs, self.params)\n assert res == exp_refpx\n\n # Invalidate first data stack, get new refpix coods & retest\n for i in self.ifgs:\n i.phase_data[:30, :50] = nan\n\n exp_refpx = (38, 2)\n res = ref_pixel(self.ifgs, self.params)\n assert res == exp_refpx\n\n\ndef _expected_ref_pixel(ifgs, cs):\n \"\"\"Helper function for finding reference pixel when refnx/y=2\"\"\"\n\n # calculate expected data\n data = [i.phase_data for i in ifgs] # len 17 list of arrays\n ul = [i[:cs, :cs] for 
i in data] # upper left corner stack\n ur = [i[:cs, -cs:] for i in data]\n ll = [i[-cs:, :cs] for i in data]\n lr = [i[-cs:, -cs:] for i in data]\n\n ulm = mean([std(i[~isnan(i)]) for i in ul]) # mean std of all the layers\n urm = mean([std(i[~isnan(i)]) for i in ur])\n llm = mean([std(i[~isnan(i)]) for i in ll])\n lrm = mean([std(i[~isnan(i)]) for i in lr])\n assert isnan([ulm, urm, llm, lrm]).any() is False\n\n # coords of the smallest mean is the result\n mn = [ulm, urm, llm, lrm]\n\n\nclass TestLegacyEqualityTest:\n\n @classmethod\n def setup_method(cls):\n cls.params = cf.get_config_params(TEST_CONF_ROIPAC)\n cls.params[cf.PARALLEL] = 0\n cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()\n conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')\n pyrate.configuration.write_config_file(params=cls.params, output_conf_file=conf_file)\n cls.params = Configuration(conf_file).__dict__\n cls.params_alt_ref_frac = copy.copy(cls.params)\n cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5\n cls.params_all_2s = copy.copy(cls.params)\n cls.params_all_2s[cf.REFNX] = 2\n cls.params_all_2s[cf.REFNY] = 2\n cls.params_chipsize_15 = copy.copy(cls.params_all_2s)\n cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15\n cls.params_all_1s = copy.copy(cls.params)\n cls.params_all_1s[cf.REFNX] = 1\n cls.params_all_1s[cf.REFNY] = 1\n cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7\n\n for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths): # hack\n p.sampled_path = q\n p.tmp_sampled_path = q\n\n @classmethod\n def teardown_method(cls):\n shutil.rmtree(cls.params[cf.OUT_DIR])\n\n def test_small_test_data_ref_pixel_lat_lon_provided(self):\n self.params[cf.REFX], self.params[cf.REFY] = 150.941666654, -34.218333314\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)\n assert refx == 38\n assert refy == 58\n assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])\n\n def test_small_test_data_ref_pixel(self):\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)\n assert refx == 38\n assert refy == 58\n assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])\n\n def test_small_test_data_ref_chipsize_15(self):\n\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)\n assert refx == 7\n assert refy == 7\n assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])\n\n def test_metadata(self):\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)\n for i in self.ifg_paths:\n ifg = shared.Ifg(i)\n ifg.open(readonly=True)\n md = ifg.meta_data\n for k, v in zip([ifc.PYRATE_REFPIX_X, ifc.PYRATE_REFPIX_Y, ifc.PYRATE_REFPIX_LAT,\n ifc.PYRATE_REFPIX_LON, ifc.PYRATE_MEAN_REF_AREA, ifc.PYRATE_STDDEV_REF_AREA],\n [str(refx), str(refy), 0, 0, 0, 0]):\n assert k in md # metadata present\n # assert values\n ifg.close()\n\n def test_small_test_data_ref_all_1(self):\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)\n assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])\n assert 1 == self.params_all_1s[cf.REFNX]\n assert 1 == self.params_all_1s[cf.REFNY]\n assert refx == 2\n assert refy == 2\n\n\nclass TestLegacyEqualityTestMultiprocessParallel:\n\n @classmethod\n def setup_method(cls):\n cls.params = cf.get_config_params(TEST_CONF_ROIPAC)\n cls.params[cf.PARALLEL] = 1\n cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()\n conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')\n pyrate.configuration.write_config_file(params=cls.params, 
output_conf_file=conf_file)\n cls.params = Configuration(conf_file).__dict__\n cls.params_alt_ref_frac = copy.copy(cls.params)\n cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5\n cls.params_all_2s = copy.copy(cls.params)\n cls.params_all_2s[cf.REFNX] = 2\n cls.params_all_2s[cf.REFNY] = 2\n cls.params_chipsize_15 = copy.copy(cls.params_all_2s)\n cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15\n cls.params_all_1s = copy.copy(cls.params)\n cls.params_all_1s[cf.REFNX] = 1\n cls.params_all_1s[cf.REFNY] = 1\n cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7\n\n for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths): # hack\n p.sampled_path = q\n p.tmp_sampled_path = q\n\n @classmethod\n def teardown_method(cls):\n shutil.rmtree(cls.params[cf.OUT_DIR])\n\n def test_small_test_data_ref_pixel(self):\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)\n assert refx == 38\n assert refy == 58\n assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])\n\n def test_more_small_test_data_ref_pixel(self):\n\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_alt_ref_frac)\n assert refx == 38\n assert refy == 58\n assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])\n\n def test_small_test_data_ref_pixel_all_2(self):\n\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_2s)\n assert refx == 25\n assert refy == 2\n assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])\n\n def test_small_test_data_ref_chipsize_15(self):\n\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)\n assert refx == 7\n assert refy == 7\n assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])\n\n def test_small_test_data_ref_all_1(self):\n\n refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)\n\n assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])\n assert 1 == self.params_all_1s[cf.REFNX]\n assert 1 == self.params_all_1s[cf.REFNY]\n assert refx == 2\n assert refy == 2\n\n\[email protected]\ndef test_error_msg_refpixel_out_of_bounds(tempdir, gamma_conf):\n \"check correct latitude/longitude refpixel error is raised when specified refpixel is out of bounds\"\n for x, (refx, refy) in zip(['longitude', 'latitude', 'longitude and latitude'],\n [(150., -34.218333314), (150.941666654, -34.), (150, -34)]):\n _, err = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=refx, refy=refy)\n msg = \"Supplied {} value is outside the bounds of the interferogram data\"\n assert msg.format(x) in err\n\n\[email protected]\ndef test_gamma_ref_pixel_search_vs_lat_lon(tempdir, gamma_conf):\n params_1, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=-1, refy=-1)\n params_2, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=150.941666654, refy=-34.218333314)\n assert_two_dirs_equal(params_1[cf.OUT_DIR], params_2[cf.OUT_DIR], [\"*_ifg.tif\", '*_coh.tif', 'dem.tif'], 35)\n\n\ndef _get_mlooked_files(gamma_conf, tdir, refx, refy):\n params = manipulate_test_conf(gamma_conf, tdir)\n params[cf.REFX] = refx\n params[cf.REFY] = refy\n output_conf_file = 'config.conf'\n output_conf = tdir.joinpath(output_conf_file)\n pyrate.configuration.write_config_file(params=params, output_conf_file=output_conf)\n params = Configuration(output_conf).__dict__\n conv2tif.main(params)\n params = Configuration(output_conf).__dict__\n prepifg.main(params)\n err = run(f\"pyrate correct -f {output_conf}\", shell=True, universal_newlines=True, stderr=PIPE).stderr\n return params, 
err\n\n\nclass TestRefPixelReuseLoadsSameFileAndPixels:\n\n @classmethod\n def setup_method(cls):\n cls.conf = TEST_CONF_GAMMA\n params = Configuration(cls.conf).__dict__\n conv2tif.main(params)\n params = Configuration(cls.conf).__dict__\n prepifg.main(params)\n params = Configuration(cls.conf).__dict__\n correct._copy_mlooked(params)\n cls.params = params\n\n @classmethod\n def teardown_method(cls):\n shutil.rmtree(cls.params[cf.OUT_DIR])\n\n @pytest.mark.slow()\n def test_ref_pixel_multiple_runs_reuse_from_disc(self, ref_pixel):\n params = self.params\n params[cf.REFX], params[cf.REFY] = ref_pixel\n params[cf.REF_PIXEL_FILE] = Configuration.ref_pixel_path(params)\n ref_pixel_calc_wrapper(params)\n\n ref_pixel_file = self.params[cf.REF_PIXEL_FILE]\n time_written = os.stat(ref_pixel_file).st_mtime\n assert self.params[cf.REFX_FOUND] == 38\n assert self.params[cf.REFY_FOUND] == 58\n # run again\n ref_pixel_calc_wrapper(self.params)\n ref_pixel_file = self.params[cf.REF_PIXEL_FILE]\n time_written_1 = os.stat(ref_pixel_file).st_mtime\n assert self.params[cf.REFX_FOUND] == 38\n assert self.params[cf.REFY_FOUND] == 58\n\n # run a third time\n ref_pixel_calc_wrapper(self.params)\n ref_pixel_file = self.params[cf.REF_PIXEL_FILE]\n time_written_2 = os.stat(ref_pixel_file).st_mtime\n assert time_written == time_written_2 == time_written_1\n assert self.params[cf.REFX], self.params[cf.REFY] == ref_pixel\n assert self.params[cf.REFX_FOUND] == 38\n assert self.params[cf.REFY_FOUND] == 58\n\n\[email protected](scope='module')\ndef x_y_pixel():\n dem = shared.DEM(SML_TEST_DEM_TIF)\n dem.open()\n Y = dem.nrows\n X = dem.ncols\n x = np.random.choice(range(X), 5)\n y = np.random.choice(range(Y), 5)\n return itertools.product(x, y) # returns a matrix of 5x5 random x, y pairs\n\n\ndef test_convert_pixel_value_to_geographic_coordinate(x_y_pixel):\n transform = dem_transform()\n for x, y in x_y_pixel:\n lon, lat = convert_pixel_value_to_geographic_coordinate(x, y, transform)\n out = run(f\"gdallocationinfo -geoloc {SML_TEST_DEM_TIF} {lon} {lat}\", shell=True, universal_newlines=True,\n stdout=PIPE).stdout\n xs = (x, x+1, x-1)\n ys = (y, y+1, y-1)\n assert any(f\"({xx}P,{yy}L)\" in out for xx, yy in itertools.product(xs, ys))\n\n\ndef dem_transform():\n dem = shared.DEM(SML_TEST_DEM_TIF)\n dem.open()\n transform = dem.dataset.GetGeoTransform()\n return transform\n\n\[email protected](PYTHON3P6, reason='Skipped in python3p6')\ndef test_convert_geographic_coordinate_to_pixel_value(x_y_pixel):\n transform = dem_transform()\n for x, y in x_y_pixel:\n lon, lat = convert_pixel_value_to_geographic_coordinate(x, y, transform)\n xp, yp = convert_geographic_coordinate_to_pixel_value(lon, lat, transform)\n assert (xp == x) & (yp == y)\n" ]
[ [ "numpy.isnan" ] ]
fhooton/FoodMine
[ "2120adc535df1df79c14c20eea8695a794cb5b52" ]
[ "src/tools/chemidr/id_map.py" ]
[ "# Author: Forrest Hooton\n\nimport numpy as np\nimport math\nimport urllib.request as request\nimport requests\nimport time\nimport json\nfrom lxml import etree\n\n\ndef cid2prop(cid, prop):\n\t# Create url for InChI query\n\turl = f\"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{str(int(cid))}/property/{prop}/JSON\"\n\n\tr = __safe_urlopen__(url)\n\n\tif r is None:\n\t\treturn np.nan\n\t\n\tprop_value = __safe_object_access__(json.loads(r)['PropertyTable']['Properties'][0], prop)\n\t\n\treturn prop_value\n\n\ndef cids2props(cids, prop, as_dict=False):\n\t\"\"\"\n\t Retrieves properties from PubChem using Pubchem CIDS\n\t See property section of https://pubchemdocs.ncbi.nlm.nih.gov/pug-rest$_Toc494865567\n\n\t Input\n\t ----------------------------------------------------------------\n\t cids : list\n\t list of pubchem cid's for properties (needs to be ints, but also included int typecast)\n\t as_dict : bool (default False)\n\t returns dictionary of info if true, list otherwise\n\n\t Returns\n\t ----------------------------------------------------------------\n\t props : dict or list\n\t dictionary with CID's as keys and properties as values if as_dict is True, otherwise list\n\t of properties to preserve order\n\t\"\"\"\n\tcids = __divide_list__([str(int(i)) for i in cids])\n\n\tif as_dict: props = {}\n\telse: props = []\n\n\t# Loop over divisions of ids to avoid overloading query\n\tfor ids in cids:\n\n\t\t# Create url for InChI query\n\t\turl = f\"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{','.join(ids)}/property/{prop}/JSON\"\n\n\t\tr = __safe_urlopen__(url)\n\n\t\tif r is None:\n\t\t\tnew_props = batch_error_handler(ids, prop, as_dict=as_dict)\n\n\t\t\tif as_dict: props.update(new_props)\n\t\t\telse: props += new_props\n\n\t\t\tcontinue\n\n\t\t# option to return InChIKey's as list or as dict (dict has certainty in case some cids aren't\n\t\t# retrieved, list preserves order)\n\t\tif as_dict:\n\t\t\tnew_dict = {\n\t\t\t\tp['CID'] : __safe_object_access__(p, prop) for p in json.loads(r)['PropertyTable']['Properties']\n\t\t\t}\n\t\t\tprops.update(new_dict)\n\n\t\telse:\n\t\t\tnew_list = [\n\t\t\t\t__safe_object_access__(p, prop) for p in json.loads(r)['PropertyTable']['Properties']\n\t\t\t]\n\t\t\tprops += new_list\n\n\treturn props\n\n\ndef cids2names(cids, as_dict=False):\n\t\"\"\"\n\t Retrieves properties from PubChem using Pubchem CIDS\n\t See property section of https://pubchemdocs.ncbi.nlm.nih.gov/pug-rest$_Toc494865567\n\n\t Input\n\t ----------------------------------------------------------------\n\t cids : list\n\t list of pubchem cid's for properties (needs to be ints, but also included int typecast)\n\t as_dict : bool (default False)\n\t returns dictionary of info if true, list otherwise\n\n\t Returns\n\t ----------------------------------------------------------------\n\t names : dict or list\n\t dictionary with CID's as keys and chemical names as values if as_dict is True, otherwise list\n\t of chemical names to preserve order\n\t\"\"\"\n\tcids = __divide_list__([str(int(i)) for i in cids])\n\n\tif as_dict: names = {}\n\telse: names = []\n\n\t# Loop over divisions of ids to avoid overloading query\n\tfor ids in cids:\n\n\t\t# Create url for InChI query\n\t\turl = f\"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{','.join(ids)}/synonyms/JSON\"\n\n\t\tr = __safe_urlopen__(url)\n\n\t\t# option to return InChIKey's as list or as dict (dict has certainty in case some cids aren't\n\t\t# retrieved, list preserves order)\n\t\tif 
as_dict:\n\t\t\tnew_dict = {\n\t\t\t\tp['CID'] : __safe_object_access__(p, 'Synonym') for p in json.loads(r)['InformationList']['Information']\n\t\t\t}\n\t\t\tnames.update(new_dict)\n\n\t\telse:\n\t\t\tnew_list = [\n\t\t\t\t__safe_object_access__(p, 'Synonym') for p in json.loads(r)['InformationList']['Information']\n\t\t\t]\n\t\t\tnames += new_list\n\n\treturn names\n\n\ndef batch_error_handler(cids, prop, as_dict=False):\n\tif as_dict: props = {}\n\telse: props = []\n\n\tfor cid in cids:\n\t\tval = cid2prop(cid, prop)\n\t\t\n\t\tif as_dict: props.update({cid : val})\n\t\telse: props += [val]\n\t\n\treturn props\n\n\ndef cids2inchis(cids, as_dict=False, use_prefix=False, keys = True):\n\t\"\"\"\n\t Retrieves InChIKeys from PubChem using Pubchem CIDS\n\n\t Input\n\t ----------------------------------------------------------------\n\t cids : list\n\t list of pubchem cid's for InChIKeys (needs to be ints, but also included int typecast)\n\t \tuse_prefix : bool (default False)\n\t \t\tonly return the prefix of inchikeys (before the first -), which contains the structural information\n\t \t\t(to find out more see https://www.inchi-trust.org/technical-faq-2/)\n\t \tkeys : bool (default True)\n\t \t\treturn inchikeys rather than the full inchi code\n\n\t Returns\n\t ----------------------------------------------------------------\n\t inchikeys : dict or list\n\t dictionary with CID's as keys and InChIKeys as values if as_dict is True, otherwise list\n\t of InChIKeys to preserve order\n\t\"\"\"\n\tif keys:\n\t\t# Create url for InChIKey query\n\t\tquery_type = 'InChIKey'\n\telse:\n\t\t# Create url for InChI quer\n\t\tquery_type = 'InChI'\n\t\n\tinchikeys = cids2props(cids, query_type, as_dict=as_dict)\n\n\tif use_prefix:\n\t\tif isinstance(inchikeys, dict): inchikeys = {cid : ikey.split('-')[0] for cid, ikey in inchikeys.items()}\n\t\telse: inchikeys = [ikey.split('-')[0] for ikey in inchikeys]\n\n\treturn inchikeys\n\n\ndef __safe_object_access__(obj, key):\n\tif key in obj:\n\t\treturn obj[key]\n\telse:\n\t\treturn np.nan\n\n\ndef cids2upacs(cids, as_dict=False):\n\tupacs = cids2props(cids, 'IUPACName', as_dict=as_dict)\n\n\treturn upacs\n\n\ndef cids2smiles(cids, as_dict=False):\n\tSMILES = cids2props(cids, 'CanonicalSMILES', as_dict=as_dict)\n\treturn SMILES\n\n\n# Divides list into even divisions with a maximum of 100 elements\ndef __divide_list__(ids):\n\tnum_divisions = int(math.ceil(len(ids) / 100))\n\n\tsplit_ids = np.array_split(np.asarray(ids), num_divisions)\n\tsplit_ids = [np.ndarray.tolist(split_ids[i]) for i in range(len(split_ids))]\n\n\treturn split_ids\n\n\ndef __safe_urlopen__(url):\n \"\"\"\n Retrieves information from url query without throwing errors, such as for values that do not\n exist or excessive querying. 
Designed for Pubchem and Pubmed apis\n\n    Input\n    ----------------------------------------------------------------\n    url : str\n        url to query\n\n    Returns\n    ----------------------------------------------------------------\n    response.content : str (maybe bytes) or None\n        Returns the response of a url query if it exists, else None\n    \"\"\"\n    try:\n        response = requests.get(url)\n    except TimeoutError:\n        time.sleep(.5)\n        return __safe_urlopen__(url)\n\n    if response.status_code == 200: # Successful\n        return response.content\n\n    elif response.status_code == 429: # Too many requests\n        time.sleep(.5)\n        return __safe_urlopen__(url)\n\n    elif response.status_code == 503: # PUGREST.ServerBusy\n        time.sleep(1)\n        return __safe_urlopen__(url)\n\n    elif response.status_code == 404: # PUGREST.NotFound (aka doesn't exist)\n        return None\n\n\n### Antiquated and might be removed in future\ndef cid2smile(cid):\n    \"\"\"\n    Retrieves a chemical SMILE from Pubchem using a cid\n\n    Input\n    ----------------------------------------------------------------\n    cid : str\n        cid for which to retrieve the chemical SMILE\n\n    Returns\n    ----------------------------------------------------------------\n    SMILE : str or None\n        Returns the chemical SMILE corresponding to the cid if it exists, else None\n    \"\"\"\n    url = \"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/\" + cid + \"/property/CanonicalSMILES/XML\"\n\n    xml = __safe_urlopen__(url)\n\n    if xml is None:\n        return np.nan\n\n    root = etree.fromstring(xml)\n    SMILE = root.findall(\".//{http://pubchem.ncbi.nlm.nih.gov/pug_rest}CanonicalSMILES\")[0].xpath('.//text()')[0]\n\n    return SMILE\n\n\ndef mesh2pid(mesh):\n\t\"\"\"\n\t    Retrieves pubchem id's (both cid and sid) from Pubchem by searching the substances\n\n\t    Input\n\t    ----------------------------------------------------------------\n\t    mesh : str\n\t        mesh for which to retrieve the Pubchem id's\n\n\t    Returns\n\t    ----------------------------------------------------------------\n\t    _ : dict (of dicts)\n\t        dictionary with mesh id as keys, and dictionaries of mesh ids with corresponding cids and sids as values\n\t\"\"\"\n\turl = f'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pcsubstance&term={mesh}&retmode=json'\n\n\tr = __safe_urlopen__(url)\n\n\tif r is not None:\n\t\tj = json.loads(r)\n\n\t\t# esearch returned at least one hit for this mesh id (count comes back as a string)\n\t\tif int(j['esearchresult']['count']) != 0:\n\n\t\t\tsid = j['esearchresult']['idlist'][0] # get first sid result\n\n\t\t\turl = f'https://pubchem.ncbi.nlm.nih.gov/rest/pug/substance/sid/{sid}/xml'\n\n\t\t\txml = __safe_urlopen__(url)\n\n\t\t\tif xml is None:\n\t\t\t\t# cid is unknown at this point, so report it as missing\n\t\t\t\treturn {mesh : {'mesh' : mesh, 'sid' : sid, 'cid' : np.nan}}\n\n\t\t\troot = etree.fromstring(xml)\n\n\t\t\tcids = root.findall(\".//{http://www.ncbi.nlm.nih.gov}PC-CompoundType_id_cid\")\n\n\t\t\tif len(cids) > 0:\n\t\t\t\tcid = cids[0].xpath('./text()')[0]\n\t\t\telse:\n\t\t\t\tcid = np.nan # No cids\n\n\t\t\treturn {mesh : {'mesh' : mesh, 'sid' : sid, 'cid' : cid}}\n\n\t\telse:\n\t\t\treturn {mesh : {'mesh' : mesh, 'sid' : np.nan, 'cid' : np.nan}}\n\n\telse:\n\t\treturn {mesh : {'mesh' : mesh, 'sid' : np.nan, 'cid' : np.nan}}\n\n\ndef cid2tax(cid, taxonomy='ChEBI'):\n\turl = f'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{cid}/classification/JSON'\n\n\tr = __safe_urlopen__(url)\n\n\tif r is None: return np.nan\n\n\tall_taxonomies = json.loads(r)['Hierarchies']['Hierarchy']\n\n\t# I think should only have one occurrence of taxonomy source name\n\traw_tax = [all_taxonomies[t] for t in range(len(all_taxonomies)) if 
all_taxonomies[t]['SourceName'] == taxonomy]\n\n\tif len(raw_tax) == 0: return np.nan\n\telse: raw_tax = raw_tax[0]\n\n\tnodes = raw_tax['Node']\n\ttax = []\n\n\tchebi_name = lambda x: x['Information']['Name']\n\t# str.lstrip strips a character set, not a prefix, so split on the literal 'CHEBI:' marker instead\n\tchebi_id = lambda x: int(x['Information']['URL'].split('CHEBI:')[-1])\n\tnode_num = lambda node_id: int(node_id.split('_')[-1]) # NodeIDs look like 'node_<int>'\n\n\tlast_node = nodes[0]['NodeID']\n\ttax.append( (chebi_name(nodes[0]), chebi_id(nodes[0])) )\n\tn = 1\n\n\t# bounds check first so a single-node hierarchy does not raise IndexError\n\twhile n < len(nodes) and node_num(last_node) >= node_num(nodes[n]['NodeID']):\n\t\ttax.append( (chebi_name(nodes[n]), chebi_id(nodes[n])) )\n\t\tlast_node = nodes[n]['NodeID']\n\t\tn += 1\n\n\treturn tax" ]
[ [ "numpy.asarray", "numpy.ndarray.tolist" ] ]
heumchri/bark
[ "867e1e4a289f185bae52d659b99abbf108fe1fd4" ]
[ "modules/world/tests/py_map_interface_tests.py" ]
[ "# Copyright (c) 2019 fortiss GmbH\n#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\nimport unittest\nimport time\nimport math\nimport filecmp\nimport matplotlib.pyplot as plt\nfrom bark.world.agent import *\nfrom bark.models.behavior import *\nfrom bark.world import *\nfrom bark.geometry import *\nfrom bark.models.dynamic import *\nfrom bark.models.execution import *\nfrom bark.geometry import *\nfrom bark.geometry.standard_shapes import *\nfrom modules.runtime.commons.parameters import ParameterServer\nfrom bark.world.opendrive import *\nfrom bark.world.map import *\nfrom modules.runtime.commons.xodr_parser import XodrParser\nfrom modules.runtime.viewer.matplotlib_viewer import MPViewer\nimport numpy as np\n\n\nclass EnvironmentTests(unittest.TestCase):\n def test_Crossing8Course(self):\n xodr_parser = XodrParser(\"modules/runtime/tests/data/Crossing8Course.xodr\")\n\n params = ParameterServer()\n world = World(params)\n\n map_interface = MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n world.set_map(map_interface)\n\n start_point = Point2d(0, -11)\n lanes_near_start = map_interface.find_nearest_lanes(start_point, 1)\n assert(len(lanes_near_start) == 1)\n\n goal_point = Point2d(-191.789, -50.1725)\n lanes_near_goal = map_interface.find_nearest_lanes(goal_point, 1)\n assert(len(lanes_near_goal) == 1)\n\n driving_corridor = map_interface.compute_driving_corridor_from_start_to_goal(\n lanes_near_start[0].lane_id, lanes_near_goal[0].lane_id)\n print(driving_corridor)\n for id in driving_corridor.get_lane_ids():\n l = map_interface.get_lane(id[1])\n assert(l.lane_type == LaneType.driving)\n\n time.sleep(2) # if this is not here, the second unit test is not executed (maybe parsing takes too long?)\n\n def test_driving_corridor_adjacency_4way_intersection(self):\n #xodr_parser = XodrParser(\"modules/runtime/tests/data/urban_road.xodr\")\n #xodr_parser = XodrParser(\"modules/runtime/tests/data/city_highway_straight.xodr\")\n xodr_parser = XodrParser(\"modules/runtime/tests/data/4way_intersection.xodr\")\n\n params = ParameterServer()\n world = World(params)\n\n map_interface = MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n # xodr_parser.roadgraph.print_graph(\"/home/esterle/4way_intersection.dot\")\n world.set_map(map_interface)\n\n map_interface.compute_all_driving_corridors()\n\n all_corridors = map_interface.get_all_corridors()\n c = all_corridors[10]\n right_adj_corridors = map_interface.get_adjacent_corridors_same_direction(c, [151, 168, 0.0])\n assert(len(right_adj_corridors) == 2)\n\n right_adj_corridors = map_interface.get_adjacent_corridors_same_direction(c, [169, 169, 0.0])\n assert(len(right_adj_corridors) == 1)\n\n viewer = MPViewer(params=params, use_world_bounds=True)\n viewer.drawWorld(world)\n viewer.drawDrivingCorridor(c)\n if right_adj_corridors:\n for rc in right_adj_corridors:\n viewer.drawDrivingCorridor(rc)\n\n viewer.show(block=True)\n time.sleep(0.1)\n\n def test_driving_corridor_splitting_4way_intersection(self):\n #xodr_parser = XodrParser(\"modules/runtime/tests/data/urban_road.xodr\")\n #xodr_parser = XodrParser(\"modules/runtime/tests/data/city_highway_straight.xodr\")\n xodr_parser = XodrParser(\"modules/runtime/tests/data/4way_intersection.xodr\")\n\n params = ParameterServer()\n world = World(params)\n\n map_interface = MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n # xodr_parser.roadgraph.print_graph(\"/home/esterle/4way_intersection.dot\")\n 
world.set_map(map_interface)\n\n map_interface.compute_all_driving_corridors()\n\n all_corridors = map_interface.get_all_corridors()\n\n c = all_corridors[11]\n\n splittingcorridors = map_interface.get_splitting_corridors(c, [168, 161, 0.0])\n assert(len(splittingcorridors) == 0)\n\n splittingcorridors = map_interface.get_splitting_corridors(c, [152, 168, 0.0])\n assert(len(splittingcorridors) == 2)\n\n viewer = MPViewer(params=params, use_world_bounds=True)\n viewer.drawWorld(world)\n viewer.drawDrivingCorridor(c)\n if splittingcorridors:\n for sc in splittingcorridors:\n viewer.drawDrivingCorridor(sc)\n\n viewer.show(block=True)\n time.sleep(0.1)\n\n def test_between_lanes(self):\n xodr_parser = XodrParser(\"modules/runtime/tests/data/city_highway_straight.xodr\")\n np.set_printoptions(precision=8)\n params = ParameterServer()\n\n world = World(params)\n map_interface = MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n world.set_map(map_interface)\n\n # Simple test\n point_close = Point2d(5112.68262, 5086.44971)\n lane_sw = map_interface.find_lane(point_close)\n self.assertIsNotNone(lane_sw, \"This point is still in the left lane! Lane boundary is 5112.683\")\n\n switched_lane = False\n lng_coord = 5086.44971\n i = 5112.0\n lane_sw = map_interface.find_lane(Point2d(i, lng_coord))\n assert lane_sw != None\n prev = lane_sw.lane_id\n prev_i = i\n while (i < 5113.0):\n lane_sw = map_interface.find_lane(Point2d(i, lng_coord))\n self.assertIsNotNone(lane_sw, \"Should always be on at least one lane! Currently at ({}, {})\".format(i, lng_coord))\n if prev != lane_sw.lane_id:\n # print(prev)\n # print(prev_i)\n # print(lane_sw.lane_id)\n # print(i)\n self.assertFalse(switched_lane, \"Lane switch should only happens once!\")\n switched_lane = True\n prev_i = i\n prev = lane_sw.lane_id\n i = i + 0.0001\n self.assertTrue(switched_lane, \"Eventually should have switched lanes!\")\n\n def test_find_lane(self):\n\n xodr_parser = XodrParser(\"modules/runtime/tests/data/urban_road.xodr\")\n\n params = ParameterServer()\n world = World(params)\n\n map_interface = MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n world.set_map(map_interface)\n\n lane_sw = map_interface.find_lane(Point2d(46, 180))\n assert lane_sw.lane_type == LaneType.sidewalk\n\n lane_rl = map_interface.find_lane(Point2d(52, 130))\n assert lane_rl.lane_type == LaneType.driving\n\n lane_no_lane = map_interface.find_lane(Point2d(120, 140))\n assert lane_no_lane == None\n\n xodr_parser = XodrParser(\"modules/runtime/tests/data/city_highway_straight.xodr\")\n np.set_printoptions(precision=8)\n params = ParameterServer()\n world = World(params)\n\n map_interface = MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n world.set_map(map_interface)\n point = Point2d(5111, 5072)\n viewer = MPViewer(params=params, use_world_bounds=True)\n viewer.drawWorld(world)\n polygon = world.map.get_roadgraph().get_lane_polygon_by_id(241)\n polygon2 = world.map.get_roadgraph().get_lane_polygon_by_id(242)\n viewer.drawPolygon2d(polygon, 'blue', 1.0)\n viewer.drawPolygon2d(polygon2, 'green', 1.0)\n viewer.drawPoint2d(point, 'red', 1.0)\n viewer.show(block=True)\n time.sleep(0.1)\n lane_sw = map_interface.find_lane(point)\n self.assertIsNotNone(lane_sw, \"This point is clearly on a lane!\")\n\n def test_line_segment_within_driving_corridor(self):\n\n xodr_parser = XodrParser(\"modules/runtime/tests/data/urban_road.xodr\")\n\n params = ParameterServer()\n world = World(params)\n\n map_interface = 
MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n world.set_map(map_interface)\n\n map_interface.compute_all_driving_corridors()\n all_corridors = map_interface.get_all_corridors()\n\n point_sw = Point2d(46, 180) # this point lies inside the sidewalk left of the road\n point_rl = Point2d(52, 130) # this point lies in the right lane\n point_outside = Point2d(140, 37) # this point lies far outside (in lane of type NONE)\n point_no_lane = Point2d(120, 140) # this point lies far outside, not in any lane\n\n assert not map_interface.line_segment_inside_corridor(all_corridors[0], point_sw, point_rl)\n assert not map_interface.line_segment_inside_corridor(all_corridors[0], point_sw, point_no_lane)\n assert not map_interface.line_segment_inside_corridor(all_corridors[0], point_outside, point_no_lane)\n assert not map_interface.line_segment_inside_corridor(all_corridors[0], point_sw, point_outside)\n\n def test_driving_direction(self):\n\n xodr_parser = XodrParser(\"modules/runtime/tests/data/urban_road.xodr\")\n\n params = ParameterServer()\n world = World(params)\n\n map_interface = MapInterface()\n map_interface.set_open_drive_map(xodr_parser.map)\n world.set_map(map_interface)\n\n point_rl = Point2d(52, 130) # this point lies in the right lane\n point_ll = Point2d(68, 72) # this point lies in the left lane\n point_no_lane = Point2d(120, 140) # this point lies far outside, not in any lane\n\n assert map_interface.has_correct_driving_direction(point_rl, math.pi/2)\n assert map_interface.has_correct_driving_direction(point_rl, math.pi/2+0.2)\n assert map_interface.has_correct_driving_direction(point_rl, math.pi/2-0.2)\n assert not map_interface.has_correct_driving_direction(point_rl, -math.pi/2)\n\n assert map_interface.has_correct_driving_direction(point_ll, -math.pi/4)\n assert not map_interface.has_correct_driving_direction(point_ll, -math.pi/4 + math.pi)\n\n assert not map_interface.has_correct_driving_direction(point_no_lane, 0)\n assert not map_interface.has_correct_driving_direction(point_no_lane, math.pi/2)\n assert not map_interface.has_correct_driving_direction(point_no_lane, math.pi)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.set_printoptions" ] ]
Stevenah/keras-training-system
[ "ef15519d84335621f3e8f73db68cd54134e723fe" ]
[ "experiments/densenet169_trained_on_medical_dataset/_sources/logging_44c21d0c4e2f675554adcf905290024d.py" ]
[ "\nfrom utils.util import pad_string\n\nimport numpy as np\n\nimport os\n\nBASIC_METRICS = {\n 'TP': 'true positive',\n 'TN': 'true negative',\n 'FP': 'false positive',\n 'FN': 'false negative'\n}\n\nADVANCED_METRICS = {\n 'f1': 'f1',\n 'rec': 'recall',\n 'acc': 'accuracy',\n 'prec': 'metthews',\n 'spec': 'precision',\n 'mcc': 'specificity'\n}\n\nTABLE_ROW_DIVIDER = '-'\n\nTEMP_DIRECTORY = '../tmp'\n\ndef write_table( file_path, table_header, table_content ):\n \n file_action = 'a' if os.path.exists(file_path) else 'w'\n table_size = 100\n\n if len(table_header) > table_size:\n table_size = len(table_header) + 10\n\n with open( file_path, file_action ) as f:\n\n f.write(pad_string('', table_size, '-', '-'))\n f.write(pad_string(f' {table_header}', table_size, ' ', '|'))\n f.write(pad_string('', table_size, '-', '|'))\n\n for table_row in table_content:\n if table_row == TABLE_ROW_DIVIDER: f.write( pad_string( '', table_size, '-', '|' ) )\n else: f.write( pad_string( table_row, table_size, ' ', '|' ) )\n\n f.write(pad_string('', table_size, '-', '-'))\n f.write('\\n')\n\ndef log_file_evaluation( file_name, image_name, prediction_label, prediction_confidence, prediction_time ):\n file_path = os.path.join(TEMP_DIRECTORY, file_name)\n file_action = 'a' if os.path.exists(file_path) else 'w'\n with open( file_path, file_action ) as f:\n f.write(f'{image_name},{prediction_label},{prediction_confidence},{prediction_time}\\n')\n \ndef log_cross_validation_results( file_name, results, experiment_name, folds ):\n\n file_path = os.path.join( TEMP_DIRECTORY, file_name )\n \n table_header = f'{ folds } K-fold summary for { experiment_name }'\n table_content = [ ]\n\n division_point = max( map( len, ADVANCED_METRICS.values() ) )\n\n for key, metric in ADVANCED_METRICS.items():\n table_content.append( f'{ metric.ljust( division_point ) } | { np.mean( results[key] ) }' )\n\n write_table( file_path, table_header, table_content )\n\ndef log_class_legend( file_name, class_names ):\n\n file_path = os.path.join( TEMP_DIRECTORY, file_name )\n\n table_header = 'Class legend'\n table_content = [ ]\n\n division_point = max( map( len, class_names ) )\n\n for index, class_name in enumerate( class_names ):\n table_content.append( f'{ class_name.ljust( division_point ) } | { index }' )\n\n write_table( file_path, table_header, table_content )\n\ndef log_confusion_table( file_name, confusion_matrix ):\n\n file_path = os.path.join( TEMP_DIRECTORY, file_name )\n\n table_header = 'Confusion table'\n\n table_content = [ f' { row }' for row in np.array_str(confusion_matrix, max_line_width=1000000).split('\\n') ]\n\n write_table( file_path, table_header, table_content )\n\ndef log_to_results_comparison( results, experiment_name, folds ): \n\n file_path = 'all_results.txt'\n\n table_header = f' { folds } K-fold summary for { experiment_name }'\n table_content = [ ]\n\n division_point = max( map( len, ADVANCED_METRICS.values() ) )\n \n for key, metric in ADVANCED_METRICS.items():\n table_content.append( f'{ metric.ljust( division_point ) } | { np.mean( results[key] ) }' )\n\n write_table(file_path, table_header, table_content)\n\ndef log_class_results( file_name, results, class_name, class_index ):\n\n file_path = os.path.join(TEMP_DIRECTORY, file_name)\n\n table_header = f'{class_name} summary'\n table_content = [ ]\n\n division_point = max(max(map(len, ADVANCED_METRICS.values())), max(map(len, BASIC_METRICS.values())))\n\n for key, metric in BASIC_METRICS.items():\n table_content.append( f'{ metric.ljust(division_point) } | 
{ results[key] }' )\n\n table_content.append(TABLE_ROW_DIVIDER)\n\n for key, metric in ADVANCED_METRICS.items():\n table_content.append( f'{ metric.ljust(division_point) } | { results[key] }' )\n \n write_table(file_path, table_header, table_content)\n\ndef log_misclassifications( file_name, misclassifications, class_name):\n\n file_path = os.path.join(TEMP_DIRECTORY, file_name)\n\n table_header = f'Misclassified {class_name}'\n table_content = [ f' { miss_file_name }' for miss_file_name in misclassifications ]\n\n write_table(file_path, table_header, table_content)\n\ndef log_model_results( file_name, results, model_name ):\n\n file_path = os.path.join(TEMP_DIRECTORY, file_name)\n \n table_header = f'Model summary for split {model_name}'\n table_content = []\n\n division_point = max(map(len, ADVANCED_METRICS.values()))\n\n for key, metric in ADVANCED_METRICS.items():\n table_content.append( f'{ metric.ljust( division_point ) } | { np.mean( np.mean(results[key]) ) }' )\n\n write_table(file_path, table_header, table_content)\n" ]
[ [ "numpy.array_str", "numpy.mean" ] ]
jonpodtu/jonpo_02476
[ "8c34d80fb205f546976df0051c5fbd5f0c87fe37" ]
[ "src/models/train_model.py" ]
[ "import os\n\nimport hydra\nimport matplotlib.pyplot as plt\nimport torch\nfrom hydra.utils import to_absolute_path\nfrom model import MyAwesomeModel\nfrom omegaconf import DictConfig\nfrom torch import optim\nfrom torch.utils.data import DataLoader, TensorDataset\n\n\[email protected](config_path=\"config\", config_name=\"training_conf.yaml\")\ndef main(cfg: DictConfig):\n print(\"Training day and night...\")\n\n trainset = TensorDataset(\n torch.load(to_absolute_path(cfg.paths[\"images\"])),\n torch.load(to_absolute_path(cfg.paths[\"labels\"])),\n )\n train_set = DataLoader(\n trainset, batch_size=cfg.hyperparameters[\"batch_size\"], shuffle=True\n )\n print(\"The trainingset is {} long!\".format(len(trainset)))\n # Criterion: We use the negative log likelihood as our output is logSoftMax\n criterion = torch.nn.NLLLoss()\n if cfg.hyperparameters[\"optimizer\"].lower() == \"adam\":\n optimizer = optim.Adam(model.parameters(), lr=cfg.hyperparameters[\"lr\"])\n elif cfg.hyperparameters[\"optimizer\"].lower() == \"sgd\":\n optimizer = optim.SGD(model.parameters(), lr=cfg.hyperparameters[\"lr\"])\n else:\n print('Not a valid optimizer! Please choose \"adam\" or \"sgd\".')\n\n # Epochs and train_loss\n epochs = cfg.hyperparameters[\"epochs\"]\n train_loss = []\n\n for e in range(epochs):\n # Dropout should be one ie. we set model to training mode\n model.train()\n running_loss = 0\n\n \"\"\"\n The for-loop does the following:\n We use convolutional network, so first we unsqueeze\n Resets the gradients\n 1. Makes a forward pass through the network\n 2. Use the logits to calculate the loss. We use the computed\n logits from our output.\n 3. Perform a backward pass through the network with\n loss.backward() to calculate the gradients\n 4. Take a step with the optimizer to update the weights\n \"\"\"\n\n for images, labels in train_set:\n images = images.unsqueeze(1)\n optimizer.zero_grad()\n\n output = model(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n train_loss.append(loss.item())\n print(\"[%d] loss: %.3f\" % (e + 1, running_loss / len(train_set)))\n\n models_dir = to_absolute_path(cfg.paths[\"model_save\"])\n os.makedirs(models_dir, exist_ok=True)\n torch.save(model, to_absolute_path(os.path.join(models_dir, \"trained_model.pt\")))\n\n fig_dir = to_absolute_path(cfg.paths[\"figures\"])\n os.makedirs(fig_dir, exist_ok=True)\n plt.plot(train_loss, label=\"Training loss\")\n plt.legend()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.savefig(os.path.join(fig_dir, \"loss.png\"))\n\n\nif __name__ == \"__main__\":\n model = MyAwesomeModel()\n hydra.core.global_hydra.GlobalHydra.instance().clear()\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "torch.nn.NLLLoss", "torch.utils.data.DataLoader", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
usert5432/lstm_ee
[ "342ed4f5245311924d6a06b38c4f28eac77778e9" ]
[ "tests/data_generator/tests_data_generator_base.py" ]
[ "\"\"\"Helper blocks to create IDataGenerator tests\"\"\"\n\nimport numpy as np\n\nfrom lstm_ee.data.data_loader.dict_loader import DictLoader\nfrom lstm_ee.data.data_generator.data_generator import DataGenerator\nfrom ..data import (\n TEST_DATA, TEST_INPUT_VARS_SLICE, TEST_INPUT_VARS_PNG3D,\n TEST_INPUT_VARS_PNG2D, TEST_TARGET_VAR_TOTAL, TEST_TARGET_VAR_PRIMARY,\n nan_equal\n)\n\ndef make_data_generator(\n data_loader = DictLoader(TEST_DATA),\n vars_input_slice = TEST_INPUT_VARS_SLICE,\n vars_input_png3d = TEST_INPUT_VARS_PNG3D,\n vars_input_png2d = TEST_INPUT_VARS_PNG2D,\n var_target_total = TEST_TARGET_VAR_TOTAL,\n var_target_primary = TEST_TARGET_VAR_PRIMARY,\n **kwargs\n):\n \"\"\"Create simple `DataGenerator`\"\"\"\n # pylint: disable=dangerous-default-value\n\n return DataGenerator(\n data_loader = data_loader,\n vars_input_slice = vars_input_slice,\n vars_input_png3d = vars_input_png3d,\n vars_input_png2d = vars_input_png2d,\n var_target_total = var_target_total,\n var_target_primary = var_target_primary,\n **kwargs,\n )\n\nclass TestsDataGeneratorBase():\n \"\"\"Functions to compare generated data batches to the expected ones\"\"\"\n # pylint: disable=no-member\n\n def _compare_np_arrays(self, label, batch_idx, test, null):\n self.assertEqual(\n (label in test), (label in null),\n \"Label: %s. Batch Index: %d. Label missing.\" % (label, batch_idx)\n )\n\n if label not in test:\n return\n\n test = np.array(test[label])\n null = np.array(null[label])\n\n self.assertEqual(\n test.shape, null.shape,\n \"Label: %s. Batch Index: %d. Shapes differ\" % (label, batch_idx)\n )\n\n self.assertTrue(\n nan_equal(test, null),\n \"Label: %s. Batch Index: %d. Values differ: %s != %s\" % (\n label, batch_idx, test, null\n )\n )\n\n def _compare_dgen_to_batch_data(self, dgen, batch_data):\n self.assertEqual(len(dgen), len(batch_data))\n\n # pylint: disable=consider-using-enumerate\n for i in range(len(dgen)):\n inputs_test, targets_test = dgen[i][:2]\n null = batch_data[i]\n\n self._compare_np_arrays('input_slice', i, inputs_test, null)\n self._compare_np_arrays('input_png3d', i, inputs_test, null)\n self._compare_np_arrays('input_png2d', i, inputs_test, null)\n self._compare_np_arrays('target_total', i, targets_test, null)\n self._compare_np_arrays('target_primary', i, targets_test, null)\n\n" ]
[ [ "numpy.array" ] ]
xroynard/ms_deepvoxscene
[ "e1800a5628e6b9ab20c12d1939e04ac2fd3b4cfc" ]
[ "utils/tester.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Xavier Roynard\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport time\nimport os\nimport sys\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\n# pour importer les modules \nsys.path.insert(0, os.path.abspath('..'))\n\nclass Tester(object):\n \n ###########################################################################\n # Initializes Tester\n ###########################################################################\n def __init__(self,\n params,\n dset_loader,\n model,\n ):\n \n self.params = params\n self.dset_loader = dset_loader\n self.model = model\n \n # Dataset size\n self.dataset_size = len(self.dset_loader.dataset)\n \n ###########################################################################\n # Tests self.model\n ###########################################################################\n def test_model(self):\n \n # Monitor total training time\n self.start_total_time = time.perf_counter()\n \n # Set model to evaluate mode\n self.model.train(False) \n \n # \n running_corrects = 0\n \n # initialize outputs of self.test_model\n true_class = np.zeros((self.dataset_size,1), dtype=np.int64)-1\n pred_class = np.zeros((self.dataset_size,1), dtype=np.int64)-1\n pred_proba_class = np.zeros((self.dataset_size,self.params.getp(\"NB_CLASSES\")), dtype=np.float32)\n \n # disable gradients\n with torch.set_grad_enabled(False):\n # Iterate over data.\n for i,data in enumerate(self.dset_loader):\n \n # get the inputs\n inputs = [d.cuda(device=self.params.getp(\"DEVICE_ID\")) for d in data['input']]\n batch_size = inputs[0].size(0)\n \n # forward\n outputs = self.model(inputs)\n if isinstance(outputs,list):\n outputs = outputs[0]\n \n # get predicted class\n _, preds = torch.max(outputs.data, 1)\n \n #\n preds = torch.squeeze(preds)\n \n if not(self.params.getp(\"USE_NO_LABELS\")):\n # get the labels\n labels = data['label'].cuda(device=self.params.getp(\"DEVICE_ID\"))\n # statistics\n batch_corrects = torch.sum(preds == labels.data)\n running_corrects += batch_corrects\n \n #\n true_class[i*self.params.getp(\"BATCH_SIZE\"):i*self.params.getp(\"BATCH_SIZE\") + batch_size] = labels.data.cpu().numpy().reshape( (batch_size,1) )\n \n #\n pred_class[i*self.params.getp(\"BATCH_SIZE\"):i*self.params.getp(\"BATCH_SIZE\") + batch_size] = preds.cpu().numpy().reshape( (batch_size,1) )\n pred_proba_class[i*self.params.getp(\"BATCH_SIZE\"):i*self.params.getp(\"BATCH_SIZE\") + batch_size] = F.softmax(outputs,dim=1).data.cpu().numpy()\n \n if self.params.getp(\"USE_NO_LABELS\"):\n print(\"\\r{:6.2f}%, Duration: {:.2f} s, Expected Total Duration: {:.1f} s\".format(100*i/len(self.dset_loader), time.perf_counter() - self.start_total_time, len(self.dset_loader)/(i+1) * (time.perf_counter() - self.start_total_time)), end=\"\")\n else:\n print(\"\\r{:6.2f}%, Batch Acc: {:6.2f}%, Duration: {:6.4f}s, Expected Total Duration: {:.1f} s\".format(100*i/len(self.dset_loader), 100*batch_corrects/batch_size, time.perf_counter() - self.start_total_time, len(self.dset_loader)/(i+1) * (time.perf_counter() - self.start_total_time)), end=\"\")\n \n if not(self.params.getp(\"USE_NO_LABELS\")):\n accuracy = running_corrects / self.dataset_size\n \n time_elapsed = time.perf_counter() - self.start_total_time\n print('\\n\\nTesting complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n if not(self.params.getp(\"USE_NO_LABELS\")):\n 
print('Testing Accuracy: {}'.format(accuracy))\n \n return (true_class, pred_class, pred_proba_class)\n" ]
[ [ "torch.nn.functional.softmax", "torch.max", "torch.sum", "torch.set_grad_enabled", "numpy.zeros", "torch.squeeze" ] ]
danielchen-pyc/gmr
[ "6d87623cbc642fbdfab4044ed40cbabed12f3986" ]
[ "gmr/tests/test_sklearn.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom gmr.utils import check_random_state\nfrom nose.tools import assert_less, assert_greater\nfrom nose.plugins.skip import SkipTest\n\n\ndef test_sklearn_regression():\n \"\"\"Test regression with GaussianMixtureRegressor.\"\"\"\n try:\n from gmr.sklearn import GaussianMixtureRegressor\n except ImportError:\n raise SkipTest(\"sklearn is not available\")\n\n random_state = check_random_state(0)\n\n n_samples = 200\n x = np.linspace(0, 2, n_samples)[:, np.newaxis]\n y1 = 3 * x[:n_samples // 2] + 1\n y2 = -3 * x[n_samples // 2:] + 7\n noise = random_state.randn(n_samples, 1) * 0.01\n y = np.vstack((y1, y2)) + noise\n\n gmr = GaussianMixtureRegressor(n_components=2, random_state=random_state)\n gmr.fit(x, y)\n assert_array_almost_equal(gmr.gmm_.priors, 0.5 * np.ones(2), decimal=2)\n assert_array_almost_equal(gmr.gmm_.means[0], np.array([0.5, 2.5]), decimal=2)\n assert_array_almost_equal(gmr.gmm_.means[1], np.array([1.5, 2.5]), decimal=1)\n\n pred = gmr.predict(x)\n mse = np.sum((y - pred) ** 2) / n_samples\n assert_less(mse, 0.01)\n\n\ndef test_sklearn_regression_with_2d_input():\n \"\"\"Test regression with GaussianMixtureRegressor and two-dimensional input.\"\"\"\n try:\n from gmr.sklearn import GaussianMixtureRegressor\n except ImportError:\n raise SkipTest(\"sklearn is not available\")\n\n random_state = check_random_state(0)\n\n n_samples = 200\n x = np.linspace(0, 2, n_samples)[:, np.newaxis]\n y1 = 3 * x[:n_samples // 2] + 1\n y2 = -3 * x[n_samples // 2:] + 7\n noise = random_state.randn(n_samples, 1) * 0.01\n y = np.vstack((y1, y2)) + noise\n\n gmr = GaussianMixtureRegressor(n_components=2, random_state=random_state)\n gmr.fit(x, y)\n\n pred = gmr.predict(x)\n mse = np.sum((y - pred) ** 2) / n_samples\n assert_less(mse, 0.01)\n\n\ndef test_sklearn_regression_with_1d_output():\n \"\"\"Test regression with GaussianMixtureRegressor and two-dimensional input.\"\"\"\n try:\n from gmr.sklearn import GaussianMixtureRegressor\n except ImportError:\n raise SkipTest(\"sklearn is not available\")\n\n random_state = check_random_state(0)\n\n n_samples = 200\n x = np.linspace(0, 2, n_samples)[:, np.newaxis]\n y = 3 * x + 1\n y = y.flatten()\n\n gmr = GaussianMixtureRegressor(n_components=1, random_state=random_state)\n gmr.fit(x, y)\n\n pred = gmr.predict(x)\n mse = np.sum((y - pred) ** 2) / n_samples\n assert_greater(mse, 0.01)\n\n\ndef test_sklearn_regression_without_noise():\n \"\"\"Test regression without noise.\"\"\"\n try:\n from gmr.sklearn import GaussianMixtureRegressor\n except ImportError:\n raise SkipTest(\"sklearn is not available\")\n\n random_state = 0\n\n n_samples = 200\n x = np.linspace(0, 2, n_samples)[:, np.newaxis]\n y1 = 3 * x[:n_samples // 2] + 1\n y2 = -3 * x[n_samples // 2:] + 7\n y = np.vstack((y1, y2))\n\n gmr = GaussianMixtureRegressor(n_components=2, random_state=random_state)\n gmr.fit(x, y)\n assert_array_almost_equal(gmr.gmm_.priors, 0.5 * np.ones(2), decimal=2)\n assert_array_almost_equal(gmr.gmm_.means[0], np.array([1.5, 2.5]), decimal=2)\n assert_array_almost_equal(gmr.gmm_.means[1], np.array([0.5, 2.5]), decimal=1)\n\n pred = gmr.predict(x)\n mse = np.sum((y - pred) ** 2) / n_samples\n assert_less(mse, 0.01)\n" ]
[ [ "numpy.linspace", "numpy.ones", "numpy.array", "numpy.sum", "numpy.vstack" ] ]
lidayuls/conv-emotion
[ "954805e4236ca8c64ee1f2fb25130deec8a2d52f" ]
[ "DialogueRNN/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_sequence\n\nclass SimpleAttention(nn.Module):\n\n def __init__(self, input_dim):\n super(SimpleAttention, self).__init__()\n self.input_dim = input_dim\n self.scalar = nn.Linear(self.input_dim,1,bias=False)\n\n def forward(self, M, x=None):\n \"\"\"\n M -> (seq_len, batch, vector)\n x -> dummy argument for the compatibility with MatchingAttention\n \"\"\"\n scale = self.scalar(M) # seq_len, batch, 1\n alpha = F.softmax(scale, dim=0).permute(1,2,0) # batch, 1, seq_len\n attn_pool = torch.bmm(alpha, M.transpose(0,1))[:,0,:] # batch, vector\n\n return attn_pool, alpha\n\nclass MatchingAttention(nn.Module):\n\n def __init__(self, mem_dim, cand_dim, alpha_dim=None, att_type='general'):\n super(MatchingAttention, self).__init__()\n assert att_type!='concat' or alpha_dim!=None\n assert att_type!='dot' or mem_dim==cand_dim\n self.mem_dim = mem_dim\n self.cand_dim = cand_dim\n self.att_type = att_type\n if att_type=='general':\n self.transform = nn.Linear(cand_dim, mem_dim, bias=False)\n if att_type=='general2':\n self.transform = nn.Linear(cand_dim, mem_dim, bias=True)\n #torch.nn.init.normal_(self.transform.weight,std=0.01)\n elif att_type=='concat':\n self.transform = nn.Linear(cand_dim+mem_dim, alpha_dim, bias=False)\n self.vector_prod = nn.Linear(alpha_dim, 1, bias=False)\n\n def forward(self, M, x, mask=None):\n \"\"\"\n M -> (seq_len, batch, mem_dim)\n x -> (batch, cand_dim)\n mask -> (batch, seq_len)\n \"\"\"\n if type(mask)==type(None):\n mask = torch.ones(M.size(1), M.size(0)).type(M.type())\n\n if self.att_type=='dot':\n # vector = cand_dim = mem_dim\n M_ = M.permute(1,2,0) # batch, vector, seqlen\n x_ = x.unsqueeze(1) # batch, 1, vector\n alpha = F.softmax(torch.bmm(x_, M_), dim=2) # batch, 1, seqlen\n elif self.att_type=='general':\n M_ = M.permute(1,2,0) # batch, mem_dim, seqlen\n x_ = self.transform(x).unsqueeze(1) # batch, 1, mem_dim\n alpha = F.softmax(torch.bmm(x_, M_), dim=2) # batch, 1, seqlen\n elif self.att_type=='general2':\n M_ = M.permute(1,2,0) # batch, mem_dim, seqlen\n x_ = self.transform(x).unsqueeze(1) # batch, 1, mem_dim\n alpha_ = F.softmax((torch.bmm(x_, M_))*mask.unsqueeze(1), dim=2) # batch, 1, seqlen\n alpha_masked = alpha_*mask.unsqueeze(1) # batch, 1, seqlen\n alpha_sum = torch.sum(alpha_masked, dim=2, keepdim=True) # batch, 1, 1\n alpha = alpha_masked/alpha_sum # batch, 1, 1 ; normalized\n #import ipdb;ipdb.set_trace()\n else:\n M_ = M.transpose(0,1) # batch, seqlen, mem_dim\n x_ = x.unsqueeze(1).expand(-1,M.size()[0],-1) # batch, seqlen, cand_dim\n M_x_ = torch.cat([M_,x_],2) # batch, seqlen, mem_dim+cand_dim\n mx_a = F.tanh(self.transform(M_x_)) # batch, seqlen, alpha_dim\n alpha = F.softmax(self.vector_prod(mx_a),1).transpose(1,2) # batch, 1, seqlen\n\n attn_pool = torch.bmm(alpha, M.transpose(0,1))[:,0,:] # batch, mem_dim\n\n return attn_pool, alpha\n\n\nclass DialogueRNNCell(nn.Module):\n\n def __init__(self, D_m, D_g, D_p, D_e, listener_state=False,\n context_attention='simple', D_a=100, dropout=0.5):\n super(DialogueRNNCell, self).__init__()\n\n self.D_m = D_m\n self.D_g = D_g\n self.D_p = D_p\n self.D_e = D_e\n\n self.listener_state = listener_state\n self.g_cell = nn.GRUCell(D_m+D_p,D_g)\n self.p_cell = nn.GRUCell(D_m+D_g,D_p)\n self.e_cell = nn.GRUCell(D_p,D_e)\n if listener_state:\n self.l_cell = nn.GRUCell(D_m+D_p,D_p)\n\n self.dropout = nn.Dropout(dropout)\n\n if context_attention=='simple':\n self.attention = SimpleAttention(D_g)\n else:\n 
self.attention = MatchingAttention(D_g, D_m, D_a, context_attention)\n\n def _select_parties(self, X, indices):\n q0_sel = []\n for idx, j in zip(indices, X):\n q0_sel.append(j[idx].unsqueeze(0))\n q0_sel = torch.cat(q0_sel,0)\n return q0_sel\n\n def forward(self, U, qmask, g_hist, q0, e0):\n \"\"\"\n U -> batch, D_m\n qmask -> batch, party\n g_hist -> t-1, batch, D_g\n q0 -> batch, party, D_p\n e0 -> batch, self.D_e\n \"\"\"\n qm_idx = torch.argmax(qmask, 1)\n q0_sel = self._select_parties(q0, qm_idx)\n\n g_ = self.g_cell(torch.cat([U,q0_sel], dim=1),\n torch.zeros(U.size()[0],self.D_g).type(U.type()) if g_hist.size()[0]==0 else\n g_hist[-1])\n g_ = self.dropout(g_)\n if g_hist.size()[0]==0:\n c_ = torch.zeros(U.size()[0],self.D_g).type(U.type())\n alpha = None\n else:\n c_, alpha = self.attention(g_hist,U)\n # c_ = torch.zeros(U.size()[0],self.D_g).type(U.type()) if g_hist.size()[0]==0\\\n # else self.attention(g_hist,U)[0] # batch, D_g\n U_c_ = torch.cat([U,c_], dim=1).unsqueeze(1).expand(-1,qmask.size()[1],-1)\n qs_ = self.p_cell(U_c_.contiguous().view(-1,self.D_m+self.D_g),\n q0.view(-1, self.D_p)).view(U.size()[0],-1,self.D_p)\n qs_ = self.dropout(qs_)\n\n if self.listener_state:\n U_ = U.unsqueeze(1).expand(-1,qmask.size()[1],-1).contiguous().view(-1,self.D_m)\n ss_ = self._select_parties(qs_, qm_idx).unsqueeze(1).\\\n expand(-1,qmask.size()[1],-1).contiguous().view(-1,self.D_p)\n U_ss_ = torch.cat([U_,ss_],1)\n ql_ = self.l_cell(U_ss_,q0.view(-1, self.D_p)).view(U.size()[0],-1,self.D_p)\n ql_ = self.dropout(ql_)\n else:\n ql_ = q0\n qmask_ = qmask.unsqueeze(2)\n q_ = ql_*(1-qmask_) + qs_*qmask_\n e0 = torch.zeros(qmask.size()[0], self.D_e).type(U.type()) if e0.size()[0]==0\\\n else e0\n e_ = self.e_cell(self._select_parties(q_,qm_idx), e0)\n e_ = self.dropout(e_)\n\n return g_,q_,e_,alpha\n\nclass DialogueRNN(nn.Module):\n\n def __init__(self, D_m, D_g, D_p, D_e, listener_state=False,\n context_attention='simple', D_a=100, dropout=0.5):\n super(DialogueRNN, self).__init__()\n\n self.D_m = D_m\n self.D_g = D_g\n self.D_p = D_p\n self.D_e = D_e\n self.dropout = nn.Dropout(dropout)\n\n self.dialogue_cell = DialogueRNNCell(D_m, D_g, D_p, D_e,\n listener_state, context_attention, D_a, dropout)\n\n def forward(self, U, qmask):\n \"\"\"\n U -> seq_len, batch, D_m\n qmask -> seq_len, batch, party\n \"\"\"\n\n g_hist = torch.zeros(0).type(U.type()) # 0-dimensional tensor\n q_ = torch.zeros(qmask.size()[1], qmask.size()[2],\n self.D_p).type(U.type()) # batch, party, D_p\n e_ = torch.zeros(0).type(U.type()) # batch, D_e\n e = e_\n\n alpha = []\n for u_,qmask_ in zip(U, qmask):\n g_, q_, e_, alpha_ = self.dialogue_cell(u_, qmask_, g_hist, q_, e_)\n g_hist = torch.cat([g_hist, g_.unsqueeze(0)],0)\n e = torch.cat([e, e_.unsqueeze(0)],0)\n if type(alpha_)!=type(None):\n alpha.append(alpha_[:,0,:])\n\n return e,alpha # seq_len, batch, D_e\nclass BiModel(nn.Module):\n\n def __init__(self, D_m, D_g, D_p, D_e, D_h,\n n_classes=7, listener_state=False, context_attention='simple', D_a=100, dropout_rec=0.5,\n dropout=0.5):\n super(BiModel, self).__init__()\n\n self.D_m = D_m\n self.D_g = D_g\n self.D_p = D_p\n self.D_e = D_e\n self.D_h = D_h\n self.n_classes = n_classes\n self.dropout = nn.Dropout(dropout)\n self.dropout_rec = nn.Dropout(dropout+0.15)\n self.dialog_rnn_f = DialogueRNN(D_m, D_g, D_p, D_e,listener_state,\n context_attention, D_a, dropout_rec)\n self.dialog_rnn_r = DialogueRNN(D_m, D_g, D_p, D_e,listener_state,\n context_attention, D_a, dropout_rec)\n self.linear = nn.Linear(2*D_e, 
2*D_h)\n self.smax_fc = nn.Linear(2*D_h, n_classes)\n self.matchatt = MatchingAttention(2*D_e,2*D_e,att_type='general2')\n\n def _reverse_seq(self, X, mask):\n \"\"\"\n X -> seq_len, batch, dim\n mask -> batch, seq_len\n \"\"\"\n X_ = X.transpose(0,1)\n mask_sum = torch.sum(mask, 1).int()\n\n xfs = []\n for x, c in zip(X_, mask_sum):\n xf = torch.flip(x[:c], [0])\n xfs.append(xf)\n\n return pad_sequence(xfs)\n\n\n def forward(self, U, qmask, umask,att2=True):\n \"\"\"\n U -> seq_len, batch, D_m\n qmask -> seq_len, batch, party\n \"\"\"\n\n emotions_f, alpha_f = self.dialog_rnn_f(U, qmask) # seq_len, batch, D_e\n emotions_f = self.dropout_rec(emotions_f)\n rev_U = self._reverse_seq(U, umask)\n rev_qmask = self._reverse_seq(qmask, umask)\n emotions_b, alpha_b = self.dialog_rnn_r(rev_U, rev_qmask)\n emotions_b = self._reverse_seq(emotions_b, umask)\n emotions_b = self.dropout_rec(emotions_b)\n emotions = torch.cat([emotions_f,emotions_b],dim=-1)\n if att2:\n att_emotions = []\n alpha = []\n for t in emotions:\n att_em, alpha_ = self.matchatt(emotions,t,mask=umask)\n att_emotions.append(att_em.unsqueeze(0))\n alpha.append(alpha_[:,0,:])\n att_emotions = torch.cat(att_emotions,dim=0)\n hidden = F.relu(self.linear(att_emotions))\n else:\n hidden = F.relu(self.linear(emotions))\n #hidden = F.relu(self.linear(emotions))\n hidden = self.dropout(hidden)\n log_prob = F.log_softmax(self.smax_fc(hidden), 2) # seq_len, batch, n_classes\n return log_prob, alpha, alpha_f, alpha_b\n\nclass BiE2EModel(nn.Module):\n\n def __init__(self, D_emb, D_m, D_g, D_p, D_e, D_h, word_embeddings,\n n_classes=7, listener_state=False, context_attention='simple', D_a=100, dropout_rec=0.5,\n dropout=0.5):\n super(BiE2EModel, self).__init__()\n\n self.D_emb = D_emb\n self.D_m = D_m\n self.D_g = D_g\n self.D_p = D_p\n self.D_e = D_e\n self.D_h = D_h\n self.n_classes = n_classes\n self.dropout = nn.Dropout(dropout)\n #self.dropout_rec = nn.Dropout(0.2)\n self.dropout_rec = nn.Dropout(dropout)\n self.turn_rnn = nn.GRU(D_emb, D_m)\n self.dialog_rnn_f = DialogueRNN(D_m, D_g, D_p, D_e,listener_state,\n context_attention, D_a, dropout_rec)\n self.dialog_rnn_r = DialogueRNN(D_m, D_g, D_p, D_e,listener_state,\n context_attention, D_a, dropout_rec)\n self.linear1 = nn.Linear(2*D_e, D_h)\n #self.linear2 = nn.Linear(D_h, D_h)\n #self.linear3 = nn.Linear(D_h, D_h)\n self.smax_fc = nn.Linear(D_h, n_classes)\n self.embedding = nn.Embedding(word_embeddings.shape[0],word_embeddings.shape[1])\n self.embedding.weight.data.copy_(word_embeddings)\n self.embedding.weight.requires_grad = True\n self.matchatt = MatchingAttention(2*D_e,2*D_e,att_type='general2')\n def _reverse_seq(self, X, mask):\n \"\"\"\n X -> seq_len, batch, dim\n mask -> batch, seq_len\n \"\"\"\n X_ = X.transpose(0,1)\n mask_sum = torch.sum(mask, 1).int()\n\n xfs = []\n for x, c in zip(X_, mask_sum):\n xf = torch.flip(x[:c], [0])\n xfs.append(xf)\n\n return pad_sequence(xfs)\n\n def forward(self, data, att2=False):\n\n #T1 = word_embeddings[data.turn1] # seq_len, batch, D_emb\n #T2 = word_embeddings[data.turn2] # seq_len, batch, D_emb\n #T3 = word_embeddings[data.turn3] # seq_len, batch, D_emb\n\n T1 = (self.embedding(data.turn1))\n T2 = (self.embedding(data.turn2))\n T3 = (self.embedding(data.turn3))\n\n T1_, h_out1 = self.turn_rnn(T1,\n torch.zeros(1, T1.size(1), self.D_m).type(T1.type()))\n T2_, h_out2 = self.turn_rnn(T2,\n torch.zeros(1, T1.size(1), self.D_m).type(T1.type()))\n T3_, h_out3 = self.turn_rnn(T3,\n torch.zeros(1, T1.size(1), self.D_m).type(T1.type()))\n\n U = 
torch.cat([h_out1, h_out2, h_out3], 0) # 3, batch, D_m\n\n qmask = torch.FloatTensor([[1,0],[0,1],[1,0]]).type(T1.type())\n qmask = qmask.unsqueeze(1).expand(-1, T1.size(1), -1)\n\n umask = torch.FloatTensor([[1,1,1]]).type(T1.type())\n umask = umask.expand( T1.size(1),-1)\n\n emotions_f, alpha_f = self.dialog_rnn_f(U, qmask) # seq_len, batch, D_e\n emotions_f = self.dropout_rec(emotions_f)\n rev_U = self._reverse_seq(U, umask)\n rev_qmask = self._reverse_seq(qmask, umask)\n emotions_b, alpha_b = self.dialog_rnn_r(rev_U, rev_qmask)\n emotions_b = self._reverse_seq(emotions_b, umask)\n #emotions_b = self.dropout_rec(emotions_b)\n emotions = torch.cat([emotions_f,emotions_b],dim=-1)\n #print(emotions)\n emotions = self.dropout_rec(emotions)\n\n #emotions = emotions.unsqueeze(1)\n if att2:\n att_emotion, _ = self.matchatt(emotions, emotions[-1])\n hidden = F.relu(self.linear1(att_emotion))\n else:\n hidden = F.relu(self.linear1(emotions[-1]))\n #hidden = F.relu(self.linear2(hidden))\n #hidden = F.relu(self.linear3(hidden))\n # hidden = self.dropout(hidden)\n log_prob = F.log_softmax(self.smax_fc(hidden), -1) # batch, n_classes\n return log_prob\n\nclass E2EModel(nn.Module):\n\n def __init__(self, D_emb, D_m, D_g, D_p, D_e, D_h,\n n_classes=7, listener_state=False, context_attention='simple', D_a=100, dropout_rec=0.5,\n dropout=0.5):\n super(E2EModel, self).__init__()\n\n self.D_emb = D_emb\n self.D_m = D_m\n self.D_g = D_g\n self.D_p = D_p\n self.D_e = D_e\n self.D_h = D_h\n self.n_classes = n_classes\n self.dropout = nn.Dropout(dropout)\n #self.dropout_rec = nn.Dropout(0.2)\n self.dropout_rec = nn.Dropout(dropout+0.15)\n self.turn_rnn = nn.GRU(D_emb, D_m)\n self.dialog_rnn = DialogueRNN(D_m, D_g, D_p, D_e,listener_state,\n context_attention, D_a, dropout_rec)\n self.linear1 = nn.Linear(D_e, D_h)\n #self.linear2 = nn.Linear(D_h, D_h)\n #self.linear3 = nn.Linear(D_h, D_h)\n self.smax_fc = nn.Linear(D_h, n_classes)\n\n self.matchatt = MatchingAttention(D_e,D_e,att_type='general2')\n\n def forward(self, data, word_embeddings, att2=False):\n\n T1 = word_embeddings[data.turn1] # seq_len, batch, D_emb\n T2 = word_embeddings[data.turn2] # seq_len, batch, D_emb\n T3 = word_embeddings[data.turn3] # seq_len, batch, D_emb\n\n T1_, h_out1 = self.turn_rnn(T1,\n torch.zeros(1, T1.size(1), self.D_m).type(T1.type()))\n T2_, h_out2 = self.turn_rnn(T2,\n torch.zeros(1, T1.size(1), self.D_m).type(T1.type()))\n T3_, h_out3 = self.turn_rnn(T3,\n torch.zeros(1, T1.size(1), self.D_m).type(T1.type()))\n\n U = torch.cat([h_out1, h_out2, h_out3], 0) # 3, batch, D_m\n\n qmask = torch.FloatTensor([[1,0],[0,1],[1,0]]).type(T1.type())\n qmask = qmask.unsqueeze(1).expand(-1, T1.size(1), -1)\n\n emotions, _ = self.dialog_rnn(U, qmask) # seq_len, batch, D_e\n #print(emotions)\n emotions = self.dropout_rec(emotions)\n\n #emotions = emotions.unsqueeze(1)\n if att2:\n att_emotion, _ = self.matchatt(emotions,emotions[-1])\n hidden = F.relu(self.linear1(att_emotion))\n else:\n hidden = F.relu(self.linear1(emotions[-1]))\n #hidden = F.relu(self.linear2(hidden))\n #hidden = F.relu(self.linear3(hidden))\n hidden = self.dropout(hidden)\n log_prob = F.log_softmax(self.smax_fc(hidden), -1) # batch, n_classes\n return log_prob\nclass Model(nn.Module):\n\n def __init__(self, D_m, D_g, D_p, D_e, D_h,\n n_classes=7, listener_state=False, context_attention='simple', D_a=100, dropout_rec=0.5,\n dropout=0.5):\n super(Model, self).__init__()\n\n self.D_m = D_m\n self.D_g = D_g\n self.D_p = D_p\n self.D_e = D_e\n self.D_h = D_h\n self.n_classes 
= n_classes\n        self.dropout = nn.Dropout(dropout)\n        #self.dropout_rec = nn.Dropout(0.2)\n        self.dropout_rec = nn.Dropout(dropout+0.15)\n        self.dialog_rnn = DialogueRNN(D_m, D_g, D_p, D_e,listener_state,\n                            context_attention, D_a, dropout_rec)\n        self.linear1 = nn.Linear(D_e, D_h)\n        #self.linear2 = nn.Linear(D_h, D_h)\n        #self.linear3 = nn.Linear(D_h, D_h)\n        self.smax_fc = nn.Linear(D_h, n_classes)\n\n        self.matchatt = MatchingAttention(D_e,D_e,att_type='general2')\n\n    def forward(self, U, qmask, umask=None, att2=False):\n        \"\"\"\n        U -> seq_len, batch, D_m\n        qmask -> seq_len, batch, party\n        \"\"\"\n\n        emotions, _ = self.dialog_rnn(U, qmask) # seq_len, batch, D_e; DialogueRNN returns (emotions, alpha)\n        #print(emotions)\n        emotions = self.dropout_rec(emotions)\n\n        #emotions = emotions.unsqueeze(1)\n        if att2:\n            att_emotions = []\n            for t in emotions:\n                att_emotions.append(self.matchatt(emotions,t,mask=umask)[0].unsqueeze(0))\n            att_emotions = torch.cat(att_emotions,dim=0)\n            hidden = F.relu(self.linear1(att_emotions))\n        else:\n            hidden = F.relu(self.linear1(emotions))\n        #hidden = F.relu(self.linear2(hidden))\n        #hidden = F.relu(self.linear3(hidden))\n        hidden = self.dropout(hidden)\n        log_prob = F.log_softmax(self.smax_fc(hidden), 2) # seq_len, batch, n_classes\n        return log_prob\n\nclass AVECModel(nn.Module):\n\n    def __init__(self, D_m, D_g, D_p, D_e, D_h, attr, listener_state=False,\n                 context_attention='simple', D_a=100, dropout_rec=0.5, dropout=0.5):\n        super(AVECModel, self).__init__()\n\n        self.D_m = D_m\n        self.D_g = D_g\n        self.D_p = D_p\n        self.D_e = D_e\n        self.D_h = D_h\n        self.attr = attr\n        self.dropout = nn.Dropout(dropout)\n        self.dropout_rec = nn.Dropout(dropout)\n        self.dialog_rnn = DialogueRNN(D_m, D_g, D_p, D_e,listener_state,\n                            context_attention, D_a, dropout_rec)\n        self.linear = nn.Linear(D_e, D_h)\n        self.smax_fc = nn.Linear(D_h, 1)\n\n    def forward(self, U, qmask):\n        \"\"\"\n        U -> seq_len, batch, D_m\n        qmask -> seq_len, batch, party\n        \"\"\"\n\n        emotions,_ = self.dialog_rnn(U, qmask) # seq_len, batch, D_e\n        emotions = self.dropout_rec(emotions)\n        hidden = torch.tanh(self.linear(emotions))\n        hidden = self.dropout(hidden)\n        if self.attr!=4:\n            pred = (self.smax_fc(hidden).squeeze()) # seq_len, batch\n        else:\n            pred = (self.smax_fc(hidden).squeeze()) # seq_len, batch\n        return pred.transpose(0,1).contiguous().view(-1)\n\nclass MaskedNLLLoss(nn.Module):\n\n    def __init__(self, weight=None):\n        super(MaskedNLLLoss, self).__init__()\n        self.weight = weight\n        self.loss = nn.NLLLoss(weight=weight,\n                               reduction='sum')\n\n    def forward(self, pred, target, mask):\n        \"\"\"\n        pred -> batch*seq_len, n_classes\n        target -> batch*seq_len\n        mask -> batch, seq_len\n        \"\"\"\n        mask_ = mask.view(-1,1) # batch*seq_len, 1\n        if type(self.weight)==type(None):\n            loss = self.loss(pred*mask_, target)/torch.sum(mask)\n        else:\n            loss = self.loss(pred*mask_, target)\\\n                            /torch.sum(self.weight[target]*mask_.squeeze())\n        return loss\n\nclass MaskedMSELoss(nn.Module):\n\n    def __init__(self):\n        super(MaskedMSELoss, self).__init__()\n        self.loss = nn.MSELoss(reduction='sum')\n\n    def forward(self, pred, target, mask):\n        \"\"\"\n        pred -> batch*seq_len\n        target -> batch*seq_len\n        mask -> batch*seq_len\n        \"\"\"\n        loss = self.loss(pred*mask, target)/torch.sum(mask)\n        return loss\n\nif torch.cuda.is_available():\n    FloatTensor = torch.cuda.FloatTensor\n    LongTensor = torch.cuda.LongTensor\n    ByteTensor = torch.cuda.ByteTensor\n\nelse:\n    FloatTensor = torch.FloatTensor\n    LongTensor = torch.LongTensor\n    ByteTensor = torch.ByteTensor\n\nclass CNNFeatureExtractor(nn.Module):\n\n    def __init__(self, 
vocab_size, embedding_dim, output_size, filters, kernel_sizes, dropout):\n super(CNNFeatureExtractor, self).__init__()\n\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=embedding_dim, out_channels=filters, kernel_size=K) for K in kernel_sizes])\n self.dropout = nn.Dropout(dropout)\n self.fc = nn.Linear(len(kernel_sizes) * filters, output_size)\n self.feature_dim = output_size\n\n\n def init_pretrained_embeddings_from_numpy(self, pretrained_word_vectors):\n self.embedding.weight = nn.Parameter(torch.from_numpy(pretrained_word_vectors).float())\n # if is_static:\n self.embedding.weight.requires_grad = False\n\n\n def forward(self, x, umask):\n \n num_utt, batch, num_words = x.size()\n \n x = x.type(LongTensor) # (num_utt, batch, num_words)\n x = x.view(-1, num_words) # (num_utt, batch, num_words) -> (num_utt * batch, num_words)\n emb = self.embedding(x) # (num_utt * batch, num_words) -> (num_utt * batch, num_words, 300) \n emb = emb.transpose(-2, -1).contiguous() # (num_utt * batch, num_words, 300) -> (num_utt * batch, 300, num_words) \n \n convoluted = [F.relu(conv(emb)) for conv in self.convs] \n pooled = [F.max_pool1d(c, c.size(2)).squeeze() for c in convoluted] \n concated = torch.cat(pooled, 1)\n features = F.relu(self.fc(self.dropout(concated))) # (num_utt * batch, 150) -> (num_utt * batch, 100)\n features = features.view(num_utt, batch, -1) # (num_utt * batch, 100) -> (num_utt, batch, 100)\n mask = umask.unsqueeze(-1).type(FloatTensor) # (batch, num_utt) -> (batch, num_utt, 1)\n mask = mask.transpose(0, 1) # (batch, num_utt, 1) -> (num_utt, batch, 1)\n mask = mask.repeat(1, 1, self.feature_dim) # (num_utt, batch, 1) -> (num_utt, batch, 100)\n features = (features * mask) # (num_utt, batch, 100) -> (num_utt, batch, 100)\n\n return features\n\nclass DailyDialogueModel(nn.Module):\n\n def __init__(self, D_m, D_g, D_p, D_e, D_h,\n vocab_size, n_classes=7, embedding_dim=300, \n cnn_output_size=100, cnn_filters=50, cnn_kernel_sizes=(3,4,5), cnn_dropout=0.5,\n listener_state=False, context_attention='simple', D_a=100, dropout_rec=0.5,\n dropout=0.5, att2=True):\n \n super(DailyDialogueModel, self).__init__()\n\n self.cnn_feat_extractor = CNNFeatureExtractor(vocab_size, embedding_dim, cnn_output_size, cnn_filters, cnn_kernel_sizes, cnn_dropout)\n \n self.D_m = D_m\n self.D_g = D_g\n self.D_p = D_p\n self.D_e = D_e\n self.D_h = D_h\n self.dropout = nn.Dropout(dropout)\n self.dropout_rec = nn.Dropout(dropout_rec)\n self.dialog_rnn_f = DialogueRNN(D_m, D_g, D_p, D_e, listener_state,\n context_attention, D_a, dropout_rec)\n self.dialog_rnn_r = DialogueRNN(D_m, D_g, D_p, D_e, listener_state,\n context_attention, D_a, dropout_rec)\n self.linear = nn.Linear(2*D_e, 2*D_h)\n self.matchatt = MatchingAttention(2*D_e,2*D_e,att_type='general2')\n\n self.n_classes = n_classes\n self.smax_fc = nn.Linear(2*D_h, n_classes)\n self.att2 = att2\n\n \n \n def init_pretrained_embeddings(self, pretrained_word_vectors):\n self.cnn_feat_extractor.init_pretrained_embeddings_from_numpy(pretrained_word_vectors)\n\n\n def _reverse_seq(self, X, mask):\n \"\"\"\n X -> seq_len, batch, dim\n mask -> batch, seq_len\n \"\"\"\n X_ = X.transpose(0,1)\n mask_sum = torch.sum(mask, 1).int()\n\n xfs = []\n for x, c in zip(X_, mask_sum):\n xf = torch.flip(x[:c], [0])\n xfs.append(xf)\n\n return pad_sequence(xfs)\n\n\n def forward(self, input_seq, qmask, umask):\n \"\"\"\n U -> seq_len, batch, D_m\n qmask -> seq_len, batch, party\n \"\"\"\n\n U = 
self.cnn_feat_extractor(input_seq, umask)\n\n emotions_f, alpha_f = self.dialog_rnn_f(U, qmask) # seq_len, batch, D_e\n emotions_f = self.dropout_rec(emotions_f)\n rev_U = self._reverse_seq(U, umask)\n rev_qmask = self._reverse_seq(qmask, umask)\n emotions_b, alpha_b = self.dialog_rnn_r(rev_U, rev_qmask)\n emotions_b = self._reverse_seq(emotions_b, umask)\n emotions_b = self.dropout_rec(emotions_b)\n emotions = torch.cat([emotions_f, emotions_b], dim=-1)\n if self.att2:\n att_emotions = []\n alpha = []\n for t in emotions:\n att_em, alpha_ = self.matchatt(emotions,t,mask=umask)\n att_emotions.append(att_em.unsqueeze(0))\n alpha.append(alpha_[:,0,:])\n att_emotions = torch.cat(att_emotions,dim=0)\n hidden = F.relu(self.linear(att_emotions))\n else:\n hidden = F.relu(self.linear(emotions))\n # hidden = F.relu(self.linear(emotions))\n hidden = self.dropout(hidden)\n log_prob = F.log_softmax(self.smax_fc(hidden), 2) # seq_len, batch, n_classes\n return log_prob, alpha, alpha_f, alpha_b\n\nclass UnMaskedWeightedNLLLoss(nn.Module):\n\n def __init__(self, weight=None):\n super(UnMaskedWeightedNLLLoss, self).__init__()\n self.weight = weight\n self.loss = nn.NLLLoss(weight=weight,\n reduction='sum')\n\n def forward(self, pred, target):\n \"\"\"\n pred -> batch*seq_len, n_classes\n target -> batch*seq_len\n \"\"\"\n if type(self.weight)==type(None):\n loss = self.loss(pred, target)\n else:\n loss = self.loss(pred, target)\\\n /torch.sum(self.weight[target])\n return loss\n\n" ]
[ [ "torch.nn.functional.softmax", "torch.cat", "torch.zeros", "torch.nn.utils.rnn.pad_sequence", "torch.nn.GRU", "torch.sum", "torch.nn.Embedding", "torch.FloatTensor", "torch.cuda.is_available", "torch.nn.Dropout", "torch.from_numpy", "torch.bmm", "torch.nn.GRUCell", "torch.nn.NLLLoss", "torch.nn.Linear", "torch.nn.Conv1d", "torch.flip", "torch.nn.MSELoss", "torch.argmax" ] ]
WingsUpete/RSODP
[ "35478b02a438c9af2d5d04ee3c60ce70f7c15269" ]
[ "utils/Utils.py" ]
[ "\"\"\"\nUtility functions\n\"\"\"\nimport math\n\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport torch\n\nimport Config\n\n\ndef haversine(c0, c1):\n \"\"\"\n :param c0: coordinate 0 in form (lat0, lng0) with degree as unit\n :param c1: coordinate 1 in form (lat1, lng1) with degree as unit\n :return: The haversine distance of c0 and c1 in km\n Compute the haversine distance between\n https://en.wikipedia.org/wiki/Haversine_formula\n \"\"\"\n dLat = math.radians(c1[0] - c0[0])\n dLng = math.radians(c1[1] - c0[1])\n lat0 = math.radians(c0[0])\n lat1 = math.radians(c1[0])\n form0 = math.pow(math.sin(dLat / 2), 2)\n form1 = math.cos(lat0) * math.cos(lat1) * math.pow(math.sin(dLng / 2), 2)\n radius_of_earth = 6371 # km\n dist = 2 * radius_of_earth * math.asin(math.sqrt(form0 + form1))\n return dist\n\n\ndef batch2device(record, record_GD: dict, query, target_G: torch.Tensor, target_D: torch.Tensor, device):\n \"\"\" Transfer all sample data into the device (cpu/gpu) \"\"\"\n # Transfer record\n for temp_feat in Config.TEMP_FEAT_NAMES:\n if record is not None:\n record[temp_feat] = [(fg.to(device), bg.to(device), gg.to(device)) for (fg, bg, gg) in record[temp_feat]]\n record_GD[temp_feat] = [(curD.to(device), curG.to(device)) for (curD, curG) in record_GD[temp_feat]]\n\n # Transfer query\n if query is not None:\n query = query.to(device)\n\n # Transfer target\n target_G = target_G.to(device)\n target_D = target_D.to(device)\n\n return record, record_GD, query, target_G, target_D\n\n\ndef RMSE(y_pred: torch.Tensor, y_true: torch.Tensor, threshold=torch.Tensor([0])):\n \"\"\"\n RMSE (Root Mean Squared Error)\n :param y_pred: prediction tensor\n :param y_true: target tensor\n :param threshold: single-value tensor - only values above the threshold are considered\n :return: RMSE-threshold, number of items considered\n \"\"\"\n y_true_mask = y_true > threshold\n y_pred_filter = y_pred[y_true_mask]\n y_true_filter = y_true[y_true_mask]\n return torch.sum(torch.pow((y_true_filter - y_pred_filter), 2)), len(y_true_filter)\n\n\ndef MAE(y_pred: torch.Tensor, y_true: torch.Tensor, threshold=torch.Tensor([0])):\n \"\"\"\n MAE (Mean Absolute Error)\n :param y_pred: prediction tensor\n :param y_true: target tensor\n :param threshold: single-value tensor - only values above the threshold are considered (if threshold=3, result is MAE-3)\n :return: MAE-threshold, number of items considered\n \"\"\"\n y_true_mask = y_true > threshold\n y_pred_filter = y_pred[y_true_mask]\n y_true_filter = y_true[y_true_mask]\n return torch.sum(torch.abs(y_true_filter - y_pred_filter)), len(y_true_filter)\n\n\ndef MAPE(y_pred: torch.Tensor, y_true: torch.Tensor, threshold=torch.Tensor([0])):\n \"\"\"\n MAPE (Mean Absolute Percentage Error)\n :param y_pred: prediction tensor\n :param y_true: target tensor\n :param threshold: single-value tensor - only values above the threshold are considered (if threshold=3, result is MAPE-3)\n :return: MAPE-threshold, number of items considered\n \"\"\"\n y_true_mask = y_true > threshold\n y_pred_filter = y_pred[y_true_mask]\n y_true_filter = y_true[y_true_mask]\n return torch.sum(torch.abs((y_true_filter - y_pred_filter)/(y_true_filter + 1))), len(y_true_filter)\n\n\nMETRICS_FUNCTIONS_MAP = {\n 'RMSE': RMSE,\n 'MAPE': MAPE,\n 'MAE': MAE,\n}\n\n\ndef path2FileNameWithoutExt(path):\n \"\"\"\n get file name without extension from path\n :param path: file path\n :return: file name without extension\n \"\"\"\n return 
os.path.splitext(path)[0]\n\n\ndef trainLog2LossCurve(logfn='train.log'):\n if not os.path.isfile(logfn):\n print('{} is not a valid file.'.format(logfn))\n exit(-1)\n\n x_epoch = []\n y_loss_train = []\n train_time_list = []\n\n print('Analyzing log file: {}'.format(logfn))\n f = open(logfn, 'r')\n lines = f.readlines()\n for line in lines:\n if not line.startswith('Training Round'):\n continue\n items = line.strip().split(sep=' ')\n\n epoch = int(items[2][:-1])\n x_epoch.append(epoch)\n\n loss = float(items[5][:-1])\n y_loss_train.append(loss)\n\n train_time = float(items[10][1:])\n train_time_list.append(train_time)\n\n # Count average TTpS\n avgTTpS = sum(train_time_list) / len(train_time_list)\n print('Average TTpS: %.4f sec' % avgTTpS)\n\n # Plot training loss curve\n print('Plotting loss curve.')\n plt.plot(x_epoch, y_loss_train, c='purple', label='Train Loss', alpha=0.8)\n plt.title('Epoch - Training Loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend(loc='upper right')\n # plt.show()\n figpath = '{}.png'.format(path2FileNameWithoutExt(logfn))\n plt.savefig(figpath)\n print('Loss curve saved to {}'.format(figpath))\n\n print('All analysis tasks finished.')\n\n\n# by RoshanRane in https://discuss.pytorch.org/t/check-gradient-flow-in-network/15063/10\ndef plot_grad_flow(named_parameters):\n \"\"\"\n Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing/exploding problems.\n Usage: Plug this function in Trainer class after loss.backwards() as \"plot_grad_flow(model.named_parameters())\" to\n visualize the gradient flow.\n \"\"\"\n avg_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and (p.grad is not None) and ('bias' not in n):\n layers.append(n)\n avg_grads.append(p.grad.cpu().abs().mean())\n max_grads.append(p.grad.cpu().abs().max())\n\n plt.figure(figsize=(7, 20))\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(avg_grads)), avg_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(avg_grads)+1, lw=2, color='k')\n plt.xticks(range(0, len(avg_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(avg_grads))\n plt.ylim(bottom=-0.001, top=0.5) # zoom in on the lower gradient regions\n plt.xlabel('Layers')\n plt.ylabel('Average Gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4),\n Line2D([0], [0], color='b', lw=4),\n Line2D([0], [0], color='k', lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])\n plt.tight_layout()\n plt.ion()\n plt.show()\n plt.pause(5)\n plt.ioff()\n\n\ndef genMetricsResStorage(num_metrics_threshold=len(Config.EVAL_METRICS_THRESHOLD_SET), tasks=Config.METRICS_FOR_WHAT):\n metrics_res = {}\n for metrics_for_what in tasks:\n metrics_res[metrics_for_what] = {}\n for metrics in METRICS_FUNCTIONS_MAP:\n metrics_res[metrics_for_what][metrics] = {'val': torch.zeros(num_metrics_threshold),\n 'num': torch.zeros(num_metrics_threshold)}\n return metrics_res\n\n\ndef aggrMetricsRes(metrics_res, metrics_thresholds, num_metrics_threshold, res_D, target_D, res_G, target_G):\n for mi in range(num_metrics_threshold): # for the (mi)th threshold\n for metrics_for_what in metrics_res:\n curRes, curTar = (res_D, target_D) if metrics_for_what == 'Demand' else (res_G, target_G)\n for metrics in metrics_res[metrics_for_what]:\n curFunc = METRICS_FUNCTIONS_MAP[metrics]\n res, resN = curFunc(curRes, curTar, 
metrics_thresholds[mi])\n metrics_res[metrics_for_what][metrics]['val'][mi] += res.item()\n metrics_res[metrics_for_what][metrics]['num'][mi] += resN\n\n return metrics_res\n\n\ndef wrapMetricsRes(metrics_res):\n for metrics_for_what in metrics_res:\n for metrics in metrics_res[metrics_for_what]:\n metrics_res[metrics_for_what][metrics]['val'] /= metrics_res[metrics_for_what][metrics]['num']\n if metrics == 'RMSE':\n metrics_res[metrics_for_what][metrics]['val'] = torch.sqrt(metrics_res[metrics_for_what][metrics]['val'])\n\n return metrics_res\n\n\ndef evalMetrics(dataloader, eval_type, getResMethod, device, logr, *args):\n # Metrics with thresholds\n num_metrics_threshold = len(Config.EVAL_METRICS_THRESHOLD_SET)\n metrics_res = genMetricsResStorage(num_metrics_threshold=num_metrics_threshold, tasks=Config.METRICS_FOR_WHAT)\n metrics_thresholds = [torch.Tensor([threshold]) for threshold in Config.EVAL_METRICS_THRESHOLD_SET]\n if device:\n metrics_thresholds = [torch.Tensor([threshold]).to(device) for threshold in Config.EVAL_METRICS_THRESHOLD_SET]\n with torch.no_grad():\n for j, batch in enumerate(dataloader):\n # Clean GPU memory\n if device.type == 'cuda':\n torch.cuda.empty_cache()\n\n res_D, res_G, target_D, target_G = getResMethod(batch, device, args)\n\n metrics_res = aggrMetricsRes(metrics_res, metrics_thresholds, num_metrics_threshold,\n res_D, target_D, res_G, target_G)\n\n metrics_res = wrapMetricsRes(metrics_res)\n\n logr.log('> Metrics Evaluations for %s Set:\\n' % eval_type)\n for metrics_for_what in metrics_res:\n logr.log('%s:\\n' % metrics_for_what)\n for metrics in metrics_res[metrics_for_what]:\n for mi in range(num_metrics_threshold):\n cur_threshold = Config.EVAL_METRICS_THRESHOLD_SET[mi]\n logr.log('%s-%d = %.4f%s' % (metrics,\n cur_threshold,\n metrics_res[metrics_for_what][metrics]['val'][mi],\n (', ' if mi != num_metrics_threshold - 1 else '\\n')))\n\n return metrics_res\n\n\n# Test\nif __name__ == '__main__':\n # print(haversine((40.4944, -74.2655), (40.9196, -73.6957))) # 67.39581283189828\n # trainLog2LossCurve(logfn='../res/Gallat_retrain/20210522_14_44_12.log')\n # trainLog2LossCurve(logfn='../res/GallatExt_pretrain/low_dimension/best/20210518_15_20_40.log')\n trainLog2LossCurve(logfn='../res/20210530_05_30_19.log')\n" ]
[ [ "matplotlib.pyplot.legend", "torch.abs", "torch.zeros", "matplotlib.pyplot.plot", "torch.no_grad", "torch.pow", "matplotlib.pyplot.tight_layout", "torch.sqrt", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "torch.cuda.empty_cache", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ion", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "torch.Tensor", "matplotlib.lines.Line2D", "matplotlib.pyplot.ioff", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.pause" ] ]
ziashen/deblocking_pytorch
[ "b156c5cbd26f0833089e5f1aee6e6d046579b56d" ]
[ "dataset/h5_data.py" ]
[ "# h5_data.py\n\n\nfrom __future__ import print_function\n\nimport os\nimport Image\nimport numpy\nimport h5py\n# import math\n\n'''\ngenerate train_data.h5 and test_data.h5\n\n'''\n# ori images have only 1 channel, while compressed images have 3 channels\nTRAIN_DATA_PATH = \"cmp_y/train/\"\nTRAIN_LABEL_PATH = \"ori_y/bmp_ver/train/\"\n\nTEST_DATA_PATH = \"cmp_y/val/\"\nTEST_LABEL_PATH = \"ori_y/bmp_ver/val/\"\n\noutput_train_filename = \"data_hdf5/train_data.h5\"\noutput_test_filename = \"data_hdf5/test_data.h5\"\n\npatch_size = 32\nstride = 10\n\ndef prepare_data(path):\n names = os.listdir(path)\n names = sorted(names) \n nums = names.__len__()\n \n data = []\n for i in range(nums):\n name = path + names[i]\n print(name)\n img = Image.open(name)\n\n # img, _, _, = img.split()\n img = numpy.asarray(img)\n\t# img = img[:, numpy.newaxis] # is that correct to add the dimension at last?\n size = img.shape\n print(size)\n x_start = 0\n x_end = x_start + patch_size - 1\n y_start = 0\n y_end = y_start + patch_size - 1 \n while x_end < size[0]:\n while y_end < size [1]:\n sub_img = img[x_start:(x_end+1), y_start:(y_end+1)]\n sub_img = sub_img[numpy.newaxis, :, :]\n # notice \":\" \n # print(sub_img.shape)\n data.append(sub_img)\n\n y_start = y_start + stride\n y_end = y_end + stride\n \n y_start = 0\n y_end = y_start + patch_size - 1\n x_start = x_start + stride\n x_end = x_end + stride\n \n data = numpy.array(data) # notice that list has no shape\n print(\"data.shape:\", data.shape) \n\n return data\n\ndef write_hdf5(data, label, output_filename):\n # x = data.astype(int) # ? what's the propose of these 2 lines?\n # y = label.astype(int) \n with h5py.File(output_filename, \"w\") as h:\n h.create_dataset(\"data\", data=data, shape=data.shape)\n h.create_dataset(\"label\", data=label, shape=label.shape)\n\n\nif __name__ == \"__main__\":\n data = prepare_data(TRAIN_DATA_PATH)\n label = prepare_data(TRAIN_LABEL_PATH) \n write_hdf5(data, label, output_train_filename)\n\n data = prepare_data(TEST_DATA_PATH)\n label = prepare_data(TEST_LABEL_PATH) \n write_hdf5(data, label, output_test_filename)\n \n\n\n" ]
[ [ "numpy.asarray", "numpy.array" ] ]
Valts-M/openvslam-1
[ "12e1778511eb30af4a5ea9153824fe8dc1460996" ]
[ "report_generator/tools/data_linewidth_plot.py" ]
[ "import matplotlib.pyplot as plt\n\nclass data_linewidth_plot():\n def __init__(self, x, y, **kwargs):\n self.ax = kwargs.pop(\"ax\", plt.gca())\n self.fig = self.ax.get_figure()\n self.lw_data = kwargs.pop(\"linewidth\", 1)\n self.lw = 1\n self.fig.canvas.draw()\n\n self.ppd = 72./self.fig.dpi\n self.trans = self.ax.transData.transform\n self.linehandle, = self.ax.plot([],[],**kwargs)\n if \"label\" in kwargs: kwargs.pop(\"label\")\n self.line, = self.ax.plot(x, y, **kwargs)\n self.line.set_color(self.linehandle.get_color())\n self._resize()\n self.cid = self.fig.canvas.mpl_connect('draw_event', self._resize)\n\n def _resize(self, event=None):\n lw = ((self.trans((1, self.lw_data))-self.trans((0, 0)))*self.ppd)[1]\n if lw != self.lw:\n self.line.set_linewidth(lw)\n self.lw = lw\n self._redraw_later()\n\n def _redraw_later(self):\n self.timer = self.fig.canvas.new_timer(interval=10)\n self.timer.single_shot = True\n self.timer.add_callback(lambda : self.fig.canvas.draw_idle())\n self.timer.start()" ]
[ [ "matplotlib.pyplot.gca" ] ]
VisualComputingInstitute/reid-tracking
[ "13c90ec698c6ce39aff8bc88d1ca9510b94bf931" ]
[ "simple_track_duke.py" ]
[ "# -*- coding: utf-8 -*-\n#TODO: comments/doc\n\nimport numpy as np\nfrom filterpy.kalman import KalmanFilter\nimport scipy\nfrom scipy import ndimage\nfrom scipy import signal\nfrom scipy.linalg import block_diag,inv\nfrom filterpy.common import Q_discrete_white_noise\nfrom filterpy.stats import plot_covariance_ellipse\nimport matplotlib.pyplot as plt\nfrom os.path import join as pjoin\n\nimport lib\nimport lbtoolbox.plotting as lbplt\n\n# all_bs for bbox regression\nall_bs = np.array([[256.3190, -0.0207, 136.6533, 0.1978],\n [212.9634, 0.0055, 126.0157, 0.2036],\n [277.3869, -0.0154, 5.2019, 0.4442],\n [-296.1867, 0.3356, 54.3528, 0.3093],\n [258.1709, -0.0258, 144.2437, 0.2030],\n [152.2878, 0.0296, -271.9162, 0.6985],\n [208.9894, 0.0349, -298.6897, 0.7266],\n [170.6156, 0.0128, 81.8043, 0.1659]])\n\nHOT_CMAP = lib.get_transparent_colormap()\n\n\nclass Track(object):\n\n \"\"\" Implements a track (not a tracker, a track).\n With KalmanFilter and some other stuff like status for track management\n\n Attributes\n ----------\n TODO\n\n \"\"\"\n\n def __init__(self, dt, curr_frame, init_pose, track_dim=4, det_dim=2, track_id=-1,\n embedding=None, debug_out_dir=None, init_thresh=3, delete_thresh=5,):\n self.debug_out_dir = debug_out_dir\n\n init_x = [init_pose[0], 0.0, init_pose[1], 0.0]\n init_P = [[200.0, 0, 0, 0], [0, 100.0, 0, 0], [0, 0, 200.0, 0], [0, 0, 0, 100.0]]\n\n self.track_id = track_id\n self.color = np.random.rand(3)\n self.xs=[init_x]\n self.Ps=[init_P]\n\n self.KF = KalmanFilter(dim_x=track_dim, dim_z=det_dim)\n self.KF.F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=np.float64)\n q = Q_discrete_white_noise(dim=2, dt=dt, var=50.)\n self.KF.Q = block_diag(q, q)\n self.KF.H = np.array([[1, 0, 0, 0],\n [0, 0, 1, 0]], dtype=np.float64)\n self.KF.R = np.array([[50.0, 0],\n [0, 50.0]], dtype=np.float64)\n self.KF.x = init_x\n self.KF.P = init_P\n\n self.missed_for = 0\n self.deleted_at = 0\n self.last_matched_at = curr_frame\n self.created_at = curr_frame\n\n self.age = 1 #age in frames\n\n #missed for [delete_thresh] times? 
delete!\n self.delete_thresh = delete_thresh #240=4 seconds (\"occluded by car\"-scenario in cam1)\n self.init_thresh = init_thresh #of consecutive detection responses before reporting this track\n # set status: {init, matched, missed, deleted}\n if self.init_thresh == 1:\n self.status='matched'\n else:\n self.status='init'\n\n self.poses=[init_pose]\n\n #only if ReID is used for DA\n self.embedding = embedding\n\n # ==Track state==\n def track_predict(self):\n # standard KF\n self.KF.predict()\n\n def track_update(self, z):\n self.KF.update(z)\n\n # ==Track status management==\n def track_is_missed(self,curr_frame):\n self.missed_for += 1\n # remember whether the track was still unconfirmed *before* overwriting\n # the status; otherwise the 'init' check below could never fire\n was_init = (self.status == 'init')\n self.status = 'missed'\n if (self.missed_for >= self.delete_thresh) or was_init:\n self.track_is_deleted(curr_frame)\n else:\n self.age += 1\n self.xs.append(self.KF.x)\n self.Ps.append(self.KF.P)\n self.poses.append([self.KF.x[0],self.KF.x[2]])\n\n def track_is_matched(self,curr_frame):\n self.last_matched_at = curr_frame\n self.missed_for = 0\n self.age += 1\n self.xs.append(self.KF.x)\n self.Ps.append(self.KF.P)\n self.poses.append([self.KF.x[0],self.KF.x[2]])\n if ((self.status=='init') and (curr_frame-self.created_at+1 < self.init_thresh)):\n pass # stay in init as long as threshold not exceeded\n else:\n self.status = 'matched' # in all other cases, go to matched\n\n def track_is_deleted(self,curr_frame):\n self.deleted_at = curr_frame\n self.status = 'deleted'\n\n # ==Evaluation==\n def get_track_eval_line(self,cid=1,frame=0):\n if (self.status == 'deleted' or self.status == 'init'):\n return None\n\n # dukeMTMC format: [cam, ID, frame, left, top, width, height, worldX, worldY]\n # (pymot format would be {\"height\": ..., \"width\": ..., \"id\": ..., \"y\": ..., \"x\": ..., \"z\": ...};\n # motchallenge format: TODO)\n cX,cY = self.poses[-1]\n # regress the bbox height from the position via the per-camera parameters in all_bs\n h = int(((all_bs[cid-1][0]+all_bs[cid-1][1]*cX) + (all_bs[cid-1][2]+all_bs[cid-1][3]*cY))/2)\n w = int(0.4*h)\n l = int(cX-w/2)\n t = int(cY-h/2)\n # id-shift-quick-hack for multi-cam eval.\n return [cid, self.track_id+cid*100000, lib.glob2loc(frame,cid), l, t, w, h, -1, -1]\n\n\n # ==Visualization==\n def plot_track(self, ax, plot_past_trajectory=False, output_shape=None):\n if (self.status == 'deleted' or self.status == 'init'):\n return\n\n cX, vX, cY, vY = self.xs[-1]\n ax.plot(cX, cY, color=self.color, marker='o')\n ax.arrow(cX, cY, vX, vY, head_width=50, head_length=20, fc=self.color, ec=self.color)\n # velocity uncertainty around the predicted position, position uncertainty around the current one\n plot_covariance_ellipse((cX+vX, cY+vY), self.KF.P[1::2,1::2], fc=self.color, alpha=0.5, std=[3])\n plot_covariance_ellipse((cX, cY), self.KF.P[::2,::2], fc=self.color, alpha=0.5, std=[1, 2, 3])\n if plot_past_trajectory and len(self.poses)>1:\n outputs_xy = np.array(self.poses)\n ax.plot(*outputs_xy.T, linewidth=2.0, color=self.color)\n" ]
[ [ "numpy.array", "numpy.random.rand", "scipy.linalg.block_diag" ] ]
RyusukeYamano/nngen
[ "9ed1f7fb83908794aa94d70287d89545d45fe875" ]
[ "tests/onnx_matrix_conv2d_resblock/onnx_matrix_conv2d_resblock.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport functools\nimport math\nimport numpy as np\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd\n\n# the next line can be removed after installation\nsys.path.insert(0, os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__)))))\n\nimport nngen as ng\n\nfrom veriloggen import *\nimport veriloggen.thread as vthread\nimport veriloggen.types.axi as axi\n\n\nclass MatrixConv2dResblock(nn.Module):\n def __init__(self, weight_shape, stride=1, padding=0,\n with_batchnorm=False, act_func='ReLU'):\n\n super(MatrixConv2dResblock, self).__init__()\n\n self.conv = nn.Conv2d(weight_shape[3], weight_shape[0], weight_shape[1],\n stride=stride, padding=padding, bias=not with_batchnorm)\n\n if with_batchnorm:\n self.bn = nn.BatchNorm2d(weight_shape[0])\n else:\n self.bn = None\n\n if act_func is not None:\n self.f = getattr(nn, act_func)()\n else:\n self.f = None\n\n def forward(self, x):\n y = self.conv(x)\n\n if self.bn is not None:\n y = self.bn(y)\n\n if self.f is not None:\n y = self.f(y)\n\n y = torch.add(x, y)\n\n return y\n\n\ndef run(act_shape=(1, 7, 7, 15), weight_shape=(15, 3, 3, 15),\n act_dtype=ng.int32, weight_dtype=ng.int32,\n stride=1, padding=1,\n with_batchnorm=False, act_func='ReLU', disable_fusion=False,\n par_ich=1, par_och=1, par_col=1, par_row=1,\n concur_och=None, stationary='filter',\n chunk_size=64,\n axi_datawidth=32, silent=False,\n filename=None, simtype='iverilog', outputfile=None):\n\n if weight_shape[0] != weight_shape[3]:\n raise ValueError('not supported shape: weight_shape[0] != weight_shape[3]')\n\n # pytorch model\n model = MatrixConv2dResblock(weight_shape, stride, padding,\n with_batchnorm, act_func)\n\n # overwrite weight values for test\n # model.conv.weight.data = torch.from_numpy(np.ones_like(model.conv.weight.data.numpy()))\n # model.conv.bias.data = torch.from_numpy(np.zeros_like(model.conv.bias.data.numpy()))\n\n # Pytorch to ONNX\n onnx_filename = 'onnx_matrix_conv2d_resblock.onnx'\n dummy_input = torch.randn(*act_shape).transpose(1, 3)\n input_names = ['act']\n output_names = ['out']\n model.eval()\n torch.onnx.export(model, dummy_input, onnx_filename,\n input_names=input_names, output_names=output_names)\n\n # --------------------\n # (1) Represent a DNN model as a dataflow by NNgen operators\n # --------------------\n\n # ONNX to NNgen\n value_dtypes = {'act': act_dtype,\n '0.weight': weight_dtype,\n 'out': act_dtype}\n\n (outputs, placeholders, variables,\n constants, operators) = ng.from_onnx(onnx_filename,\n value_dtypes=value_dtypes,\n default_placeholder_dtype=act_dtype,\n default_variable_dtype=weight_dtype,\n default_constant_dtype=weight_dtype,\n default_operator_dtype=act_dtype,\n default_scale_dtype=ng.int32,\n default_bias_dtype=ng.int32,\n disable_fusion=disable_fusion)\n\n # --------------------\n # (2) Assign quantized weights to the NNgen operators\n # --------------------\n\n if act_dtype.width > 8:\n act_scale_factor = 128\n else:\n act_scale_factor = int(round(2 ** (act_dtype.width - 1) * 0.5))\n\n input_scale_factors = {'act': act_scale_factor}\n\n ng.quantize(outputs, input_scale_factors)\n\n # --------------------\n # (3) Assign hardware attributes\n # --------------------\n\n for op in operators.values():\n if isinstance(op, ng.conv2d):\n op.attribute(par_ich=par_ich, par_och=par_och,\n 
par_row=par_row, par_col=par_col,\n concur_och=concur_och)\n\n # --------------------\n # (4) Verify the DNN model behavior by executing the NNgen dataflow as a software\n # --------------------\n\n act = placeholders['act']\n out = outputs['out']\n\n # verification data\n # random data\n std = 0.2\n mean = 0.5\n img = np.random.normal(size=act.length).astype(np.float32).reshape(act.shape)\n img = img * std + mean\n\n # execution on pytorch\n model_input = img\n\n if act.perm is not None:\n model_input = np.transpose(model_input, act.reversed_perm)\n\n model.eval()\n model_out = model(torch.from_numpy(model_input)).detach().numpy()\n if act.perm is not None and len(model_out.shape) == len(act.shape):\n model_out = np.transpose(model_out, act.perm)\n scaled_model_out = model_out * out.scale_factor\n\n # software-based verification\n vact = img * act_scale_factor\n vact = np.clip(vact,\n -1.0 * (2 ** (act.dtype.width - 1) - 1),\n 1.0 * (2 ** (act.dtype.width - 1) - 1))\n vact = np.round(vact).astype(np.int64)\n\n eval_outs = ng.eval([out], act=vact)\n vout = eval_outs[0]\n\n # kept for manual inspection of the quantization quality (e.g. in a debugger)\n mean_square_error = np.sum((vout - scaled_model_out) ** 2) / vout.size\n corrcoef = np.corrcoef(model_out.reshape([-1]), vout.reshape([-1]))\n\n # --------------------\n # (5) Convert the NNgen dataflow to a hardware description (Verilog HDL and IP-XACT)\n # --------------------\n\n targ = ng.to_veriloggen([out], 'onnx_matrix_conv2d', silent=silent,\n config={'maxi_datawidth': axi_datawidth,\n 'chunk_size': chunk_size})\n\n # --------------------\n # (6) Simulate the generated hardware by Veriloggen and Verilog simulator\n # --------------------\n\n if simtype is None:\n sys.exit()\n\n # to memory image\n param_data = ng.export_ndarray([out], chunk_size)\n param_bytes = len(param_data)\n\n variable_addr = int(math.ceil((act.addr + act.memory_size) / chunk_size)) * chunk_size\n check_addr = int(math.ceil((variable_addr + param_bytes) / chunk_size)) * chunk_size\n tmp_addr = int(math.ceil((check_addr + out.memory_size) / chunk_size)) * chunk_size\n\n memimg_datawidth = 32\n mem = np.zeros([1024 * 1024 * 8 // (memimg_datawidth // 8)], dtype=np.int64)\n mem = mem + [100]\n\n # placeholder\n axi.set_memory(mem, vact, memimg_datawidth,\n act_dtype.width, act.addr,\n max(int(math.ceil(axi_datawidth / act_dtype.width)), par_ich))\n\n # parameters (variable and constant)\n axi.set_memory(mem, param_data, memimg_datawidth,\n 8, variable_addr)\n\n # verification data\n axi.set_memory(mem, vout, memimg_datawidth,\n act_dtype.width, check_addr,\n max(int(math.ceil(axi_datawidth / act_dtype.width)), par_och))\n\n # test controller\n m = Module('test')\n params = m.copy_params(targ)\n ports = m.copy_sim_ports(targ)\n clk = ports['CLK']\n resetn = ports['RESETN']\n rst = m.Wire('RST')\n rst.assign(Not(resetn))\n\n # AXI memory model\n if outputfile is None:\n outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'\n\n memimg_name = 'memimg_' + outputfile\n\n memory = axi.AxiMemoryModel(m, 'memory', clk, rst,\n datawidth=axi_datawidth,\n memimg=mem, memimg_name=memimg_name,\n memimg_datawidth=memimg_datawidth)\n memory.connect(ports, 'maxi')\n\n # AXI-Slave controller\n _saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)\n _saxi.connect(ports, 'saxi')\n\n # timer\n time_counter = m.Reg('time_counter', 32, initval=0)\n seq = Seq(m, 'seq', clk, rst)\n seq(\n time_counter.inc()\n )\n\n def ctrl():\n # short idle wait before kicking off the accelerator\n for i in range(100):\n pass\n\n ng.sim.set_global_addrs(_saxi, tmp_addr)\n\n start_time = 
time_counter.value\n ng.sim.start(_saxi)\n\n print('# start')\n\n ng.sim.wait(_saxi)\n end_time = time_counter.value\n\n print('# end')\n print('# execution cycles: %d' % (end_time - start_time))\n\n # verify\n ok = True\n for bat in range(out.shape[0]):\n for y in range(out.shape[1]):\n for x in range(out.shape[2]):\n for ch in range(out.shape[3]):\n orig = memory.read_word(\n bat * out.aligned_shape[1] * out.aligned_shape[2] * out.aligned_shape[3] +\n y * out.aligned_shape[2] * out.aligned_shape[3] +\n x * out.aligned_shape[3] + ch,\n out.addr, act_dtype.width)\n check = memory.read_word(\n bat * out.aligned_shape[1] * out.aligned_shape[2] * out.aligned_shape[3] +\n y * out.aligned_shape[2] * out.aligned_shape[3] +\n x * out.aligned_shape[3] + ch,\n check_addr, act_dtype.width)\n\n if vthread.verilog.NotEql(orig, check):\n print('NG (', bat, y, x, ch,\n ') orig: ', orig, ' check: ', check)\n ok = False\n # else:\n # print('OK (', bat, y, x, ch,\n # ') orig: ', orig, ' check: ', check)\n\n if ok:\n print('# verify: PASSED')\n else:\n print('# verify: FAILED')\n\n vthread.finish()\n\n th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)\n fsm = th.start()\n\n uut = m.Instance(targ, 'uut',\n params=m.connect_params(targ),\n ports=m.connect_ports(targ))\n\n # simulation.setup_waveform(m, uut)\n simulation.setup_clock(m, clk, hperiod=5)\n init = simulation.setup_reset(m, resetn, m.make_reset(), period=100, polarity='low')\n\n init.add(\n Delay(10000000),\n Systask('finish'),\n )\n\n # output source code\n if filename is not None:\n m.to_verilog(filename)\n\n # run simulation\n sim = simulation.Simulator(m, sim=simtype)\n rslt = sim.run(outputfile=outputfile)\n lines = rslt.splitlines()\n if simtype == 'verilator' and lines[-1].startswith('-'):\n rslt = '\\n'.join(lines[:-1])\n return rslt\n\n\nif __name__ == '__main__':\n rslt = run(silent=False, filename='tmp.v')\n print(rslt)\n" ]
[ [ "torch.onnx.export", "torch.add", "numpy.clip", "torch.randn", "torch.nn.Conv2d", "torch.from_numpy", "numpy.round", "numpy.random.normal", "torch.nn.BatchNorm2d", "numpy.transpose", "numpy.zeros", "numpy.sum" ] ]
lcubelongren/BCCN_classwork
[ "0f5cf9e44bca54b5aba5aeb826586d1aaaa777d1" ]
[ "MHBF/given/exercise8/gridworld.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport base64\nfrom tempfile import NamedTemporaryFile\nfrom IPython.display import HTML\n\n\ndef display_animation(anim):\n \"\"\"\n Create a display object that displays an animation as interactive\n Javascript widget in Jupyter.\n\n Parameters\n ----------\n anim : matplotlib.animation\n An animation object create with matplotlib.\n \"\"\"\n plt.close(anim._fig)\n return HTML(anim.to_jshtml())\n\n\nclass Gridworld:\n \"\"\"\n A class that implements a quadratic NxN gridworld.\n\n Methods:\n\n run(N_trials=10,N_runs=1) : Run 'N_trials' trials. A trial is finished,\n when the agent reaches the reward location.\n visualize_trial() : Run a single trial with graphical output.\n reset() : Make the agent forget everything he has learned.\n plot_Q() : Plot of the Q-values .\n learning_curve() : Plot the time it takes the agent to reach the target\n as a function of trial number.\n navigation_map() : Plot the movement direction with the highest\n Q-value for all positions.\n \"\"\"\n\n def __init__(self, N, reward_position=(0,0), epsilon=.5,\n obstacle=True, lambda_eligibility=0.):\n \"\"\"\n Creates a quadratic NxN gridworld.\n\n Mandatory argument:\n N: size of the gridworld\n\n Optional arguments:\n reward_position = (x_coordinate,y_coordinate): the reward location\n obstacle = True: Add a wall to the gridworld.\n \"\"\"\n\n # gridworld size\n self.N = N\n\n # reward location\n self.reward_position = reward_position\n\n # reward administered t the target location and when\n # bumping into walls\n self.reward_at_target = 1.\n self.reward_at_wall = -0.5\n\n # probability at which the agent chooses a random\n # action. This makes sure the agent explores the grid.\n self.epsilon = epsilon\n\n # learning rate\n self.eta = 0.1\n\n # discount factor - quantifies how far into the future\n # a reward is still considered important for the\n # current action\n self.gamma = 0.99\n\n # the decay factor for the eligibility trace the\n # default is 0., which corresponds to no eligibility\n # trace at all.\n self.lambda_eligibility = lambda_eligibility\n\n # is there an obstacle in the room?\n self.obstacle = obstacle\n\n # draw animation of agent exploring?\n self._visualize = False\n\n # initialize the Q-values etc.\n self._init_run()\n\n def run(self,N_trials=10,N_runs=1):\n self.latencies = np.zeros(N_trials)\n\n for _ in range(N_runs):\n self._init_run()\n latencies = self._learn_run(N_trials=N_trials)\n self.latencies += latencies/N_runs\n\n def visualize_trial(self):\n \"\"\"\n Run a single trial with a graphical display that shows in\n red - the position of the agent\n blue - walls/obstacles\n green - the reward position\n\n Note that for the simulation, exploration is reduced -> self.epsilon=0.1\n\n \"\"\"\n # store the old exploration/exploitation parameter\n epsilon = self.epsilon\n\n # favor exploitation, i.e. use the action with the\n # highest Q-value most of the time\n self.epsilon = 0.1\n\n self._init_visualization()\n self._run_trial()\n\n # restore the old exploration/exploitation factor\n self.epsilon = epsilon\n\n return self._finish_visualization()\n\n def learning_curve(self,log=False,filter_t=1.):\n \"\"\"\n Show a running average of the time it takes the agent to reach the target location.\n\n Options:\n filter_t=1. 
: timescale of the running average.\n log : Logarithmic y axis.\n \"\"\"\n plt.figure()\n plt.xlabel('trials')\n plt.ylabel('time to reach target')\n latencies = np.array(self.latency_list)\n # calculate a running average over the latencies with an averaging time 'filter_t'\n for i in range(1,latencies.shape[0]):\n latencies[i] = latencies[i-1] + (latencies[i] - latencies[i-1])/float(filter_t)\n\n # plot the smoothed curve computed above (the raw values stay in self.latency_list)\n if not log:\n plt.plot(latencies)\n else:\n plt.semilogy(latencies)\n\n def navigation_map(self):\n \"\"\"\n Plot the direction with the highest Q-value for every position.\n Useful only for small gridworlds, otherwise the plot becomes messy.\n \"\"\"\n self.x_direction = np.zeros((self.N,self.N))\n self.y_direction = np.zeros((self.N,self.N))\n\n self.actions = np.argmax(self.Q[:,:,:],axis=2)\n self.y_direction[self.actions==0] = 1.\n self.y_direction[self.actions==1] = -1.\n self.y_direction[self.actions==2] = 0.\n self.y_direction[self.actions==3] = 0.\n\n self.x_direction[self.actions==0] = 0.\n self.x_direction[self.actions==1] = 0.\n self.x_direction[self.actions==2] = 1.\n self.x_direction[self.actions==3] = -1.\n\n plt.figure(figsize=(self.N, self.N))\n plt.quiver(self.x_direction,self.y_direction, pivot=\"mid\")\n plt.axis([-0.5, self.N - 0.5, -0.5, self.N - 0.5])\n\n def reset(self):\n \"\"\"\n Reset the Q-values (and the latency_list).\n\n Instant amnesia - the agent forgets everything he has learned before\n \"\"\"\n self.Q = np.random.rand(self.N,self.N,4)\n self.latency_list = []\n\n def plot_Q(self):\n \"\"\"\n Plot the dependence of the Q-values on position.\n The figure consists of 4 subgraphs, each of which shows the Q-values\n colorcoded for one of the actions.\n \"\"\"\n plt.figure(figsize=(0.75*self.N, 0.75*self.N))\n for i in range(4):\n plt.subplot(2,2,i+1)\n plt.imshow(self.Q[:,:,i],interpolation='nearest',origin='lower',vmax=1.1)\n if i==0:\n plt.title('Up')\n elif i==1:\n plt.title('Down')\n elif i==2:\n plt.title('Right')\n else:\n plt.title('Left')\n\n plt.colorbar()\n\n def _init_run(self):\n \"\"\"\n Initialize the Q-values, eligibility trace, position etc.\n \"\"\"\n # initialize the Q-values and the eligibility trace\n self.Q = 0.01 * np.random.rand(self.N,self.N,4) + 0.1\n self.e = np.zeros((self.N,self.N,4))\n\n # list that contains the times it took the agent to reach the target for all trials\n # serves to track the progress of learning\n self.latency_list = []\n\n # initialize the state and action variables\n self.x_position = None\n self.y_position = None\n self.action = None\n\n def _learn_run(self,N_trials=10):\n \"\"\"\n Run a learning period consisting of N_trials trials.\n\n Options:\n N_trials : Number of trials\n\n Note: The Q-values are not reset. Therefore, running this routine\n several times will continue the learning process. 
If you want to run\n a completely new simulation, call reset() before running it.\n\n \"\"\"\n for _ in range(N_trials):\n # run a trial and store the time it takes to the target\n latency = self._run_trial()\n self.latency_list.append(latency)\n\n return np.array(self.latency_list)\n\n def _run_trial(self):\n \"\"\"\n Run a single trial on the gridworld until the agent reaches the reward position.\n Return the time it takes to get there.\n \"\"\"\n # Initialize the latency (time to reach the target) for this trial\n latency = 0.\n\n # Choose a random initial position and make sure that it is not in the wall.\n # Needed here:\n # self.x_position, self.y_position, self._is_wall\n\n\n # Run the trial by choosing an action and repeatedly applying SARSA\n # until the reward has been reached.\n # Needed here:\n # self._choose_action, self._arrived, self._update_state, self._update_Q\n\n\n return latency\n\n def _update_Q(self):\n \"\"\"\n Update the current estimate of the Q-values according to SARSA.\n \"\"\"\n # Update the eligibility trace\n\n # Update the Q-values\n # deltaQ = eta * e * [r - (Q_old - gamma * Q)]\n # Needed here:\n # self.action, self.x_position, self.y_position, self._reward\n # plus _old versions of above and more.\n\n # Finally we visualize the state if requested by calling code.\n self._visualize_current_state()\n\n def _choose_action(self):\n \"\"\"\n Choose the next action based on the current estimate of the Q-values.\n The parameter epsilon determines, how often agent chooses the action\n with the highest Q-value (probability 1-epsilon). In the rest of the cases\n a random action is chosen.\n \"\"\"\n # Be sure to store the old action before choosing a new one.\n # Needed here:\n # self.action, self.action_old, self.epsilon, self.Q, self.x_position, self.y_position\n pass\n\n def _arrived(self):\n \"\"\"\n Check if the agent has arrived.\n \"\"\"\n return (self.x_position == self.reward_position[0] and self.y_position == self.reward_position[1])\n\n def _reward(self):\n \"\"\"\n Evaluates how much reward should be administered when performing the\n chosen action at the current location\n \"\"\"\n if self._arrived():\n return self.reward_at_target\n\n if self._wall_touch:\n return self.reward_at_wall\n else:\n return 0.\n\n def _update_state(self):\n \"\"\"\n Update the state according to the old state and the current action.\n \"\"\"\n # remember the old position of the agent\n self.x_position_old = self.x_position\n self.y_position_old = self.y_position\n\n # update the agents position according to the action\n # move to the down?\n if self.action == 0:\n self.x_position += 1\n # move to the up\n elif self.action == 1:\n self.x_position -= 1\n # move right?\n elif self.action == 2:\n self.y_position += 1\n # move left?\n elif self.action == 3:\n self.y_position -= 1\n else:\n print(\"There must be a bug. This is not a valid action!\")\n\n # check if the agent has bumped into a wall.\n self._wall_touch = self._is_wall()\n if self._wall_touch:\n self.x_position = self.x_position_old\n self.y_position = self.y_position_old\n\n def _is_wall(self,x_position=None,y_position=None):\n \"\"\"\n This function returns, if the given position is within an obstacle\n If you want to put the obstacle somewhere else, this is what you have\n to modify. 
The default is a wall that starts in the middle of the room\n and ends at the right wall.\n\n If no position is given, the current position of the agent is evaluated.\n \"\"\"\n if x_position is None or y_position is None:\n x_position = self.x_position\n y_position = self.y_position\n\n # check if the agent is trying to leave the gridworld\n if x_position < 0 or x_position >= self.N or y_position < 0 or y_position >= self.N:\n return True\n\n # check if the agent has bumped into an obstacle in the room\n if self.obstacle:\n if y_position == int(self.N/2) and x_position>self.N/2:\n return True\n\n # if none of the above is the case, this position is not a wall\n return False\n\n def _visualize_current_state(self):\n \"\"\"\n Show the gridworld. The squares are colored in\n red - the position of the agent - turns yellow when reaching the target or running into a wall\n blue - walls\n green - reward\n \"\"\"\n if self._visualize:\n self._display = np.copy(self._display)\n # set the agents color\n self._display[self.x_position_old,self.y_position_old,:] = 0\n self._display[self.x_position,self.y_position,0] = 1\n if self._wall_touch:\n self._display[self.x_position,self.y_position,:] = 1\n\n # update the figure\n self._append_image(self._display)\n\n def _init_visualization(self):\n import __main__ as main\n self._notebook = not hasattr(main, '__file__')\n\n self._visualize = True\n # create the figure\n self._anifig = plt.figure()\n self._aniax = self._anifig.add_subplot(1,1,1)\n # initialize the content of the figure (RGB at each position)\n self._anim = []\n self._display = np.zeros((self.N,self.N,3))\n\n # position of the agent\n self._display[self.x_position,self.y_position,0] = 1\n # set the reward locations\n self._display[self.reward_position[0],self.reward_position[1],[0,1]] = 1\n\n for x in range(self.N):\n for y in range(self.N):\n if self._is_wall(x_position=x,y_position=y):\n self._display[x,y,2] = 1.\n\n self._append_image(self._display)\n\n def _append_image(self, display):\n display = (self._aniax.imshow(display,interpolation='nearest',origin='lower'),)\n self._anim.append(display)\n\n def _finish_visualization(self):\n self._visualize = False\n if self._notebook:\n anim = animation.ArtistAnimation(self._anifig, self._anim, blit=True)\n return anim\n else:\n fps = 5\n _ani = animation.ArtistAnimation(\n self._anifig, self._anim, interval=1000./fps, blit=True,\n repeat_delay=1000)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.plot", "matplotlib.pyplot.quiver", "numpy.copy", "numpy.argmax", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.animation.ArtistAnimation", "matplotlib.pyplot.title", "numpy.random.rand", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.semilogy", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel" ] ]