repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
OlegPt/spark | [
"c79fd911ca85f883c493c5e888f7690868d7b5ea"
] | [
"python/pyspark/statcounter.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This file is ported from spark/util/StatCounter.scala\n\nimport copy\nimport math\n\ntry:\n from numpy import maximum, minimum, sqrt\nexcept ImportError:\n maximum = max\n minimum = min\n sqrt = math.sqrt\n\n\nclass StatCounter(object):\n\n def __init__(self, values=None):\n if values is None:\n values = list()\n self.n = 0 # Running count of our values\n self.mu = 0.0 # Running mean of our values\n self.m2 = 0.0 # Running variance numerator (sum of (x - mean)^2)\n self.maxValue = float(\"-inf\")\n self.minValue = float(\"inf\")\n\n for v in values:\n self.merge(v)\n\n # Add a value into this StatCounter, updating the internal statistics.\n def merge(self, value):\n delta = value - self.mu\n self.n += 1\n self.mu += delta / self.n\n self.m2 += delta * (value - self.mu)\n self.maxValue = maximum(self.maxValue, value)\n self.minValue = minimum(self.minValue, value)\n\n return self\n\n # Merge another StatCounter into this one, adding up the internal statistics.\n def mergeStats(self, other):\n if not isinstance(other, StatCounter):\n raise Exception(\"Can only merge Statcounters!\")\n\n if other is self: # reference equality holds\n self.merge(copy.deepcopy(other)) # Avoid overwriting fields in a weird order\n else:\n if self.n == 0:\n self.mu = other.mu\n self.m2 = other.m2\n self.n = other.n\n self.maxValue = other.maxValue\n self.minValue = other.minValue\n\n elif other.n != 0:\n delta = other.mu - self.mu\n if other.n * 10 < self.n:\n self.mu = self.mu + (delta * other.n) / (self.n + other.n)\n elif self.n * 10 < other.n:\n self.mu = other.mu - (delta * self.n) / (self.n + other.n)\n else:\n self.mu = (self.mu * self.n + other.mu * other.n) / (self.n + other.n)\n\n self.maxValue = maximum(self.maxValue, other.maxValue)\n self.minValue = minimum(self.minValue, other.minValue)\n\n self.m2 += other.m2 + (delta * delta * self.n * other.n) / (self.n + other.n)\n self.n += other.n\n return self\n\n # Clone this StatCounter\n def copy(self):\n return copy.deepcopy(self)\n\n def count(self):\n return int(self.n)\n\n def mean(self):\n return self.mu\n\n def sum(self):\n return self.n * self.mu\n\n def min(self):\n return self.minValue\n\n def max(self):\n return self.maxValue\n\n # Return the variance of the values.\n def variance(self):\n if self.n == 0:\n return float('nan')\n else:\n return self.m2 / self.n\n\n #\n # Return the sample variance, which corrects for bias in estimating the variance by dividing\n # by N-1 instead of N.\n #\n def sampleVariance(self):\n if self.n <= 1:\n return float('nan')\n else:\n return self.m2 / (self.n - 1)\n\n # Return the standard deviation of the values.\n def stdev(self):\n return sqrt(self.variance())\n\n #\n # Return the sample standard deviation of the values, 
which corrects for bias in estimating the\n # variance by dividing by N-1 instead of N.\n #\n def sampleStdev(self):\n return sqrt(self.sampleVariance())\n\n def asDict(self, sample=False):\n \"\"\"Returns the :class:`StatCounter` members as a ``dict``.\n\n >>> sc.parallelize([1., 2., 3., 4.]).stats().asDict()\n {'count': 4L,\n 'max': 4.0,\n 'mean': 2.5,\n 'min': 1.0,\n 'stdev': 1.2909944487358056,\n 'sum': 10.0,\n 'variance': 1.6666666666666667}\n \"\"\"\n return {\n 'count': self.count(),\n 'mean': self.mean(),\n 'sum': self.sum(),\n 'min': self.min(),\n 'max': self.max(),\n 'stdev': self.stdev() if sample else self.sampleStdev(),\n 'variance': self.variance() if sample else self.sampleVariance()\n }\n\n def __repr__(self):\n return (\"(count: %s, mean: %s, stdev: %s, max: %s, min: %s)\" %\n (self.count(), self.mean(), self.stdev(), self.max(), self.min()))\n"
] | [
[
"numpy.minimum",
"numpy.maximum"
]
] |
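A minimal usage sketch for the `StatCounter` class in the row above (Welford-style running statistics ported from `spark/util/StatCounter.scala`). It assumes the class from `python/pyspark/statcounter.py` is importable; the sample values are illustrative, not part of the dataset.

```python
# Sketch: exercise StatCounter's running count / mean / variance / min / max.
a = StatCounter([1.0, 2.0, 3.0, 4.0])
b = StatCounter([10.0, 20.0])
a.mergeStats(b)  # combine both streams' statistics without revisiting the data
print(a.count(), a.mean(), a.sampleStdev(), a.min(), a.max())
```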
Jackqu/mmpose | [
"ad8acc5ff5da7993c6befdc4b1ced2c2ecb64533"
] | [
"mmpose/models/detectors/mesh.py"
] | [
"import numpy as np\nimport torch\n\nfrom mmpose.models.misc.discriminator import SMPLDiscriminator\nfrom .. import builder\nfrom ..builder import POSENETS\nfrom .base import BasePose\n\ntry:\n from smplx import SMPL\n has_smpl = True\nexcept (ImportError, ModuleNotFoundError):\n has_smpl = False\n\n\ndef set_requires_grad(nets, requires_grad=False):\n \"\"\"Set requies_grad for all the networks.\n\n Args:\n nets (nn.Module | list[nn.Module]): A list of networks or a single\n network.\n requires_grad (bool): Whether the networks require gradients or not\n \"\"\"\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is not None:\n for param in net.parameters():\n param.requires_grad = requires_grad\n\n\[email protected]_module()\nclass ParametricMesh(BasePose):\n \"\"\"Model-based 3D human mesh detector. Take a single color image as input\n and output 3D joints, SMPL parameters and camera parameters.\n\n Args:\n backbone (dict): Backbone modules to extract feature.\n mesh_head (dict): Mesh head to process feature.\n smpl (dict): Config for SMPL model.\n disc (dict): Discriminator for SMPL parameters. Default: None.\n loss_gan (dict): Config for adversarial loss. Default: None.\n loss_mesh (dict): Config for mesh loss. Default: None.\n train_cfg (dict): Config for training. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n pretrained (str): Path to the pretrained models.\n \"\"\"\n\n def __init__(self,\n backbone,\n mesh_head,\n smpl,\n disc=None,\n loss_gan=None,\n loss_mesh=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super().__init__()\n\n assert has_smpl, 'Please install smplx to use SMPL.'\n\n self.backbone = builder.build_backbone(backbone)\n self.mesh_head = builder.build_head(mesh_head)\n self.generator = torch.nn.Sequential(self.backbone, self.mesh_head)\n\n self.smpl = SMPL(\n model_path=smpl['smpl_path'],\n create_betas=False,\n create_global_orient=False,\n create_body_pose=False,\n create_transl=False)\n\n joints_regressor = torch.tensor(\n np.load(smpl['joints_regressor']), dtype=torch.float).unsqueeze(0)\n self.register_buffer('joints_regressor', joints_regressor)\n\n self.with_gan = disc is not None and loss_gan is not None\n if self.with_gan:\n self.discriminator = SMPLDiscriminator(**disc)\n self.loss_gan = builder.build_loss(loss_gan)\n self.disc_step_count = 0\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.loss_mesh = builder.build_loss(loss_mesh)\n self.init_weights(pretrained=pretrained)\n\n def init_weights(self, pretrained=None):\n \"\"\"Weight initialization for model.\"\"\"\n self.backbone.init_weights(pretrained)\n self.mesh_head.init_weights()\n if self.with_gan:\n self.discriminator.init_weights()\n\n def train_step(self, data_batch, optimizer, **kwargs):\n \"\"\"Train step function.\n\n In this function, the detector will finish the train step following\n the pipeline:\n 1. get fake and real SMPL parameters\n 2. optimize discriminator (if have)\n 3. 
optimize generator\n\n If `self.train_cfg.disc_step > 1`, the train step will contain multiple\n iterations for optimizing discriminator with different input data and\n only one iteration for optimizing generator after `disc_step`\n iterations for discriminator.\n\n Args:\n data_batch (torch.Tensor): Batch of data as input.\n optimizer (dict[torch.optim.Optimizer]): Dict with optimizers for\n generator and discriminator (if have).\n\n Returns:\n outputs (dict): Dict with loss, information for logger,\n the number of samples.\n \"\"\"\n\n img = data_batch['img']\n pred_smpl = self.generator(img)\n pred_pose, pred_beta, pred_camera = pred_smpl\n\n # optimize discriminator (if have)\n if self.train_cfg['disc_step'] > 0 and self.with_gan:\n set_requires_grad(self.discriminator, True)\n fake_data = (pred_camera.detach(), pred_pose.detach(),\n pred_beta.detach())\n mosh_theta = data_batch['mosh_theta']\n real_data = (mosh_theta[:, :3], mosh_theta[:,\n 3:75], mosh_theta[:,\n 75:])\n fake_score = self.discriminator(fake_data)\n real_score = self.discriminator(real_data)\n\n disc_losses = {}\n disc_losses['real_loss'] = self.loss_gan(\n real_score, target_is_real=True, is_disc=True)\n disc_losses['fake_loss'] = self.loss_gan(\n fake_score, target_is_real=False, is_disc=True)\n loss_disc, log_vars_d = self._parse_losses(disc_losses)\n\n optimizer['discriminator'].zero_grad()\n loss_disc.backward()\n optimizer['discriminator'].step()\n self.disc_step_count = \\\n (self.disc_step_count + 1) % self.train_cfg['disc_step']\n\n if self.disc_step_count != 0:\n outputs = dict(\n loss=loss_disc,\n log_vars=log_vars_d,\n num_samples=len(next(iter(data_batch.values()))))\n return outputs\n\n # optimize generator\n pred_out = self.smpl(\n betas=pred_beta,\n body_pose=pred_pose[:, 1:],\n global_orient=pred_pose[:, :1],\n pose2rot=False)\n pred_vertices = pred_out.vertices\n pred_joints_3d = self.get_3d_joints_from_mesh(pred_vertices)\n gt_beta = data_batch['beta']\n gt_pose = data_batch['pose']\n gt_vertices = self.smpl(\n betas=gt_beta,\n body_pose=gt_pose[:, 3:],\n global_orient=gt_pose[:, :3]).vertices\n pred = dict(\n pose=pred_pose,\n beta=pred_beta,\n camera=pred_camera,\n vertices=pred_vertices,\n joints_3d=pred_joints_3d)\n\n target = {\n key: data_batch[key]\n for key in [\n 'pose', 'beta', 'has_smpl', 'joints_3d', 'joints_2d',\n 'joints_3d_visible', 'joints_2d_visible'\n ]\n }\n target['vertices'] = gt_vertices\n\n losses = self.loss_mesh(pred, target)\n\n if self.with_gan:\n set_requires_grad(self.discriminator, False)\n pred_theta = (pred_camera, pred_pose, pred_beta)\n pred_score = self.discriminator(pred_theta)\n loss_adv = self.loss_gan(\n pred_score, target_is_real=True, is_disc=False)\n losses['adv_loss'] = loss_adv\n\n loss, log_vars = self._parse_losses(losses)\n optimizer['generator'].zero_grad()\n loss.backward()\n optimizer['generator'].step()\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=len(next(iter(data_batch.values()))))\n\n return outputs\n\n def forward_train(self, *args, **kwargs):\n \"\"\"Forward function for training.\n\n For ParametricMesh, we do not use this interface.\n \"\"\"\n raise NotImplementedError('This interface should not be used in '\n 'current training schedule. 
Please use '\n '`train_step` for training.')\n\n def val_step(self, data_batch, **kwargs):\n \"\"\"Forward function for evaluation.\n\n Args:\n data_batch (dict): Contain data for forward.\n\n Returns:\n dict: Contain the results from model.\n \"\"\"\n output = self.forward_test(**data_batch, **kwargs)\n return output\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network FLOPs.\n\n See ``tools/get_flops.py``.\n\n Args:\n img (torch.Tensor): Input image.\n\n Returns:\n Tensor: Outputs.\n \"\"\"\n output = self.generator(img)\n return output\n\n def forward_test(self, img, img_metas, **kwargs):\n \"\"\"Defines the computation performed at every call when testing.\"\"\"\n assert img.size(0) == 1\n assert len(img_metas) == 1\n\n pred_smpl = self.generator(img)\n pred_pose, pred_beta, pred_camera = pred_smpl\n pred_out = self.smpl(\n betas=pred_beta,\n body_pose=pred_pose[:, 1:],\n global_orient=pred_pose[:, :1],\n pose2rot=False)\n pred_vertices = pred_out.vertices\n pred_joints_3d = self.get_3d_joints_from_mesh(pred_vertices)\n\n all_preds = (pred_joints_3d.detach().cpu().numpy(),\n (pred_pose.detach().cpu().numpy(),\n pred_beta.detach().cpu().numpy()),\n pred_camera.detach().cpu().numpy())\n\n all_boxes = np.zeros((1, 6), dtype=np.float32)\n image_path = []\n\n img_metas = img_metas[0]\n c = img_metas['center'].reshape(1, -1)\n s = img_metas['scale'].reshape(1, -1)\n\n score = 1.0\n if 'bbox_score' in img_metas:\n score = np.array(img_metas['bbox_score']).reshape(-1)\n\n all_boxes[0, 0:2] = c[:, 0:2]\n all_boxes[0, 2:4] = s[:, 0:2]\n all_boxes[0, 4] = np.prod(s * 200.0, axis=1)\n all_boxes[0, 5] = score\n image_path.extend(img_metas['image_file'])\n\n return all_preds, all_boxes, image_path\n\n def get_3d_joints_from_mesh(self, vertices):\n \"\"\"Get 3D joints from 3D mesh using predefined joints regressor.\"\"\"\n return torch.matmul(\n self.joints_regressor.to(vertices.device), vertices)\n\n def forward(self, img, img_metas=None, return_loss=False, **kwargs):\n \"\"\"Forward function.\n\n Calls either forward_train or forward_test depending on whether\n return_loss=True.\n\n Note:\n batch_size: N\n num_img_channel: C (Default: 3)\n img height: imgH\n img width: imgW\n\n Args:\n img (torch.Tensor[N x C x imgH x imgW]): Input images.\n img_metas (list(dict)): Information about data augmentation\n By default this includes:\n - \"image_file: path to the image file\n - \"center\": center of the bbox\n - \"scale\": scale of the bbox\n - \"rotation\": rotation of the bbox\n - \"bbox_score\": score of bbox\n return_loss (bool): Option to `return loss`. `return loss=True`\n for training, `return loss=False` for validation & test.\n\n Returns:\n Return predicted 3D joints, SMPL parameters, boxes and image paths.\n \"\"\"\n\n if return_loss:\n return self.forward_train(img, img_metas, **kwargs)\n return self.forward_test(img, img_metas, **kwargs)\n\n def show_result(self, **kwargs):\n pass\n"
] | [
[
"numpy.array",
"numpy.zeros",
"torch.nn.Sequential",
"numpy.load",
"numpy.prod"
]
] |
MuliangDu-sudo/thermal-semantic-segmentation | [
"8a37af1cd0b5ca3f41eb9c2235157c9f727aed38"
] | [
"data/base_dataset.py"
] | [
"from abc import ABC\n\nfrom torch.utils import data\nimport torch\nfrom PIL import Image\nimport os\nimport numpy as np\nimport tqdm\nfrom typing import Sequence, Optional, Dict, Callable\n\n\nclass BaseDataset(data.Dataset):\n \"\"\"A generic Dataset class for domain adaptation in image segmentation\n\n Args:\n root (str): Root directory of dataset\n classes (seq[str]): The names of all the classes\n data_list_file (str): File to read the image list from.\n label_list_file (str): File to read the label list from.\n data_folder (str): Sub-directory of the image.\n label_folder (str): Sub-directory of the label.\n mean (seq[float]): mean BGR value. Normalize and convert to the image if not None. Default: None.\n id_to_train_id (dict, optional): the map between the id on the label and the actual train id.\n train_id_to_color (seq, optional): the map between the train id and the color.\n transforms (callable, optional): A function/transform that takes in (PIL Image, label) pair \\\n and returns a transformed version. E.g, :class:`~common.vision.transforms.segmentation.Resize`.\n\n .. note:: In ``data_list_file``, each line is the relative path of an image.\n If your data_list_file has different formats, please over-ride :meth:`~SegmentationList.parse_data_file`.\n ::\n source_dir/dog_xxx.png\n target_dir/dog_xxy.png\n\n In ``label_list_file``, each line is the relative path of an label.\n If your label_list_file has different formats, please over-ride :meth:`~SegmentationList.parse_label_file`.\n\n .. warning:: When mean is not None, please do not provide Normalize and ToTensor in transforms.\n\n \"\"\"\n def __init__(self, root: str, classes: Sequence[str], data_list_file: str, label_list_file: str,\n data_folder: str, label_folder: str,\n id_to_train_id: Optional[Dict] = None, train_id_to_color: Optional[Sequence] = None,\n transforms: Optional[Callable] = None, train_mode=True):\n self.root = root\n self.classes = classes\n self.data_list_file = data_list_file\n self.label_list_file = label_list_file\n self.data_folder = data_folder\n self.label_folder = label_folder\n self.ignore_label = 255\n self.id_to_train_id = id_to_train_id\n self.train_id_to_color = np.array(train_id_to_color)\n self.data_list = self.parse_data_file(self.data_list_file)\n self.label_list = self.parse_label_file(self.label_list_file)\n self.transforms = transforms\n self.train_mode = train_mode\n\n def parse_data_file(self, file_name):\n \"\"\"Parse file to image list\n\n Args:\n file_name (str): The path of data file\n\n Returns:\n List of image path\n \"\"\"\n with open(file_name, \"r\") as f:\n data_list = [line.strip() for line in f.readlines()]\n return data_list\n\n def parse_label_file(self, file_name):\n \"\"\"Parse file to label list\n\n Args:\n file_name (str): The path of data file\n\n Returns:\n List of label path\n \"\"\"\n with open(file_name, \"r\") as f:\n label_list = [line.strip() for line in f.readlines()]\n return label_list\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, index):\n image_name = self.data_list[index]\n image = Image.open(os.path.join(image_name)).convert('RGB') # 2048x1024\n input_dict = {}\n if self.train_mode:\n label_name = self.label_list[index]\n label = Image.open(os.path.join(label_name))\n input_dict['image'], label = self.transforms(image, label)\n # remap label\n if isinstance(label, torch.Tensor):\n label = label.numpy()\n label = np.asarray(label, np.int64)\n label_copy = self.ignore_label * np.ones(label.shape, dtype=np.int64)\n if 
self.id_to_train_id:\n for k, v in self.id_to_train_id.items():\n label_copy[label == k] = v\n input_dict['label'] = label_copy\n return input_dict\n\n if not self.train_mode:\n image = self.transforms(image)\n translation_name = image_name.replace(\"leftImg8bit\", \"translation\")\n\n return image, translation_name\n\n @property\n def num_classes(self) -> int:\n \"\"\"Number of classes\"\"\"\n return len(self.classes)\n\n def decode_target(self, target):\n \"\"\" Decode label (each value is integer) into the corresponding RGB value.\n\n Args:\n target (numpy.array): label in shape H x W\n\n Returns:\n RGB label (PIL Image) in shape H x W x 3\n \"\"\"\n target = target.copy()\n target[target == 255] = self.num_classes # unknown label is black on the RGB label\n target = self.train_id_to_color[target]\n return Image.fromarray(target.astype(np.uint8))\n\n def collect_image_paths(self):\n \"\"\"Return a list of the absolute path of all the images\"\"\"\n return [os.path.join(self.root, self.data_folder, image_name) for image_name in self.data_list]\n\n @staticmethod\n def _save_pil_image(image, path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n image.save(path)\n\n def translate(self, transform: Callable, target_root: str, color=False):\n \"\"\" Translate an image and save it into a specified directory\n\n Args:\n transform (callable): a transform function that maps (image, label) pair from one domain to another domain\n target_root (str): the root directory to save images and labels\n\n \"\"\"\n os.makedirs(target_root, exist_ok=True)\n for image_name, label_name in zip(tqdm.tqdm(self.data_list), self.label_list):\n image_path = os.path.join(target_root, self.data_folder, image_name)\n label_path = os.path.join(target_root, self.label_folder, label_name)\n if os.path.exists(image_path) and os.path.exists(label_path):\n continue\n image = Image.open(os.path.join(self.root, self.data_folder, image_name)).convert('RGB')\n label = Image.open(os.path.join(self.root, self.label_folder, label_name))\n\n translated_image, translated_label = transform(image, label)\n self._save_pil_image(translated_image, image_path)\n self._save_pil_image(translated_label, label_path)\n if color:\n colored_label = self.decode_target(np.array(translated_label))\n file_name, file_ext = os.path.splitext(label_name)\n self._save_pil_image(colored_label, os.path.join(target_root, self.label_folder,\n \"{}_color{}\".format(file_name, file_ext)))\n\n @property\n def evaluate_classes(self):\n \"\"\"The name of classes to be evaluated\"\"\"\n return self.classes\n\n @property\n def ignore_classes(self):\n \"\"\"The name of classes to be ignored\"\"\"\n return list(set(self.classes) - set(self.evaluate_classes))"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.asarray"
]
] |
jiahuei/test-caption-actions | [
"cbf68dc29a0fdafe92730bf4881319bcbd41eb7f"
] | [
"caption_vae/scripts/collect_captions_plus_OLD.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 28 Aug 2019 17:15:59\n\n@author: jiahuei\n\"\"\"\nfrom link_dirs import pjoin\nimport os\nimport json\nimport pickle\nimport math\nimport random\nimport textwrap\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fmng\nfrom PIL import Image, ImageEnhance, ImageOps, ImageFont, ImageDraw\n\n# Variables\nSORT_BY_METRIC = 'CIDEr'\nJUMP_TO_IDX = 4970\nVISUALISE_ATTENTION = True\nRADIX_SAMPLE_TIMESTEP = False\nRADIX_NUM_TOKENS = 2\nMODEL_NAMES = ['sps_80.0', 'sps_97.5']\nBASELINE_NAME = 'baseline'\nOUTPUT_DIR = '/home/jiahuei/Documents/1_TF_files/radix_v2/compiled_mscoco_test'\nIMAGE_DIR = '/master/datasets/mscoco/val2014'\n# OUTPUT_DIR = '/home/jiahuei/Documents/1_TF_files/radix_v2/compiled_insta_val'\n# IMAGE_DIR = '/master/datasets/insta/images'\nJSON_ROOT = '/home/jiahuei/Documents/1_TF_files/radix_v2'\n\nBASELINE_JSON = pjoin(\n JSON_ROOT,\n '/home/jiahuei/Documents/1_TF_files/radix_v2/mscoco_v2/word_w256_LSTM_r512_h1_none_cnnFT_SCST_b7C1.0B0.0/run_01___infer_test_b1_lp0.0___08-11_14-58/captions___113287.json'\n)\nMODEL_JSON = [\n pjoin(JSON_ROOT,\n '/home/jiahuei/Documents/1_TF_files/radix_v2/mscoco_v2/word_w256_LSTM_r512_h1_none_cnnFT_SCST_b7C1.0B0.0/run_01___infer_test_b1_lp0.0___08-11_14-58/captions___113287.json'),\n pjoin(JSON_ROOT,\n '/home/jiahuei/Documents/1_TF_files/radix_v2/mscoco_v2/word_w256_LSTM_r512_h1_none_cnnFT_SCST_b7C1.0B0.0/run_01___infer_test_b1_lp0.0___08-11_14-58/captions___113287.json')\n]\n\nBASELINE_SCORES_JSON = pjoin(\n JSON_ROOT,\n '/home/jiahuei/Documents/1_TF_files/radix_v2/mscoco_v2/word_w256_LSTM_r512_h1_none_cnnFT_SCST_b7C1.0B0.0/run_01___infer_test_b1_lp0.0___08-11_14-58/metric_scores_detailed_113287.json'\n)\nMODEL_SCORES_JSON = [\n pjoin(JSON_ROOT,\n '/home/jiahuei/Documents/1_TF_files/radix_v2/mscoco_v2/word_w256_LSTM_r512_h1_none_cnnFT_SCST_b7C1.0B0.0/run_01___infer_test_b1_lp0.0___08-11_14-58/metric_scores_detailed_113287.json'),\n pjoin(JSON_ROOT,\n '/home/jiahuei/Documents/1_TF_files/radix_v2/mscoco_v2/word_w256_LSTM_r512_h1_none_cnnFT_SCST_b7C1.0B0.0/run_01___infer_test_b1_lp0.0___08-11_14-58/metric_scores_detailed_113287.json')\n]\n\nSHORTLISTED_IMGS = ['COCO_val2014_000000346067.jpg']\n\n# Constants\nrandom.seed(3310)\nCATEGORIES = dict(\n x='both_wrong',\n y='both_correct',\n b='baseline_correct',\n m='model_correct',\n a='ambiguous',\n)\nMETRICS = ['CIDEr', 'Bleu_4', 'Bleu_3', 'Bleu_2', 'Bleu_1', 'ROUGE_L', 'METEOR']\nIMG_RESIZE = 512\nIMG_CROP = int(224 / 256 * IMG_RESIZE)\nDISPLAY_BG_SIZE = [int(IMG_RESIZE * 4.5), int(IMG_RESIZE * 3.0)]\nBORDER = int(DISPLAY_BG_SIZE[0] / 20)\nTEXT_SIZE = int(IMG_RESIZE / 7)\ntry:\n font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', TEXT_SIZE)\nexcept OSError:\n FONT_LIST = [f for f in fmng.findSystemFonts(fontpaths=None, fontext='ttf')\n if 'mono' in os.path.basename(f).lower()]\n font = ImageFont.truetype(FONT_LIST[0], TEXT_SIZE)\n\n\ndef _img_id_to_name(img_id):\n if type(img_id) == str:\n # Insta-1.1M\n img_name = img_id\n else:\n img_name = 'COCO_val2014_{:012d}.jpg'.format(img_id)\n return img_name\n\n\ndef _load_caption_json(res_dict, json_path, name):\n with open(json_path, 'r') as ff:\n captions = json.load(ff)\n\n pickle_path = json_path.replace(\"captions___\", \"outputs___\").replace(\".json\", \".pkl\")\n if not os.path.isfile(pickle_path):\n data = None\n else:\n with open(pickle_path, \"rb\") as ff:\n data = pickle.load(ff)\n\n for c in captions:\n img_id = c['image_id']\n img_name = 
_img_id_to_name(img_id)\n if img_id not in res_dict:\n res_dict[img_id] = dict(image_id=img_id, image_name=img_name)\n res_dict[img_id][name] = dict(caption=c['caption'])\n res_dict[img_id][name]['attention'] = data['attention'] if data is not None else None\n\n\ndef _load_score_json(res_dict, json_path, name):\n with open(json_path, 'r') as ff:\n scores = json.load(ff)\n for sc in scores:\n img_id = sc['image_id']\n assert img_id in res_dict\n for m in METRICS:\n res_dict[img_id][name][m] = sc[m]\n\n\ndef _sort_captions(res_dict, sort_metric, sort_model, use_diff=False):\n \"\"\"\n Return a list of sorted captions.\n :param res_dict: id_to_results\n :param sort_metric: Metric used to sort. If `random`, return list with randomised order.\n :param sort_model: Model result used to sort.\n :param use_diff: If True, use the difference in score between model and baseline to sort.\n :return: A list of sorted captions.\n \"\"\"\n if isinstance(sort_model, list):\n assert len(sort_model) > 0\n else:\n sort_model = [sort_model]\n res = list(res_dict.values())\n if sort_metric in METRICS:\n def _get_model_mean(elem):\n sc_m = [elem[m][sort_metric] for m in sort_model]\n return sum(sc_m) / len(sc_m)\n\n if use_diff:\n def _key_fn(elem):\n sc_m = _get_model_mean(elem)\n sc_b = elem[BASELINE_NAME][sort_metric]\n return sc_m - sc_b\n else:\n def _key_fn(elem):\n return _get_model_mean(elem)\n res_sorted = sorted(res, key=_key_fn, reverse=True)\n elif sort_metric == 'random':\n res_sorted = random.shuffle(res)\n else:\n raise ValueError('`sort_metric` must be one of: {}'.format(METRICS + ['random']))\n return res_sorted\n\n\ndef _prepare_img(img_path):\n print(os.path.basename(img_path))\n img = Image.open(img_path)\n img = ImageEnhance.Brightness(img).enhance(1.10)\n img = ImageEnhance.Contrast(img).enhance(1.050)\n\n # Resize to 512 x 512 instead of 256 x 256\n # Crop to 448 x 448 instead of 224 x 224\n img = img.resize([IMG_RESIZE, IMG_RESIZE], Image.BILINEAR)\n img = ImageOps.crop(img, (IMG_RESIZE - IMG_CROP) / 2)\n return img\n\n\ndef _display_captions(captions_list, sort_metric):\n # Display captions\n print('')\n instructions = [\n '\"x\" if both are wrong',\n '\"y\" if both are correct',\n '\"b\" if baseline is correct',\n '\"m\" if model is correct',\n '\"a\" if ambiguous',\n '\"e\" to exit',\n 'other keys to skip.',\n '---\\n',\n ]\n instructions = '\\n'.join(instructions)\n global JUMP_TO_IDX\n if JUMP_TO_IDX < 0 or JUMP_TO_IDX >= len(captions_list):\n JUMP_TO_IDX = 0\n\n img_plot = None\n fig = plt.figure(figsize=(20, 10))\n for cap_idx, cap in enumerate(captions_list[JUMP_TO_IDX:]):\n if len(SHORTLISTED_IMGS) > 0 and not any(str(cap['image_id']) in _ for _ in SHORTLISTED_IMGS):\n # Skip if no partial match with any shortlisted images\n continue\n\n img = _prepare_img(pjoin(IMAGE_DIR, cap['image_name']))\n\n # Collect info\n base_score = cap[BASELINE_NAME][sort_metric]\n model_score = [cap[n][sort_metric] for n in MODEL_NAMES]\n base_cap = '{} ({:.2f}): {}'.format(\n BASELINE_NAME, base_score, cap[BASELINE_NAME]['caption'])\n model_cap = ['{} ({:.2f}): {}'.format(\n n, model_score[i], cap[n]['caption']) for i, n in enumerate(MODEL_NAMES)]\n\n # Visualise\n bg_big = Image.new('RGB', DISPLAY_BG_SIZE)\n bg_big.paste(img, (BORDER, int(BORDER * 1.5)))\n draw = ImageDraw.Draw(bg_big)\n draw.text(\n (BORDER, int(BORDER * 0.5)),\n '# {} / {}'.format(JUMP_TO_IDX + cap_idx + 1, len(captions_list)),\n font=font\n )\n\n # Draw captions\n texts_wrp = []\n for t in [base_cap] + model_cap:\n print(t)\n 
texts_wrp.append(textwrap.wrap(t, width=45))\n print('')\n offset = int(BORDER * 1.5)\n for text_group in texts_wrp:\n for text in text_group:\n draw.text((BORDER, IMG_RESIZE + offset), text, font=font)\n offset += int(TEXT_SIZE * 1.05)\n offset += TEXT_SIZE\n\n if img_plot is None:\n img_plot = plt.imshow(bg_big)\n else:\n img_plot.set_data(bg_big)\n plt.show(block=False)\n fig.canvas.draw()\n\n # Get key press\n # key_input = raw_input(instructions)\n key_input = input(instructions)\n fig.canvas.flush_events()\n\n if key_input == 'e':\n plt.close()\n break\n elif key_input in CATEGORIES:\n _save_captions(CATEGORIES[key_input], img, cap, bg_big, sort_metric)\n print('')\n\n\ndef _display_attention(captions_list, sort_metric, radix_sample_timestep):\n # Display captions\n print('')\n instructions = [\n '\"y\" to save',\n '\"r\" to repeat',\n '\"e\" to exit',\n 'other keys to skip.',\n '---\\n',\n ]\n instructions = '\\n'.join(instructions)\n global JUMP_TO_IDX\n if JUMP_TO_IDX < 0 or JUMP_TO_IDX >= len(captions_list):\n JUMP_TO_IDX = 0\n\n model_name = MODEL_NAMES[0]\n img_plot = None\n fig = plt.figure(figsize=(20, 10))\n for cap_idx, cap in enumerate(captions_list[JUMP_TO_IDX:]):\n if len(SHORTLISTED_IMGS) > 0 and not any(str(cap['image_id']) in _ for _ in SHORTLISTED_IMGS):\n # Skip if no partial match with any shortlisted images\n continue\n\n # Draw attention maps if available (only for the 1st model)\n att_dict = cap[MODEL_NAMES[0]]['attention']\n if att_dict is None:\n continue\n img = _prepare_img(pjoin(IMAGE_DIR, cap['image_name']))\n\n # Collect info\n model_cap = '{} ({:.2f}): {}'.format(\n model_name, cap[model_name][sort_metric], cap[model_name]['caption']\n )\n sent_len = len(cap[model_name]['caption'].split(' '))\n\n # Visualise\n bg_big = Image.new('RGB', [IMG_CROP * 6, IMG_CROP * 4])\n bg_big.paste(img, (BORDER, BORDER))\n draw = ImageDraw.Draw(bg_big)\n draw.text(\n (IMG_CROP + BORDER * 2, BORDER),\n '# {} / {}'.format(JUMP_TO_IDX + cap_idx + 1, len(captions_list)),\n font=font\n )\n text_group = textwrap.wrap(model_cap, width=45)\n print(model_cap + '\\n')\n for i, text in enumerate(text_group):\n draw.text((IMG_CROP + BORDER * 2, BORDER * 2 + int(TEXT_SIZE * 1.05) * (i + 1)), text, font=font)\n\n # assert isinstance(att_map_list, list)\n # atts = [_[img_name] for _ in att_map_list]\n # max_len = max(_.shape[1] for _ in atts) + 2\n # bgs = [Image.new('RGB', [IMG_CROP * 6, IMG_CROP * 4]) for _ in range(max_len)]\n # for i, att in enumerate(atts):\n\n assert isinstance(att_dict, dict)\n att = att_dict[cap['image_id']]\n hw = int(math.sqrt(att.shape[-1]))\n num_heads = att.shape[0]\n att = np.reshape(att, [num_heads, att.shape[1], hw, hw])\n ori_timesteps = att.shape[1]\n if radix_sample_timestep:\n att = att[:, ::RADIX_NUM_TOKENS, :, :]\n sampled_timesteps = att.shape[1]\n att = att[:, :sent_len, :]\n\n # Apply attention map\n bg = Image.new('RGB', [IMG_CROP, IMG_CROP])\n # border = int(IMG_CROP / 4)\n offset = IMG_CROP + BORDER\n att_comp = [bg_big.copy() for _ in range(att.shape[1] + 1)]\n\n all_comps = []\n for head in range(num_heads):\n maps = att[head, :, :, :]\n m_max = maps.max()\n # if m_max < 0.01:\n # maps *= (255.0 / m_max / 5)\n # else:\n # maps *= (255.0 / m_max)\n maps *= (255.0 / m_max)\n maps = maps.astype(np.uint8)\n\n comps = []\n for t, m in enumerate(maps):\n m = Image.fromarray(m)\n m = m.convert('L')\n m = m.resize([IMG_CROP, IMG_CROP], Image.BILINEAR)\n comp = Image.composite(img, bg, m)\n comp = ImageEnhance.Brightness(comp).enhance(2.0)\n comp = 
ImageEnhance.Contrast(comp).enhance(1.5)\n x = (head % 4) * offset + BORDER\n y = int(head / 4) * offset + BORDER * 2 + IMG_CROP\n att_comp[t].paste(comp, (x, y))\n comps.append(comp)\n all_comps.append(comps)\n\n key_input = 'r'\n while key_input == 'r':\n for comp in att_comp:\n if img_plot is None:\n img_plot = plt.imshow(comp)\n else:\n img_plot.set_data(comp)\n plt.show(block=False)\n fig.canvas.draw()\n plt.pause(.05)\n\n # Get key press\n # key_input = raw_input(instructions)\n key_input = input(instructions)\n fig.canvas.flush_events()\n\n if key_input == 'e':\n plt.close()\n break\n elif key_input == 'y':\n img_id = cap['image_id']\n score = score = '{:1.3f}'.format(cap[model_name][sort_metric]).replace('.', '-')\n if type(img_id) == str:\n output_dir = pjoin(OUTPUT_DIR, '{}_{}'.format(score, img_id))\n else:\n output_dir = pjoin(OUTPUT_DIR, '{}_{:012d}'.format(score, img_id))\n os.makedirs(output_dir, exist_ok=True)\n\n img.save(pjoin(output_dir, 'base.jpg'))\n footnote = [\n 'Num words (including <EOS>): {}'.format(sent_len + 1),\n 'Original attention time steps: {}'.format(ori_timesteps),\n 'Sampled time steps before truncation: {}'.format(sampled_timesteps),\n 'Sampled time steps after truncation: {}'.format(att.shape[1]),\n ]\n draw.text((BORDER, IMG_CROP + BORDER * 4), '\\n\\n'.join(footnote), font=font)\n bg_big.save(pjoin(output_dir, 'comp.jpg'))\n with open(pjoin(output_dir, 'caption.txt'), 'w') as f:\n f.write(cap[model_name]['caption'])\n for i, h in enumerate(all_comps):\n for j, t in enumerate(h):\n if radix_sample_timestep:\n j *= RADIX_NUM_TOKENS\n t.save(pjoin(output_dir, 'h{}_t{}.jpg'.format(i, j)))\n print('')\n\n\ndef _save_captions(caption_type, img, caption, composite, sort_metric):\n img_id = caption['image_id']\n base_score = caption[BASELINE_NAME][sort_metric]\n model_score = [caption[n][sort_metric] for n in MODEL_NAMES]\n base_out = '{} ({}): {}'.format(\n BASELINE_NAME, base_score, caption[BASELINE_NAME]['caption'])\n model_out = ['{} ({}): {}'.format(\n n, model_score[i], caption[n]['caption']) for i, n in enumerate(MODEL_NAMES)]\n # Save image\n score = '{:1.3f}'.format(model_score[-1]).replace('.', '-')\n type_short = {v: k for k, v in CATEGORIES.items()}\n if type(img_id) == str:\n img_out_name = '{}_{}_{}.jpg'.format(type_short[caption_type], score, img_id)\n else:\n img_out_name = '{}_{}_{:012d}.jpg'.format(type_short[caption_type], score, img_id)\n img.save(pjoin(OUTPUT_DIR, img_out_name))\n\n draw = ImageDraw.Draw(composite)\n offset = int(IMG_RESIZE - TEXT_SIZE) / 2\n draw.text((IMG_CROP + BORDER * 2, offset), img_out_name, font=font)\n draw.text((IMG_CROP + BORDER * 2, offset + TEXT_SIZE), 'Type: ' + caption_type, font=font)\n composite.save(pjoin(OUTPUT_DIR, 'comp_' + img_out_name))\n\n # Write captions\n out_str = '{}\\r\\n{}\\r\\n\\r\\n'.format(base_out, '\\r\\n'.join(model_out))\n with open(pjoin(OUTPUT_DIR, 'captions_{}.txt'.format(caption_type)), 'a') as f:\n f.write('{}\\r\\n{}'.format(img_out_name, out_str))\n\n # Write captions in LATEX format\n modcap = ' \\\\begin{{modcap}}\\n'\n modcap += ' {}\\n'\n modcap += ' \\\\end{{modcap}} \\\\\\\\\\n'\n out_str = [\n ' \\\\gph{{1.0}}{{resources/xxx/{}}} &'.format(img_out_name),\n ' \\\\begin{tabular}{M{\\\\linewidth}}',\n ' \\\\begin{basecap}',\n ' {}'.format(caption[BASELINE_NAME]['caption']),\n ' \\\\end{basecap} \\\\\\\\',\n ]\n for n in MODEL_NAMES:\n out_str += [modcap.format(caption[n]['caption'])]\n out_str += [\n ' \\\\end{tabular} &',\n ' ',\n ]\n\n with open(pjoin(OUTPUT_DIR, 
'captions_latex_{}.txt'.format(caption_type)), 'a') as f:\n f.write('\\n'.join(out_str) + '\\n')\n\n\ndef main():\n if len(SHORTLISTED_IMGS) > 0:\n global JUMP_TO_IDX\n JUMP_TO_IDX = 0\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n id_to_results = {}\n\n config = dict(\n sort_by_metric=SORT_BY_METRIC,\n baseline_json=BASELINE_JSON,\n model_json=MODEL_JSON,\n )\n with open(pjoin(OUTPUT_DIR, 'config.json'), 'w') as f:\n json.dump(config, f)\n\n # Load captions\n for j, n in zip(MODEL_JSON, MODEL_NAMES):\n _load_caption_json(id_to_results, j, n)\n _load_caption_json(id_to_results, BASELINE_JSON, BASELINE_NAME)\n\n # Load scores\n for j, n in zip(MODEL_SCORES_JSON, MODEL_NAMES):\n _load_score_json(id_to_results, j, n)\n _load_score_json(id_to_results, BASELINE_SCORES_JSON, BASELINE_NAME)\n\n # Sort captions\n caption_list = _sort_captions(id_to_results,\n sort_metric=SORT_BY_METRIC,\n sort_model=MODEL_NAMES,\n use_diff=not VISUALISE_ATTENTION)\n if VISUALISE_ATTENTION:\n _display_attention(caption_list, SORT_BY_METRIC, RADIX_SAMPLE_TIMESTEP)\n else:\n _display_captions(caption_list, SORT_BY_METRIC)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.reshape",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.font_manager.findSystemFonts",
"matplotlib.pyplot.imshow"
]
] |
puneetjain-iclp/xgboost | [
"9d40789faa2361bb95c6532342fbaa3a9fd5ddf5"
] | [
"tests/python/test_sparse_dmatrix.py"
] | [
"import numpy as np\nimport xgboost as xgb\nfrom scipy.sparse import rand\n\nrng = np.random.RandomState(1)\n\nparam = {'max_depth': 3, 'objective': 'binary:logistic', 'verbosity': 0}\n\n\ndef test_sparse_dmatrix_csr():\n nrow = 100\n ncol = 1000\n x = rand(nrow, ncol, density=0.0005, format='csr', random_state=rng)\n assert x.indices.max() < ncol - 1\n x.data[:] = 1\n dtrain = xgb.DMatrix(x, label=np.random.binomial(1, 0.3, nrow))\n assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)\n watchlist = [(dtrain, 'train')]\n bst = xgb.train(param, dtrain, 5, watchlist)\n bst.predict(dtrain)\n\n\ndef test_sparse_dmatrix_csc():\n nrow = 1000\n ncol = 100\n x = rand(nrow, ncol, density=0.0005, format='csc', random_state=rng)\n assert x.indices.max() < nrow - 1\n x.data[:] = 1\n dtrain = xgb.DMatrix(x, label=np.random.binomial(1, 0.3, nrow))\n assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)\n watchlist = [(dtrain, 'train')]\n bst = xgb.train(param, dtrain, 5, watchlist)\n bst.predict(dtrain)\n"
] | [
[
"numpy.random.binomial",
"scipy.sparse.rand",
"numpy.random.RandomState"
]
] |
dscho15/LUDO_AE | [
"39cc60f3913353ecfe842319a60864ec60eeac82"
] | [
"mutator.py"
] | [
"import numpy as np\n\nclass mutator_uniform:\n\n def __init__(self, L, U):\n self.prob = 0.01\n self.L = L\n self.U = U\n\n def mutate(self, genes):\n \n # Indices \n indices_to_update = np.random.uniform(0, 1, genes.shape) > (1-self.prob)\n\n # Uniform mutation\n genes[indices_to_update] = np.random.uniform(self.L, self.U, genes.shape)[indices_to_update]\n\n # Ensure nobody is above or below 1\n genes[genes > 1] = self.U\n genes[genes < -1] = self.L"
] | [
[
"numpy.random.uniform"
]
] |
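A short sketch of how the `mutator_uniform` above might be driven. The population shape and the `[-1, 1]` bounds are assumptions for illustration; `mutate` modifies the array in place.

```python
import numpy as np

# Sketch: mutate a population of genomes in place using mutator_uniform above.
genes = np.random.uniform(-1.0, 1.0, size=(10, 8))  # 10 genomes, 8 genes each
mut = mutator_uniform(L=-1.0, U=1.0)
mut.mutate(genes)  # ~1% of entries resampled uniformly in [L, U], then clipped
```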
h4iku/bug-localization | [
"1d3150c4a7aa65eb0f84674938ca64d1462cc190"
] | [
"buglocalizer/semantic_similarity.py"
] | [
"import json\nimport pickle\n\nimport spacy\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom datasets import DATASET\n\n\ndef calculate_similarity(src_files, bug_reports):\n\n # Loading word vectors\n nlp = spacy.load('en_core_web_lg')\n\n src_docs = [nlp(' '.join(src.file_name['unstemmed'] + src.class_names['unstemmed']\n + src.attributes['unstemmed']\n + src.comments['unstemmed']\n + src.method_names['unstemmed']))\n for src in src_files.values()]\n\n min_max_scaler = MinMaxScaler()\n\n all_simis = []\n for report in bug_reports.values():\n report_doc = nlp(' '.join(report.summary['unstemmed']\n + report.pos_tagged_description['unstemmed']))\n scores = []\n for src_doc in src_docs:\n simi = report_doc.similarity(src_doc)\n scores.append(simi)\n\n scores = np.array([float(count)\n for count in scores]).reshape(-1, 1)\n normalized_scores = np.concatenate(\n min_max_scaler.fit_transform(scores)\n )\n\n all_simis.append(normalized_scores.tolist())\n\n return all_simis\n\n\ndef main():\n\n with open(DATASET.root / 'preprocessed_src.pickle', 'rb') as file:\n src_files = pickle.load(file)\n with open(DATASET.root / 'preprocessed_reports.pickle', 'rb') as file:\n bug_reports = pickle.load(file)\n\n all_simis = calculate_similarity(src_files, bug_reports)\n\n with open(DATASET.root / 'semantic_similarity.json', 'w') as file:\n json.dump(all_simis, file)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"sklearn.preprocessing.MinMaxScaler"
]
] |
akulaarora/pre-training | [
"474d549aa402b4cdd5e3629d23d035c31b60a360"
] | [
"uncertainty/utils/calibration_tools.py"
] | [
"import numpy as np\n\n\ndef calib_err(confidence, correct, p='2', beta=100):\n # beta is target bin size\n idxs = np.argsort(confidence)\n confidence = confidence[idxs]\n correct = correct[idxs]\n bins = [[i * beta, (i + 1) * beta] for i in range(len(confidence) // beta)]\n bins[-1] = [bins[-1][0], len(confidence)]\n\n cerr = 0\n total_examples = len(confidence)\n for i in range(len(bins) - 1):\n bin_confidence = confidence[bins[i][0]:bins[i][1]]\n bin_correct = correct[bins[i][0]:bins[i][1]]\n num_examples_in_bin = len(bin_confidence)\n\n if num_examples_in_bin > 0:\n difference = np.abs(np.nanmean(bin_confidence) - np.nanmean(bin_correct))\n\n if p == '2':\n cerr += num_examples_in_bin / total_examples * np.square(difference)\n elif p == '1':\n cerr += num_examples_in_bin / total_examples * difference\n elif p == 'infty' or p == 'infinity' or p == 'max':\n cerr = np.maximum(cerr, difference)\n else:\n assert False, \"p must be '1', '2', or 'infty'\"\n\n if p == '2':\n cerr = np.sqrt(cerr)\n\n return cerr\n\n\ndef soft_f1(confidence, correct):\n wrong = 1 - correct\n\n # # the incorrectly classified samples are our interest\n # # so they make the positive class\n # tp_soft = np.sum((1 - confidence) * wrong)\n # fp_soft = np.sum((1 - confidence) * correct)\n # fn_soft = np.sum(confidence * wrong)\n\n # return 2 * tp_soft / (2 * tp_soft + fn_soft + fp_soft)\n return 2 * ((1 - confidence) * wrong).sum()/(1 - confidence + wrong).sum()\n\n # if True:\n # import torch\n # import torch.nn.functional as F\n #\n # best_loss = float('inf')\n # t = lower\n # logits = torch.FloatTensor(logits)\n # labels = torch.LongTensor(labels)\n # for temp in np.arange(lower, upper, 0.001):\n # loss = float(F.cross_entropy(logits / temp, labels))\n # if loss < best_loss:\n # best_loss = loss\n # t = temp\n\n\ndef tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):\n logits = np.array(logits)\n\n if binary_search:\n import torch\n import torch.nn.functional as F\n\n logits = torch.FloatTensor(logits)\n labels = torch.LongTensor(labels)\n t_guess = torch.FloatTensor([0.5*(lower + upper)]).requires_grad_()\n\n while upper - lower > eps:\n if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:\n upper = 0.5 * (lower + upper)\n else:\n lower = 0.5 * (lower + upper)\n t_guess = t_guess * 0 + 0.5 * (lower + upper)\n\n t = min([lower, 0.5 * (lower + upper), upper], key=lambda x: float(F.cross_entropy(logits / x, labels)))\n else:\n import cvxpy as cx\n\n set_size = np.array(logits).shape[0]\n\n t = cx.Variable()\n\n expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)\n for i in range(set_size)))\n p = cx.Problem(expr, [lower <= t, t <= upper])\n\n p.solve() # p.solve(solver=cx.SCS)\n t = 1 / t.value\n\n return t\n\n\ndef get_measures(confidence, correct):\n rms = calib_err(confidence, correct, p='2')\n mad = calib_err(confidence, correct, p='1')\n sf1 = soft_f1(confidence, correct)\n\n return rms, mad, sf1\n\n\ndef print_measures(rms, mad, sf1, method_name='Baseline'):\n print('\\t\\t\\t\\t\\t\\t\\t' + method_name)\n print('RMS Calib Error (%): \\t\\t{:.2f}'.format(100 * rms))\n print('MAD Calib Error (%): \\t\\t{:.2f}'.format(100 * mad))\n print('Soft F1 Score (%): \\t\\t{:.2f}'.format(100 * sf1))\n\n\ndef print_measures_with_std(rmss, mads, sf1s, method_name='Baseline'):\n print('\\t\\t\\t\\t\\t\\t\\t' + method_name)\n print('RMS Calib Error (%): \\t\\t{:.2f}\\t+/- {:.2f}'.format(100 * np.mean(rmss), 100 * np.std(rmss)))\n 
print('MAD Calib Error (%): \\t\\t{:.2f}\\t+/- {:.2f}'.format(100 * np.mean(mads), 100 * np.std(mads)))\n print('Soft F1 Score (%): \\t\\t{:.2f}\\t+/- {:.2f}'.format(100 * np.mean(sf1s), 100 * np.std(sf1s)))\n\n\ndef show_calibration_results(confidence, correct, method_name='Baseline'):\n\n print('\\t\\t\\t\\t' + method_name)\n print('RMS Calib Error (%): \\t\\t{:.2f}'.format(\n 100 * calib_err(confidence, correct, p='2')))\n\n print('MAD Calib Error (%): \\t\\t{:.2f}'.format(\n 100 * calib_err(confidence, correct, p='1')))\n\n # print('Max Calib Error (%): \\t\\t{:.2f}'.format(\n # 100 * calib_err(confidence, correct, p='infty')))\n\n print('Soft F1-Score (%): \\t\\t{:.2f}'.format(\n 100 * soft_f1(confidence, correct)))\n\n # add error detection measures?\n"
] | [
[
"numpy.square",
"numpy.array",
"torch.FloatTensor",
"numpy.mean",
"numpy.std",
"numpy.nanmean",
"torch.nn.functional.cross_entropy",
"torch.LongTensor",
"numpy.sqrt",
"numpy.argsort",
"numpy.maximum"
]
] |
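A runnable sketch of the calibration helpers in the row above. The synthetic confidences and labels are assumptions, sized so the default bin width (`beta=100`) yields several bins for `calib_err`.

```python
import numpy as np

# Sketch: RMS/MAD calibration error and soft F1 on synthetic predictions,
# via get_measures / print_measures from calibration_tools.py above.
rng = np.random.RandomState(0)
confidence = rng.uniform(0.5, 1.0, 1000)                    # predicted confidence
correct = (rng.uniform(0.0, 1.0, 1000) < confidence).astype(float)  # 1 = correct
rms, mad, sf1 = get_measures(confidence, correct)
print_measures(rms, mad, sf1, method_name='Synthetic')
```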
yanghr/vision | [
"bb8e9ddd60d4682044017e889fa073664eba77ed"
] | [
"references/classification/utils.py"
] | [
"from __future__ import print_function\nfrom collections import defaultdict, deque\nimport datetime\nimport time\nimport torch\nimport torch.distributed as dist\n\nimport errno\nimport os\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({global_avg:.4f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n def __str__(self):\n return self.fmt.format(\n median=self.median,\n avg=self.avg,\n global_avg=self.global_avg,\n max=self.max,\n value=self.value)\n\n\nclass MetricLogger(object):\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n type(self).__name__, attr))\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(\n \"{}: {}\".format(name, str(meter))\n )\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = ''\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt='{avg:.4f}')\n data_time = SmoothedValue(fmt='{avg:.4f}')\n space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n log_msg = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}',\n 'max mem: {memory:.0f}'\n ])\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0:\n eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB))\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('{} Total 
time: {}'.format(header, total_time_str))\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target[None])\n\n res = []\n for k in topk:\n correct_k = correct[:k].flatten().sum(dtype=torch.float32)\n res.append(correct_k * (100.0 / batch_size))\n return res\n\n\ndef mkdir(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n if 'SLURM_PROCID' in os.environ:\n args.rank = int(os.environ['SLURM_PROCID'])\n args.gpu = args.rank % torch.cuda.device_count()\n elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.gpu = int(os.environ['LOCAL_RANK'])\n else:\n print('Not using distributed mode')\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = 'nccl'\n print('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url), flush=True)\n torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n setup_for_distributed(args.rank == 0)\n"
] | [
[
"torch.distributed.get_world_size",
"torch.distributed.is_available",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.save",
"torch.cuda.max_memory_allocated",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.is_initialized",
"torch.tensor",
"torch.distributed.all_reduce",
"torch.distributed.get_rank",
"torch.distributed.barrier"
]
] |
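A single-process sketch of the logging helpers above; no distributed init is required for this path, and the values are illustrative.

```python
# Sketch: SmoothedValue tracks a window median/average next to a global average;
# MetricLogger aggregates several such meters (utils.py above must be importable).
meter = SmoothedValue(window_size=3)
for loss in [0.9, 0.7, 0.8, 0.4]:
    meter.update(loss)
print(meter.median, meter.global_avg)  # window median vs. running global average

logger = MetricLogger(delimiter="  ")
logger.update(loss=0.42, acc1=91.0)
print(str(logger))  # e.g. "loss: 0.4200 (0.4200)  acc1: 91.0000 (91.0000)"
```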
yudasong/Reinforcement-Learning-Branch-and-Bound | [
"052a64425ca969f421a079dc62049fb68b4957c5"
] | [
"REINFORCE/BB.py"
] | [
"import numpy as np\nimport pyibex as pi\n\n\nclass BB():\n \"\"\"\n This class specifies the base Game class. To define your own game, subclass\n this class and implement the functions below. This works when the game is\n two-player, adversarial and turn-based.\n\n Use 1 for player1 and -1 for player2.\n\n See othello/OthelloGame.py for an example implementation.\n \"\"\"\n def __init__(self, function, input_box, output_range):\n self.function = function\n self.input_box = input_box\n self.output_range = output_range\n self.contractor = pi.CtcFwdBwd(self.function, self.output_range)\n\n # size of representation for each variable\n self.embedding_size = 3\n\n def getRoot(self):\n \"\"\"\n Returns:\n : a representation of the board (ideally this is the form\n that will be the input to your neural network)\n \"\"\"\n\n return getBoardFromInput_box(self.input_box)\n\n\n\n def getBoardFromInput_box(self, currentInput_box):\n\n\n shape = self.getBoardSize()\n\n embedding = np.zeros(shape)\n\n for i in range(shape[0]):\n lower = currentInput_box[i][0]\n upper = currentInput_box[i][1]\n middle = float((lower + upper) / 2)\n\n embedding[i,0] = lower\n embedding[i,1] = middle\n embedding[i,2] = upper\n\n return embedding\n\n def getBoardSize(self):\n \"\"\"\n Returns:\n (x,y): a tuple of board dimensions\n \"\"\"\n return len(self.input_box),self.embedding_size\n\n def getActionSize(self):\n \"\"\"\n Returns:\n actionSize: number of all possible actions\n \"\"\"\n return 2**len(self.input_box)\n\n def getNextState(self, currentInput_box, action):\n \"\"\"\n Input:\n state: current state\n action: action taken by current player\n\n Returns:\n nextBoard: board after applying action\n \"\"\"\n var_index = int(action / 2)\n direction = action % 2\n\n # split the interval of the selected variable ([(),()],[(),()])\n new_boxes = currentInput_box.bisect(var_index, 0.5)\n\n # choose go to half of the interval\n currentInput_box = new_boxes[direction]\n\n self.contractor.contract(currentInput_box)\n\n #TODO: return the STATE\n\n return currentInput_box\n\n\n\n def getValidMoves(self,currentInput_box, threshold):\n \"\"\"\n Input:\n board: current board\n player: current player\n\n Returns:\n validMoves: a binary vector of length self.getActionSize(), 1 for\n moves that are valid from the current board and player,\n 0 for invalid moves\n \"\"\"\n\n mask = np.zeros(self.getActionSize())\n\n for i in range(len(currentInput_box)):\n if currentInput_box[i].diam() > threshold:\n mask[2*i] = 1\n mask[2*i+1] = 1\n\n return mask\n\n\n def getGameEnded(self,currentInput_box, threshold):\n \"\"\"\n Input:\n board: current board\n player: current player (1 or -1)\n\n Returns:\n r: 0 if game has not ended. 
1 if player won, -1 if player lost,\n small non-zero value for draw.\n\n \"\"\"\n\n\n # TODO: situation for empty box\n if currentInput_box.is_empty():\n return -5\n\n\n if 1 not in self.getValidMoves(currentInput_box, threshold):\n currentValue = [[currentInput_box[i].diam()/2 + currentInput_box[i][0],currentInput_box[i].diam()/2 + currentInput_box[i][0]] for i in range(len(currentInput_box))]\n #print(pi.IntervalVector(currentValue)[0])\n #print(self.function.eval(pi.IntervalVector(currentValue))[0])\n return 1 - np.abs(self.function.eval(pi.IntervalVector(currentValue))[0])\n else:\n return 0\n\n\n\n def stringRepresentation(self, currentInput_box):\n \"\"\"\n Input:\n board: current board\n\n Returns:\n boardString: a quick conversion of board to a string format.\n Required by MCTS for hashing.\n \"\"\"\n string = ''.join(str(x) for x in currentInput_box)\n return string\n"
] | [
[
"numpy.zeros"
]
] |
maskot1977/scikitallstar | [
"ff376cc73d2c9448f97eb4bac99eb68a7f138aaf"
] | [
"scikitallstars/depict.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import (auc, confusion_matrix, precision_recall_curve,\n r2_score, roc_curve)\n\n\ndef best_scores(allstars_model):\n keys = list(allstars_model.best_scores.keys())\n values = allstars_model.best_scores.values()\n plt.figure(figsize=(6, int(len(keys) / 3)))\n plt.title(\"Best scores\")\n plt.barh(keys, values)\n plt.grid()\n plt.show()\n\n\ndef training_summary(objective):\n fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 8))\n\n names = [n for n in reversed(list(objective.get_model_names()))]\n\n score_means = []\n score_stds = []\n second_means = []\n second_stds = []\n selected = []\n sum_second = []\n for name in names:\n score_means.append(np.array(objective.scores[name]).mean())\n score_stds.append(np.array(objective.scores[name]).std())\n second_means.append(np.array(objective.times[name]).mean())\n second_stds.append(np.array(objective.times[name]).std())\n selected.append(len(objective.times[name]))\n sum_second.append(sum(objective.times[name]))\n\n axes[0].barh(names, score_means, xerr=score_stds)\n axes[0].set_xlabel(\"score\")\n axes[0].set_xlim([0.0, 1.0])\n axes[0].grid()\n axes[1].barh(names, selected)\n axes[1].set_xlabel(\"selected (times)\")\n axes[1].grid()\n axes[1].yaxis.set_visible(False)\n axes[2].barh(names, second_means, xerr=second_stds)\n axes[2].set_xlabel(\"calculation time (seconds)\")\n axes[2].grid()\n axes[2].yaxis.set_visible(False)\n axes[3].barh(names, sum_second)\n axes[3].set_xlabel(\"total calculation time (seconds)\")\n axes[3].grid()\n axes[3].yaxis.set_visible(False)\n plt.show()\n\n\ndef feature_importances(allstars_model):\n barh_dict = {}\n for key, value in zip(\n list(allstars_model.x_train.iloc[:, allstars_model.support].columns),\n allstars_model.best_models[\"RandomForest\"].model.feature_importances_,\n ):\n barh_dict[key] = value\n\n keys = list(barh_dict.keys())\n values = barh_dict.values()\n\n plt.figure(figsize=(6, int(len(keys) / 3) + 1))\n plt.title(\"Feature importances in RF\")\n plt.barh(keys, values)\n plt.grid()\n plt.show()\n\n\ndef model_importances(stacking_model):\n plt.title(\"Model importances in stacking\")\n plt.barh(\n list(stacking_model.best_model.named_estimators_.keys()),\n stacking_model.best_model.final_estimator_.feature_importances_,\n )\n plt.grid()\n plt.show()\n\n\ndef metrics(model, X_train, y_train, X_test=None, y_test=None):\n X_train = pd.DataFrame(X_train)\n if type(y_train) is not pd.core.series.Series:\n y_train = pd.DataFrame(y_train)[0]\n if X_test is not None:\n X_test = pd.DataFrame(X_test)\n if y_test is not None:\n y_test = pd.DataFrame(y_test)\n if hasattr(model, \"is_regressor\"):\n if model.is_regressor:\n regression_metrics(model, X_train, y_train, X_test, y_test)\n else:\n classification_metrics(model, X_train, y_train, X_test, y_test)\n elif hasattr(model, \"predict_proba\") or hasattr(model, \"decision_function\"):\n classification_metrics(model, X_train, y_train, X_test, y_test)\n else:\n regression_metrics(model, X_train, y_train, X_test, y_test)\n\n\ndef regression_metrics(model, X_train, y_train, X_test=None, y_test=None):\n\n if X_test is None:\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 4))\n ax = axes\n else:\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))\n ax = axes[0]\n\n y_pred = model.predict(X_train)\n score = model.score(X_train, y_train)\n y_min = min(y_train.min(), y_pred.min())\n y_max = min(y_train.max(), y_pred.max())\n\n 
ax.set_title(\"Training data\")\n ax.scatter(y_train, y_pred, alpha=0.5)\n ax.plot([y_min, y_max], [y_min, y_max])\n ax.text(\n y_max - 0.3,\n y_min + 0.3,\n (\"%.3f\" % score).lstrip(\"0\"),\n size=15,\n horizontalalignment=\"right\",\n )\n ax.set_xlabel(\"Real\")\n ax.set_ylabel(\"Predicted\")\n\n if X_test is not None:\n y_pred = model.predict(X_test)\n score = model.score(X_test, y_test)\n # y_min = min(y_test.ravel()min(), y_pred.min())\n # y_max = min(y_test.max(), y_pred.max())\n\n axes[1].set_title(\"Test data\")\n axes[1].scatter(y_test, y_pred, alpha=0.5)\n axes[1].plot([y_min, y_max], [y_min, y_max])\n axes[1].text(\n y_max - 0.3,\n y_min + 0.3,\n (\"%.3f\" % score).lstrip(\"0\"),\n size=15,\n horizontalalignment=\"right\",\n )\n axes[1].set_xlabel(\"Real\")\n axes[1].set_ylabel(\"Predicted\")\n plt.show()\n\n\ndef classification_metrics(model, X_train, y_train, X_test, y_test):\n if model.support is not None:\n X_train = X_train.iloc[:, model.support]\n X_test = X_test.iloc[:, model.support]\n if hasattr(model, \"best_model\"):\n if hasattr(model.best_model, \"model\"):\n model = model.best_model.model\n else:\n model = model.best_model\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(4 * 2, 4 * 3))\n i = 0\n for XX, YY, name in [\n [X_train, y_train, \"Training data\"],\n [X_test, y_test, \"Test data\"],\n ]:\n if hasattr(model, \"predict_proba\"):\n probas = model.predict_proba(XX)\n elif hasattr(model, \"decision_function\"):\n probas = np.array([[x, x] for x in model.decision_function(XX)])\n else:\n probas = np.array([[x, x] for x in model.model.decision_function(XX)])\n\n fpr, tpr, thresholds = roc_curve(YY, probas[:, 1])\n roc_auc = auc(fpr, tpr)\n precision, recall, thresholds = precision_recall_curve(YY, probas[:, 1])\n area = auc(recall, precision)\n\n matrix = confusion_matrix(model.predict(XX), YY)\n TN = matrix[0][0]\n FP = matrix[1][0]\n FN = matrix[0][1]\n TP = matrix[1][1]\n\n data = [TP, FN, FP, TN]\n axes[0][i].set_title(name)\n axes[0][i].pie(\n data,\n counterclock=False,\n startangle=90,\n # autopct=lambda x: \"{}\".format(int(x * sum(data) / 100)),\n autopct=lambda x: \"{}\".format(x),\n labels=[\"TP\", \"FN\", \"FP\", \"TN\"],\n wedgeprops=dict(width=1, edgecolor=\"w\"),\n colors=[\"skyblue\", \"orange\", \"tan\", \"lime\"],\n )\n axes[0][i].text(\n 1.0 - 0.5,\n 0.0 + 0.7,\n (\"%.3f\" % ((TN + TP) / (TN + TP + FN + FP))).lstrip(\"0\"),\n size=20,\n horizontalalignment=\"right\",\n )\n axes[1][i].plot([0, 1], [0, 1])\n axes[1][i].plot(fpr, tpr, label=\"ROC curve (area = %0.2f)\" % roc_auc)\n axes[1][i].fill_between(fpr, tpr, alpha=0.5)\n axes[1][i].set_xlim([0.0, 1.0])\n axes[1][i].set_ylim([0.0, 1.0])\n axes[1][i].set_xlabel(\"False Positive Rate\")\n if i == 0:\n axes[1][i].set_ylabel(\"True Positive Rate\")\n axes[1][i].text(\n 1.0 - 0.3,\n 0.0 + 0.3,\n (\"%.3f\" % roc_auc).lstrip(\"0\"),\n size=20,\n horizontalalignment=\"right\",\n )\n axes[2][i].plot(recall, precision, label=\"Precision-Recall curve\")\n axes[2][i].fill_between(recall, precision, alpha=0.5)\n axes[2][i].set_xlabel(\"Recall\")\n if i == 0:\n axes[2][i].set_ylabel(\"Precision\")\n axes[2][i].set_xlim([0.0, 1.0])\n axes[2][i].set_ylim([0.0, 1.0])\n axes[2][i].text(\n 1.0 - 0.3,\n 0.0 + 0.3,\n (\"%.3f\" % area).lstrip(\"0\"),\n size=20,\n horizontalalignment=\"right\",\n )\n i += 1\n plt.show()\n\n\ndef all_classification_metrics(objective, X_test, y_test):\n fig, axes = plt.subplots(\n nrows=3,\n ncols=len(objective.best_models.keys()),\n figsize=(4 * 
len(objective.best_models.keys()), 4 * 3),\n )\n i = 0\n for name in objective.best_models.keys():\n model = objective.best_models[name]\n if hasattr(model.model, \"predict_proba\"):\n probas = model.predict_proba(X_test.iloc[:, objective.support])\n else:\n probas = np.array(\n [\n [x, x]\n for x in model.model.decision_function(\n X_test.iloc[:, objective.support]\n )\n ]\n )\n\n fpr, tpr, thresholds = roc_curve(y_test, probas[:, 1])\n roc_auc = auc(fpr, tpr)\n precision, recall, thresholds = precision_recall_curve(y_test, probas[:, 1])\n area = auc(recall, precision)\n matrix = confusion_matrix(\n model.predict(X_test.iloc[:, objective.support]), y_test\n )\n TN = matrix[0][0]\n FP = matrix[1][0]\n FN = matrix[0][1]\n TP = matrix[1][1]\n data = [TP, FN, FP, TN]\n axes[0][i].set_title(name)\n axes[0][i].pie(\n data,\n counterclock=False,\n startangle=90,\n autopct=lambda x: \"{}\".format(int(x * sum(data) / 100)),\n labels=[\"TP\", \"FN\", \"FP\", \"TN\"],\n wedgeprops=dict(width=1, edgecolor=\"w\"),\n colors=[\"skyblue\", \"orange\", \"tan\", \"lime\"],\n )\n axes[0][i].text(\n 1.0 - 0.5,\n 0.0 + 0.7,\n (\"%.3f\" % ((TN + TP) / (TN + TP + FN + FP))).lstrip(\"0\"),\n size=20,\n horizontalalignment=\"right\",\n )\n axes[1][i].plot([0, 1], [0, 1])\n axes[1][i].plot(fpr, tpr, label=\"ROC curve (area = %0.2f)\" % roc_auc)\n axes[1][i].fill_between(fpr, tpr, alpha=0.5)\n axes[1][i].set_xlim([0.0, 1.0])\n axes[1][i].set_ylim([0.0, 1.0])\n axes[1][i].set_xlabel(\"False Positive Rate\")\n if i == 0:\n axes[1][i].set_ylabel(\"True Positive Rate\")\n axes[1][i].text(\n 1.0 - 0.3,\n 0.0 + 0.3,\n (\"%.3f\" % roc_auc).lstrip(\"0\"),\n size=20,\n horizontalalignment=\"right\",\n )\n axes[2][i].plot(recall, precision, label=\"Precision-Recall curve\")\n axes[2][i].fill_between(recall, precision, alpha=0.5)\n axes[2][i].set_xlabel(\"Recall\")\n if i == 0:\n axes[2][i].set_ylabel(\"Precision\")\n axes[2][i].set_xlim([0.0, 1.0])\n axes[2][i].set_ylim([0.0, 1.0])\n axes[2][i].text(\n 1.0 - 0.3,\n 0.0 + 0.3,\n (\"%.3f\" % area).lstrip(\"0\"),\n size=20,\n horizontalalignment=\"right\",\n )\n i += 1\n plt.show()\n\n\ndef all_regression_metrics(objective, X_test, y_test):\n fig, axes = plt.subplots(\n nrows=1,\n ncols=len(objective.best_models.keys()),\n figsize=(4 * len(objective.best_models.keys()), 4),\n )\n i = 0\n for name in objective.best_models.keys():\n y_pred = objective.best_models[name].predict(X_test, support=objective.support)\n score = r2_score(np.array(y_pred).ravel(), np.array(y_test).ravel())\n axes[i].set_title(name)\n axes[i].scatter(y_test, y_pred, alpha=0.5)\n y_min = min(y_test.min(), y_pred.min())\n y_max = min(y_test.max(), y_pred.max())\n axes[i].plot([y_min, y_max], [y_min, y_max])\n axes[i].text(\n y_max - 0.3,\n y_min + 0.3,\n (\"%.3f\" % score).lstrip(\"0\"),\n size=15,\n horizontalalignment=\"right\",\n )\n axes[i].set_xlabel(\"Real\")\n if i == 0:\n axes[i].set_ylabel(\"Predicted\")\n i += 1\n plt.show()\n\n\ndef all_metrics(objective, X_test, y_test):\n X_test = pd.DataFrame(X_test)\n if type(y_test) is not pd.core.series.Series:\n y_test = pd.DataFrame(y_test)[0]\n if objective.is_regressor:\n all_regression_metrics(objective, X_test, y_test)\n else:\n all_classification_metrics(objective, X_test, y_test)\n"
] | [
[
"numpy.array",
"sklearn.metrics.precision_recall_curve",
"matplotlib.pyplot.grid",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.barh",
"sklearn.metrics.auc",
"matplotlib.pyplot.show",
"sklearn.metrics.roc_curve"
]
] |
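The depict.py record above builds its classification panels from scikit-learn's `roc_curve`, `precision_recall_curve`, and `auc`. A minimal, self-contained sketch of that metric pattern, using synthetic labels and scores rather than anything from the record:

```python
import numpy as np
from sklearn.metrics import auc, precision_recall_curve, roc_curve

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=200)               # synthetic binary labels
y_score = y_true + rng.normal(scale=0.8, size=200)  # noisy decision scores

fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)                             # area under the ROC curve
precision, recall, _ = precision_recall_curve(y_true, y_score)
pr_auc = auc(recall, precision)                     # area under the PR curve
print("ROC AUC %.3f, PR AUC %.3f" % (roc_auc, pr_auc))
```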
rbeauchemin/SUOD | [
"560b49a2aff4dea0443895c69e2bc6064ba4aed0"
] | [
"examples/demo_base.py"
] | [
"import os\nimport sys\n\nimport scipy as sp\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nfrom pyod.models.iforest import IForest\nfrom pyod.models.lof import LOF\nfrom pyod.utils.data import evaluate_print\n\nfrom combo.models.score_comb import majority_vote, maximization, average\n\n# suppress warnings\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n# temporary solution for relative imports in case combo is not installed\n# if combo is installed, no need to use the following line\nsys.path.append(\n os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), '..')))\n\nfrom suod.models.base import SUOD\nfrom suod.utils.utility import get_estimators_small\n\nif __name__ == \"__main__\":\n # load files\n mat_file_list = [\n 'cardio.mat',\n # 'satellite.mat',\n # 'satimage-2.mat',\n # 'mnist.mat',\n ]\n\n mat_file = mat_file_list[0]\n mat_file_name = mat_file.replace('.mat', '')\n print(\"\\n... Processing\", mat_file_name, '...')\n mat = sp.io.loadmat(os.path.join('', 'datasets', mat_file))\n\n X = mat['X']\n y = mat['y']\n\n # standardize data to be digestible for most algorithms\n X = StandardScaler().fit_transform(X)\n\n X_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=0.4, random_state=42)\n\n contamination = y.sum() / len(y)\n base_estimators = get_estimators_small(contamination)\n\n model = SUOD(base_estimators=base_estimators, n_jobs=6, bps_flag=True,\n contamination=contamination, approx_flag_global=True)\n\n model.fit(X_train) # fit all models with X\n model.approximate(X_train) # conduct model approximation if it is enabled\n predicted_labels = model.predict(X_test) # predict labels\n predicted_scores = model.decision_function(X_test) # predict scores\n predicted_probs = model.predict_proba(X_test) # predict scores\n\n ###########################################################################\n # compared with other approaches\n evaluate_print('majority vote', y_test, majority_vote(predicted_labels))\n evaluate_print('average', y_test, average(predicted_scores))\n evaluate_print('maximization', y_test, maximization(predicted_scores))\n\n clf = LOF()\n clf.fit(X_train)\n evaluate_print('LOF', y_test, clf.decision_function(X_test))\n\n clf = IForest()\n clf.fit(X_train)\n evaluate_print('IForest', y_test, clf.decision_function(X_test))\n"
] | [
[
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler"
]
] |
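The demo_base.py record above standardizes the features before splitting them, which most of the detectors it benchmarks expect. A minimal sketch of that scale-then-split pattern on synthetic data (the shapes and seed are illustrative, not from the record):

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

X = np.random.RandomState(42).rand(100, 5)   # synthetic feature matrix
y = (X[:, 0] > 0.5).astype(int)              # synthetic labels

X = StandardScaler().fit_transform(X)        # zero mean, unit variance per column
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.4, random_state=42)
print(X_train.shape, X_test.shape)           # (60, 5) (40, 5)
```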
kiminh/cmaes | [
"0e7847b555b350324e2873af3d53c99bf4f38680"
] | [
"cmaes/_sepcma.py"
] | [
"import math\nimport sys\nimport numpy as np\n\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\n\nclass SepCMA:\n \"\"\"Separable CMA-ES stochastic optimizer class with ask-and-tell interface.\n\n Example:\n\n .. code::\n\n import numpy as np\n from cmaes import SepCMA\n\n def quadratic(x1, x2):\n return (x1 - 3) ** 2 + (10 * (x2 + 2)) ** 2\n\n optimizer = SepCMA(mean=np.zeros(2), sigma=1.3)\n\n for generation in range(50):\n solutions = []\n for _ in range(optimizer.population_size):\n # Ask a parameter\n x = optimizer.ask()\n value = quadratic(x[0], x[1])\n solutions.append((x, value))\n print(f\"#{generation} {value} (x1={x[0]}, x2 = {x[1]})\")\n\n # Tell evaluation values.\n optimizer.tell(solutions)\n\n Args:\n\n mean:\n Initial mean vector of multi-variate gaussian distributions.\n\n sigma:\n Initial standard deviation of covariance matrix.\n\n bounds:\n Lower and upper domain boundaries for each parameter (optional).\n\n n_max_resampling:\n A maximum number of resampling parameters (default: 100).\n If all sampled parameters are infeasible, the last sampled one\n will be clipped with lower and upper bounds.\n\n seed:\n A seed number (optional).\n\n population_size:\n A population size (optional).\n \"\"\"\n\n def __init__(\n self,\n mean: np.ndarray,\n sigma: float,\n bounds: Optional[np.ndarray] = None,\n n_max_resampling: int = 100,\n seed: Optional[int] = None,\n population_size: Optional[int] = None,\n ):\n assert sigma > 0, \"sigma must be non-zero positive value\"\n\n n_dim = len(mean)\n assert n_dim > 1, \"The dimension of mean must be larger than 1\"\n\n if population_size is None:\n population_size = 4 + math.floor(3 * math.log(n_dim)) # (eq. 48)\n assert population_size > 0, \"popsize must be non-zero positive value.\"\n\n mu = population_size // 2\n\n # (eq.49)\n weights_prime = np.array(\n [math.log(mu + 1) - math.log(i + 1) for i in range(mu)]\n )\n weights = weights_prime / sum(weights_prime)\n mu_eff = 1 / sum(weights ** 2)\n\n # learning rate for the rank-one update\n alpha_cov = 2\n c1 = alpha_cov / ((n_dim + 1.3) ** 2 + mu_eff)\n # learning rate for the rank-μ update\n cmu_full = 2 / mu_eff / ((n_dim + np.sqrt(2)) ** 2) + (1 - 1 / mu_eff) * min(\n 1, (2 * mu_eff - 1) / ((n_dim + 2) ** 2 + mu_eff)\n )\n cmu = (n_dim + 2) / 3 * cmu_full\n\n cm = 1 # (eq. 
54)\n\n # learning rate for the cumulation for the step-size control\n c_sigma = (mu_eff + 2) / (n_dim + mu_eff + 3)\n d_sigma = 1 + 2 * max(0, math.sqrt((mu_eff - 1) / (n_dim + 1)) - 1) + c_sigma\n assert (\n c_sigma < 1\n ), \"invalid learning rate for cumulation for the step-size control\"\n\n # learning rate for cumulation for the rank-one update\n cc = 4 / (n_dim + 4)\n assert cc <= 1, \"invalid learning rate for cumulation for the rank-one update\"\n\n self._n_dim = n_dim\n self._popsize = population_size\n self._mu = mu\n self._mu_eff = mu_eff\n\n self._cc = cc\n self._c1 = c1\n self._cmu = cmu\n self._c_sigma = c_sigma\n self._d_sigma = d_sigma\n self._cm = cm\n\n # E||N(0, I)|| (p.28)\n self._chi_n = math.sqrt(self._n_dim) * (\n 1.0 - (1.0 / (4.0 * self._n_dim)) + 1.0 / (21.0 * (self._n_dim ** 2))\n )\n\n self._weights = weights\n\n # evolution path\n self._p_sigma = np.zeros(n_dim)\n self._pc = np.zeros(n_dim)\n\n self._mean = mean\n self._sigma = sigma\n self._D: Optional[np.ndarray] = None\n self._C: np.ndarray = np.ones(n_dim)\n\n # bounds contains low and high of each parameter.\n assert (\n bounds is None or (mean.size, 2) == bounds.shape\n ), \"bounds should be (n_dim, 2)-dim matrix\"\n self._bounds = bounds\n self._n_max_resampling = n_max_resampling\n\n self._g = 0\n self._rng = np.random.RandomState(seed)\n\n # Termination criteria\n self._tolx = 1e-12 * sigma\n self._tolxup = 1e4\n self._tolfun = 1e-12\n self._tolconditioncov = 1e14\n\n self._funhist_term = 10 + math.ceil(30 * n_dim / population_size)\n self._funhist_values = np.empty(self._funhist_term * 2)\n\n # for avoid numerical errors\n self._epsilon = 1e-8\n\n @property\n def dim(self) -> int:\n \"\"\"A number of dimensions\"\"\"\n return self._n_dim\n\n @property\n def population_size(self) -> int:\n \"\"\"A population size\"\"\"\n return self._popsize\n\n @property\n def generation(self) -> int:\n \"\"\"Generation number which is monotonically incremented\n when multi-variate gaussian distribution is updated.\"\"\"\n return self._g\n\n def __getstate__(self) -> Dict[str, Any]:\n attrs = {}\n for name in self.__dict__:\n # Remove _rng in pickle serialized object.\n if name == \"_rng\":\n continue\n attrs[name] = getattr(self, name)\n return attrs\n\n def __setstate__(self, state: Dict[str, Any]) -> None:\n self.__dict__.update(state)\n # Set _rng for unpickled object.\n setattr(self, \"_rng\", np.random.RandomState())\n\n def set_bounds(self, bounds: Optional[np.ndarray]) -> None:\n \"\"\"Update boundary constraints\"\"\"\n assert (\n bounds is None or (self._mean.size, 2) == bounds.shape\n ), \"bounds should be (n_dim, 2)-dim matrix\"\n self._bounds = bounds\n\n def ask(self) -> np.ndarray:\n \"\"\"Sample a parameter\"\"\"\n for i in range(self._n_max_resampling):\n x = self._sample_solution()\n if self._is_feasible(x):\n return x\n x = self._sample_solution()\n x = self._repair_infeasible_params(x)\n return x\n\n def _eigen_decomposition(self) -> np.ndarray:\n if self._D is not None:\n return self._D\n self._D = np.sqrt(np.where(self._C < 0, self._epsilon, self._C))\n return self._D\n\n def _sample_solution(self) -> np.ndarray:\n D = self._eigen_decomposition()\n z = self._rng.randn(self._n_dim) # ~ N(0, I)\n y = D * z # ~ N(0, C)\n x = self._mean + self._sigma * y # ~ N(m, σ^2 C)\n return x\n\n def _is_feasible(self, param: np.ndarray) -> bool:\n if self._bounds is None:\n return True\n return np.all(param >= self._bounds[:, 0]) and np.all(\n param <= self._bounds[:, 1]\n )\n\n def 
_repair_infeasible_params(self, param: np.ndarray) -> np.ndarray:\n if self._bounds is None:\n return param\n\n # clip with lower and upper bound.\n param = np.where(param < self._bounds[:, 0], self._bounds[:, 0], param)\n param = np.where(param > self._bounds[:, 1], self._bounds[:, 1], param)\n return param\n\n def tell(self, solutions: List[Tuple[np.ndarray, float]]) -> None:\n \"\"\"Tell evaluation values\"\"\"\n if len(solutions) != self._popsize:\n raise ValueError(\"Must tell popsize-length solutions.\")\n\n self._g += 1\n solutions.sort(key=lambda s: s[1])\n\n # Stores 'best' and 'worst' values of the\n # last 'self._funhist_term' generations.\n funhist_idx = 2 * (self.generation % self._funhist_term)\n self._funhist_values[funhist_idx] = solutions[0][1]\n self._funhist_values[funhist_idx + 1] = solutions[-1][1]\n\n # Sample new population of search_points, for k=1, ..., popsize\n D = self._eigen_decomposition()\n self._D = None\n\n x_k = np.array([s[0] for s in solutions]) # ~ N(m, σ^2 C)\n y_k = (x_k - self._mean) / self._sigma # ~ N(0, C)\n\n # Selection and recombination\n y_w = np.sum(y_k[: self._mu].T * self._weights[: self._mu], axis=1)\n self._mean += self._cm * self._sigma * y_w\n\n # Step-size control\n self._p_sigma = (1 - self._c_sigma) * self._p_sigma + math.sqrt(\n self._c_sigma * (2 - self._c_sigma) * self._mu_eff\n ) * (y_w / D)\n\n norm_p_sigma = np.linalg.norm(self._p_sigma)\n self._sigma *= np.exp(\n (self._c_sigma / self._d_sigma) * (norm_p_sigma / self._chi_n - 1)\n )\n self._sigma = min(self._sigma, sys.float_info.max / 5)\n\n # Covariance matrix adaption\n h_sigma_cond_left = norm_p_sigma / math.sqrt(\n 1 - (1 - self._c_sigma) ** (2 * (self._g + 1))\n )\n h_sigma_cond_right = (1.4 + 2 / (self._n_dim + 1)) * self._chi_n\n h_sigma = 1.0 if h_sigma_cond_left < h_sigma_cond_right else 0.0 # (p.28)\n\n # (eq.45)\n self._pc = (1 - self._cc) * self._pc + h_sigma * math.sqrt(\n self._cc * (2 - self._cc) * self._mu_eff\n ) * y_w\n\n delta_h_sigma = (1 - h_sigma) * self._cc * (2 - self._cc) # (p.28)\n assert delta_h_sigma <= 1\n\n # (eq.47)\n rank_one = self._pc ** 2\n rank_mu = np.sum(\n np.array([w * (y ** 2) for w, y in zip(self._weights, y_k)]), axis=0\n )\n self._C = (\n (\n 1\n + self._c1 * delta_h_sigma\n - self._c1\n - self._cmu * np.sum(self._weights)\n )\n * self._C\n + self._c1 * rank_one\n + self._cmu * rank_mu\n )\n\n def should_stop(self) -> bool:\n D = self._eigen_decomposition()\n\n # Stop if the range of function values of the recent generation is below tolfun.\n if (\n self.generation > self._funhist_term\n and np.max(self._funhist_values) - np.min(self._funhist_values)\n < self._tolfun\n ):\n return True\n\n # Stop if the std of the normal distribution is smaller than tolx\n # in all coordinates and pc is smaller than tolx in all components.\n if np.all(self._sigma * self._C < self._tolx) and np.all(\n self._sigma * self._pc < self._tolx\n ):\n return True\n\n # Stop if detecting divergent behavior.\n if self._sigma * np.max(D) > self._tolxup:\n return True\n\n # No effect coordinates: stop if adding 0.2-standard deviations\n # in any single coordinate does not change m.\n if np.any(self._mean == self._mean + (0.2 * self._sigma * np.sqrt(self._C))):\n return True\n\n # No effect axis: stop if adding 0.1-standard deviation vector in\n # any principal axis direction of C does not change m. 
\"pycma\" check\n # axis one by one at each generation.\n i = self.generation % self.dim\n if np.all(\n self._mean == self._mean + (0.1 * self._sigma * D[i] * np.ones(self._n_dim))\n ):\n return True\n\n # Stop if the condition number of the covariance matrix exceeds 1e14.\n condition_cov = np.max(D) / np.min(D)\n if condition_cov > self._tolconditioncov:\n return True\n\n return False\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.linalg.norm",
"numpy.empty",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.sum",
"numpy.ones",
"numpy.exp",
"numpy.min",
"numpy.where",
"numpy.sqrt",
"numpy.all"
]
] |
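`SepCMA._sample_solution` in the record above draws candidates from a Gaussian whose covariance is kept diagonal, so sampling needs only element-wise operations. A standalone NumPy sketch of that sampling rule (mean, sigma, and C here are made-up values):

```python
import numpy as np

rng = np.random.RandomState(0)
mean = np.zeros(3)              # distribution mean m
sigma = 1.3                     # global step size
C = np.array([1.0, 0.25, 4.0])  # diagonal of the covariance matrix

D = np.sqrt(C)                  # per-coordinate standard deviations
z = rng.randn(3)                # z ~ N(0, I)
x = mean + sigma * (D * z)      # x ~ N(m, sigma^2 * diag(C))
print(x)
```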
AJSVB/pytorch-lightning | [
"00211c1de3c5901789417263f14a36c846cc42d1"
] | [
"tests/models/data/horovod/train_default_model.py"
] | [
"\"\"\"This script is meant to be executed from `../../test_horovod.py`.\n\nBecause Horovod uses a parallel programming model similar to MPI, unit tests for collective\nops like allreduce need to be run in parallel. The most common approach for running parallel\nHorovod workers is to launch multiple replicas of the training script via the `horovodrun`\ncommand-line tool:\n\n.. code-block:: bash\n\n horovodrun -np 2 python train_default_model.py ...\n\nIndividual test parameters are configured by the serialized `--trainer-options` JSON object.\n\nAn non-zero exit code from this script on any rank will indicate failure, while a zero exit code\nacross all ranks indicates success.\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport sys\n\nimport torch\n\n# this is needed because Conda does not use `PYTHONPATH` env var while pip and virtualenv do\nPYTHONPATH = os.getenv(\"PYTHONPATH\", \"\")\nif \":\" in PYTHONPATH:\n sys.path = PYTHONPATH.split(\":\") + sys.path\n\nfrom pytorch_lightning import Trainer # noqa: E402\nfrom pytorch_lightning.callbacks import ModelCheckpoint # noqa: E402\nfrom pytorch_lightning.utilities import _HOROVOD_AVAILABLE # noqa: E402\n\nif _HOROVOD_AVAILABLE:\n import horovod.torch as hvd\nelse:\n print(\"You requested to import Horovod which is missing or not supported for your OS.\")\n\nfrom tests.helpers import BoringModel # noqa: E402\nfrom tests.helpers.utils import reset_seed, set_random_main_port # noqa: E402\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--trainer-options\", required=True)\nparser.add_argument(\"--on-gpu\", action=\"store_true\", default=False)\n\n\ndef run_test_from_config(trainer_options, on_gpu, check_size=True):\n \"\"\"Trains the default model with the given config.\"\"\"\n set_random_main_port()\n reset_seed()\n\n ckpt_path = trainer_options[\"weights_save_path\"]\n trainer_options.update(callbacks=[ModelCheckpoint(dirpath=ckpt_path)])\n\n class TestModel(BoringModel):\n def on_train_start(self) -> None:\n expected_device = torch.device(\"cuda\", self.trainer.local_rank) if on_gpu else torch.device(\"cpu\")\n assert self.device == expected_device\n\n def training_epoch_end(self, outputs) -> None:\n res = self.trainer.strategy.reduce(torch.tensor(1.0, device=self.device), reduce_op=\"sum\")\n assert res.sum() == self.trainer.strategy.world_size\n\n model = TestModel()\n trainer = Trainer(**trainer_options)\n\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n trainer.test(model)\n\n assert model.device == torch.device(\"cpu\")\n\n # Horovod should be initialized following training. 
If not, this will raise an exception.\n if check_size:\n assert hvd.size() == 2\n\n if trainer.global_rank > 0:\n return\n\n # test model loading\n pretrained_model = BoringModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)\n\n # test new model accuracy\n test_loaders = model.test_dataloader()\n if not isinstance(test_loaders, list):\n test_loaders = [test_loaders]\n\n for dataloader in test_loaders:\n batch = next(iter(dataloader))\n pretrained_model(batch)\n\n # test HPC saving\n # save logger to make sure we get all the metrics\n if trainer.logger:\n trainer.logger.finalize(\"finished\")\n hpc_save_path = trainer._checkpoint_connector.hpc_save_path(ckpt_path)\n trainer.save_checkpoint(hpc_save_path)\n # test HPC loading\n checkpoint_path = trainer._checkpoint_connector._CheckpointConnector__get_max_ckpt_path_from_folder(ckpt_path)\n trainer._checkpoint_connector.restore(checkpoint_path)\n\n if on_gpu:\n trainer = Trainer(gpus=1, strategy=\"horovod\", max_epochs=1)\n # Test the root_gpu property\n assert trainer.root_gpu == hvd.local_rank()\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n run_test_from_config(json.loads(args.trainer_options), args.on_gpu)\n"
] | [
[
"torch.device",
"torch.tensor"
]
] |
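The Horovod test script above receives its whole Trainer configuration as one serialized JSON command-line argument. A minimal sketch of that argparse-plus-json pattern, runnable without Lightning or Horovod (the argv list is hand-written for the example):

```python
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument("--trainer-options", required=True)
parser.add_argument("--on-gpu", action="store_true", default=False)

# parse an explicit argv so the sketch does not depend on sys.argv
args = parser.parse_args(["--trainer-options", '{"max_epochs": 1}'])
trainer_options = json.loads(args.trainer_options)  # dict of keyword arguments
print(trainer_options["max_epochs"], args.on_gpu)   # 1 False
```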
jbalint/aurum-datadiscovery | [
"443b777c13be47e6da60b4af528e19a02608e158"
] | [
"benchmarking/query_processing_benchmarks.py"
] | [
"from knowledgerepr import syn_network_generator as syn\nfrom api.apiutils import Relation\nimport numpy as np\nfrom ddapi import API\n\nimport timeit\nimport time\n\n\napi = None\n\n\"\"\"\nGlobal variables to use by the queries\n\"\"\"\nin_drs = None\n\ndef query2():\n # Neighbor queries\n s = time.time()\n res = api.similar_content_to(in_drs)\n e = time.time()\n return s, e\n\ndef query3():\n # Intersection queries\n res1 = api.similar_content_to(in_drs)\n res2 = api.similar_schema_name_to(in_drs)\n s = time.time()\n res = api.intersection(res1, res2)\n e = time.time()\n return s, e\n\n\ndef query4():\n # TC queries\n hits = [x for x in in_drs]\n h1 = api.drs_from_hit(hits[0])\n h2 = api.drs_from_hit(hits[len(hits) - 1])\n s = time.time()\n res = api.paths_between(h1, h2, Relation.CONTENT_SIM)\n e = time.time()\n return s, e\n\n\ndef run_all_queries(repetitions, api_obj=None, in_drs_obj=None):\n\n # Define global variables to use by the queries\n global api, in_drs\n api = api_obj\n in_drs = in_drs_obj\n\n # Query 2\n query2_times = []\n for i in range(repetitions):\n s, e = query2()\n query2_times.append((e - s))\n\n # Query 3\n query3_times = []\n for i in range(repetitions):\n s, e = query3()\n query3_times.append((e - s))\n\n # Query 4\n query4_times = []\n for i in range(repetitions):\n s = time.time()\n query4()\n e = time.time()\n query4_times.append((e - s))\n\n return query2_times, query3_times, query4_times\n\n\ndef experiment_changing_input_size(repetitions=100):\n\n # Create a graph\n\n fn = syn.generate_network_with(num_nodes=100000, num_nodes_per_table=10, num_schema_sim=90000,\n num_content_sim=90000, num_pkfk=90000)\n\n api = API(fn)\n\n perf_results = dict()\n\n # input size from 1 to 100\n for i in range(50):\n i = i + 1\n nodes = fn.fields_degree(i)\n nids = [x for x, y in nodes]\n info = fn.get_info_for(nids)\n hits = fn.get_hits_from_info(info)\n in_drs = api.drs_from_hits(hits)\n\n q2, q3, q4 = run_all_queries(repetitions, api_obj=api, in_drs_obj=in_drs)\n percentile_results = get_percentiles([q2, q3, q4])\n perf_results[i] = percentile_results\n return perf_results\n\n\ndef experiment_changing_graph_size_constant_density(repetitions=10):\n sizes = [100, 1000, 10000, 100000, 1000000]\n perf_results = dict()\n for size in sizes:\n relations = int(size)\n fn = syn.generate_network_with(num_nodes=size, num_nodes_per_table=10, num_schema_sim=relations,\n num_content_sim=relations, num_pkfk=relations)\n\n api = API(fn)\n\n nodes = fn.fields_degree(1)\n nids = [x for x, y in nodes]\n info = fn.get_info_for(nids)\n hits = fn.get_hits_from_info(info)\n in_drs = api.drs_from_hits(hits)\n\n q2, q3, q4 = run_all_queries(repetitions, api_obj=api, in_drs_obj=in_drs)\n percentile_results = get_percentiles([q2, q3, q4])\n perf_results[size] = percentile_results\n\n return perf_results\n\n\ndef experiment_changing_graph_density_constant_size(repetitions=10):\n size = 100000\n densities = [100, 1000, 10000, 100000, 1000000]\n perf_results = dict()\n for density in densities:\n fn = syn.generate_network_with(num_nodes=size, num_nodes_per_table=10, num_schema_sim=density,\n num_content_sim=density, num_pkfk=density)\n\n api = API(fn)\n\n nodes = fn.fields_degree(3)\n nids = [x for x, y in nodes]\n info = fn.get_info_for(nids)\n hits = fn.get_hits_from_info(info)\n in_drs = api.drs_from_hits(hits)\n\n q2, q3, q4 = run_all_queries(repetitions, api_obj=api, in_drs_obj=in_drs)\n percentile_results = get_percentiles([q2, q3, q4])\n perf_results[density] = percentile_results\n\n return 
perf_results\n\n\ndef experiment_changing_max_hops_tc_queries(repetitions=100):\n perf_results = dict()\n for i in range(10):\n i = i +1\n fn = syn.generate_network_with(num_nodes=100000, num_nodes_per_table=10, num_schema_sim=100000,\n num_content_sim=100000, num_pkfk=100000)\n\n api = API(fn)\n\n nodes = fn.fields_degree(1)\n nids = [x for x, y in nodes]\n info = fn.get_info_for(nids)\n hits = fn.get_hits_from_info(info)\n in_drs = api.drs_from_hits(hits)\n\n query_times = []\n for repet in range(repetitions):\n s = time.time()\n res = api.traverse(in_drs, Relation.SCHEMA_SIM, max_hops=i)\n e = time.time()\n query_times.append((e - s))\n\n percentile_results = get_percentiles([query_times])\n perf_results[i] = percentile_results\n return perf_results\n\n\ndef get_percentiles(list_of_lists):\n results = []\n for l in list_of_lists:\n nq = np.array(l)\n p5 = np.percentile(nq, 5)\n p50 = np.percentile(nq, 50)\n p95 = np.percentile(nq, 95)\n percentiles = (p5, p50, p95)\n results.append(percentiles)\n return results\n\n\ndef test():\n # Fixed graph density, differing sizes (nodes)\n\n fn = syn.generate_network_with(num_nodes=100, num_nodes_per_table=10, num_schema_sim=200,\n num_content_sim=150, num_pkfk=50)\n api = API(fn)\n\n nodes = fn.fields_degree(3)\n nids = [x for x, y in nodes]\n info = fn.get_info_for(nids)\n hits = fn.get_hits_from_info(info)\n in_drs = api.drs_from_hits(hits)\n\n q2, q3, q4 = run_all_queries(100, api_obj=api, in_drs_obj=in_drs)\n\n nq2 = np.array(q2)\n p5 = np.percentile(nq2, 5)\n p50 = np.percentile(nq2, 50)\n p95 = np.percentile(nq2, 95)\n print(\"q2: \" + str(p5) + \" - \" + str(p50) + \" - \" + str(p95))\n\n nq3 = np.array(q3)\n p5 = np.percentile(nq3, 5)\n p50 = np.percentile(nq3, 50)\n p95 = np.percentile(nq3, 95)\n print(\"q3: \" + str(p5) + \" - \" + str(p50) + \" - \" + str(p95))\n\n nq4 = np.array(q4)\n p5 = np.percentile(nq4, 5)\n p50 = np.percentile(nq4, 50)\n p95 = np.percentile(nq4, 95)\n print(\"q4: \" + str(p5) + \" - \" + str(p50) + \" - \" + str(p95))\n\n\ndef write_csv(name, lines):\n with open(name, 'a') as csvfile:\n for line in lines:\n csvfile.write(line)\n csvfile.write('\\n')\n\n\ndef write_results_to_csv_three_queries(name, results, csv=False, dat=False):\n lines = []\n\n from collections import OrderedDict\n od = OrderedDict(sorted(results.items()))\n\n header = None\n if csv:\n header = \"x_axis,q2_5p,q2_median,q2_95p,q3_5p,q3_median,q3_95p,q4_5p,q4_median,q4_95p\"\n elif dat:\n header = \"# x_axis q2_5p q2_median q2_95p q3_5p q3_median q3_95p q4_5p q4_median q4_95p\"\n lines.append(header)\n for k, v in od.items():\n (q2, q3, q4) = v\n (fivep_2, median_2, ninetyp_2) = q2\n (fivep_3, median_3, ninetyp_3) = q3\n (fivep_4, median_4, ninetyp_4) = q4\n separator = None\n if csv:\n separator = ','\n elif dat:\n separator = ' '\n string = separator.join([str(k), str(fivep_2), str(median_2), str(ninetyp_2), str(fivep_3),\n str(median_3), str(ninetyp_3), str(fivep_4), str(median_4), str(ninetyp_4)])\n lines.append(string)\n\n write_csv(name, lines)\n\n\ndef write_results_to_csv_one_query(name, results, csv=False, dat=False):\n lines = []\n\n from collections import OrderedDict\n od = OrderedDict(sorted(results.items()))\n\n header = None\n if csv:\n header = \"x_axis,q2_5p,q2_median,q2_95p,q3_5p,q3_median,q3_95p,q4_5p,q4_median,q4_95p\"\n elif dat:\n header = \"# x_axis q2_5p q2_median q2_95p q3_5p q3_median q3_95p q4_5p q4_median q4_95p\"\n lines.append(header)\n for k, v in od.items():\n (fivep_2, median_2, ninetyp_2) = v[0]\n separator = 
None\n if csv:\n separator = ','\n elif dat:\n separator = ' '\n string = separator.join([str(k), str(fivep_2), str(median_2), str(ninetyp_2)])\n lines.append(string)\n\n write_csv(name, lines)\n\n\nif __name__ == \"__main__\":\n\n changing_size_results = experiment_changing_input_size(repetitions=10)\n for k, v in changing_size_results.items():\n print(str(k) + \" -> \" + str(v))\n write_results_to_csv_three_queries(\"/Users/ra-mit/research/data-discovery/papers/dd-paper/evaluation_results/qp_performance/data/changing_input_size.dat\", changing_size_results, dat=True)\n\n changing_graph_size_results = experiment_changing_graph_size_constant_density(repetitions=10)\n for k, v in changing_graph_size_results.items():\n print(str(k) + \" -> \" + str(v))\n write_results_to_csv_three_queries(\"/Users/ra-mit/research/data-discovery/papers/dd-paper/evaluation_results/qp_performance/data/changing_graph_size_density_constant.dat\", changing_graph_size_results, dat=True)\n\n changing_graph_density_results = experiment_changing_graph_density_constant_size(repetitions=10)\n for k, v in changing_graph_density_results.items():\n print(str(k) + \" -> \" + str(v))\n write_results_to_csv_three_queries(\"/Users/ra-mit/research/data-discovery/papers/dd-paper/evaluation_results/qp_performance/data/changing_graph_density_fixed_graph_size.dat\", changing_graph_density_results, dat=True)\n\n changing_hops_results = experiment_changing_max_hops_tc_queries(repetitions=10)\n for k, v in changing_hops_results.items():\n print(str(k) + \" -> \" + str(v))\n write_results_to_csv_one_query(\"/Users/ra-mit/research/data-discovery/papers/dd-paper/evaluation_results/qp_performance/data/changing_hops_tc.dat\", changing_hops_results, dat=True)\n\n"
] | [
[
"numpy.percentile",
"numpy.array"
]
] |
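`get_percentiles` in the benchmark record above reduces each list of timings to its 5th/50th/95th percentiles. The same reduction in isolation, with made-up timings:

```python
import numpy as np

query_times = [0.012, 0.010, 0.031, 0.011, 0.015, 0.009, 0.050]  # made-up seconds
nq = np.array(query_times)
p5 = np.percentile(nq, 5)    # fast tail
p50 = np.percentile(nq, 50)  # median
p95 = np.percentile(nq, 95)  # slow tail
print("q: " + str(p5) + " - " + str(p50) + " - " + str(p95))
```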
harshkothari410/snn-image-segmentation | [
"18fb28e8b2fee3d7583f6e62fd512ba90863c0ee"
] | [
"test.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport nengo\n\nfrom nengo.dists import Uniform\n\nmodel = nengo.Network(label='A Single Neuron')\n\nwith model:\n neuron = nengo.Ensemble(1, dimensions=1, # Represent a scalar\n intercepts=Uniform(-.5, -.5), # Set intercept to 0.5\n max_rates=[20], # Set the maximum firing rate of the neuron to 100hz\n encoders=[[1]]) # Sets the neurons firing rate to increase for positive input\n\nwith model:\n my_node = nengo.Node(output=680)\n\nwith model:\n nengo.Connection(my_node, neuron)\n\nwith model:\n cos_probe = nengo.Probe(my_node, synapse=0.01) # The original input\n spikes = nengo.Probe(neuron.neurons, synapse=0.01) # The raw spikes from the neuron\n voltage = nengo.Probe(neuron.neurons, 'voltage', synapse=0.01) # Subthreshold soma voltage of the neuron\n filtered = nengo.Probe(neuron, synapse=0.01) # Spikes filtered by a 10ms post-synaptic filter\n\nsim = nengo.Simulator(model)\nsim.run(0.01)\n# print sim.data\nplt.plot(sim.trange(), sim.data[filtered])\nplt.plot(sim.trange(), sim.data[cos_probe])\nplt.xlim(0, 0.01)\n\n\n# Plot the spiking output of the ensemble\nfrom nengo.utils.matplotlib import rasterplot\nplt.figure(figsize=(10, 8))\nplt.subplot(221)\nrasterplot(sim.trange(), sim.data[spikes])\nplt.ylabel(\"Neuron\")\nplt.xlim(0, 0.01)\n\nimport pylab\npylab.show()"
] | [
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figure"
]
] |
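test.py above follows Nengo's usual build/probe/simulate flow. A smaller sketch of the same flow, assuming nengo is installed (the constant input and 100 ms run time are arbitrary):

```python
import nengo

model = nengo.Network(label="sketch")
with model:
    stim = nengo.Node(output=0.5)                  # constant input signal
    ens = nengo.Ensemble(n_neurons=1, dimensions=1)
    nengo.Connection(stim, ens)
    probe = nengo.Probe(ens, synapse=0.01)         # 10 ms low-pass filter

with nengo.Simulator(model) as sim:
    sim.run(0.1)                                   # simulate 100 ms
print(sim.trange().shape, sim.data[probe].shape)   # (100,) (100, 1)
```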
yoshi8102-p/smalysis-register | [
"5fc76e75a532e128fe36cf0b74325a54dc2fd4d6"
] | [
"result-analyzer/main/src/image/MaskGenerator.py"
] | [
"import cv2\nimport numpy as np\n\nfrom constant.Constant import Constant\nfrom model.Range import Range\nfrom util.ImageUtil import ImageUtil\n\n\nclass MaskGenerator:\n BLACK_IMG = np.zeros((1080, 1920))\n WHITE = (255, 255, 255)\n\n @staticmethod\n def createResultMask():\n mask = MaskGenerator.BLACK_IMG.copy()\n MaskGenerator.__createResultMask(mask, Constant.OWN_FIGHTER_NAME_RANGE.left)\n MaskGenerator.__createResultMask(mask, Constant.OPPONENT_FIGHTER_NAME_RANGE.left)\n return mask\n\n @staticmethod\n def createWinLoseMask():\n mask = MaskGenerator.BLACK_IMG.copy()\n MaskGenerator.__createWinLoseMask(mask, 667)\n MaskGenerator.__createWinLoseMask(mask, 1458)\n return mask\n\n @staticmethod\n def createGoMask(img):\n return MaskGenerator.__createMaskForHsv(img, np.array([0, 1, 1]), np.array([30, 255, 255]), Range(150, 575, 450, 1400))\n\n @staticmethod\n def createGamesetMask(img):\n return MaskGenerator.__createMaskForHsv(img, np.array([50, 1, 1]), np.array([100, 255, 255]), Range(0, 1080, 0, 1920))\n\n @staticmethod\n def __createResultMask(img, start):\n wight = 606\n end = start + wight\n bottom = 1016\n leftTop = 68\n rightTop = 10\n resultZone = np.array([[start, leftTop], [end, rightTop], [end, bottom], [start, bottom]], np.int32)\n cv2.fillPoly(img, [resultZone], MaskGenerator.WHITE)\n\n @staticmethod\n def __createWinLoseMask(img, start):\n height = 200\n cv2.rectangle(img, (start, 0), (start + height, height), (255, 255, 255), -1)\n\n @staticmethod\n def __createMaskForHsv(img, lower: np.array, upper: np.array, r: Range):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n inRange = cv2.inRange(hsv, lower, upper)\n image = ImageUtil.cutImage(inRange, r)\n image = cv2.medianBlur(image, 11)\n # 2回やらないとうまくノイズが消えないので一旦2回やる\n MaskGenerator.__deleteNoise(image, 18000)\n MaskGenerator.__deleteNoise(image, 18000)\n return image\n\n @staticmethod\n def __deleteNoise(img, areaSize):\n contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n for i in range(len(contours)):\n contour = contours[i]\n area = abs(cv2.contourArea(contour))\n if area >= areaSize:\n continue\n cv2.fillConvexPoly(img, contours[i], 0)\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
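`MaskGenerator.__createMaskForHsv` in the record above masks by HSV range with `cv2.inRange` and then median-blurs the result. A minimal sketch of that thresholding step on a synthetic BGR image, assuming OpenCV is installed:

```python
import cv2
import numpy as np

img = np.zeros((40, 40, 3), dtype=np.uint8)
img[10:30, 10:30] = (0, 128, 255)        # orange-ish square in BGR

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([0, 1, 1]), np.array([30, 255, 255]))
mask = cv2.medianBlur(mask, 11)          # suppress small speckles
print(mask.dtype, int(mask.max()))       # uint8 255
```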
exemplary-citizen/dask | [
"9748ca2898d592a959df45f8f0abe7fb13db0f2f"
] | [
"dask/array/routines.py"
] | [
"import inspect\nimport math\nimport warnings\nfrom collections.abc import Iterable\nfrom functools import wraps, partial\nfrom numbers import Real, Integral\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nfrom toolz import concat, sliding_window, interleave\n\nfrom ..compatibility import apply\nfrom ..core import flatten\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import funcname, derived_from, is_arraylike\nfrom . import chunk\nfrom .creation import arange, diag, empty, indices\nfrom .utils import safe_wraps, validate_axis, meta_from_array, zeros_like_safe\nfrom .wrap import ones\nfrom .ufunc import multiply, sqrt\n\nfrom .core import (\n Array,\n map_blocks,\n elemwise,\n from_array,\n asarray,\n asanyarray,\n concatenate,\n stack,\n blockwise,\n broadcast_shapes,\n is_scalar_for_elemwise,\n broadcast_to,\n tensordot_lookup,\n implements,\n)\n\nfrom .einsumfuncs import einsum # noqa\nfrom .numpy_compat import _unravel_index_keyword\n\n\n@derived_from(np)\ndef array(x, dtype=None, ndmin=None):\n x = asarray(x)\n while ndmin is not None and x.ndim < ndmin:\n x = x[None, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\n@derived_from(np)\ndef result_type(*args):\n args = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]\n return np.result_type(*args)\n\n\n@derived_from(np)\ndef atleast_3d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None, None]\n elif x.ndim == 1:\n x = x[None, :, None]\n elif x.ndim == 2:\n x = x[:, :, None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef atleast_2d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None]\n elif x.ndim == 1:\n x = x[None, :]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef atleast_1d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef vstack(tup, allow_unknown_chunksizes=False):\n tup = tuple(atleast_2d(x) for x in tup)\n return concatenate(tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes)\n\n\n@derived_from(np)\ndef hstack(tup, allow_unknown_chunksizes=False):\n if all(x.ndim == 1 for x in tup):\n return concatenate(\n tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n else:\n return concatenate(\n tup, axis=1, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n\n\n@derived_from(np)\ndef dstack(tup, allow_unknown_chunksizes=False):\n tup = tuple(atleast_3d(x) for x in tup)\n return concatenate(tup, axis=2, allow_unknown_chunksizes=allow_unknown_chunksizes)\n\n\n@derived_from(np)\ndef swapaxes(a, axis1, axis2):\n if axis1 == axis2:\n return a\n if axis1 < 0:\n axis1 = axis1 + a.ndim\n if axis2 < 0:\n axis2 = axis2 + a.ndim\n ind = list(range(a.ndim))\n out = list(ind)\n out[axis1], out[axis2] = axis2, axis1\n\n return blockwise(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2, dtype=a.dtype)\n\n\n@derived_from(np)\ndef transpose(a, axes=None):\n if axes:\n if len(axes) != a.ndim:\n raise ValueError(\"axes don't match array\")\n else:\n axes = tuple(range(a.ndim))[::-1]\n axes = tuple(d + a.ndim if d < 0 else d for d in axes)\n return blockwise(\n np.transpose, axes, a, tuple(range(a.ndim)), 
dtype=a.dtype, axes=axes\n )\n\n\ndef flip(m, axis):\n \"\"\"\n Reverse element order along axis.\n\n Parameters\n ----------\n axis : int\n Axis to reverse element order of.\n\n Returns\n -------\n reversed array : ndarray\n \"\"\"\n\n m = asanyarray(m)\n\n sl = m.ndim * [slice(None)]\n try:\n sl[axis] = slice(None, None, -1)\n except IndexError:\n raise ValueError(\n \"`axis` of %s invalid for %s-D array\" % (str(axis), str(m.ndim))\n )\n sl = tuple(sl)\n\n return m[sl]\n\n\n@derived_from(np)\ndef flipud(m):\n return flip(m, 0)\n\n\n@derived_from(np)\ndef fliplr(m):\n return flip(m, 1)\n\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nALPHABET = alphabet.upper()\n\n\ndef _tensordot(a, b, axes):\n x = max([a, b], key=lambda x: x.__array_priority__)\n tensordot = tensordot_lookup.dispatch(type(x))\n\n # workaround may be removed when numpy version (currently 1.13.0) is bumped\n a_dims = np.array([a.shape[i] for i in axes[0]])\n b_dims = np.array([b.shape[i] for i in axes[1]])\n if (\n len(a_dims) > 0\n and (a_dims == b_dims).all()\n and a_dims.min() == 0\n and LooseVersion(np.__version__) < LooseVersion(\"1.14\")\n ):\n x = np.zeros(\n tuple(\n [s for i, s in enumerate(a.shape) if i not in axes[0]]\n + [s for i, s in enumerate(b.shape) if i not in axes[1]]\n )\n )\n else:\n x = tensordot(a, b, axes=axes)\n\n ind = [slice(None, None)] * x.ndim\n for a in sorted(axes[0]):\n ind.insert(a, None)\n x = x[tuple(ind)]\n return x\n\n\n@derived_from(np)\ndef tensordot(lhs, rhs, axes=2):\n if isinstance(axes, Iterable):\n left_axes, right_axes = axes\n else:\n left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))\n right_axes = tuple(range(0, axes))\n\n if isinstance(left_axes, Integral):\n left_axes = (left_axes,)\n if isinstance(right_axes, Integral):\n right_axes = (right_axes,)\n if isinstance(left_axes, list):\n left_axes = tuple(left_axes)\n if isinstance(right_axes, list):\n right_axes = tuple(right_axes)\n\n dt = np.promote_types(lhs.dtype, rhs.dtype)\n\n left_index = list(range(lhs.ndim))\n right_index = list(range(lhs.ndim, lhs.ndim + rhs.ndim))\n out_index = left_index + right_index\n\n for l, r in zip(left_axes, right_axes):\n out_index.remove(right_index[r])\n right_index[r] = left_index[l]\n\n intermediate = blockwise(\n _tensordot,\n out_index,\n lhs,\n left_index,\n rhs,\n right_index,\n dtype=dt,\n axes=(left_axes, right_axes),\n )\n\n result = intermediate.sum(axis=left_axes)\n return result\n\n\n@derived_from(np)\ndef dot(a, b):\n return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))\n\n\n@derived_from(np)\ndef vdot(a, b):\n return dot(a.conj().ravel(), b.ravel())\n\n\n@derived_from(np)\ndef matmul(a, b):\n a = asanyarray(a)\n b = asanyarray(b)\n\n if a.ndim == 0 or b.ndim == 0:\n raise ValueError(\"`matmul` does not support scalars.\")\n\n a_is_1d = False\n if a.ndim == 1:\n a_is_1d = True\n a = a[np.newaxis, :]\n\n b_is_1d = False\n if b.ndim == 1:\n b_is_1d = True\n b = b[:, np.newaxis]\n\n if a.ndim < b.ndim:\n a = a[(b.ndim - a.ndim) * (np.newaxis,)]\n elif a.ndim > b.ndim:\n b = b[(a.ndim - b.ndim) * (np.newaxis,)]\n\n out = blockwise(\n np.matmul,\n tuple(range(1, a.ndim + 1)),\n a,\n tuple(range(1, a.ndim - 1)) + (a.ndim - 1, 0),\n b,\n tuple(range(1, a.ndim - 1)) + (0, a.ndim),\n dtype=result_type(a, b),\n concatenate=True,\n )\n\n if a_is_1d:\n out = out[..., 0, :]\n if b_is_1d:\n out = out[..., 0]\n\n return out\n\n\n@derived_from(np)\ndef outer(a, b):\n a = a.flatten()\n b = b.flatten()\n\n dtype = np.outer(a.dtype.type(), b.dtype.type()).dtype\n\n return 
blockwise(np.outer, \"ij\", a, \"i\", b, \"j\", dtype=dtype)\n\n\ndef _inner_apply_along_axis(arr, func1d, func1d_axis, func1d_args, func1d_kwargs):\n return np.apply_along_axis(func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs)\n\n\n@derived_from(np)\ndef apply_along_axis(func1d, axis, arr, *args, dtype=None, shape=None, **kwargs):\n \"\"\"\n Apply a function to 1-D slices along the given axis. This is\n a blocked variant of :func:`numpy.apply_along_axis` implemented via\n :func:`dask.array.map_blocks`\n\n Parameters\n ----------\n func1d : callable\n Function to apply to 1-D slices of the array along the given axis\n axis : int\n Axis along which func1d will be applied\n arr : dask array\n Dask array to which ``func1d`` will be applied\n args : any\n Additional arguments to ``func1d``.\n dtype : str or dtype, optional\n The dtype of the output of ``func1d``.\n shape : tuple, optional\n The shape of the output of ``func1d``.\n kwargs : any\n Additional keyword arguments for ``func1d``.\n\n Notes\n -----\n If either of `dtype` or `shape` are not provided, Dask attempts to\n determine them by calling `func1d` on a dummy array. This may produce\n incorrect values for `dtype` or `shape`, so we recommend providing them.\n \"\"\"\n arr = asarray(arr)\n\n # Verify that axis is valid and throw an error otherwise\n axis = len(arr.shape[:axis])\n\n # If necessary, infer dtype and shape of the output of func1d by calling it on test data.\n if shape is None or dtype is None:\n test_data = np.ones((1,), dtype=arr.dtype)\n test_result = np.array(func1d(test_data, *args, **kwargs))\n if shape is None:\n shape = test_result.shape\n if dtype is None:\n dtype = test_result.dtype\n\n # Rechunk so that func1d is applied over the full axis.\n arr = arr.rechunk(\n arr.chunks[:axis] + (arr.shape[axis : axis + 1],) + arr.chunks[axis + 1 :]\n )\n\n # Map func1d over the data to get the result\n # Adds other axes as needed.\n result = arr.map_blocks(\n _inner_apply_along_axis,\n name=funcname(func1d) + \"-along-axis\",\n dtype=dtype,\n chunks=(arr.chunks[:axis] + shape + arr.chunks[axis + 1 :]),\n drop_axis=axis,\n new_axis=list(range(axis, axis + len(shape), 1)),\n func1d=func1d,\n func1d_axis=axis,\n func1d_args=args,\n func1d_kwargs=kwargs,\n )\n\n return result\n\n\n@derived_from(np)\ndef apply_over_axes(func, a, axes):\n # Validate arguments\n a = asarray(a)\n try:\n axes = tuple(axes)\n except TypeError:\n axes = (axes,)\n\n sl = a.ndim * (slice(None),)\n\n # Compute using `apply_along_axis`.\n result = a\n for i in axes:\n result = apply_along_axis(func, i, result, 0)\n\n # Restore original dimensionality or error.\n if result.ndim == (a.ndim - 1):\n result = result[sl[:i] + (None,)]\n elif result.ndim != a.ndim:\n raise ValueError(\n \"func must either preserve dimensionality of the input\"\n \" or reduce it by one.\"\n )\n\n return result\n\n\n@derived_from(np)\ndef ptp(a, axis=None):\n return a.max(axis=axis) - a.min(axis=axis)\n\n\n@derived_from(np)\ndef diff(a, n=1, axis=-1):\n a = asarray(a)\n n = int(n)\n axis = int(axis)\n\n sl_1 = a.ndim * [slice(None)]\n sl_2 = a.ndim * [slice(None)]\n\n sl_1[axis] = slice(1, None)\n sl_2[axis] = slice(None, -1)\n\n sl_1 = tuple(sl_1)\n sl_2 = tuple(sl_2)\n\n r = a\n for i in range(n):\n r = r[sl_1] - r[sl_2]\n\n return r\n\n\n@derived_from(np)\ndef ediff1d(ary, to_end=None, to_begin=None):\n ary = asarray(ary)\n\n aryf = ary.flatten()\n r = aryf[1:] - aryf[:-1]\n\n r = [r]\n if to_begin is not None:\n r = [asarray(to_begin).flatten()] + r\n if to_end is not 
None:\n r = r + [asarray(to_end).flatten()]\n r = concatenate(r)\n\n return r\n\n\ndef _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):\n \"\"\"\n x: nd-array\n array of one block\n coord: 1d-array or scalar\n coordinate along which the gradient is computed.\n axis: int\n axis along which the gradient is computed\n array_locs:\n actual location along axis. None if coordinate is scalar\n grad_kwargs:\n keyword to be passed to np.gradient\n \"\"\"\n block_loc = block_id[axis]\n if array_locs is not None:\n coord = coord[array_locs[0][block_loc] : array_locs[1][block_loc]]\n grad = np.gradient(x, coord, axis=axis, **grad_kwargs)\n return grad\n\n\n@derived_from(np)\ndef gradient(f, *varargs, **kwargs):\n f = asarray(f)\n\n kwargs[\"edge_order\"] = math.ceil(kwargs.get(\"edge_order\", 1))\n if kwargs[\"edge_order\"] > 2:\n raise ValueError(\"edge_order must be less than or equal to 2.\")\n\n drop_result_list = False\n axis = kwargs.pop(\"axis\", None)\n if axis is None:\n axis = tuple(range(f.ndim))\n elif isinstance(axis, Integral):\n drop_result_list = True\n axis = (axis,)\n\n axis = validate_axis(axis, f.ndim)\n\n if len(axis) != len(set(axis)):\n raise ValueError(\"duplicate axes not allowed\")\n\n axis = tuple(ax % f.ndim for ax in axis)\n\n if varargs == ():\n varargs = (1,)\n if len(varargs) == 1:\n varargs = len(axis) * varargs\n if len(varargs) != len(axis):\n raise TypeError(\n \"Spacing must either be a single scalar, or a scalar / 1d-array per axis\"\n )\n\n if issubclass(f.dtype.type, (np.bool8, Integral)):\n f = f.astype(float)\n elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:\n f = f.astype(float)\n\n results = []\n for i, ax in enumerate(axis):\n for c in f.chunks[ax]:\n if np.min(c) < kwargs[\"edge_order\"] + 1:\n raise ValueError(\n \"Chunk size must be larger than edge_order + 1. \"\n \"Minimum chunk for axis {} is {}. Rechunk to \"\n \"proceed.\".format(np.min(c), ax)\n )\n\n if np.isscalar(varargs[i]):\n array_locs = None\n else:\n if isinstance(varargs[i], Array):\n raise NotImplementedError(\"dask array coordinated is not supported.\")\n # coordinate position for each block taking overlap into account\n chunk = np.array(f.chunks[ax])\n array_loc_stop = np.cumsum(chunk) + 1\n array_loc_start = array_loc_stop - chunk - 2\n array_loc_stop[-1] -= 1\n array_loc_start[0] = 0\n array_locs = (array_loc_start, array_loc_stop)\n\n results.append(\n f.map_overlap(\n _gradient_kernel,\n dtype=f.dtype,\n depth={j: 1 if j == ax else 0 for j in range(f.ndim)},\n boundary=\"none\",\n coord=varargs[i],\n axis=ax,\n array_locs=array_locs,\n grad_kwargs=kwargs,\n )\n )\n\n if drop_result_list:\n results = results[0]\n\n return results\n\n\ndef _bincount_sum(bincounts, dtype=int):\n n = max(map(len, bincounts))\n out = zeros_like_safe(bincounts[0], shape=n, dtype=dtype)\n for b in bincounts:\n out[: len(b)] += b\n return out\n\n\n@derived_from(np)\ndef bincount(x, weights=None, minlength=0):\n if x.ndim != 1:\n raise ValueError(\"Input array must be one dimensional. 
Try using x.ravel()\")\n if weights is not None:\n if weights.chunks != x.chunks:\n raise ValueError(\"Chunks of input array x and weights must match.\")\n\n token = tokenize(x, weights, minlength)\n name = \"bincount-\" + token\n final_name = \"bincount-sum\" + token\n # Call np.bincount on each block, possibly with weights\n if weights is not None:\n dsk = {\n (name, i): (np.bincount, (x.name, i), (weights.name, i), minlength)\n for i, _ in enumerate(x.__dask_keys__())\n }\n dtype = np.bincount([1], weights=[1]).dtype\n else:\n dsk = {\n (name, i): (np.bincount, (x.name, i), None, minlength)\n for i, _ in enumerate(x.__dask_keys__())\n }\n dtype = np.bincount([]).dtype\n\n dsk[(final_name, 0)] = (_bincount_sum, list(dsk), dtype)\n graph = HighLevelGraph.from_collections(\n final_name, dsk, dependencies=[x] if weights is None else [x, weights]\n )\n\n if minlength == 0:\n chunks = ((np.nan,),)\n else:\n chunks = ((minlength,),)\n\n meta = meta_from_array(x, 1, dtype=dtype)\n\n return Array(graph, final_name, chunks, meta=meta)\n\n\n@derived_from(np)\ndef digitize(a, bins, right=False):\n bins = np.asarray(bins)\n dtype = np.digitize([0], bins, right=False).dtype\n return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)\n\n\ndef histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n \"\"\"\n Blocked variant of :func:`numpy.histogram`.\n\n Follows the signature of :func:`numpy.histogram` exactly with the following\n exceptions:\n\n - Either an iterable specifying the ``bins`` or the number of ``bins``\n and a ``range`` argument is required as computing ``min`` and ``max``\n over blocked arrays is an expensive operation that must be performed\n explicitly.\n\n - ``weights`` must be a dask.array.Array with the same block structure\n as ``a``.\n\n Examples\n --------\n Using number of bins and range:\n\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array(np.arange(10000), chunks=10)\n >>> h, bins = da.histogram(x, bins=10, range=[0, 10000])\n >>> bins\n array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,\n 8000., 9000., 10000.])\n >>> h.compute()\n array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])\n\n Explicitly specifying the bins:\n\n >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))\n >>> bins\n array([ 0, 5000, 10000])\n >>> h.compute()\n array([5000, 5000])\n \"\"\"\n if not np.iterable(bins) and (range is None or bins is None):\n raise ValueError(\n \"dask.array.histogram requires either specifying \"\n \"bins as an iterable or specifying both a range and \"\n \"the number of bins\"\n )\n\n if weights is not None and weights.chunks != a.chunks:\n raise ValueError(\"Input array and weights must have the same chunked structure\")\n\n if normed is not False:\n raise ValueError(\n \"The normed= keyword argument has been deprecated. \"\n \"Please use density instead. 
\"\n \"See the numpy.histogram docstring for more information.\"\n )\n\n if not np.iterable(bins):\n bin_token = bins\n mn, mx = range\n if mn == mx:\n mn -= 0.5\n mx += 0.5\n\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n else:\n bin_token = bins\n token = tokenize(a, bin_token, range, weights, density)\n\n nchunks = len(list(flatten(a.__dask_keys__())))\n chunks = ((1,) * nchunks, (len(bins) - 1,))\n\n name = \"histogram-sum-\" + token\n\n # Map the histogram to all bins\n def block_hist(x, range=None, weights=None):\n return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]\n\n if weights is None:\n dsk = {\n (name, i, 0): (block_hist, k, range)\n for i, k in enumerate(flatten(a.__dask_keys__()))\n }\n dtype = np.histogram([])[0].dtype\n else:\n a_keys = flatten(a.__dask_keys__())\n w_keys = flatten(weights.__dask_keys__())\n dsk = {\n (name, i, 0): (block_hist, k, range, w)\n for i, (k, w) in enumerate(zip(a_keys, w_keys))\n }\n dtype = weights.dtype\n\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=[a] if weights is None else [a, weights]\n )\n\n mapped = Array(graph, name, chunks, dtype=dtype)\n n = mapped.sum(axis=0)\n\n # We need to replicate normed and density options from numpy\n if density is not None:\n if density:\n db = from_array(np.diff(bins).astype(float), chunks=n.chunks)\n return n / db / n.sum(), bins\n else:\n return n, bins\n else:\n return n, bins\n\n\n@derived_from(np)\ndef cov(m, y=None, rowvar=1, bias=0, ddof=None):\n # This was copied almost verbatim from np.cov\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n if ddof is not None and ddof != int(ddof):\n raise ValueError(\"ddof must be integer\")\n\n # Handles complex arrays too\n m = asarray(m)\n if y is None:\n dtype = np.result_type(m, np.float64)\n else:\n y = asarray(y)\n dtype = np.result_type(m, y, np.float64)\n X = array(m, ndmin=2, dtype=dtype)\n\n if X.shape[0] == 1:\n rowvar = 1\n if rowvar:\n N = X.shape[1]\n axis = 0\n else:\n N = X.shape[0]\n axis = 1\n\n # check ddof\n if ddof is None:\n if bias == 0:\n ddof = 1\n else:\n ddof = 0\n fact = float(N - ddof)\n if fact <= 0:\n warnings.warn(\"Degrees of freedom <= 0 for slice\", RuntimeWarning)\n fact = 0.0\n\n if y is not None:\n y = array(y, ndmin=2, dtype=dtype)\n X = concatenate((X, y), axis)\n\n X = X - X.mean(axis=1 - axis, keepdims=True)\n if not rowvar:\n return (dot(X.T, X.conj()) / fact).squeeze()\n else:\n return (dot(X, X.T.conj()) / fact).squeeze()\n\n\n@derived_from(np)\ndef corrcoef(x, y=None, rowvar=1):\n c = cov(x, y, rowvar)\n if c.shape == ():\n return c / c\n d = diag(c)\n d = d.reshape((d.shape[0], 1))\n sqr_d = sqrt(d)\n return (c / sqr_d) / sqr_d.T\n\n\n@implements(np.round, np.round_)\n@derived_from(np)\ndef round(a, decimals=0):\n return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)\n\n\ndef _unique_internal(ar, indices, counts, return_inverse=False):\n \"\"\"\n Helper/wrapper function for :func:`numpy.unique`.\n\n Uses :func:`numpy.unique` to find the unique values for the array chunk.\n Given this chunk may not represent the whole array, also take the\n ``indices`` and ``counts`` that are in 1-to-1 correspondence to ``ar``\n and reduce them in the same fashion as ``ar`` is reduced. 
Namely sum\n any counts that correspond to the same value and take the smallest\n index that corresponds to the same value.\n\n To handle the inverse mapping from the unique values to the original\n array, simply return a NumPy array created with ``arange`` with enough\n values to correspond 1-to-1 to the unique values. While there is more\n work needed to be done to create the full inverse mapping for the\n original array, this provides enough information to generate the\n inverse mapping in Dask.\n\n Given Dask likes to have one array returned from functions like\n ``blockwise``, some formatting is done to stuff all of the resulting arrays\n into one big NumPy structured array. Dask is then able to handle this\n object and can split it apart into the separate results on the Dask side,\n which then can be passed back to this function in concatenated chunks for\n further reduction or can be return to the user to perform other forms of\n analysis.\n\n By handling the problem in this way, it does not matter where a chunk\n is in a larger array or how big it is. The chunk can still be computed\n on the same way. Also it does not matter if the chunk is the result of\n other chunks being run through this function multiple times. The end\n result will still be just as accurate using this strategy.\n \"\"\"\n\n return_index = indices is not None\n return_counts = counts is not None\n\n u = np.unique(ar)\n\n dt = [(\"values\", u.dtype)]\n if return_index:\n dt.append((\"indices\", np.intp))\n if return_inverse:\n dt.append((\"inverse\", np.intp))\n if return_counts:\n dt.append((\"counts\", np.intp))\n\n r = np.empty(u.shape, dtype=dt)\n r[\"values\"] = u\n if return_inverse:\n r[\"inverse\"] = np.arange(len(r), dtype=np.intp)\n if return_index or return_counts:\n for i, v in enumerate(r[\"values\"]):\n m = ar == v\n if return_index:\n indices[m].min(keepdims=True, out=r[\"indices\"][i : i + 1])\n if return_counts:\n counts[m].sum(keepdims=True, out=r[\"counts\"][i : i + 1])\n\n return r\n\n\n@derived_from(np)\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n ar = ar.ravel()\n\n # Run unique on each chunk and collect results in a Dask Array of\n # unknown size.\n\n args = [ar, \"i\"]\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n args.extend([arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"indices\", np.intp))\n else:\n args.extend([None, None])\n if return_counts:\n args.extend([ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"counts\", np.intp))\n else:\n args.extend([None, None])\n\n out = blockwise(_unique_internal, \"i\", *args, dtype=out_dtype, return_inverse=False)\n out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)\n\n # Take the results from the unique chunks and do the following.\n #\n # 1. Collect all results as arguments.\n # 2. Concatenate each result into one big array.\n # 3. 
Pass all results as arguments to the internal unique again.\n #\n # TODO: This should be replaced with a tree reduction using this strategy.\n # xref: https://github.com/dask/dask/issues/2851\n\n out_parts = [out[\"values\"]]\n if return_index:\n out_parts.append(out[\"indices\"])\n else:\n out_parts.append(None)\n if return_counts:\n out_parts.append(out[\"counts\"])\n else:\n out_parts.append(None)\n\n name = \"unique-aggregate-\" + out.name\n dsk = {\n (name, 0): (\n (_unique_internal,)\n + tuple(\n (np.concatenate, o.__dask_keys__())\n if hasattr(o, \"__dask_keys__\")\n else o\n for o in out_parts\n )\n + (return_inverse,)\n )\n }\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n out_dtype.append((\"indices\", np.intp))\n if return_inverse:\n out_dtype.append((\"inverse\", np.intp))\n if return_counts:\n out_dtype.append((\"counts\", np.intp))\n\n dependencies = [o for o in out_parts if hasattr(o, \"__dask_keys__\")]\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n chunks = ((np.nan,),)\n out = Array(graph, name, chunks, out_dtype)\n\n # Split out all results to return to the user.\n\n result = [out[\"values\"]]\n if return_index:\n result.append(out[\"indices\"])\n if return_inverse:\n # Using the returned unique values and arange of unknown length, find\n # each value matching a unique value and replace it with its\n # corresponding index or `0`. There should be only one entry for this\n # index in axis `1` (the one of unknown length). Reduce axis `1`\n # through summing to get an array with known dimensionality and the\n # mapping of the original values.\n mtches = (ar[:, None] == out[\"values\"][None, :]).astype(np.intp)\n result.append((mtches * out[\"inverse\"]).sum(axis=1))\n if return_counts:\n result.append(out[\"counts\"])\n\n if len(result) == 1:\n result = result[0]\n else:\n result = tuple(result)\n\n return result\n\n\ndef _isin_kernel(element, test_elements, assume_unique=False):\n values = np.in1d(element.ravel(), test_elements, assume_unique=assume_unique)\n return values.reshape(element.shape + (1,) * test_elements.ndim)\n\n\n@safe_wraps(getattr(np, \"isin\", None))\ndef isin(element, test_elements, assume_unique=False, invert=False):\n element = asarray(element)\n test_elements = asarray(test_elements)\n element_axes = tuple(range(element.ndim))\n test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))\n mapped = blockwise(\n _isin_kernel,\n element_axes + test_axes,\n element,\n element_axes,\n test_elements,\n test_axes,\n adjust_chunks={axis: lambda _: 1 for axis in test_axes},\n dtype=bool,\n assume_unique=assume_unique,\n )\n\n result = mapped.any(axis=test_axes)\n if invert:\n result = ~result\n return result\n\n\n@derived_from(np)\ndef roll(array, shift, axis=None):\n result = array\n\n if axis is None:\n result = ravel(result)\n\n if not isinstance(shift, Integral):\n raise TypeError(\n \"Expect `shift` to be an instance of Integral when `axis` is None.\"\n )\n\n shift = (shift,)\n axis = (0,)\n else:\n try:\n len(shift)\n except TypeError:\n shift = (shift,)\n try:\n len(axis)\n except TypeError:\n axis = (axis,)\n\n if len(shift) != len(axis):\n raise ValueError(\"Must have the same number of shifts as axes.\")\n\n for i, s in zip(axis, shift):\n s = -s\n s %= result.shape[i]\n\n sl1 = result.ndim * [slice(None)]\n sl2 = result.ndim * [slice(None)]\n\n sl1[i] = slice(s, None)\n sl2[i] = slice(None, s)\n\n sl1 = tuple(sl1)\n sl2 = tuple(sl2)\n\n result = concatenate([result[sl1], result[sl2]], 
axis=i)\n\n result = result.reshape(array.shape)\n\n return result\n\n\n@derived_from(np)\ndef shape(array):\n return array.shape\n\n\n@derived_from(np)\ndef union1d(ar1, ar2):\n return unique(concatenate((ar1.ravel(), ar2.ravel())))\n\n\n@derived_from(np)\ndef ravel(array):\n return array.reshape((-1,))\n\n\n@derived_from(np)\ndef squeeze(a, axis=None):\n if axis is None:\n axis = tuple(i for i, d in enumerate(a.shape) if d == 1)\n elif not isinstance(axis, tuple):\n axis = (axis,)\n\n if any(a.shape[i] != 1 for i in axis):\n raise ValueError(\"cannot squeeze axis with size other than one\")\n\n axis = validate_axis(axis, a.ndim)\n\n sl = tuple(0 if i in axis else slice(None) for i, s in enumerate(a.shape))\n\n a = a[sl]\n\n return a\n\n\n@derived_from(np)\ndef compress(condition, a, axis=None):\n\n if not is_arraylike(condition):\n # Allow `condition` to be anything array-like, otherwise ensure `condition`\n # is a numpy array.\n condition = np.asarray(condition)\n condition = condition.astype(bool)\n a = asarray(a)\n\n if condition.ndim != 1:\n raise ValueError(\"Condition must be one dimensional\")\n\n if axis is None:\n a = a.ravel()\n axis = 0\n axis = validate_axis(axis, a.ndim)\n\n # Treat `condition` as filled with `False` (if it is too short)\n a = a[\n tuple(\n slice(None, len(condition)) if i == axis else slice(None)\n for i in range(a.ndim)\n )\n ]\n\n # Use `condition` to select along 1 dimension\n a = a[tuple(condition if i == axis else slice(None) for i in range(a.ndim))]\n\n return a\n\n\n@derived_from(np)\ndef extract(condition, arr):\n condition = asarray(condition).astype(bool)\n arr = asarray(arr)\n return compress(condition.ravel(), arr.ravel())\n\n\n@derived_from(np)\ndef take(a, indices, axis=0):\n axis = validate_axis(axis, a.ndim)\n\n if isinstance(a, np.ndarray) and isinstance(indices, Array):\n return _take_dask_array_from_numpy(a, indices, axis)\n else:\n return a[(slice(None),) * axis + (indices,)]\n\n\ndef _take_dask_array_from_numpy(a, indices, axis):\n assert isinstance(a, np.ndarray)\n assert isinstance(indices, Array)\n\n return indices.map_blocks(\n lambda block: np.take(a, block, axis), chunks=indices.chunks, dtype=a.dtype\n )\n\n\n@derived_from(np)\ndef around(x, decimals=0):\n return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)\n\n\ndef _asarray_isnull(values):\n import pandas as pd\n\n return np.asarray(pd.isnull(values))\n\n\ndef isnull(values):\n \"\"\" pandas.isnull for dask arrays \"\"\"\n # eagerly raise ImportError, if pandas isn't available\n import pandas as pd # noqa\n\n return elemwise(_asarray_isnull, values, dtype=\"bool\")\n\n\ndef notnull(values):\n \"\"\" pandas.notnull for dask arrays \"\"\"\n return ~isnull(values)\n\n\n@derived_from(np)\ndef isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n func = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)\n return elemwise(func, arr1, arr2, dtype=\"bool\")\n\n\n@derived_from(np)\ndef allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n return isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan).all()\n\n\ndef variadic_choose(a, *choices):\n return np.choose(a, choices)\n\n\n@derived_from(np)\ndef choose(a, choices):\n return elemwise(variadic_choose, a, *choices)\n\n\ndef _isnonzero_vec(v):\n return bool(np.count_nonzero(v))\n\n\n_isnonzero_vec = np.vectorize(_isnonzero_vec, otypes=[bool])\n\n\ndef isnonzero(a):\n try:\n np.zeros(tuple(), dtype=a.dtype).astype(bool)\n except ValueError:\n 
######################################################\n # Handle special cases where conversion to bool does #\n # not work correctly. #\n # #\n # xref: https://github.com/numpy/numpy/issues/9479 #\n ######################################################\n return a.map_blocks(_isnonzero_vec, dtype=bool)\n else:\n return a.astype(bool)\n\n\n@derived_from(np)\ndef argwhere(a):\n a = asarray(a)\n\n nz = isnonzero(a).flatten()\n\n ind = indices(a.shape, dtype=np.intp, chunks=a.chunks)\n if ind.ndim > 1:\n ind = stack([ind[i].ravel() for i in range(len(ind))], axis=1)\n ind = compress(nz, ind, axis=0)\n\n return ind\n\n\n@derived_from(np)\ndef where(condition, x=None, y=None):\n if (x is None) != (y is None):\n raise ValueError(\"either both or neither of x and y should be given\")\n if (x is None) and (y is None):\n return nonzero(condition)\n\n if np.isscalar(condition):\n dtype = result_type(x, y)\n x = asarray(x)\n y = asarray(y)\n\n shape = broadcast_shapes(x.shape, y.shape)\n out = x if condition else y\n\n return broadcast_to(out, shape).astype(dtype)\n else:\n return elemwise(np.where, condition, x, y)\n\n\n@derived_from(np)\ndef count_nonzero(a, axis=None):\n return isnonzero(asarray(a)).astype(np.intp).sum(axis=axis)\n\n\n@derived_from(np)\ndef flatnonzero(a):\n return argwhere(asarray(a).ravel())[:, 0]\n\n\n@derived_from(np)\ndef nonzero(a):\n ind = argwhere(a)\n if ind.ndim > 1:\n return tuple(ind[:, i] for i in range(ind.shape[1]))\n else:\n return (ind,)\n\n\ndef _int_piecewise(x, *condlist, **kwargs):\n return np.piecewise(\n x, list(condlist), kwargs[\"funclist\"], *kwargs[\"func_args\"], **kwargs[\"func_kw\"]\n )\n\n\ndef _unravel_index_kernel(indices, func_kwargs):\n return np.stack(np.unravel_index(indices, **func_kwargs))\n\n\n@derived_from(np)\ndef unravel_index(indices, dims, order=\"C\"):\n # TODO: deprecate dims as well?\n if dims and indices.size:\n unraveled_indices = tuple(\n indices.map_blocks(\n _unravel_index_kernel,\n dtype=np.intp,\n chunks=(((len(dims),),) + indices.chunks),\n new_axis=0,\n func_kwargs={_unravel_index_keyword: dims, \"order\": order},\n )\n )\n else:\n unraveled_indices = tuple(empty((0,), dtype=np.intp, chunks=1) for i in dims)\n\n return unraveled_indices\n\n\n@derived_from(np)\ndef piecewise(x, condlist, funclist, *args, **kw):\n return map_blocks(\n _int_piecewise,\n x,\n *condlist,\n dtype=x.dtype,\n name=\"piecewise\",\n funclist=funclist,\n func_args=args,\n func_kw=kw\n )\n\n\n@wraps(chunk.coarsen)\ndef coarsen(reduction, x, axes, trim_excess=False, **kwargs):\n if not trim_excess and not all(\n bd % div == 0 for i, div in axes.items() for bd in x.chunks[i]\n ):\n msg = \"Coarsening factor does not align with block dimensions\"\n raise ValueError(msg)\n\n if \"dask\" in inspect.getfile(reduction):\n reduction = getattr(np, reduction.__name__)\n\n name = \"coarsen-\" + tokenize(reduction, x, axes, trim_excess)\n dsk = {\n (name,)\n + key[1:]: (apply, chunk.coarsen, [reduction, key, axes, trim_excess], kwargs)\n for key in flatten(x.__dask_keys__())\n }\n chunks = tuple(\n tuple(int(bd // axes.get(i, 1)) for bd in bds) for i, bds in enumerate(x.chunks)\n )\n\n meta = reduction(np.empty((1,) * x.ndim, dtype=x.dtype), **kwargs)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, meta=meta)\n\n\ndef split_at_breaks(array, breaks, axis=0):\n \"\"\" Split an array into a list of arrays (using slices) at the given breaks\n\n >>> split_at_breaks(np.arange(6), [3, 5])\n [array([0, 1, 2]), 
array([3, 4]), array([5])]\n \"\"\"\n padded_breaks = concat([[None], breaks, [None]])\n slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]\n preslice = (slice(None),) * axis\n split_array = [array[preslice + (s,)] for s in slices]\n return split_array\n\n\n@derived_from(np)\ndef insert(arr, obj, values, axis):\n # axis is a required argument here to avoid needing to deal with the numpy\n # default case (which reshapes the array to make it flat)\n axis = validate_axis(axis, arr.ndim)\n\n if isinstance(obj, slice):\n obj = np.arange(*obj.indices(arr.shape[axis]))\n obj = np.asarray(obj)\n scalar_obj = obj.ndim == 0\n if scalar_obj:\n obj = np.atleast_1d(obj)\n\n obj = np.where(obj < 0, obj + arr.shape[axis], obj)\n if (np.diff(obj) < 0).any():\n raise NotImplementedError(\n \"da.insert only implemented for monotonic ``obj`` argument\"\n )\n\n split_arr = split_at_breaks(arr, np.unique(obj), axis)\n\n if getattr(values, \"ndim\", 0) == 0:\n # we need to turn values into a dask array\n name = \"values-\" + tokenize(values)\n dtype = getattr(values, \"dtype\", type(values))\n values = Array({(name,): values}, name, chunks=(), dtype=dtype)\n\n values_shape = tuple(\n len(obj) if axis == n else s for n, s in enumerate(arr.shape)\n )\n values = broadcast_to(values, values_shape)\n elif scalar_obj:\n values = values[(slice(None),) * axis + (None,)]\n\n values_chunks = tuple(\n values_bd if axis == n else arr_bd\n for n, (arr_bd, values_bd) in enumerate(zip(arr.chunks, values.chunks))\n )\n values = values.rechunk(values_chunks)\n\n counts = np.bincount(obj)[:-1]\n values_breaks = np.cumsum(counts[counts > 0])\n split_values = split_at_breaks(values, values_breaks, axis)\n\n interleaved = list(interleave([split_arr, split_values]))\n interleaved = [i for i in interleaved if i.nbytes]\n return concatenate(interleaved, axis=axis)\n\n\ndef _average(a, axis=None, weights=None, returned=False, is_masked=False):\n # This was minimally modified from numpy.average\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n # Wrapper used by da.average or da.ma.average.\n a = asanyarray(a)\n\n if weights is None:\n avg = a.mean(axis)\n scl = avg.dtype.type(a.size / avg.size)\n else:\n wgt = asanyarray(weights)\n\n if issubclass(a.dtype.type, (np.integer, np.bool_)):\n result_dtype = result_type(a.dtype, wgt.dtype, \"f8\")\n else:\n result_dtype = result_type(a.dtype, wgt.dtype)\n\n # Sanity checks\n if a.shape != wgt.shape:\n if axis is None:\n raise TypeError(\n \"Axis must be specified when shapes of a and weights differ.\"\n )\n if wgt.ndim != 1:\n raise TypeError(\n \"1D weights expected when shapes of a and weights differ.\"\n )\n if wgt.shape[0] != a.shape[axis]:\n raise ValueError(\n \"Length of weights not compatible with specified axis.\"\n )\n\n # setup wgt to broadcast along axis\n wgt = broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)\n wgt = wgt.swapaxes(-1, axis)\n if is_masked:\n from .ma import getmaskarray\n\n wgt = wgt * (~getmaskarray(a))\n scl = wgt.sum(axis=axis, dtype=result_dtype)\n avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl\n\n if returned:\n if scl.shape != avg.shape:\n scl = broadcast_to(scl, avg.shape).copy()\n return avg, scl\n else:\n return avg\n\n\n@derived_from(np)\ndef average(a, axis=None, weights=None, returned=False):\n return _average(a, axis, weights, returned, is_masked=False)\n"
] | [
[
"numpy.promote_types",
"numpy.min",
"numpy.where",
"numpy.apply_along_axis",
"numpy.cumsum",
"numpy.gradient",
"numpy.bincount",
"numpy.histogram",
"numpy.count_nonzero",
"numpy.empty",
"numpy.vectorize",
"numpy.take",
"numpy.unravel_index",
"numpy.array",
"numpy.diff",
"numpy.isscalar",
"pandas.isnull",
"numpy.result_type",
"numpy.iterable",
"numpy.asarray",
"numpy.ones",
"numpy.choose",
"numpy.digitize",
"numpy.atleast_1d",
"numpy.linspace",
"numpy.unique"
]
] |
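The `histogram` implementation in the row above maps `np.histogram` over every chunk with one shared set of bin edges and then sums the per-chunk counts. A minimal NumPy-only sketch of that strategy (the chunk splitting and bin edges below are illustrative stand-ins, not dask's graph machinery):

```python
import numpy as np

# Histogram each chunk against the SAME fixed bin edges, then sum the
# per-chunk counts -- the reduction performed by `mapped.sum(axis=0)` above.
data = np.random.default_rng(0).normal(size=10_000)
chunks = np.array_split(data, 4)      # stand-in for dask chunks
bins = np.linspace(-4.0, 4.0, 11)     # fixed edges shared by every chunk

per_chunk = [np.histogram(c, bins)[0] for c in chunks]
total = np.sum(per_chunk, axis=0)

# Identical to histogramming the whole array in one pass.
assert (total == np.histogram(data, bins)[0]).all()
```

The `density=True` case follows the same pattern: divide the summed counts by `np.diff(bins)` and by the total count, as the dask version does with `db`.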
PMBio/limix-backup | [
"1e201fdb5c694d0d5506f207f3de65d8ef66146c"
] | [
"limix/stats/pca.py"
] | [
"# Copyright(c) 2014, The LIMIX developers (Christoph Lippert, Paolo Francesco Casale, Oliver Stegle)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nPCA related utility function\n\"\"\"\nimport scipy as sp\nimport numpy as np \nimport pdb\nimport numpy.linalg as linalg\nimport scipy as sp\n\ndef PCA(Y, components):\n\t\"\"\"\n\trun PCA, retrieving the first (components) principle components\n\treturn [s0, eig, w0]\n\ts0: factors\n\tw0: weights\n\t\"\"\"\n\n\tN,D = Y.shape\n\tsv = linalg.svd(Y, full_matrices=0);\n\t[s0, w0] = [sv[0][:, 0:components], np.dot(np.diag(sv[1]), sv[2]).T[:, 0:components]]\n\tv = s0.std(axis=0)\n\ts0 /= v;\n\tw0 *= v;\n\treturn [s0, w0]\n\n\tif N>D:\n\t\tsv = linalg.svd(Y, full_matrices=0);\n\t\t[s0, w0] = [sv[0][:, 0:components], np.dot(np.diag(sv[1]), sv[2]).T[:, 0:components]]\n\t\tv = s0.std(axis=0)\n\t\ts0 /= v;\n\t\tw0 *= v;\n\t\treturn [s0, w0]\n\telse:\n\t\tK=np.cov(Y)\n\t\tsv = linalg.eigh(K)\n\t\tstd_var = np.sqrt(sv[0])\n\t\tpc = sv[1]*std_var[np.newaxis(),0]\n\t\t#import ipdb\n\t\t#ipdb.set_trace()\n\t\treturn [pc,std_var]\n\ndef PC_varExplained(Y,standardized=True):\n \"\"\"\n Run PCA and calculate the cumulative fraction of variance\n Args:\n Y: phenotype values\n standardize: if True, phenotypes are standardized\n Returns:\n var: cumulative distribution of variance explained\n \"\"\"\n # figuring out the number of latent factors\n if standardized:\n Y-=Y.mean(0)\n Y/=Y.std(0)\n covY = sp.cov(Y)\n S,U = linalg.eigh(covY+1e-6*sp.eye(covY.shape[0]))\n S = S[::-1]\n rv = np.array([S[0:i].sum() for i in range(1,S.shape[0])])\n rv/= S.sum()\n return rv\n\nif __name__ == '__main__':\n\tY = np.random.randn(10,20)\n\tcomponents = 5\n\t#import pca\n\tpca = PCA(Y, components)\n\tpass\n"
] | [
[
"numpy.cov",
"scipy.eye",
"numpy.linalg.eigh",
"numpy.random.randn",
"numpy.newaxis",
"numpy.linalg.svd",
"numpy.sqrt",
"numpy.diag",
"scipy.cov"
]
] |
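The `PCA` helper in this row takes the thin-SVD route when there are more samples than traits: factors come from the left singular vectors and weights from the scaled right singular vectors. A small self-contained sketch of that decomposition (the shapes and the reconstruction check are illustrative, not from the source):

```python
import numpy as np

# Thin SVD: Y = U @ diag(s) @ Vt. Keeping the first k columns of U as
# factors and of (diag(s) @ Vt).T as weights gives the best rank-k fit.
rng = np.random.default_rng(0)
Y = rng.normal(size=(100, 20))   # N samples > D traits
k = 5

U, s, Vt = np.linalg.svd(Y, full_matrices=False)
s0 = U[:, :k]                    # factors
w0 = (np.diag(s) @ Vt).T[:, :k]  # weights

# s0 @ w0.T is the rank-k truncated-SVD approximation of Y
err = np.linalg.norm(Y - s0 @ w0.T) / np.linalg.norm(Y)
print(f"relative rank-{k} reconstruction error: {err:.3f}")
```

The row's version additionally rescales `s0` and `w0` by the factor standard deviations so the returned factors are unit-variance.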
nelsonsw5/3DDet | [
"e554becde1815a62459834a41336dd45d86f955e"
] | [
"tools/viz_display_audit.py"
] | [
"scene_data = [{'upc': '818094005777',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.2110433280467987,\n 'y': -0.15601402521133423,\n 'z': -0.19092947244644165},\n 'quantity': 6,\n 'confidence': 0.8571581840515137,\n 'dimensions': {'x': 0.09343475103378296,\n 'y': 0.16350924968719482,\n 'z': 0.0}},\n {'upc': '818094005777',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.13486860692501068,\n 'y': -0.15853095054626465,\n 'z': -0.1901559680700302},\n 'quantity': 6,\n 'confidence': 0.8989095687866211,\n 'dimensions': {'x': 0.07776474952697754,\n 'y': 0.15579725801944733,\n 'z': 0.0}},\n {'upc': '818094005753',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.06312471628189087,\n 'y': -0.1597088724374771,\n 'z': -0.18360841274261475},\n 'quantity': 6,\n 'confidence': 0.89031982421875,\n 'dimensions': {'x': 0.07762962579727173,\n 'y': 0.14996032416820526,\n 'z': 0.0}},\n {'upc': '818094005753',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.01368597149848938,\n 'y': -0.15843042731285095,\n 'z': -0.16567431390285492},\n 'quantity': 6,\n 'confidence': 0.8884763717651367,\n 'dimensions': {'x': 0.06925937533378601, 'y': 0.1440218836069107, 'z': 0.0}},\n {'upc': '818094005746',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.08395534008741379,\n 'y': -0.15687577426433563,\n 'z': -0.1569824367761612},\n 'quantity': 6,\n 'confidence': 0.8323421478271484,\n 'dimensions': {'x': 0.07391330599784851,\n 'y': 0.14595046639442444,\n 'z': 0.0}},\n {'upc': '818094005791',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.18140855431556702,\n 'y': -0.16950783133506775,\n 'z': -0.48921725153923035},\n 'quantity': 1,\n 'confidence': 0.8289909362792969,\n 'dimensions': {'x': 0.07373327016830444,\n 'y': 0.16346201300621033,\n 'z': 0.0}},\n {'upc': '818094008402',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.22015288472175598,\n 'y': -0.16037575900554657,\n 'z': -0.21370862424373627},\n 'quantity': 6,\n 'confidence': 0.5253796577453613,\n 'dimensions': {'x': 0.05327582359313965,\n 'y': 0.15980948507785797,\n 'z': 0.0}},\n {'upc': '818094005784',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.326053649187088,\n 'y': -0.15991030633449554,\n 'z': -0.19700884819030762},\n 'quantity': 5,\n 'confidence': 0.8227825164794922,\n 'dimensions': {'x': 0.05623340606689453,\n 'y': 0.16140693426132202,\n 'z': 0.0}},\n {'upc': '818094005784',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.39171361923217773,\n 'y': -0.1611032485961914,\n 'z': -0.21412178874015808},\n 'quantity': 5,\n 'confidence': 0.8448057174682617,\n 'dimensions': {'x': 0.07884377241134644,\n 'y': 0.17246566712856293,\n 'z': 0.0}},\n {'upc': '070847811169',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.21189150214195251,\n 'y': -0.36991700530052185,\n 'z': -0.1652209609746933},\n 'quantity': 5,\n 'confidence': 0.8740301132202148,\n 'dimensions': {'x': 0.0756341814994812, 'y': 0.1750447154045105, 'z': 0.0}},\n {'upc': '070847811169',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.13705268502235413,\n 'y': -0.3637668192386627,\n 'z': -0.14215806126594543},\n 'quantity': 6,\n 'confidence': 0.883244514465332,\n 'dimensions': {'x': 0.07631893455982208,\n 'y': 0.15997955203056335,\n 'z': 0.0}},\n {'upc': '070847010463',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.06676575541496277,\n 'y': -0.3715401589870453,\n 'z': -0.1485072821378708},\n 
'quantity': 6,\n 'confidence': 0.5068564414978027,\n 'dimensions': {'x': 0.07902286946773529, 'y': 0.1618039309978485, 'z': 0.0}},\n {'upc': '070847029014',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.005420338362455368,\n 'y': -0.3666209578514099,\n 'z': -0.1488816887140274},\n 'quantity': 6,\n 'confidence': 0.8660681247711182,\n 'dimensions': {'x': 0.07647958397865295,\n 'y': 0.16669851541519165,\n 'z': 0.0}},\n {'upc': '611269818994',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.07422016561031342,\n 'y': -0.38779816031455994,\n 'z': -0.20005324482917786},\n 'quantity': 5,\n 'confidence': 0.8378763198852539,\n 'dimensions': {'x': 0.06629350781440735, 'y': 0.1732531487941742, 'z': 0.0}},\n {'upc': '611269818994',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.13483896851539612,\n 'y': -0.3731918931007385,\n 'z': -0.14355239272117615},\n 'quantity': 5,\n 'confidence': 0.90362548828125,\n 'dimensions': {'x': 0.06405854225158691,\n 'y': 0.15245956182479858,\n 'z': 0.0}},\n {'upc': '611269818994',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.20092469453811646,\n 'y': -0.37837010622024536,\n 'z': -0.16582250595092773},\n 'quantity': 7,\n 'confidence': 0.8714923858642578,\n 'dimensions': {'x': 0.06650108098983765,\n 'y': 0.16727781295776367,\n 'z': 0.0}},\n {'upc': '611269818994',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.26247522234916687,\n 'y': -0.37466394901275635,\n 'z': -0.15715079009532928},\n 'quantity': 6,\n 'confidence': 0.8723082542419434,\n 'dimensions': {'x': 0.06895685195922852, 'y': 0.1692761778831482, 'z': 0.0}},\n {'upc': '611269332827',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.32441413402557373,\n 'y': -0.3697647154331207,\n 'z': -0.1320071816444397},\n 'quantity': 1,\n 'confidence': 0.6828818321228027,\n 'dimensions': {'x': 0.07439756393432617,\n 'y': 0.15927201509475708,\n 'z': 0.0}},\n {'upc': '611269332827',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.39020824432373047,\n 'y': -0.3711986839771271,\n 'z': -0.12990984320640564},\n 'quantity': 2,\n 'confidence': 0.6488802433013916,\n 'dimensions': {'x': 0.07589972019195557,\n 'y': 0.16788294911384583,\n 'z': 0.0}},\n {'upc': '012000028496',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.12651684880256653,\n 'y': -0.6164897084236145,\n 'z': -0.15308189392089844},\n 'quantity': 2,\n 'confidence': 0.8554458618164062,\n 'dimensions': {'x': 0.07690630853176117,\n 'y': 0.14901567995548248,\n 'z': 0.0}},\n {'upc': '012000028496',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.05642849579453468,\n 'y': -0.6179320812225342,\n 'z': -0.15816207230091095},\n 'quantity': 2,\n 'confidence': 0.8884010314941406,\n 'dimensions': {'x': 0.07432705163955688,\n 'y': 0.14524905383586884,\n 'z': 0.0}},\n {'upc': '012000028458',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.02520172856748104,\n 'y': -0.618762731552124,\n 'z': -0.15411531925201416},\n 'quantity': 2,\n 'confidence': 0.9027607440948486,\n 'dimensions': {'x': 0.06828022003173828,\n 'y': 0.14076381921768188,\n 'z': 0.0}},\n {'upc': '012000028458',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.09459851682186127,\n 'y': -0.618120014667511,\n 'z': -0.1430647373199463},\n 'quantity': 2,\n 'confidence': 0.911005973815918,\n 'dimensions': {'x': 0.0672944188117981, 'y': 0.14057016372680664, 'z': 0.0}},\n {'upc': '898999010007',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 
1.0},\n 'centroid': {'x': 0.31885775923728943,\n 'y': -0.6192162036895752,\n 'z': -0.18573836982250214},\n 'quantity': 6,\n 'confidence': 0.7801816463470459,\n 'dimensions': {'x': 0.08441579341888428, 'y': 0.169972762465477, 'z': 0.0}},\n {'upc': '898999010007',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.3841838240623474,\n 'y': -0.6160695552825928,\n 'z': -0.12251804023981094},\n 'quantity': 7,\n 'confidence': 0.750434398651123,\n 'dimensions': {'x': 0.11046695709228516, 'y': 0.1709478348493576, 'z': 0.0}},\n {'upc': '610764863812',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.21709463000297546,\n 'y': -0.8858003616333008,\n 'z': -0.1490178257226944},\n 'quantity': 6,\n 'confidence': 0.6478636264801025,\n 'dimensions': {'x': 0.0765542984008789, 'y': 0.1680009365081787, 'z': 0.0}},\n {'upc': '610764863430',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.15105746686458588,\n 'y': -0.8834516406059265,\n 'z': -0.14109262824058533},\n 'quantity': 7,\n 'confidence': 0.8081793785095215,\n 'dimensions': {'x': 0.08105121552944183, 'y': 0.1674301028251648, 'z': 0.0}},\n {'upc': '610764028495',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.08033323287963867,\n 'y': -0.8980768918991089,\n 'z': -0.17895439267158508},\n 'quantity': 6,\n 'confidence': 0.8108139038085938,\n 'dimensions': {'x': 0.07966506481170654,\n 'y': 0.18070891499519348,\n 'z': 0.0}},\n {'upc': '610764024220',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': -0.009536086581647396,\n 'y': -0.9038947224617004,\n 'z': -0.20284324884414673},\n 'quantity': 6,\n 'confidence': 0.6765151023864746,\n 'dimensions': {'x': 0.076515793800354, 'y': 0.1871100664138794, 'z': 0.0}},\n {'upc': '858176002065',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.06519287824630737,\n 'y': -0.8804823756217957,\n 'z': -0.1436401605606079},\n 'quantity': 6,\n 'confidence': 0.7823410034179688,\n 'dimensions': {'x': 0.0748170018196106, 'y': 0.18220365047454834, 'z': 0.0}},\n {'upc': '858176002065',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.1364011913537979,\n 'y': -0.8802597522735596,\n 'z': -0.13831031322479248},\n 'quantity': 5,\n 'confidence': 0.7775943279266357,\n 'dimensions': {'x': 0.07438942790031433,\n 'y': 0.18267884850502014,\n 'z': 0.0}},\n {'upc': '858176002270',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.2152194380760193,\n 'y': -0.8808143138885498,\n 'z': -0.14225275814533234},\n 'quantity': 5,\n 'confidence': 0.7995820045471191,\n 'dimensions': {'x': 0.08222317695617676,\n 'y': 0.19047483801841736,\n 'z': 0.0}},\n {'upc': '052000324815',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.2964170277118683,\n 'y': -0.87630695104599,\n 'z': -0.137282133102417},\n 'quantity': 6,\n 'confidence': 0.8014755249023438,\n 'dimensions': {'x': 0.09002119302749634,\n 'y': 0.20546022057533264,\n 'z': 0.0}},\n {'upc': '052000324815',\n 'normal': {'x': 0.0, 'y': 0.0, 'z': 1.0},\n 'centroid': {'x': 0.3823223412036896,\n 'y': -0.8788396120071411,\n 'z': -0.13560059666633606},\n 'quantity': 6,\n 'confidence': 0.8259701728820801,\n 'dimensions': {'x': 0.0984799861907959, 'y': 0.21821492910385132, 'z': 0.0}}]\n\nobj_path = \"pointcloud.obj\"\n\n\nfrom pytorch3d.io import load_obj\nimport numpy as np\nimport torch\nimport pdb\n#from wandb_utils.wandb import WandB\n#pdb.set_trace()\nfrom wandb_utils.wandb import WandB\nfrom transforms import NormalizeShift, NegZForward2YForward, MirrorX\nfrom 
features.points_to_beams import BeamFeatures\n\ncentroids = []\ndims = []\nnormals = []\nlabels = []\n\nfor scene_dict in scene_data:\n if \"dimensions\" not in scene_dict:\n continue\n labels.append(f\"upc:{scene_dict['upc']}\\ngt:{scene_dict['quantity']}\")\n centroids.append(\n [scene_dict[\"centroid\"][\"x\"], scene_dict[\"centroid\"][\"y\"], scene_dict[\"centroid\"][\"z\"]]\n )\n dims.append(\n [scene_dict[\"dimensions\"][\"x\"], scene_dict[\"dimensions\"][\"y\"], scene_dict[\"dimensions\"][\"z\"]]\n )\n normals.append(\n [scene_dict[\"normal\"][\"x\"], scene_dict[\"normal\"][\"y\"], scene_dict[\"normal\"][\"z\"]]\n )\n\n#loaded_obj = load_obj(obj_path)\n\n#pdb.set_trace()\n\n# print(\"centroids: \", centroids)\ncentroids = torch.tensor(centroids)\ndims = torch.tensor(dims)\nnormals = torch.tensor(normals)\n#pdb.set_trace()\n\n\nrun = WandB(\n project='neonet-viz_audit',\n enabled=True,\n log_objects=True\n)\n\n\npc, _, _ = load_obj(obj_path)\nswap = NegZForward2YForward()\nnorm = NormalizeShift()\n\n# normalize point cloud\npc = norm.fit_transform(pc)\ncentroids = norm(centroids)\ndims = norm.scale(dims)\nnormals = norm.scale(normals)\n#pdb.set_trace()\n# swap axes\npc = swap(pc)\ncentroids = swap(centroids)\ndims = swap(dims)\nnormals = swap(normals) / 10.0\n\n# get beams\nbeam_feats = BeamFeatures(\n beam_dim=1, # assumed to be in -Z forward (App) space\n max_voxels=centroids.shape[0],\n max_points=1024,\n epsilon=0.05,\n augmented=False,\n with_distance=False,\n zero_pad=True\n)\n\n#print(\"pc: \", pc)\n#print(\"centroids: \", centroids)\n#print(\"dims: \", dims)\n\nfeats, _, _ = beam_feats(\n points=pc,\n centroids=centroids,\n dimensions=dims\n)\n\n# get feature RGB\nn, d, c = feats.shape # centroids, dimension, channels\nfeats = torch.flatten(feats, start_dim=0, end_dim=1)\n\nf_colors = []\nfor i in range(n):\n color = np.random.randint(0, 255, size=3).reshape(1, 3)\n rgb_i = np.repeat(color, d, axis=0)\n f_colors.append(rgb_i)\nf_colors = np.concatenate(f_colors)\nfeats_rgb = np.concatenate([feats, f_colors], axis=1)\n\n\n# get pseudo boxes for normal viz\nshifted_centroids= centroids + normals\n#midpoints = centroids + normals/2.0\n\n\npc = pc.data.numpy()\nrgb = run.get_rgb_point_heatmap(pc)\n# print(\"centroids: \", centroids)\n# print(\"dims: \", dims)\nboxes = run.get_bb_dict(centroids, dims, labels=labels)\nc = [[255, 255, 0]]*shifted_centroids.shape[0]\nboxes2 = run.get_bb_dict(shifted_centroids, dims, colors=c)\n\nboxes = np.concatenate([boxes, boxes2], axis=0)\n\n# Fetch points (with associated colors) for logging in W&B later\npoints_rgb = np.array([[p[0], p[1], p[2], c[0], c[1], c[2]] for p, c in zip(pc, rgb)])\n\npc_dict = run.get_point_cloud_log(points_rgb, boxes=boxes)\n\npc_feats_dict = run.get_point_cloud_log(\n points=feats_rgb,\n boxes=np.array(boxes),\n key=\"Point Features\"\n )\n\n#img_dict = run.get_img_log(\n# [\n# \"/home/porter/data/display-audits/12-29-21/med/scene-4/img-2.jpg\",\n# \"/home/porter/data/display-audits/12-29-21/med/scene-4/img-1.jpg\"\n#\n#]\n#)\npdb.set_trace()\ndict_list = [pc_dict, pc_feats_dict]\n#img_dict]\n\nlog_dict = {}\nfor d in dict_list:\n for k, v in d.items():\n log_dict[k] = v\n\nrun.log(log_dict)\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"torch.tensor",
"numpy.random.randint",
"numpy.repeat",
"torch.flatten"
]
] |
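Before logging, the script above normalizes the point cloud and then pushes the centroids, box dimensions, and normals through the same shift/scale so everything stays aligned. The exact convention of `NormalizeShift` is internal to the repo; the sketch below assumes one common choice (map the cloud into the unit cube, shift-and-scale positions, scale sizes only):

```python
import torch

def fit_normalize(pc: torch.Tensor):
    """Return a shift and a single isotropic scale for a point cloud.
    (Assumed convention; the repo's NormalizeShift may differ.)"""
    shift = pc.min(dim=0).values
    scale = (pc - shift).max()   # one scalar preserves aspect ratio
    return shift, scale

pc = torch.rand(1000, 3) * 4.0 - 2.0        # stand-in point cloud
shift, scale = fit_normalize(pc)
pc_n = (pc - shift) / scale                 # points now in [0, 1]^3

centroids = torch.zeros(5, 3)
centroids_n = (centroids - shift) / scale   # positions: shift and scale
dims_n = (torch.rand(5, 3) * 0.2) / scale   # sizes: scale only, no shift
```

This mirrors the split in the script between `norm(centroids)` (full transform) and `norm.scale(dims)` / `norm.scale(normals)` (scale only).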
Fang-Lansheng/C-3-Framework | [
"82e22c93231d2609421d27d594bb18d26ef5be59"
] | [
"models/SCC_Model/NewNet.py"
] | [
"import cv2\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torchvision\n\nfrom misc.networks.cnn import make_layers, cfg\nfrom misc.networks.rnn import BiConvLSTM\nfrom misc.networks.vision_transformer import VisionTransformer\n\n\nclass NewNet(nn.Module):\n def __init__(self, backbone='csrnet', patch_size=8, lstm_in_dim=512, num_sam_layers=6,\n num_lstm_layers=1, load_weights=True, multi_fuse=True, is_lstm=False, is_vit=True):\n super(NewNet, self).__init__()\n self.backbone = backbone\n self.patch_size = patch_size\n self.lstm_in_dim = lstm_in_dim\n self.num_sam_layers = num_sam_layers\n self.num_lstm_layers = num_lstm_layers\n self.load_weights = load_weights\n self.multi_fuse = multi_fuse\n self.is_lstm = is_lstm\n self.is_vit = is_vit\n\n self.features = make_layers(cfg[self.backbone])\n\n self.reg_layer = make_layers(cfg['csrnet-backend'], in_channels=512, dilation=2)\n\n self.density_layer = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1)\n\n if self.multi_fuse:\n scale_tree_list = []\n for i in range(self.num_sam_layers):\n if i == 0:\n # module = ScaleTreeBlock(in_channels=512, out_channels=64)\n module = ScaleTreeBlockRAW(in_channels=512, out_channels=64)\n else:\n # module = ScaleTreeBlock(in_channels=64 * i, out_channels=64)\n module = ScaleTreeBlockRAW(in_channels=64 * i, out_channels=64)\n scale_tree_list.append(module)\n self.multi_scale_module = nn.ModuleList(scale_tree_list)\n\n self.dense_fuse = nn.Sequential(\n nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU())\n\n if self.is_lstm:\n self.ascend_layer = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=self.lstm_in_dim, kernel_size=1),\n nn.ReLU())\n self.descend_layer = nn.Sequential(\n nn.Conv2d(in_channels=self.lstm_in_dim, out_channels=1, kernel_size=1),\n nn.ReLU())\n\n # self.encoder = TransformerEncoder()\n # self.decoder = TransformerDecoder()\n\n self.bi_conv_lstm = BiConvLSTM(input_size=(self.patch_size, self.patch_size),\n input_dim=self.lstm_in_dim, hidden_dim=self.lstm_in_dim,\n kernel_size=(3, 3), num_layers=self.num_lstm_layers)\n\n if self.is_vit:\n self.vit = VisionTransformer(patch_size=self.patch_size, in_channels=1,\n depth=8, num_heads=16, mlp_ratio=3.)\n\n if self.load_weights:\n if self.backbone == 'vgg-19':\n mod = torchvision.models.vgg19(pretrained=True)\n self._initialize_weights()\n self.features.load_state_dict(mod.features[:].state_dict())\n elif self.backbone == 'vgg-16':\n mod = torchvision.models.vgg16(pretrained=True)\n self._initialize_weights()\n self.features.load_state_dict(mod.features[:].state_dict())\n elif self.backbone == 'csrnet':\n mod = torchvision.models.vgg16(pretrained=True)\n self._initialize_weights()\n self.features.load_state_dict(mod.features[0:23].state_dict())\n elif self.backbone == 'amrnet':\n mod = torchvision.models.vgg19(pretrained=True)\n self._initialize_weights()\n self.features.load_state_dict(mod.features[0:27].state_dict())\n\n def forward(self, x): # x.shape: [B, 1, M / 8, N / 8] (Train: M = N = crop_size)\n # x, _ = self.get_density_map(x)\n # x = torch.nn.functional.upsample_bilinear(x, scale_factor=8)\n\n if self.is_lstm:\n # ~~~\n b, c, m, n = x.size()\n x, sort_idx, part = self._partition_and_sort(x, patch_size=self.patch_size)\n\n _, t, _, h, w = x.size()\n x = x.reshape([b, t, c * h * w]) # c = 1, h = w = patch_size\n\n # x = self.ascend_layer(x.reshape([b * t, c, h, w])) # [b * t, c, h, w]\n # x 
= x.reshape([b, t, self.lstm_in_dim, h, w]) # [b, t, c, h, w]\n\n x = self.bi_conv_lstm(x) # [b, t, 1, h, w]\n\n _, t, c, h, w = x.size()\n x = self.descend_layer(x.reshape([b * t, c, h, w]))\n x = x.reshape([b, t, 1, h, w])\n x = self._jagsaw(x, index=sort_idx, part=part)\n # ~~~\n #\n # b, c, h, w = x.size()\n # x_sum = x.view([b, -1]).sum(1).unsqueeze(1).unsqueeze(2).unsqueeze(3)\n # x_norm = x / (x_sum + 1e-6)\n #\n # return x, x_norm # [B, 1, M, N]\n\n if self.is_vit:\n x = self.vit(x)\n\n return x\n\n def get_density_map(self, imgs): # img.shape: [B, 3, M, N] (Train: M = N = crop_size)\n x = self.features(imgs) # [B, 512, M / 8, N / 8]\n x = self.reg_layer(x) # [B, 64, M / 8, N / 8]\n\n if self.multi_fuse:\n for (i, module) in enumerate(self.multi_scale_module):\n if i < 1:\n x = module(x, self.training)\n else:\n y = module(x, self.training)\n x = torch.cat((x, y), dim=1)\n x = self.dense_fuse(x)\n\n x = self.density_layer(x) # [B, 1, M / 8, N / 8]\n\n x = torch.nn.functional.upsample_bilinear(x, scale_factor=8)\n\n b, c, h, w = x.size()\n x_sum = x.view([b, -1]).sum(1).unsqueeze(1).unsqueeze(2).unsqueeze(3)\n x_norm = x / (x_sum + 1e-6)\n\n return x, x_norm\n\n def _forward_unimplemented(self, x) -> None:\n raise NotImplementedError\n\n def _initialize_weights(self) -> None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n @staticmethod\n def _partition_and_sort(x, patch_size):\n # x = nn.functional.upsample_bilinear(density_maps, scale_factor=8) # [B, C, M, N] (C=1)\n batch_size = x.shape[0]\n w, h = x.shape[-2:]\n m, n = int(w / patch_size), int(h / patch_size)\n\n # partition\n patches = []\n idx_list = []\n for b in range(batch_size):\n patches_batch = []\n patch_counts_batch = []\n for i in range(m):\n for j in range(n):\n patch = x[b, :, i * patch_size:(i + 1) * patch_size, j * patch_size:(j + 1) * patch_size]\n patch_count = patch.cpu().data.numpy().sum()\n patch = patch.unsqueeze(0).unsqueeze(0) # [1, 1, C, PATCH_SIZE, PATCH_SIZE]\n patches_batch.append(patch) # len = m * n, elem = torch.tensor\n patch_counts_batch.append(patch_count)\n _, idx0 = torch.sort(torch.tensor(patch_counts_batch))\n _, idx = torch.sort(idx0)\n\n patches_batch = torch.cat(patches_batch, dim=1)\n patches_batch = patches_batch.index_select(dim=1, index=idx0.cuda())\n\n # # --------------------\n # # plot the patches\n # img_count_sum = 0\n # for i in range(m * n):\n # img = patches_batch[0, i, 0, :, :].cpu().data.numpy()\n # img_count = img.sum()\n # img_count_sum += img_count\n # img = cv2.resize(img, (256, 256))\n # img = (img - img.min()) / (img.max() - img.min() + 1e-5)\n # img = (img * 255).astype(np.uint8)\n # img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n # cv2.imwrite('000_patch_{:d}_count_{:.2f}.png'.format(i, img_count), img)\n # print('STAGE 2 - count validation (partition): {:.4f}'.format(img_count_sum))\n # # --------------------\n\n idx_list.append(idx)\n\n patches.append(patches_batch)\n\n patches = torch.cat(patches, dim=0)\n\n # # patch_count\n # _, idx0 = torch.sort(torch.tensor(patch_counts))\n # _, idx = torch.sort(idx0)\n #\n # patches = torch.cat(patches, dim=1) # [B, T, C, PATCH_SIZE, PATCH_SIZE] (T = m * n)\n # patches = 
patches.index_select(dim=1, index=idx0)\n\n return patches.cuda(), idx_list, [m, n] # [B, T, C, PATCH_SIZE, PATCH_SIZE] (T = m * n)\n\n @staticmethod\n def _jagsaw(x, index, part):\n batch_size, _, c, patch_size, _ = x.shape\n imgs = []\n m, n = part\n for b in range(batch_size):\n patches = x[b]\n patches = torch.index_select(patches, dim=0, index=index[b].cuda())\n img = torch.zeros([1, c, patch_size * m, patch_size * n])\n for i in range(m):\n for j in range(n):\n img[:, :, i * patch_size:(i + 1) * patch_size, j * patch_size:(j + 1) * patch_size] = \\\n patches[i * n + j, :, :, :]\n imgs.append(img)\n output = torch.cat(imgs, dim=0)\n\n return output.cuda()\n\n\nclass DC_layer(nn.Module):\n def __init__(self, level, fuse=False):\n super(DC_layer, self).__init__()\n self.level = level # ~ `level`: just a variable\n self.conv1x1_d1 = nn.Conv2d(512, 512, kernel_size=1)\n self.conv1x1_d2 = nn.Conv2d(512, 512, kernel_size=1)\n self.conv1x1_d3 = nn.Conv2d(512, 512, kernel_size=1)\n self.conv1x1_d4 = nn.Conv2d(512, 512, kernel_size=1)\n\n self.conv_d1 = nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1)\n self.conv_d2 = nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2)\n self.conv_d3 = nn.Conv2d(512, 512, kernel_size=3, padding=3, dilation=3)\n self.conv_d4 = nn.Conv2d(512, 512, kernel_size=3, padding=4, dilation=4)\n\n self.fuse = fuse\n if self.fuse:\n self.fuse = nn.Conv2d(512 * 2, 512, kernel_size=3, padding=1)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x1 = self.conv1x1_d1(x)\n x2 = self.conv1x1_d2(x)\n x3 = self.conv1x1_d3(x)\n x4 = self.conv1x1_d4(x)\n\n x1 = self.conv_d1(x1)\n x2 = self.conv_d2(x2)\n x3 = self.conv_d3(x3)\n x4 = self.conv_d4(x4)\n\n x = self.max_out(x1, x2, x3, x4)\n return x\n\n def _forward_unimplemented(self, x) -> None:\n raise NotImplementedError\n\n @staticmethod\n def max_out(x1, x2, x3, x4):\n \"\"\" ~ x = max(x1, x2, x3, x4) \"\"\"\n mask_1 = torch.ge(x1, x2) # ~ computes x1 >= x2 element-wise\n mask_1 = mask_1.float()\n x = mask_1 * x1 + (1 - mask_1) * x2\n\n mask_2 = torch.ge(x, x3)\n mask_2 = mask_2.float()\n x = mask_2 * x + (1 - mask_2) * x3\n\n mask_3 = torch.ge(x, x4)\n mask_3 = mask_3.float()\n x = mask_3 * x + (1 - mask_3) * x4\n return x\n\n\nclass ScaleTreeBlock(nn.Module):\n def __init__(self, in_channels, out_channels, hidden_dim=256):\n super(ScaleTreeBlock, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.root = nn.Conv2d(in_channels=self.in_channels, out_channels=hidden_dim, kernel_size=1)\n\n self.layer_1_1 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=1, padding=0, dilation=1)\n self.layer_1_2 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1, dilation=1)\n self.layer_1_3 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=2, dilation=2)\n\n self.layer_2_1 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=1, padding=0, dilation=1)\n self.layer_2_2 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1, dilation=1)\n self.layer_2_3 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=2, dilation=2)\n\n self.layer_2_4 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=2, dilation=2)\n self.layer_2_5 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=3, dilation=3)\n self.layer_2_6 = nn.Conv2d(in_channels=hidden_dim, 
out_channels=hidden_dim, kernel_size=3, padding=4, dilation=4)\n\n self.layer_2_7 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=4, dilation=4)\n self.layer_2_8 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=5, dilation=5)\n self.layer_2_9 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=6, dilation=6)\n\n self.scale_fuse = nn.Sequential(\n # nn.Conv2d(in_channels=hidden_dim * 9, out_channels=512, kernel_size=3, padding=1),\n # nn.ReLU(),\n # nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1),\n # nn.ReLU(),\n # nn.Conv2d(in_channels=256, out_channels=self.out_channels, kernel_size=3, padding=1),\n nn.Conv2d(in_channels=hidden_dim * 9, out_channels=self.out_channels, kernel_size=3, padding=1),\n nn.ReLU()\n )\n\n def forward(self, x, is_training=True):\n f = self.root(x)\n\n f1 = self.layer_1_1(f)\n f2 = self.layer_1_2(f)\n f3 = self.layer_1_3(f)\n\n if is_training:\n alpha, beta = torch.rand(1).cuda(), torch.rand(1).cuda()\n else:\n alpha, beta = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n\n f1_hat = f1\n f2_hat = alpha * f1_hat + (1 - alpha) * f2\n f3_hat = beta * f2_hat + (1 - beta) * f3\n\n s1 = self.layer_2_1(f1_hat)\n s2 = self.layer_2_2(f1_hat)\n s3 = self.layer_2_3(f1_hat)\n\n s4 = self.layer_2_4(f2_hat)\n s5 = self.layer_2_5(f2_hat)\n s6 = self.layer_2_6(f2_hat)\n\n s7 = self.layer_2_7(f3_hat)\n s8 = self.layer_2_8(f3_hat)\n s9 = self.layer_2_9(f3_hat)\n\n if is_training:\n alpha_1, beta_1 = torch.rand(1).cuda(), torch.rand(1).cuda()\n alpha_2, beta_2 = torch.rand(1).cuda(), torch.rand(1).cuda()\n alpha_3, beta_3 = torch.rand(1).cuda(), torch.rand(1).cuda()\n else:\n alpha_1, beta_1 = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n alpha_2, beta_2 = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n alpha_3, beta_3 = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n # alpha_1, alpha_2, alpha_3 = alpha, alpha, alpha\n # beta_1, beta_2, beta_3 = beta, beta, beta\n\n s1_hat = s1\n s2_hat = alpha_1 * s1_hat + (1 - alpha_1) * s2\n s3_hat = beta_1 * s2_hat + (1 - beta_1) * s3\n\n s4_hat = s4\n s5_hat = alpha_2 * s4_hat + (1 - alpha_2) * s5\n s6_hat = beta_2 * s5_hat + (1 - beta_2) * s6\n\n s7_hat = s7\n s8_hat = alpha_3 * s7_hat + (1 - alpha_3) * s8\n s9_hat = beta_3 * s8_hat + (1 - beta_3) * s9\n\n output = self.scale_fuse(torch.cat((s1_hat, s2_hat, s3_hat, s4_hat, s5_hat,\n s6_hat, s7_hat, s8_hat, s9_hat), dim=1))\n\n return output\n\n def _forward_unimplemented(self, x) -> None:\n raise NotImplementedError\n\n\nclass ScaleTreeBlockRAW(nn.Module):\n def __init__(self, in_channels, out_channels, hidden_dim=256):\n super(ScaleTreeBlockRAW, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.root = nn.Conv2d(in_channels=self.in_channels, out_channels=hidden_dim, kernel_size=1)\n\n self.layer_1_1 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=1, padding=0, dilation=1)\n self.layer_1_2 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=2, dilation=2)\n self.layer_1_3 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=5, padding=6, dilation=3)\n\n self.layer_2_1 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=1, padding=0, dilation=1)\n self.layer_2_2 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=2, dilation=2)\n self.layer_2_3 = nn.Conv2d(in_channels=hidden_dim, 
out_channels=hidden_dim, kernel_size=5, padding=6, dilation=3)\n\n self.layer_2_4 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=5, padding=6, dilation=3)\n self.layer_2_5 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=7, padding=12, dilation=4)\n self.layer_2_6 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=9, padding=20, dilation=5)\n\n self.layer_2_7 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=9, padding=20, dilation=5)\n self.layer_2_8 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=11, padding=30, dilation=6)\n self.layer_2_9 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=13, padding=42, dilation=7)\n\n self.scale_fuse = nn.Sequential(\n # nn.Conv2d(in_channels=hidden_dim * 9, out_channels=512, kernel_size=3, padding=1),\n # nn.ReLU(),\n # nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1),\n # nn.ReLU(),\n # nn.Conv2d(in_channels=256, out_channels=self.out_channels, kernel_size=3, padding=1),\n nn.Conv2d(in_channels=hidden_dim * 9, out_channels=self.out_channels, kernel_size=3, padding=1),\n nn.ReLU()\n )\n\n def forward(self, x, is_training=True):\n f = self.root(x)\n\n f1 = self.layer_1_1(f)\n f2 = self.layer_1_2(f)\n f3 = self.layer_1_3(f)\n\n if is_training:\n alpha, beta = torch.rand(1).cuda(), torch.rand(1).cuda()\n else:\n alpha, beta = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n\n f1_hat = f1\n f2_hat = alpha * f1_hat + (1 - alpha) * f2\n f3_hat = beta * f2_hat + (1 - beta) * f3\n\n s1 = self.layer_2_1(f1_hat)\n s2 = self.layer_2_2(f1_hat)\n s3 = self.layer_2_3(f1_hat)\n\n s4 = self.layer_2_4(f2_hat)\n s5 = self.layer_2_5(f2_hat)\n s6 = self.layer_2_6(f2_hat)\n\n s7 = self.layer_2_7(f3_hat)\n s8 = self.layer_2_8(f3_hat)\n s9 = self.layer_2_9(f3_hat)\n\n if is_training:\n alpha_1, beta_1 = torch.rand(1).cuda(), torch.rand(1).cuda()\n alpha_2, beta_2 = torch.rand(1).cuda(), torch.rand(1).cuda()\n alpha_3, beta_3 = torch.rand(1).cuda(), torch.rand(1).cuda()\n else:\n alpha_1, beta_1 = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n alpha_2, beta_2 = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n alpha_3, beta_3 = torch.tensor(0.5).cuda(), torch.tensor(0.5).cuda()\n # alpha_1, alpha_2, alpha_3 = alpha, alpha, alpha\n # beta_1, beta_2, beta_3 = beta, beta, beta\n\n s1_hat = s1\n s2_hat = alpha_1 * s1_hat + (1 - alpha_1) * s2\n s3_hat = beta_1 * s2_hat + (1 - beta_1) * s3\n\n s4_hat = s4\n s5_hat = alpha_2 * s4_hat + (1 - alpha_2) * s5\n s6_hat = beta_2 * s5_hat + (1 - beta_2) * s6\n\n s7_hat = s7\n s8_hat = alpha_3 * s7_hat + (1 - alpha_3) * s8\n s9_hat = beta_3 * s8_hat + (1 - beta_3) * s9\n\n output = self.scale_fuse(torch.cat((s1_hat, s2_hat, s3_hat, s4_hat, s5_hat,\n s6_hat, s7_hat, s8_hat, s9_hat), dim=1))\n\n return output\n\n def _forward_unimplemented(self, x) -> None:\n raise NotImplementedError\n"
] | [
[
"torch.zeros",
"torch.rand",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.ge",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.functional.upsample_bilinear",
"torch.sort"
]
] |
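`DC_layer.max_out` in this row composes an elementwise four-way maximum out of `torch.ge` masks rather than calling a max op directly. A quick standalone check that the mask construction really is an elementwise max (the test tensors are arbitrary; this is not the repo's test suite):

```python
import torch

def max_out(x1, x2, x3, x4):
    # mask * a + (1 - mask) * b selects a where a >= b, else b
    m1 = torch.ge(x1, x2).float()
    x = m1 * x1 + (1 - m1) * x2
    m2 = torch.ge(x, x3).float()
    x = m2 * x + (1 - m2) * x3
    m3 = torch.ge(x, x4).float()
    return m3 * x + (1 - m3) * x4

xs = [torch.randn(2, 4) for _ in range(4)]
expected = torch.maximum(torch.maximum(xs[0], xs[1]),
                         torch.maximum(xs[2], xs[3]))
assert torch.allclose(max_out(*xs), expected)
```

The masks reproduce `torch.maximum` exactly; why the source prefers the mask formulation over a direct max op is not stated in the row.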
lxzzhy/VITAD-Fast-Online-Network-Traffic-Anomaly-Detection-Based-on-Variational-Inference | [
"7704b5984ec87fff35b98509f11b4883ffd6ccc1"
] | [
"utils/plot.py"
] | [
"import matplotlib\nimport numpy as np\nimport os\nimport json\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nmatplotlib.use('Agg')\n\n\ndef plot_R(dataset_name):\n x = range(2, 51, 2)\n pre = os.path.join(os.path.join('results', dataset_name))\n\n with open(os.path.join(pre, os.path.join('proposed', 'R_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n\n plt.figure(figsize=(2.5, 1.6))\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n\n plt.xticks(range(0, len(x), 4), range(2, 51, 8), fontsize=14)\n plt.yticks(fontsize=12)\n plt.xlabel('rank (R)', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n\n #plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n #plt.margins(0, 0)\n\n plt.savefig(os.path.join(pre, 'R_TPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'R_TPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'R_TPRS.png'), bbox_inches='tight', dpi=300, format='png')\n plt.clf()\n\n with open(os.path.join(pre, os.path.join('proposed', 'R_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n\n plt.figure(figsize=(2.5, 1.6))\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n\n plt.xticks(range(0, len(x), 4), range(2, 51, 8), fontsize=14)\n plt.yticks(fontsize=12)\n plt.xlabel('rank (R)', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n plt.savefig(os.path.join(pre, 'R_FPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'R_FPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'R_FPRS.png'), bbox_inches='tight', dpi=300, format='png')\n plt.close()\n\n\ndef plot_mu(dataset_name):\n x = [0.01, 0.05, 0.1, 0.5, 1]\n pre = os.path.join(os.path.join('results', dataset_name))\n\n with open(os.path.join(pre, os.path.join('proposed', 'mu_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n #plt.figure(figsize=(2, 1.5))\n plt.subplot(2, 3, 1)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', 
marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n plt.legend(bbox_to_anchor=(0.84, 0.825), loc=2, borderaxespad=0, ncol=4)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.savefig(os.path.join(pre, 'mu_TPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'mu_TPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'mu_TPRS.png'), bbox_inches='tight', dpi=300, format='png')\n plt.clf()\n\n with open(os.path.join(pre, os.path.join('proposed', 'mu_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.figure(figsize=(11.5, 6.6))\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n plt.savefig(os.path.join(pre, 'mu_FPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'mu_FPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'mu_FPRS.png'), bbox_inches='tight', dpi=300, format='png')\n plt.close()\n\n\ndef plot_sigma(dataset_name):\n x = [0.01, 0.05, 0.1, 0.5, 1]\n pre = os.path.join(os.path.join('results', dataset_name))\n\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 
'sigma_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.figure(figsize=(11.5, 6.6))\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('variance $\\\\sigma^2$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.savefig(os.path.join(pre, 'sigma_TPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'sigma_TPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'sigma_TPRS.png'), bbox_inches='tight', dpi=300, format='png')\n plt.clf()\n\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'sigma_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.figure(figsize=(11.5, 6.6))\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x)\n plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('variance $\\\\sigma^2$')\n plt.ylabel('FPR')\n plt.savefig(os.path.join(pre, 'sigma_FPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'sigma_FPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'sigma_FPRS.png'), bbox_inches='tight', dpi=300, format='png')\n 
plt.close()\n\n\ndef plot_ratio(dataset_name):\n    x = np.arange(1, 11)\n    x = x / 100\n    pre = os.path.join(os.path.join('results', dataset_name))\n\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_TPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('OSTD', 'ratio_TPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_TPRS.json')), 'r') as FR:\n        y3 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_TPRS.json')), 'r') as FR:\n        y4 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('FBCP', 'ratio_TPRS.json')), 'r') as FR:\n        y5 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('3WD', 'ratio_TPRS.json')), 'r') as FR:\n        y6 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_TPRS.json')), 'r') as FR:\n        y7 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('T-online', 'ratio_TPRS.json')), 'r') as FR:\n        y8 = json.loads(FR.read())\n\n    plt.figure(figsize=(11.5, 6.6))\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n    plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n    plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n    plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n    plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n    plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n    plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n    plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n    plt.xticks(range(len(x)), x, fontsize=12)\n    plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n    plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.savefig(os.path.join(pre, 'ratio_TPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n    plt.savefig(os.path.join(pre, 'ratio_TPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n    plt.savefig(os.path.join(pre, 'ratio_TPRS.png'), bbox_inches='tight', dpi=300, format='png')\n    plt.clf()\n\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_FPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('OSTD', 'ratio_FPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_FPRS.json')), 'r') as FR:\n        y3 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_FPRS.json')), 'r') as FR:\n        y4 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('FBCP', 'ratio_FPRS.json')), 'r') as FR:\n        y5 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('3WD', 'ratio_FPRS.json')), 'r') as FR:\n        y6 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_FPRS.json')), 'r') as FR:\n        y7 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('T-online', 'ratio_FPRS.json')), 'r') as FR:\n        y8 = json.loads(FR.read())\n\n    plt.figure(figsize=(11.5, 6.6))\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n    plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n    plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n 
plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n plt.savefig(os.path.join(pre, 'ratio_FPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'ratio_FPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'ratio_FPRS.png'), bbox_inches='tight', dpi=300, format='png')\n plt.close()\n\n\ndef plot_time(dataset_name):\n x = range(0, 8000, 100)\n pre = os.path.join(os.path.join('results', dataset_name))\n tmp = [_ for _ in range(0, 80, 8)]\n tmp.append(79)\n tmp2 = [x[_]+1 for _ in range(0, 80, 8)]\n tmp2.append(8000)\n with open(os.path.join(pre, os.path.join('proposed', 'TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n\n y1 = [y1[_] for _ in range(0, 8000, 100)]\n y2 = [y2[_] for _ in range(0, 8000, 100)]\n y3 = [y3[_] for _ in range(0, 8000, 100)]\n y4 = [y4[_] for _ in range(0, 8000, 100)]\n y5 = [y5[_] for _ in range(0, 8000, 100)]\n y6 = [y6[_] for _ in range(0, 8000, 100)]\n print(len(y1))\n plt.figure(figsize=(10.5, 6.5))\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n\n plt.xticks(tmp, tmp2, fontsize=12)\n plt.legend(bbox_to_anchor=(0.056, 0.454), loc=2, borderaxespad=0)\n #plt.ylim(ymin=0, ymax=1.0)\n plt.xlabel('sample point t', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.savefig(os.path.join(pre, 'TPRS.png'), bbox_inches='tight', dpi=300, format='png')\n plt.savefig(os.path.join(pre, 'TPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'TPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.clf()\n\n with open(os.path.join(pre, os.path.join('proposed', 'FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, 
os.path.join('ReProCS', 'FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n\n y1 = [y1[_] for _ in range(0, 8000, 100)]\n y2 = [y2[_] for _ in range(0, 8000, 100)]\n\n y3 = [y3[_] for _ in range(0, 8000, 100)]\n y4 = [y4[_] for _ in range(0, 8000, 100)]\n y5 = [y5[_] for _ in range(0, 8000, 100)]\n y6 = [y6[_] for _ in range(0, 8000, 100)]\n print(len(y1))\n plt.figure(figsize=(10.5, 6.5))\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(tmp, tmp2, fontsize=12)\n plt.legend(bbox_to_anchor=(0.825, 0.985), loc=2, borderaxespad=0)\n #plt.ylim(ymin=0, ymax=1.0)\n plt.xlabel('sample point t', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n plt.savefig(os.path.join(pre, 'FPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join(pre, 'FPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join(pre, 'FPRS.png'), bbox_inches='tight', dpi=300, format='png')\n # plt.clf()\n #\n # with open(os.path.join(pre, os.path.join('proposed', 'RSE.json')), 'r') as FR:\n # y1 = json.loads(FR.read())\n # with open(os.path.join(pre, os.path.join('OSTD', 'RSE.json')), 'r') as FR:\n # y2 = json.loads(FR.read())\n # with open(os.path.join(pre, os.path.join('STOC-RPCA', 'RSE.json')), 'r') as FR:\n # y3 = json.loads(FR.read())\n # with open(os.path.join(pre, os.path.join('ReProCS', 'RSE.json')), 'r') as FR:\n # y4 = json.loads(FR.read())\n # with open(os.path.join(pre, os.path.join('FBCP', 'RSE.json')), 'r') as FR:\n # y5 = json.loads(FR.read())\n # with open(os.path.join(pre, os.path.join('T-online', 'RSE.json')), 'r') as FR:\n # y6 = json.loads(FR.read())\n #\n # y1 = [y1[_] for _ in range(0, 8000, 100)]\n # y2 = [y2[_] for _ in range(0, 8000, 100)]\n # y3 = [y3[_] for _ in range(0, 8000, 100)]\n # y4 = [y4[_] for _ in range(0, 8000, 100)]\n # y5 = [y5[_] for _ in range(0, 8000, 100)]\n # print(len(y1))\n # plt.figure(figsize=(10.5, 6.5))\n # plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n # plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n # plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n # plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n # plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n # plt.plot(range(len(x)), y6, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n # plt.xticks(tmp, tmp2, fontsize=12)\n # plt.legend(bbox_to_anchor=(0.89, 0.98), loc=2, borderaxespad=0)\n # #plt.ylim(ymin=0, ymax=1.0)\n # plt.xlabel('sample point t', fontsize=12)\n # plt.ylabel('RSE', fontsize=12)\n # plt.savefig(os.path.join(pre, 'RSE.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n # plt.savefig(os.path.join(pre, 'RSE.eps'), 
bbox_inches='tight', dpi=300, format='eps')\n # plt.savefig(os.path.join(pre, 'RSE.png'), bbox_inches='tight', dpi=300, format='png')\n plt.close()\n\n\ndef plot_mu_n():\n x = [0.01, 0.05, 0.1, 0.5, 1]\n pre = os.path.join(os.path.join('results', 'Abilene'))\n\n with open(os.path.join(pre, os.path.join('proposed', 'mu_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.figure(figsize=(10.5, 6.5))\n plt.subplot(2, 3, 1)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(0.84, 0.825), loc=2, borderaxespad=0, ncol=4)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'GEANT'))\n with open(os.path.join(pre, os.path.join('proposed', 'mu_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 2)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', 
linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'CERNET'))\n with open(os.path.join(pre, os.path.join('proposed', 'mu_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 3)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'Abilene'))\n with open(os.path.join(pre, os.path.join('proposed', 'mu_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 4)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n 
plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.text(1, -0.224, '(a) Abilene', fontsize=12, weight='bold')\n\n pre = os.path.join(os.path.join('results', 'GEANT'))\n with open(os.path.join(pre, os.path.join('proposed', 'mu_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 5)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.text(1, -0.047, '(b) GEANT', fontsize=12, weight='bold')\n\n pre = os.path.join(os.path.join('results', 'CERNET'))\n with open(os.path.join(pre, os.path.join('proposed', 'mu_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'mu_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'mu_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'mu_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'mu_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'mu_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'mu_TPRS.json')), 'r') 
as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'mu_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 6)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n plt.legend(bbox_to_anchor=(-2.664, 2.39), loc=2, borderaxespad=0, ncol=8, handletextpad=0.6, columnspacing=1)\n plt.xlabel('mean value $\\\\mu$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.text(1, -0.205, '(c) CERNET', fontsize=12, weight='bold')\n plt.subplots_adjust(wspace=0.33, hspace=0.24)\n plt.savefig(os.path.join('results', 'mu.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join('results', 'mu.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join('results', 'mu.png'), bbox_inches='tight', dpi=300, format='png')\n plt.close()\n\n\ndef plot_sigma_n():\n x = [0.01, 0.05, 0.1, 0.5, 1]\n pre = os.path.join(os.path.join('results', 'Abilene'))\n\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'sigma_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'sigma_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.figure(figsize=(10.5, 6.5))\n plt.subplot(2, 3, 1)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(0.84, 0.825), loc=2, borderaxespad=0, ncol=4)\n plt.xlabel('variance $\\\\sigma^2$', fontsize=12)\n plt.ylabel('FPR', 
fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'GEANT'))\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'sigma_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'sigma_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 2)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('variance $\\\\sigma^2$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'CERNET'))\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'sigma_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'sigma_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 3)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', 
label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('variance $\\\\sigma^2$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'Abilene'))\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'sigma_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'sigma_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 4)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('variance $\\\\sigma^2$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.text(1, -0.2347, '(a) Abilene', fontsize=12, weight='bold')\n\n pre = os.path.join(os.path.join('results', 'GEANT'))\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'sigma_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'sigma_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 5)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', 
label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('variance $\\\\sigma^2$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.text(1, -0.226, '(b) GEANT', fontsize=12, weight='bold')\n\n pre = os.path.join(os.path.join('results', 'CERNET'))\n with open(os.path.join(pre, os.path.join('proposed', 'sigma_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'sigma_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'sigma_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'sigma_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'sigma_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'sigma_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'sigma_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'sigma_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 6)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(len(x)), x, fontsize=12)\n plt.legend(bbox_to_anchor=(-2.626, 2.37), loc=2, borderaxespad=0, ncol=8, handletextpad=0.6, columnspacing=1)\n plt.xlabel('variance $\\\\sigma^2$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.text(1, -0.198, '(c) CERNET', fontsize=12, weight='bold')\n plt.subplots_adjust(wspace=0.31, hspace=0.22)\n plt.savefig(os.path.join('results', 'sigma.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join('results', 'sigma.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join('results', 'sigma.png'), bbox_inches='tight', dpi=300, format='png')\n plt.close()\n\n\ndef plot_ratio_n():\n x = np.arange(1, 11)\n x = x / 100\n pre = os.path.join(os.path.join('results', 'Abilene'))\n\n with open(os.path.join(pre, os.path.join('proposed', 'ratio_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'ratio_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_FPRS.json')), 'r') as FR:\n y3 = 
json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'ratio_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'ratio_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'ratio_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.figure(figsize=(10.5, 6.5))\n plt.subplot(2, 3, 1)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(0, len(x), 2), [x[_] for _ in range(0, len(x), 2)], fontsize=12)\n #plt.legend(bbox_to_anchor=(0.84, 0.825), loc=2, borderaxespad=0, ncol=4)\n plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'GEANT'))\n with open(os.path.join(pre, os.path.join('proposed', 'ratio_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'ratio_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'ratio_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'ratio_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'ratio_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 2)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(0, len(x), 2), [x[_] for _ in range(0, len(x), 2)], fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n 
plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'CERNET'))\n with open(os.path.join(pre, os.path.join('proposed', 'ratio_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'ratio_FPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_FPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_FPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'ratio_FPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'ratio_FPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_FPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'ratio_FPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 3)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(0, len(x), 2), [x[_] for _ in range(0, len(x), 2)], fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'Abilene'))\n with open(os.path.join(pre, os.path.join('proposed', 'ratio_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('OSTD', 'ratio_TPRS.json')), 'r') as FR:\n y2 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_TPRS.json')), 'r') as FR:\n y3 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_TPRS.json')), 'r') as FR:\n y4 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('FBCP', 'ratio_TPRS.json')), 'r') as FR:\n y5 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('3WD', 'ratio_TPRS.json')), 'r') as FR:\n y6 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'ratio_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 4)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n 
plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n    plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n    plt.xticks(range(0, len(x), 2), [x[_] for _ in range(0, len(x), 2)], fontsize=12)\n    #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n    plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.text(2.16, -0.322, '(a) Abilene', fontsize=12, weight='bold')\n\n    pre = os.path.join(os.path.join('results', 'GEANT'))\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_TPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('OSTD', 'ratio_TPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_TPRS.json')), 'r') as FR:\n        y3 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_TPRS.json')), 'r') as FR:\n        y4 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('FBCP', 'ratio_TPRS.json')), 'r') as FR:\n        y5 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('3WD', 'ratio_TPRS.json')), 'r') as FR:\n        y6 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_TPRS.json')), 'r') as FR:\n        y7 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('T-online', 'ratio_TPRS.json')), 'r') as FR:\n        y8 = json.loads(FR.read())\n\n    plt.subplot(2, 3, 5)\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n    plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n    plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n    plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n    plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n    plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n    plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n    plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n    plt.xticks(range(0, len(x), 2), [x[_] for _ in range(0, len(x), 2)], fontsize=12)\n    #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n    plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.text(2.16, -0.1235, '(b) GEANT', fontsize=12, weight='bold')\n\n    pre = os.path.join(os.path.join('results', 'CERNET'))\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_TPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('OSTD', 'ratio_TPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('STOC-RPCA', 'ratio_TPRS.json')), 'r') as FR:\n        y3 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('ReProCS', 'ratio_TPRS.json')), 'r') as FR:\n        y4 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('FBCP', 'ratio_TPRS.json')), 'r') as FR:\n        y5 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('3WD', 'ratio_TPRS.json')), 'r') as FR:\n        y6 = json.loads(FR.read())\n    with 
open(os.path.join(pre, os.path.join('CP-ALS', 'ratio_TPRS.json')), 'r') as FR:\n y7 = json.loads(FR.read())\n with open(os.path.join(pre, os.path.join('T-online', 'ratio_TPRS.json')), 'r') as FR:\n y8 = json.loads(FR.read())\n\n plt.subplot(2, 3, 6)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='OSTD')\n plt.plot(range(len(x)), y3, color='#ddccc5', marker='D', linestyle='-.', label='STOC-RPCA')\n plt.plot(range(len(x)), y4, color='#ffa289', marker='h', linestyle=':', label='ReProCS')\n plt.plot(range(len(x)), y5, color='#e24b2c', marker='v', linestyle=':', label='FBCP')\n plt.plot(range(len(x)), y6, color='#6e7a8a', marker='^', linestyle=':', label='3WD')\n plt.plot(range(len(x)), y7, color='#c1194d', marker='<', linestyle=':', label='CP-ALS')\n plt.plot(range(len(x)), y8, color='#faaf3a', marker='<', linestyle=':', label='T-online')\n plt.xticks(range(0, len(x), 2), [x[_] for _ in range(0, len(x), 2)], fontsize=12)\n plt.legend(bbox_to_anchor=(-2.644, 2.369), loc=2, borderaxespad=0, ncol=8, handletextpad=0.6, columnspacing=1)\n plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n plt.ylabel('TPR', fontsize=12)\n plt.text(2.16, -0.270, '(c) CERNET', fontsize=12, weight='bold')\n plt.subplots_adjust(wspace=0.32, hspace=0.22)\n plt.savefig(os.path.join('results', 'ratio.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n plt.savefig(os.path.join('results', 'ratio.eps'), bbox_inches='tight', dpi=300, format='eps')\n plt.savefig(os.path.join('results', 'ratio.png'), bbox_inches='tight', dpi=300, format='png')\n plt.close()\n\n\ndef plot_R_n():\n x = range(2, 51, 2)\n pre = os.path.join(os.path.join('results', 'Abilene'))\n\n with open(os.path.join(pre, os.path.join('proposed', 'R_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n\n plt.figure(figsize=(10.5, 6.5))\n plt.subplot(2, 3, 1)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.xticks(range(0, len(x), 4), range(2, 51, 8), fontsize=12)\n #plt.legend(bbox_to_anchor=(0.84, 0.825), loc=2, borderaxespad=0, ncol=4)\n plt.xlabel('rank (R)', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'GEANT'))\n with open(os.path.join(pre, os.path.join('proposed', 'R_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n\n plt.subplot(2, 3, 2)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.xticks(range(0, len(x), 4), range(2, 51, 8), fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('rank (R)', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'CERNET'))\n with open(os.path.join(pre, os.path.join('proposed', 'R_FPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n\n plt.subplot(2, 3, 3)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.xticks(range(0, len(x), 4), range(2, 51, 8), fontsize=12)\n #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n plt.xlabel('rank (R)', fontsize=12)\n plt.ylabel('FPR', fontsize=12)\n\n pre = os.path.join(os.path.join('results', 'Abilene'))\n with open(os.path.join(pre, os.path.join('proposed', 'R_TPRS.json')), 'r') as FR:\n y1 = json.loads(FR.read())\n\n plt.subplot(2, 3, 4)\n plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n plt.xticks(range(0, len(x), 4), range(2, 
51, 8), fontsize=12)\n    #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n    plt.xlabel('rank (R)', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.text(4.83, 0.99867, '(a) Abilene', fontsize=12, weight='bold')\n\n    pre = os.path.join(os.path.join('results', 'GEANT'))\n    with open(os.path.join(pre, os.path.join('proposed', 'R_TPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n\n    plt.subplot(2, 3, 5)\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n    plt.xticks(range(0, len(x), 4), range(2, 51, 8), fontsize=12)\n    #plt.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0)\n    plt.xlabel('rank (R)', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.text(5.25, 0.945966, '(b) GEANT', fontsize=12, weight='bold')\n\n    pre = os.path.join(os.path.join('results', 'CERNET'))\n    with open(os.path.join(pre, os.path.join('proposed', 'R_TPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n\n    plt.subplot(2, 3, 6)\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n    plt.xticks(range(0, len(x), 4), range(2, 51, 8), fontsize=12)\n    plt.xlabel('rank (R)', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.text(4.9, 0.799192, '(c) CERNET', fontsize=12, weight='bold')\n    plt.subplots_adjust(wspace=0.52, hspace=0.22)\n    plt.savefig(os.path.join('results', 'R.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n    plt.savefig(os.path.join('results', 'R.eps'), bbox_inches='tight', dpi=300, format='eps')\n    plt.savefig(os.path.join('results', 'R.png'), bbox_inches='tight', dpi=300, format='png')\n    plt.close()\n\n\ndef plot_SNR_n():\n    x = [0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5]\n    pre = os.path.join(os.path.join('results', 'CERNET'))\n    with open(os.path.join(pre, os.path.join('proposed', 'SNR_TPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('T-online', 'SNR_TPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n\n    plt.figure(figsize=(10.5, 6.5))\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n    plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='T-online')\n\n    plt.xticks(range(len(x)), x, fontsize=12)\n    plt.legend(bbox_to_anchor=(0.056, 0.454), loc=2, borderaxespad=0)\n    # plt.ylim(ymin=0, ymax=1.0)\n    plt.xlabel('SNR', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.savefig(os.path.join(pre, 'SNR_TPRS.png'), bbox_inches='tight', dpi=300, format='png')\n    plt.savefig(os.path.join(pre, 'SNR_TPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n    plt.savefig(os.path.join(pre, 'SNR_TPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n    plt.clf()\n\n    with open(os.path.join(pre, os.path.join('proposed', 'SNR_FPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('T-online', 'SNR_FPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n\n    plt.figure(figsize=(10.5, 6.5))\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='proposed')\n    plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='T-online')\n\n    plt.xticks(range(len(x)), x, fontsize=12)\n    plt.legend(bbox_to_anchor=(0.056, 0.454), loc=2, borderaxespad=0)\n    # plt.ylim(ymin=0, ymax=1.0)\n    plt.xlabel('SNR', fontsize=12)\n    plt.ylabel('FPR', fontsize=12)\n    plt.savefig(os.path.join(pre, 'SNR_FPRS.png'), bbox_inches='tight', dpi=300, format='png')\n    plt.savefig(os.path.join(pre, 'SNR_FPRS.pdf'), bbox_inches='tight', 
dpi=300, format='pdf')\n    plt.savefig(os.path.join(pre, 'SNR_FPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n    plt.close()\n\n\ndef plot_ratio_n2():\n    x = np.arange(1, 11)\n    x = x / 100\n    pre = os.path.join(os.path.join('results', 'CERNET'))\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_original_TPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('T-online', 'ratio_TPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_n_TPRS.json')), 'r') as FR:\n        y3 = json.loads(FR.read())\n\n    plt.figure(figsize=(10.5, 6.5))\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='original')\n    plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='T-online')\n    plt.plot(range(len(x)), y3, color='#e56a6c', marker='D', linestyle='-.', label='improved')\n\n    plt.xticks(range(len(x)), x, fontsize=12)\n    plt.legend(bbox_to_anchor=(0.056, 0.454), loc=2, borderaxespad=0)\n    # plt.ylim(ymin=0, ymax=1.0)\n    plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n    plt.ylabel('TPR', fontsize=12)\n    plt.savefig(os.path.join(pre, 'ratio2_TPRS.png'), bbox_inches='tight', dpi=300, format='png')\n    plt.savefig(os.path.join(pre, 'ratio2_TPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n    plt.savefig(os.path.join(pre, 'ratio2_TPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n    plt.clf()\n\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_original_FPRS.json')), 'r') as FR:\n        y1 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('T-online', 'ratio_FPRS.json')), 'r') as FR:\n        y2 = json.loads(FR.read())\n    with open(os.path.join(pre, os.path.join('proposed', 'ratio_n_FPRS.json')), 'r') as FR:\n        y3 = json.loads(FR.read())\n\n    plt.figure(figsize=(10.5, 6.5))\n    plt.plot(range(len(x)), y1, color='#3d5d46', marker='o', linestyle='-', label='original')\n    plt.plot(range(len(x)), y2, color='#8dabb6', marker='s', linestyle='--', label='T-online')\n    plt.plot(range(len(x)), y3, color='#e56a6c', marker='D', linestyle='-.', label='improved')\n\n    plt.xticks(range(len(x)), x, fontsize=12)\n    plt.legend(bbox_to_anchor=(0.056, 0.904), loc=2, borderaxespad=0)\n    # plt.ylim(ymin=0, ymax=1.0)\n    plt.xlabel('outlier ratio $\\\\gamma$', fontsize=12)\n    plt.ylabel('FPR', fontsize=12)\n    plt.savefig(os.path.join(pre, 'ratio2_FPRS.png'), bbox_inches='tight', dpi=300, format='png')\n    plt.savefig(os.path.join(pre, 'ratio2_FPRS.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n    plt.savefig(os.path.join(pre, 'ratio2_FPRS.eps'), bbox_inches='tight', dpi=300, format='eps')\n    plt.close()\n\n\ndef locations():\n    length = 8000\n    x = range(length)\n    dataset_name = \"CERNET\"\n    pre = os.path.join(os.path.join('results', dataset_name))\n    with open(os.path.join(pre, os.path.join('proposed', 'false_locations.json')), 'r') as FR:\n        y = json.loads(FR.read())\n\n    st = {}\n    for i in range(len(y)):\n        for ele in y[i]:\n            loc = str(ele[0]) + ' ' + str(ele[1])\n            if loc in st:\n                st[loc] += 1\n            else:\n                st[loc] = 1\n\n    st = sorted(st.items(), key=lambda kv:(kv[1], kv[0]), reverse=True)\n    for item in st:\n        print(item[0]+':'+str(item[1]))\n\n    data = np.load('data/CERNET/tensor.npy')\n    y1 = []\n    y2 = []\n    y3 = []\n    y4 = []\n    y5 = []\n    y6 = []\n    for t in range(length):\n        y1.append(data[0, 2, t])\n        y2.append(data[0, 0, t])\n        y3.append(data[0, 11, t])\n        y4.append(data[12, 11, t])\n        y5.append(data[2, 0, t])\n        y6.append(data[12, 0, t])\n    # x = []\n    # y = []\n    # z = []\n    # for t in range(2000):\n    # 
x.append(list(range(15)))\n    #     y.append(np.ones(15)*t)\n    #     z.append(data[:, :, t].reshape(14*14, )[15:30])\n\n    # fig = plt.figure(figsize=(32, 24))\n    # ax = fig.gca(projection='3d')  # get current axes, with a 3d projection\n    # ax.scatter(x, y, z, c='red', marker='o', label='points')\n    # ax.set_xlabel(\"X axis\")\n    # ax.set_ylabel(\"Y axis\")\n    # ax.set_zlabel(\"Z axis\")\n    plt.figure(figsize=(10.5, 6.5))\n    plt.plot(range(len(x)), y1, color='#FF0000', linestyle='-', label='0-2')\n    plt.plot(range(len(x)), y2, color='#FF7F00', linestyle='--', label='0-0')\n    plt.plot(range(len(x)), y3, color='#FFFF00', linestyle='-.', label='0-11')\n    plt.plot(range(len(x)), y4, color='#00FF00', linestyle=':', label='12-11')\n    plt.plot(range(len(x)), y5, color='#00FFFF', linestyle=':', label='2-0')\n    plt.plot(range(len(x)), y6, color='#0000FF', linestyle=':', label='12-0')\n    plt.legend()\n    plt.xlabel('sample point t', fontsize=12)\n    plt.ylabel('traffic volume', fontsize=12)\n    plt.savefig(os.path.join('results', 'locations.pdf'), bbox_inches='tight', dpi=300, format='pdf')\n    plt.savefig(os.path.join('results', 'locations.eps'), bbox_inches='tight', dpi=300, format='eps')\n    plt.savefig(os.path.join('results', 'locations.png'), bbox_inches='tight', dpi=300, format='png')\n\n\n\nif __name__ == '__main__':\n    #plot_mu('Abilene')\n    # plot_mu('GEANT')\n    # plot_mu('CERNET')\n    # plot_sigma('Abilene')\n    # plot_sigma('GEANT')\n    # plot_sigma('CERNET')\n    # #\n    # plot_ratio('Abilene')\n    # plot_ratio('GEANT')\n    # plot_ratio('CERNET')\n    # #\n    #plot_time('Abilene')\n    #plot_time('GEANT')\n    #plot_time('CERNET')\n    #\n    #plot_R('Abilene')\n    #plot_R('GEANT')\n    #plot_R('CERNET')\n\n    #plot_mu_n()\n    #plot_sigma_n()\n    #plot_ratio_n()\n    #plot_R_n()\n    #plot_SNR_n()\n    plot_ratio_n2()\n    #locations()"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.text",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"numpy.load",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplot"
]
] |
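The script in the row above repeats one load-JSON-then-plot pattern for every metric. A minimal sketch of that pattern, under the assumption that each JSON file holds a plain list of floats; the path, label, and output stem below are illustrative placeholders, not names from the repository:

```python
import json
import matplotlib.pyplot as plt

def plot_metric_series(json_path, label, out_stem, ylabel):
    """Plot one metric series (a JSON list of floats) and save it in three formats."""
    with open(json_path, 'r') as fr:
        series = json.load(fr)  # equivalent to the json.loads(FR.read()) idiom above
    plt.figure(figsize=(10.5, 6.5))
    plt.plot(range(len(series)), series, marker='o', label=label)
    plt.xlabel('sample point t', fontsize=12)
    plt.ylabel(ylabel, fontsize=12)
    plt.legend()
    for ext in ('png', 'pdf', 'eps'):
        plt.savefig('%s.%s' % (out_stem, ext), bbox_inches='tight', dpi=300, format=ext)
    plt.close()
```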
shahid313/embedding-propagation | [
"f3da33939ddd3eba195c9c8e0f433944a8b02ef6"
] | [
"src/models/backbones/wrn.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn.functional as F\n\n\nclass Block(torch.nn.Module):\n def __init__(self, ni, no, stride, dropout=0):\n super().__init__()\n self.conv0 = torch.nn.Conv2d(ni, no, 3, stride=stride, padding=1, bias=False)\n self.bn0 = torch.nn.BatchNorm2d(no)\n torch.nn.init.kaiming_normal_(self.conv0.weight.data)\n self.bn1 = torch.nn.BatchNorm2d(no)\n if dropout == 0:\n self.dropout = lambda x: x\n else:\n self.dropout = torch.nn.Dropout2d(dropout)\n self.conv1 = torch.nn.Conv2d(no, no, 3, stride=1, padding=1, bias=False)\n torch.nn.init.kaiming_normal_(self.conv1.weight.data)\n self.reduce = ni != no\n if self.reduce:\n self.conv_reduce = torch.nn.Conv2d(ni, no, 1, stride=stride, bias=False)\n torch.nn.init.kaiming_normal_(self.conv_reduce.weight.data)\n\n def forward(self, x):\n y = self.conv0(x)\n y = F.relu(self.bn0(y), inplace=True)\n y = self.dropout(y)\n y = self.conv1(y)\n y = self.bn1(y)\n if self.reduce:\n return F.relu(y + self.conv_reduce(x), True)\n else:\n return F.relu(y + x, True)\n\n\nclass Group(torch.nn.Module):\n def __init__(self, ni, no, n, stride, dropout=0):\n super().__init__()\n self.n = n\n for i in range(n):\n self.__setattr__(\"block_%d\" % i, Block(ni if i == 0 else no, no, stride if i == 0 else 1, dropout=dropout))\n\n def forward(self, x):\n for i in range(self.n):\n x = self.__getattr__(\"block_%d\" % i)(x)\n return x\n\n\nclass WideResNet(torch.nn.Module):\n def __init__(self, depth, width, exp_dict):\n super(WideResNet, self).__init__()\n assert (depth - 4) % 6 == 0, 'depth should be 6n+4'\n self.n = (depth - 4) // 6\n self.output_size = 640\n self.widths = torch.Tensor([16, 32, 64]).mul(width).int().numpy().tolist()\n self.conv0 = torch.nn.Conv2d(3, self.widths[0] // 2, 3, padding=1, bias=False)\n self.bn_0 = torch.nn.BatchNorm2d(self.widths[0] // 2)\n self.dropout_prob = exp_dict[\"dropout\"]\n self.group_0 = Group(self.widths[0] // 2, self.widths[0], self.n, 2, dropout=self.dropout_prob)\n self.group_1 = Group(self.widths[0], self.widths[1], self.n, 2, dropout=self.dropout_prob)\n self.group_2 = Group(self.widths[1], self.widths[2], self.n, 2, dropout=self.dropout_prob)\n self.bn_out = torch.nn.BatchNorm1d(self.output_size)\n\n def get_base_parameters(self):\n parameters = []\n parameters += list(self.conv0.parameters())\n parameters += list(self.group_0.parameters())\n parameters += list(self.group_1.parameters())\n parameters += list(self.group_2.parameters())\n parameters += list(self.bn.parameters())\n if self.embedding:\n parameters += list(self.conv_embed)\n return parameters\n\n def get_classifier_parameters(self):\n return self.classifier.parameters()\n\n def add_classifier(self, nclasses, name=\"classifier\", modalities=None):\n setattr(self, name, torch.nn.Linear(self.output_size, nclasses))\n\n def forward(self, x, **kwargs):\n o = F.relu(self.bn_0(self.conv0(x)), True)\n o = self.group_0(o)\n o = self.group_1(o)\n o = self.group_2(o)\n o = o.mean(3).mean(2)\n o = F.relu(self.bn_out(o.view(o.size(0), -1)))\n return o"
] | [
[
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.functional.relu",
"torch.Tensor",
"torch.nn.Dropout2d"
]
] |
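A quick smoke test for the WideResNet above (a sketch, assuming the class from this file is in scope; the 28/10 depth-width pair is one valid choice since `(28 - 4) % 6 == 0`, and `exp_dict` only needs the `"dropout"` key the constructor reads):

```python
import torch

net = WideResNet(depth=28, width=10, exp_dict={"dropout": 0.1})
x = torch.randn(2, 3, 84, 84)   # any spatial size works: features are globally pooled
features = net(x)
print(features.shape)           # -> torch.Size([2, 640]), matching net.output_size
```

Note that `get_classifier_parameters` is only usable after `add_classifier` has been called, since `self.classifier` is created there.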
uwoseis/zephyr-cli | [
"e4228be3947021f2b983c919c51bb1f67df90eb0"
] | [
"zephyr/middleware/fields.py"
] | [
"from __future__ import print_function, unicode_literals, division, absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import range\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom ..backend import BaseModelDependent\nimport SimPEG\n\nclass HelmFields(SimPEG.Fields.Fields):\n \"\"\"Fancy Field Storage for frequency domain problems\n u[:,'phi', freqInd] = phi\n print u[src0,'phi']\n \"\"\"\n\n knownFields = {'u': 'N'}\n aliasFields = None\n dtype = np.complex128\n\n def startup(self):\n pass\n\n def _storageShape(self, loc):\n nP = {'CC': self.mesh.nC,\n 'N': self.mesh.nN,\n 'F': self.mesh.nF,\n 'E': self.mesh.nE}[loc]\n nSrc = self.survey.nSrc\n nFreq = self.survey.nfreq\n return (nP, nSrc, nFreq)\n\n def _indexAndNameFromKey(self, key, accessType):\n if type(key) is not tuple:\n key = (key,)\n if len(key) == 1:\n key += (None,)\n if len(key) == 2:\n key += (slice(None,None,None),)\n\n assert len(key) == 3, 'must be [Src, fieldName, freqs]'\n\n srcTestList, name, freqInd = key\n\n name = self._nameIndex(name, accessType)\n srcInd = self._srcIndex(srcTestList)\n\n return (srcInd, freqInd), name\n\n def _correctShape(self, name, ind, deflate=False):\n srcInd, freqInd = ind\n if name in self.knownFields:\n loc = self.knownFields[name]\n else:\n loc = self.aliasFields[name][1]\n nP, total_nSrc, total_nF = self._storageShape(loc)\n nSrc = np.ones(total_nSrc, dtype=bool)[srcInd].sum()\n nF = np.ones(total_nF, dtype=bool)[freqInd].sum()\n shape = nP, nSrc, nF\n if deflate:\n shape = tuple([s for s in shape if s > 1])\n if len(shape) == 1:\n shape = shape + (1,)\n return shape\n\n def _setField(self, field, val, name, ind):\n srcInd, freqInd = ind\n shape = self._correctShape(name, ind)\n if SimPEG.Utils.isScalar(val):\n field[:,srcInd,freqInd] = val\n return\n if val.size != np.array(shape).prod():\n print('val.size: %r'%(val.size,))\n print('np.array(shape).prod(): %r'%(np.array(shape).prod(),))\n raise ValueError('Incorrect size for data.')\n correctShape = field[:,srcInd,freqInd].shape\n field[:,srcInd,freqInd] = val.reshape(correctShape, order='F')\n\n def _getField(self, name, ind):\n srcInd, freqInd = ind\n\n if name in self._fields:\n out = self._fields[name][:,srcInd,freqInd]\n else:\n # Aliased fields\n alias, loc, func = self.aliasFields[name]\n if type(func) is str:\n assert hasattr(self, func), 'The alias field function is a string, but it does not exist in the Fields class.'\n func = getattr(self, func)\n pointerFields = self._fields[alias][:,srcInd,freqInd]\n pointerShape = self._correctShape(alias, ind)\n pointerFields = pointerFields.reshape(pointerShape, order='F')\n\n freqII = np.arange(self.survey.nfreq)[freqInd]\n srcII = np.array(self.survey.srcList)[srcInd]\n srcII = srcII.tolist()\n\n if freqII.size == 1:\n pointerShapeDeflated = self._correctShape(alias, ind, deflate=True)\n pointerFields = pointerFields.reshape(pointerShapeDeflated, order='F')\n out = func(pointerFields, srcII, freqII)\n else: #loop over the frequencies\n nF = pointerShape[2]\n out = list(range(nF))\n for i, FIND_i in enumerate(freqII):\n fieldI = pointerFields[:,:,i]\n if fieldI.shape[0] == fieldI.size:\n fieldI = SimPEG.Utils.mkvc(fieldI, 2)\n out[i] = func(fieldI, srcII, FIND_i)\n if out[i].ndim == 1:\n out[i] = out[i][:,np.newaxis,np.newaxis]\n elif out[i].ndim == 2:\n out[i] = out[i][:,:,np.newaxis]\n out = np.concatenate(out, axis=2)\n\n shape = self._correctShape(name, ind, deflate=True)\n return out.reshape(shape, order='F')\n\n def 
__repr__(self):\n\n shape = self._storageShape('N')\n attrs = {\n 'name': self.__class__.__name__,\n 'id': id(self),\n 'nFields': len(self.knownFields) + len(self.aliasFields),\n 'nN': shape[0],\n 'nSrc': shape[1],\n 'nFreq': shape[2],\n }\n\n return '<%(name)s container at 0x%(id)x: %(nFields)d fields, with N shape (%(nN)d, %(nSrc)d, %(nFreq)d)>'%attrs\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.ones",
"numpy.arange"
]
] |
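`_correctShape` above carries a small deflation rule: singleton axes are dropped, but the result always keeps at least two dimensions. The rule isolated as a sketch (no SimPEG dependency; the shapes are invented):

```python
def deflate(shape):
    """Drop size-1 axes but never return fewer than two dimensions."""
    out = tuple(s for s in shape if s > 1)
    if len(out) == 1:
        out = out + (1,)
    return out

print(deflate((1024, 5, 3)))  # -> (1024, 5, 3)
print(deflate((1024, 1, 3)))  # -> (1024, 3)
print(deflate((1024, 1, 1)))  # -> (1024, 1)
```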
gulnazaki/performer-pytorch | [
"b423eed3550099e718e6318f5b501dc3417347c0"
] | [
"performer_pytorch/autoregressive_wrapper.py"
] | [
"from functools import partial\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_sequence\n\ndef exists(val):\n return val is not None\n\ndef top_p(logits, thres = 0.9):\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n sorted_indices_to_remove = cum_probs > (1 - thres)\n sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()\n sorted_indices_to_remove[:, 0] = 0\n\n sorted_logits[sorted_indices_to_remove] = float('-inf')\n return sorted_logits.scatter(1, sorted_indices, sorted_logits)\n\ndef top_k(logits, thres = 0.9):\n k = int((1 - thres) * logits.shape[-1])\n val, ind = torch.topk(logits, k)\n probs = torch.full_like(logits, float('-inf'))\n probs.scatter_(1, ind, val)\n return probs\n\ndef constrain(constrain_fn, logits, previous_sample):\n valid_ids = constrain_fn(previous_sample)\n valid_logits = logits.gather(1, valid_ids)\n probs = torch.full_like(logits, float('-inf'))\n return probs.scatter(1, valid_ids, valid_logits)\n\nclass AutoregressiveWrapper(nn.Module):\n def __init__(self, net):\n super().__init__()\n self.net = net\n self.max_seq_len = net.max_seq_len\n\n @torch.no_grad()\n def generate(self, start_tokens, seq_len, return_also_encodings = False, constrain_fn = None, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):\n was_training = self.net.training\n num_dims = len(start_tokens.shape)\n\n if num_dims == 1:\n start_tokens = start_tokens[None, :]\n\n b, t = start_tokens.shape\n\n self.net.eval()\n out = start_tokens\n input_mask = kwargs.pop('mask', None)\n\n if input_mask is None:\n input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)\n \n # # in case of conditional generation, if enc_mask is not provided use the correct context_mask\n # context_mask = kwargs.pop('context_mask', None)\n\n # if 'context' in kwargs and not exists(context_mask):\n # context = kwargs['context']\n # context_mask = torch.full(context.shape[:2], True, dtype=torch.bool, device=out.device)\n\n # kwargs.update(context_mask = context_mask)\n\n sample = torch.full((b, 1), -1)\n\n for _ in range(seq_len):\n x = out[:, -self.max_seq_len:]\n input_mask = input_mask[:, -self.max_seq_len:]\n if return_also_encodings:\n encodings, logits_sequence = self.net(x, mask=input_mask, return_both = True, **kwargs)\n logits = logits_sequence[:, -1, :]\n else:\n logits = self.net(x, mask=input_mask, **kwargs)[:, -1, :]\n if constrain_fn:\n logits = constrain(constrain_fn, logits, sample)\n noninf = torch.nonzero(logits[0].isfinite())\n if logits.size(0) == 1 and len(noninf) == 1:\n sample = noninf\n else:\n filtered_logits = filter_logits_fn(logits, thres = filter_thres)\n probs = F.softmax(filtered_logits / temperature, dim=-1)\n sample = torch.multinomial(probs, 1)\n\n out = torch.cat((out, sample), dim=-1)\n input_mask = F.pad(input_mask, (0, 1), value=True)\n\n if eos_token is not None and (sample == eos_token).all():\n break\n\n out = out[:, t:]\n\n if num_dims == 1:\n out = out.squeeze(0)\n\n self.net.train(was_training)\n \n if return_also_encodings:\n return encodings, out\n else:\n return out\n\n def forward(self, x, return_also_encodings = False, **kwargs):\n xi = x[:, :-1]\n xo = x[:, 1:]\n\n # help auto-solve an area of confusion around input masks in auto-regressive\n # if user supplies a mask that is only off by one from the source sequence, resolve it for them\n mask = 
kwargs.pop('mask', None)\n if mask is not None and mask.shape[1] == x.shape[1]:\n mask = mask[:, :-1]\n kwargs.update(mask = mask)\n\n if return_also_encodings:\n encodings, out = self.net(xi, return_both = True, **kwargs)\n loss = F.cross_entropy(out.transpose(1, 2), xo)\n return encodings, loss\n else:\n out = self.net(xi, **kwargs)\n loss = F.cross_entropy(out.transpose(1, 2), xo)\n return loss\n"
] | [
[
"torch.cat",
"torch.no_grad",
"torch.full_like",
"torch.multinomial",
"torch.full",
"torch.nn.functional.softmax",
"torch.nn.functional.pad",
"torch.sort",
"torch.topk"
]
] |
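A standalone check of the top-k filtering above (assuming the `top_k` function from this file is in scope; the logits are invented). With `thres=0.6` over five logits, `k = int(0.4 * 5) = 2`, so only the two highest logits survive and the softmax mass is split between them:

```python
import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, -1.0, 3.0, 0.0]])
filtered = top_k(logits, thres=0.6)   # entries outside the top 2 become -inf
probs = F.softmax(filtered, dim=-1)
print(probs)                          # ~[0.269, 0, 0, 0.731, 0]
```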
adam-dziedzic/time-series-ml | [
"81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a"
] | [
"cnns/graphs/fft_visualize/technique_tensorflow.py"
] | [
"import tensorflow as tf\nimport numpy as np\ntf.enable_eager_execution()\n\nx = np.array([[6., 5., 0., 2., 4., 1.],\n [4., 2., 8., 5., 6., 8.],\n [0., 0., 4., 2., 0., 2.],\n [2., 9., 9., 9., 1., 6.],\n [3., 0., 9., 4., 6., 6.],\n [7., 3., 4., 7., 9., 0.]])\nprint(\"sum: \", x.sum())\n\nxfft = tf.signal.rfft2d(x)\nprint(\"xfft: \", xfft.numpy())\n\n# xfft: tf.Tensor(\n# [[153. +0.j -16. -3.4641016j 0. +10.392305j\n# 11. +0.j ]\n# [ -4.5 +14.722432j 7.499999 +7.794228j 18. -12.124355j\n# 16.499998 +12.99038j ]\n# [ 4.499999 -19.918583j 14. -12.124355j 16.500002 +0.866024j\n# -20.5 -0.866025j ]\n# [-45. +0.j 9.000001 +5.196152j -3.0000002 +1.7320508j\n# 9. -0.j ]\n# [ 4.499999 +19.918583j 3.5 -12.99038j -11.999998 -19.05256j\n# -20.5 +0.866025j ]\n# [ -4.5 -14.722432j 11.999999 +15.588459j -1.5 -23.382687j\n# 16.499998 -12.99038j ]], shape=(6, 4), dtype=complex64)"
] | [
[
"numpy.array",
"tensorflow.enable_eager_execution",
"tensorflow.signal.rfft2d"
]
] |
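The printed transform can be sanity-checked against the sum line: for any real input, the `[0, 0]` (DC) entry of `rfft2d` equals the plain sum of all elements. A NumPy cross-check of the same matrix (NumPy stands in for TensorFlow here):

```python
import numpy as np

x = np.array([[6., 5., 0., 2., 4., 1.],
              [4., 2., 8., 5., 6., 8.],
              [0., 0., 4., 2., 0., 2.],
              [2., 9., 9., 9., 1., 6.],
              [3., 0., 9., 4., 6., 6.],
              [7., 3., 4., 7., 9., 0.]])
xfft = np.fft.rfft2(x)
assert np.isclose(xfft[0, 0].real, x.sum())  # both are 153.0, as printed above
```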
antoine-spahr/Label-Efficient-Volumetric-Deep-Semantic-Segmentation-of-ICH | [
"61e74a6188fe82843085e87da7d9c4ec7bdbf85e"
] | [
"code/scripts/AE/AD_AE_scripts.py"
] | [
"\"\"\"\nauthor: Antoine Spahr\n\ndate : 01.03.2021\n\n----------\n\nTO DO :\n\"\"\"\nimport sys\nsys.path.append('../../')\nimport click\nimport os\nimport logging\nimport json\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.cuda\nimport numpy as np\nimport pandas as pd\nimport skimage.io as io\nimport skimage.filters\nfrom skimage import img_as_ubyte\nfrom skimage.exposure import rescale_intensity\nfrom sklearn.metrics import confusion_matrix, roc_auc_score\n\nfrom src.dataset.datasets import public_SegICH_Dataset2D\nfrom src.models.networks.AE_net import AE_net\nimport src.models.networks.ResNet as rn\nfrom src.utils.python_utils import AttrDict\n\[email protected]()\[email protected]('config_path', type=click.Path(exists=True))\ndef main(config_path):\n \"\"\" \"\"\"\n # load config\n cfg = AttrDict.from_json_path(config_path)\n\n # make outputs dir\n out_path = os.path.join(cfg.path.output, cfg.exp_name)\n os.makedirs(out_path, exist_ok=True)\n\n # initialize seed\n if cfg.seed != -1:\n random.seed(cfg.seed)\n np.random.seed(cfg.seed)\n torch.manual_seed(cfg.seed)\n torch.cuda.manual_seed(cfg.seed)\n torch.cuda.manual_seed_all(cfg.seed)\n torch.backends.cudnn.deterministic = True\n\n # initialize logger\n logger = initialize_logger(os.path.join(out_path, 'log.txt'))\n logger.info(f\"Experiment : {cfg.exp_name}\")\n\n # set device\n if cfg.device:\n cfg.device = torch.device(cfg.device)\n else:\n cfg.device = torch.device(f'cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n logger.info(f\"Device set to {cfg.device}.\")\n\n # get Dataset\n data_info_df = pd.read_csv(os.path.join(cfg.path.data, 'ct_info.csv'), index_col=0)\n dataset = public_SegICH_Dataset2D(data_info_df, cfg.path.data,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.data.augmentation.items()],\n output_size=cfg.data.size, window=(cfg.data.win_center, cfg.data.win_width))\n\n # load inpainting model\n cfg_ae = AttrDict.from_json_path(cfg.ae_cfg_path)\n ae_net = AE_net(**cfg_ae.net)\n loaded_state_dict = torch.load(cfg.ae_model_path, map_location=cfg.device)\n ae_net.load_state_dict(loaded_state_dict)\n ae_net = ae_net.to(cfg.device).eval()\n logger.info(f\"AE model succesfully loaded from {cfg.ae_model_path}\")\n\n # Load Classifier\n if cfg.classifier_model_path is not None:\n cfg_classifier = AttrDict.from_json_path(os.path.join(cfg.classifier_model_path, 'config.json'))\n classifier = getattr(rn, cfg_classifier.net.resnet)(num_classes=cfg_classifier.net.num_classes, input_channels=cfg_classifier.net.input_channels)\n classifier_state_dict = torch.load(os.path.join(cfg.classifier_model_path, 'resnet_state_dict.pt'), map_location=cfg.device)\n classifier.load_state_dict(classifier_state_dict)\n classifier = classifier.to(cfg.device)\n classifier.eval()\n logger.info(f\"ResNet classifier model succesfully loaded from {os.path.join(cfg.classifier_model_path, 'resnet_state_dict.pt')}\")\n\n # iterate over dataset\n all_pred = []\n for i, sample in enumerate(dataset):\n # unpack data\n image, target, id, slice = sample\n logger.info(\"=\"*25 + f\" SAMPLE {i+1:04}/{len(dataset):04} - Volume {id:03} Slice {slice:03} \" + \"=\"*25)\n\n # Classify sample\n if cfg.classifier_model_path is not None:\n with torch.no_grad():\n input_clss = image.unsqueeze(0).to(cfg.device).float()\n pred_score = nn.functional.softmax(classifier(input_clss), dim=1)[:,1] # take columns of softmax of positive class as score\n pred = 1 if pred_score >= 
cfg.classification_threshold else 0\n else:\n pred = 1 # if no classifier is given, all slices are processed\n\n # process slice if classifier has detected Hemorrhage\n if pred == 1:\n logger.info(f\"ICH detected. Compute anomaly mask through AE reconstruction.\")\n # Detect anomalies using the robust approach\n ad_map, ad_mask = compute_anomaly(ae_net, image, alpha_low=cfg.alpha_low, alpha_high=cfg.alpha_high, device=cfg.device)\n logger.info(f\"{ad_mask.sum()} anomalous pixels detected.\")\n # save ad_mask\n ad_mask_fn = f\"{id}/{slice}_anomalies.bmp\"\n save_path = os.path.join(out_path, 'pred/', ad_mask_fn)\n if not os.path.isdir(os.path.dirname(save_path)):\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n io.imsave(save_path, img_as_ubyte(ad_mask), check_contrast=False)\n # save anomaly map\n ad_map_fn = f\"{id}/{slice}_map_anomalies.png\"\n save_path_map = os.path.join(out_path, 'pred/', ad_map_fn)\n io.imsave(save_path_map, img_as_ubyte(rescale_intensity(ad_map, out_range=(0.0, 1.0))), check_contrast=False)\n else:\n logger.info(f\"No ICH detected. Set the anomaly mask to zeros.\")\n ad_mask = np.zeros_like(target[0].numpy())\n ad_mask_fn, ad_map_fn = 'None', 'None'\n\n # compute confusion matrix with target ICH mask\n tn, fp, fn, tp = confusion_matrix(target[0].numpy().ravel(), ad_mask.ravel(), labels=[0,1]).ravel()\n auc = roc_auc_score(target[0].numpy().ravel(), ad_map.ravel()) if torch.any(target[0]) else 'None'\n # append to all_pred list\n all_pred.append({'id': id.item(), 'slice': slice.item(), 'label':target.max().item(), 'TP': tp, 'TN': tn, 'FP': fp, 'FN': fn, 'AUC': auc, 'ad_mask_fn': ad_mask_fn, 'ad_map_fn': ad_map_fn})\n\n # make a dataframe of all predictions\n slice_df = pd.DataFrame(all_pred)\n volume_df = slice_df[['id', 'label', 'TP', 'TN', 'FP', 'FN']].groupby('id').agg({'label':'max', 'TP':'sum', 'TN':'sum', 'FP':'sum', 'FN':'sum'})\n\n # Compute Dice and Volume Dice\n slice_df['Dice'] = (2*slice_df.TP + 1) / (2*slice_df.TP + slice_df.FP + slice_df.FN + 1)\n volume_df['Dice'] = (2*volume_df.TP + 1) / (2*volume_df.TP + volume_df.FP + volume_df.FN + 1)\n logger.info(f\"Mean slice dice : {slice_df.Dice.mean(axis=0):.3f}\")\n logger.info(f\"Mean volume dice : {volume_df.Dice.mean(axis=0):.3f}\")\n logger.info(f\"Mean positive slice AUC {slice_df[slice_df.label == 1].AUC.mean(axis=0):.3f}\")\n\n # Save Scores and Config\n slice_df.to_csv(os.path.join(out_path, 'slice_predictions.csv'))\n logger.info(f\"Slice prediction csv saved at {os.path.join(out_path, 'slice_predictions.csv')}\")\n volume_df.to_csv(os.path.join(out_path, 'volume_predictions.csv'))\n logger.info(f\"Volume prediction csv saved at {os.path.join(out_path, 'volume_predictions.csv')}\")\n cfg.device = str(cfg.device)\n with open(os.path.join(out_path, 'config.json'), 'w') as f:\n json.dump(cfg, f)\n logger.info(f\"Config file saved at {os.path.join(out_path, 'config.json')}\")\n\ndef compute_anomaly(ae_net, im, alpha_low=1.5, alpha_high=3.0, device='cuda'):\n \"\"\"\n Compute anomaly map and mask.\n ----------\n INPUT\n |---- ae_net (nn.Module) trained AE network.\n |---- im (torch.tensor) input image with dimension (C x H x W).\n |---- alpha_low (float) the lower threshold as fraction of IQR : t_low = q75(err) + alpha_low * IQR(err).\n |---- alpha_high (float) the higher threshold as fraction of IQR : t_high = q75(err) + alpha_high * IQR(err).\n |---- device (str) device to work on.\n OUTPUT\n |---- ad_map (np.array) the reconstruction error map.\n |---- ad_mask (np.array) the thresholded error 
map = anomaly mask.\n \"\"\"\n with torch.no_grad():\n im = im.unsqueeze(0).to(device).float()\n rec = ae_net(im)\n ad_map = torch.abs(im - rec).squeeze().cpu().numpy()\n # thresholding\n IQR = (np.quantile(ad_map, 0.75) - np.quantile(ad_map, 0.25))\n t_high = np.quantile(ad_map, 0.75) + alpha_high * IQR\n t_low = np.quantile(ad_map, 0.75) + alpha_low * IQR\n ad_mask = skimage.filters.apply_hysteresis_threshold(ad_map, t_low, t_high)\n\n return ad_map, ad_mask\n\ndef initialize_logger(logger_fn):\n \"\"\"\n Initialize a logger with the given file name. It will start a new logger.\n \"\"\"\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger()\n try:\n logger.handlers[1].stream.close()\n logger.removeHandler(logger.handlers[1])\n except IndexError:\n pass\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(logger_fn)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(logging.Formatter('%(asctime)s | %(levelname)s | %(message)s'))\n logger.addHandler(file_handler)\n\n return logger\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.device",
"numpy.quantile",
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"pandas.DataFrame",
"torch.no_grad",
"torch.any",
"torch.manual_seed",
"torch.abs",
"torch.cuda.is_available",
"torch.load"
]
] |
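The core of `compute_anomaly` above is an IQR-based hysteresis threshold on the reconstruction-error map. The thresholding step isolated on a synthetic map (a sketch; the 1.5/3.0 alphas mirror the function's defaults, and the high-error blob is invented):

```python
import numpy as np
import skimage.filters

rng = np.random.default_rng(0)
ad_map = rng.random((64, 64))          # stand-in for |im - rec|
ad_map[20:30, 20:30] += 2.0            # synthetic high-error region

q75 = np.quantile(ad_map, 0.75)
iqr = q75 - np.quantile(ad_map, 0.25)
t_low = q75 + 1.5 * iqr
t_high = q75 + 3.0 * iqr
# keep pixels above t_low that are connected to at least one pixel above t_high
mask = skimage.filters.apply_hysteresis_threshold(ad_map, t_low, t_high)
print(mask.sum())                      # number of pixels flagged as anomalous
```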
tranduchuy682/HarDNet-MSEG | [
"e32fb15a93e17ef953ad415fd2d51480e89678a7"
] | [
"utils/dataloader.py"
] | [
"import os\nfrom PIL import Image\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport numpy as np\nimport random\nimport torch\n\n\nclass PolypDataset(data.Dataset):\n \"\"\"\n dataloader for polyp segmentation tasks\n \"\"\"\n def __init__(self, image_root, gt_root, trainsize, augmentations):\n self.trainsize = trainsize\n self.augmentations = augmentations\n print(self.augmentations)\n self.images = [image_root + f for f in os.listdir(image_root) if f.endswith('.jpg') or f.endswith('.png')]\n self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.png')]\n self.images = sorted(self.images)\n self.gts = sorted(self.gts)\n self.filter_files()\n self.size = len(self.images)\n if self.augmentations == True:\n print('Using RandomRotation, RandomFlip')\n self.img_transform = transforms.Compose([\n transforms.RandomRotation(90, resample=False, expand=False, center=None, fill=None),\n transforms.RandomVerticalFlip(p=0.5),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.Resize((self.trainsize, self.trainsize)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n self.gt_transform = transforms.Compose([\n transforms.RandomRotation(90, resample=False, expand=False, center=None, fill=None),\n transforms.RandomVerticalFlip(p=0.5),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.Resize((self.trainsize, self.trainsize)),\n transforms.ToTensor()])\n \n else:\n print('no augmentation')\n self.img_transform = transforms.Compose([\n transforms.Resize((self.trainsize, self.trainsize)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n self.gt_transform = transforms.Compose([\n transforms.Resize((self.trainsize, self.trainsize)),\n transforms.ToTensor()])\n \n\n def __getitem__(self, index):\n \n image = self.rgb_loader(self.images[index])\n gt = self.binary_loader(self.gts[index])\n \n seed = np.random.randint(2147483647) # make a seed with numpy generator \n random.seed(seed) # apply this seed to img tranfsorms\n torch.manual_seed(seed) # needed for torchvision 0.7\n if self.img_transform is not None:\n image = self.img_transform(image)\n \n random.seed(seed) # apply this seed to img tranfsorms\n torch.manual_seed(seed) # needed for torchvision 0.7\n if self.gt_transform is not None:\n gt = self.gt_transform(gt)\n return image, gt\n\n def filter_files(self):\n assert len(self.images) == len(self.gts)\n images = []\n gts = []\n for img_path, gt_path in zip(self.images, self.gts):\n img = Image.open(img_path)\n gt = Image.open(gt_path)\n if img.size == gt.size:\n images.append(img_path)\n gts.append(gt_path)\n self.images = images\n self.gts = gts\n\n def rgb_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n def binary_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n # return img.convert('1')\n return img.convert('L')\n\n def resize(self, img, gt):\n assert img.size == gt.size\n w, h = img.size\n if h < self.trainsize or w < self.trainsize:\n h = max(h, self.trainsize)\n w = max(w, self.trainsize)\n return img.resize((w, h), Image.BILINEAR), gt.resize((w, h), Image.NEAREST)\n else:\n return img, gt\n\n def __len__(self):\n return self.size\n\n\ndef get_loader(image_root, gt_root, batchsize, trainsize, shuffle=True, num_workers=4, pin_memory=True, augmentation=False):\n\n dataset = PolypDataset(image_root, gt_root, trainsize, augmentation)\n data_loader = 
data.DataLoader(dataset=dataset,\n batch_size=batchsize,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=pin_memory)\n return data_loader\n\n\nclass test_dataset:\n def __init__(self, image_root, gt_root, testsize):\n self.testsize = testsize\n self.images = [image_root + f for f in os.listdir(image_root) if f.endswith('.jpg') or f.endswith('.png')]\n self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.tif') or f.endswith('.png')]\n self.images = sorted(self.images)\n self.gts = sorted(self.gts)\n self.transform = transforms.Compose([\n transforms.Resize((self.testsize, self.testsize)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n self.gt_transform = transforms.ToTensor()\n self.size = len(self.images)\n self.index = 0\n\n def load_data(self):\n image = self.rgb_loader(self.images[self.index])\n image = self.transform(image).unsqueeze(0)\n gt = self.binary_loader(self.gts[self.index])\n name = self.images[self.index].split('/')[-1]\n if name.endswith('.jpg'):\n name = name.split('.jpg')[0] + '.png'\n self.index += 1\n return image, gt, name\n\n def rgb_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n def binary_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('L')\n"
] | [
[
"torch.manual_seed",
"numpy.random.randint",
"torch.utils.data.DataLoader"
]
] |
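`__getitem__` above keeps the random image and mask augmentations aligned by re-seeding Python's and torch's RNGs with the same value before each transform call. The trick isolated (a sketch; blank PIL images stand in for real samples):

```python
import random
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image

flip = transforms.RandomHorizontalFlip(p=0.5)
img = Image.new('RGB', (32, 32))
gt = Image.new('L', (32, 32))

seed = np.random.randint(2147483647)
random.seed(seed); torch.manual_seed(seed)
img_out = flip(img)
random.seed(seed); torch.manual_seed(seed)  # same seed -> same flip decision
gt_out = flip(gt)
```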
springcoil/pandas | [
"945075ad78cef652039feb50d60092b0580604e6"
] | [
"pandas/core/config_init.py"
] | [
"\"\"\"\nThis module is imported from the pandas package __init__.py file\nin order to ensure that the core.config options registered here will\nbe available as soon as the user loads the package. if register_option\nis invoked inside specific modules, they will not be registered until that\nmodule is imported, which may or may not be a problem.\n\nIf you need to make sure options are available even before a certain\nmodule is imported, register them here rather then in the module.\n\n\"\"\"\n\nimport pandas.core.config as cf\nfrom pandas.core.config import (is_int, is_bool, is_text, is_float,\n is_instance_factory, is_one_of_factory,\n get_default_val)\nfrom pandas.core.format import detect_console_encoding\n\n\n#\n# options from the \"display\" namespace\n\npc_precision_doc = \"\"\"\n: int\n Floating point output precision (number of significant digits). This is\n only a suggestion\n\"\"\"\n\npc_colspace_doc = \"\"\"\n: int\n Default space for DataFrame columns.\n\"\"\"\n\npc_max_rows_doc = \"\"\"\n: int\n If max_rows is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 and pandas will auto-detect\n the height of the terminal and print a truncated object which fits\n the screen height. The IPython notebook, IPython qtconsole, or\n IDLE do not run in a terminal and hence it is not possible to do\n correct auto-detection.\n\"\"\"\n\npc_max_cols_doc = \"\"\"\n: int\n If max_cols is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 and pandas will auto-detect\n the width of the terminal and print a truncated object which fits\n the screen width. 
The IPython notebook, IPython qtconsole, or IDLE\n do not run in a terminal and hence it is not possible to do\n correct auto-detection.\n\"\"\"\n\npc_max_categories_doc = \"\"\"\n: int\n This sets the maximum number of categories pandas should output when printing\n out a `Categorical` or a Series of dtype \"category\".\n\"\"\"\n\npc_max_info_cols_doc = \"\"\"\n: int\n max_info_columns is used in DataFrame.info method to decide if\n per column information will be printed.\n\"\"\"\n\npc_nb_repr_h_doc = \"\"\"\n: boolean\n When True, IPython notebook will use html representation for\n pandas objects (if it is available).\n\"\"\"\n\npc_date_dayfirst_doc = \"\"\"\n: boolean\n When True, prints and parses dates with the day first, eg 20/01/2005\n\"\"\"\n\npc_date_yearfirst_doc = \"\"\"\n: boolean\n When True, prints and parses dates with the year first, eg 2005/01/20\n\"\"\"\n\npc_pprint_nest_depth = \"\"\"\n: int\n Controls the number of nested levels to process when pretty-printing\n\"\"\"\n\npc_multi_sparse_doc = \"\"\"\n: boolean\n \"sparsify\" MultiIndex display (don't display repeated\n elements in outer levels within groups)\n\"\"\"\n\npc_encoding_doc = \"\"\"\n: str/unicode\n Defaults to the detected encoding of the console.\n Specifies the encoding to be used for strings returned by to_string,\n these are generally strings meant to be displayed on the console.\n\"\"\"\n\nfloat_format_doc = \"\"\"\n: callable\n The callable should accept a floating point number and return\n a string with the desired format of the number. This is used\n in some places like SeriesFormatter.\n See core.format.EngFormatter for an example.\n\"\"\"\n\nmax_colwidth_doc = \"\"\"\n: int\n The maximum width in characters of a column in the repr of\n a pandas data structure. When the column overflows, a \"...\"\n placeholder is embedded in the output.\n\"\"\"\n\ncolheader_justify_doc = \"\"\"\n: 'left'/'right'\n Controls the justification of column headers. used by DataFrameFormatter.\n\"\"\"\n\npc_expand_repr_doc = \"\"\"\n: boolean\n Whether to print out the full DataFrame repr for wide DataFrames across\n multiple lines, `max_columns` is still respected, but the output will\n wrap-around across multiple \"pages\" if its width exceeds `display.width`.\n\"\"\"\n\npc_show_dimensions_doc = \"\"\"\n: boolean or 'truncate'\n Whether to print out dimensions at the end of DataFrame repr.\n If 'truncate' is specified, only print out the dimensions if the\n frame is truncated (e.g. not display all rows and/or columns)\n\"\"\"\n\npc_line_width_doc = \"\"\"\n: int\n Deprecated.\n\"\"\"\n\npc_line_width_deprecation_warning = \"\"\"\\\nline_width has been deprecated, use display.width instead (currently both are\nidentical)\n\"\"\"\n\npc_height_deprecation_warning = \"\"\"\\\nheight has been deprecated.\n\"\"\"\n\npc_width_doc = \"\"\"\n: int\n Width of the display in characters. 
In case python/IPython is running in\n a terminal this can be set to None and pandas will correctly auto-detect\n the width.\n Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a\n terminal and hence it is not possible to correctly detect the width.\n\"\"\"\n\npc_height_doc = \"\"\"\n: int\n Deprecated.\n\"\"\"\n\npc_chop_threshold_doc = \"\"\"\n: float or None\n If set to a float value, all float values smaller than the given threshold\n will be displayed as exactly 0 by repr and friends.\n\"\"\"\n\npc_max_seq_items = \"\"\"\n: int or None\n When pretty-printing a long sequence, no more than `max_seq_items`\n will be printed. If items are omitted, they will be denoted by the\n addition of \"...\" to the resulting string.\n\n If set to None, the number of items to be printed is unlimited.\n\"\"\"\n\npc_max_info_rows_doc = \"\"\"\n: int or None\n df.info() will usually show null-counts for each column.\n For large frames this can be quite slow. max_info_rows and max_info_cols\n limit this null check only to frames with smaller dimensions than specified.\n\"\"\"\n\npc_large_repr_doc = \"\"\"\n: 'truncate'/'info'\n For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can\n show a truncated table (the default from 0.13), or switch to the view from\n df.info() (the behaviour in earlier versions of pandas).\n\"\"\"\n\npc_mpl_style_doc = \"\"\"\n: bool\n Setting this to 'default' will modify the rcParams used by matplotlib\n to give plots a more pleasing visual style by default.\n Setting this to None/False restores the values to their initial value.\n\"\"\"\n\npc_memory_usage_doc = \"\"\"\n: bool or None\n This specifies if the memory usage of a DataFrame should be displayed when\n df.info() is called.\n\"\"\"\n\nstyle_backup = dict()\n\n\ndef mpl_style_cb(key):\n import sys\n from pandas.tools.plotting import mpl_stylesheet\n global style_backup\n\n val = cf.get_option(key)\n\n if 'matplotlib' not in sys.modules.keys():\n if not(val): # starting up, we get reset to None\n return val\n raise Exception(\"matplotlib has not been imported. 
aborting\")\n\n import matplotlib.pyplot as plt\n\n if val == 'default':\n style_backup = dict([(k, plt.rcParams[k]) for k in mpl_stylesheet])\n plt.rcParams.update(mpl_stylesheet)\n elif not val:\n if style_backup:\n plt.rcParams.update(style_backup)\n\n return val\n\nwith cf.config_prefix('display'):\n cf.register_option('precision', 6, pc_precision_doc, validator=is_int)\n cf.register_option('float_format', None, float_format_doc)\n cf.register_option('column_space', 12, validator=is_int)\n cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,\n validator=is_instance_factory((int, type(None))))\n cf.register_option('max_rows', 60, pc_max_rows_doc,\n validator=is_instance_factory([type(None), int]))\n cf.register_option('max_categories', 8, pc_max_categories_doc, validator=is_int)\n cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)\n cf.register_option('max_columns', 20, pc_max_cols_doc,\n validator=is_instance_factory([type(None), int]))\n cf.register_option('large_repr', 'truncate', pc_large_repr_doc,\n validator=is_one_of_factory(['truncate', 'info']))\n cf.register_option('max_info_columns', 100, pc_max_info_cols_doc,\n validator=is_int)\n cf.register_option('colheader_justify', 'right', colheader_justify_doc,\n validator=is_text)\n cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,\n validator=is_bool)\n cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,\n validator=is_bool)\n cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,\n validator=is_bool)\n cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,\n validator=is_int)\n cf.register_option('multi_sparse', True, pc_multi_sparse_doc,\n validator=is_bool)\n cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,\n validator=is_text)\n cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)\n cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc,\n validator=is_one_of_factory([True, False, 'truncate']))\n cf.register_option('chop_threshold', None, pc_chop_threshold_doc)\n cf.register_option('max_seq_items', 100, pc_max_seq_items)\n cf.register_option('mpl_style', None, pc_mpl_style_doc,\n validator=is_one_of_factory([None, False, 'default']),\n cb=mpl_style_cb)\n cf.register_option('height', 60, pc_height_doc,\n validator=is_instance_factory([type(None), int]))\n cf.register_option('width', 80, pc_width_doc,\n validator=is_instance_factory([type(None), int]))\n # redirected to width, make defval identical\n cf.register_option('line_width', get_default_val('display.width'),\n pc_line_width_doc)\n cf.register_option('memory_usage', True, pc_memory_usage_doc,\n validator=is_instance_factory([type(None), bool]))\n\ncf.deprecate_option('display.line_width',\n msg=pc_line_width_deprecation_warning,\n rkey='display.width')\n\ncf.deprecate_option('display.height',\n msg=pc_height_deprecation_warning,\n rkey='display.max_rows')\n\ntc_sim_interactive_doc = \"\"\"\n: boolean\n Whether to simulate interactive mode for purposes of testing\n\"\"\"\nwith cf.config_prefix('mode'):\n cf.register_option('sim_interactive', False, tc_sim_interactive_doc)\n\nuse_inf_as_null_doc = \"\"\"\n: boolean\n True means treat None, NaN, INF, -INF as null (old way),\n False means None and NaN are null, but INF, -INF are not null\n (new way).\n\"\"\"\n\n# We don't want to start importing everything at the global context level\n# or we'll hit circular deps.\n\n\ndef use_inf_as_null_cb(key):\n from pandas.core.common import 
_use_inf_as_null\n _use_inf_as_null(key)\n\nwith cf.config_prefix('mode'):\n cf.register_option('use_inf_as_null', False, use_inf_as_null_doc,\n cb=use_inf_as_null_cb)\n\n\n# user warnings\nchained_assignment = \"\"\"\n: string\n Raise an exception, warn, or no action if trying to use chained assignment,\n The default is warn\n\"\"\"\n\nwith cf.config_prefix('mode'):\n cf.register_option('chained_assignment', 'warn', chained_assignment,\n validator=is_one_of_factory([None, 'warn', 'raise']))\n\n\n# Set up the io.excel specific configuration.\nwriter_engine_doc = \"\"\"\n: string\n The default Excel writer engine for '{ext}' files. Available options:\n '{default}' (the default){others}.\n\"\"\"\n\nwith cf.config_prefix('io.excel'):\n # going forward, will be additional writers\n for ext, options in [('xls', ['xlwt']),\n ('xlsm', ['openpyxl'])]:\n default = options.pop(0)\n if options:\n options = \" \" + \", \".join(options)\n else:\n options = \"\"\n doc = writer_engine_doc.format(ext=ext, default=default,\n others=options)\n cf.register_option(ext + '.writer', default, doc, validator=str)\n\n def _register_xlsx(engine, other):\n cf.register_option('xlsx.writer', engine,\n writer_engine_doc.format(ext='xlsx',\n default=engine,\n others=\", '%s'\" % other),\n validator=str)\n\n try:\n # better memory footprint\n import xlsxwriter\n _register_xlsx('xlsxwriter', 'openpyxl')\n except ImportError:\n # fallback\n _register_xlsx('openpyxl', 'xlsxwriter')\n"
] | [
[
"matplotlib.pyplot.rcParams.update",
"pandas.core.config.is_one_of_factory",
"pandas.core.config.config_prefix",
"pandas.core.format.detect_console_encoding",
"pandas.core.config.register_option",
"pandas.core.config.get_default_val",
"pandas.core.common._use_inf_as_null",
"pandas.core.config.deprecate_option",
"pandas.core.config.get_option"
]
] |
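At runtime the options registered above are reached through the public accessor trio that `pandas.core.config` backs. A sketch of the round trip for one of the options defined in this file:

```python
import pandas as pd

pd.set_option('display.max_rows', 10)     # checked against the validator registered above
print(pd.get_option('display.max_rows'))  # -> 10
pd.reset_option('display.max_rows')       # back to the default of 60 set above
```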
shriya999/MultiStage-ActionDetection | [
"bb74152536d368b9fcb9e430f551ea64de00be4b"
] | [
"models/rcnn_predictor.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\r\nimport logging\r\nfrom typing import Dict, List, Tuple, Union\r\nimport torch\r\nfrom fvcore.nn import giou_loss, smooth_l1_loss\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\nfrom class_ids import coco_obj_class_to_id\r\nfrom class_ids import coco_obj_id_to_class\r\nfrom class_ids import coco_obj_to_actev_obj\r\n\r\nfrom detectron2.config import configurable\r\nfrom detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple\r\nfrom detectron2.modeling.box_regression import Box2BoxTransform\r\nfrom detectron2.structures import Boxes, Instances\r\nfrom detectron2.utils.events import get_event_storage\r\nfrom detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers\r\n\r\n\r\nclass RCNNPredictor(FastRCNNOutputLayers):\r\n @configurable\r\n def __init__(\r\n self,\r\n input_shape: ShapeSpec,\r\n *,\r\n box2box_transform,\r\n num_classes: int,\r\n test_score_thresh: float = 0.0,\r\n test_nms_thresh: float = 0.5,\r\n test_topk_per_image: int = 300,\r\n cls_agnostic_bbox_reg: bool = False,\r\n smooth_l1_beta: float = 0.0,\r\n box_reg_loss_type: str = \"smooth_l1\",\r\n loss_weight: Union[float, Dict[str, float]] = 1.0,\r\n ):\r\n super().__init__(\r\n input_shape=input_shape,\r\n box2box_transform=box2box_transform,\r\n num_classes=num_classes,\r\n test_score_thresh=test_score_thresh,\r\n test_nms_thresh=test_nms_thresh,\r\n test_topk_per_image=test_topk_per_image,\r\n cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,\r\n smooth_l1_beta=smooth_l1_beta,\r\n box_reg_loss_type=box_reg_loss_type,\r\n loss_weight=loss_weight,\r\n )\r\n\r\n def inference(\r\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\r\n ):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were\r\n used to compute predictions. The ``proposal_boxes`` field is expected.\r\n\r\n Returns:\r\n list[Instances]: same as `fast_rcnn_inference`.\r\n list[Tensor]: same as `fast_rcnn_inference`.\r\n \"\"\"\r\n boxes = self.predict_boxes(predictions, proposals)\r\n scores = self.predict_probs(predictions, proposals)\r\n image_shapes = [x.image_size for x in proposals]\r\n\r\n return self.box_pred_inference(\r\n boxes,\r\n scores,\r\n image_shapes,\r\n self.test_score_thresh,\r\n self.test_nms_thresh,\r\n self.test_topk_per_image,\r\n )\r\n\r\n def box_pred_inference(\r\n self,\r\n boxes: List[torch.Tensor],\r\n scores: List[torch.Tensor],\r\n image_shapes: List[Tuple[int, int]],\r\n score_thresh: float,\r\n nms_thresh: float,\r\n topk_per_image: int,\r\n ):\r\n \"\"\"\r\n Call `fast_rcnn_inference_single_image` for all images.\r\n\r\n Args:\r\n boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic\r\n boxes for each image. Element i has shape (Ri, K * 4) if doing\r\n class-specific regression, or (Ri, 4) if doing class-agnostic\r\n regression, where Ri is the number of predicted objects for image i.\r\n This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.\r\n scores (list[Tensor]): A list of Tensors of predicted class scores for each image.\r\n Element i has shape (Ri, K + 1), where Ri is the number of predicted objects\r\n for image i. 
Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.\r\n image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.\r\n score_thresh (float): Only return detections with a confidence score exceeding this\r\n threshold.\r\n nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].\r\n topk_per_image (int): The number of top scoring detections to return. Set < 0 to return\r\n all detections.\r\n\r\n Returns:\r\n instances: (list[Instances]): A list of N instances, one for each image in the batch,\r\n that stores the topk most confident detections.\r\n kept_indices: (list[Tensor]): A list of 1D tensors of length N, each element indicates\r\n the corresponding boxes/scores index in [0, Ri) from the input, for image i.\r\n \"\"\"\r\n result_per_image = [\r\n self.box_pred_inference_single_image(\r\n boxes_per_image,\r\n scores_per_image,\r\n image_shape,\r\n score_thresh,\r\n nms_thresh,\r\n topk_per_image,\r\n )\r\n for scores_per_image, boxes_per_image, image_shape in zip(\r\n scores, boxes, image_shapes\r\n )\r\n ]\r\n return [x[0] for x in result_per_image], [x[1] for x in result_per_image]\r\n\r\n def box_pred_inference_single_image(\r\n self,\r\n boxes,\r\n scores,\r\n image_shape: Tuple[int, int],\r\n score_thresh: float,\r\n nms_thresh: float,\r\n topk_per_image: int,\r\n ):\r\n \"\"\"\r\n Single-image inference. Return bounding-box detection results by thresholding\r\n on scores and applying non-maximum suppression (NMS).\r\n\r\n Args:\r\n Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes\r\n per image.\r\n\r\n Returns:\r\n Same as `fast_rcnn_inference`, but for only one image.\r\n \"\"\"\r\n\r\n partial_classes = [classname for classname in coco_obj_to_actev_obj]\r\n classname2id = coco_obj_class_to_id\r\n needed_object_classids = [classname2id[name] for name in partial_classes]\r\n needed_object_classids_minus_1 = [o - 1 for o in needed_object_classids]\r\n\r\n # (N, num_class), (N, num_class - 1, 4)\r\n # -> (num_class, N), (num_class - 1, N, 4)\r\n box_logits = boxes.reshape(boxes.shape[0], boxes.shape[1] // 4, 4)\r\n label_logits_t = scores.permute(1, 0)\r\n box_logits_t = box_logits.permute(1, 0, 2)\r\n # [C + 1, N] # 1 is the BG class\r\n partial_label_logits_t = label_logits_t[[0] + needed_object_classids]\r\n # [C, N, 4]\r\n partial_box_logits_t = box_logits_t[needed_object_classids_minus_1]\r\n\r\n partial_label_logits = partial_label_logits_t.permute(1, 0)\r\n partial_box_logits = partial_box_logits_t.permute(1, 0, 2)\r\n scores = partial_label_logits\r\n boxes = partial_box_logits.reshape(partial_box_logits.shape[0], -1)\r\n\r\n valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(\r\n dim=1\r\n )\r\n if not valid_mask.all():\r\n boxes = boxes[valid_mask]\r\n scores = scores[valid_mask]\r\n\r\n scores = scores[:, :-1]\r\n num_bbox_reg_classes = boxes.shape[1] // 4\r\n # Convert to Boxes to use the `clip` function ...\r\n boxes = Boxes(boxes.reshape(-1, 4))\r\n boxes.clip(image_shape)\r\n boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4\r\n\r\n # 1. Filter results based on detection scores. It can make NMS more efficient\r\n # by filtering out low-confidence detections.\r\n filter_mask = scores > 0.0 # R x K\r\n # R' x 2. 
First column contains indices of the R predictions;\r\n # Second column contains indices of classes.\r\n filter_inds = filter_mask.nonzero()\r\n if num_bbox_reg_classes == 1:\r\n boxes = boxes[filter_inds[:, 0], 0]\r\n else:\r\n boxes = boxes[filter_mask]\r\n scores = scores[filter_mask]\r\n\r\n # 2. Apply NMS for each class independently.\r\n keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)\r\n if topk_per_image >= 0:\r\n keep = keep[:topk_per_image]\r\n boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]\r\n\r\n result = Instances(image_shape)\r\n result.pred_boxes = Boxes(boxes)\r\n result.scores = scores\r\n result.pred_classes = filter_inds[:, 1]\r\n return result, filter_inds[:, 0]\r\n"
] | [
[
"torch.isfinite"
]
] |
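The filter-then-NMS tail of `box_pred_inference_single_image` can be exercised in isolation with torchvision's `batched_nms`, which takes the same `(boxes, scores, idxs, iou_threshold)` arguments as the detectron2 layer used above (the boxes and scores here are invented):

```python
import torch
from torchvision.ops import batched_nms

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],     # overlaps box 0 with IoU ~0.68
                      [20., 20., 30., 30.]])
scores = torch.tensor([0.9, 0.8, 0.7])
classes = torch.tensor([0, 0, 1])            # suppression runs per class
keep = batched_nms(boxes, scores, classes, iou_threshold=0.5)
print(keep)                                  # -> tensor([0, 2]); box 1 is suppressed
```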
batra-mlp-lab/vln-chasing-ghosts | [
"f819aea21b94d9d3e23d9b6b9264054ee50c007b"
] | [
"tracker/evaluator.py"
] | [
"from abc import abstractmethod\n\nimport math\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom trainer import Trainer\nfrom modules.mapper import Mapper\nimport metrics\nimport utils\n\n\nclass Evaluator(Trainer):\n \"\"\" Generic class for running evaluations for different models on the goal prediction task \"\"\"\n\n def __init__(self, args=None, filepath=None, load_sim=True):\n super(Evaluator, self).__init__(args, filepath, load_sim=load_sim)\n self.init_models()\n self.load_model_weights()\n\n @abstractmethod\n def init_models(self):\n \"\"\" Initialize the model \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def load_model_weights(self):\n \"\"\" Load the pretrained model weights \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def set_models_eval(self):\n \"\"\" Set models to eval phase \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_predictions(self, seq, seq_mask, seq_lens, batch, xyzhe, simulator_next_action):\n \"\"\" Get predictions from the model\n Args:\n simulator_next_action: Function which takes next action in the simulator\n Return:\n goal_pred: FloatTensor, [N*timesteps, belief_map_y, belief_map_x]\n path_pred: FloatTensor, [N*timesteps, belief_map_y, belief_map_x]\n mask: FloatTensor with values from {0,1}, [N*timesteps, belief_map_y, belief_map_x]\n \"\"\"\n raise NotImplementedError\n\n def eval_logging(self, split, timestep_nav_error, average_nav_error, timestep_success_rate, average_success_rate, timestep_map_coverage, average_map_coverage, timestep_goal_seen, average_goal_seen):\n\n timestep_nav_error = np.array(timestep_nav_error).mean(axis=1)\n average_nav_error = np.array(average_nav_error).mean()\n timestep_success_rate = 100 * np.array(timestep_success_rate).mean(axis=1)\n average_success_rate = 100 * np.array(average_success_rate).mean()\n timestep_map_coverage = 100 * np.array(timestep_map_coverage).mean(axis=1)\n average_map_coverage = 100 * np.array(average_map_coverage).mean()\n timestep_goal_seen = 100 * np.array(timestep_goal_seen).mean(axis=1)\n average_goal_seen = 100 * np.array(average_goal_seen).mean()\n\n map_size_in_m2 = (self.args.map_range_x*self.args.gridcellsize) * (self.args.map_range_y*self.args.gridcellsize)\n timestep_map_coverage_in_m2 = (timestep_map_coverage / 100.0) * map_size_in_m2\n average_map_coverage_in_m2 = (average_map_coverage / 100.0) * map_size_in_m2\n\n result_str = \"Epoch %d Split %s scores: Nav Error=%0.4f Success Rate=%0.4f Map Coverage=%0.4f (%0.4f metres^2) Goal Seen Rate=%0.4f\" %(self.args.val_epoch, split, average_nav_error, average_success_rate, average_map_coverage, average_map_coverage_in_m2, average_goal_seen)\n print(result_str)\n\n timestep_result_str = \"\"\n for t in range(self.args.timesteps):\n r_str = \"Epoch %d Split %s Timestep: %d Scores: Nav Error=%0.4f Success Rate=%0.4f Map Coverage=%0.4f (%0.4f metres^2) Goal Seen Rate=%0.4f\" %(self.args.val_epoch, split, t, timestep_nav_error[t], timestep_success_rate[t], timestep_map_coverage[t], timestep_map_coverage_in_m2[t], timestep_goal_seen[t])\n timestep_result_str += r_str + \"\\n\"\n print(timestep_result_str)\n\n if self.visdom:\n self.visdom.line(self.args.val_epoch, average_nav_error, \"nav_error\", \"%s Avg Nav Error\" % split, \"Epochs\", \"Navigation Error\", title=\"Average Nav Error - Val Split\")\n self.visdom.line(self.args.val_epoch, average_success_rate, \"success_rate\", \"%s Avg Success Rate\" % split, \"Epochs\", \"Success Rate\", title=\"Average Success 
Rate - Val Split\")\n self.visdom.line(self.args.val_epoch, average_map_coverage, \"map_coverage\", \"%s Avg Map Coverage\" % split, \"Epochs\", \"Map Coverage\", title=\"Average Map Coverage - Val Split\")\n self.visdom.line(self.args.val_epoch, average_map_coverage_in_m2, \"map_coverage_in_metres\", \"%s Avg Map Coverage in Metres^2\" % split, \"Epochs\", \"Map Coverage in Metres^2\", title=\"Average Map Coverage (in metres^2)- Val Split\")\n self.visdom.line(self.args.val_epoch, average_goal_seen, \"goal_seen\", \"%s Avg Goal Seen Rate\" % split, \"Epochs\", \"Goal Seen Rate\", title=\"Average Goal Seen Rate - Val Split\")\n for t in range(self.args.timesteps):\n self.visdom.line(self.args.val_epoch, timestep_nav_error[t], \"nav_error-t%d\" % t, \"%s Nav Error-t%d\" % (split, t), \"Epochs\", \"Navigation Error\", title=\"Nav Error Timestep %d - Val Split\" % t)\n self.visdom.line(self.args.val_epoch, timestep_success_rate[t], \"success_rate-t%d\" % t, \"%s Success Rate-t%d\" % (split, t), \"Epochs\", \"Success Rate\", title=\"Success Rate Timestep %d - Val Split\" % t)\n self.visdom.line(self.args.val_epoch, timestep_map_coverage[t], \"map_coverage-t%d\" % t, \"%s Map Coverage-t%d\" % (split, t), \"Epochs\", \"Map Coverage\", title=\"Map Coverage Timestep %d - Val Split\" % t)\n self.visdom.line(self.args.val_epoch, timestep_map_coverage_in_m2[t], \"map_coverage-metres-t%d\" % t, \"%s Map Coverage (in Metres^2)-t%d\" % (split, t), \"Epochs\", \"Map Coverage in Metres^2\", title=\"Map Coverage (in Metres^2) Timestep %d - Val Split\" % t)\n self.visdom.line(self.args.val_epoch, timestep_goal_seen[t], \"goal_seen_rate-t%d\" % t, \"%s Goal Seen Rate-t%d\" % (split, t), \"Epochs\", \"Goal Seen Rate\", title=\"Goal Seen Rate Timestep %d - Val Split\" % t)\n self.visdom.text(result_str)\n self.visdom.text(timestep_result_str.replace(\"\\n\", \"<br>\"))\n self.visdom.save()\n\n def simulator_next_action(self):\n self.sim.takePseudoSupervisedAction(self.dataloader.get_supervision)\n\n def evaluate(self, split):\n\n self.set_models_eval()\n\n with torch.no_grad():\n if split == \"val_seen\":\n self.dataloader = self.valseendata\n elif split == \"val_unseen\":\n self.dataloader = self.valunseendata\n\n iterations = int(math.ceil(len(self.dataloader.data) / float(self.args.batch_size)))\n last_batch_valid_idx = len(self.dataloader.data) - (iterations-1)*self.args.batch_size\n\n timestep_nav_error = [[] for i in range(self.args.timesteps)]\n timestep_success_rate = [[] for i in range(self.args.timesteps)]\n average_nav_error = []\n average_success_rate = []\n\n timestep_map_coverage = [[] for i in range(self.args.timesteps)]\n timestep_goal_seen = [[] for i in range(self.args.timesteps)]\n average_map_coverage = []\n average_goal_seen = []\n\n mapper = Mapper(self.args).to(self.args.device)\n\n for it in tqdm(range(iterations), desc=\"Evaluation Progress for %s split\" % split):\n\n valid_batch_len = last_batch_valid_idx if it == iterations - 1 else self.args.batch_size\n\n seq, seq_mask, seq_lens, batch = self.dataloader.get_batch()\n\n self.sim.newEpisode(batch)\n self.floorplan_images = utils.get_floorplan_images(self.sim.getState(),\n self.floorplan, self.args.map_range_x, self.args.map_range_y,\n scale_factor=self.args.debug_scale)\n\n xyzhe = self.sim.getXYZHE()\n mapper.init_map(xyzhe)\n goal_pos = self.dataloader.get_goal_coords_on_map_grid(mapper.map_center)\n\n pred_goal_map, pred_path_map, mask = self.get_predictions(seq, seq_mask, seq_lens, batch, xyzhe,\n self.simulator_next_action)\n\n 
# shape: (batch_size, 2)\n self.goal_map = mapper.heatmap(self.dataloader.goal_coords(), self.args.goal_heatmap_sigma)\n self.path_map = mapper.heatmap(self.dataloader.path_coords(), self.args.path_heatmap_sigma)\n\n if self.args.multi_maps:\n # shape: (batch_size*timesteps, 2)\n self.goal_map = self.goal_map.unsqueeze(1).expand(-1, self.args.timesteps, -1, -1).flatten(0, 1)\n self.path_map = self.path_map.unsqueeze(1).expand(-1, self.args.timesteps, -1, -1).flatten(0, 1)\n\n # shape: (batch_size*timesteps, 2)\n goal_pred_argmax = utils.compute_argmax(pred_goal_map).flip(1)\n\n # shape: (batch_size, timesteps, 2)\n goal_pred_argmax = goal_pred_argmax.reshape(-1, self.args.timesteps, 2)\n goal_pred_xy = self.dataloader.convert_map_pixels_to_xy_coords(goal_pred_argmax,\n mapper.map_center, multi_timestep_input=True)\n\n # shape: (batch_size, 2)\n goal_target_xy = self.dataloader.goal_coords()\n # shape: (batch_size, timesteps, 2)\n goal_target_xy = goal_target_xy.unsqueeze(1).expand(-1, self.args.timesteps, -1)\n\n # shape: (batch_size, timesteps, map_range_y, map_range_x)\n b_t_mask = mask.reshape(-1, self.args.timesteps, self.args.map_range_y, self.args.map_range_x)\n\n batch_timestep_map_coverage, batch_average_map_coverage = \\\n metrics.map_coverage(b_t_mask)\n batch_timestep_goal_seen, batch_average_goal_seen = \\\n metrics.goal_seen_rate(b_t_mask, goal_pos, self.args)\n batch_timestep_nav_error, batch_average_nav_error = \\\n metrics.nav_error(goal_target_xy, goal_pred_xy, self.args)\n batch_timestep_success_rate, batch_average_success_rate = \\\n metrics.success_rate(goal_target_xy, goal_pred_xy, self.args)\n\n for n in range(valid_batch_len):\n average_nav_error.append(batch_average_nav_error[n].item())\n average_success_rate.append(batch_average_success_rate[n].item())\n average_map_coverage.append(batch_average_map_coverage[n].item())\n average_goal_seen.append(batch_average_goal_seen[n].item())\n\n for t in range(batch_timestep_map_coverage.shape[1]):\n timestep_nav_error[t].append(batch_timestep_nav_error[n][t].item())\n timestep_success_rate[t].append(batch_timestep_success_rate[n][t].item())\n timestep_map_coverage[t].append(batch_timestep_map_coverage[n][t].item())\n timestep_goal_seen[t].append(batch_timestep_goal_seen[n][t].item())\n\n self.eval_logging(split, timestep_nav_error, average_nav_error, timestep_success_rate, average_success_rate, timestep_map_coverage, average_map_coverage, timestep_goal_seen, average_goal_seen)\n"
] | [
[
"numpy.array",
"torch.no_grad"
]
] |
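The nested lists that `evaluate` above accumulates are indexed `[timestep][episode]`, so `eval_logging`'s `mean(axis=1)` averages over episodes while keeping one value per timestep, and the `100 *` factor turns rates into percentages. A toy check with invented values:

```python
import numpy as np

timestep_success = [[1.0, 0.0, 1.0],   # timestep 0 across 3 episodes
                    [1.0, 1.0, 1.0]]   # timestep 1 across 3 episodes
per_timestep = 100 * np.array(timestep_success).mean(axis=1)
print(per_timestep)                    # -> approximately [66.667, 100.0]
```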
LoganAMorrison/Hazma | [
"e9612729767ff48d5ce50633393f81ee021242d2",
"e9612729767ff48d5ce50633393f81ee021242d2"
] | [
"test/vector_mediator/herwig4dm/PhiPi.py",
"hazma/rh_neutrino/_rh_neutrino_spectra.py"
] | [
"# Libraries to load\nimport cmath,math,Resonance,alpha,os,FPhiPi\nimport matplotlib.pyplot as plt\n\n# set DM parameters\n# DM to mediator coupling\ngDM = 1.\n#DM mass\nmDM = (FPhiPi.mpi+FPhiPi.mPhi)/2.\n# mediator mass\nmMed = 10\n# mediator width, own width.py will be added with all channels (work in progress)\n# wMed\n\n\n#energy range\nlow_lim = FPhiPi.mPhi+FPhiPi.mpi#1.1\nupp_lim = 2.0\nstep_size = 0.005\n\nxSM=[]\nySM=[]\nxDP=[]\nyDP=[]\nxBL=[]\nyBL=[]\n\nenergy = low_lim\nwhile energy < upp_lim :\n xSM.append(energy)\n ySM.append(FPhiPi.sigmaSMPhiPi(energy**2))\n energy+=step_size\n\nwith open('txt_files/PhiPi.txt', 'w') as txtfile:\n for i in range(0,len(xSM)):\n txtfile.write(\"%s , %s\\n\" %(xSM[i],ySM[i]))\n txtfile.close()\n\n\n# couplings of mediator to quarks\ncMed_u = 2./3.\ncMed_d = -1./3.\ncMed_s = -1./3.\nwhile mDM < upp_lim:\n energy = 2*mDM+0.0001\n Q2 = energy**2\n xDP.append(energy)\n wMed = FPhiPi.GammaDM(mMed)\n FPhiPi.resetParameters(gDM,mDM,mMed,wMed,cMed_u,cMed_d,cMed_s)\n yDP.append(FPhiPi.sigmaDMPhiPi(Q2))\n mDM+=step_size\n\n# couplings of mediator to quarks\ncMed_u = 1./3.\ncMed_d = 1./3.\ncMed_s = 1./3.\nmDM = (FPhiPi.mpi+FPhiPi.mPhi)/2.\nwhile mDM < upp_lim:\n energy = 2*mDM+0.0001\n Q2 = energy**2\n xBL.append(energy)\n wMed = FPhiPi.GammaDM(mMed)\n FPhiPi.resetParameters(gDM,mDM,mMed,wMed,cMed_u,cMed_d,cMed_s)\n yBL.append(FPhiPi.sigmaDMPhiPi(Q2))\n mDM+=step_size\n\n\nplt.plot(xSM,ySM,color=\"blue\",label=\"SM\")\nplt.plot(xDP,yDP,color=\"red\",label=\"DP\")\nplt.plot(xBL,yBL,color=\"green\",label=\"BL\")\nplt.xlabel(\"$\\\\sqrt{s}$/GeV\")\nplt.ylabel(\"$\\\\sigma$/nb\")\nplt.title(\"$\\\\phi\\\\pi$ final state\")\nplt.xlim(low_lim,2.)\nplt.legend()\nplt.savefig(\"plots/PhiPi.pdf\")\nplt.clf()\nplt.cla()\n",
"\"\"\"\nThis file contains the decay spectra from a right-handed neutrino at rest.\n\"\"\"\nimport numpy as np\n\nfrom hazma.decay import (\n neutral_pion as decay_pi0,\n charged_pion as decay_pi,\n charged_kaon as decay_k,\n muon as decay_mu,\n)\n\nfrom hazma.parameters import (\n GF,\n Vud,\n lepton_masses,\n sin_theta_weak as sw,\n cos_theta_weak as cw,\n neutral_pion_mass as mpi0,\n charged_pion_mass as mpi,\n charged_kaon_mass as mk,\n)\nfrom hazma.gamma_ray import gamma_ray_decay\n\n\ndef dnde_nu_pi0(self, photon_energies, spectrum_type=\"all\"):\n \"\"\"\n Compute the gamma-ray spectrum from the decay of a right-handed\n neutrino into a neutral pion and neutrino.\n\n Parameters\n ----------\n photon_energies: float or np.array\n Photon energies where the spectrum should be computed.\n spectrum_type: str, optional\n String specifying which spectrum component should be computed.\n Options are: \"all\", \"decay\" or \"fsr\"\n \"\"\"\n if self.mx < mpi0:\n if hasattr(photon_energies, \"__len__\"):\n return np.zeros_like(photon_energies)\n else:\n return 0.0\n\n if spectrum_type == \"all\":\n return self.dnde_nu_pi0(photon_energies, \"fsr\") + self.dnde_nu_pi0(\n photon_energies, \"decay\"\n )\n elif spectrum_type == \"fsr\":\n if hasattr(photon_energies, \"__len__\"):\n return np.array([0.0 for _ in photon_energies])\n else:\n return 0.0\n elif spectrum_type == \"decay\":\n epi = (self.mx ** 2 + mpi0 ** 2) / (2.0 * self.mx)\n return decay_pi0(photon_energies, epi)\n else:\n raise ValueError(\n \"Type {} is invalid. Use 'all', 'fsr' or 'decay'\".format(spectrum_type)\n )\n\n\ndef dnde_pi_l(self, photon_energies, spectrum_type=\"all\"):\n \"\"\"\n Compute the gamma-ray spectrum from the decay of a right-handed\n neutrino into a charged pion and lepton.\n\n Parameters\n ----------\n photon_energies: float or np.array\n Photon energies where the spectrum should be computed.\n spectrum_type: str, optional\n String specifying which spectrum component should be computed.\n Options are: \"all\", \"decay\" or \"fsr\"\n \"\"\"\n if self.mx < mpi + self.ml:\n if hasattr(photon_energies, \"__len__\"):\n return np.zeros_like(photon_energies)\n else:\n return 0.0\n\n if spectrum_type == \"all\":\n return self.dnde_pi_l(photon_energies, \"fsr\") + self.dnde_pi_l(\n photon_energies, \"decay\"\n )\n elif spectrum_type == \"fsr\":\n return self.dnde_pi_l_fsr(photon_energies)\n elif spectrum_type == \"decay\":\n epi = (self.mx ** 2 + mpi ** 2 - self.ml ** 2) / (2.0 * self.mx)\n el = (self.mx ** 2 - mpi ** 2 + self.ml ** 2) / (2.0 * self.mx)\n if self.lepton == \"e\":\n return decay_pi(photon_energies, epi)\n elif self.lepton == \"mu\":\n return decay_pi(photon_energies, epi) + decay_mu(photon_energies, el)\n else:\n raise ValueError(\n \"Type {} is invalid. 
Use 'all', 'fsr' or 'decay'\".format(spectrum_type)\n )\n\n\ndef dnde_k_l(self, photon_energies, spectrum_type=\"all\"):\n \"\"\"\n Compute the gamma-ray spectrum from the decay of a right-handed\n neutrino into a charged kaon and lepton.\n\n Parameters\n ----------\n photon_energies: float or np.array\n Photon energies where the spectrum should be computed.\n spectrum_type: str, optional\n String specifying which spectrum component should be computed.\n Options are: \"all\", \"decay\" or \"fsr\"\n \"\"\"\n if self.mx < mk + self.ml:\n if hasattr(photon_energies, \"__len__\"):\n return np.zeros_like(photon_energies)\n else:\n return 0.0\n\n if spectrum_type == \"all\":\n return self.dnde_k_l(photon_energies, \"fsr\") + self.dnde_k_l(\n photon_energies, \"decay\"\n )\n elif spectrum_type == \"fsr\":\n return self.dnde_k_l_fsr(photon_energies)\n elif spectrum_type == \"decay\":\n ek = (self.mx ** 2 + mk ** 2 - self.ml ** 2) / (2.0 * self.mx)\n el = (self.mx ** 2 - mk ** 2 + self.ml ** 2) / (2.0 * self.mx)\n if self.lepton == \"e\":\n return decay_k(photon_energies, ek)\n elif self.lepton == \"mu\":\n return decay_k(photon_energies, ek) + decay_mu(photon_energies, el)\n else:\n raise ValueError(\n \"Type {} is invalid. Use 'all', 'fsr' or 'decay'\".format(spectrum_type)\n )\n\n\ndef __lnorm_sqr(p):\n return p[0] ** 2 - p[1] ** 2 - p[2] ** 2 - p[3] ** 2\n\n\ndef __msqrd_nu_l_l(momenta, mx, tmix, ml):\n s = __lnorm_sqr(momenta[0] + momenta[2])\n t = __lnorm_sqr(momenta[1] + momenta[2])\n return -(\n (\n GF ** 2\n * (\n 2\n * ml ** 4\n * (\n 1\n + 4 * cw ** 4\n - 4 * sw ** 2\n + 8 * sw ** 4\n + cw ** 2 * (-4 + 8 * sw ** 2)\n )\n - 2\n * ml ** 2\n * (\n -((1 - 2 * cw ** 2) ** 2 * mx ** 2)\n + (1 - 2 * cw ** 2) ** 2 * s\n + 2\n * (\n 1\n + 4 * cw ** 4\n - 4 * sw ** 2\n + 8 * sw ** 4\n + cw ** 2 * (-4 + 8 * sw ** 2)\n )\n * t\n )\n + (\n 1\n + 4 * cw ** 4\n - 4 * sw ** 2\n + 8 * sw ** 4\n + cw ** 2 * (-4 + 8 * sw ** 2)\n )\n * (s ** 2 + 2 * s * t + 2 * t ** 2 - mx ** 2 * (s + 2 * t))\n )\n * np.sin(2 * tmix) ** 2\n )\n / cw ** 4\n )\n\n\ndef __msqrd_nu_lp_lp(momenta, mx, tmix, ml):\n s = __lnorm_sqr(momenta[0] + momenta[2])\n t = __lnorm_sqr(momenta[1] + momenta[2])\n return -(\n (\n GF ** 2\n * (\n 2 * ml ** 4 * (1 - 4 * sw ** 2 + 8 * sw ** 4) * np.sin(2 * tmix) ** 2\n + (1 - 4 * sw ** 2 + 8 * sw ** 4)\n * (s ** 2 + 2 * s * t + 2 * t ** 2 - mx ** 2 * (s + 2 * t))\n * np.sin(2 * tmix) ** 2\n - 2\n * ml ** 2\n * (\n -(mx ** 2 * np.sin(2 * tmix) ** 2)\n + (s + 2 * (1 - 4 * sw ** 2 + 8 * sw ** 4) * t)\n * np.sin(2 * tmix) ** 2\n )\n )\n )\n / cw ** 4\n )\n\n\ndef __msqrd_nup_l_lp(momenta, mx, tmix, mli, mlk):\n t = __lnorm_sqr(momenta[1] + momenta[2])\n return (\n -16\n * GF ** 2\n * (-(mli ** 2) + t)\n * (-(mlk ** 2) - mx ** 2 + t)\n * np.sin(tmix) ** 2\n )\n\n\ndef __dnde_nu_l_l_decay(self, photon_energies, j, n, m):\n i = self._gen\n leps = [\"electron\", \"muon\"]\n\n if self.include_3body and (n == 2 or m == 2):\n fs = [\"neutrino\", leps[n - 1], leps[m - 1]]\n if i == j == n == m:\n\n def msqrd_nu_l_l(momenta):\n return __msqrd_nu_l_l(momenta, self.mx, self.theta, self.ml)\n\n return gamma_ray_decay(fs, self.mx, photon_energies, msqrd_nu_l_l)\n if (i == j) and (n == m):\n\n def msqrd_nu_lp_lp(momenta):\n return __msqrd_nu_lp_lp(\n momenta, self.mx, self.theta, lepton_masses[n - 1]\n )\n\n return gamma_ray_decay(fs, self.mx, photon_energies, msqrd_nu_lp_lp)\n if ((i == n) and (j == m)) or ((i == m) and (j == n)):\n\n def msqrd_nup_l_lp(momenta):\n return __msqrd_nup_l_lp(\n momenta,\n self.mx,\n 
self.theta,\n lepton_masses[n - 1],\n lepton_masses[m - 1],\n )\n\n return gamma_ray_decay(fs, self.mx, photon_energies, msqrd_nup_l_lp)\n else:\n return np.zeros_like(photon_energies)\n\n\ndef dnde_nu_l_l(self, photon_energies, j, n, m, spectrum_type=\"all\"):\n \"\"\"\n Compute the gamma-ray spectrum from the decay of a right-handed\n neutrino into an active neutrino and two charged leptons.\n\n Parameters\n ----------\n photon_energies: float or np.array\n Photon energies where the spectrum should be computed.\n spectrum_type: str, optional\n String specifying which spectrum component should be computed.\n Options are: \"all\", \"decay\" or \"fsr\"\n \"\"\"\n accessable = self.mx > lepton_masses[n - 1] + lepton_masses[m - 1]\n\n if not accessable:\n if hasattr(photon_energies, \"__len__\"):\n return np.zeros_like(photon_energies)\n return 0.0\n\n if spectrum_type == \"all\":\n return self.dnde_nu_l_l(photon_energies, j, n, m, \"fsr\") + self.dnde_nu_l_l(\n photon_energies, j, n, m, \"decay\"\n )\n elif spectrum_type == \"fsr\":\n if self.include_3body:\n return self.dnde_nu_l_l_fsr(photon_energies, j, n, m)\n else:\n return np.zeros_like(photon_energies)\n elif spectrum_type == \"decay\":\n return __dnde_nu_l_l_decay(self, photon_energies, j, n, m)\n\n else:\n raise ValueError(\n \"Type {} is invalid. Use 'all', 'fsr' or 'decay'\".format(spectrum_type)\n )\n\n\ndef __msqrd_l_pi_pi0(momenta, mx, tmix, ml):\n s = __lnorm_sqr(momenta[0] + momenta[2])\n t = __lnorm_sqr(momenta[1] + momenta[2])\n return (\n 2\n * GF ** 2\n * Vud ** 2\n * (\n ml ** 4\n + mx ** 4\n + 4 * mpi ** 2 * (mpi0 ** 2 - t)\n + 4 * t * (-(mpi0 ** 2) + s + t)\n - mx ** 2 * (s + 4 * t)\n - ml ** 2 * (-2 * mx ** 2 + s + 4 * t)\n )\n * np.sin(tmix) ** 2\n )\n\n\ndef dnde_l_pi_pi0(self, photon_energies, spectrum_type=\"all\"):\n \"\"\"\n Compute the gamma-ray spectrum from the decay of a right-handed\n neutrino into a charged pion, neutral pion and charged lepton.\n\n Parameters\n ----------\n photon_energies: float or np.array\n Photon energies where the spectrum should be computed.\n spectrum_type: str, optional\n String specifying which spectrum component should be computed.\n Options are: \"all\", \"decay\" or \"fsr\"\n \"\"\"\n if self.mx < self.ml + mpi + mpi0:\n if hasattr(photon_energies, \"__len__\"):\n return np.zeros_like(photon_energies)\n else:\n return 0.0\n\n if spectrum_type == \"all\":\n return self.dnde_l_pi_pi0(photon_energies, \"fsr\") + self.dnde_l_pi_pi0(\n photon_energies, \"decay\"\n )\n elif spectrum_type == \"fsr\":\n if self.include_3body:\n return self.dnde_l_pi_pi0_fsr(photon_energies)\n else:\n return np.zeros_like(photon_energies)\n elif spectrum_type == \"decay\":\n if self.include_3body:\n if self.lepton == \"e\":\n lepton = \"electron\"\n else:\n lepton = \"muon\"\n\n def msqrd(momenta):\n return __msqrd_l_pi_pi0(momenta, self.mx, self.theta, self.ml)\n\n return gamma_ray_decay(\n [lepton, \"charged_pion\", \"neutral_pion\"],\n self.mx,\n photon_energies,\n msqrd,\n )\n else:\n return np.zeros_like(photon_energies)\n\n else:\n raise ValueError(\n \"Type {} is invalid. 
Use 'all', 'fsr' or 'decay'\".format(spectrum_type)\n )\n\n\ndef __msqrd_nu_pi_pi(momenta, mx, tmix):\n s = __lnorm_sqr(momenta[0] + momenta[2])\n t = __lnorm_sqr(momenta[1] + momenta[2])\n return (\n GF ** 2\n * (1 - 2 * sw ** 2) ** 2\n * (\n 4 * mpi ** 4\n + mx ** 4\n - 8 * mpi ** 2 * t\n + 4 * t * (s + t)\n - mx ** 2 * (s + 4 * t)\n )\n * np.sin(2 * tmix) ** 2\n ) / (2.0 * cw ** 4)\n\n\ndef dnde_nu_pi_pi(self, photon_energies, spectrum_type=\"all\"):\n \"\"\"\n Compute the gamma-ray spectrum from the decay of a right-handed\n neutrino into an active neutrino and two charged pions.\n\n Parameters\n ----------\n photon_energies: float or np.array\n Photon energies where the spectrum should be computed.\n spectrum_type: str, optional\n String specifying which spectrum component should be computed.\n Options are: \"all\", \"decay\" or \"fsr\"\n \"\"\"\n if self.mx < 2.0 * mpi:\n if hasattr(photon_energies, \"__len__\"):\n return np.zeros_like(photon_energies)\n else:\n return 0.0\n\n if spectrum_type == \"all\":\n return self.dnde_nu_pi_pi(photon_energies, \"fsr\") + self.dnde_nu_pi_pi(\n photon_energies, \"decay\"\n )\n elif spectrum_type == \"fsr\":\n if self.include_3body:\n return self.dnde_nu_pi_pi_fsr(photon_energies)\n else:\n return np.zeros_like(photon_energies)\n elif spectrum_type == \"decay\":\n if self.include_3body:\n\n def msqrd(momenta):\n return __msqrd_nu_pi_pi(momenta, self.mx, self.theta)\n\n return gamma_ray_decay(\n [\"neutrino\", \"charged_pion\", \"charged_pion\"],\n self.mx,\n photon_energies,\n msqrd,\n )\n else:\n return np.zeros_like(photon_energies)\n\n else:\n raise ValueError(\n \"Type {} is invalid. Use 'all', 'fsr' or 'decay'\".format(spectrum_type)\n )\n\n\ndef dnde_nu_g_g(self, photon_energies, spectrum_type=\"all\"):\n \"\"\"\n Compute the gamma-ray spectrum from the decay of a right-handed\n neutrino into an active neutrino and two photons through an off-shell\n neutral pion. This is only included if the RH neutrino mass is less than\n the neutral pion mass.\n\n Parameters\n ----------\n photon_energies: float or np.array\n Photon energies where the spectrum should be computed.\n spectrum_type: str, optional\n String specifying which spectrum component should be computed.\n Options are: \"all\", \"decay\" or \"fsr\"\n \"\"\"\n mx = self.mx\n if mx > mpi0:\n return np.zeros_like(photon_energies)\n\n spec = (\n 32\n * photon_energies ** 3\n * (\n 6 * photon_energies * mx * (-8 * photon_energies + 5 * mx)\n + 5 * (-3 * photon_energies + 2 * mx) * mpi0 ** 2\n )\n ) / (mx ** 5 * (mx ** 2 + mpi0 ** 2))\n\n if spectrum_type == \"all\":\n return spec\n elif spectrum_type == \"fsr\":\n return spec\n elif spectrum_type == \"decay\":\n return np.zeros_like(photon_energies)\n\n else:\n raise ValueError(\n \"Type {} is invalid. Use 'all', 'fsr' or 'decay'\".format(spectrum_type)\n )\n"
] | [
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf"
],
[
"numpy.array",
"numpy.zeros_like",
"numpy.sin"
]
] |
mindriot101/gaia-webgl | [
"da86e89e5a9d591009c9a96b00ed9f77a0595e92"
] | [
"convert_to_cartesian.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport glob\nimport os\nimport fitsio\nimport numpy as np\n\n\ndef convert_to_cartesian(ra, dec, dist_parsec):\n r = dist_parsec # np.log10(dist_parsec)\n theta = np.radians(ra)\n phi = np.radians(dec)\n\n x = r * np.cos(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.cos(phi)\n z = r * np.sin(phi)\n return x, y, z\n\n\ndef main():\n files = glob.iglob('gaia_data/Gaia/tgas_source/fits/*.fits')\n for filename in files:\n output_filename = os.path.splitext(\n os.path.basename(filename))[0] + '_cartesian.fits'\n if os.path.isfile(output_filename):\n continue\n\n print(filename, output_filename)\n with fitsio.FITS(filename) as infile:\n cat = infile[1]\n ra = cat['ra'].read()\n dec = cat['dec'].read()\n parallax = cat['parallax'].read()\n gmag = cat['phot_g_mean_mag'].read()\n\n dist_parsec = 1. / parallax\n ind = ((dist_parsec > 0) &\n (gmag > 0))\n assert ind.any()\n\n x, y, z = convert_to_cartesian(ra, dec, dist_parsec)\n\n with fitsio.FITS(output_filename, 'rw', clobber=True) as outfile:\n outfile.write({\n 'x': x,\n 'y': y,\n 'z': z,\n 'gmag': gmag,\n })\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.radians",
"numpy.sin",
"numpy.cos"
]
] |
sofiadutta/deep-learning-v2-pytorch | [
"34fec1c48dab31d74b3f73df4aa7cf161268b0dc"
] | [
"project-bikesharing/my_answers.py"
] | [
"import numpy as np\n\n\nclass NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,\n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,\n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n\n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 1.0/(1.0+np.exp(-x)) # Replace 0 with your sigmoid calculation.\n\n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your\n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n\n\n def train(self, features, targets):\n ''' Train the network on batch of features and targets.\n\n Arguments\n ---------\n\n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n\n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n\n final_outputs, hidden_outputs = self.forward_pass_train(X) # Implement the forward pass function below\n # Implement the backproagation function below\n delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y,\n delta_weights_i_h, delta_weights_h_o)\n self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)\n\n\n def forward_pass_train(self, X):\n ''' Implement forward pass here\n\n Arguments\n ---------\n X: features batch\n\n '''\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer\n\n return final_outputs, hidden_outputs\n\n def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):\n ''' Implement backpropagation\n\n Arguments\n ---------\n final_outputs: output from forward pass\n y: target (i.e. 
label) batch\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n\n '''\n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error - Replace this value with your calculations.\n error = y - final_outputs # Output layer error is the difference between desired target and actual output.\n\n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(self.weights_hidden_to_output, error)\n\n\n # TODO: Backpropagated error terms - Replace these values with your calculations.\n output_error_term = error\n\n hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)\n\n\n # Weight step (input to hidden)\n delta_weights_i_h += hidden_error_term * X[:, None]\n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term * hidden_outputs[:,None]\n return delta_weights_i_h, delta_weights_h_o\n\n def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):\n ''' Update weights on gradient descent step\n\n Arguments\n ---------\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n n_records: number of records\n\n '''\n self.weights_hidden_to_output += self.lr * delta_weights_h_o/n_records # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step\n\n def run(self, features):\n ''' Run a forward pass through the network with input features\n\n Arguments\n ---------\n features: 1D array of feature values\n '''\n\n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n\n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer\n\n return final_outputs\n\n\n#########################################################\n# Set your hyperparameters here\n##########################################################\niterations = 6000\nlearning_rate = 0.54\nhidden_nodes = 32\noutput_nodes = 1\n"
] | [
[
"numpy.random.normal",
"numpy.dot",
"numpy.exp",
"numpy.zeros"
]
] |
mgraupe/SPySort | [
"da0f1710bd7dfe0881e1ded18052e868f151aec9"
] | [
"examples/example_nonintract.py"
] | [
"import numpy as np\nimport matplotlib.pylab as plt\n# from guppy import hpy\n\nfrom SPySort.ReadData import data\n# from Events import spikes\n# from Events import events\n# from Events import clusters\n# from Events import alignment\n\nif __name__ == '__main__':\n # Define the raw data location\n folder = '/Users/gdetorak/Documents/SUPELEC/Software/Data/Locust/'\n IFiles = [folder+'Locust_1.dat', folder+'Locust_2.dat',\n folder+'Locust_3.dat', folder+'Locust_4.dat']\n\n # Data analysis parameters\n freq = 1.5e4 # Sampling frequency\n freq = 1.0\n win = np.array([1., 1., 1., 1., 1.])/5. # Boxcar filter\n\n # Test of rawData class\n r_data = data.read_data(IFiles, freq) # Read raw data\n # r_data.summary() # Prints a data summary\n\n # res = r_data.select_channels([1, 2], 0, r_data.data_len) # Selects chan\n\n r_data.timeseries[0] = r_data.renormalization() # Raw data normalization\n\n r_data.plot_data(r_data.data[0:300]) # Plot normalized data\n # -------------------------------------------------------\n\n # Test of spike_detection class\n # s = spikes.spike_detection(r_data.timeseries)\n # filtered = s.filtering(4.0, win) # Filter the normalized data\n # sp0 = s.peaks(filtered, kind='aggregate') # Peaks detection\n\n # Define the proper size of events positions- this must be done by the user\n # positions = sp0[sp0 <= r_data.data_len/2.]\n\n # s.plot_filtered_data(s.data, filtered, 4.) # Plot the data\n # s.plot_peaks(s.data, sp0)\n # -------------------------------------------------------\n\n # Test of events class\n # evts = events.build_events(r_data.data, positions, win, before=14,\n # after=30)\n # evtsE = evts.mk_events() # Make spike events\n # noise = evts.mk_noise() # Make noise events\n\n # evts.plot_mad_median(evtsE) # Plot mad and median of the events\n # evts.plot_events(evtsE) # Plot events\n # -------------------------------------------------------\n\n # Test PCA and KMeans clustering\n # CSize = 10\n # c = clusters.pca_clustering(r_data.timeseries[0], positions, win, thr=8,\n # before=14, after=30)\n\n # print c.pca_variance(10) # Print the variance of the PCs\n\n # c.plot_mean_pca() # Plot the mean +- PC\n # c.plot_pca_projections() # Plot the projections of the PCs on the data\n\n # kmeans_clusters = c.KMeans(CSize) # K-means clustering\n\n # gmm_clusters = c.GMM(10, 'diag') # GMM clustering\n\n # tree = clusters.bagged_clustering(10, 100, 30) # Bagged clustering\n\n # c.plot_clusters(gmm_clusters) # Plot the clusters\n # -------------------------------------------------------\n\n # Test alignement of the spike events\n # goodEvts = c.goodEvts\n # align = alignment.align_events(r_data.timeseries[0], positions, goodEvts,\n # kmeans_clusters, CSize, win)\n\n # evtsE_noj = [align.mk_aligned_events(align.gcpos[i])\n # for i in range(CSize)]\n\n # centers = {\"Cluster \" + str(i): align.mk_center_dictionary(evtsE_noj[i][1])\n # for i in range(CSize)}\n\n # round0 = [align.classify_and_align_evt(align.positions[i], centers)\n # for i in range(len(align.positions))]\n\n # print len([x[1] for x in round0 if x[0] == '?'])\n plt.show()\n"
] | [
[
"numpy.array",
"matplotlib.pylab.show"
]
] |
rileynwong/soundscape-instrument-design | [
"8d7eabe85e07a86107a89c907f229867479ab6e2"
] | [
"main.py"
] | [
"import numpy as np\nimport cv2\nimport pygame.mixer\n\n\n### Set up sounds\npygame.init()\npygame.mixer.init()\n\n# Melody\nred_sound = pygame.mixer.Sound('loops/red_melody.wav')\nred_sound.set_volume(0.0)\nred_sound.play()\n\n# Chords\ngreen_sound = pygame.mixer.Sound('loops/green_chords.wav')\ngreen_sound.set_volume(0.0)\ngreen_sound.play()\n\n# Birds\nblue_sound = pygame.mixer.Sound('loops/blue_bird.wav')\nblue_sound.set_volume(0.0)\nblue_sound.play()\n\n\n### Capturing video through webcam\nwebcam = cv2.VideoCapture(0)\n\n### Main application loop\nwhile(1):\n has_red = False\n has_green = False\n has_blue = False\n\n # Reading the video from the\n # webcam in image frames\n _, imageFrame = webcam.read()\n\n # Convert the imageFrame in\n # BGR(RGB color space) to\n # HSV(hue-saturation-value)\n # color space\n hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)\n\n # Set range for red color and\n # define mask\n red_lower = np.array([136, 87, 111], np.uint8)\n red_upper = np.array([180, 255, 255], np.uint8)\n red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)\n\n # Set range for green color and\n # define mask\n green_lower = np.array([25, 52, 72], np.uint8)\n green_upper = np.array([102, 255, 255], np.uint8)\n green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)\n\n # Set range for blue color and\n # define mask\n blue_lower = np.array([94, 80, 2], np.uint8)\n blue_upper = np.array([120, 255, 255], np.uint8)\n blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)\n\n # Morphological Transform, Dilation\n # for each color and bitwise_and operator\n # between imageFrame and mask determines\n # to detect only that particular color\n kernal = np.ones((5, 5), \"uint8\")\n\n # For red color\n red_mask = cv2.dilate(red_mask, kernal)\n res_red = cv2.bitwise_and(imageFrame, imageFrame,\n mask = red_mask)\n\n # For green color\n green_mask = cv2.dilate(green_mask, kernal)\n res_green = cv2.bitwise_and(imageFrame, imageFrame,\n mask = green_mask)\n\n # For blue color\n blue_mask = cv2.dilate(blue_mask, kernal)\n res_blue = cv2.bitwise_and(imageFrame, imageFrame,\n mask = blue_mask)\n\n # Creating contour to track red color\n contours, hierarchy = cv2.findContours(red_mask,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n for pic, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n if(area > 300):\n x, y, w, h = cv2.boundingRect(contour)\n imageFrame = cv2.rectangle(imageFrame, (x, y),\n (x + w, y + h),\n (0, 0, 255), 2)\n\n cv2.putText(imageFrame, \"Red Colour\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1.0,\n (0, 0, 255))\n has_red = True\n\n\n # Creating contour to track green color\n contours, hierarchy = cv2.findContours(green_mask,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n for pic, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n if(area > 300):\n x, y, w, h = cv2.boundingRect(contour)\n imageFrame = cv2.rectangle(imageFrame, (x, y),\n (x + w, y + h),\n (0, 255, 0), 2)\n\n cv2.putText(imageFrame, \"Green Colour\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.0, (0, 255, 0))\n has_green = True\n\n\n # Creating contour to track blue color\n contours, hierarchy = cv2.findContours(blue_mask,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n for pic, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n if(area > 300):\n x, y, w, h = cv2.boundingRect(contour)\n imageFrame = cv2.rectangle(imageFrame, (x, y),\n (x + w, y + h),\n (255, 0, 0), 2)\n\n cv2.putText(imageFrame, \"Blue Colour\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.0, (255, 
0, 0))\n            has_blue = True\n\n    # Play sounds based on what's in the shot\n    if has_red:\n        # Play red sounds\n        red_sound.set_volume(1)\n    else:\n        # No red? Turn off sound\n        red_sound.set_volume(0.0)\n\n    if has_green:\n        # Play green sounds\n        green_sound.set_volume(1)\n    else:\n        # No green? Turn off sound\n        green_sound.set_volume(0.0)\n\n    if has_blue:\n        # Play blue sounds\n        blue_sound.set_volume(1)\n    else:\n        # No blue? Turn off sound\n        blue_sound.set_volume(0.0)\n\n    # Program Termination\n    cv2.imshow(\"Multiple Color Detection in Real-Time\", imageFrame)\n    if cv2.waitKey(10) & 0xFF == ord('q'):\n        webcam.release()\n        cv2.destroyAllWindows()\n        break\n"
] | [
[
"numpy.array",
"numpy.ones"
]
] |
IzayoiRin/MstarHe2R | [
"938d83acdfa5ec4464cf9113fef104a6e80ad662"
] | [
"mstarhe/core/nn/models.py"
] | [
"import copy as cp\nimport time\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport torch as th\nfrom tqdm import tqdm\n\nfrom mstarhe.conf import LazySettings\nfrom mstarhe.errors import ConfigureError, ObjectTypeError, AnalysisRuntimeError, ParametersError\n\n\nsettings = LazySettings()\nTORCH_PATH_ = settings.PERSISTENCE_DAT_DIR\n\n\nclass GenericFeedForwardNet(object):\n\n # config torch data loader\n data_loader_class = None\n # config torch computational graph\n model_graph_class = None\n # config torch optimizer\n optimizer_class = None\n # config loss function\n loss_func_class = None\n\n # the device that model is running on\n device = th.device('cuda:0' if th.cuda.is_available() else 'cpu')\n\n # hyper-parameters\n lr = 1e-3 # learning rate\n l1_lambda = 0.5 # l1-penalty coef\n l2_lambda = 0.01 # l2-penalty coef\n\n # persistence config\n ROOT_PATH = TORCH_PATH_\n ROOT_DIR = 'data'\n PKL_DIR = \"pickles\"\n CSV_DIR = 'texts'\n TAR_DIR = 'tar'\n CHECK_POINT = 'cp%s.tar'\n\n def __init__(self, ifea, ofea, **kwargs):\n \"\"\"\n :param kwargs: {\n reg: l-norm penalty, [L1, L2]\n dropout: dropout, [bool]\n batch_nor: batch_normalize, [bool]\n }\n \"\"\"\n # init object running kwargs\n self.kwargs = kwargs\n\n # init regulation params\n if kwargs.get('reg'):\n self.penalty_mapping = {\n 'L1': lambda v: self.l1_lambda * th.abs(v).sum(),\n 'L2': lambda v: self.l2_lambda * th.sqrt(th.pow(v, 2).sum())\n }\n\n # init graph and optimizer\n self.graph = self.get_graph(ifea, ofea)\n self.optimizer = self.get_optimizer()\n\n self._loss = list()\n self.last_epoch_ = 0\n\n # init persistence path config\n self.path = os.path.join(self.ROOT_PATH, self.ROOT_DIR)\n self.pkl_path = os.path.join(self.path, self.PKL_DIR)\n self.csv_path = os.path.join(self.path, self.CSV_DIR)\n self.tar_path = os.path.join(self.path, self.TAR_DIR)\n self._init_dir(self.pkl_path, self.csv_path, self.tar_path)\n\n @staticmethod\n def _init_dir(*path):\n for p in path:\n if not os.path.exists(p):\n os.makedirs(p)\n\n def get_data_loader(self, train):\n return self.data_loader_class(train=train)\n\n def get_graph(self, ifea, ofea):\n model_graph = self.model_graph_class(dropout=self.kwargs.get('dropout', False),\n batch_nor=self.kwargs.get('batch_nor', False))\\\n .assemble(ifea, ofea)\n return model_graph.to(self.device) if self.device else model_graph\n\n def get_optimizer(self):\n if hasattr(self.graph, 'parameters'):\n return self.optimizer_class(self.graph.parameters(), lr=self.lr)\n raise ConfigureError('Config a legal model graph first')\n\n def get_loss(self, x, y, **kwargs):\n \"\"\"\n reg: l1 or l2 penalty regularization\n :param x: logP\n :param y: label\n :return: J(theta) = cross-entropy + l-norm penalty\n \"\"\"\n loss = self.loss_func_class(**kwargs)(x, y)\n reg = self.kwargs.get('reg')\n if reg and reg in self.penalty_mapping.keys():\n penalty = self.penalty_mapping[self.kwargs['reg']]\n loss += penalty(y.float())\n return loss\n\n def dat2device(self, x, y):\n return [x.to(self.device), y.to(self.device)] if self.device else [x, y]\n\n def ave_loss(self, loss_arr):\n return np.mean(loss_arr)\n\n def accuracy(self, x, y):\n return x.eq(y).float().mean()\n\n def analysis(self, label, ypre, preP):\n raise NotImplementedError\n\n def train(self, iter_n, checkpoint=0, **kwargs):\n \"\"\"\n train model from training data loader in several epoch, each use whole batch to train\n :param iter_n: max epoch\n \"\"\"\n for epoch in range(iter_n):\n self.kwargs['epoch'] = epoch\n tr_dl = 
self.get_data_loader(train=True)\n self.train_batch(tr_dl)\n if checkpoint and (epoch+1) % checkpoint == 0:\n self.checkpoint()\n\n def train_batch(self, dl):\n \"\"\"\n train model from data loader in whole batch, each use one batch to update parameters\n :param dl: training data loader\n \"\"\"\n self._loss = list()\n self.graph.train()\n for batch, (X, label) in enumerate(dl):\n self.kwargs['batch'] = batch\n X, label = self.dat2device(X, label)\n # initial optimizer\n self.optimizer.zero_grad()\n # forward\n logP = self.graph(X)\n # calculate loss J(theta)\n loss = self.get_loss(logP, label)\n # backward\n loss.backward()\n # update parameters\n self.optimizer.step()\n\n # average loss\n self._loss.append(loss.item())\n # if batch % 100 == 0:\n # print(\"Batch: %s loss: %s\" % (batch, self.ave_loss(loss_arr)))\n self.ave_loss(self._loss)\n\n def eval(self):\n \"\"\"\n eval model from testing data loader in several epoch, each use whole batch\n \"\"\"\n te_dl = self.get_data_loader(train=False)\n with th.no_grad():\n self.eval_batch(te_dl)\n\n def eval_batch(self, dl):\n \"\"\"\n eval model from testing data loader in whole batch, each use one batch to analysis eval coef.\n :param dl: testing data loader\n \"\"\"\n self._loss = list()\n self.graph.eval()\n for batch, (X, label) in enumerate(dl):\n self.kwargs['batch'] = batch\n X, label = self.dat2device(X, label)\n # forward\n logP = self.graph(X)\n # calculate loss J(theta)\n loss = self.get_loss(logP, label)\n\n # average loss\n self._loss.append(loss.item())\n self.ave_loss(self._loss)\n\n # get predict out, predict probability and label\n preP, ypre = logP.max(dim=-1)\n\n try:\n self.analysis(label, ypre, preP)\n except NotImplementedError:\n pass\n except Exception as e:\n raise AnalysisRuntimeError(e)\n\n def save(self, obj, name):\n if not hasattr(obj, 'state_dict'):\n raise ObjectTypeError(\"%s can't save\" % obj)\n path = os.path.join(self.pkl_path, name)\n th.save(obj.state_dict(), path)\n\n def checkpoint(self):\n cp = {\n 'epoch': self.kwargs['epoch'],\n 'model_state_dict': self.graph.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }\n th.save(cp, os.path.join(self.tar_path, self.CHECK_POINT % cp['epoch']))\n\n def load(self, gpath=None, optpath=None, ckpath=None):\n if gpath:\n self.graph.load_state_dict(th.load(os.path.join(self.pkl_path, gpath)))\n if optpath:\n self.optimizer.load_state_dict(th.load(os.path.join(self.pkl_path, optpath)))\n if ckpath:\n checkpoint = th.load(os.path.join(self.tar_path, ckpath))\n self.graph.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n self.last_epoch_ = checkpoint['epoch']\n\n\nclass EarlyStoppingMixin(object):\n\n step = 10\n alpha = 1e-2\n patient = 3\n s = 0\n\n def __init__(self, ifea, ofea, **kwargs):\n super().__init__(ifea, ofea, **kwargs)\n self.standard = {\n 'GL': lambda x, y: x > self.alpha,\n 'PQ': lambda x, y: x / y > self.alpha,\n }\n self.optim_record = None\n self.coef_ = list()\n\n def get_data_loader(self, train=True):\n \"\"\"\n must be overwritten to split data loader as two part, include training and validating set\n :param train:\n :return:\n \"\"\"\n raise NotImplementedError\n\n def ave_loss(self, loss_arr):\n \"\"\"\n on mod: validating, set ave loss to attr: vl_ave_loss\n :param loss_arr:\n :return: scale\n \"\"\"\n ave_loss = np.mean(loss_arr)\n if getattr(self, 'validate', None):\n setattr(self, 'vl_ave_loss', ave_loss)\n return ave_loss\n\n def 
_judge_stopping(self, standard, gloss_arr, ploss_arr, pk):\n epoch = self.kwargs['epoch']\n # get ave loss on validating set\n vl_ave_loss = getattr(self, 'vl_ave_loss')\n\n # generic loss array\n gloss_arr.append(vl_ave_loss)\n # cal generic loss: eva_t / optim eva_lt_t - 1\n gl = self.generic_loss(gloss_arr)\n\n # measure progress\n ploss_arr.append(vl_ave_loss)\n # measure progress loss only be calculated at non GL std and in K step\n if (epoch + 1) % self.step == 0 and standard != 'GL':\n # cal measure progress: sigma(eva_k) / (k * optim eva_between_t-k+1_k) - 1\n pk = self.measure_progress(ploss_arr)\n # zero record array\n ploss_arr = list()\n\n print('*****ValEpoch: %s ave_loss: %s gl: %s, pk: %s last-patient: %s / %s *****'\n % (epoch, vl_ave_loss, gl, pk, self.s, self.patient))\n self.coef_.append([gl, pk if pk else np.nan])\n return gl, pk, ploss_arr\n\n def stop(self, std, gl, pk, nojud=True):\n \"\"\"\n early stopping standards, GL, PQ, within max patient\n :param nojud: Don't judge PQ\n :param std: GL, PQ\n :param gl: generic loss\n :param pk: measure progress\n :return: bool\n \"\"\"\n if std == 'PQ' and nojud:\n return False\n func = self.standard[std]\n self.s += int(func(gl, pk))\n return self.s >= self.patient\n\n def train(self, iter_n, standard, checkpoint=0):\n \"\"\"\n train model from training data loader in several epoch, each use whole batch to train\n :param iter_n: max epoch\n :param standard: 'GL', 'PQ'\n :return:\n \"\"\"\n if standard not in self.standard.keys():\n raise ParametersError('standard only choose from GL / PQ / UP')\n # generic loss array\n gloss_arr = list()\n # progress loss array\n ploss_arr = list()\n # measure progress array\n pk = None\n\n # get split data loader\n tr_dl, va_dl = self.get_data_loader(train=True)\n\n # training epoch, max iteration N\n for epoch in range(iter_n):\n self.kwargs['epoch'] = epoch\n # use the whole batch training set to train model\n self.train_batch(tr_dl)\n # turn on validate mod\n setattr(self, 'validate', True)\n with th.no_grad():\n # use the whole batch validating set to train model\n # function inner call self.ave_loss\n self.eval_batch(va_dl)\n # turn off validate mod\n setattr(self, 'validate', False)\n\n # analysis stopping eval coef\n gl, pk, ploss_arr = self._judge_stopping(standard, gloss_arr, ploss_arr, pk)\n\n # should be stop\n if self.stop(standard, gl, pk, nojud=bool(ploss_arr)):\n print('Stop at %sth Iteration' % (epoch-1))\n break\n\n self.optim_record = (cp.deepcopy(self.graph), cp.deepcopy(self.optimizer))\n\n if checkpoint and (epoch + 1) % checkpoint == 0:\n self.checkpoint()\n\n # haven't get the optim stop point before max iteration\n else:\n print('Stop at Max Iteration')\n\n self.model_persistence()\n\n @staticmethod\n def generic_loss(eva_arr):\n \"\"\"calculate generic loss\"\"\"\n eopt = min(eva_arr)\n return 100 * (eva_arr[-1] / eopt - 1)\n\n @staticmethod\n def measure_progress(eva_arr):\n \"\"\"calculate measure progress\"\"\"\n eopt = min(eva_arr)\n return 1000 * (np.mean(eva_arr) / eopt - 1)\n\n def model_persistence(self):\n if self.optim_record is None:\n return\n name = self.model_graph_class.__name__\n names = 'mod%s.pkl' % name, 'opt%s.pkl' % name\n for obj, nm in zip(self.optim_record, names):\n self.save(obj, nm)\n path = os.path.join(self.csv_path, 'EpochESCoef%s.txt' % self.model_graph_class.__name__)\n pd.DataFrame(self.coef_, columns=['gl', 'mp']).to_csv(path, sep='\\t', header=True, index=True, na_rep=-127)\n\n\nclass ESFeedForwardNet(EarlyStoppingMixin, 
GenericFeedForwardNet):\n\n pass\n\n\nclass Pretty:\n\n _bar = None\n _step = 40\n\n @classmethod\n def bar(cls, func):\n def inner(ins, dl):\n cls._bar = tqdm(dl)\n return func(ins, cls._bar)\n return inner\n\n @classmethod\n def desc(cls, func):\n name = func.__name__\n\n def inner(ins, arr):\n ret = func(ins, arr)\n if ins.kwargs['batch'] % cls._step == 0:\n s = \"TrMod[{mod}] Epoch:{e} Batch:{b} {stat}:{r}\".format(mod=ins.graph.training,\n e=ins.kwargs.get('epoch', 0),\n b=ins.kwargs.get('batch', 0),\n stat=name,\n r=round(ret, 6))\n cls._bar.set_description(s)\n return ret\n return inner\n\n\nclass PrettyFeedForward(ESFeedForwardNet):\n\n @Pretty.desc\n def ave_loss(self, loss_arr):\n return super().ave_loss(loss_arr)\n\n @Pretty.bar\n def train_batch(self, dl):\n \"\"\"\n train model from data loader in whole batch, each use one batch to update parameters\n :param dl: training data loader\n \"\"\"\n super().train_batch(dl)\n\n @Pretty.bar\n def eval_batch(self, dl):\n super().eval_batch(dl)\n time.sleep(0.1)\n"
] | [
[
"pandas.DataFrame",
"torch.no_grad",
"numpy.mean",
"torch.abs",
"torch.cuda.is_available",
"torch.pow"
]
] |
fsschneider/cockpit-experiments | [
"a9eaf3dc5da5a58356ac0eef25a11235bf0891c4"
] | [
"experiments/01_benchmark/plot_grid.py"
] | [
"\"\"\"Benchmark Heatmap Plot of the Overhead of Cockpit Configurations.\"\"\"\n\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport run_grid\nimport seaborn as sns\nfrom benchmark_utils import _fix_dev_naming, _fix_tp_naming, read_data\n\nsys.path.append(os.getcwd())\nfrom experiments.utils.plotting import _get_plot_size, _set_plotting_params # noqa\n\nHERE = os.path.abspath(__file__)\nHEREDIR = os.path.dirname(HERE)\nSAVEDIR = os.path.join(HEREDIR, \"output/fig_grid/\")\nos.makedirs(SAVEDIR, exist_ok=True)\n\nPLOT_FRACTION = 0.35\nPLOT_FRACTION_OTHER = 0.6\nAPP_PLOT_FRACTION = 0.49\nPLOT_HEIGHT = 0.4\nPLOT_HEIGHT_APP = 0.75\nPLOT_HEIGHT_IMAGENET = 0.5\nPLOT_FRACTION_IMAGENET = 0.4\n\n\ndef plot_data(\n ax,\n df,\n cbar=True,\n hide_y_axis=False,\n ext_title=False,\n show_title=False,\n reduced_config=False,\n):\n \"\"\"Create a heatmap plot from the benchmarking data.\n\n The heatmap plot shows the relative run time compared to an empty cockpit for\n different cockpit configurations (e.g. 'economy', 'business' and 'full') and\n different tracking rates. The run time is averaged over multiple seeds.\n\n Args:\n ax (mpl.axes): Axes for the plot.\n df (pandas.DataFrame): DataFrame holding the benchmark data.\n cbar (book, optional): Whether to show a colorbar. Defaults to True.\n hide_y_axis (bool, optional): Whether to hide labels and ticks on the y-axis.\n Defaults to False.\n ext_title (bool, optional): Whether to show the full title including the\n super title. Defaults to False.\n show_title (bool, optional): Whether to show the figure's title. Defaults to\n False.\n reduced_config (bool, optional): Whether only the `baseline` and `economy`\n configs are tested. Currently this is the case for `ImageNet`.\n Defaults to False.\n \"\"\"\n # Plotting Params #\n cmap = \"vlag\" # , \"tab10\" \"Set2\"\n cockpit_configs = (\n [\"baseline\", \"economy\"]\n if reduced_config\n else [\"baseline\", \"economy\", \"business\", \"full\"]\n )\n annot = True\n vmin, vmax = 1, 3\n\n # Verify that the data is from a single test problem and use it as a title\n testproblem_set = df.testproblem.unique()\n assert len(testproblem_set) == 1\n tp_name = _fix_tp_naming(str(testproblem_set[0]))\n\n device_set = df.device.unique()\n assert len(device_set) == 1\n dev_name = _fix_dev_naming(str(device_set[0]))\n\n # Only keep cockpit configurations\n df = df.loc[df[\"quantities\"].isin(cockpit_configs)]\n\n # reshape and average\n df = (\n df.groupby([\"quantities\", \"track_interval\"])\n .mean()\n .unstack(level=0)[\"time_per_step\"]\n ).T\n\n # Sort index by cockpit_configs list\n df = df.reindex(cockpit_configs)\n\n # relative overhead\n ref_value = df.loc[\"baseline\", 1]\n df = df.divide(ref_value)\n\n hm = sns.heatmap(\n df,\n cmap=cmap,\n ax=ax,\n annot=annot,\n vmin=vmin,\n vmax=vmax,\n lw=0.2,\n center=2,\n cbar=cbar,\n annot_kws={\"fontsize\": 5},\n )\n if cbar:\n cbar_ticks = list(range(vmin, vmax + 1))\n hm.collections[0].colorbar.ax.tick_params(size=1, width=0.0)\n hm.collections[0].colorbar.set_ticks(cbar_ticks)\n hm.collections[0].colorbar.set_ticklabels([str(t) for t in cbar_ticks])\n\n hm.set_yticklabels(hm.get_yticklabels(), rotation=0)\n hm.set_xticklabels(hm.get_xticklabels(), rotation=0)\n\n ax.tick_params(\n axis=\"both\",\n which=\"major\",\n labelbottom=False,\n bottom=False,\n top=False,\n labeltop=True,\n pad=-4,\n )\n\n if ext_title:\n add_title = \"Computational Overhead for\"\n else:\n add_title = \"\"\n title = f\"{add_title}\\n{tp_name} ({dev_name})\"\n if show_title:\n 
ax.set_title(title, fontsize=8)\n ax.set_xlabel(\"Track Interval\")\n ax.xaxis.set_label_position(\"top\")\n ax.set_ylabel(\"Configuration\")\n if hide_y_axis:\n ax.get_yaxis().set_visible(False)\n\n plt.tight_layout()\n\n\ndef plot_individual(\n df,\n tp,\n dev,\n show=True,\n save=True,\n show_title=True,\n appendix=False,\n reduced_config=False,\n):\n \"\"\"Plot the heatmaps of each test problem individually.\n\n Args:\n df (pandas.DataFrame): DataFrame holding the benchmark data.\n tp (str): Name of the testproblem.\n dev (str): Device.\n show (bool, optional): Switch to show the plot. Defaults to True.\n save (bool, optional): Switch to save the plot as pdf. Defaults to True.\n show_title (bool, optional): Switch to show title. Defaults to False.\n appendix (bool, optional): Whether the plot will be used in the appendix.\n Defaults to False.\n reduced_config (bool, optional): Whether only the `baseline` and `economy`\n configs are tested. Currently this is the case for `ImageNet`.\n Defaults to False.\n \"\"\"\n height_ratio = PLOT_HEIGHT * PLOT_FRACTION_OTHER / PLOT_FRACTION\n fraction = PLOT_FRACTION\n if appendix:\n height_ratio = PLOT_HEIGHT_APP\n\n if reduced_config:\n height_ratio = PLOT_HEIGHT_IMAGENET\n fraction = PLOT_FRACTION_IMAGENET\n\n fig, ax = plt.subplots(\n figsize=_get_plot_size(\n textwidth=\"neurips\",\n fraction=fraction,\n height_ratio=height_ratio,\n )\n )\n plot_data(\n ax,\n df,\n show_title=show_title,\n cbar=not appendix,\n reduced_config=reduced_config,\n )\n\n # Save plot\n if save:\n savename = \"benchmark_\" + tp + \"_\" + dev\n savename += \"_app\" if appendix else \"\"\n savename += \".pdf\"\n savepath = os.path.join(SAVEDIR, savename)\n plt.savefig(savepath, bbox_inches=\"tight\")\n if show:\n plt.show()\n\n\ndef plot_all(dev, show=True, save=True):\n \"\"\"Plot the heatmap of three test problems in a single figure.\n\n Args:\n dev (str): Device to plot.\n show (bool, optional): Switch to show the plot. Defaults to True.\n save (bool, optional): Switch to save the plot as pdf. 
Defaults to True.\n \"\"\"\n fig, axs = plt.subplots(1, 3, figsize=(16, 5))\n\n tps = [\n \"quadratic_deep\",\n \"mnist_mlp\",\n \"cifar10_3c3d\",\n ]\n\n for idx, tp in enumerate(tps):\n df, testproblem_set = read_data(run_grid.get_savefile(tp, dev))\n\n for tp in testproblem_set:\n if idx == 2:\n plot_data(axs[2], df[tp], cbar=True, hide_y_axis=True)\n elif idx == 1:\n plot_data(axs[1], df[tp], cbar=False, hide_y_axis=True, ext_title=True)\n elif idx == 0:\n plot_data(axs[0], df[tp], cbar=False)\n plt.tight_layout()\n\n # Save plot\n if save:\n savepath = os.path.join(SAVEDIR, \"heatmap.pdf\")\n plt.savefig(savepath, bbox_inches=\"tight\")\n if show:\n plt.show()\n\n\nif __name__ == \"__main__\":\n _set_plotting_params()\n\n PLOT_APPENDIX = True\n\n # Main Plot\n MAIN_PROBLEM = (\"cifar10_3c3d\", \"cuda\")\n MAIN_PROBLEM_FILE = run_grid.get_savefile(*MAIN_PROBLEM)\n\n df, testproblems = read_data(MAIN_PROBLEM_FILE)\n plot_individual(df[testproblems[0]], *MAIN_PROBLEM, show=False, show_title=False)\n\n # Appendix Plots\n if PLOT_APPENDIX:\n APPENDIX_RUNS = [\n # GPU\n (\"mnist_logreg\", \"cuda\"),\n (\"mnist_mlp\", \"cuda\"),\n (\"cifar10_3c3d\", \"cuda\"),\n (\"fmnist_2c2d\", \"cuda\"),\n (\"dummyimagenet_resnet50nobn\", \"cuda\"),\n # CPU\n (\"mnist_logreg\", \"cpu\"),\n (\"mnist_mlp\", \"cpu\"),\n (\"cifar10_3c3d\", \"cpu\"),\n (\"fmnist_2c2d\", \"cpu\"),\n ]\n\n for (testproblem, device) in APPENDIX_RUNS:\n\n filepath = run_grid.get_savefile(testproblem, device)\n df, _ = read_data(filepath)\n\n plot_individual(\n df[testproblem],\n testproblem,\n device,\n show=False,\n show_title=False,\n appendix=True,\n reduced_config=True\n if testproblem == \"dummyimagenet_resnet50nobn\"\n else False,\n )\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
]
] |
goncaloperes/Library_Lasagne | [
"5d3c63cb315c50b1cbd27a6bc8664b406f34dd99"
] | [
"lasagne/tests/layers/test_local.py"
] | [
"import numpy as np\nimport pytest\nimport theano\n\nfrom lasagne.utils import floatX\n\n\ndef locally_connected2d(input, W, flip_filters=True):\n \"\"\"\n 2D convolution with unshared weights, no stride, 'same' padding,\n no dilation and no bias\n \"\"\"\n num_batch, input_channels, input_rows, input_cols = input.shape\n assert W.shape[1] == input_channels\n num_filters, input_channels, \\\n filter_rows, filter_cols, output_rows, output_cols = W.shape\n assert filter_rows % 2 == 1\n assert filter_cols % 2 == 1\n output = np.zeros((num_batch, num_filters, output_rows, output_cols))\n for b in range(num_batch):\n for f in range(num_filters):\n for c in range(input_channels):\n for i_out in range(output_rows):\n for j_out in range(output_cols):\n for i_filter in range(filter_rows):\n i_in = i_out + i_filter - (filter_rows // 2)\n if not (0 <= i_in < input_rows):\n continue\n for j_filter in range(filter_cols):\n j_in = j_out + j_filter - (filter_cols // 2)\n if not (0 <= j_in < input_cols):\n continue\n if flip_filters:\n inc = (input[b, c, i_in, j_in] *\n W[f, c, -i_filter-1, -j_filter-1,\n i_out, j_out])\n else:\n inc = (input[b, c, i_in, j_in] *\n W[f, c, i_filter, j_filter,\n i_out, j_out])\n output[b, f, i_out, j_out] += inc\n return output\n\n\ndef channelwise_locally_connected2d(input, W, flip_filters=True):\n \"\"\"\n channelwise 2D convolution with unshared weights, no stride,\n 'same' padding, no dilation and no bias\n \"\"\"\n num_batch, input_channels, input_rows, input_cols = input.shape\n num_filters, filter_rows, filter_cols, output_rows, output_cols = W.shape\n assert input_channels == num_filters\n assert filter_rows % 2 == 1\n assert filter_cols % 2 == 1\n output = np.zeros((num_batch, num_filters, output_rows, output_cols))\n for b in range(num_batch):\n for f in range(num_filters):\n for i_out in range(output_rows):\n for j_out in range(output_cols):\n for i_filter in range(filter_rows):\n i_in = i_out + i_filter - (filter_rows // 2)\n if not (0 <= i_in < input_rows):\n continue\n for j_filter in range(filter_cols):\n j_in = j_out + j_filter - (filter_cols // 2)\n if not (0 <= j_in < input_cols):\n continue\n if flip_filters:\n inc = (input[b, f, i_in, j_in] *\n W[f, -i_filter-1, -j_filter-1,\n i_out, j_out])\n else:\n inc = (input[b, f, i_in, j_in] *\n W[f, i_filter, j_filter,\n i_out, j_out])\n output[b, f, i_out, j_out] += inc\n return output\n\n\ndef locally_connected2d_test_sets():\n def _convert(input, W, output, kwargs):\n return [floatX(input), floatX(W), output, kwargs]\n\n for batch_size in (2, 3):\n for input_shape in ((batch_size, 2, 5, 5), (batch_size, 4, 8, 8)):\n for num_filters in (2, 4):\n for filter_size in ((3, 3), (3, 5)):\n for flip_filters in (True, False):\n for channelwise in (True, False):\n if channelwise and num_filters != input_shape[1]:\n continue\n input = np.random.random(input_shape)\n if channelwise:\n W = np.random.random(\n (num_filters,) + filter_size +\n input_shape[2:])\n output = channelwise_locally_connected2d(\n input, W, flip_filters=flip_filters)\n else:\n W = np.random.random(\n (num_filters, input_shape[1]) +\n filter_size + input_shape[2:])\n output = locally_connected2d(\n input, W, flip_filters=flip_filters)\n yield _convert(input, W, output,\n {'num_filters': num_filters,\n 'filter_size': filter_size,\n 'flip_filters': flip_filters,\n 'channelwise': channelwise})\n\n\[email protected]\ndef DummyInputLayer():\n def factory(shape):\n from lasagne.layers.input import InputLayer\n return InputLayer(shape)\n return 
factory\n\n\nclass TestLocallyConnected2DLayer:\n @pytest.mark.parametrize(\n \"input, W, output, kwargs\", list(locally_connected2d_test_sets()))\n def test_defaults(self, DummyInputLayer, input, W, output, kwargs):\n from lasagne.layers import LocallyConnected2DLayer\n b, c, h, w = input.shape\n input_layer = DummyInputLayer((b, c, h, w))\n layer = LocallyConnected2DLayer(\n input_layer,\n W=W,\n **kwargs)\n actual = layer.get_output_for(theano.shared(input)).eval()\n assert actual.shape == output.shape\n assert actual.shape == layer.output_shape\n assert np.allclose(actual, output)\n\n def test_unsupported_settings(self, DummyInputLayer):\n from lasagne.layers import LocallyConnected2DLayer\n input_layer = DummyInputLayer((10, 2, 4, 4))\n for pad in 'valid', 'full', 1:\n with pytest.raises(NotImplementedError) as exc:\n LocallyConnected2DLayer(input_layer, 2, 3, pad=pad)\n assert \"requires pad='same'\" in exc.value.args[0]\n with pytest.raises(NotImplementedError) as exc:\n LocallyConnected2DLayer(input_layer, 2, 3, stride=2)\n assert \"requires stride=1 / (1, 1)\" in exc.value.args[0]\n\n def test_invalid_settings(self, DummyInputLayer):\n from lasagne.layers import LocallyConnected2DLayer\n input_layer = DummyInputLayer((10, 2, 4, 4))\n with pytest.raises(ValueError) as exc:\n LocallyConnected2DLayer(input_layer, 4, 3, channelwise=True)\n assert \"num_filters and the number of input channels should match\" \\\n in exc.value.args[0]\n input_layer = DummyInputLayer((10, 2, None, 4))\n with pytest.raises(ValueError) as exc:\n LocallyConnected2DLayer(input_layer, 4, 3, channelwise=True)\n assert \"A LocallyConnected2DLayer requires a fixed input shape \" \\\n \"(except for the batch size)\" in exc.value.args[0]\n"
] | [
[
"numpy.allclose",
"numpy.zeros",
"numpy.random.random"
]
] |
wanliuhuo/rafiki | [
"602679cc294d3e14f74e42d0184cbbffb90f9c88"
] | [
"examples/models/image_classification/TfVgg16.py"
] | [
"import tensorflow as tf\nfrom tensorflow import keras\nimport json\nimport os\nimport tempfile\nimport numpy as np\nimport base64\nimport abc\nfrom urllib.parse import urlparse, parse_qs \n\nfrom rafiki.model import BaseModel, InvalidModelParamsException, test_model_class, \\\n IntegerKnob, FloatKnob, CategoricalKnob, dataset_utils\nfrom rafiki.constants import TaskType, ModelDependency\n\nclass TfVgg16(BaseModel):\n '''\n Implements VGG16 on Tensorflow for simple image classification\n '''\n @staticmethod\n def get_knob_config():\n return {\n 'epochs': FixedKnob(1),\n 'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),\n 'batch_size': CategoricalKnob([16, 32, 64, 128]),\n }\n\n def __init__(self, **knobs):\n super().__init__(**knobs)\n self._knobs = knobs\n self._graph = tf.Graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self._sess = tf.Session(graph=self._graph, config=config)\n\n def train(self, dataset_uri):\n ep = self._knobs.get('epochs')\n bs = self._knobs.get('batch_size')\n\n dataset = dataset_utils.load_dataset_of_image_files(dataset_uri, image_size=[48, 48])\n num_classes = dataset.classes\n (images, classes) = zip(*[(image, image_class) for (image, image_class) in dataset])\n images = np.asarray(images)\n images = np.stack([images] * 3, axis=-1)\n classes = np.asarray(classes)\n\n with self._graph.as_default():\n self._model = self._build_model(num_classes)\n with self._sess.as_default():\n self._model.fit(\n images, \n classes, \n epochs=ep, \n batch_size=bs\n )\n\n def evaluate(self, dataset_uri):\n dataset = dataset_utils.load_dataset_of_image_files(dataset_uri, image_size=[48, 48])\n (images, classes) = zip(*[(image, image_class) for (image, image_class) in dataset])\n images = np.asarray(images)\n images = np.stack([images] * 3, axis=-1)\n classes = np.asarray(classes)\n\n with self._graph.as_default():\n with self._sess.as_default():\n (loss, accuracy) = self._model.evaluate(images, classes)\n return accuracy\n\n def predict(self, queries):\n images = dataset_utils.resize_as_images(queries, image_size=[48, 48])\n images = np.stack([images] * 3, axis=-1)\n with self._graph.as_default():\n with self._sess.as_default():\n probs = self._model.predict(images)\n \n return probs.tolist()\n \n def destroy(self):\n self._sess.close()\n\n def dump_parameters(self):\n params = {}\n\n # Save model parameters\n with tempfile.NamedTemporaryFile() as tmp:\n # Save whole model to temp h5 file\n with self._graph.as_default():\n with self._sess.as_default():\n self._model.save(tmp.name)\n \n # Read from temp h5 file & encode it to base64 string\n with open(tmp.name, 'rb') as f:\n h5_model_bytes = f.read()\n\n params['h5_model_base64'] = base64.b64encode(h5_model_bytes).decode('utf-8')\n\n return params\n\n def load_parameters(self, params):\n # Load model parameters\n h5_model_base64 = params.get('h5_model_base64', None)\n if h5_model_base64 is None:\n raise InvalidModelParamsException()\n\n with tempfile.NamedTemporaryFile() as tmp:\n # Convert back to bytes & write to temp file\n h5_model_bytes = base64.b64decode(h5_model_base64.encode('utf-8'))\n with open(tmp.name, 'wb') as f:\n f.write(h5_model_bytes)\n\n # Load model from temp file\n with self._graph.as_default():\n with self._sess.as_default():\n self._model = keras.models.load_model(tmp.name)\n \n def _build_model(self, num_classes):\n lr = self._knobs.get('learning_rate')\n\n model = keras.applications.VGG16(\n include_top=True,\n input_shape=(48, 48, 3),\n weights=None, \n classes=num_classes\n 
)\n\n model.compile(\n optimizer=keras.optimizers.Adam(lr=lr),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n return model\n\nif __name__ == '__main__':\n test_model_class(\n model_file_path=__file__,\n model_class='TfVgg16',\n task=TaskType.IMAGE_CLASSIFICATION,\n dependencies={\n ModelDependency.TENSORFLOW: '1.12.0'\n },\n train_dataset_uri='data/fashion_mnist_for_image_classification_train.zip',\n test_dataset_uri='data/fashion_mnist_for_image_classification_test.zip',\n queries=[\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 7, 0, 37, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 27, 84, 11, 0, 0, 0, 0, 0, 0, 119, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 88, 143, 110, 0, 0, 0, 0, 22, 93, 106, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 53, 129, 120, 147, 175, 157, 166, 135, 154, 168, 140, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 11, 137, 130, 128, 160, 176, 159, 167, 178, 149, 151, 144, 0, 0], \n [0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 0, 3, 0, 0, 115, 114, 106, 137, 168, 153, 156, 165, 167, 143, 157, 158, 11, 0], \n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 89, 139, 90, 94, 153, 149, 131, 151, 169, 172, 143, 159, 169, 48, 0], \n [0, 0, 0, 0, 0, 0, 2, 4, 1, 0, 0, 0, 98, 136, 110, 109, 110, 162, 135, 144, 149, 159, 167, 144, 158, 169, 119, 0], \n [0, 0, 2, 2, 1, 2, 0, 0, 0, 0, 26, 108, 117, 99, 111, 117, 136, 156, 134, 154, 154, 156, 160, 141, 147, 156, 178, 0], \n [3, 0, 0, 0, 0, 0, 0, 21, 53, 92, 117, 111, 103, 115, 129, 134, 143, 154, 165, 170, 154, 151, 154, 143, 138, 150, 165, 43], \n [0, 0, 23, 54, 65, 76, 85, 118, 128, 123, 111, 113, 118, 127, 125, 139, 133, 136, 160, 140, 155, 161, 144, 155, 172, 161, 189, 62], \n [0, 68, 94, 90, 111, 114, 111, 114, 115, 127, 135, 136, 143, 126, 127, 151, 154, 143, 148, 125, 162, 162, 144, 138, 153, 162, 196, 58], \n [70, 169, 129, 104, 98, 100, 94, 97, 98, 102, 108, 106, 119, 120, 129, 149, 156, 167, 190, 190, 196, 198, 198, 187, 197, 189, 184, 36], \n [16, 126, 171, 188, 188, 184, 171, 153, 135, 120, 126, 127, 146, 185, 195, 209, 208, 255, 209, 177, 245, 252, 251, 251, 247, 220, 206, 49], \n [0, 0, 0, 12, 67, 106, 164, 185, 199, 210, 211, 210, 208, 190, 150, 82, 8, 0, 0, 0, 178, 208, 188, 175, 162, 158, 151, 11], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n ]\n )\n"
] | [
[
"numpy.asarray",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.ConfigProto",
"tensorflow.keras.models.load_model",
"numpy.stack",
"tensorflow.keras.applications.VGG16",
"tensorflow.keras.optimizers.Adam"
]
] |
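The `get_knob_config()` dict above (note `FixedKnob`, now added to the import) is what Rafiki's tuner samples from on each training trial. A minimal sketch of that sampling step, using hypothetical stand-in knob classes rather than the real `rafiki.model` API:

```python
import math
import random

# Hypothetical stand-ins for rafiki.model's knob classes (same names as in
# TfVgg16.get_knob_config() above, simplified for illustration only).
class FixedKnob:
    def __init__(self, value):
        self.value = value
    def sample(self):
        return self.value

class FloatKnob:
    def __init__(self, low, high, is_exp=False):
        self.low, self.high, self.is_exp = low, high, is_exp
    def sample(self):
        if self.is_exp:
            # sample uniformly in log-space, the usual choice for learning rates
            return math.exp(random.uniform(math.log(self.low), math.log(self.high)))
        return random.uniform(self.low, self.high)

class CategoricalKnob:
    def __init__(self, values):
        self.values = values
    def sample(self):
        return random.choice(self.values)

knob_config = {
    'epochs': FixedKnob(1),
    'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),
    'batch_size': CategoricalKnob([16, 32, 64, 128]),
}
knobs = {name: knob.sample() for name, knob in knob_config.items()}
print(knobs)  # e.g. {'epochs': 1, 'learning_rate': 0.00032, 'batch_size': 64}
```

The sampled dict is what arrives as `**knobs` in the model's `__init__`.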
e93fem/Hands-On-Transfer-Learning-with-Python | [
"57e38231380aa95d753f1c7f7a711b5107436038"
] | [
"Chapter02/example2.py"
] | [
"# EXAMPLE of Tensor Operations using tensorflow.\nimport tensorflow as tf\n\n# Initialize 3 constants: 2 vectors, a scalar and a 2D tensor\nx1 = tf.constant([1, 2, 3, 4])\nx2 = tf.constant([5, 6, 7, 8])\nb = tf.constant(10)\nW = tf.constant(-1, shape=[4, 2])\n\n# Elementwise Multiply/subtract\nres_elem_wise_mult = tf.multiply(x1, x2)\nres_elem_wise_sub = tf.subtract(x1, x2)\n\n# dot product of two tensors of compatable shapes\nres_dot_product = tf.tensordot(x1, x2, axes=1)\n\n# broadcasting : add scalar 10 to all elements of the vector\nres_broadcast = tf.add(x1, b)\n\n# Calculating Wtx\nres_matrix_vector_dot = tf.multiply(tf.transpose(W), x1)\n\n# scalar multiplication\nscal_mult_matrix = tf.scalar_mul(scalar=10, x=W)\n\n# Initialize Session and execute\nwith tf.Session() as sess:\n output = sess.run([res_elem_wise_mult, res_elem_wise_sub, res_dot_product, res_broadcast, res_matrix_vector_dot,\n scal_mult_matrix])\n print(output)\n"
] | [
[
"tensorflow.multiply",
"tensorflow.scalar_mul",
"tensorflow.tensordot",
"tensorflow.subtract",
"tensorflow.Session",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.add"
]
] |
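The fix above (tensordot instead of multiply for W^T x) is easy to verify without a TF1 session. A small numpy check of the difference between the broadcast elementwise product and the true matrix-vector product:

```python
import numpy as np

x1 = np.array([1, 2, 3, 4])
W = np.full((4, 2), -1)

elementwise = W.T * x1      # shape (2, 4): broadcasts x1 across the rows of W^T
matvec = W.T @ x1           # shape (2,): the actual W^T x matrix-vector product

# summing the broadcast product over the last axis recovers the matvec
assert np.array_equal(matvec, elementwise.sum(axis=1))
print(matvec)               # [-10 -10]
```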
pjhartout/fastwlk | [
"deb78923c9a8450099c26bac09da94ae87892d0d"
] | [
"tests/test_kernel.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for `fastwlk` package.\"\"\"\n\nimport json\nimport pickle\nfrom typing import Iterable\n\nimport numpy as np\nimport pytest\nfrom fastwlk.kernel import WeisfeilerLehmanKernel\nfrom grakel import WeisfeilerLehman, graph_from_networkx\nfrom pyprojroot import here\n\nN_JOBS = 6\ndefault_eigvalue_precision = float(\"-1e-5\")\n\nwith open(here() / \"data/graphs.pkl\", \"rb\") as f:\n graphs = pickle.load(f)\n\nwith open(\"data/test_encoding_graph_1.json\", \"r\") as f:\n encoding = json.load(f)\n\n\nKX = np.array([[3608, 5062, 5009], [5062, 14532, 9726], [5009, 9726, 13649]])\nKXY = np.array(\n [\n [6481, 6794, 6718, 7014],\n [14273, 15091, 15595, 14968],\n [12569, 12520, 12882, 13661],\n ]\n)\n\n\ntest_validity_biased = [\n (graphs[:3], graphs[:3], KX),\n (graphs[:3], graphs[3:7], KXY),\n]\n\n\[email protected](\"X, Y, expected\", test_validity_biased)\ndef test_compute_matrix_multithreaded(X, Y, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=N_JOBS,\n n_iter=4,\n node_label=\"residue\",\n biased=True,\n verbose=False,\n )\n K_fastwlk = wl_kernel.compute_matrix(X, Y)\n np.testing.assert_array_equal(K_fastwlk, expected)\n\n\[email protected](\"X, Y, expected\", test_validity_biased)\ndef test_compute_matrix_single_threaded(X, Y, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=None,\n n_iter=4,\n node_label=\"residue\",\n biased=True,\n verbose=False,\n )\n K_fastwlk = wl_kernel.compute_matrix(X, Y)\n np.testing.assert_array_equal(K_fastwlk, expected)\n\n\nKX_unbiased = np.array([[0, 5062, 5009], [5062, 0, 9726], [5009, 9726, 0]])\n\ntest_validity_unbiased = [\n (graphs[:3], graphs[:3], KX_unbiased),\n (graphs[:3], graphs[3:7], KXY),\n]\n\n\[email protected](\"X, Y, expected\", test_validity_unbiased)\ndef test_compute_matrix_unbiased(X, Y, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=None,\n n_iter=4,\n node_label=\"residue\",\n biased=False,\n verbose=False,\n )\n K_fastwlk = wl_kernel.compute_matrix(X, Y)\n np.testing.assert_array_equal(K_fastwlk, expected)\n\n\[email protected](\"X, Y, expected\", test_validity_biased)\ndef test_compute_matrix_precomputed(X, Y, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=None,\n precomputed=True,\n n_iter=4,\n node_label=\"residue\",\n biased=True,\n verbose=False,\n )\n hashes_X = [wl_kernel.compute_wl_hashes(graph) for graph in X]\n hashes_Y = [wl_kernel.compute_wl_hashes(graph) for graph in Y]\n K_fastwlk = wl_kernel.compute_matrix(hashes_X, hashes_Y)\n np.testing.assert_array_equal(K_fastwlk, expected)\n\n\ntest_compute_wl_hashes_data = [(graphs[1], encoding)]\n\n\[email protected](\"X, expected\", test_compute_wl_hashes_data)\ndef test_compute_wl_hashes(X, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=2,\n precomputed=False,\n n_iter=2,\n node_label=\"residue\",\n biased=True,\n verbose=False,\n )\n hashes = wl_kernel.compute_wl_hashes(graphs[1])\n assert hashes == expected\n\n\ntest_grakel_equality_data = [\n (graphs[:10], graphs[:10]),\n (graphs[:10], graphs[10:30]),\n]\n\n\ndef networkx2grakel(X: Iterable) -> Iterable:\n Xt = list(graph_from_networkx(X, node_labels_tag=\"residue\"))\n return Xt\n\n\[email protected](\"X, Y\", test_grakel_equality_data)\ndef test_grakel_equivalence(X, Y):\n Xt = networkx2grakel(X)\n Yt = networkx2grakel(Y)\n wl_kernel_grakel = WeisfeilerLehman(n_jobs=N_JOBS, n_iter=3)\n KXY_grakel = wl_kernel_grakel.fit(Xt).transform(Yt).T\n wl_kernel_fastwlk = WeisfeilerLehmanKernel(\n n_jobs=N_JOBS,\n 
n_iter=3,\n node_label=\"residue\",\n biased=True,\n verbose=False,\n )\n KXY_fastwlk = wl_kernel_fastwlk.compute_matrix(X, Y)\n np.testing.assert_array_equal(KXY_fastwlk, KXY_grakel)\n return KXY\n\n\nKX_norm = np.array(\n [\n [1.0, 0.73037069, 0.74368576],\n [0.73037069, 1.0, 0.71130753],\n [0.74368576, 0.71130753, 1.0],\n ]\n)\nKXY_norm = np.array(\n [\n [0.79378634, 0.71006284, 0.74347089, 0.75475861],\n [0.86111583, 0.77691583, 0.85014706, 0.79339745],\n [0.78030479, 0.6632504, 0.72261873, 0.74512092],\n ]\n)\n\ntest_validity_normalized = [\n (graphs[:3], graphs[:3], KX_norm),\n (graphs[:3], graphs[3:7], KXY_norm),\n]\n\n\[email protected](\"X, Y, expected\", test_validity_normalized)\ndef test_compute_matrix_normalized(X, Y, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=N_JOBS,\n n_iter=3,\n normalize=True,\n node_label=\"residue\",\n biased=True,\n verbose=False,\n )\n K_fastwlk = wl_kernel.compute_matrix(X, Y)\n np.testing.assert_array_almost_equal(K_fastwlk, expected, decimal=8)\n\n\nKX_norm_unbiased = np.array(\n [\n [0.0, 0.73037069, 0.74368576],\n [0.73037069, 0.0, 0.71130753],\n [0.74368576, 0.71130753, 0.0],\n ]\n)\n\ntest_validity_normalized_unbiased = [\n (graphs[:3], graphs[:3], KX_norm_unbiased),\n (graphs[:3], graphs[3:7], KXY_norm),\n]\n\n\[email protected](\"X, Y, expected\", test_validity_normalized)\ndef test_unbiased_normalized(X, Y, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=N_JOBS,\n n_iter=3,\n normalize=True,\n node_label=\"residue\",\n biased=False,\n verbose=True,\n )\n K_fastwlk = wl_kernel.compute_matrix(X, Y)\n np.testing.assert_array_almost_equal(K_fastwlk, expected, decimal=8)\n\n\ntest_positive_eig_data = [\n (graphs),\n]\n\n\[email protected](\"X\", test_positive_eig_data)\ndef test_positive_eig(X):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=N_JOBS,\n n_iter=3,\n normalize=True,\n node_label=\"residue\",\n biased=False,\n verbose=True,\n )\n K = wl_kernel.compute_matrix(X)\n min_eig = np.real(np.min(np.linalg.eig(K)[0]))\n np.testing.assert_array_less(default_eigvalue_precision, min_eig)\n\n\nKXY_norm_gram = np.array(\n [\n [\n 1.0,\n 0.73037069,\n 0.74368576,\n 0.79378634,\n 0.71006284,\n 0.74347089,\n 0.75475861,\n ],\n [\n 0.73037069,\n 1.0,\n 0.71130753,\n 0.86111583,\n 0.77691583,\n 0.85014706,\n 0.79339745,\n ],\n [\n 0.74368576,\n 0.71130753,\n 1.0,\n 0.78030479,\n 0.6632504,\n 0.72261873,\n 0.74512092,\n ],\n [\n 0.79378634,\n 0.86111583,\n 0.78030479,\n 1.0,\n 0.84933868,\n 0.89514908,\n 0.86589011,\n ],\n [\n 0.71006284,\n 0.77691583,\n 0.6632504,\n 0.84933868,\n 1.0,\n 0.86015441,\n 0.84185343,\n ],\n [\n 0.74347089,\n 0.85014706,\n 0.72261873,\n 0.89514908,\n 0.86015441,\n 1.0,\n 0.85020836,\n ],\n [\n 0.75475861,\n 0.79339745,\n 0.74512092,\n 0.86589011,\n 0.84185343,\n 0.85020836,\n 1.0,\n ],\n ]\n)\n\ntest_data_gram = [\n (graphs[:3], graphs[:3], KX_norm),\n (graphs[:3], graphs[3:7], KXY_norm_gram),\n]\n\n\[email protected](\"X, Y, expected\", test_data_gram)\ndef test_compute_gram_matrix_normalized(X, Y, expected):\n wl_kernel = WeisfeilerLehmanKernel(\n n_jobs=N_JOBS,\n n_iter=3,\n node_label=\"residue\",\n normalize=True,\n biased=True,\n verbose=False,\n )\n K_fastwlk = wl_kernel.compute_gram_matrix(X, Y)\n np.testing.assert_array_almost_equal(K_fastwlk, expected, decimal=8)\n"
] | [
[
"numpy.array",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_array_less",
"numpy.linalg.eig"
]
] |
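The tests above exercise `compute_wl_hashes`, which hashes node neighbourhoods iteratively. A self-contained sketch of one Weisfeiler-Lehman relabeling round (illustrating the idea, not fastwlk's actual implementation):

```python
import hashlib
from collections import Counter

def wl_iteration(adj, labels):
    """One WL relabeling step: a node's new label hashes its own label
    together with the sorted multiset of its neighbours' labels."""
    new_labels = {}
    for node, neighbours in adj.items():
        signature = labels[node] + "".join(sorted(labels[n] for n in neighbours))
        new_labels[node] = hashlib.sha1(signature.encode()).hexdigest()[:8]
    return new_labels

adj = {0: [1], 1: [0, 2], 2: [1]}   # path graph 0-1-2 as an adjacency dict
labels = {0: "A", 1: "A", 2: "B"}   # node labels, e.g. residues
for _ in range(2):                  # n_iter=2 relabeling rounds
    labels = wl_iteration(adj, labels)
print(Counter(labels.values()))     # per-graph label histogram
```

The kernel value between two graphs is then the inner product of their label histograms, which is why the unbiased variants above zero out the diagonal (self-similarity) terms.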
dair-iitd/CDNet | [
"5e9b03e898070d62ef3a5453d777b1f0af824504"
] | [
"code/utils/config.py"
] | [
"import argparse\nimport torch\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")\n\n\nparser = argparse.ArgumentParser(description=\"AggNet Model\")\nparser.add_argument(\n \"-bsz\", \"--batch\", help=\"Batch_size\", required=False, type=int, default=8\n)\nparser.add_argument(\n \"-emb\",\n \"--emb_dim\",\n help=\"Embedding Dimension Size\",\n required=False,\n type=int,\n default=200,\n)\nparser.add_argument(\n \"-ehd\",\n \"--enc_hid_dim\",\n help=\"Encoder Hidden Dimension Size\",\n required=False,\n type=int,\n default=100,\n)\nparser.add_argument(\n \"-dhd\",\n \"--dec_hid_dim\",\n help=\"Decoder Hidden Dimension Size\",\n required=False,\n type=int,\n default=200,\n) # Should always be 2 * enc_hid_dim\nparser.add_argument(\n \"-ats\", \"--attn_size\", help=\"Attention Size\", required=False, type=int, default=200\n)\nparser.add_argument(\n \"-e\",\n \"--num_epochs\",\n help=\"Number of Epochs to Run\",\n required=False,\n type=int,\n default=40,\n)\nparser.add_argument(\n \"-d\", \"--dropout\", help=\"Dropout Rate\", required=False, type=float, default=0.05\n)\nparser.add_argument(\n \"-gd\",\n \"--gru_drop\",\n help=\"GRU Dropout Rate\",\n required=False,\n type=float,\n default=0.2,\n)\nparser.add_argument(\n \"-lr\", \"--lr\", help=\"Learinng Rate\", required=False, type=float, default=2.5e-4\n)\nparser.add_argument(\n \"-ld\", \"--load\", help=\"Load Model Checkpoint\", required=False, default=None\n)\nparser.add_argument(\n \"-cp\",\n \"--ckpt_path\",\n help=\"Path to save Checkpoints\",\n required=False,\n default=\"./../../../scratch/Aggnet_ckpts/ablations_cnn\",\n)\nparser.add_argument(\n \"-n\", \"--name\", help=\"Name Your Model\", required=False, default=\"default\"\n)\nparser.add_argument(\n \"-clip\", \"--clip\", help=\"gradient clipping\", required=False, type=float, default=10\n)\nparser.add_argument(\n \"-gpu\",\n \"--gpu\",\n help=\"Run in GPU or not\",\n required=False,\n type=str2bool,\n default=True,\n)\nparser.add_argument(\n \"-m\", \"--model\", help=\"1 is MLM, 2 is MLM+GLMP\", required=False, type=int, default=2\n)\nparser.add_argument(\n \"-ds\",\n \"--dataset\",\n help=\"1 is Incar, 2 is Camrest, 3 is MultiWoz\",\n required=False,\n type=int,\n default=1,\n)\nparser.add_argument(\n \"-dp\",\n \"--data\",\n help=\"Dataset path\",\n required=False,\n default=\"../Incar_sketch_standard/\",\n)\nparser.add_argument(\n \"-lg\",\n \"--logs\",\n help=\"Print Logs or not\",\n required=False,\n type=str2bool,\n default=False,\n)\nparser.add_argument(\n \"-test\",\n \"--test\",\n help=\"Test or Train\",\n required=False,\n type=str2bool,\n default=False,\n)\nparser.add_argument(\n \"-hp\", \"--hops\", help=\"Number of Memory Hops\", required=False, type=int, default=3\n)\nparser.add_argument(\n \"-s\", \"--seed\", help=\"Enter Manual Seed\", required=False, type=int, default=None\n)\nparser.add_argument(\n \"-v\", \"--vocab\", help=\"Vocab Name\", required=False, default=\"vocab.json\"\n)\nparser.add_argument(\n \"-tf\",\n \"--teacher_forcing\",\n help=\"Teacher Forcing\",\n type=float,\n required=False,\n default=0.9,\n)\nparser.add_argument(\n \"-abl_g\",\n \"--abl_glove\",\n help=\"Glove Embedding Use or not\",\n required=False,\n type=str2bool,\n default=True,\n)\nparser.add_argument(\n \"-abl_bs\",\n \"--abl_beta_supvis\",\n help=\"Beta 
Supervision Loss False is disable, True is both labels\",\n required=False,\n type=str2bool,\n default=True,\n)\nparser.add_argument(\n \"-abl_gb\",\n \"--abl_global_beta\",\n help=\"Global Beta enable/disable\",\n required=False,\n type=str2bool,\n default=True,\n)\nparser.add_argument(\n \"-abl_wd\",\n \"--abl_window\",\n help=\"Window CNN enable/disable\",\n required=False,\n type=int,\n default=1,\n)\nparser.add_argument(\n \"-abl_sml\",\n \"--abl_similarity_loss\",\n help=\"Similarity Loss enable/disable\",\n required=False,\n type=str2bool,\n default=True,\n)\nparser.add_argument(\n \"-abl_gle\",\n \"--abl_glove_rnd\",\n help=\"0 no glove, 1 only non entities glove, 2 all glove\",\n required=False,\n type=int,\n default=2,\n)\n\nargs = vars(parser.parse_args())\nprint(str(args), flush=True)\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nif not args[\"gpu\"]:\n DEVICE = torch.device(\"cpu\")\nif DEVICE.type == \"cuda\":\n USE_CUDA = True\nelse:\n USE_CUDA = False\nprint(\"Using Device:\", DEVICE, flush=True)\n\n# print(\"USE_CUDA: \"+str(USE_CUDA))\n"
] | [
[
"torch.device",
"torch.cuda.is_available"
]
] |
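The `str2bool` helper above exists because `type=bool` in argparse treats any non-empty string (including `"no"` and `"false"`) as `True`. A minimal runnable demonstration of the pattern, with a toy two-flag parser:

```python
import argparse

def str2bool(v):
    # same pattern as the config module above
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")

parser = argparse.ArgumentParser()
parser.add_argument("-gpu", "--gpu", type=str2bool, default=True)
parser.add_argument("-bsz", "--batch", type=int, default=8)

print(vars(parser.parse_args(["-gpu", "no", "-bsz", "16"])))
# {'gpu': False, 'batch': 16} -- plain type=bool would have parsed "no" as True
```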
0shimax/pytorch-sam-lr | [
"bc9c25a65a98d360306e8b9aec79da6dd739f483"
] | [
"src/data/loader.py"
] | [
"from typing import List\nimport pandas\nimport numpy\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass CvrDataset(Dataset):\n def __init__(self,\n features: numpy.matrix,\n labels: numpy.ndarray,\n transform=None):\n super().__init__()\n self.features = features\n self.labels = labels\n self.transform = transform\n\n def __len__(self) -> int:\n return self.features.shape[0]\n\n def __getitem__(self, idx:int) -> (torch.Tensor, torch.Tensor):\n line = self.features[idx].toarray()\n label = self.labels[idx]\n return torch.FloatTensor(line), torch.LongTensor([label])\n \n\ndef loader(dataset:Dataset, batch_size:int, shuffle:bool=True) -> torch.utils.data.DataLoader:\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=4)\n return loader"
] | [
[
"torch.FloatTensor",
"torch.LongTensor",
"torch.utils.data.DataLoader"
]
] |
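A short usage sketch for the dataset/loader pair above, on made-up sparse data. The `from loader import ...` path is an assumption (the module name in this repo is `src/data/loader.py`), and the `__main__` guard matters because `loader()` spawns `num_workers=4` worker processes:

```python
import numpy as np
from scipy.sparse import csr_matrix

from loader import CvrDataset, loader  # module above; import path is an assumption

if __name__ == "__main__":
    # hypothetical toy data: 6 samples, 4 sparse features, binary labels
    features = csr_matrix(np.random.rand(6, 4), dtype=np.float32)
    labels = np.array([0, 1, 0, 1, 1, 0])

    for x, y in loader(CvrDataset(features, labels), batch_size=2):
        print(x.shape, y.shape)  # torch.Size([2, 1, 4]) torch.Size([2, 1])
        break
```

Note the extra middle dimension of 1: `features[idx].toarray()` on a sparse row yields shape `(1, n_features)`, which `torch.FloatTensor` preserves.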
exityan/PySyft | [
"6477f64b63dc285059c3766deab3993653cead2e"
] | [
"packages/grid/apps/worker/src/main/utils/monkey_patch.py"
] | [
"# stdlib\nimport math\n\n# third party\nimport numpy\n\n\ndef mask_payload_fast(self, payload: bytes) -> str:\n \"\"\"Monkey patch geventwebsocket.websocket.Header.mask_payload(). Version\n currently in geventwebsocket does a very slow python for loop to mask the\n payload.\n\n We take advantage of numpy to do this faster.\n \"\"\"\n key = (self.mask * int(math.ceil(float(len(payload)) / float(len(self.mask)))))[\n : len(payload)\n ]\n\n # Select the type size in bytes\n if len(payload) % 8 == 0:\n dt = numpy.dtype(\"<Q\")\n else:\n dt = numpy.dtype(\"B\")\n\n return numpy.bitwise_xor(\n numpy.fromstring(key, dtype=dt), numpy.fromstring(payload, dtype=dt)\n ).tostring()\n"
] | [
[
"numpy.fromstring",
"numpy.dtype"
]
] |
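The vectorized masking above (now annotated as returning `bytes`, which is what `.tostring()` actually produces) can be checked against the per-byte loop it replaces. A self-contained sketch showing only the byte-dtype path, using `frombuffer`/`tobytes` in place of the deprecated `fromstring`/`tostring`:

```python
import numpy as np

def mask_naive(payload: bytes, mask: bytes) -> bytes:
    # reference per-byte XOR masking, as specified in RFC 6455, section 5.3
    return bytes(b ^ mask[i % 4] for i, b in enumerate(payload))

def mask_vectorized(payload: bytes, mask: bytes) -> bytes:
    # tile the 4-byte mask to payload length, then XOR in one numpy call
    key = (mask * (len(payload) // len(mask) + 1))[:len(payload)]
    return np.bitwise_xor(
        np.frombuffer(key, dtype="B"), np.frombuffer(payload, dtype="B")
    ).tobytes()

payload, mask = b"hello websocket", b"\x37\xfa\x21\x3d"
assert mask_naive(payload, mask) == mask_vectorized(payload, mask)
print(mask_vectorized(payload, mask).hex())
```

The `"<Q"` branch in the patch is the same idea with 8-byte words, valid only when the payload length is a multiple of 8.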
indranil1997/CAL | [
"36000a2ed08ec2d1f6ae23f9d7ffd164e72d4aba"
] | [
"PythonClient/agents/CAL_agent/perception/model_functions.py"
] | [
"import os, re\nfrom keras.models import Model\nfrom keras.layers import Input, BatchNormalization, Conv1D, TimeDistributed, LSTM, \\\n multiply, Cropping1D, GRU, CuDNNGRU\nfrom keras.layers.core import Flatten, Dense, Dropout, Lambda\nfrom keras.applications import vgg16\nfrom keras import backend as K\nimport numpy as np\nimport tensorflow as tf\n\nDATA_HOME_DIR = os.path.dirname(__file__) + '/model_data/'\nMODEL_PATH = DATA_HOME_DIR + '/models/'\nWEIGHTS_PATH = DATA_HOME_DIR + '/results/'\nlast_layer_keys = ['0_red_light', '1_hazard_stop', '2_speed_sign',\\\n '3_relative_angle', '4_center_distance', '5_veh_distance']\n[RED_LIGHT, HAZARD_STOP, SPEED_SIGN, RELATIVE_ANGLE, CENTER_DISTANCE, VEH_DISTANCE] = last_layer_keys\n\ndef split_model(base_model, split_idx):\n \"\"\"\n split the given model into two model instances\n \"\"\"\n # rebuild the front model\n front_model = Model(inputs=base_model.input,\n outputs=base_model.get_layer(index=split_idx).output)\n front_out = base_model.get_layer(index=split_idx).output_shape\n\n # build the new \"tail\" model\n last_layers = base_model.layers[split_idx+1:]\n inp = Input(shape=front_out[1:])\n x = inp\n for layer in last_layers: x = layer(x)\n out = x\n tail_model = Model(inp, out)\n\n return front_model, tail_model\n\n\ndef get_conv_model():\n \"\"\"\n get the front model and tail model of a conv model\n and the used preprocessing function\n \"\"\"\n base_model = vgg16.VGG16(include_top=False,\n weights='imagenet',\n input_shape=(100,222,3))\n front_model, tail_model = split_model(base_model, -5)\n preprocessing = vgg16.preprocess_input\n\n return front_model, tail_model, preprocessing\n\n### parameter study\ndef get_task_block_params(name):\n return np.load(MODEL_PATH + name + '_params.npy').item()\n \ndef reload_model_from_episode(name):\n \"\"\"Load a model with a defined architecture from a specific training epoch\"\"\"\n if name == 'full_model_ep_3936':\n model = get_final_model()\n else:\n params = get_task_block_params(name)\n model = get_model_master(params)\n\n w_name = WEIGHTS_PATH + '{}.h5'.format(name)\n model.load_weights(w_name)\n\n return model, name\n\ndef conv_bn_dropout(x, p=0.1):\n x = BatchNormalization(axis=1)(x)\n x = Dropout(p)(x)\n return x\n\ndef dense_block(x, p=0.5, n=64):\n \"\"\"\n standard dense block (Dense - BatchNorm - Dropout)\n x = input\n n = number of nodes in the dense layer\n p = dropout\n \"\"\"\n x_out = Dense(n, activation='relu', )(x)\n x_out = BatchNormalization()(x_out)\n x_out = Dropout(p)(x_out)\n return x_out\n\ndef vgg_to_timedistributed(model_type, seq_len, conv_dp):\n # get the tail_model\n _, tail_model, _ = get_conv_model()\n # get the new input shape\n conv_inp = tail_model.layers[0].input_shape[1:]\n inp_shape = (seq_len,) + conv_inp\n\n # turn into a time distributed layer\n # start at idx 1 (skip Input layer)\n inp = Input(shape=inp_shape)\n x = TimeDistributed(tail_model.layers[1], name=tail_model.layers[1].name)(inp)\n for i,l in enumerate(tail_model.layers[2:]):\n if conv_dp:\n x = TimeDistributed(BatchNormalization(axis=1), name='Batchnorm_{}'.format(i))(x)\n x = TimeDistributed(Dropout(.2), name='Conv_Dropout_{}'.format(i))(x)\n x = TimeDistributed(l, name=l.name)(x)\n prediction = TimeDistributed(Flatten())(x)\n m = Model(inputs=[inp], outputs=prediction)\n return m\n\ndef LSTM_block(x, p=0.5, n=64):\n x = CuDNNLSTM(n)(x)\n x = Dropout(p)(x)\n return x\n\ndef GRU_block(x, p=0.5, n=64):\n x = CuDNNGRU(n)(x)\n x = Dropout(p)(x)\n return x\n\ndef conv1D_block(x, p=0.5, n=64):\n 
\"\"\"\n standard dense block (Dense - BatchNorm - Dropout)\n x = input\n n = number of nodes in the dense layer\n p = dropout\n \"\"\"\n seq_len = int(x.shape[1])\n x = Conv1D(n, seq_len, activation='relu')(x)\n x = Lambda(lambda y: K.squeeze(y, 1))(x)\n x = BatchNormalization()(x)\n x = Dropout(p)(x)\n return x\n\ndef get_model_master(params):\n # set up\n predictions = []\n p = params['p']\n no_nodes = params['no_nodes']\n seq_len = params['seq_len']\n try: dilation = params['dilation']\n except KeyError: dilation = 1\n if params['block_type'] == 'dense': block = dense_block\n elif params['block_type'] == 'GRU': block = GRU_block\n elif params['block_type'] == 'LSTM': block = LSTM_block\n elif params['block_type'] == 'conv1D': block = conv1D_block\n try: conv_dp = params['conv_dp']\n except KeyError: conv_dp = False\n\n # bool tensor for directional switch\n bool_tensor = tf.constant([-1]*no_nodes + [0]*no_nodes + [1]*no_nodes)\n\n # determine the seq_len after dilation\n dilated_seq_len = seq_len/dilation\n if seq_len%dilation!=0: dilated_seq_len +=1\n\n # build lrcn\n model = vgg_to_timedistributed('VGG16', dilated_seq_len, conv_dp)\n x = model.output\n if params['block_type'] == 'dense':\n x = get_last_time_slice(x, dilated_seq_len)\n\n ### CLASSIFICATION\n # red_light\n x0 = block(x, p, no_nodes)\n predictions.append(Dense(2, activation='softmax', name=RED_LIGHT)(x0))\n # hazard_stop\n x1 = block(x, p, no_nodes)\n predictions.append(Dense(2, activation='softmax', name=HAZARD_STOP)(x1))\n # speed_sign\n x2 = block(x, p, no_nodes)\n predictions.append(Dense(4, activation='softmax', name=SPEED_SIGN)(x2))\n\n\n ### REGRESSION\n dir_input = Input(shape=(1,), name='dir_input')\n dir_bool = Lambda(lambda d: tf.equal(K.cast(d, 'int32'), bool_tensor))(dir_input)\n dir_bool = Lambda(lambda d: K.cast(d, 'float32'),)(dir_bool)\n # relative_angle\n x3 = block(x, p, no_nodes*3)\n x3 = multiply([x3, dir_bool])\n predictions.append(Dense(1, name=RELATIVE_ANGLE)(x3))\n # center_distance\n x4 = block(x, p, no_nodes*3)\n x4 = multiply([x4, dir_bool])\n predictions.append(Dense(1, name=CENTER_DISTANCE)(x4))\n # veh_distance\n x5 = block(x, p, no_nodes)\n predictions.append(Dense(1, name=VEH_DISTANCE)(x5))\n\n model = Model(inputs=[model.input, dir_input], outputs=predictions)\n return model\n\n### load from parameter study , build final models\ndef get_prev_layer(model, layer, list_idx=0):\n inp = layer.input\n if isinstance(inp, list): inp = inp[list_idx]\n prev_layer_name = re.findall('(.*?)/', inp.name)[0]\n return model.get_layer(prev_layer_name)\n\ndef get_task_block(model_name):\n # load the model at the specified episode\n model, name = reload_model_from_episode(model_name)\n params = get_task_block_params(model_name)\n\n # get all layers in the task block\n l = model.get_layer(model_name)\n layers = []\n\n # get all layers\n while True:\n layers.append(l)\n l = get_prev_layer(model, l)\n if isinstance(l, TimeDistributed): break\n\n # reverse the order\n layers = layers[::-1]\n\n if model_name==RELATIVE_ANGLE \\\n or model_name==CENTER_DISTANCE:\n # get the directional layers\n l = model.get_layer(model_name)\n dir_layers = []\n while True:\n dir_layers.append(l)\n try: l = get_prev_layer(model, l, list_idx=1)\n except: break # reached dir_input layer\n\n # reverse the order\n dir_layers = dir_layers[::-1]\n\n # standard task block\n inp = Input(shape=layers[0].input_shape[1:])\n x0 = layers[0](inp)\n i = 1\n while True:\n x0 = layers[i](x0)\n if isinstance(layers[i], Dropout): break\n i += 
1\n\n # dir input\n dir_input = Input(shape=(1,), name='dir_input')\n x1 = dir_layers[0](dir_input)\n x1 = dir_layers[1](x1)\n # multiply and output\n x = multiply([x0, x1])\n x = layers[-1](x)\n pred = x\n task_model = Model(inputs=[inp, dir_input], outputs=[pred])\n\n else:\n # build the model\n inp = Input(shape=layers[0].input_shape[1:])\n x = layers[0](inp)\n for l in layers[1:]: x = l(x)\n pred = x\n task_model = Model(inputs=[inp], outputs=[pred])\n\n print(\"Built Task Block {}\".format(model_name))\n\n return task_model, params\n\ndef get_sequence_idcs(seq_len, dilation):\n seq_idcs = np.arange(seq_len)\n rest = seq_len%dilation\n if not rest: start_idx = dilation-1\n else: start_idx = rest - 1\n seq_idcs = seq_idcs[start_idx::dilation]\n return list(seq_idcs.astype('int32'))\n\ndef get_dilated_sequence(x, dilation):\n import tensorflow as tf\n seq_len = int(x.shape[1])\n idcs = get_sequence_idcs(seq_len,dilation)\n x = Lambda(lambda y: tf.gather(y, idcs, axis=1))(x)\n return x\n\ndef get_last_time_slice(x, seq_len):\n x = Cropping1D((seq_len-1,0))(x)\n x = Lambda(lambda y: K.squeeze(y, 1))(x)\n return x\n\ndef get_time_slice(x, slice_len):\n seq_len = int(x.shape[1])\n x = Cropping1D((seq_len - slice_len,0))(x)\n return x\n\ndef get_x_sequence(x, seq_len, dilation):\n x = get_time_slice(x, seq_len)\n x = get_dilated_sequence(x, dilation)\n return x\n\ndef get_time_dist_model(model_name, seq_len):\n model, name = reload_model_from_episode(model_name)\n\n # get the layers inside the time distribution wrapper\n layers = [model.layers[0]]\n for l in model.layers[1:]:\n if not type(l)==TimeDistributed: break\n layers.append(l.layer)\n\n # get the new input shape\n conv_inp = layers[0].input_shape[2:]\n inp_shape = (seq_len,) + conv_inp\n\n # gee\n inp = Input(shape=inp_shape)\n x = TimeDistributed(layers[1], name=layers[1].name)(inp)\n for i,l in enumerate(layers[2:]):\n x = TimeDistributed(l, name=l.name)(x)\n prediction = x\n m = Model(inputs=[inp], outputs=prediction)\n return m\n\ndef get_final_model():\n # set up\n predictions = []\n dir_input = Input(shape=(1,), name='dir_input')\n \n # load all the blocks\n b0, p0 = get_task_block(RED_LIGHT)\n b1, p1 = get_task_block(HAZARD_STOP)\n b2, p2 = get_task_block(SPEED_SIGN)\n b3, p3 = get_task_block(RELATIVE_ANGLE)\n b4, p4 = get_task_block(CENTER_DISTANCE)\n b5, p5 = get_task_block(VEH_DISTANCE)\n\n # build lrcn\n model = get_time_dist_model(RELATIVE_ANGLE, 14)\n x = model.output\n\n # get the predictions\n x0 = get_x_sequence(x, p0['seq_len'], p0['dilation'])\n predictions.append(b0([x0]))\n x1 = get_x_sequence(x, p1['seq_len'], p1['dilation'])\n predictions.append(b1([x1]))\n x2 = get_x_sequence(x, p2['seq_len'], p2['dilation'])\n predictions.append(b2([x2]))\n x3 = get_x_sequence(x, p3['seq_len'], p3['dilation'])\n predictions.append(b3([x3, dir_input]))\n x4 = get_x_sequence(x, p4['seq_len'], p4['dilation'])\n predictions.append(b4([x4, dir_input]))\n x5 = get_x_sequence(x, p5['seq_len'], p5['dilation'])\n predictions.append(b5([x5]))\n\n model = Model(inputs=[model.input, dir_input],\n outputs=predictions)\n return model\n\n\n"
] | [
[
"tensorflow.constant",
"numpy.arange",
"numpy.load",
"tensorflow.gather"
]
] |
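The temporal dilation in `get_model_master` (the ceil-division `dilated_seq_len` fixed above) pairs with `get_sequence_idcs`, which picks every `dilation`-th frame while always keeping the most recent one. A standalone replica that can be run without Keras to sanity-check the alignment:

```python
import numpy as np

def get_sequence_idcs(seq_len, dilation):
    # mirror of the helper above: keep every `dilation`-th frame, aligned so
    # that the most recent frame (index seq_len - 1) is always included
    seq_idcs = np.arange(seq_len)
    rest = seq_len % dilation
    start_idx = dilation - 1 if not rest else rest - 1
    return list(seq_idcs[start_idx::dilation].astype("int32"))

print(get_sequence_idcs(14, 4))  # [1, 5, 9, 13] -> dilated length = ceil(14/4) = 4
print(get_sequence_idcs(12, 4))  # [3, 7, 11]    -> dilated length = 12 // 4 = 3
```

In both cases the number of kept indices equals the `dilated_seq_len` that sizes the time-distributed input, which is exactly why the division must round up.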
mrware91/PhilTransA-TRXS-Limits | [
"5592c6c66276cd493d10f066aa636aaf600d3a00"
] | [
"Libraries/trxsToolBox/chiSquare/sphericalBesselChiSquare.py"
] | [
"\"\"\"\nConverts input data from momentum to real space by fitting to a spherical\nbessel model\nAuthor: Matthew R. Ware ([email protected])\nDescription: Tools to convert noisy, undersampled Fourier-space distributions\ninto real-space images.\n\"\"\"\n\nimport numpy as np\nfrom scipy import optimize, special\nfrom chiSquare import *\n\nimport sys\nsys.path.insert(0, '../formFactors')\nfrom formFactors import *\n\nclass sphericalBesselChiSquare(chiSquareMinimization):\n def __init__(self, Qs, Atoms, meanSignal,\n stdSignal, legendreOrder=0,\n moleculeType='homonuclear', reconType='CS',\n rMax=10):\n \"\"\"\n Initializes the reconstructor.\n\n Args:\n Qs: The grid of momentum transfer coordinates\n Atoms: The atoms in your molecule*\n meanSignal: The measured signal at the coordinates Qs\n stdSignal: The standard deviation of that signal\n\n *You'll need to add the definition of f(Q) for your molecule to the library\n\n Returns:\n The initialized reconstructor object\n \"\"\"\n # Initialize the reconstruction parameters\n self.Qs = Qs\n N = Qs.shape[0]\n RMAX = np.pi/(Qs[1]-Qs[0])\n DR = 2*np.pi/(Qs.max()-Qs.min())\n if RMAX > rMax:\n self.Rs = np.arange(DR,rMax+DR,DR)\n else:\n self.Rs = np.arange(DR,RMAX+DR,DR)\n self.Atoms = Atoms\n self.legendreOrder = legendreOrder\n self.model = self.generate_scattering_model()\n\n # Run initializaiton from parent class\n super(sphericalBesselChiSquare, self).__init__(meanSignal,\n stdSignal, self.model,\n reconType)\n\n def setBounds(self,boundsFunction):\n self.bounds = boundsFunction(self.Rs)\n\n def generate_scattering_model(self):\n QQ, RR = np.meshgrid(self.Qs, self.Rs)\n return fQ(QQ,self.Atoms[0])**2*special.spherical_jn(self.legendreOrder, QQ*RR)*RR**2\n"
] | [
[
"numpy.arange",
"scipy.special.spherical_jn",
"numpy.meshgrid"
]
] |
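A toy version of `generate_scattering_model()` that can be run standalone: a constant form factor stands in for the molecule-specific `fQ(Q, atom)` of the original code (an assumption, since the real form factors live in the `formFactors` library):

```python
import numpy as np
from scipy import special

Qs = np.linspace(0.5, 10.0, 64)            # momentum-transfer grid
DR = 2 * np.pi / (Qs.max() - Qs.min())     # real-space step, as in __init__
Rs = np.arange(DR, 10 + DR, DR)            # real-space grid up to rMax = 10

QQ, RR = np.meshgrid(Qs, Rs)
fQ = 1.0                                    # dummy constant form factor (assumption)
model = fQ**2 * special.spherical_jn(0, QQ * RR) * RR**2  # legendreOrder = 0
print(model.shape)  # (len(Rs), len(Qs)): one spherical-Bessel basis row per R
```

Each row of `model` is a j0(QR)R^2 basis function; the chi-square fit then expresses the measured Q-space signal as a nonnegative combination of these rows, yielding a real-space pair distribution.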
Stargrazer82301/ChrisFuncs | [
"8d577fa74123e742ab5360fd6f90337cbc8ecddb"
] | [
"ChrisFuncs/FromGitHub/martynbristow.py"
] | [
"import numpy\n\ndef rebin(array, dimensions=None, scale=None):\n \"\"\" Return the array ``array`` to the new ``dimensions`` conserving flux the flux in the bins\n The sum of the array will remain the same\n\n >>> ar = numpy.array([\n [0,1,2],\n [1,2,3],\n [2,3,4]\n ])\n >>> rebin(ar, (2,2))\n array([\n [1.5, 4.5]\n [4.5, 7.5]\n ])\n Raises\n ------\n\n AssertionError\n If the totals of the input and result array don't agree, raise an error because computation may have gone wrong\n\n Reference\n =========\n +-+-+-+\n |1|2|3|\n +-+-+-+\n |4|5|6|\n +-+-+-+\n |7|8|9|\n +-+-+-+\n \"\"\"\n if dimensions is not None:\n if isinstance(dimensions, float):\n dimensions = [int(dimensions)] * len(array.shape)\n elif isinstance(dimensions, int):\n dimensions = [dimensions] * len(array.shape)\n elif len(dimensions) != len(array.shape):\n raise RuntimeError('')\n elif scale is not None:\n if isinstance(scale, float) or isinstance(scale, int):\n dimensions = map(int, map(round, map(lambda x: x*scale, array.shape)))\n elif len(scale) != len(array.shape):\n raise RuntimeError('')\n else:\n raise RuntimeError('Incorrect parameters to rebin.\\n\\trebin(array, dimensions=(x,y))\\n\\trebin(array, scale=a')\n \"\"\"\n print dimensions\n print \"Rebinning to Dimensions: %s, %s\" % tuple(dimensions)\n \"\"\"\n import itertools\n dY, dX = map(divmod, map(float, array.shape), dimensions)\n\n result = numpy.zeros(dimensions)\n for j, i in itertools.product(*map(xrange, array.shape)):\n (J, dj), (I, di) = divmod(j*dimensions[0], array.shape[0]), divmod(i*dimensions[1], array.shape[1])\n (J1, dj1), (I1, di1) = divmod(j+1, array.shape[0]/float(dimensions[0])), divmod(i+1, array.shape[1]/float(dimensions[1]))\n\n # Moving to new bin\n # Is this a discrete bin?\n dx,dy=0,0\n if (I1-I == 0) | ((I1-I == 1) & (di1==0)):\n dx = 1\n else:\n dx=1-di1\n if (J1-J == 0) | ((J1-J == 1) & (dj1==0)):\n dy=1\n else:\n dy=1-dj1\n # Prevent it from allocating outide the array\n I_=min(dimensions[1]-1,I+1)\n J_=min(dimensions[0]-1,J+1)\n result[J, I] += array[j,i]*dx*dy\n result[J_, I] += array[j,i]*(1-dy)*dx\n result[J, I_] += array[j,i]*dy*(1-dx)\n result[J_, I_] += array[j,i]*(1-dx)*(1-dy)\n allowError = 0.1\n assert (array.sum() < result.sum() * (1+allowError)) & (array.sum() >result.sum() * (1-allowError))\n return result"
] | [
[
"numpy.zeros"
]
] |
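The general `rebin()` above (now ported from Python 2: `range` for `xrange`, list comprehensions for lazy `map`, and a completed per-axis `scale` branch) distributes fractional flux across bin boundaries. For the common case where the new shape divides the old one evenly, the flux-conserving property reduces to block sums, which makes it easy to verify:

```python
import numpy as np

def rebin_integer(array, factor):
    """Flux-conserving rebin for shapes that divide evenly: sum each
    factor x factor block (a special case of the general rebin() above)."""
    h, w = array.shape
    assert h % factor == 0 and w % factor == 0
    return array.reshape(h // factor, factor, w // factor, factor).sum(axis=(1, 3))

ar = np.arange(16, dtype=float).reshape(4, 4)
small = rebin_integer(ar, 2)
assert ar.sum() == small.sum()  # the total "flux" is conserved
print(small)  # [[10. 18.] [42. 50.]]
```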
aekruijssen/RaceOn | [
"7636e712883bb8c166568614a3dcba45a702446e"
] | [
"Resources/selfdrivingcar/project_1_lane_finding_basic/lane_detection.py"
] | [
"import numpy as np\nimport cv2\nfrom Line import Line\n\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n\n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n\n # defining a blank mask to start with\n mask = np.zeros_like(img)\n\n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n\n return masked_image, mask\n\n\ndef hough_lines_detection(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,\n maxLineGap=max_line_gap)\n return lines\n\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n Returns resulting blend image computed as follows:\n\n initial_img * α + img * β + λ\n \"\"\"\n img = np.uint8(img)\n if len(img.shape) is 2:\n img = np.dstack((img, np.zeros_like(img), np.zeros_like(img)))\n\n return cv2.addWeighted(initial_img, α, img, β, λ)\n\n\ndef compute_lane_from_candidates(line_candidates, img_shape):\n \"\"\"\n Compute lines that approximate the position of both road lanes.\n\n :param line_candidates: lines from hough transform\n :param img_shape: shape of image to which hough transform was applied\n :return: lines that approximate left and right lane position\n \"\"\"\n\n # separate candidate lines according to their slope\n pos_lines = [l for l in line_candidates if l.slope > 0]\n neg_lines = [l for l in line_candidates if l.slope < 0]\n\n # interpolate biases and slopes to compute equation of line that approximates left lane\n # median is employed to filter outliers\n neg_bias = np.median([l.bias for l in neg_lines]).astype(int)\n neg_slope = np.median([l.slope for l in neg_lines])\n x1, y1 = 0, neg_bias\n x2, y2 = -np.int32(np.round(neg_bias / neg_slope)), 0\n left_lane = Line(x1, y1, x2, y2)\n\n # interpolate biases and slopes to compute equation of line that approximates right lane\n # median is employed to filter outliers\n lane_right_bias = np.median([l.bias for l in pos_lines]).astype(int)\n lane_right_slope = np.median([l.slope for l in pos_lines])\n x1, y1 = 0, lane_right_bias\n x2, y2 = np.int32(np.round((img_shape[0] - lane_right_bias) / lane_right_slope)), img_shape[0]\n right_lane = Line(x1, y1, x2, y2)\n\n return left_lane, right_lane\n\n\ndef get_lane_lines(color_image, solid_lines=True):\n \"\"\"\n This function take as input a color road frame and tries to infer the lane lines in the image.\n :param color_image: input frame\n :param solid_lines: if True, only selected lane lines are returned. 
If False, all candidate lines are returned.\n :return: list of (candidate) lane lines.\n \"\"\"\n # resize to 960 x 540\n color_image = cv2.resize(color_image, (960, 540))\n\n # convert to grayscale\n img_gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)\n\n # perform gaussian blur\n img_blur = cv2.GaussianBlur(img_gray, (17, 17), 0)\n\n # perform edge detection\n img_edge = cv2.Canny(img_blur, threshold1=50, threshold2=80)\n\n # perform hough transform\n detected_lines = hough_lines_detection(img=img_edge,\n rho=2,\n theta=np.pi / 180,\n threshold=1,\n min_line_len=15,\n max_line_gap=5)\n\n # convert (x1, y1, x2, y2) tuples into Lines\n detected_lines = [Line(l[0][0], l[0][1], l[0][2], l[0][3]) for l in detected_lines]\n\n # if 'solid_lines' infer the two lane lines\n if solid_lines:\n candidate_lines = []\n for line in detected_lines:\n # consider only lines with slope between 30 and 60 degrees\n if 0.5 <= np.abs(line.slope) <= 2:\n candidate_lines.append(line)\n # interpolate lines candidates to find both lanes\n lane_lines = compute_lane_from_candidates(candidate_lines, img_gray.shape)\n else:\n # if not solid_lines, just return the hough transform output\n lane_lines = detected_lines\n\n return lane_lines\n\n\ndef smoothen_over_time(lane_lines):\n \"\"\"\n Smooth the lane line inference over a window of frames and returns the average lines.\n \"\"\"\n\n avg_line_lt = np.zeros((len(lane_lines), 4))\n avg_line_rt = np.zeros((len(lane_lines), 4))\n\n for t in range(0, len(lane_lines)):\n avg_line_lt[t] += lane_lines[t][0].get_coords()\n avg_line_rt[t] += lane_lines[t][1].get_coords()\n\n return Line(*np.mean(avg_line_lt, axis=0)), Line(*np.mean(avg_line_rt, axis=0))\n\n\ndef color_frame_pipeline(frames, solid_lines=True, temporal_smoothing=True):\n \"\"\"\n Entry point for lane detection pipeline. Takes as input a list of frames (RGB) and returns an image (RGB)\n with overlaid the inferred road lanes. Eventually, len(frames)==1 in the case of a single image.\n \"\"\"\n is_videoclip = len(frames) > 0\n\n img_h, img_w = frames[0].shape[0], frames[0].shape[1]\n\n lane_lines = []\n for t in range(0, len(frames)):\n inferred_lanes = get_lane_lines(color_image=frames[t], solid_lines=solid_lines)\n lane_lines.append(inferred_lanes)\n\n if temporal_smoothing and solid_lines:\n lane_lines = smoothen_over_time(lane_lines)\n else:\n lane_lines = lane_lines[0]\n\n # prepare empty mask on which lines are drawn\n line_img = np.zeros(shape=(img_h, img_w))\n\n # draw lanes found\n for lane in lane_lines:\n lane.draw(line_img)\n\n # keep only region of interest by masking\n vertices = np.array([[(50, img_h),\n (450, 310),\n (490, 310),\n (img_w - 50, img_h)]],\n dtype=np.int32)\n img_masked, _ = region_of_interest(line_img, vertices)\n\n # make blend on color image\n img_color = frames[-1] if is_videoclip else frames[0]\n img_blend = weighted_img(img_masked, img_color, α=0.8, β=1., λ=0.)\n\n return img_blend\n"
] | [
[
"numpy.uint8",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.median",
"numpy.round",
"numpy.mean",
"numpy.abs"
]
] |
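`compute_lane_from_candidates` above collapses many noisy Hough segments into one line per lane by taking the median of the candidate slopes and biases, which resists outlier segments better than a mean. A standalone numpy sketch of that step, with made-up candidate values:

```python
import numpy as np

# Hypothetical left-lane candidates as (slope, bias) pairs from the Hough stage
neg_lines = [(-0.8, 520.0), (-0.75, 540.0), (-0.9, 510.0)]

neg_bias = np.median([b for _, b in neg_lines]).astype(int)  # 520
neg_slope = np.median([s for s, _ in neg_lines])             # -0.8

# Endpoints of the fitted line y = slope * x + bias in image coordinates:
# one at the left image border (x = 0) and one where the line meets y = 0
x1, y1 = 0, int(neg_bias)
x2, y2 = -int(np.round(neg_bias / neg_slope)), 0
print((x1, y1), (x2, y2))  # (0, 520) (650, 0)
```

A single bad segment with a wildly wrong slope would shift a mean-based fit every frame; the median leaves the result unchanged as long as fewer than half of the candidates are outliers.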
ENVIRO-Module/nis-backend | [
"fd86cf30f79f53cdccddd2a5479507d32f914d4e"
] | [
"nexinfosys/model_services/workspace.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n* Registry of objects. Add, remove, search\n* Support for high level operations: directly create and/or modify objects, calling the specification API. Create connections\n\"\"\"\nimport copy\nimport datetime\nimport json\nimport logging\nimport uuid\nfrom enum import Enum\nfrom typing import List, Union, Dict, NoReturn\n\nimport pandas as pd\n\nimport nexinfosys\nfrom nexinfosys.command_generators import Issue, IssueLocation, IType\nfrom nexinfosys.command_generators.parsers_factory import commands_container_parser_factory\nfrom nexinfosys.common.helper import create_dictionary, strcmp\nfrom nexinfosys.initialization import prepare_and_reset_database_for_tests\nfrom nexinfosys.model_services import IExecutableCommand, get_case_study_registry_objects\nfrom nexinfosys.model_services import State\nfrom nexinfosys.models.musiasem_concepts import ProblemStatement, FactorsRelationDirectedFlowObservation, Processor, \\\n Factor, Parameter, FactorInProcessorType\nfrom nexinfosys.models.musiasem_methodology_support import (User,\n CaseStudy,\n CaseStudyVersion,\n CaseStudyVersionSession,\n CommandsContainer,\n force_load,\n DBSession)\nfrom nexinfosys.serialization import serialize_state, deserialize_state\nfrom nexinfosys.solving import BasicQuery\nfrom nexinfosys.solving.flow_graph_solver import flow_graph_solver, evaluate_parameters_for_scenario, get_dataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass Identity:\n pass\n\n# class IdentityCredentials:\n# pass\n#\n# class NISystem:\n# def __init__(self):\n# self._authentication_service = None\n# self._authorization_service = None\n# self._base_url = None\n# self._configuration_manager = None\n# self._plugin_manager = None\n#\n#\n# def gather_credentials(self) -> IdentityCredentials:\n# return IdentityCredentials()\n#\n# def login(self, ic: IdentityCredentials):\n# # Check credentials using authentication service\n# return Identity()\n#\n# def logout(self, id: Identity):\n# pass\n\n\nclass CommandResult:\n pass\n\n# class SessionCreationAction(Enum): # Used in FlowFund\n# \"\"\"\n#\n# +--------+--------+---------+---------------------------------+\n# | branch | clone | restart | Behavior |\n# +--------+--------+---------+---------------------------------+\n# | True | True | True | Branch & New ReproducibleSession|\n# | True | True | False | Branch & Clone |\n# | True | False | True | Branch & New ReproducibleSession|\n# | True | False | False | Branch & New ReproducibleSession|\n# | False | True | True | New CS (not CS version) |\n# | False | True | False | New CS & Clone |\n# | False | False | True | Restart |\n# | False | False | False | Continue |\n# +--------+--------+---------+---------------------------------+\n# \"\"\"\n# BranchAndNewWS = 7\n# BranchAndCloneWS = 6\n# NewCaseStudy = 3\n# NewCaseStudyCopyFrom = 2\n# Restart = 1\n# Continue = 0\n# ---------------------------------------------------------------------------------------------------------------------\n\n# #####################################################################################################################\n# >>>> PRIMITIVE COMMAND & COMMAND GENERATOR PROCESSING FUNCTIONS <<<<\n# #####################################################################################################################\n\n\ndef executable_command_to_commands_container(e_cmd: IExecutableCommand):\n \"\"\"\n IExecutableCommand -> CommandsContainer\n\n The resulting CommandsContainer will be always in native (JSON) format, because the specification\n to 
construct an IExecutableCommand has been translated to this native format.\n\n :param command:\n :return:\n \"\"\"\n d = {\"command\": e_cmd._serialization_type,\n \"label\": e_cmd._serialization_label,\n \"content\": e_cmd.json_serialize()}\n\n return CommandsContainer.create(\"native\", \"application/json\", json.dumps(d).encode(\"utf-8\"))\n\n\ndef persistable_to_executable_command(p_cmd: CommandsContainer, limit=1000):\n \"\"\"\n A persistable command can be either a single command or a sequence of commands (like a spreadsheet). In the future\n it could even be a full script.\n\n Because the executable command is DIRECTLY executable, it is not possible to convert from persistable to a single\n executable command. But it is possible to obtain a list of executable commands, and this is the aim of this function\n\n The size of the list can be limited by the parameter \"limit\". \"0\" is for unlimited\n\n :param p_cmd:\n :return: A list of IExecutableCommand\n \"\"\"\n # Create commands generator from factory (from generator_type and file_type)\n state = State()\n commands_generator = commands_container_parser_factory(p_cmd.generator_type, p_cmd.file_type, p_cmd.file, state)\n\n # Loop over the IExecutableCommand instances\n issues_aggreg = []\n outputs = []\n count = 0\n for cmd, issues in commands_generator:\n # If there are syntax ERRORS, STOP!!!\n stop = False\n if issues and len(issues) > 0:\n for t in issues:\n if t[0] == 3: # Error\n stop = True\n if stop:\n break\n\n issues_aggreg.extend(issues)\n\n count += 1\n if count >= limit:\n break\n\n\ndef execute_command(state, e_cmd: \"IExecutableCommand\") -> nexinfosys.IssuesOutputPairType:\n if e_cmd:\n return e_cmd.execute(state)\n else:\n return [], None # issues, output\n\n\ndef execute_command_container(state, p_cmd: CommandsContainer, ignore_imports=False):\n return execute_command_container_file(state, p_cmd.generator_type, p_cmd.content_type, p_cmd.content, ignore_imports)\n\n\ndef execute_command_container_file(state, generator_type, file_type: str, file, ignore_imports):\n \"\"\"\n This could be considered the MAIN method of the processing.\n 1) Assuming an initial \"state\" (that can be clean or not),\n 2) loops over the commands represented in a \"file\" of some of the supported types (JSON, Spreadsheet)\n 2.1) Parses each command, returning an IExecutableCommand instance (containing the JSON definition inside)\n 2.2) Executes each command, if there are no error issues. 
Command execution can reads and modifies \"state\"\n\n :param generator_type: Which commands generator (parser + constructor of IExecutableCommand instances) is used\n :param file_type: The file format\n :param file: The file contents\n :return: Issues and outputs (no outputs still required, probably won't be needed)\n \"\"\"\n # Create commands generator from factory (from generator_type and file_type)\n commands_generator = commands_container_parser_factory(generator_type, file_type, file, state, None, None, ignore_imports)\n\n # Loop over the IExecutableCommand instances\n issues_aggreg = []\n outputs = []\n cmd_number = 0\n for cmd, issues in commands_generator:\n if len(issues) > 0:\n new_issues, errors_exist = transform_issues(issues, cmd, cmd_number)\n c = \"\\n\"\n logging.debug(f\"Issues:\\n{c.join([i.description for i in new_issues])}\")\n else:\n logging.debug(f\"{type(cmd)} {cmd._source_block_name if hasattr(cmd, '_source_block_name') else ''}; # syntax issues: {len(issues)}\")\n cmd_number += 1 # Command counter\n\n errors_exist = False\n\n if issues and len(issues) > 0:\n new_issues, errors_exist = transform_issues(issues, cmd, cmd_number)\n issues_aggreg.extend(new_issues)\n\n if errors_exist:\n break\n\n # ## COMMAND EXECUTION ## #\n issues, output = execute_command(state, cmd)\n\n if issues and len(issues) > 0:\n new_issues, errors_exist = transform_issues(issues, cmd, cmd_number)\n issues_aggreg.extend(new_issues)\n\n if output:\n outputs.append(output)\n\n if errors_exist:\n break\n\n return issues_aggreg, outputs\n\n\ndef transform_issues(issues: List[Union[dict, nexinfosys.Issue, tuple, Issue]], cmd, sheet_number: int) -> (List[Issue], bool):\n\n errors_exist = False\n new_issues: List[Issue] = []\n\n for i in issues:\n if isinstance(i, dict):\n issue = Issue(itype=IType(i[\"type\"]), description=i[\"message\"], ctype=i[\"c_type\"],\n location=IssueLocation(sheet_name=i[\"sheet_name\"], sheet_number=i[\"sheet_number\"]))\n elif isinstance(i, nexinfosys.Issue): # namedtuple\n issue = Issue(itype=i.type, description=i.message, ctype=i.c_type,\n location=IssueLocation(sheet_name=i.sheet_name, sheet_number=i.sheet_number))\n elif isinstance(i, tuple):\n issue = Issue(itype=IType(i[0]), description=i[1],\n location=IssueLocation(sheet_name=\"\"))\n else: # isinstance(i, Issue):\n issue = i\n\n if issue.itype == IType.ERROR:\n errors_exist = True\n\n if not issue.ctype and cmd: # \"cmd\" may be \"None\", in case the Issue is produced by the commands container loop\n issue.ctype = cmd._serialization_type\n\n if not issue.location.sheet_name or issue.location.sheet_name == \"\":\n issue.location.sheet_name = cmd._source_block_name if hasattr(cmd, \"_source_block_name\") else \"\"\n\n if not issue.location.sheet_number:\n issue.location.sheet_number = sheet_number\n\n new_issues.append(issue)\n\n return new_issues, errors_exist\n\n\ndef convert_generator_to_native(generator_type, file_type: str, file):\n \"\"\"\n Converts a generator\n Creates a generator parser, then it feeds the file type and the file\n The generator parser has to parse the file and to elaborate a native generator (JSON)\n\n :param generator_type:\n :param file_type:\n :param file:\n :return: Issues and output file\n \"\"\"\n\n output = []\n if generator_type.lower() not in [\"json\", \"native\", \"primitive\"]:\n # Create commands generator from factory (from generator_type and file_type)\n state = State()\n commands_generator = commands_container_parser_factory(generator_type, file_type, file, state)\n # 
Loop over the IExecutableCommand instances\n for cmd, issues in commands_generator:\n # If there are syntax ERRORS, STOP!!!\n stop = False\n if issues and len(issues) > 0:\n for t in issues:\n if t[\"type\"] == 3: # Error\n stop = True\n break\n\n output.append({\"command\": cmd._serialization_type,\n \"label\": cmd._serialization_label,\n \"content\": cmd.json_serialize(),\n \"issues\": issues\n }\n )\n if stop:\n break\n\n return output\n\n\n# ######################################################################################################################\n# SOLVING (PREPARATION AND CALL SOLVER)\n# ######################################################################################################################\n\ndef prepare_and_solve_model(state: State, dynamic_scenario_parameters: Dict = None) -> List[Issue]:\n \"\"\"\n Modify the state so that:\n * Implicit references of Interfaces to subcontexts are materialized\n * Creating processors\n * Creating interfaces in these processors\n * Creating relationships in these processors\n\n * The ProblemStatement class is considered for solving\n q* State is modified to contain the scalar and matrix indicators\n\n :param state:\n :param dynamic_scenario_parameters:\n :return:\n \"\"\"\n prepare_model(state)\n issues = call_solver(state, dynamic_scenario_parameters)\n\n return issues\n\n\ndef call_solver(state: State, dynamic_scenario_parameters: Dict) -> List[Issue]:\n \"\"\"\n Solve the problem\n\n :param state: MuSIASEM object model\n :param systems:\n :param dynamic_scenario_parameters: A dictionary containing a dynamic scenario, for interactive exploration\n \"\"\"\n\n def obtain_problem_statement(dynamic_scenario_parameters: Dict = None) -> ProblemStatement:\n \"\"\"\n Obtain a ProblemStatement instance\n Obtain the solver parameters plus a list of scenarios\n :param dynamic_scenario_parameters:\n :return:\n \"\"\"\n if dynamic_scenario_parameters is not None:\n scenarios = create_dictionary()\n scenarios[\"dynamic\"] = create_dictionary(dynamic_scenario_parameters)\n return ProblemStatement(scenarios=scenarios)\n else:\n ps_list: List[ProblemStatement] = glb_idx.get(ProblemStatement.partial_key())\n if len(ps_list) == 0:\n # No scenarios (dummy), and use the default solver\n scenarios = create_dictionary()\n scenarios[\"default\"] = create_dictionary()\n return ProblemStatement(scenarios=scenarios)\n else:\n # TODO Combine all ProblemStatements into a single ProblemStatement\n return ps_list[-1] # Take last ProblemStatement\n\n # Registry and the other objects also\n glb_idx, _, _, datasets, _ = get_case_study_registry_objects(state)\n\n global_parameters: List[Parameter] = glb_idx.get(Parameter.partial_key())\n\n dynamic_scenario = dynamic_scenario_parameters is not None\n if not dynamic_scenario:\n problem_statement = obtain_problem_statement()\n else:\n problem_statement = obtain_problem_statement(dynamic_scenario_parameters)\n\n # Obtain \"parameters\" Dataset\n datasets[\"params\"] = obtain_parameters_dataset(global_parameters, problem_statement)\n\n solver_type_param = glb_idx.get(Parameter.partial_key(\"NISSolverType\"))\n solver_type_param = solver_type_param[0]\n solver_type = solver_type_param.current_value\n\n issues: List[Issue] = []\n if solver_type == \"FlowGraph\":\n issues = flow_graph_solver(global_parameters, problem_statement, state, dynamic_scenario)\n\n return issues\n\n\ndef prepare_model(state) -> NoReturn:\n \"\"\"\n Modify the state so that:\n * Implicit references of Interfaces to subcontexts are 
materialized\n * Creating processors\n * Creating interfaces in these processors\n * Creating relationships in these processors\n\n :param state:\n \"\"\"\n\n # TODO: currently when an interface is defined as a Scale from two or more interfaces, the computed values are\n # added while the intuition tells us that only one scale should be defined. We have to give a warning message\n # if this situation happens.\n\n # Registry and the other objects also\n glb_idx, _, _, _, _ = get_case_study_registry_objects(state)\n # Prepare a Query to obtain ALL interfaces\n query = BasicQuery(state)\n filt = {}\n objs = query.execute([Factor], filt)\n for iface in objs[Factor]: # type: Factor\n if strcmp(iface.processor.instance_or_archetype, 'Archetype') or strcmp(iface.processor.instance_or_archetype, 'No'):\n continue\n\n # If the Interface is connected to a \"Subcontext\" different than the owning Processor\n if iface.opposite_processor_type:\n if iface.opposite_processor_type.lower() != iface.processor.subsystem_type.lower():\n # Check if the interface has flow relationships\n # TODO An alternative is to search \"observations\" of type FactorsRelationDirectedFlowObservation\n # in the same \"iface\"\n\n if iface.orientation.lower() == \"input\":\n parameter = {\"target\": iface}\n else:\n parameter = {\"source\": iface}\n\n relations = glb_idx.get(FactorsRelationDirectedFlowObservation.partial_key(**parameter))\n\n # If it does not have flow relationships:\n # * define default Processor name and retrieve it (or if it does not exist, create it)\n # * create an Interface into that Processor and a Flow Relationship\n if len(relations) == 0:\n # Define the name of a Processor in the same context but in different subcontext\n p_name = iface.opposite_processor_type # + \"_\" + iface.processor.processor_system\n p = glb_idx.get(Processor.partial_key(p_name, system=iface.processor.processor_system))\n if len(p) == 0:\n attributes = {\n 'subsystem_type': iface.opposite_processor_type,\n 'processor_system': iface.processor.processor_system,\n 'functional_or_structural': 'Functional',\n 'instance_or_archetype': 'Instance'\n # 'stock': None\n }\n\n p = Processor(p_name, attributes=attributes)\n glb_idx.put(p.key(), p)\n else:\n p = p[0]\n\n attributes = {\n 'sphere': 'Technosphere' if iface.opposite_processor_type.lower() in [\"local\", \"external\"] else 'Biosphere',\n 'roegen_type': iface.roegen_type,\n 'orientation': \"Input\" if iface.orientation.lower() == \"output\" else \"Output\",\n 'opposite_processor_type': iface.processor.subsystem_type\n }\n\n # Create Interface (if it does not exist)\n if not p.factors_find(iface.taxon.name):\n f = Factor.create_and_append(name=iface.taxon.name,\n processor=p,\n in_processor_type=\n FactorInProcessorType(external=False,\n incoming=iface.orientation.lower() == \"output\"),\n attributes=attributes,\n taxon=iface.taxon)\n\n glb_idx.put(f.key(), f)\n\n # Create Flow Relationship\n if iface.orientation.lower() == \"output\":\n source = iface\n target = f\n else:\n source = f\n target = iface\n\n fr = FactorsRelationDirectedFlowObservation.create_and_append(\n source=source,\n target=target,\n observer=None)\n glb_idx.put(fr.key(), fr)\n\n\ndef obtain_parameters_dataset(global_parameters: List[Parameter], problem_statement: ProblemStatement):\n params_keys = []\n params_data = []\n for scenario_name, scenario_exp_params in problem_statement.scenarios.items(): # type: str, dict\n p = evaluate_parameters_for_scenario(global_parameters, scenario_exp_params)\n for k, v in 
p.items():\n params_keys.append((scenario_name, k))\n params_data.append(v)\n\n df = pd.DataFrame(params_data,\n index=pd.MultiIndex.from_tuples(params_keys, names=[\"Scenario\", \"Parameter\"]),\n columns=[\"Value\"])\n return get_dataset(df, \"params\", \"Parameter values per Scenario\")\n\n# #####################################################################################################################\n# >>>> INTERACTIVE SESSION <<<<\n# #####################################################################################################################\n\n\nclass CreateNew(Enum):\n CASE_STUDY = 1\n VERSION = 2\n NO = 3\n\n\nclass InteractiveSession:\n \"\"\" \n Main class for interaction with NIS\n The first thing would be to identify the user and create a GUID for the session which can be used by the web server\n to store and retrieve the interactive session state.\n \n It receives command_executors, modifying state accordingly\n If a reproducible session is opened, \n \"\"\"\n def __init__(self, session_factory):\n # Session factory with access to business logic database\n self._session_factory = session_factory\n\n # Interactive session ID\n self._guid = str(uuid.uuid4())\n\n # User identity, if given (can be an anonymous session)\n self._identity = None # type: Identity\n self._state = State() # To keep the state\n self._reproducible_session = None # type: ReproducibleSession\n\n def reset_state(self):\n \"\"\" Restart state \"\"\"\n self._state = State()\n # TODO self._recordable_session = None ??\n\n @property\n def state(self):\n return self._state\n\n @state.setter\n def state(self, state: State):\n self._state = state\n\n def get_sf(self):\n return self._session_factory\n\n def set_sf(self, session_factory):\n self._session_factory = session_factory\n if self._reproducible_session:\n self._reproducible_session.set_sf(session_factory)\n\n def open_db_session(self):\n return self._session_factory()\n\n def close_db_session(self):\n self._session_factory.remove()\n\n def quit(self):\n \"\"\"\n End interactive session\n :return: \n \"\"\"\n self.close_reproducible_session()\n self.close_db_session()\n\n # --------------------------------------------------------------------------------------------\n\n def identify(self, identity_information, testing=False):\n \"\"\"\n Given credentials of some type -identity_information-, link an interactive session to an identity.\n The credentials can vary from an OAuth2 Token to user+password.\n Depending on the type of credentials, invoke a type of \"identificator\" or other\n An interactive session without identification is allowed to perform a subset of available operations\n \n :param identity_information: \n :return: True if the identification was successful, False if not \n \"\"\"\n # TODO Check the credentials\n if isinstance(identity_information, dict):\n if \"user\" in identity_information and testing:\n # Check if the user is in the User's table\n session = self._session_factory()\n src = session.query(User).filter(User.name == identity_information[\"user\"]).first()\n # Check if the dataset exists. 
\"ETL\" it if not\n # ds = session.query(Dataset).\\\n # filter(Dataset.code == dataset).\\\n # join(Dataset.database).join(Database.data_source).\\\n # filter(DataSource.name == src_name).first()\n force_load(src)\n session.close()\n self._session_factory.remove()\n if src:\n self._identity = src.name\n if self._state:\n self._state.set(\"_identity\", self._identity)\n\n return src is not None\n elif \"token\" in identity_information:\n # TODO Validate against some Authentication service\n pass\n\n def get_identity_id(self):\n return self._identity\n\n def unidentify(self):\n # TODO The un-identification cannot be done in the following circumstances: any?\n self._identity = None\n if self._state:\n self._state.set(\"_identity\", self._identity)\n\n # --------------------------------------------------------------------------------------------\n # Reproducible sessions and commands INSIDE them\n # --------------------------------------------------------------------------------------------\n def open_reproducible_session(self,\n case_study_version_uuid: str,\n recover_previous_state=True,\n cr_new: CreateNew = CreateNew.NO,\n allow_saving=True):\n self._reproducible_session = ReproducibleSession(self)\n self._reproducible_session.open(self._session_factory, case_study_version_uuid, recover_previous_state, cr_new, allow_saving)\n\n def close_reproducible_session(self, issues=None, output=None, save=False, from_web_service=False, cs_uuid=None, cs_name=None):\n if self._reproducible_session:\n if save:\n # TODO Save issues AND (maybe) output\n self._reproducible_session.save(from_web_service, cs_uuid=cs_uuid, cs_name=cs_name)\n uuid_, v_uuid, cs_uuid = self._reproducible_session.close()\n self._reproducible_session = None\n return uuid_, v_uuid, cs_uuid\n else:\n return None, None, None\n\n def reproducible_session_opened(self):\n return self._reproducible_session is not None\n\n @property\n def reproducible_session(self):\n return self._reproducible_session\n\n # --------------------------------------------------------------\n\n def execute_executable_command(self, cmd: IExecutableCommand):\n return execute_command(self._state, cmd)\n\n def register_executable_command(self, cmd: IExecutableCommand):\n self._reproducible_session.register_executable_command(cmd)\n\n def register_andor_execute_command_generator1(self, c: CommandsContainer, register=True, execute=False, ignore_imports=False):\n \"\"\"\n Creates a generator parser, then it feeds the file type and the file\n The generator parser has to parse the file and to generate command_executors as a Python generator\n\n :param generator_type:\n :param file_type:\n :param file:\n :param register: If True, register the command in the ReproducibleSession\n :param execute: If True, execute the command in the ReproducibleSession\n :return:\n \"\"\"\n if not self._reproducible_session:\n raise Exception(\"In order to execute a command generator, a work session is needed\")\n if not register and not execute:\n raise Exception(\"More than zero of the parameters 'register' and 'execute' must be True\")\n\n if register:\n self._reproducible_session.register_persistable_command(c)\n\n if execute:\n c.execution_start = datetime.datetime.now()\n pass_case_study = self._reproducible_session._session.version.case_study is not None\n ret = self._reproducible_session.execute_command_generator(c, pass_case_study, ignore_imports)\n c.execution_end = datetime.datetime.now()\n return ret\n # Or\n # return execute_command_container(self._state, c)\n else:\n return 
None\n\n def register_andor_execute_command_generator(self, generator_type, file_type: str, file, register=True, execute=False, ignore_imports=False):\n \"\"\"\n Creates a generator parser, then it feeds the file type and the file\n The generator parser has to parse the file and to generate command_executors as a Python generator\n\n :param generator_type: \n :param file_type: \n :param file: \n :param register: If True, register the command in the ReproducibleSession\n :param execute: If True, execute the command in the ReproducibleSession\n :return: \n \"\"\"\n\n return self.register_andor_execute_command_generator1(\n CommandsContainer.create(generator_type, file_type, file),\n register,\n execute,\n ignore_imports\n )\n\n # --------------------------------------------------------------------------------------------\n\n def get_case_studies(self):\n \"\"\" Get a list of case studies READABLE by current identity (or public if anonymous) \"\"\"\n pass\n\n def get_case_study_versions(self, case_study: str):\n # TODO Check that the current user has READ access to the case study\n pass\n\n def get_case_study_version(self, case_study_version: str):\n # TODO Check that the current user has READ access to the case study\n pass\n\n def get_case_study_version_variables(self, case_study_version: str):\n \"\"\" A tree of variables, by type: processors, flows \"\"\"\n pass\n\n def get_case_study_version_variable(self, case_study_version: str, variable: str):\n pass\n\n def remove_case_study_version(self, case_study_version: str):\n pass\n\n def share_case_study(self, case_study: str, identities: List[str], permission: str):\n pass\n\n def remove_case_study_share(self, case_study: str, identities: List[str], permission: str):\n pass\n\n def get_case_study_permissions(self, case_study: str):\n pass\n\n def export_case_study_version(self, case_study_version: str):\n pass\n\n def import_case_study(self, file):\n pass\n\n# #####################################################################################################################\n# >>>> REPRODUCIBLE SESSION <<<<\n# #####################################################################################################################\n\n\nclass ReproducibleSession:\n def __init__(self, isess):\n # Containing InteractiveSession. 
Used to set State when a ReproducibleSession is opened and it overwrites State\n self._isess = isess # type: InteractiveSession\n self._identity = isess._identity\n self._sess_factory = None\n self._allow_saving = None\n self._session = None # type: CaseStudyVersionSession\n\n @property\n def ws_commands(self):\n return self._session.commands if self._session else None\n\n def open(self, session_factory, uuid_: str=None, recover_previous_state=True, cr_new:CreateNew=CreateNew.NO, allow_saving=True):\n \"\"\"\n Open a work session\n\n +--------+--------+---------+-----------------------------------------------------------------------------------+\n | UUID | cr_new | recover | Behavior |\n +--------+--------+---------+-----------------------------------------------------------------------------------+ \n | !=None | True | True | Create new CS or version (branch) from \"UUID\", clone WS, recover State, append WS |\n | !=None | True | False | Create new CS or version (branch) from \"UUID\", Zero State, first WS |\n | !=None | False | True | Recover State, append WS |\n | !=None | False | False | Zero State, append WS (overwrite type) |\n | ==None | - | - | New CS and version, Zero State, first WS |\n +--------+--------+---------+-----------------------------------------------------------------------------------+\n Use cases:\n * A new case study, from scratch. uuid_=None\n * A new case study, copied from another case study. uuid_=<something>, cr_new=CreateNew.CASE_STUDY, recover_previous_state=True\n * A new version of a case study\n - Copying previous version\n - Starting from scratch\n * Continue a case study version\n - But restart (from scratch)\n * Can be a Transient session\n\n :param uuid_: UUID of the case study or case study version. Can be None, for new case studies or for testing purposes.\n :param recover_previous_state: If an existing version is specified, it will recover its state after execution of all command_executors\n :param cr_new: If != CreateNew.NO, create either a case study or a new version. If == CreateNew.NO, append session to \"uuid\"\n :param allow_saving: If True, it will allow saving at the end (it will be optional). If False, trying to save will generate an Exception\n :return UUID of the case study version in use. If it is a new case study and it has not been saved, the value will be \"None\"\n \"\"\"\n\n # TODO Just register for now. But in the future it should control that there is no other \"allow_saving\" ReproducibleSession opened\n # TODO for the same Case Study Version. So it implies modifying some state in CaseStudyVersion to have the UUID\n # TODO of the active ReproducibleSession, even if it is not in the database. Register also the date of \"lock\", so the\n # TODO lock can be removed in case of \"hang\" of the locker ReproducibleSession\n self._allow_saving = allow_saving\n self._sess_factory = session_factory\n session = self._sess_factory()\n if uuid_:\n uuid_ = str(uuid_)\n # Find UUID. 
Is it a Case Study or a Case Study version?\n # If it is the former, look for the active version.\n cs = session.query(CaseStudy).filter(CaseStudy.uuid == uuid_).first()\n if not cs:\n vs = session.query(CaseStudyVersion).filter(CaseStudyVersion.uuid == uuid_).first()\n if not vs:\n ss = session.query(CaseStudyVersionSession).filter(CaseStudyVersionSession.uuid == uuid_).first()\n if not ss:\n raise Exception(\"Object '\"+uuid_+\"' not found, when opening a ReproducibleSession\")\n else:\n vs = ss.version\n cs = vs.case_study\n else:\n cs = vs.case_study\n else: # A case study, find the latest version (the version modified latest -by activity, newest ReproducibleSession-)\n max_date = None\n max_version = None\n for v in cs.versions:\n for s in v.sessions:\n if not max_date or s.open_instant > max_date:\n max_date = s.open_instant\n max_version = v\n vs = max_version\n cs = vs.case_study\n\n # List of active sessions\n # NOTE: instead of time ordering, the ID is used, assuming sessions with greater ID were created later\n lst = session.query(CaseStudyVersionSession). \\\n filter(CaseStudyVersionSession.version_id == vs.id). \\\n order_by(CaseStudyVersionSession.id). \\\n all()\n idx = 0\n for i, ws in enumerate(lst):\n if ws.restarts:\n idx = i\n lst = lst[idx:] # Cut the list, keep only active sessions\n\n if cr_new != CreateNew.NO: # Create either a case study or a case study version\n if cr_new == CreateNew.CASE_STUDY:\n cs = copy.copy(cs) # New Case Study: COPY CaseStudy\n else:\n force_load(cs) # New Case Study Version: LOAD CaseStudy (then version it)\n vs2 = copy.copy(vs) # COPY CaseStudyVersion\n vs2.case_study = cs # Assign case study to the new version\n if recover_previous_state: # If the new version keeps previous state, copy it also\n vs2.state = vs.state # Copy state\n vs2.state_version = vs.state_version\n for ws in lst: # COPY active ReproducibleSessions\n ws2 = copy.copy(ws)\n ws2.version = vs2\n for c in ws.commands: # COPY commands\n c2 = copy.copy(c)\n c2.session = ws2\n vs = vs2\n else:\n # Load into memory\n if len(lst) == 1:\n ws = lst[0]\n force_load(ws)\n force_load(vs)\n force_load(cs)\n\n if recover_previous_state:\n # Load state if it is persisted (if not EXECUTE, POTENTIALLY VERY SLOW)\n if vs.state:\n # Deserialize\n self._isess._state = deserialize_state(vs.state, vs.state_version)\n else:\n self._isess._state = State() # Zero State, execute all commands in sequence\n for ws in lst:\n for c in ws.commands:\n execute_command_container(self._isess._state, c)\n if cr_new == CreateNew.VERSION: # TODO Check if this works in all possible circumstances (combine the parameters of the function)\n recover_previous_state = False\n else:\n self._isess._state = State()\n\n else: # New Case Study AND new Case Study Version\n cs = CaseStudy()\n vs = CaseStudyVersion()\n vs.creation_instant = datetime.datetime.utcnow()\n vs.case_study = cs\n\n # Detach Case Study and Case Study Version\n if cs in session:\n session.expunge(cs)\n if vs in session:\n session.expunge(vs)\n # Create the Case Study Version Session\n usr = session.query(User).filter(User.name == self._identity).first()\n if usr:\n force_load(usr)\n else:\n if allow_saving:\n raise Exception(\"A user is required to register which user is authoring a case study\")\n # TODO !!!!NEW CODE, ADDED TO SUPPORT NEEDED FUNCTIONALITY. 
NEEDS BETTER CODING!!!!\n restart = not recover_previous_state if uuid_ else True\n if not restart:\n self._session = ws\n else:\n self._session = CaseStudyVersionSession()\n self._session.version = vs\n self._session.who = usr\n self._session.restarts = True\n # If the Version existed, define \"restarts\" according to parameter \"recover_previous_state\"\n # ElseIf it is the first Session -> RESTARTS=True\n\n session.close()\n # session.expunge_all()\n self._sess_factory.remove()\n\n def update_current_version_state(self, lst_cmds):\n \"\"\" Designed to work using the REST interface. TEST in direct use. \"\"\"\n # Version\n # v = self._session.version\n # Serialize state\n st = serialize_state(self._isess._state)\n # v.state = st\n # Open DB session\n session = self._sess_factory()\n # Load version and change its state\n v = session.query(CaseStudyVersion).get(self._session.version_id)\n v.state = st\n session.add(v)\n for c in lst_cmds:\n c2 = session.query(CommandsContainer).get(c.id)\n c2.execution_start = c.execution_start\n c2.execution_end = c.execution_end\n session.add(c2)\n session.commit()\n self._sess_factory.remove()\n\n def save(self, from_web_service=False, cs_uuid=None, cs_name=None):\n if not self._allow_saving:\n raise Exception(\"The ReproducibleSession was opened disallowing saving. Please close it and reopen it with the proper value\")\n # Serialize state\n st = serialize_state(self._isess._state)\n self._session.version.state = st\n self._session.state = st\n ws = self._session\n\n # Open DB session\n session = self._sess_factory()\n # Change the case study\n if cs_uuid:\n # Load case study\n cs = session.query(CaseStudy).filter(CaseStudy.uuid == cs_uuid).first()\n if cs:\n ws.version.case_study = cs\n else:\n raise Exception(\"The case study UUID '\"+cs_uuid+\"' was not found\")\n # Append commands, self._session, the version and the case_study\n if not from_web_service:\n for c in self._session.commands:\n session.add(c)\n session.add(ws)\n session.add(ws.version)\n session.add(ws.version.case_study)\n else:\n ws.who = session.merge(ws.who)\n cs_id = ws.version.case_study.id\n vs_id = ws.version.id\n if cs_id and not vs_id:\n ws.version.case_study = None\n if vs_id:\n ws.version = None\n\n if cs_id:\n cs = session.query(CaseStudy).get(cs_id)\n else:\n cs = ws.version.case_study\n session.add(cs)\n\n if vs_id:\n vs = session.query(CaseStudyVersion).get(vs_id)\n ws.version = vs\n else:\n ws.version.case_study = cs\n vs = ws.version\n session.add(vs)\n\n ws.close_instant = datetime.datetime.utcnow()\n session.add(ws)\n for c in self._session.commands:\n session.add(c)\n if cs_name:\n ws.version.name = cs_name\n\n # If it was called from the REST API, assure that the version has a creation date (it should not happen)\n if from_web_service and not vs.creation_instant:\n logging.debug(\"Late setup of version creation date\")\n vs.creation_instant = datetime.datetime.utcnow()\n\n # Commit DB session\n session.commit()\n force_load(self._session)\n self._sess_factory.remove()\n\n def register_persistable_command(self, cmd: CommandsContainer):\n cmd.session = self._session\n\n def create_and_register_persistable_command(self, generator_type, file_type, file):\n \"\"\"\n Generates command_executors from an input stream (string or file)\n There must be a factory to parse stream \n :param generator_type: \n :param file_type: \n :param file: It can be a stream or a URL or a file name\n \"\"\"\n c = CommandsContainer.create(generator_type, file_type, file)\n 
self.register_persistable_command(c)\n return c\n\n def execute_command_generator(self, cmd: CommandsContainer, pass_case_study=False, ignore_imports=False):\n if pass_case_study: # CaseStudy can be modified by Metadata command, pass a reference to it\n self._isess._state.set(\"_case_study\", self._session.version.case_study)\n self._isess._state.set(\"_case_study_version\", self._session.version)\n\n ret = execute_command_container(self._isess._state, cmd, ignore_imports)\n\n if pass_case_study:\n self._isess._state.set(\"_case_study\", None)\n self._isess._state.set(\"_case_study_version\", None)\n\n return ret\n\n def register_executable_command(self, command: IExecutableCommand):\n c = executable_command_to_commands_container(command)\n c.session = self._session\n\n def set_sf(self, session_factory):\n self._sess_factory = session_factory\n\n @property\n def commands(self):\n return self._session.commands\n\n @property\n def case_study(self):\n return self._session.version.case_study\n\n def close(self) -> tuple:\n if not self._session:\n raise Exception(\"The CaseStudyVersionSession is not opened\")\n id3 = self._session.uuid, self._session.version.uuid, self._session.version.case_study.uuid\n self._session = None\n self._allow_saving = None\n return id3\n\n\ndef execute_file_return_issues(file_name, generator_type):\n \"\"\"\n Execution of files in the context of TESTS\n\n :param file_name:\n :param generator_type:\n :return:\n \"\"\"\n if generator_type == \"spreadsheet\":\n content_type = \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n read_type = \"rb\"\n elif generator_type == \"native\":\n content_type = \"application/json\"\n read_type = \"r\"\n\n prepare_and_reset_database_for_tests()\n isess = InteractiveSession(DBSession)\n isess.identify({\"user\": \"test_user\"}, testing=True) # Pass just user name.\n isess.open_reproducible_session(case_study_version_uuid=None,\n recover_previous_state=None,\n cr_new=CreateNew.CASE_STUDY,\n allow_saving=False)\n\n # Add system-level entities from JSON definition in \"default_cmds\"\n ret = isess.register_andor_execute_command_generator(\"json\", \"application/json\", nexinfosys.default_cmds, False, True)\n\n # Execute current file\n with open(file_name, read_type) as f1:\n buffer = f1.read()\n\n issues, output = isess.register_andor_execute_command_generator(generator_type, content_type, buffer, False, True)\n\n for idx, issue in enumerate(issues):\n logging.debug(f\"Issue {idx+1}/{len(issues)} = {issue}\")\n\n logging.debug(f\"Output = {output}\")\n\n isess.close_reproducible_session()\n isess.close_db_session()\n return isess, issues\n\n\ndef execute_file(file_name, generator_type):\n \"\"\"\n Execution of files in the context of TESTS\n\n :param file_name:\n :param generator_type:\n :return:\n \"\"\"\n return execute_file_return_issues(file_name, generator_type)[0] # Return just \"isession\"\n\n\nif __name__ == '__main__':\n import jsonpickle\n with open(\"/home/rnebot/pickled_state\", \"r\") as f:\n s = f.read()\n o = jsonpickle.decode(s)\n # Submit Worksheet as New Case Study\n isess = InteractiveSession(DBSession) # a session factory is required, as in execute_file_return_issues above\n isess.quit()\n\n"
] | [
[
"pandas.MultiIndex.from_tuples"
]
] |
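A brief, hedged illustration of the single API extracted from the record above, pandas.MultiIndex.from_tuples, used the same way obtain_parameters_dataset in the code field uses it; the scenario and parameter names below are illustrative, not taken from the record:

import pandas as pd

# (Scenario, Parameter) keys and their evaluated values, mirroring the
# params_keys/params_data lists built by obtain_parameters_dataset
params_keys = [("base", "alpha"), ("base", "beta"), ("high", "alpha")]
params_data = [0.5, 1.2, 0.9]

df = pd.DataFrame(params_data,
                  index=pd.MultiIndex.from_tuples(params_keys, names=["Scenario", "Parameter"]),
                  columns=["Value"])
print(df)  # one "Value" row per (Scenario, Parameter) pair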
RPrenger/NeMo | [
"e8912ca6e3321347272a6a7da18e052812fb2062"
] | [
"nemo/collections/nlp/modules/common/transformer/transformer_decoders.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nfrom dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nfrom omegaconf.omegaconf import MISSING\n\nfrom nemo.collections.common.parts import form_attention_mask\nfrom nemo.collections.nlp.modules.common.transformer.transformer_modules import MultiHeadAttention, PositionWiseFF\nfrom nemo.core.classes import NeuralModule\n\n__all__ = [\"TransformerDecoder\"]\n\n\nclass TransformerDecoderBlock(NeuralModule):\n \"\"\"\n Building block of Transformer decoder.\n\n Args:\n hidden_size: size of the embeddings in the model, also known as d_model\n inner_size: number of neurons in the intermediate part of feed-forward\n net, usually is (4-8 x hidden_size) in the papers\n num_attention_heads: number of heads in multi-head attention\n attn_score_dropout: probability of dropout applied to attention scores\n attn_layer_dropout: probability of dropout applied to the output of the\n attention layers, but before layer normalization\n ffn_dropout: probability of dropout applied to FFN output\n hidden_act: activation function used between two linear layers in FFN\n \"\"\"\n\n def __init__(\n self,\n hidden_size: int,\n inner_size: int,\n num_attention_heads: int = 1,\n attn_score_dropout: float = 0.0,\n attn_layer_dropout: float = 0.0,\n ffn_dropout: float = 0.0,\n hidden_act: str = \"relu\",\n pre_ln: bool = False,\n ):\n super().__init__()\n self.pre_ln = pre_ln\n self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=1e-5)\n self.first_sub_layer = MultiHeadAttention(\n hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout\n )\n self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=1e-5)\n self.second_sub_layer = MultiHeadAttention(\n hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout\n )\n self.layer_norm_3 = nn.LayerNorm(hidden_size, eps=1e-5)\n self.third_sub_layer = PositionWiseFF(hidden_size, inner_size, ffn_dropout, hidden_act)\n\n # TODO: add Neural Types\n def forward(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):\n\n # Pre-LN: LN -> Self-Attn -> Drop -> Residual -> LN -> Cross-Attn -> Drop -> Residual -> LN -> FFN\n # Post-LN: Self-Attn -> Drop -> Residual -> LN -> Cross-Attn -> Drop -> Residual -> LN -> FFN -> Residual -> LN\n if self.pre_ln:\n # Share same LN params for query, key (self-attn)\n decoder_query = self.layer_norm_1(decoder_query)\n decoder_keys = self.layer_norm_1(decoder_keys)\n\n self_attn_output = self.first_sub_layer(decoder_query, decoder_keys, decoder_keys, decoder_mask)\n self_attn_output += decoder_query\n\n self_attn_output = self.layer_norm_2(self_attn_output) if self.pre_ln else self.layer_norm_1(self_attn_output)\n\n enc_dec_attn_output = self.second_sub_layer(self_attn_output, encoder_states, encoder_states, encoder_mask)\n enc_dec_attn_output += self_attn_output\n\n enc_dec_attn_output = (\n self.layer_norm_3(enc_dec_attn_output) if self.pre_ln else 
self.layer_norm_2(enc_dec_attn_output)\n )\n\n output_states = self.third_sub_layer(enc_dec_attn_output)\n\n if not self.pre_ln:\n output_states = self.layer_norm_3(output_states + enc_dec_attn_output)\n\n return output_states\n\n\nclass TransformerDecoder(nn.Module):\n def __init__(\n self,\n num_layers: int,\n hidden_size: int,\n inner_size: int,\n num_attention_heads: int = 1,\n attn_score_dropout: float = 0.0,\n attn_layer_dropout: float = 0.0,\n ffn_dropout: float = 0.0,\n hidden_act: str = \"relu\",\n pre_ln: bool = False,\n ):\n super().__init__()\n\n layer = TransformerDecoderBlock(\n hidden_size,\n inner_size,\n num_attention_heads,\n attn_score_dropout,\n attn_layer_dropout,\n ffn_dropout,\n hidden_act,\n pre_ln,\n )\n self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])\n\n def _get_memory_states(self, decoder_states, decoder_mems_list=None, i=0):\n if decoder_mems_list is not None:\n memory_states = torch.cat((decoder_mems_list[i], decoder_states), dim=1)\n else:\n memory_states = decoder_states\n return memory_states\n\n def forward(\n self, decoder_states, decoder_mask, encoder_states, encoder_mask, decoder_mems_list=None, return_mems=False\n ):\n \"\"\"\n Args:\n decoder_states: output of the embedding layer (B x L_dec x H)\n decoder_mask: decoder inputs mask (B x L_dec)\n encoder_states: output of the encoder (B x L_enc x H)\n encoder_mask: encoder inputs mask (B x L_enc)\n decoder_mems_list: list of the cached decoder hidden states\n for fast autoregressive generation which will be used instead\n of decoder_states as keys and values if not None\n return_mems: bool, whether to return outputs of all decoder layers\n or the last layer only\n \"\"\"\n decoder_attn_mask = form_attention_mask(decoder_mask, diagonal=0)\n encoder_attn_mask = form_attention_mask(encoder_mask)\n memory_states = self._get_memory_states(decoder_states, decoder_mems_list, 0)\n cached_mems_list = [memory_states]\n\n for i, layer in enumerate(self.layers):\n decoder_states = layer(decoder_states, decoder_attn_mask, memory_states, encoder_states, encoder_attn_mask)\n memory_states = self._get_memory_states(decoder_states, decoder_mems_list, i + 1)\n cached_mems_list.append(memory_states)\n\n if return_mems:\n return cached_mems_list\n else:\n return cached_mems_list[-1]\n"
] | [
[
"torch.cat",
"torch.nn.LayerNorm"
]
] |
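A brief, hedged illustration of the two APIs extracted from the record above; the sizes are illustrative. _get_memory_states in the record concatenates cached decoder states with newly computed states along the sequence dimension, and each TransformerDecoderBlock sub-layer applies torch.nn.LayerNorm with eps=1e-5:

import torch

B, L_prev, H = 2, 3, 8               # illustrative batch size, cached length, hidden size
cached = torch.randn(B, L_prev, H)   # decoder_mems_list[i]: states cached so far
new_states = torch.randn(B, 1, H)    # decoder_states for the next generated token

# Concatenate along dim=1 (the sequence dimension), as in _get_memory_states
memory_states = torch.cat((cached, new_states), dim=1)
assert memory_states.shape == (B, L_prev + 1, H)

# LayerNorm over the hidden dimension, as in each TransformerDecoderBlock sub-layer
layer_norm = torch.nn.LayerNorm(H, eps=1e-5)
normed = layer_norm(memory_states)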
rkeb/rgn | [
"2cd76ce2b0fcfbb376946bb98997db916609c659"
] | [
"model/model.py"
] | [
"\"\"\" Recurrent geometric network model for protein structure prediction.\n\n In general, there is an implicit ordering of tensor dimensions that is respected throughout. It is:\n\n NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS, NUM_DIMENSIONS\n\n All tensors are assumed to have this orientation unless otherwise labeled.\n\n\"\"\"\n\n__author__ = \"Mohammed AlQuraishi\"\n__copyright__ = \"Copyright 2018, Harvard Medical School\"\n__license__ = \"MIT\"\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nimport rnn_cell_extended\nfrom tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn\nfrom tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import init_ops\nfrom geom_ops import *\nfrom net_ops import *\nfrom utils import *\nfrom glob import glob\nfrom copy import deepcopy\nfrom itertools import zip_longest\n\n# Public interface\n\nSCOPE = 'RGN'\nDUMMY_LOSS = -1.\nPREFETCH_BUFFER = 10\nLOSS_SCALING_FACTOR = 0.01 # this is to convert recorded losses to angstroms\n\n\nclass RGNModel(object):\n \"\"\"Recurrent geometric network model\"\"\"\n\n # static variable to control creation of new objects and starting the model\n _is_started = False\n _num_models = 0\n\n def __init__(self, mode, config):\n \"\"\" Sets up type of instance object and invokes TF graph creation function. \"\"\"\n\n # make sure model hasn't been started, otherwise bail.\n if not RGNModel._is_started:\n # instance variables\n self.mode = mode\n self.config = deepcopy(config)\n\n # set up and expose appropriate methods based on mode (for initial state)\n if mode == 'training':\n self.start = self._start\n else:\n self.evaluate = self._evaluate\n self.predict = self._predict\n\n # process config for derived properties\n io = self.config.io\n arch = self.config.architecture\n reg = self.config.regularization\n curr = self.config.curriculum\n opt = self.config.optimization\n init = self.config.initialization\n\n # test for correct curriculum configuration\n if curr['mode'] is None and curr['behavior'] is not None:\n raise RuntimeError('Curriculum mode must be set when curriculum behavior is set.')\n elif curr['mode'] is not None and curr['behavior'] is None:\n raise RuntimeError('Curriculum behavior must be set when curriculum mode is set.')\n\n # model name\n if io['name'] is None:\n io['name'] = 'model_' + str(RGNModel._num_models)\n RGNModel._num_models = RGNModel._num_models + 1\n\n # alphabet-related\n arch['alphabet'] = np.loadtxt(io['alphabet_file'], delimiter = ',')[:, 6:] if io['alphabet_file'] is not None else None\n if arch['alphabet'] is not None: arch['alphabet_size'] = len(arch['alphabet']) # set alphabet size if implicit\n arch['single_or_no_alphabet'] = type(arch['alphabet_size']) is not list # having multiple alphabets is isomorphic to not reusing alphabet\n arch['is_alphabetized'] = 'alphabet' in arch['tertiary_output']\n\n # angularization\n arch['is_angularized'] = 'angular' in arch['tertiary_output']\n\n # optimization\n if opt['optimizer'] == 'adadelta':\n opt.update({'rho': opt['decay']})\n\n # initialization\n if arch['higher_order_layers']:\n for key in ['recurrent_init']:\n if type(init[key]) is not list: init[key] = [init[key]] * len(arch['recurrent_layer_size'])\n\n if arch['recurrent_nonlinear_out_proj_size'] is not None:\n for key in ['recurrent_nonlinear_out_proj_init']:\n if type(init[key]) is not list: init[key] = [init[key]] * 
len(arch['recurrent_nonlinear_out_proj_size'])\n \n # regularization\n for key in ['recurrent_input_keep_probability', \n 'recurrent_output_keep_probability', \n 'recurrent_keep_probability',\n 'recurrent_state_zonein_probability',\n 'recurrent_memory_zonein_probability',\n 'alphabet_keep_probability',\n 'alphabet_normalization']:\n if type(reg[key]) is not list: reg[key] = [reg[key]] * len(arch['recurrent_layer_size'])\n\n # create graph\n self._create_graph(mode, self.config)\n\n else:\n raise RuntimeError('Model already started; cannot create new objects.')\n\n def _create_graph(self, mode, config):\n \"\"\" Creates TensorFlow computation graph \n\n Creates a different model depending on whether mode is set to 'training' or 'evaluation'.\n The semantics are such that the head (default 'training' mode) model is the one\n required for starting, training, and checkpointing. Additionally the user may create any \n number of 'evaluation' models that depend on the head model, but supplement it with \n additional data sets (and different model semantics (e.g. no dropout)) for the evaluation \n and logging of their performance. However a head model is always required, and it is the \n only one that exposes the core methods for starting and training.\n\n Note that the head model creates all variables, even ones it doesn't use, because it is \n the one with the reuse=None semantics. Ops however are specific to each model type and\n so some ops are missing from the training model and vice-versa.\n\n Almost all graph construction is done in this function, which relies on a number of \n private methods to do the actual construction. Methods internal to this class are ad hoc \n and thus not meant for general use--general methods are placed in separate *_ops python \n modules. Some parts of graph construction, namely summary ops, are done in the start \n method, to ensure that all models have been created.\n\n There are two types of internal (private, prefaced with _) variables stored in each\n object. One are ops collections, like training_ops, evaluation_ops, etc. These are lists \n of ops that are run when the similarly named object method is called. As the graph is \n built up, ops are added to these lists. The second type of variables are various nodes\n that are like TF methods, e.g. 
the initializer, saver, etc, which are stored in the\n object and are accessed by various methods when necessary.\n \"\"\"\n\n # set up appropriate op collections based on mode (for initial state)\n if mode == 'training':\n self._training_ops = training_ops = {} # collection of ops to be run at each step of training\n self._diagnostic_ops = diagnostic_ops = {} # collection of ops for diagnostics like weight norms and curriculum quantiles\n else:\n self._evaluation_ops = evaluation_ops = {} # collection of ops for evaluation of losses\n self._last_evaluation_ops = last_evaluation_ops = {} # collection of ops for the last evaluation in a multi-invocation evaluation\n self._prediction_ops = prediction_ops = {} # collection of ops for prediction of structures\n\n # set variable scoping, op scoping, and place on appropriate device\n with tf.variable_scope(SCOPE, reuse=(mode == 'evaluation')) as scope, \\\n tf.name_scope(SCOPE + '/' + config.io['name'] + '/'), \\\n tf.device(_device_function_constructor(**{k: config.computing[k] for k in ('functions_on_devices', 'default_device')})):\n\n # set graph seed\n if mode == 'training': tf.set_random_seed(config.initialization['graph_seed'])\n\n # Create curriculum state and tracking variables if needed.\n if config.curriculum['mode'] is not None:\n # Variable to hold current curriculum iteration\n curriculum_step = tf.get_variable(name='curriculum_step', shape=[], trainable=False, \n initializer=tf.constant_initializer(config.curriculum['base']))\n if mode == 'training': diagnostic_ops.update({'curriculum_step': curriculum_step})\n\n # Set up data ports\n if mode == 'training': self._coordinator = tf.train.Coordinator()\n if config.curriculum['mode'] == 'length':\n max_length = tf.cast(tf.reduce_min([curriculum_step, config.optimization['num_steps']]), tf.int32)\n else:\n max_length = config.optimization['num_steps']\n dataflow_config = merge_dicts(config.io, config.initialization, config.optimization, config.queueing)\n ids, primaries, evolutionaries, secondaries, tertiaries, masks, num_stepss = _dataflow(dataflow_config, max_length)\n\n # Set up inputs\n inputs = _inputs(merge_dicts(config.architecture, config.initialization), primaries, evolutionaries)\n\n # Compute dRMSD weights (this masks out meaningless (longer than sequence) pairwise distances and incorporates curriculum weights)\n weights_config = merge_dicts(config.optimization, config.curriculum, config.loss, config.io)\n weights, flat_curriculum_weights = _weights(weights_config, masks, curriculum_step if config.curriculum['mode'] == 'loss' else None)\n if mode == 'training' and config.curriculum['mode'] == 'loss': diagnostic_ops.update({'flat_curriculum_weights': flat_curriculum_weights})\n\n # create alphabet if needed and if it will be shared between layers, otherwise set to None so that _dihedrals takes care of it\n alphabet_config = merge_dicts(config.architecture, config.initialization)\n if alphabet_config['is_alphabetized'] and alphabet_config['single_or_no_alphabet']:\n alphabet = _alphabet(mode, alphabet_config)\n if mode == 'training' and config.io['log_alphabet']: diagnostic_ops.update({'alphabet': alphabet})\n else:\n alphabet = None\n\n # Create recurrent layer(s) that translate primary sequences into internal representation\n recurrence_config = merge_dicts(config.initialization, config.architecture, config.regularization, config.optimization, \n config.computing, config.io)\n recurrent_outputs, recurrent_states = _higher_recurrence(mode, recurrence_config, inputs, 
num_stepss, alphabet=alphabet)\n\n # Tertiary structure generation\n if config.loss['tertiary_weight'] > 0:\n # Convert internal representation to (thru some number of possible ways) dihedral angles\n dihedrals_config = merge_dicts(config.initialization, config.optimization, config.architecture, config.regularization, config.io)\n dihedrals_config.update({k: dihedrals_config[k][-1] for k in ['alphabet_keep_probability',\n 'alphabet_normalization']})\n if not dihedrals_config['single_or_no_alphabet']: dihedrals_config.update({'alphabet_size': dihedrals_config['alphabet_size'][-1]})\n dihedrals = _dihedrals(mode, dihedrals_config, recurrent_outputs, alphabet=alphabet)\n\n # Convert dihedrals into full 3D structures and compute dRMSDs\n coordinates = _coordinates(merge_dicts(config.computing, config.optimization, config.queueing), dihedrals)\n drmsds = _drmsds(merge_dicts(config.optimization, config.loss, config.io), coordinates, tertiaries, weights)\n\n if mode == 'evaluation': \n prediction_ops.update({'ids': ids, 'coordinates': coordinates, 'num_stepss': num_stepss, 'recurrent_states': recurrent_states})\n\n # Losses\n if config.loss['include']:\n filters = {grp: id_filter(ids, grp) for grp in config.io['evaluation_sub_groups']} if mode == 'evaluation' else {}\n filters.update({'all': tf.tile([True], tf.shape(ids))})\n\n for group_id, group_filter in filters.items():\n with tf.variable_scope(group_id):\n # Tertiary loss\n effective_tertiary_loss = 0.\n if config.loss['tertiary_weight'] > 0:\n if config.queueing['num_evaluation_invocations'] > 1 and mode == 'training':\n raise RuntimeError('Cannot use multiple invocations with training mode.')\n else:\n # Compute tertiary loss quotient parts by reducing dRMSDs based on normalization behavior\n tertiary_loss_numerator, tertiary_loss_denominator = _reduce_loss_quotient(merge_dicts(config.loss, config.io, config.optimization), \n drmsds, masks, group_filter, \n name_prefix='tertiary_loss')\n\n # Handles multiple invocations and gracefully degrades for single invocations.\n # Variables are created below _per_ evaluation model, which is a deviation from my general design\n # the scope of those variables is the evaluation model's, _not_ the training model's as usual\n tertiary_loss, min_loss_achieved, min_loss_op, update_accu_op, reduce_accu_op = _accumulate_loss(\n merge_dicts(config.io, config.queueing),\n tertiary_loss_numerator, tertiary_loss_denominator,\n name_prefix='tertiary_loss')\n\n if mode == 'evaluation':\n evaluation_ops.update( {'update_accumulator_' + group_id + '_op': update_accu_op})\n last_evaluation_ops.update({'tertiary_loss_' + group_id : tertiary_loss * LOSS_SCALING_FACTOR, \\\n 'reduce_accumulator_' + group_id + '_op': reduce_accu_op, \\\n 'min_tertiary_loss_achieved_' + group_id : min_loss_achieved * LOSS_SCALING_FACTOR, \\\n 'min_tertiary_loss_' + group_id + '_op': min_loss_op})\n\n if config.io['log_model_summaries']: tf.add_to_collection(config.io['name'] + '_tertiary_losses', tertiary_loss)\n effective_tertiary_loss = config.loss['tertiary_weight'] * tertiary_loss\n\n # Final loss and related housekeeping\n loss = tf.identity(effective_tertiary_loss, name='loss')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # batch_norm related\n if update_ops: loss = control_flow_ops.with_dependencies(tf.tuple(update_ops), loss)\n if config.io['log_model_summaries']: tf.add_to_collection(config.io['name'] + '_losses', loss)\n if group_id == config.curriculum['loss_history_subgroup']: curriculum_loss = loss\n\n # 
Curriculum loss history; not always used but design is much cleaner if always created.\n curriculum_loss_history = tf.get_variable(\n initializer=tf.constant_initializer([DUMMY_LOSS] * config.curriculum['change_num_iterations']), \n shape=[config.curriculum['change_num_iterations']], trainable=False, name='curriculum_loss_history')\n if mode == 'evaluation' and config.curriculum['update_loss_history']:\n update_curriculum_history_op = _history(config.io, curriculum_loss, curriculum_loss_history)\n last_evaluation_ops.update({'update_curriculum_history_op': update_curriculum_history_op})\n\n # Training\n if mode == 'training':\n # get grads, training ops\n self._global_step, minimize_op, grads_and_vars_dict = _training(config.optimization, loss)\n self._grads_and_vars_length = len(grads_and_vars_dict) // 2 # integer division; used as a range() bound in _diagnose\n\n # update relevant op dicts\n # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n # if update_ops: training_ops.update({'update_ops': tf.tuple(update_ops)})\n training_ops.update({'minimize_op': minimize_op, 'global_step': self._global_step, 'ids': ids})\n diagnostic_ops.update(grads_and_vars_dict)\n\n # Curriculum\n if mode == 'training' and config.curriculum['behavior'] in ['fixed_rate', 'loss_threshold', 'loss_change']:\n curriculum_update_op = _curriculum(config.curriculum, curriculum_step, curriculum_loss_history, [minimize_op])\n training_ops.update({'curriculum_update_op': curriculum_update_op})\n\n def _train(self, session):\n \"\"\" Performs one iteration of training and, if applicable, advances the curriculum. \"\"\"\n\n training_dict = ops_to_dict(session, self._training_ops)\n\n return training_dict['global_step'], training_dict['ids']\n\n def _evaluate(self, session, pretty=True):\n \"\"\" Evaluates loss(es) and returns dicts with the relevant loss(es). \"\"\"\n if RGNModel._is_started:\n # evaluate\n num_invocations = self.config.queueing['num_evaluation_invocations']\n for invocation in range(num_invocations):\n if invocation < num_invocations - 1:\n evaluation_dict = ops_to_dict(session, self._evaluation_ops)\n else:\n evaluation_dict = ops_to_dict(session, merge_dicts(self._evaluation_ops, self._last_evaluation_ops))\n\n # write event summaries to disk\n if self.config.io['log_model_summaries']:\n self._summary_writer.add_summary(evaluation_dict['merged_summaries_op'], global_step=evaluation_dict['global_step'])\n\n # remove non-user facing ops (snapshot keys to avoid mutating the dict while iterating)\n if pretty: [evaluation_dict.pop(k) for k in list(evaluation_dict.keys()) if 'op' in k]\n\n return evaluation_dict\n\n else:\n raise RuntimeError('Model has not been started or has already finished.')\n\n def _predict(self, session):\n \"\"\" Predict 3D structures. 
\"\"\"\n\n if RGNModel._is_started:\n # evaluate prediction dict\n prediction_dict = ops_to_dict(session, self._prediction_ops)\n\n # process tertiary sequences\n if prediction_dict.has_key('coordinates'): prediction_dict['coordinates'] = np.transpose(prediction_dict['coordinates'], (1, 2, 0))\n\n # generate return dict\n predictions = {}\n for id_, num_steps, tertiary, recurrent_states in izip_longest(*[prediction_dict.get(key, []) \n for key in ['ids', 'num_stepss', 'coordinates', 'recurrent_states']]):\n prediction = {}\n\n if tertiary is not None:\n last_atom = (num_steps - self.config.io['num_edge_residues']) * NUM_DIHEDRALS\n prediction.update({'tertiary': tertiary[:, :last_atom]})\n\n prediction.update({'recurrent_states': recurrent_states})\n\n predictions.update({id_: prediction})\n\n return predictions\n\n else:\n raise RuntimeError('Model has not been started or has already finished.')\n\n def _diagnose(self, session, pretty=True):\n \"\"\" Compute and return diagnostic measurements like weight norms and curriculum quantiles. \"\"\"\n\n diagnostic_dict = ops_to_dict(session, self._diagnostic_ops)\n\n # write event summaries to disk\n if self.config.io['log_model_summaries']:\n for op in ['merged_summaries_op', 'base_merged_summaries_op']:\n self._summary_writer.add_summary(diagnostic_dict[op], global_step=diagnostic_dict['global_step'])\n\n # compute max/min of vars and grads\n vars_ = [diagnostic_dict['v' + str(i)] for i in range(self._grads_and_vars_length)]\n grads = [diagnostic_dict['g' + str(i)] for i in range(self._grads_and_vars_length)]\n diagnostic_dict.update({'min_weight': np.min([np.min(var) for var in vars_]),\n 'max_weight': np.max([np.max(var) for var in vars_]),\n 'min_grad': np.min([np.min(grad) for grad in grads]),\n 'max_grad': np.max([np.max(grad) for grad in grads])})\n\n # compute curriculum quantiles if applicable.\n if self.config.curriculum['mode'] == 'loss':\n quantiles = cum_quantile_positions(diagnostic_dict['flat_curriculum_weights'])\n diagnostic_dict.update({'curriculum_quantiles': quantiles})\n elif self.config.curriculum['mode'] == 'length':\n diagnostic_dict.update({'curriculum_quantiles': float('nan')})\n\n # remove non-user facing ops and tensors\n if pretty:\n diagnostic_dict.pop('flat_curriculum_weights', None)\n for i in range(self._grads_and_vars_length):\n diagnostic_dict.pop('v' + str(i))\n diagnostic_dict.pop('g' + str(i))\n\n return diagnostic_dict\n\n def _start(self, evaluation_models, session=None, restore_if_checkpointed=True):\n \"\"\" Initializes model from scratch or loads state from disk.\n Must be run once (and only once) before model is used. \"\"\"\n\n if not RGNModel._is_started:\n # Checkpointing. Must be done here after all models have been instantiated, because evaluation models may introduce additional variables\n self._saver = tf.train.Saver(max_to_keep=self.config.io['max_checkpoints'], \n keep_checkpoint_every_n_hours=self.config.io['checkpoint_every_n_hours'])\n\n # variable tracking and summarization. 
it has to be done here after all models have been instantiated\n model_names = set([model.config.io['name'] for model in evaluation_models] + [self.config.io['name']])\n if self.config.io['log_model_summaries']:\n # add histogram and scalar summaries losses\n for model_name in model_names:\n for coll in ['tertiary_losses', 'losses']:\n for node in tf.get_collection(model_name + '_' + coll):\n tf.summary.scalar(node.name, node, collections=[model_name + '_' + tf.GraphKeys.SUMMARIES])\n if self.config.io['detailed_logs']:\n # additional detailed summaries for losses\n for model_name in model_names:\n for coll in ['scess', 'matches', 'drmsdss', tf.GraphKeys.ACTIVATIONS]:\n for node_or_named_output in tf.get_collection(model_name + '_' + coll): \n if type(node_or_named_output) is tf.Tensor:\n tf.summary.histogram(node_or_named_output.name, node_or_named_output, \n collections=[model_name + '_' + tf.GraphKeys.SUMMARIES])\n elif type(node_or_named_output) is layers.utils.NamedOutputs:\n tf.summary.histogram(node_or_named_output[1].name, node_or_named_output[1],\n collections=[model_name + '_' + tf.GraphKeys.SUMMARIES])\n\n # summaries for trainable variables and their activations\n for var in tf.trainable_variables(): tf.summary.histogram(var.name, var)\n layers.summarize_activations() \n\n # add housekeeping training ops that merge and write summaries\n self._summary_writer = tf.summary.FileWriter(self.config.io['logs_directory'])\n self._diagnostic_ops.update({'global_step': self._global_step,\n 'base_merged_summaries_op': tf.summary.merge_all(), # leftovers not covered by model-specific 'summaries'\n 'merged_summaries_op': tf.summary.merge_all(self.config.io['name'] + '_' + tf.GraphKeys.SUMMARIES)})\n\n # ditto for evaluation models\n for model in evaluation_models:\n if model.mode == 'evaluation':\n model._summary_writer = self._summary_writer\n model._last_evaluation_ops.update({\n 'global_step': self._global_step,\n 'merged_summaries_op': tf.summary.merge_all(model.config.io['name'] + '_' + tf.GraphKeys.SUMMARIES)})\n\n # start session with appropriate device settings if no Session is passed\n if self.config.computing['fill_gpu']:\n gpu_fraction = None\n else:\n gpu_fraction = self.config.computing['gpu_fraction']\n\n if session is None:\n session = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=False,\n inter_op_parallelism_threads=self.config.computing['num_cpus'],\n intra_op_parallelism_threads=self.config.computing['num_cpus'],\n gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,\n allow_growth=self.config.computing['allow_gpu_growth'])))\n\n # retrieve latest checkpoint, if any\n latest_checkpoint = tf.train.latest_checkpoint(self.config.io['checkpoints_directory'])\n\n # restore latest checkpoint if found, initialize from scratch otherwise.\n if not restore_if_checkpointed or latest_checkpoint is None:\n tf.global_variables_initializer().run(session=session)\n tf.local_variables_initializer().run(session=session)\n else:\n self._saver.restore(session, latest_checkpoint)\n tf.local_variables_initializer().run(session=session)\n\n # start coordinator and queueing threads\n self._threads = tf.train.start_queue_runners(sess=session, coord=self._coordinator)\n RGNModel._is_started = True\n\n # expose new methods and hide old ones\n self.train = self._train\n self.diagnose = self._diagnose\n self.save = self._save\n self.is_done = self._is_done\n self.current_step = self._current_step\n self.finish = self._finish\n del self.start\n\n return session\n\n 
else:\n raise RuntimeError('Model already started.')\n\n def _save(self, session):\n \"\"\" Checkpoints current model. \"\"\"\n\n checkpoints_dir = self.config.io['checkpoints_directory']\n if not os.path.exists(checkpoints_dir): os.makedirs(checkpoints_dir)\n return self._saver.save(session, checkpoints_dir, global_step=self._global_step)\n\n def _is_done(self):\n \"\"\" Returns True if training is finished, False otherwise. \"\"\"\n \n return self._coordinator.should_stop()\n\n def _current_step(self, session):\n \"\"\" Returns the current global step. \"\"\"\n\n return session.run(self._global_step)\n\n def _finish(self, session, save=True, close_session=True, reset_graph=True):\n \"\"\" Instructs the model to shutdown. \"\"\"\n\n self._coordinator.request_stop()\n self._coordinator.join(self._threads)\n \n if save: self.save(session)\n if self.config.io['log_model_summaries']: self._summary_writer.close()\n if close_session: session.close()\n if reset_graph: tf.reset_default_graph()\n \n RGNModel._num_models = 0\n RGNModel._is_started = False\n\n del self.train, self.diagnose, self.save, self.is_done, self.current_step, self.finish\n\n### Private functions\n# These functions are meant strictly for internal use by RGNModel, and are \n# generally quite ad hoc. For TF-based ones, they do not carry out proper scoping \n# of their internals, as what they produce is meant to be dropped in the main TF \n# graph. They are often stateful, producing TF variables that are used by other \n# parts of RGNModel. However their behavior is still transparent in the sense\n# that they're only passed parameters, not actual TF nodes or ops, and return\n# everything that needs to be acted upon by RGNModel. So they don't modify\n# the state of anything that's passed to them.\n\ndef _device_function_constructor(functions_on_devices={}, default_device=''):\n \"\"\" Returns a device placement function to insure that each operation is placed on the most optimal device. \"\"\"\n\n def device_function(op):\n # note that one can't depend on ordering of items in dicts due to their indeterminancy\n for device, funcs in functions_on_devices.items():\n if any(((func in op.name) or any(func in node.name for node in op.inputs)) for func in funcs):\n return device\n else:\n return default_device\n\n return device_function\n\ndef _dataflow(config, max_length):\n \"\"\" Creates TF queues and nodes for inputting and batching data. 
\"\"\"\n\n # files\n if config['data_files'] is not None:\n files = config['data_files']\n else:\n files = glob(config['data_files_glob'])\n\n # files queue\n file_queue = tf.train.string_input_producer(\n files,\n num_epochs=config['num_epochs'],\n shuffle=config['shuffle'],\n seed=config['queue_seed'],\n capacity=config['file_queue_capacity'],\n name='file_queue')\n\n # read instance\n inputs = read_protein(file_queue, max_length, config['num_edge_residues'], config['num_evo_entries'])\n\n # randomization\n if config['shuffle']: # based on https://github.com/tensorflow/tensorflow/issues/5147#issuecomment-271086206\n dtypes = list(map(lambda x: x.dtype, inputs))\n shapes = list(map(lambda x: x.get_shape(), inputs))\n randomizer_queue = tf.RandomShuffleQueue(capacity=config['batch_queue_capacity'], min_after_dequeue=config['min_after_dequeue'], \n dtypes=dtypes, seed=config['queue_seed'], name='randomization_queue')\n randomizer_enqueue_op = randomizer_queue.enqueue(inputs)\n randomizer_qr = tf.train.QueueRunner(randomizer_queue, [randomizer_enqueue_op])\n tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, randomizer_qr)\n inputs = randomizer_queue.dequeue()\n for tensor, shape in zip(inputs, shapes): tensor.set_shape(shape)\n num_steps, keep = inputs[-2:]\n\n # bucketing\n if config['bucket_boundaries'] is not None:\n batch_fun = tf.contrib.training.bucket_by_sequence_length\n batch_kwargs = {'input_length': num_steps,\n 'bucket_boundaries': config['bucket_boundaries'], \n 'capacity': config['batch_queue_capacity'] / config['batch_size']}\n sel_slice = 1\n else:\n batch_fun = tf.train.maybe_batch\n batch_kwargs = {'capacity': config['batch_queue_capacity']}\n sel_slice = slice(len(inputs) - 1)\n\n # batching\n inputs = batch_fun(tensors=list(inputs)[:-1], keep_input=keep, dynamic_pad=True, batch_size=config['batch_size'], \n name='batching_queue', **batch_kwargs)\n ids, primaries_batch_major, evolutionaries_batch_major, secondaries_batch_major, tertiaries_batch_major, masks_batch_major, num_stepss = \\\n inputs[sel_slice]\n\n # transpose to time_step major\n primaries = tf.transpose(primaries_batch_major, perm=(1, 0, 2), name='primaries') \n # primary sequences, i.e. one-hot sequences of amino acids.\n # [NUM_STEPS, BATCH_SIZE, NUM_AAS]\n\n evolutionaries = tf.transpose(evolutionaries_batch_major, perm=(1, 0, 2), name='evolutionaries') \n # evolutionary sequences, i.e. multi-dimensional evolutionary profiles of amino acid propensities.\n # [NUM_STEPS, BATCH_SIZE, NUM_EVO_ENTRIES]\n\n secondaries = tf.transpose(secondaries_batch_major, perm=(1, 0), name='secondaries') \n # secondary sequences, i.e. sequences of DSSP classes.\n # [NUM_STEPS, BATCH_SIZE]\n\n tertiaries = tf.transpose(tertiaries_batch_major, perm=(1, 0, 2), name='tertiaries')\n # tertiary sequences, i.e. sequences of 3D coordinates.\n # [(NUM_STEPS - NUM_EDGE_RESIDUES) x NUM_DIHEDRALS, BATCH_SIZE, NUM_DIMENSIONS]\n\n masks = tf.transpose(masks_batch_major, perm=(1, 2, 0), name='masks')\n # mask matrix for each datum that masks meaningless distances.\n # [NUM_STEPS - NUM_EDGE_RESIDUES, NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE]\n\n # assign names to the nameless\n ids = tf.identity(ids, name='ids')\n num_stepss = tf.identity(num_stepss, name='num_stepss')\n\n return ids, primaries, evolutionaries, secondaries, tertiaries, masks, num_stepss\n\ndef _inputs(config, primaries, evolutionaries):\n \"\"\" Returns final concatenated input for use in recurrent layer. 
\"\"\"\n\n inputs_list = ([primaries] if config['include_primary'] else []) + \\\n ([evolutionaries * config['evolutionary_multiplier']] if config['include_evolutionary'] else [])\n\n if inputs_list is not []:\n inputs = tf.concat(inputs_list, 2, name='inputs')\n # [NUM_STEPS, BATCH_SIZE, NUM_AAS or NUM_EVO_ENTRIES or NUM_AAS + NUM_EVO_ENTRIES]\n else:\n raise RuntimeError('Either primaries or evolutionaries (or both) must be used as inputs.')\n\n return inputs\n\ndef _weights(config, masks, curriculum_step=None):\n \"\"\" Returns dRMSD weights that mask meaningless (missing or longer than \n sequence residues) pairwise distances and incorporate the state of \n the curriculum to differentially weigh pairwise distances based on \n their proximity. \"\"\"\n\n if config['atoms'] == 'c_alpha':\n if config['mode'] != 'loss':\n # no loss-based curriculum, create fixed weighting matrix that weighs all distances equally. \n # minus one factor is there because we ignore self-distances.\n flat_curriculum_weights = np.ones(config['num_steps'] - config['num_edge_residues'] - 1, dtype='float32')\n\n elif config['mode'] == 'loss' and curriculum_step is not None:\n # create appropriate weights based on curriculum parameters and current step.\n flat_curriculum_weights = curriculum_weights(base=curriculum_step, \n slope=config['slope'], \n max_seq_length=config['num_steps'] - config['num_edge_residues'])\n else:\n raise RuntimeError('Curriculum step tensor not supplied.')\n\n # weighting matrix for entire batch that accounts for curriculum weighting.\n unnormalized_weights = weighting_matrix(flat_curriculum_weights, name='unnormalized_weights')\n # [NUM_STEPS - NUM_EDGE_RESIDUES, NUM_STEPS - NUM_EDGE_RESIDUES]\n\n # create final weights by multiplying with masks and normalizing.\n mask_length = tf.shape(masks)[0]\n unnormalized_masked_weights = masks * unnormalized_weights[:mask_length, :mask_length, tf.newaxis]\n masked_weights = tf.div(unnormalized_masked_weights, \n tf.reduce_sum(unnormalized_masked_weights, axis=[0, 1]), \n name='weights')\n\n return masked_weights, flat_curriculum_weights\n\n else:\n raise NotImplementedError('Model does not currently support anything other than C alpha atoms for the loss function.')\n\ndef _higher_recurrence(mode, config, inputs, num_stepss, alphabet=None):\n \"\"\" Higher-order recurrence that creates multiple layers, possibly with interleaving dihedrals \"\"\"\n\n # prep\n is_training = (mode == 'training')\n initial_inputs = inputs\n\n # check if it's a simple recurrence that is just a lower-order recurrence (include simple multilayers) or a higher-order recurrence.\n # higher-order recurrences always concatenate both directions before passing them on to the next layer, in addition to allowing\n # additional information to be incorporated in the passed activations, including dihedrals. 
The final output that's returned\n # by this function is always just the recurrent outputs, not the other information, which is only used in intermediate layers.\n if config['higher_order_layers']:\n # higher-order recurrence that concatenates both directions and possibly additional outputs before sending to the next layer.\n \n # prep\n layer_inputs = initial_inputs\n layers_recurrent_outputs = []\n layers_recurrent_states = []\n num_layers = len(config['recurrent_layer_size'])\n residual_n = config['residual_connections_every_n_layers']\n residual_shift = config['first_residual_connection_from_nth_layer'] - 1\n\n # iteratively construct each layer\n for layer_idx in range(num_layers):\n with tf.variable_scope('layer' + str(layer_idx)):\n # prepare layer-specific config\n layer_config = deepcopy(config)\n layer_config.update({k: [config[k][layer_idx]] for k in ['recurrent_layer_size',\n 'recurrent_input_keep_probability',\n 'recurrent_output_keep_probability',\n 'recurrent_keep_probability',\n 'recurrent_state_zonein_probability',\n 'recurrent_memory_zonein_probability']})\n layer_config.update({k: config[k][layer_idx] for k in ['alphabet_keep_probability',\n 'alphabet_normalization',\n 'recurrent_init']})\n layer_config.update({k: (config[k][layer_idx] if not config['single_or_no_alphabet'] else config[k]) for k in ['alphabet_size']})\n\n # core lower-level recurrence\n layer_recurrent_outputs, layer_recurrent_states = _recurrence(mode, layer_config, layer_inputs, num_stepss)\n\n # residual connections (only for recurrent outputs; other outputs are maintained but not wired in a residual manner)\n # all recurrent layer sizes must be the same\n if residual_n is not None:\n if (residual_n >= 1) and ((layer_idx - residual_shift) % residual_n == 0) and (int(layer_idx) >= residual_n + residual_shift): \n layer_recurrent_outputs = layer_recurrent_outputs + layers_recurrent_outputs[-residual_n]\n print('residually wired layer ' + str(layer_idx - residual_n + 1) + ' to layer ' + str(layer_idx + 1))\n\n # add to list of recurrent layers' outputs (needed for residual connection and some skip connections)\n layers_recurrent_outputs.append(layer_recurrent_outputs)\n layers_recurrent_states.append(layer_recurrent_states)\n\n # intermediate recurrences, only created if there's at least one layer on top of the current one\n if layer_idx != num_layers - 1: # not last layer\n layer_outputs = []\n\n # dihedrals\n if config['include_dihedrals_between_layers']:\n layer_dihedrals = _dihedrals(mode, layer_config, layer_recurrent_outputs, alphabet=alphabet)\n layer_outputs.append(layer_dihedrals)\n\n # skip connections from all previous layers (these will not be connected to the final linear output layer)\n if config['all_to_recurrent_skip_connections']:\n layer_outputs.append(layer_inputs)\n\n # skip connections from initial inputs only (these will not be connected to the final linear output layer)\n if config['input_to_recurrent_skip_connections'] and not config['all_to_recurrent_skip_connections']:\n layer_outputs.append(initial_inputs)\n\n # recurrent state\n if config['include_recurrent_outputs_between_layers']:\n layer_outputs.append(layer_recurrent_outputs)\n\n # feed outputs as inputs to the next layer up\n layer_inputs = tf.concat(layer_outputs, 2)\n\n # if recurrent to output skip connections are enabled, return all recurrent layer outputs, otherwise return only last one.\n # always return all states.\n if config['recurrent_to_output_skip_connections']:\n return tf.concat(layers_recurrent_outputs, 2), 
tf.concat(layers_recurrent_states, 1)\n else:\n return layer_recurrent_outputs, tf.concat(layers_recurrent_states, 1)\n else:\n # simple recurrence, including multiple layers that use TF's builtin functionality, call lower-level recurrence function\n return _recurrence(mode, config, initial_inputs, num_stepss)\n\ndef _recurrence(mode, config, inputs, num_stepss):\n \"\"\" Recurrent layer for transforming inputs (primary sequences) into an internal representation. \"\"\"\n \n is_training = (mode == 'training')\n reverse = lambda seqs: tf.reverse_sequence(seqs, num_stepss, seq_axis=0, batch_axis=1) # convenience function for sequence reversal\n\n # create recurrent initialization dict\n if config['recurrent_init'] != None:\n recurrent_init = dict_to_inits(config['recurrent_init'], config['recurrent_seed'])\n else:\n for case in switch(config['recurrent_unit']):\n if case('LNLSTM'):\n recurrent_init = {'base': None, 'bias': None}\n elif case('CudnnLSTM') or case('CudnnGRU'):\n recurrent_init = {'base': dict_to_init({}), 'bias': None}\n else:\n recurrent_init = {'base': None, 'bias': tf.zeros_initializer()}\n\n # fused mode vs. explicit dynamic rollout mode\n if 'Cudnn' in config['recurrent_unit']:\n # cuDNN-based fusion; assumes all (lower-order) layers are of the same size (first layer size) and all input dropouts are the same \n # (first layer one). Does not support peephole connections, and only supports input dropout as a form of regularization.\n layer_size = config['recurrent_layer_size'][0]\n num_layers = len(config['recurrent_layer_size'])\n input_keep_prob = config['recurrent_input_keep_probability'][0]\n\n for case in switch(config['recurrent_unit']):\n if case('CudnnLSTM'):\n cell = cudnn_rnn.CudnnLSTM\n elif case('CudnnGRU'):\n cell = cudnn_rnn.CudnnGRU\n\n if is_training and input_keep_prob < 1: # this layer is needed because cuDNN dropout only applies to inputs between layers, not the first inputs\n inputs = tf.nn.dropout(inputs, input_keep_prob, seed=config['dropout_seed'])\n\n if num_layers > 1: # strictly speaking this isn't needed, but it allows multiple cuDNN-based models to run on the same GPU when num_layers = 1\n dropout_kwargs = {'dropout': 1 - input_keep_prob, 'seed': config['dropout_seed']}\n else:\n dropout_kwargs = {}\n\n outputs = []\n states = []\n scopes = ['fw', 'bw'] if config['bidirectional'] else ['fw']\n for scope in scopes:\n with tf.variable_scope(scope):\n rnn = cell(num_layers=num_layers, num_units=layer_size, direction=cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION, \n kernel_initializer=recurrent_init['base'], bias_initializer=recurrent_init['bias'], **dropout_kwargs)\n inputs_directed = inputs if scope == 'fw' else reverse(inputs)\n outputs_directed, (_, states_directed) = rnn(inputs_directed, training=is_training)\n outputs_directed = outputs_directed if scope == 'fw' else reverse(outputs_directed)\n outputs.append(outputs_directed)\n states.append(states_directed)\n outputs = tf.concat(outputs, 2)\n states = tf.concat(states, 2)[0]\n \n else:\n # TF-based dynamic rollout\n if config['bidirectional']:\n outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=_recurrent_cell(mode, config, recurrent_init, 'fw'), \n cell_bw=_recurrent_cell(mode, config, recurrent_init, 'bw'), \n inputs=inputs, time_major=True, sequence_length=tf.to_int64(num_stepss),\n dtype=tf.float32, swap_memory=True, parallel_iterations=config['num_recurrent_parallel_iters'])\n outputs = tf.concat(outputs, 2)\n states = tf.concat(states, 2)\n # [NUM_STEPS, BATCH_SIZE, 2 x 
RECURRENT_LAYER_SIZE]\n # outputs of recurrent layer over all time steps. \n else:\n outputs, states = tf.nn.dynamic_rnn(cell=_recurrent_cell(mode, config, recurrent_init),\n inputs=inputs, time_major=True, sequence_length=num_stepss, \n dtype=tf.float32, swap_memory=True, parallel_iterations=config['num_recurrent_parallel_iters'])\n # [NUM_STEPS, BATCH_SIZE, RECURRENT_LAYER_SIZE]\n # outputs of recurrent layer over all time steps.\n\n # add newly created variables to respective collections\n if is_training:\n for v in tf.trainable_variables():\n if 'rnn' in v.name and ('cell/kernel' in v.name): tf.add_to_collection(tf.GraphKeys.WEIGHTS, v)\n if 'rnn' in v.name and ('cell/bias' in v.name): tf.add_to_collection(tf.GraphKeys.BIASES, v)\n\n return outputs, states\n\ndef _recurrent_cell(mode, config, recurrent_init, name=''):\n \"\"\" create recurrent cell(s) used in RNN \"\"\"\n\n is_training = (mode == 'training')\n\n # lower-order multilayer\n cells = []\n for layer_idx, (layer_size, input_keep_prob, output_keep_prob, keep_prob, hidden_state_keep_prob, memory_cell_keep_prob) \\\n in enumerate(zip(\n config['recurrent_layer_size'], \n config['recurrent_input_keep_probability'], \n config['recurrent_output_keep_probability'],\n config['recurrent_keep_probability'],\n config['recurrent_state_zonein_probability'], \n config['recurrent_memory_zonein_probability'])):\n \n # set context\n with tf.variable_scope('sublayer' + str(layer_idx) + (name if name is '' else '_' + name), initializer=recurrent_init['base']):\n\n # create core cell\n for case in switch(config['recurrent_unit']):\n if case('Basic'):\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units=layer_size, reuse=(not is_training))\n elif case('GRU'):\n cell = tf.nn.rnn_cell.GRUCell(num_units=layer_size, reuse=(not is_training))\n elif case('LSTM'):\n cell = tf.nn.rnn_cell.LSTMCell(num_units=layer_size, use_peepholes=config['recurrent_peepholes'],\n forget_bias=config['recurrent_forget_bias'], cell_clip=config['recurrent_threshold'], \n initializer=recurrent_init['base'], reuse=(not is_training))\n elif case('LNLSTM'):\n cell = tf.contrib.rnn.LayerNormBasicLSTMCell(num_units=layer_size, forget_bias=config['recurrent_forget_bias'],\n layer_norm=config['recurrent_layer_normalization'],\n dropout_keep_prob=keep_prob, reuse=(not is_training))\n elif case('LSTMBlock'):\n cell = tf.contrib.rnn.LSTMBlockCell(num_units=layer_size, forget_bias=config['recurrent_forget_bias'], \n use_peephole=config['recurrent_peepholes'])\n\n # wrap cell with zoneout\n if hidden_state_keep_prob < 1 or memory_cell_keep_prob < 1:\n cell = rnn_cell_extended.ZoneoutWrapper(cell=cell, is_training=is_training, seed=config['zoneout_seed'],\n hidden_state_keep_prob=hidden_state_keep_prob, memory_cell_keep_prob=memory_cell_keep_prob)\n\n # if not just evaluation, then wrap cell in dropout\n if is_training and (input_keep_prob < 1 or output_keep_prob < 1 or keep_prob < 1):\n cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob, \n state_keep_prob=keep_prob, variational_recurrent=config['recurrent_variational_dropout'], \n seed=config['dropout_seed'])\n\n # add to collection\n cells.append(cell)\n\n # stack multiple cells if needed\n if len(cells) > 1:\n cell = tf.nn.rnn_cell.MultiRNNCell(cells)\n else:\n cell = cells[0]\n\n return cell\n\ndef _alphabet(mode, config):\n \"\"\" Creates alphabet for alphabetized dihedral prediction. 
\"\"\"\n\n # prepare initializer\n if config['alphabet'] is not None:\n alphabet_initializer = tf.constant_initializer(config['alphabet']) # user-defined alphabet\n else:\n alphabet_initializer = dict_to_init(config['alphabet_init'], config['alphabet_seed']) # random initialization\n\n # alphabet variable, possibly trainable\n alphabet = tf.get_variable(name='alphabet',\n shape=[config['alphabet_size'], NUM_DIHEDRALS],\n initializer=alphabet_initializer,\n trainable=config['alphabet_trainable']) # [OUTPUT_SIZE, NUM_DIHEDRALS]\n if mode == 'training' and config['alphabet_trainable']: \n tf.add_to_collection(tf.GraphKeys.WEIGHTS, alphabet) # add to WEIGHTS collection if trainable\n\n return alphabet\n\ndef _dihedrals(mode, config, inputs, alphabet=None):\n \"\"\" Converts internal representation resultant from RNN output activations\n into dihedral angles based on one of many methods. \n\n The optional argument alphabet does not determine whether an alphabet \n should be created or not--that's controlled by config. Instead the\n option allows the reuse of an existing alphabet. \"\"\"\n \n is_training = (mode == 'training')\n\n # output size for linear transform layer (OUTPUT_SIZE)\n output_size = config['alphabet_size'] if config['is_alphabetized'] else NUM_DIHEDRALS\n \n # set up non-linear dihedrals layer(s) if requested\n nonlinear_out_proj_size = config['recurrent_nonlinear_out_proj_size']\n if nonlinear_out_proj_size is not None:\n if config['recurrent_nonlinear_out_proj_normalization'] == 'batch_normalization':\n nonlinear_out_proj_normalization_fn = layers.batch_norm\n nonlinear_out_proj_normalization_fn_opts = {'center': True, 'scale': True, 'decay': 0.9, 'epsilon': 0.001, \n 'is_training': tf.constant(is_training), 'scope': 'nonlinear_out_proj_batch_norm', \n 'outputs_collections': config['name'] + '_' + tf.GraphKeys.ACTIVATIONS}\n elif config['recurrent_nonlinear_out_proj_normalization'] == 'layer_normalization':\n nonlinear_out_proj_normalization_fn = layers.layer_norm\n nonlinear_out_proj_normalization_fn_opts = {'center': True, 'scale': True, 'scope': 'nonlinear_out_proj_layer_norm', \n 'outputs_collections': config['name'] + '_' + tf.GraphKeys.ACTIVATIONS}\n else:\n nonlinear_out_proj_normalization_fn = None\n nonlinear_out_proj_normalization_fn_opts = None\n\n nonlinear_out_proj_fn = {'tanh': tf.tanh, 'relu': tf.nn.relu}[config['recurrent_nonlinear_out_proj_function']]\n\n outputs = inputs\n for idx, (layer_size, init) in enumerate(zip(nonlinear_out_proj_size, config['recurrent_nonlinear_out_proj_init'])):\n recurrent_nonlinear_out_proj_init = dict_to_inits(init, config['recurrent_nonlinear_out_proj_seed'])\n outputs = layers.fully_connected(outputs, layer_size, scope='nonlinear_dihedrals_' + str(idx), \n activation_fn=nonlinear_out_proj_fn, \n normalizer_fn=nonlinear_out_proj_normalization_fn, \n normalizer_params=nonlinear_out_proj_normalization_fn_opts,\n weights_initializer=recurrent_nonlinear_out_proj_init['base'], \n biases_initializer=recurrent_nonlinear_out_proj_init['bias'], \n outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS, \n variables_collections={'weights': [tf.GraphKeys.WEIGHTS], 'biases': [tf.GraphKeys.BIASES]})\n dihedrals_inputs = outputs\n # [NUM_STEPS, BATCH_SIZE, NONLINEAR_DIHEDRALS_LAYER_SIZE]\n else:\n dihedrals_inputs = inputs\n # [NUM_STEPS, BATCH_SIZE, N x RECURRENT_LAYER_SIZE] where N is 1 or 2 depending on bidirectionality\n\n # set up linear transform variables\n recurrent_out_proj_init = 
dict_to_inits(config['recurrent_out_proj_init'], config['recurrent_out_proj_seed'])\n linear = layers.fully_connected(dihedrals_inputs, output_size, activation_fn=None, scope='linear_dihedrals',\n weights_initializer=recurrent_out_proj_init['base'], biases_initializer=recurrent_out_proj_init['bias'],\n variables_collections={'weights': [tf.GraphKeys.WEIGHTS], 'biases': [tf.GraphKeys.BIASES]}, \n outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS)\n # [NUM_STEPS, BATCH_SIZE, OUTPUT_SIZE]\n\n # reduce to dihedrals, through an alphabet if specified\n if config['is_alphabetized']:\n # create alphabet if one is not already there\n if alphabet is None: alphabet = _alphabet(mode, config)\n\n # angularize alphabet if specified\n if config['is_angularized']: alphabet = angularize(alphabet)\n\n # batch or layer normalize linear inputs to softmax (stats are computed over all batches and timesteps, effectively flattened)\n if config['alphabet_normalization'] == 'batch_normalization':\n linear = layers.batch_norm(linear, center=True, scale=True, decay=0.999, epsilon=0.001, is_training=tf.constant(is_training), \n scope='alphabet_batch_norm', outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS)\n elif config['alphabet_normalization'] == 'layer_normalization':\n linear = layers.layer_norm(linear, center=True, scale=True,\n scope='alphabet_layer_norm', outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS)\n\n # softmax for linear to create angle mixtures\n flattened_linear = tf.reshape(linear, [-1, output_size]) # [NUM_STEPS x BATCH_SIZE, OUTPUT_SIZE]\n probs = tf.nn.softmax(flattened_linear / config['alphabet_temperature'], name='probs') # [NUM_STEPS x BATCH_SIZE, OUTPUT_SIZE] \n tf.add_to_collection(config['name'] + '_' + tf.GraphKeys.ACTIVATIONS, probs)\n\n # dropout alphabet if specified. I don't renormalize since final angle is invariant wrt overall scale.\n if mode == 'training' and config['alphabet_keep_probability'] < 1:\n probs = tf.nn.dropout(probs, config['alphabet_keep_probability'], seed=config['dropout_seed'], name='dropped_probs')\n\n # form final dihedrals based on mixture of alphabetized angles\n num_steps = tf.shape(linear)[0]\n batch_size = linear.get_shape().as_list()[1]\n flattened_dihedrals = reduce_mean_angle(probs, alphabet) # [NUM_STEPS x BATCH_SIZE, NUM_DIHEDRALS]\n dihedrals = tf.reshape(flattened_dihedrals, [num_steps, batch_size, NUM_DIHEDRALS]) # [NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS]\n else:\n # just linear\n dihedrals = linear\n\n # angularize if specified\n if config['is_angularized']: dihedrals = angularize(dihedrals)\n # [NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS] (for both cases)\n\n # add angle shift\n dihedrals = tf.add(dihedrals, tf.constant(config['angle_shift'], dtype=tf.float32, name='angle_shift'), name='dihedrals')\n\n return dihedrals\n\ndef _coordinates(config, dihedrals):\n \"\"\" Converts dihedrals into full 3D structures. 
\"\"\"\n\n # converts dihedrals to points ready for reconstruction.\n points = dihedral_to_point(dihedrals) # [NUM_STEPS x NUM_DIHEDRALS, BATCH_SIZE, NUM_DIMENSIONS]\n \n # converts points to final 3D coordinates.\n coordinates = point_to_coordinate(points, num_fragments=config['num_reconstruction_fragments'], \n parallel_iterations=config['num_reconstruction_parallel_iters']) \n # [NUM_STEPS x NUM_DIHEDRALS, BATCH_SIZE, NUM_DIMENSIONS]\n\n return coordinates\n\ndef _drmsds(config, coordinates, targets, weights):\n \"\"\" Computes reduced weighted dRMSD loss (as specified by weights) \n between predicted tertiary structures and targets. \"\"\"\n\n # lose end residues if desired\n if config['num_edge_residues'] > 0:\n coordinates = coordinates[:-(config['num_edge_residues'] * NUM_DIHEDRALS)]\n\n # if only c_alpha atoms are requested then subsample\n if config['atoms'] == 'c_alpha': # starts at 1 because c_alpha atoms are the second atoms\n coordinates = coordinates[1::NUM_DIHEDRALS] # [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]\n targets = targets[1::NUM_DIHEDRALS] # [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]\n \n # compute per structure dRMSDs\n drmsds = drmsd(coordinates, targets, weights, name='drmsds') # [BATCH_SIZE]\n\n # add to relevant collections for summaries, etc.\n if config['log_model_summaries']: tf.add_to_collection(config['name'] + '_drmsdss', drmsds)\n\n return drmsds\n\ndef _reduce_loss_quotient(config, losses, masks, group_filter, name_prefix=''):\n \"\"\" Reduces loss according to normalization order. \"\"\"\n\n normalization = config['tertiary_normalization']\n num_edge_residues = config['num_edge_residues']\n max_seq_length = config['num_steps']\n\n losses_filtered = tf.boolean_mask(losses, group_filter) # will give problematic results if all entries are removed\n\n for case in switch(normalization):\n if case('zeroth'):\n loss_factors = tf.ones_like(losses_filtered)\n elif case ('first'):\n loss_factors = tf.boolean_mask(effective_steps(masks, num_edge_residues), group_filter)\n fixed_denominator_factor = float(max_seq_length - num_edge_residues)\n elif case ('second'):\n eff_num_stepss = tf.boolean_mask(effective_steps(masks, num_edge_residues), group_filter)\n loss_factors = (tf.square(eff_num_stepss) - eff_num_stepss) / 2.0\n fixed_denominator_factor = float(max_seq_length - num_edge_residues)\n fixed_denominator_factor = ((fixed_denominator_factor ** 2) - fixed_denominator_factor) / 2.0\n\n numerator = tf.reduce_sum(loss_factors * losses_filtered, name=name_prefix + '_numerator')\n\n if config['batch_dependent_normalization'] or normalization == 'zeroth':\n denominator = tf.reduce_sum(loss_factors, name=name_prefix + '_denominator')\n else:\n denominator = tf.multiply(tf.cast(tf.size(loss_factors), tf.float32), fixed_denominator_factor, name=name_prefix + '_denominator')\n\n return numerator, denominator\n\ndef _accumulate_loss(config, numerator, denominator, name_prefix=''):\n \"\"\" Constructs ops to accumulate and reduce loss and maintain a memory of lowest loss achieved \"\"\"\n\n if config['num_evaluation_invocations'] == 1:\n # return simple loss\n accumulated_loss = tf.divide(numerator, denominator, name=name_prefix)\n update_op = reduce_op = tf.no_op()\n else:\n # create accumulator variables. 
note that tf.Variable uses name_scope (not variable_scope) for naming, which is what's desired in this instance\n numerator_accumulator = tf.Variable(initial_value=0., trainable=False, name=name_prefix + '_numerator_accumulator')\n denominator_accumulator = tf.Variable(initial_value=0., trainable=False, name=name_prefix + '_denominator_accumulator')\n\n # accumulate\n with tf.control_dependencies([numerator, denominator, numerator_accumulator, denominator_accumulator]):\n accumulate_numerator = tf.assign_add(numerator_accumulator, numerator)\n accumulate_denominator = tf.assign_add(denominator_accumulator, denominator)\n update_op = tf.group(accumulate_numerator, accumulate_denominator, name=name_prefix + '_accumulate_op')\n\n # divide to get final quotient\n with tf.control_dependencies([update_op]):\n accumulated_loss = tf.divide(numerator_accumulator, denominator_accumulator, name=name_prefix + '_accumulated')\n\n # zero accumulators\n with tf.control_dependencies([accumulated_loss]):\n zero_numerator = tf.assign(numerator_accumulator, 0.)\n zero_denominator = tf.assign(denominator_accumulator, 0.)\n reduce_op = tf.group(zero_numerator, zero_denominator, name=name_prefix + '_reduce_op')\n\n min_loss_achieved = tf.Variable(initial_value=float('inf'), trainable=False, name='min_' + name_prefix + '_achieved')\n min_loss_op = tf.assign(min_loss_achieved, tf.reduce_min([min_loss_achieved, accumulated_loss]), name='min_' + name_prefix + '_achieved_op')\n with tf.control_dependencies([min_loss_op]):\n min_loss_achieved = tf.identity(min_loss_achieved)\n\n return accumulated_loss, min_loss_achieved, min_loss_op, update_op, reduce_op\n\ndef _training(config, loss):\n \"\"\" Creates loss optimizer and returns minimization op. \"\"\"\n\n # helper function\n optimizer_args = lambda o: o.__init__.__code__.co_varnames[:o.__init__.__code__.co_argcount]\n\n # select appropriate optimization function and construct arg list based on config\n optimizer_func = {'steepest': tf.train.GradientDescentOptimizer, # doesn't support momentum, unlike autograd\n 'rmsprop': tf.train.RMSPropOptimizer, \n 'adam': tf.train.AdamOptimizer, \n 'momentum': tf.train.MomentumOptimizer,\n 'adagrad': tf.train.AdagradOptimizer,\n 'adadelta': tf.train.AdadeltaOptimizer}[config['optimizer']]\n optimizer_params = config.keys() & set(optimizer_args(optimizer_func))\n optimizer_params_and_values = {param: config[param] for param in optimizer_params}\n optimizer = optimizer_func(**optimizer_params_and_values)\n\n # obtain and process gradients\n grads_and_vars = optimizer.compute_gradients(loss)\n threshold = config['gradient_threshold']\n\n if threshold != float('inf'):\n for case in switch(config['rescale_behavior']):\n if case('norm_rescaling'):\n grads, _ = tf.clip_by_global_norm([g for g, _ in grads_and_vars], threshold)\n vars_ = [v for _, v in grads_and_vars]\n grads_and_vars = zip(grads, vars_)\n elif case('hard_clipping'):\n grads_and_vars = [(tf.clip_by_value(g, -threshold, threshold), v) for g, v in grads_and_vars]\n\n # apply gradients and return stepping op\n global_step = tf.get_variable(initializer=tf.constant_initializer(0), shape=[], trainable=False, dtype=tf.int32, name='global_step')\n minimize_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n # dict useful for diagnostics\n grads_and_vars_dict = {}\n grads_and_vars_dict.update({('g' + str(i)): g for i, (g, _) in enumerate(grads_and_vars)})\n grads_and_vars_dict.update({('v' + str(i)): v for i, (_, v) in enumerate(grads_and_vars)})\n\n 
return global_step, minimize_op, grads_and_vars_dict\n\ndef _history(config, loss, loss_history=None, scaling_factor=LOSS_SCALING_FACTOR):\n \"\"\" Creates op for loss history updating. \"\"\"\n\n # op for shifting history, i.e. adding new loss, dropping oldest one\n #new_history = tf.concat([loss_history[1:], tf.expand_dims(loss * scaling_factor, 0)], 0)\n new_history = tf.concat([loss_history[1:], [loss * scaling_factor]], 0)\n with tf.control_dependencies([new_history]):\n update_op = tf.assign(loss_history, new_history, name='update_curriculum_history_op')\n \n return update_op\n\ndef _curriculum(config, step, loss_history, dependency_ops):\n \"\"\" Creates TF ops for maintaining and advancing the curriculum. \"\"\"\n\n # assign appropriate curriculum increment value\n for case in switch(config['behavior']):\n if case('fixed_rate'):\n # fixed rate, always return same number\n increment = tf.constant(config['rate'], name='curriculum_increment')\n elif case('loss_threshold'):\n # return fixed increment if last loss is below threshold, zero otherwise\n increment_pred = tf.less(loss_history[-1], config['threshold'], name='curriculum_predicate')\n full_increment_func = lambda: tf.constant(config['rate'], name='full_curriculum_increment')\n zero_increment_func = lambda: tf.constant(0.0, name='zero_curriculum_increment')\n increment = tf.cond(increment_pred, full_increment_func, zero_increment_func)\n elif case('loss_change'):\n # predicate for increment type\n increment_pred = tf.not_equal(loss_history[0], DUMMY_LOSS, name='curriculum_predicate')\n\n # increment function for when loss history is still\n def full_increment_func():\n lin_seq = tf.expand_dims(tf.linspace(0., 1., config['change_num_iterations']), 1)\n ls_matrix = tf.concat([tf.ones_like(lin_seq), lin_seq], 1)\n ls_rhs = tf.expand_dims(loss_history, 1)\n ls_slope = tf.matrix_solve_ls(ls_matrix, ls_rhs)[1, 0]\n\n full_increment = tf.div(config['rate'], tf.pow(tf.abs(ls_slope) + 1, config['sharpness']), name='full_curriculum_increment')\n\n return full_increment\n\n # dummy increment function for when loss history is changing rapidly\n zero_increment_func = lambda: tf.constant(0.0, name='zero_curriculum_increment')\n\n # final conditional increment\n increment = tf.cond(increment_pred, full_increment_func, zero_increment_func)\n\n # create updating op. the semantics are such that training / gradient update is first performed before the curriculum is incremented.\n with tf.control_dependencies(dependency_ops):\n update_op = tf.assign_add(step, increment, name='update_curriculum_op')\n\n return update_op\n"
] | [
[
"tensorflow.reduce_min",
"tensorflow.train.start_queue_runners",
"tensorflow.constant_initializer",
"tensorflow.contrib.layers.summarize_activations",
"tensorflow.group",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.ones_like",
"numpy.min",
"tensorflow.reshape",
"tensorflow.assign_add",
"tensorflow.clip_by_value",
"tensorflow.square",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.local_variables_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.add_to_collection",
"tensorflow.identity",
"tensorflow.divide",
"tensorflow.no_op",
"tensorflow.trainable_variables",
"tensorflow.set_random_seed",
"tensorflow.shape",
"tensorflow.train.latest_checkpoint",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.RandomShuffleQueue",
"numpy.max",
"tensorflow.summary.histogram",
"tensorflow.Variable",
"tensorflow.train.Saver",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.variable_scope",
"numpy.transpose",
"tensorflow.nn.rnn_cell.MultiRNNCell",
"tensorflow.nn.dropout",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.get_collection",
"tensorflow.tuple",
"tensorflow.abs",
"tensorflow.contrib.rnn.LayerNormBasicLSTMCell",
"tensorflow.train.Coordinator",
"tensorflow.expand_dims",
"tensorflow.to_int64",
"tensorflow.summary.scalar",
"tensorflow.train.string_input_producer",
"numpy.loadtxt",
"tensorflow.get_variable",
"tensorflow.reduce_sum",
"tensorflow.name_scope",
"tensorflow.summary.merge_all",
"tensorflow.matrix_solve_ls",
"tensorflow.clip_by_global_norm",
"tensorflow.boolean_mask",
"tensorflow.zeros_initializer",
"tensorflow.contrib.rnn.LSTMBlockCell",
"tensorflow.size",
"tensorflow.assign",
"tensorflow.linspace",
"tensorflow.reverse_sequence",
"tensorflow.cond",
"tensorflow.nn.rnn_cell.BasicRNNCell",
"tensorflow.not_equal",
"tensorflow.reset_default_graph",
"numpy.ones",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.train.QueueRunner",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.summary.FileWriter",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.GPUOptions"
]
] |
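The `_curriculum` op in the row above, under its `'loss_change'` behavior, fits a least-squares line to the recent loss history and shrinks the curriculum increment as the fitted slope grows. Below is a minimal NumPy sketch of that rule, assuming the same `rate` and `sharpness` hyperparameters as the TF code; the sample history values are invented for illustration.

```python
import numpy as np

def curriculum_increment(loss_history, rate=0.1, sharpness=1.0):
    # Least-squares fit of loss_history against a [1, t] design matrix,
    # mirroring the tf.matrix_solve_ls call in the TF code above.
    n = len(loss_history)
    t = np.linspace(0.0, 1.0, n)
    design = np.stack([np.ones(n), t], axis=1)
    slope = np.linalg.lstsq(design, np.asarray(loss_history), rcond=None)[0][1]
    # Large |slope| (loss still changing rapidly) -> small increment.
    return rate / (np.abs(slope) + 1.0) ** sharpness

# Illustrative history: a nearly flat loss yields close to the full rate.
print(curriculum_increment([5.20, 5.10, 5.05, 5.02, 5.01]))
```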
kellyjelly0904/macros_29id | [
"573946d13eee7f85da049ac666b5dd2d18d19bb1"
] | [
"IEX_29id/utils/plot.py"
] | [
"import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n\n\n\n# filepath='/home/beams/29IDUSER/Documents/User_Folders/Topp/S089.tif'\ndef plot_image(filepath,h=20,v=10):\n \"\"\"\n filepath = '/home/beams/29IDUSER/Documents/User_Folders/UserName/TifFile.tif'\n \"\"\"\n image = mpimg.imread(filepath)\n plt.figure(figsize=(h,v))\n #plt.imshow(image,cmap='gray',vmin=v1,vmax=v2)\n plt.imshow(image,cmap='gray')\n plt.axis('off')\n plt.show()\n"
] | [
[
"matplotlib.image.imread",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
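A typical call to the `plot_image` helper above would look like the following; the import path is inferred from the row's `file_path` (`IEX_29id/utils/plot.py`) and the TIFF path is the docstring's example, so both should be treated as assumptions.

```python
from IEX_29id.utils.plot import plot_image

# Render a detector TIFF at 12 x 6 inches (h and v set the figsize).
plot_image('/home/beams/29IDUSER/Documents/User_Folders/UserName/TifFile.tif',
           h=12, v=6)
```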
vsingh-group/DatasetPooling | [
"fcede46a949def869b777dfaec4863b39f926945"
] | [
"code/src/train_equivariance.py"
] | [
"import argparse\nfrom src import dataloader as mydatasets, model as models\nfrom src import adni_models\nimport os, sys, time, shutil\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport datetime as dt\n\nfrom src.average_meter import AverageMeter\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom sklearn.metrics import roc_auc_score\nsys.path.append('../..')\n\nfrom src.test_adversaries import train_adv_epoch\n\nimport pdb\n\ndef get_equiv_loss(args, device, g_lt, g):\n\n # Constant difference\n g_lt_freeze = g_lt.detach().clone() \n g_lt_diff = g_lt.unsqueeze(1) - g_lt_freeze.unsqueeze(0)\n\n g_diff = (g.unsqueeze(1) - g.unsqueeze(0)).unsqueeze(-1)\n normal_ = torch.normal(0, 0.001, size=g_lt_diff.shape).to(device)\n delta = g_diff + normal_ \n temp = (args.delta_lambda * delta + g_lt_diff)\n loss = (temp * temp).sum(-1).mean()\n\n return loss\n\n\ndef mmd_lap_loss(args, device, mu, labels):\n diff = mu.unsqueeze(1) - mu.unsqueeze(0)\n diff = diff.norm(dim = -1)\n lap_kernel = torch.exp(-diff/args.mmd_lap_p)\n\n s = torch.zeros_like(labels).float().to(device)\n unq_labels = torch.unique(labels)\n for i in unq_labels:\n index = labels == i\n n = index.sum().item()\n if n < 1e-9:\n continue\n s[index] = 1.0/n\n s_prod = s.unsqueeze(1) * s.unsqueeze(0)\n\n c_diff = labels.unsqueeze(1) != labels.unsqueeze(0)\n lap_kernel = lap_kernel * s_prod\n loss = -lap_kernel[c_diff == True].sum()\n loss += (len(unq_labels)-1)*lap_kernel[c_diff == False].sum()\n \n return loss\n\ndef pairwise_loss(args, device, mu, labels):\n diff = mu.unsqueeze(1) - mu.unsqueeze(0)\n diff = diff.pow(2).sum(-1)\n \n s = torch.zeros_like(labels).float().to(device)\n unq_labels = torch.unique(labels)\n for i in unq_labels:\n index = labels == i\n n = index.sum().item()\n if n < 1e-9:\n continue\n s[index] = 1.0/n\n s_prod = s.unsqueeze(1) * s.unsqueeze(0)\n \n c_diff = labels.unsqueeze(1) != labels.unsqueeze(0)\n diff = diff * s_prod\n loss = diff[c_diff == True].sum()\n loss += -(len(unq_labels)-1)*diff[c_diff == False].sum()\n \n return loss\n\ndef subsampling_loss(args, device, mu, controls, covars):\n loss = 0.\n covars_dz = torch.trunc(covars*10) # covars into discrete groups\n for cvr in torch.unique(covars_dz):\n mu_cvr = mu[covars_dz==cvr]\n controls_cvr = controls[covars_dz==cvr]\n loss += mmd_lap_loss(args, device, mu_cvr, controls_cvr)\n return loss\n\ndef randmatch_loss(args, device, mu, targets, controls, covars):\n loss = 0.\n covars_dz = torch.trunc(covars*10) # covars into discrete groups\n for cvr in torch.unique(covars_dz):\n for tgt in torch.unique(targets):\n mu_ct = mu[(covars_dz==cvr) & (targets==tgt)]\n controls_ct = controls[(covars_dz==cvr) & (targets==tgt)]\n loss += pairwise_loss(args, device, mu_ct, controls_ct)\n\n return loss\n\ndef train_disc(args, device, epoch, adv, opt, trainloader, writer, equivar_model):\n assert equivar_model is not None\n equivar_model.eval()\n train_adv_epoch(args, device, epoch, adv, opt, trainloader, writer, equivar_model, None, tag='train')\n print('Disc epoch!')\n\n\ndef train_disentangler(device, epoch, dis, opt, trainloader, writer, equivar_model):\n equivar_model.eval()\n for idx, (x, y, c, g) in enumerate(trainloader):\n x = x.to(device)\n _, _, e1, e2 = equivar_model(x)\n e1 = e1.detach()\n e2 = e2.detach()\n e1_pred, e2_pred = dis(e1, e2)\n loss = F.mse_loss(e1_pred, e1) + F.mse_loss(e2_pred, e2)\n opt.zero_grad()\n 
loss.backward()\n opt.step()\n print('Disentangler epoch!')\n\n\ndef equivar_epoch(args, device, epoch, model, opt, dataloader, writer, tag='train', disc=None, disc_opt=None):\n loss_logger = AverageMeter()\n recons_loss_logger = AverageMeter()\n pred_loss_logger = AverageMeter()\n equiv_loss_logger = AverageMeter()\n mu_logger = AverageMeter()\n sigma_logger = AverageMeter()\n prior_loss_logger = AverageMeter()\n train = tag == 'train'\n if train:\n if args.equiv_type == 'cai':\n train_disc(args, device, epoch, disc, disc_opt, dataloader, writer, model)\n disc.eval()\n model.train()\n else:\n model.eval()\n\n total_steps = len(dataloader.dataset)//args.batch_size\n y_correct = 0\n y_total = 0\n y_true_pos = 0\n y_pos = 0\n start_time = time.time()\n\n for idx, (x, y, c, g) in enumerate(dataloader):\n x = x.to(device)\n y = y.to(device)\n c = c.to(device)\n g = g.to(device)\n\n recons, pred_logits, g_lt, _ = model(x, g.unsqueeze(1))\n\n if args.equiv_type == 'ell2':\n equiv_loss = get_equiv_loss(args, device, g_lt, g)\n elif args.equiv_type == 'mmd_lap':\n equiv_loss = mmd_lap_loss(args, device, g_lt, c)\n elif args.equiv_type == 'subsampling':\n equiv_loss = subsampling_loss(args, device, g_lt, c, g)\n elif args.equiv_type == 'randmatch':\n equiv_loss = randmatch_loss(args, device, g_lt, y, c, g)\n elif args.equiv_type == 'cai':\n if train:\n logits = disc(g_lt)\n equiv_loss = -1 * F.cross_entropy(logits, c)\n else:\n equiv_loss = torch.tensor(0).to(device)\n elif args.equiv_type == 'none':\n equiv_loss = torch.tensor(0).to(device)\n else:\n raise NotImplementedError\n\n if recons is not None:\n recons_loss = F.mse_loss(recons, x)\n else:\n recons_loss = torch.tensor(0).to(device)\n \n if pred_logits is not None:\n pred_loss = F.cross_entropy(pred_logits, y)\n else:\n pred_loss = torch.tensor(0).to(device)\n \n loss = args.recon_lambda * recons_loss + pred_loss + args.equiv_lambda * equiv_loss\n \n if args.add_prior:\n loss += args.beta * prior_loss\n prior_loss_logger.update(prior_loss.item())\n\n mu_logger.update(g_lt.norm(dim=-1).mean())\n \n # Log the losses\n loss_logger.update(loss.item())\n recons_loss_logger.update(recons_loss.item())\n if pred_logits is not None:\n pred_loss_logger.update(pred_loss.item())\n equiv_loss_logger.update(equiv_loss.item())\n\n pred = torch.argmax(pred_logits, 1)\n y_correct += torch.sum(pred == y)\n y_total += x.size(0)\n y_pos += torch.sum(y)\n y_true_pos += torch.sum(y[pred == 1])\n\n if idx % args.log_step == 0:\n start_time = time.time()\n \n if train:\n opt.zero_grad()\n loss.backward()\n opt.step()\n \n model_name = 'equivar_'\n accuracy = y_correct * 100.0 / y_total\n precision = y_true_pos * 100.0 / y_pos\n recons_loss_avg = recons_loss_logger.avg\n print(tag, 'accuracy:', accuracy.item(), 'recons_loss:', recons_loss_avg)\n \n writer.add_scalar(model_name + 'acc/' + tag, accuracy, epoch)\n writer.add_scalar(model_name + 'recons_loss/' + tag, recons_loss_logger.avg, epoch)\n writer.add_scalar(model_name + 'pred_loss/' + tag, pred_loss_logger.avg, epoch)\n writer.add_scalar(model_name + 'equiv_loss/' + tag, equiv_loss_logger.avg, epoch)\n writer.add_scalar(model_name + 'mu/' + tag, mu_logger.avg, epoch)\n writer.add_scalar(model_name + 'loss/' + tag, loss_logger.avg, epoch)\n return accuracy, recons_loss_avg\n\n\ndef train_equivar(args, device, model_path, logf, model, opt, trainloader, valloader, testloader, writer):\n lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.65)\n best_val_acc = 0 \n if args.equiv_type == 'cai':\n if 
args.dataset_name == 'German' or args.dataset_name == 'Adult':\n disc = models.Adv('Disc', input_dim=args.latent_dim, output_dim=2,\n hidden_dim=args.adv_hidden_dim, hidden_layers=3).to(device)\n elif args.dataset_name == 'ADNI' or args.dataset_name == 'ADCP':\n disc = models.Adv('Disc', input_dim=args.latent_dim, output_dim=3,\n hidden_dim=args.adv_hidden_dim, hidden_layers=3).to(device)\n else:\n raise NotImplementedError\n disc_opt = optim.Adam(disc.parameters(), lr=args.disc_lr)\n\n for epoch in range(1, args.num_epochs + 1):\n if args.equiv_type == 'cai':\n equivar_epoch(args, device, epoch, model, opt, trainloader, writer, tag='train',\n disc=disc, disc_opt=disc_opt)\n else:\n equivar_epoch(args, device, epoch, model, opt, trainloader, writer, tag='train')\n val_acc, val_recons = equivar_epoch(args, device, epoch, model, opt, valloader, writer, tag='val')\n\n if testloader is None:\n test_acc, test_recons = None, None\n else:\n test_acc, test_recons = equivar_epoch(args, device, epoch, model, opt, testloader, writer, tag='test')\n \n if val_acc > best_val_acc:\n name = 'Equivar_best_val_acc'\n model.name = name\n path = os.path.join(model_path, name + '.pth')\n torch.save(model.state_dict(), path)\n best_val_acc = val_acc\n message = 'Best val_acc{} val_recons{}\\n test_acc{} test_recons{}\\n Saving model{}\\n'.format(\n best_val_acc, val_recons, test_acc, test_recons, path)\n print(message)\n logf.write(message + '\\n')\n if epoch % args.save_step == 0:\n name = 'Equivar_ckpt_' + str(epoch)\n path = os.path.join(model_path, name + '.pth')\n model.name = name\n torch.save(model.state_dict(), path)\n lr_scheduler.step()\n name = 'Equivar'\n model.name = name\n path = os.path.join(model_path, name + '.pth')\n torch.save(model.state_dict(), path)\n\n\ndef run_equivariance(args, device, model_path, logf, trainset, valset, testset, writer):\n if args.dataset_name == 'German' and (args.equiv_type == 'cai'):\n drop_last = True\n else:\n drop_last = False\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True,\n drop_last=drop_last)\n valloader = torch.utils.data.DataLoader(valset, batch_size=args.batch_size, shuffle=False)\n if testset is None:\n testloader = None\n else:\n testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False)\n dummy_x, _, _, _ = trainset.__getitem__(0)\n input_dim = dummy_x.size(0)\n\n if args.dataset_name == 'German':\n if args.equiv_type == 'ell2':\n model = models.TauNetEncDec(input_dim=input_dim, latent_dim=args.latent_dim, feature_dim=0, const=0.01).to(device) \n else:\n model = models.BaselineEncDec(input_dim=input_dim, latent_dim=args.latent_dim, feature_dim=0).to(device)\n elif args.dataset_name == 'Adult':\n if args.equiv_type == 'ell2':\n model = models.TauNetEncDec(input_dim=input_dim, latent_dim=args.latent_dim, feature_dim=0, const=0.1).to(device)\n else:\n model = models.BaselineEncDec(input_dim=input_dim, latent_dim=args.latent_dim, feature_dim=0).to(device)\n elif args.dataset_name == 'ADNI' or args.dataset_name == 'ADCP':\n if args.equiv_type == 'ell2':\n model = adni_models.TauResNet(in_depth=1, n_blocks=args.blocks, interm_depths=args.channels, bottleneck=args.use_bottleneck_layers, n_out_linear=2, dropout=0.5, const=0.01)\n model = model.to(device)\n #model = torch.nn.DataParallel(model).to(device)\n else:\n model = adni_models.ResNet(in_depth=1, n_blocks=args.blocks, interm_depths=args.channels, bottleneck=args.use_bottleneck_layers, n_out_linear=2, dropout=0.5)\n model = 
model.to(device)\n #model = torch.nn.DataParallel(model).to(device)\n else:\n raise NotImplementedError\n \n opt = optim.Adam(model.parameters(), lr=args.lr)\n train_equivar(args, device, model_path, logf, model, opt, trainloader, valloader, testloader, writer)\n"
] | [
[
"torch.trunc",
"torch.optim.lr_scheduler.StepLR",
"torch.unique",
"torch.normal",
"torch.nn.functional.mse_loss",
"torch.nn.functional.cross_entropy",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.zeros_like",
"torch.exp",
"torch.argmax",
"torch.sum"
]
] |
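The `mmd_lap_loss` term in `train_equivariance.py` above computes a Laplacian-kernel MMD between latent codes of different groups: pairwise distances pass through `exp(-d/p)`, each sample is weighted by the reciprocal of its group size, and cross-group kernel mass is rewarded while within-group mass is penalized. Here is a self-contained PyTorch sketch of the same computation on invented tensors (`mmd_lap_p` stands in for `args.mmd_lap_p`).

```python
import torch

def mmd_lap(mu, labels, mmd_lap_p=1.0):
    # Pairwise Euclidean distances -> Laplacian kernel.
    diff = (mu.unsqueeze(1) - mu.unsqueeze(0)).norm(dim=-1)
    kernel = torch.exp(-diff / mmd_lap_p)
    # Weight each sample by 1 / (size of its group).
    s = torch.zeros_like(labels, dtype=mu.dtype)
    unq_labels = torch.unique(labels)
    for i in unq_labels:
        index = labels == i
        s[index] = 1.0 / index.sum()
    kernel = kernel * (s.unsqueeze(1) * s.unsqueeze(0))
    # Reward cross-group similarity, penalize within-group similarity.
    cross = labels.unsqueeze(1) != labels.unsqueeze(0)
    return -kernel[cross].sum() + (len(unq_labels) - 1) * kernel[~cross].sum()

mu = torch.randn(8, 4)
labels = torch.tensor([0, 0, 1, 1, 0, 1, 0, 1])
print(mmd_lap(mu, labels))
```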
mayukh18/keras-yolo2 | [
"166c9e1356758881fb744f27ce968aad19c1c5bc"
] | [
"train.py"
] | [
"#! /usr/bin/env python\nimport pickle\nimport argparse\nimport os\nimport numpy as np\nfrom preprocessing import parse_annotation\nfrom frontend import YOLO\nimport json\nfrom sklearn.model_selection import train_test_split\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nargparser = argparse.ArgumentParser(\n description='Train and validate YOLO_v2 model on any dataset')\n\nargparser.add_argument(\n '-c',\n '--conf',\n help='path to configuration file')\n\n\ndef _main_(args):\n config_path = args.conf\n\n with open(config_path) as config_buffer:\n config = json.loads(config_buffer.read())\n\n ###############################\n # Parse the annotations\n ###############################\n\n # parse annotations of the training set\n if 'pickle' or 'pkl' in config['train']['train_annot_folder']:\n train_imgs = pickle.load(open(config['train']['train_annot_folder'], \"rb\"))\n train_labels = {x: 10 for x in config['model']['labels']}\n else:\n train_imgs, train_labels = parse_annotation(config['train']['train_annot_folder'],\n config['train']['train_image_folder'],\n config['model']['labels'])\n\n\n # parse annotations of the validation set, if any, otherwise split the training set\n if os.path.exists(config['valid']['valid_annot_folder']):\n if 'pickle' or 'pkl' in config['valid']['valid_annot_folder']:\n train_imgs = pickle.load(open(config['valid']['valid_annot_folder'], \"rb\"))\n train_labels = {x: 10 for x in config['model']['labels']}\n else:\n valid_imgs, valid_labels = parse_annotation(config['valid']['valid_annot_folder'],\n config['valid']['valid_image_folder'],\n config['model']['labels'])\n else:\n train_imgs, valid_imgs = train_test_split(train_imgs, test_size=0.15,\n random_state=config['train']['random_seed'])\n\n if len(config['model']['labels']) > 0:\n overlap_labels = set(config['model']['labels']).intersection(set(train_labels.keys()))\n\n print('Seen labels:\\t', train_labels)\n print('Given labels:\\t', config['model']['labels'])\n print('Overlap labels:\\t', overlap_labels)\n\n if len(overlap_labels) < len(config['model']['labels']):\n print('Some labels have no annotations! Please revise the list of labels in the config.json file!')\n return\n else:\n print('No labels are provided. 
Train on all seen labels.')\n config['model']['labels'] = train_labels.keys()\n\n ###############################\n # Construct the model \n ###############################\n\n yolo = YOLO(backend=config['model']['backend'],\n input_size=config['model']['input_size'],\n labels=config['model']['labels'],\n max_box_per_image=config['model']['max_box_per_image'],\n anchors=config['model']['anchors'])\n\n ###############################\n # Load the pretrained weights (if any) \n ############################### \n\n if os.path.exists(config['train']['pretrained_weights']):\n print(\"Loading pre-trained weights in\", config['train']['pretrained_weights'])\n yolo.load_weights(config['train']['pretrained_weights'])\n\n ###############################\n # Start the training process \n ###############################\n\n yolo.train(train_imgs=train_imgs,\n valid_imgs=valid_imgs,\n train_times=config['train']['train_times'],\n valid_times=config['valid']['valid_times'],\n nb_epochs=config['train']['nb_epochs'],\n learning_rate=config['train']['learning_rate'],\n batch_size=config['train']['batch_size'],\n warmup_epochs=config['train']['warmup_epochs'],\n object_scale=config['train']['object_scale'],\n no_object_scale=config['train']['no_object_scale'],\n coord_scale=config['train']['coord_scale'],\n class_scale=config['train']['class_scale'],\n saved_weights_name=config['train']['saved_weights_name'],\n debug=config['train']['debug'])\n\n\nif __name__ == '__main__':\n args = argparser.parse_args()\n _main_(args)\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] |
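One detail in the `train.py` above worth spelling out: detecting a pickled annotation file must use an explicit membership test per extension, as the code does with `any(...)`. The superficially similar `'pickle' or 'pkl' in path` parses as `'pickle' or ('pkl' in path)`, and a non-empty string literal is truthy, so that expression is true for every path. A quick demonstration with a hypothetical path:

```python
path = 'annotations/train.xml'      # hypothetical, clearly not a pickle

print('pickle' or 'pkl' in path)                      # prints 'pickle' (truthy, always)
print(any(ext in path for ext in ('pickle', 'pkl')))  # prints False, as intended
```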
clemkoa/u-net | [
"554ed1b20de71659974c7049114700bc9db94008"
] | [
"unet/unet.py"
] | [
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass double_conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(double_conv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass up(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(up, self).__init__()\n self.up_scale = nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2)\n\n def forward(self, x1, x2):\n x2 = self.up_scale(x2)\n\n diffY = x1.size()[2] - x2.size()[2]\n diffX = x1.size()[3] - x2.size()[3]\n\n x2 = F.pad(x2, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n x = torch.cat([x2, x1], dim=1)\n return x\n\n\nclass down_layer(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(down_layer, self).__init__()\n self.pool = nn.MaxPool2d(2, stride=2, padding=0)\n self.conv = double_conv(in_ch, out_ch)\n\n def forward(self, x):\n x = self.conv(self.pool(x))\n return x\n\n\nclass up_layer(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(up_layer, self).__init__()\n self.up = up(in_ch, out_ch)\n self.conv = double_conv(in_ch, out_ch)\n\n def forward(self, x1, x2):\n a = self.up(x1, x2)\n x = self.conv(a)\n return x\n\n\nclass UNet(nn.Module):\n def __init__(self, dimensions=2):\n super(UNet, self).__init__()\n self.conv1 = double_conv(1, 64)\n self.down1 = down_layer(64, 128)\n self.down2 = down_layer(128, 256)\n self.down3 = down_layer(256, 512)\n self.down4 = down_layer(512, 1024)\n self.up1 = up_layer(1024, 512)\n self.up2 = up_layer(512, 256)\n self.up3 = up_layer(256, 128)\n self.up4 = up_layer(128, 64)\n self.last_conv = nn.Conv2d(64, dimensions, 1)\n\n def forward(self, x):\n x1 = self.conv1(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x1_up = self.up1(x4, x5)\n x2_up = self.up2(x3, x1_up)\n x3_up = self.up3(x2, x2_up)\n x4_up = self.up4(x1, x3_up)\n output = self.last_conv(x4_up)\n return output\n"
] | [
[
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.pad"
]
] |
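As a quick smoke test of the `UNet` module above: it expects a single-channel input whose spatial size is divisible by 16 (four 2x poolings on the contracting path), and returns one output channel per `dimensions`. The import path follows the row's `file_path` (`unet/unet.py`) and is an assumption.

```python
import torch
from unet.unet import UNet

model = UNet(dimensions=2)          # 2 output channels (e.g. two classes)
x = torch.randn(1, 1, 96, 96)       # [batch, channels, height, width]
with torch.no_grad():
    y = model(x)
print(y.shape)                      # torch.Size([1, 2, 96, 96])
```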
lkilcommons/esabin | [
"423411338a35f1882a0b6e7c6d838b48079f3e14"
] | [
"esabin/esafile.py"
] | [
"# (C) 2019 University of Colorado AES-CCAR-SEDA (Space Environment Data Analysis) Group\n# Liam Kilcommons - University of Colorado, Boulder - Colorado Center for Astrodynamics Research\n# Originally created May, 2016\nimport numpy as np\nimport h5py,os,shutil\nfrom collections import OrderedDict\nfrom esabin.esagrid import Esagrid,ConstantAzimuthalSpacingGrid,EsagridBin\nfrom esabin import spheretools\n\nclass EsaGridFileDuplicateTimeError(Exception):\n\tpass\n\nclass InvalidFlatIndexError(Exception):\n\tpass\n\nclass InvalidBinGroupNameError(Exception):\n\tpass\n\nclass EsagridBinComparisonError(Exception):\n\tpass\n\nclass EsagridBin(object):\n\t\"\"\"Class which abstractly represents one bin in a grid\"\"\"\n\tdef __init__(self,grid,flatind):\n\t\tself._meta = OrderedDict()\n\t\tself.grid = grid\n\n\t\tself['flatind'] = flatind\n\t\tself['slat'] = grid.flat_bins[flatind,0]\n\t\tself['elat'] = grid.flat_bins[flatind,1]\n\t\tself['lat'] = spheretools.angle_midpoint(self['slat'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself['elat'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tgrid.azi_units)\n\t\tself['sazi'] = grid.flat_bins[flatind,2]\n\t\tself['eazi'] = grid.flat_bins[flatind,3]\n\t\tself['azi'] = spheretools.angle_midpoint(self['sazi'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself['eazi'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tgrid.azi_units)\n\n\t\t#Alias azimuth coordinate by lon or lt\n\t\tself['s{}'.format(grid.azi_coord)]=self['sazi']\n\t\tself['e{}'.format(grid.azi_coord)]=self['eazi']\n\t\tself['{}'.format(grid.azi_coord)]=self['azi']\n\n\t\tself['azi_coord']=grid.azi_coord\n\n\t\t#Long name of bin\n\t\tself['longname'] = str(self)\n\n\tdef __str__(self):\n\t\treturn ('#%d,' % (self['flatind'])\n\t\t \t\t+'lat:%.3f-%.3f,' % (self['slat'],self['elat'])\n\t\t \t\t+'%s:%.3f-%.3f' % (self['azi_coord'],self['sazi'],self['eazi']))\n\n\tdef __eq__(self,other):\n\t\tbin_edge_keys = ['slat','elat','sazi','eazi']\n\t\tedges_match = []\n\t\tfor key in bin_edge_keys:\n\t\t\tedges_match.append(np.isclose(self[key],other[key],\n\t\t\t\t\t\t\t\t\t\t\trtol=0.,atol=1.0e-8))\n\t\treturn all(edges_match)\n\n\tdef __getitem__(self,key):\n\t\treturn self._meta[key]\n\n\tdef __setitem__(self,key,value):\n\t\tself._meta[key]=value\n\n\tdef items(self):\n\t\treturn self._meta.items()\n\n\tdef __contains__(self,key):\n\t\treturn key in self._meta\n\nclass EsagridFileBinGroup(object):\n\t\"\"\"Class which abstractly represents the H5 group which describes\n\ta bin's position and data\"\"\"\n\tdef __init__(self,grid,flatind):\n\t\tself.esagrid_bin = EsagridBin(grid,flatind)\n\t\tself.groupname = self._group_name_to_flatind(flatind)\n\n\tdef __getitem__(self,key):\n\t\treturn self.esagrid_bin[key]\n\n\tdef __setitem__(self,key,value):\n\t\tself.esagrid_bin[key]=value\n\n\tdef __contains__(self,key):\n\t\treturn key in self.esagrid_bin\n\n\tdef items(self):\n\t\treturn self.esagrid_bin.items()\n\n\t@staticmethod\n\tdef _flatind_from_group_name(h5groupname):\n\t\tflatindstr = h5groupname.split('bin')[-1]\n\t\ttry:\n\t\t\tflatind = int(flatindstr)\n\t\texcept ValueError:\n\t\t\traise InvalidBinGroupNameError(('{} '.format(h5groupname)\n\t\t\t\t\t\t\t\t\t\t\t+'is not a valid bin group name'))\n\t\treturn flatind\n\n\t@classmethod\n\tdef from_groupname(cls,grid,groupname):\n\t\tflatind = cls._flatind_from_group_name(groupname)\n\t\treturn cls(grid,flatind)\n\n\tdef _check_flatind(self,flatind):\n\t\tgrid = self.esagrid_bin.grid\n\t\tif flatind < 0 or flatind>=grid.n_bins:\n\t\t\traise InvalidFlatIndexError(('No bin with flat index 
{}'.format(flatind)\n\t\t\t\t\t\t\t \t\t\t+'in grid {}'.format(str(grid))))\n\n\tdef _group_name_to_flatind(self,flatind):\n\t\tself._check_flatind(flatind)\n\t\treturn 'bin%d' % (flatind)\n\n\tdef _check_bin_group_name(self,h5group):\n\t\t#H5 groups' name is their full path\n\t\th5groupname = h5group.name.split('/')[-1]\n\t\tif self.groupname != h5groupname:\n\t\t\traise RuntimeError(('H5 group name {} did not match'.format(h5groupname)\n\t\t\t\t\t\t\t +'EsagridFileBinGroup {}'.format(self.groupname)))\n\n\tdef check_bin_group_metadata(self,h5group,fix=False,raise_error=False):\n\t\t#Check that this group matches this object\n\t\tself._check_bin_group_name(h5group)\n\n\t\t#Check for old/missing metadata (e.g. flatind not an integer)\n\t\tfor attrname,attrval in self.esagrid_bin.items():\n\t\t\tif attrname in h5group.attrs:\n\t\t\t\tif h5group.attrs[attrname]!=attrval:\n\t\t\t\t\terrstr = ('Group:{}\\n'.format(h5group.name)\n\t\t\t\t\t\t\t +'Incorrect Attribute {}'.format(attrname)\n\t\t\t\t\t\t +'{}!={}'.format(attrval,h5group.attrs[attrname]))\n\t\t\t\t\t\n\t\t\t\t\tif raise_error:\n\t\t\t\t\t\traise BinGroupMetadataError(errstr)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(errstr)\n\n\t\t\t\t\tif fix:\n\t\t\t\t\t\th5group.attrs[attrname]=attrval\n\t\t\t\t\t\tprint('Fixed')\n\t\t\telse:\n\t\t\t\tprint('Group:{}'.format(h5group.name))\n\t\t\t\tprint('Missing Attribute {}'.format(attrname))\n\t\t\t\tif fix:\n\t\t\t\t\th5group.attrs[attrname]=attrval\n\t\t\t\t\tprint('Fixed')\n\n\n\tdef get_h5group(self,h5f):\n\t\tif self.groupname not in h5f:\n\t\t\th5grp = h5f.create_group(self.groupname)\n\t\t\t#Write bin describing metadata\n\t\t\tfor attrname,attrval in self.esagrid_bin.items():\n\t\t\t\th5grp.attrs[attrname]=attrval\n\t\telse:\n\t\t\th5grp = h5f[self.groupname]\n\t\t\tself.check_bin_group_metadata(h5grp,fix=True)\n\t\treturn h5grp\n\n\tdef append_from_other(self,h5f,other_h5f):\n\t\tself_h5group = self.get_h5group(h5f)\n\t\tother_h5group = other_h5f[self.groupname]\n\t\tself.check_bin_group_metadata(other_h5group,fix=False,raise_error=True)\n\t\tfor h5dsname,h5ds in other_h5group.items():\n\t\t\tif not isinstance(h5ds,h5py.Dataset):\n\t\t\t\tprint('Will not copy {}, not a HDF5 dataset'.format(h5dsname))\n\n\t\t\tadditional_attrs = {key:val for key,val in h5ds.attrs.items()}\n\t\t\tdata = h5ds[:]\n\t\t\tself.store(h5f,h5dsname,data,additional_attrs=additional_attrs)\n\n\tdef store(self,h5f,t,data,additional_attrs=None,silent=False):\n\t\th5grp = self.get_h5group(h5f)\n\n\t\tif isinstance(t,np.ndarray):\n\t\t\t#If time is an array, use the first value in the bin\n\t\t\t#as the hdf5 dataset name\n\t\t\th5datasetnm = str(t.flatten()[0])\n\t\telse:\n\t\t\t#If time is not an array, just use it's string\n\t\t\t#version as the dataset name\n\t\t\th5datasetnm = str(t)\n\n\t\t#Ensure no dataset name collisions\n\t\tif h5datasetnm in h5grp:\n\t\t\traise EsaGridFileDuplicateTimeError(('Dataset with name'\n\t\t\t \t+' {}'.format(h5datasetnm)\n\t\t\t \t+' already exists in'\n\t\t\t \t+' group {}'.format(h5grp)))\n\t\t# else:\n\t\t# \twhile h5datasetnm in h5grp:\n\t\t# \t\th5datasetnm += '0'\n\n\t\tdataset = h5grp.create_dataset(h5datasetnm,data=data)\n\t\tif additional_attrs is not None:\n\t\t\tfor attr in additional_attrs:\n\t\t\t\tdataset.attrs[attr]=additional_attrs[attr]\n\n\t\tif not silent:\n\t\t\tprint(\"Added %d points to %s\" % (data.size,\n\t\t\t\t\t\t\t\t\t\t\th5grp.attrs['longname']))\n\n\tdef copy(self,h5f,destination_esagrid_file_bingroup,destination_h5f):\n\t\tsrc_esagrid_bin = 
self.esagrid_bin\n\t\tdest_esagrid_bin = destination_esagrid_file_bingroup.esagrid_bin\n\t\tif src_esagrid_bin != dest_esagrid_bin:\n\t\t\traise EsagridBinComparisonError(('Cannot copy because '\n\t\t\t\t\t\t\t\t\t\t\t+'destination bin metadata does '\n\t\t\t\t\t\t\t\t\t\t\t+'not match source bin metadata '\n\t\t\t\t\t\t\t\t\t\t\t+'{} != '.format(str(dest_esagrid_bin))\n\t\t\t\t\t\t\t\t\t\t\t+'{}'.format(str(src_esagrid_bin))))\n\n\t\th5grp = self.get_h5group(h5f)\n\t\tother_h5grp = destination_esagrid_file_bingroup.get_h5group(destination_h5f)\n\t\tfor dataset_name in h5grp:\n\t\t\tif dataset_name in other_h5grp:\n\t\t\t\traise EsaGridFileDuplicateTimeError(('Error while copying. '\n\t\t\t\t\t\t\t\t\t\t\t\t\t+' Dataset with name '\n\t\t\t\t\t \t+' {}'.format(h5datasetnm)\n\t\t\t\t\t \t+' already exists in '\n\t\t\t\t\t \t+' destination '\n\t\t\t\t\t \t+' group {}'.format(h5grp)))\n\t\t\t#Only h5py Group objects have a copy method\n\t\t\th5grp.copy(dataset_name,other_h5grp,name=dataset_name)\n\n\tdef _columnify_additional_attrs(self,additional_attrs):\n\t\t\"\"\"Takes a list of dictionaries. Each dictionary in the\n\t\tlist are the HDF5 dataset attributes from one dataset in this\n\t\tbin's group. Converts this list of dictionaries to an\n\t\toutput dictionary of lists or arrays depending on the type of\n\t\tdata encountered. The keys of the output dictionary include any\n\t\tkeys encountered in an input dictionary. If a particular key is\n\t\tnot in every input dictionary a fill value with be inserted\n\t\tThe fill value is numpy.nan if the data is numeric,\n\t\totherwise it is None\"\"\"\n\t\tkeys = []\n\t\ttypefuncs = []\n\t\tfor attrdict in additional_attrs:\n\t\t\tfor key in attrdict:\n\t\t\t\tif key not in keys:\n\t\t\t\t\tkeys.append(key)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdum = float(attrdict[key])\n\t\t\t\t\t\ttypefuncs.append(float)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\ttypefuncs.append(str)\n\n\t\toutdict = {key:[] for key in keys}\n\t\tfor attrdict in additional_attrs:\n\t\t\tfor key,typefunc in zip(keys,typefuncs):\n\t\t\t\tif key in attrdict:\n\t\t\t\t\toutdict[key].append(typefunc(attrdict[key]))\n\t\t\t\telse:\n\t\t\t\t\toutdict[key].append(np.nan if typefunc is float else None)\n\n\t\tfor key,typefunc in zip(keys,typefuncs):\n\t\t\tif typefunc is float:\n\t\t\t\toutdict[key]=np.array(outdict[key])\n\t\treturn outdict\n\n\tdef read(self,h5f):\n\t\th5grp = self.get_h5group(h5f)\n\t\ttimes = []\n\t\tdatasets = []\n\t\tadditional_attrs = []\n\t\tfor dset_timestr in h5grp:\n\t\t\tdataset_time = float(dset_timestr)\n\t\t\tdata = h5grp[dset_timestr][:]\n\t\t\ttimes.append(dataset_time)\n\t\t\tdatasets.append(data)\n\t\t\tadditional_attrs.append({key:val for key,val in h5grp[dset_timestr].attrs.items()})\n\t\tadditional_attrs = self._columnify_additional_attrs(additional_attrs)\n\t\treturn times,datasets,additional_attrs\n\nclass DefaultEsagrid(Esagrid):\n\t\"\"\"Default settings of a 3 degrees latitude per band, 3 cap bins, with\n\tlocaltime as the azimuthal coordinate\"\"\"\n\tdef __init__(self,delta_lat=3,n_cap_bins=3,azi_coord='lt'):\n\t\tEsagrid.__init__(self,delta_lat,n_cap_bins=n_cap_bins,azi_coord=azi_coord)\n\n\nclass EsagridFile(object):\n\t\"\"\"\n\tClass for storing data associated with each bin of an esagrid object on disk.\n\tUses h5py interface to HDF5 library.\n\n\tHDF5 files are organized thus:\n\t-Each bin gets a HDF5 group.\n\t-Each time the bin_and_store function is called, all of the groups/bins are iterated\n\t\tthrough and any data which falls within the 
bin's lat,lt or lat,lon boundaries is stored\n\t\tas a new dataset.\n\n\tINPUTS\n\t------\n\t\thdf5_filenm - str\n\t\t\tThe filename of the hdf5 file to store to. If this is an existing file\n\t\t\tand clobber == False, will use the stored metadata in the file to\n\t\t\tcreate the appropriate esagrid and you can continue adding to the file\n\t\t\tor process results from it\n\n\t\tgrid - an esagrid instance, optional\n\t\t\tThe grid of bins to bin into. If it is None (default), a default grid\n\t\t\twith delta_lat = 3 and n_cap_bins = 3 and azi_coord = 'lt' is used\n\n\t\thdf5_local_dir - str, optional\n\t\t\tA valid local path at which the hdf5 files will be created\n\n\t\tclobber - bool, optional\n\t\t\tIf True, will delete and overwrite the HDF5 file specified as os.path.join(hdf5_local_dir,hdf5_filenm)\n\t\t\tif it exists.\n\t\"\"\"\n\tdef __init__(self,hdf5_filenm,grid=None,hdf5_local_dir=None,clobber=False):\n\n\t\tif hdf5_local_dir is None:\n\t\t\traise ValueError(('hdf5_local_dir kwarg is now mandatory '\n\t\t\t\t +'and cannot be None'))\n\n\t\tself.hdf5dir = hdf5_local_dir\n\t\tself.hdf5filenm = hdf5_filenm\n\t\tself.h5fn = os.path.join(self.hdf5dir,self.hdf5filenm)\n\n\t\t#Default to grid of with 3 latitude bins if no grid passed\n\t\tdefault_grid = DefaultEsagrid()\n\n\t\tif os.path.exists(self.h5fn):\n\t\t\tif not clobber:\n\t\t\t\tself.grid = self.create_grid_from_metadata()\n\t\t\telse:\n\t\t\t\tos.remove(self.h5fn)\n\t\t\t\tself.grid = default_grid if grid is None else grid\n\t\t\t\tself.write_grid_metadata()\n\t\telse:\n\t\t\tself.grid = default_grid if grid is None else grid\n\t\t\tself.write_grid_metadata()\n\n\t\tself.binlats,self.binlonorlts = self.grid.bin_locations(center_or_edges='edges')\n\n\t\tself._bingroups = {}\n\t\twith h5py.File(self.h5fn,'r') as h5f:\n\t\t\tfor groupname in h5f:\n\t\t\t\ttry:\n\t\t\t\t\tbingroup = EsagridFileBinGroup.from_groupname(self.grid,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgroupname)\n\t\t\t\texcept InvalidBinGroupNameError:\n\t\t\t\t\tprint(('Could not create EsagridFileBinGroup'\n\t\t\t\t\t\t +'from h5 group {}'.format(groupname)))\n\t\t\t\t\tcontinue\n\t\t\t\tself._bingroups[bingroup['flatind']]=bingroup\n\n\t@classmethod\n\tdef copy_of_existing(cls,source_esagrid_file,destination_h5fn):\n\t\t\"\"\"Generate a new EsagridFile that contains all of the data from\n\t\tan existing one under a new filename\"\"\"\n\t\tif os.path.exists(destination_h5fn):\n\t\t\traise IOError('Destination file {} exists!'.format(destination_h5fn))\n\n\t\tshutil.copyfile(source_esagrid_file.h5fn,destination_h5fn)\n\t\tdest_hdf5_local_dir,dest_hdf5_filename = os.path.split(destination_h5fn)\n\t\treturn EsagridFile(dest_hdf5_filename,hdf5_local_dir=dest_hdf5_local_dir)\n\n\tdef append_existing(self,existing_esagrid_file):\n\t\t\"\"\"Copy all bin data from another EsagridFile instance into this one\"\"\"\n\t\twith h5py.File(self.h5fn,'a') as h5f:\n\t\t\twith h5py.File(existing_esagrid_file.h5fn,'r') as other_h5f: \n\t\t\t\tfor flatind in existing_esagrid_file:\n\t\t\t\t\tif flatind not in self:\n\t\t\t\t\t\tself[flatind]=EsagridFileBinGroup(self.grid,flatind)\n\t\t\t\t\tself[flatind].append_from_other(h5f,other_h5f)\n\n\tdef __getitem__(self,flatind):\n\t\treturn self._bingroups[flatind]\n\n\tdef __setitem__(self,flatind,esagrid_file_bingroup):\n\t\tif not isinstance(esagrid_file_bingroup,EsagridFileBinGroup):\n\t\t\traise TypeError(('{}'.format(esagrid_file_bingroup)\n\t\t\t\t\t\t\t+' is not an EsagridFileBinGroup'))\n\t\tself._bingroups[flatind] = 
esagrid_file_bingroup\n\n\tdef __contains__(self,flatind):\n\t\treturn flatind in self._bingroups\n\n\tdef items(self):\n\t\treturn self._bingroups.items()\n\n\tdef __iter__(self):\n\t\tfor flatind in self._bingroups:\n\t\t\tyield flatind\n\n\tdef write_grid_metadata(self):\n\t\twith h5py.File(self.h5fn,'a') as h5f:\n\t\t\th5f.attrs['delta_lat'] = self.grid.delta_lat\n\t\t\th5f.attrs['n_cap_bins'] = self.grid.n_cap_bins\n\t\t\th5f.attrs['azi_coord'] = self.grid.azi_coord\n\n\tdef create_grid_from_metadata(self):\n\t\twith h5py.File(self.h5fn,'r') as h5f:\n\t\t\tdelta_lat = h5f.attrs['delta_lat']\n\t\t\ttry:\n\t\t\t\tazi_coord = str(h5f.attrs['azi_coord'],'utf8')\n\t\t\texcept TypeError:\n\t\t\t\tazi_coord = h5f.attrs['azi_coord']\n\n\t\t\tif 'n_cap_bins' in h5f.attrs:\n\t\t\t\tn_cap_bins = h5f.attrs['n_cap_bins']\n\t\t\t\treturn Esagrid(delta_lat,n_cap_bins=n_cap_bins,azi_coord=azi_coord)\n\t\t\telif 'delta_azi' in h5f.attrs:\n\t\t\t\tdelta_azi = h5f.attrs['delta_azi']\n\t\t\t\treturn ConstantAzimuthalSpacingGrid(delta_lat,delta_azi,azi_coord)\n\t\t\telse:\n\t\t\t\traise ValueError(('Missing H5 attribute; cannot specify grid'\n\t\t\t\t +'\ndid not find either \"n_cap_bins\" (esagrid)'\n\t\t\t\t +'\n or \"delta_azi\" (constant azi spacing grid)'\n\t\t\t\t +'\nin attrs: {}'.format(h5f.attrs)))\n\n\tdef bin_and_store(self,t,lat,lonorlt,data,silent=False,additional_attrs=None):\n\n\t\tlatbands,lonbins,flatinds = self.grid.whichbin(lat,lonorlt)\n\n\t\twith h5py.File(self.h5fn,'a') as h5f:\n\t\t\tfor bin_ind in np.unique(flatinds):\n\n\t\t\t\tin_bin = flatinds == bin_ind\n\n\t\t\t\tif bin_ind not in self:\n\t\t\t\t\tself[bin_ind] = EsagridFileBinGroup(self.grid,bin_ind)\n\n\t\t\t\tself[bin_ind].store(h5f,\n\t\t\t\t\t\t\t\t\tt[in_bin].flatten(),\n\t\t\t\t\t\t\t\t\tdata[in_bin].flatten(),\n\t\t\t\t\t\t\t\t\tadditional_attrs=additional_attrs,\n\t\t\t\t\t\t\t\t\tsilent=silent)\n\n\tdef dataset_passes_attr_filters(self,dataset,attr_filters,default_result=True):\n\t\t\"\"\"\n\t\tDecides whether to include a specific dataset in a bin_stats\n\t\tsample for a particular bin\n\n\t\tFilters are specified as a dictionary\n\t\twith the following grammar:\n\t\t\tfilters['attr_key']=test_function\n\n\t\twhere:\n\t\t\t'attr_key' : the NAME of the HDF5 attribute of the dataset\n\t\t\ttest_function : a python lambda function or other function\n\t\t\t\t\t\t\tto apply to the value of the attribute.\n\t\t\t\t\t\t\tThis function must return a single True or\n\t\t\t\t\t\t\tFalse value and appropriately handle the\n\t\t\t\t\t\t\ttype of data that is in the attribute\n\n\t\t\"\"\"\n\t\tpassed_filters=default_result\n\t\tif attr_filters is not None:\n\t\t\tfor attr_key in attr_filters:\n\t\t\t\tattr_test_fun = attr_filters[attr_key]\n\t\t\t\tif attr_key in dataset.attrs:\n\t\t\t\t\tattr_value = dataset.attrs[attr_key]\n\t\t\t\t\tpassed_filters = passed_filters and attr_test_fun(attr_value)\n\t\treturn passed_filters\n\n\tdef bin_stats(self,statfun=np.nanmean,statfunname=None,\n\t\t\t\t\t\tcenter_or_edges='edges',minlat=50.,\n\t\t\t\t\t\tsilent=False,force_recompute=False,\n\t\t\t\t\t\twrite_to_h5=True,attr_filters=None):\n\t\t\"\"\"\n\t\t\tif statfun is a list of functions, they will be applied successively and binstats will be a list;\n\t\t\tthis is more time efficient than calling bin_stats multiple times\n\t\t\"\"\"\n\t\tif not isinstance(statfun,list):\n\t\t\tstatfun = [statfun]\n\n\t\tif statfunname is not None:\n\t\t\tif not isinstance(statfunname,list):\n\t\t\t\tstatfunname = [statfunname]\n\n\t\t#hdf5 dataset names 
where we can store results, or reload them if they've already been computed\n\t\tstatfun_dataset_names = ['binstats_'+func.__name__ for func in statfun]\n\n\t\tbinstats = []\n\t\tfor fun in statfun:\n\t\t\tthis_binstats = np.zeros((len(self.grid.flat_bins[:,0]),1))\n\t\t\tthis_binstats.fill(np.nan)\n\t\t\tbinstats.append(this_binstats)\n\n\t\t#Locate the bins\n\t\tbinlats,binlonorlts = self.grid.bin_locations(center_or_edges=center_or_edges)\n\n\t\twith h5py.File(self.h5fn,'a') as h5f:\n\t\t\tstats_computed = 'binstats_results' in h5f and \\\n\t\t\t\tall([dataset_name in h5f['binstats_results'] \\\n\t\t\t\t\tfor dataset_name in statfun_dataset_names])\n\t\t\tif not force_recompute and stats_computed:\n\t\t\t\tif not silent:\n\t\t\t\t\tprint(\"Loading precomputed results, set kwarg force_recompute=True to avoid this behaviour\")\n\t\t\t\tfor istatfun,statfun_dataset_name in enumerate(statfun_dataset_names):\n\t\t\t\t\tthis_binstats = h5f['binstats_results'][statfun_dataset_name][:]\n\t\t\t\t\tif not silent:\n\t\t\t\t\t\tprint(\"Loading %d nonzero bins from dataset %s...\" % (np.count_nonzero(np.isfinite(this_binstats)),statfun_dataset_name))\n\t\t\t\t\tbinstats[istatfun] = this_binstats\n\n\t\t\t\t\t#We can just short-circuit and return, since we don't need to do any more data loading\n\t\t\t\t\t#if len(binstats) == 1:\n\t\t\t\t\t#\tbinstats = binstats[0]\n\t\t\t\t\t#return binlats,binlonorlts,binstats\n\t\t\telif not stats_computed or force_recompute:\n\t\t\t\t#Read every dataset (pass) from each bin\n\t\t\t\tfor groupnm in h5f:\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tflatind = EsagridFileBinGroup._flatind_from_group_name(groupnm)\n\t\t\t\t\texcept InvalidBinGroupNameError:\n\t\t\t\t\t\tprint('{} is not a bin group, skipping'.format(groupnm))\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tesagrid_file_bingroup = EsagridFileBinGroup(self.grid,flatind)\n\t\t\t\t\t#Load h5 group and check for old or missing metadata\n\t\t\t\t\tgrp = esagrid_file_bingroup.get_h5group(h5f)\n\n\t\t\t\t\t#Skip bins below the desired latitude\n\t\t\t\t\tif np.abs(grp.attrs['slat'])<minlat and np.abs(grp.attrs['elat'])<minlat:\n\t\t\t\t\t\t#if not silent:\n\t\t\t\t\t\t#\tprint(\"Skipping bin %s because too low latitude (<%.3f)\" % (grp.attrs['longname'],minlat))\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tstatusstr = \"| %s | \" % (grp.attrs['longname'])\n\t\t\t\t\tflatind = grp.attrs['flatind']\n\t\t\t\t\tif np.floor(flatind) != flatind or flatind < 0:\n\t\t\t\t\t\traise ValueError('Unexpected bin index {}'.format(flatind))\n\t\t\t\t\tflatind = int(flatind)\n\n\t\t\t\t\tdatasets = []\n\t\t\t\t\tndatasets = len(grp.items())\n\t\t\t\t\tfor datasetnm,dataset in grp.items():\n\t\t\t\t\t\t#Check the dataset's attributes against the\n\t\t\t\t\t\t#attr_filters tests (an optional kwarg);\n\t\t\t\t\t\t#by default a dataset passes\n\t\t\t\t\t\tis_okay = self.dataset_passes_attr_filters(dataset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tattr_filters,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdefault_result=True)\n\t\t\t\t\t\tif is_okay:\n\t\t\t\t\t\t\tdatasets.append(dataset[:])\n\n\t\t\t\t\tstatusstr+= \"kept %d/%d passes | \" % (len(datasets),ndatasets)\n\t\t\t\t\tif len(datasets)>0:\n\t\t\t\t\t\tbin_data = np.concatenate(datasets)\n\t\t\t\t\t\t#Do all stat functions\n\t\t\t\t\t\tfor ifun,this_statfun in enumerate(statfun):\n\t\t\t\t\t\t\tbinstats[ifun][flatind] = this_statfun(bin_data)\n\t\t\t\t\t\t\tstatusstr+=\"%s: %.3f | \" % 
(this_statfun.__name__,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tbinstats[ifun][flatind])\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor ifun,this_statfun in enumerate(statfun):\n\t\t\t\t\t\t\tbinstats[ifun][flatind] = np.nan\n\t\t\t\t\t\t\tstatusstr+=\"%s: NaN | \" % (this_statfun.__name__)\n\n\t\t\t\t\tif not silent:\n\t\t\t\t\t\tprint(statusstr)\n\t\t\telse:\n\t\t\t\traise RuntimeError(\"Unhandled case!\")\n\n\t\t\t#Save the results\n\t\t\tif write_to_h5:\n\t\t\t\tfor istatfun,statfun_dataset_name in enumerate(statfun_dataset_names):\n\t\t\t\t\tif 'binstats_results' not in h5f:\n\t\t\t\t\t\th5f.create_group('binstats_results')\n\t\t\t\t\tresults_grp = h5f['binstats_results']\n\t\t\t\t\t#Create a dataset for each statistics function's binned results\n\t\t\t\t\tif statfun_dataset_name in results_grp:\n\t\t\t\t\t\tdel results_grp[statfun_dataset_name]\n\t\t\t\t\tresults_grp.create_dataset(statfun_dataset_name,data=binstats[istatfun])\n\t\t\t\t\tif not silent:\n\t\t\t\t\t\tprint(\"Saved binning results to h5 dataset %s\" % (statfun_dataset_name))\n\n\t\tif statfunname is not None:\n\t\t\t#Return binstats as a dictionary if we named the statistics\n\t\t\t#functions\n\t\t\tbinstats_dict = {}\n\t\t\tfor i in range(len(binstats)):\n\t\t\t\tbinstats_dict[statfunname[i]] = binstats[i]\n\t\t\treturn binlats,binlonorlts,binstats_dict\n\t\telse:\n\t\t\t#Don't bother returning a list of results if we are only using one stat function\n\t\t\t#Just return the array\n\t\t\tif len(binstats) == 1:\n\t\t\t\tbinstats = binstats[0]\n\n\t\t\treturn binlats,binlonorlts,binstats\n\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.isclose",
"numpy.abs",
"numpy.isfinite",
"numpy.unique",
"numpy.floor"
]
] |
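The `dataset_passes_attr_filters` docstring in the esagrid code above specifies a small filter grammar (`filters['attr_key'] = test_function`) for deciding which HDF5 datasets enter a `bin_stats` sample. Below is a minimal standalone sketch of that grammar; `FakeDataset` is a hypothetical stand-in for an h5py Dataset (only its dict-like `.attrs` is used), and the function body mirrors the method shown above.

class FakeDataset:
    """Hypothetical stand-in for an h5py Dataset; only .attrs is used."""
    def __init__(self, attrs):
        self.attrs = attrs

def dataset_passes_attr_filters(dataset, attr_filters, default_result=True):
    # Start from default_result and AND in the outcome of every test
    # whose attribute key is present on the dataset
    passed_filters = default_result
    if attr_filters is not None:
        for attr_key, attr_test_fun in attr_filters.items():
            if attr_key in dataset.attrs:
                passed_filters = passed_filters and attr_test_fun(dataset.attrs[attr_key])
    return passed_filters

# Keep only passes recorded after 2015 with a quality flag of 0
attr_filters = {'year': lambda y: y > 2015, 'quality_flag': lambda q: q == 0}
print(dataset_passes_attr_filters(FakeDataset({'year': 2017, 'quality_flag': 0}), attr_filters))  # True
print(dataset_passes_attr_filters(FakeDataset({'year': 2012, 'quality_flag': 0}), attr_filters))  # False

Note that a dataset missing a filtered attribute is simply not tested against that filter, which is why `default_result=True` keeps unannotated datasets in the sample.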
hepaccelerate/hepaccelerate | [
"6415fe3d6b569cc94ec33d06180869b0b0773c6c"
] | [
"examples/atlas_hzz.py"
] | [
"# usr/bin/env python3\n# Run as PYTHONPATH=. python3 examples/atlas_hzz.py\n\n# In case you use CUDA, you may have to find the libnvvm.so on your system manually\nimport os, time, glob, argparse, multiprocessing\nimport numba\nimport sys\nimport numpy as np\nimport uproot\nimport hepaccelerate\nimport hepaccelerate.kernels as kernels\nfrom hepaccelerate.utils import Results, Dataset, Histogram, choose_backend\nimport hepaccelerate.backend_cpu as backend_cpu\nimport matplotlib\nimport infofile\nimport json\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport copy\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom scipy.stats import wasserstein_distance\nfrom plot_utils import plot_hist_ratio\n\nha = None\nlumi =10.0\n\n# define our analysis function\ndef analyze_data_function(data, parameters):\n ret = Results()\n ha = parameters[\"ha\"]\n num_events = data[\"num_events\"]\n lep = data[\"Lep\"]\n lep.hepaccelerate_backend = ha\n lep.attrs_data[\"pt\"] = lep.lep_pt \n lep.attrs_data[\"eta\"] = lep.lep_eta\n lep.attrs_data[\"phi\"] = lep.lep_phi\n lep.attrs_data[\"charge\"] = lep.lep_charge\n lep.attrs_data[\"type\"] = lep.lep_type\n \n lep_mass = np.zeros_like(lep[\"pt\"],dtype=nplib.float32)\n lep_mass = np.where(lep[\"type\"]==11,0.511,lep_mass)\n lep_mass = np.where(lep[\"type\"]==13,105.65837,lep_mass)\n\n lep.attrs_data[\"mass\"] = lep_mass\n mask_events = nplib.ones(lep.numevents(), dtype=nplib.bool)\n \n\n lep_ele = lep[\"type\"] == 11\n lep_muon = lep[\"type\"] == 13\n\n\n ele_Iso = np.logical_and(lep_ele,np.logical_and( lep.lep_ptcone30/lep.pt < 0.15 , lep.lep_etcone20/lep.pt < 0.20))\n muon_Iso = np.logical_and(lep_muon ,np.logical_and( lep.lep_ptcone30/lep.pt < 0.15 ,lep.lep_etcone20/lep.pt < 0.30))\n pass_iso = np.logical_or(ele_Iso, muon_Iso)\n lep.attrs_data[\"pass_iso\"] = pass_iso\n\n num_lep_event = kernels.sum_in_offsets(\n backend,\n lep.offsets,\n lep.masks[\"all\"],\n mask_events,\n lep.masks[\"all\"],\n nplib.int8,\n )\n mask_events_4lep = num_lep_event == 4\n\n lep_attrs = [ \"pt\", \"eta\", \"phi\", \"charge\",\"type\",\"mass\", \"pass_iso\"]#, \"ptcone30\", \"etcone20\"]\n \n lep0 = lep.select_nth(0, mask_events_4lep, lep.masks[\"all\"], attributes=lep_attrs)\n lep1 = lep.select_nth(1, mask_events_4lep, lep.masks[\"all\"], attributes=lep_attrs)\n lep2 = lep.select_nth(2, mask_events_4lep, lep.masks[\"all\"], attributes=lep_attrs)\n lep3 = lep.select_nth(3, mask_events_4lep, lep.masks[\"all\"], attributes=lep_attrs)\n \n mask_event_sumchg_zero = (lep0[\"charge\"]+lep1[\"charge\"]+lep2[\"charge\"]+lep3[\"charge\"] == 0) \n sum_lep_type = lep0[\"type\"]+lep1[\"type\"]+lep2[\"type\"]+lep3[\"type\"] \n all_pass_iso = (lep0[\"pass_iso\"] & lep1[\"pass_iso\"] & lep2[\"pass_iso\"] & lep3[\"pass_iso\"])\n \n mask_event_sum_lep_type = np.logical_or((sum_lep_type == 44),np.logical_or((sum_lep_type == 48),(sum_lep_type == 52) ) )\n mask_events = mask_events & mask_event_sumchg_zero & mask_events_4lep & mask_event_sum_lep_type & all_pass_iso\n \n\n mask_lep1_passing_pt = lep1[\"pt\"] > parameters[\"leading_lep_ptcut\"]\n mask_lep2_passing_pt = lep2[\"pt\"] > parameters[\"lep_ptcut\"]\n \n mask_events = mask_events & mask_lep1_passing_pt & mask_lep2_passing_pt\n\n l0 = to_cartesian(lep0)\n l1 = to_cartesian(lep1)\n l2 = to_cartesian(lep2)\n l3 = to_cartesian(lep3)\n\n llll = {k: l0[k] + l1[k] + l2[k] + l3[k] for k in [\"px\", \"py\", \"pz\", \"e\"]}\n\n llll_sph = to_spherical(llll)\n\n llll_sph[\"mass\"] = llll_sph[\"mass\"]/1000. 
# Convert to GeV\n \n #import pdb;pdb.set_trace();\n # compute a weighted histogram\n weights = nplib.ones(num_events, dtype=nplib.float32)\n ## Add xsec weights based on sample name\n if parameters[\"is_mc\"]:\n weights = data['eventvars']['mcWeight']*data['eventvars']['scaleFactor_PILEUP']*data['eventvars']['scaleFactor_ELE']*data['eventvars']['scaleFactor_MUON']*data['eventvars']['scaleFactor_LepTRIGGER']\n info = infofile.infos[parameters[\"sample\"]]\n weights *= (lumi*1000*info[\"xsec\"])/(info[\"sumw\"]*info[\"red_eff\"])\n \n bins = nplib.linspace(110, 150, 11, dtype=nplib.float32)\n hist_m4lep= Histogram(\n *kernels.histogram_from_vector(\n backend,\n llll_sph[\"mass\"][mask_events],\n weights[mask_events],\n bins,\n )\n )\n # save it to the output\n ret[\"hist_m4lep\"] = hist_m4lep\n return ret\n\ndef to_cartesian(arrs):\n pt = arrs[\"pt\"]\n eta = arrs[\"eta\"]\n phi = arrs[\"phi\"]\n mass = arrs[\"mass\"]\n px, py, pz, e = backend.spherical_to_cartesian(pt, eta, phi, mass)\n return {\"px\": px, \"py\": py, \"pz\": pz, \"e\": e}\n\ndef rapidity(e, pz):\n return 0.5*np.log((e + pz) / (e - pz))\n\n\"\"\"\nGiven a a dictionary of arrays of cartesian coordinates (px, py, pz, e),\ncomputes the array of spherical coordinates (pt, eta, phi, m)\n arrs: dict of str -> array\n returns: dict of str -> array\n\"\"\"\ndef to_spherical(arrs):\n px = arrs[\"px\"]\n py = arrs[\"py\"]\n pz = arrs[\"pz\"]\n e = arrs[\"e\"]\n pt, eta, phi, mass = backend.cartesian_to_spherical(px, py, pz, e)\n rap = rapidity(e, pz)\n return {\"pt\": pt, \"eta\": eta, \"phi\": phi, \"mass\": mass, \"rapidity\": rap}\n\ndef pct_barh(ax, values, colors):\n prev = 0\n norm = sum(values)\n for v, c in zip(values, colors):\n ax.barh(0, width=v/norm, height=1.0, left=prev, color=c)\n prev += v/norm\n ax.set_yticks([])\n ax.set_xticks([])\n ax.set_xlim(0,prev)\n ax.axis('off')\n\ndef make_plot(datasets):\n res = {}\n mc_samples =[]\n for ds, fn_pattern, is_mc in datasets:\n with open(\"data/atlas/{0}.json\".format(ds)) as f:\n ret = json.load(f)[\"hist_m4lep\"]\n \n res[ds] = hepaccelerate.Histogram(ret[\"contents\"], ret[\"contents_w2\"], ret[\"edges\"])\n \n #remove the overflow bin\n res[ds].contents[-1] = 0\n res[ds].contents_w2[-1] = 0\n \n if 'data' in ds:\n hd = res[ds]\n else:\n mc_samples += [ds]\n\n matplotlib.use(\"Agg\")\n import matplotlib.pyplot as plt\n\n if np.sum(hd.contents) == 0:\n print(\"ERROR: Histogram was empty, skipping\")\n return\n\n htot_nominal = copy.deepcopy(hd)\n htot_nominal.contents[:] = 0\n htot_nominal.contents_w2[:] = 0\n\n hmc = []\n for mc_samp in mc_samples:\n h = res[mc_samp]\n h.label = nice_names.get(mc_samp, mc_samp)\n h.color = colors[mc_samp][0]/255.0, colors[mc_samp][1]/255.0, colors[mc_samp][2]/255.0\n hmc += [h]\n \n #hmc = [hmc_g[k[0]] for k in groups]\n for h in hmc:\n htot_nominal.contents += h.contents\n htot_nominal.contents_w2 += h.contents_w2\n hd.label = \"data ({0:.1E})\".format(np.sum(hd.contents))\n\n extra_kwargs = {\n \"hist_m4lep\": {\n \"do_log\": True,\n \"ylim\": (0, 100),\n \"xlim\": (110.,150.)\n }}\n\n figure = plt.figure(figsize=(5,5))\n a1, a2 = plot_hist_ratio(\n hmc, hd,\n figure=figure, **extra_kwargs)\n \n# colorlist = [h.color for h in hmc]\n# a1inset = inset_axes(a1, width=1.0, height=0.1, loc=2)\n\n# pct_barh(a1inset, [np.sum(h.contents) for h in hmc], colorlist)\n# #a2.grid(which=\"both\", linewidth=0.5)\n\n # Ratio axis ticks\n #ts = a2.set_yticks([0.5, 1.0, 1.5], minor=False)\n #ts = a2.set_yticks(np.arange(0,2,0.2), minor=True)\n #ts = 
a2.set_xticklabels([])\n\n a1.text(0.03,0.95, \"Atlas Open Data @ 13 TeV \\n\" +\n r\"$L = {0:.1f}\\ fb^{{-1}}$\".format(lumi),\n #\"\\nd/mc={0:.2f}\".format(np.sum(hd[\"contents\"])/np.sum(htot_nominal[\"contents\"])) +\n #\"\\nwd={0:.2E}\".format(wasserstein_distance(htot_nominal[\"contents\"]/np.sum(htot_nominal[\"contents\"]), hd[\"contents\"]/np.sum(hd[\"contents\"]))),\n horizontalalignment='left',\n verticalalignment='top',\n transform=a1.transAxes,\n fontsize=10\n )\n handles, labels = a1.get_legend_handles_labels()\n a1.legend(handles[::-1], labels[::-1], frameon=False, fontsize=10, loc=1, ncol=2)\n\n #a1.set_title(catname + \" ({0})\".format(analysis_names[analysis][datataking_year]))\n a2.set_xlabel(r'$M_{4l}$ (GeV)')\n\n binwidth = np.diff(hd.edges)[0]\n a1.set_ylabel(\"Events / [{0:.1f} GeV]\".format(binwidth))\n\n if not os.path.isdir(\"paper/plots/atlas\"):\n os.makedirs(\"paper/plots/atlas\") \n plt.savefig(\"paper/plots/atlas/m_4lep.pdf\", bbox_inches=\"tight\")\n plt.savefig(\"paper/plots/atlas/m_4lep.png\", bbox_inches=\"tight\", dpi=100)\n plt.close(figure)\n del figure\n\n return\n\ndatasets = [\n (\"Zee\", \"Atlas_opendata/mc_361106.Zee.4lep.root\", True),\n (\"Zmumu\", \"Atlas_opendata/mc_*Zmumu.4lep.root\", True), \n (\"ttbar_lep\", \"Atlas_opendata/mc_*ttbar*.4lep.root\", True),\n (\"llll\", \"Atlas_opendata/mc_*llll*.4lep.root\", True),\n ('ggH125_ZZ4lep',\"Atlas_opendata/mc_*ggH125_ZZ4lep.4lep.root\", True),\n ('VBFH125_ZZ4lep',\"Atlas_opendata/mc_*VBFH125_ZZ4lep.4lep.root\", True),\n ('WH125_ZZ4lep',\"Atlas_opendata/mc_*WH125_ZZ4lep.4lep.root\", True),\n ('ZH125_ZZ4lep',\"Atlas_opendata/mc_*ZH125_ZZ4lep.4lep.root\", True),\n (\"data\",\"Atlas_opendata/data*.4lep.root\",False)\n]\n\ncolors = {\n \"Zee\": (254, 254, 83),\n \"Zmumu\": (109, 253, 245),\n \"ttbar_lep\": (67, 150, 42),\n \"llll\": (247, 206, 205),\n \"ggH125_ZZ4lep\": (0, 100, 150),\n \"VBFH125_ZZ4lep\": (0, 100, 150),\n \"WH125_ZZ4lep\": (0, 100, 150),\n \"ZH125_ZZ4lep\": (0, 100, 150),\n}\n\nnice_names = {\n \"Zee\": r\"$Z \\rightarrow ee$\",\n \"Zmumu\": r\"$Z \\rightarrow \\mu\\mu$\",\n \"ttbar_lep\": r\"$t\\bar{t}$\",\n \"llll\": r\"$ZZ \\rightarrow 4l$\",\n \"ggH125_ZZ4lep\":r\"$H \\rightarrow ZZ \\rightarrow 4l$\",\n \"VBFH125_ZZ4lep\":\"\",# r\"$VBF$\",\n \"WH125_ZZ4lep\": \"\",#r\"$WH$\",\n \"ZH125_ZZ4lep\": \"\",#r\"$ZH$\"\n}\n\nif __name__ == \"__main__\":\n\n\n use_cuda = int(os.environ.get(\"HEPACCELERATE_CUDA\", 0)) == 1\n # choose whether or not to use the GPU backend\n if use_cuda:\n import setGPU\n\n nplib, backend = choose_backend(use_cuda=use_cuda)\n \n # Predefine which branches to read from the TTree and how they are grouped to objects\n # This will be verified against the actual ROOT TTree when it is loaded\n datastructures = {\n \"Lep\": [\n (\"lep_pt\", \"float32\"),\n (\"lep_eta\",\"float32\"),\n (\"lep_phi\",\"float32\"),\n (\"lep_charge\",\"int32\"),\n (\"lep_type\",\"uint32\"),\n (\"lep_ptcone30\",\"float32\"),\n (\"lep_etcone20\",\"float32\")\n \n ],\n \"EventVariables\": [\n (\"lep_n\", \"int32\"),\n (\"mcWeight\", \"float32\"),\n (\"scaleFactor_PILEUP\", \"float32\"),\n (\"scaleFactor_ELE\", \"float32\"),\n (\"scaleFactor_MUON\", \"float32\"),\n (\"scaleFactor_LepTRIGGER\", \"float32\")\n ],\n }\n \n\n # Load this input file\n #filename = \"data/data_A.4lep.root\"\n if not os.path.isdir(\"data/atlas\"):\n os.makedirs(\"data/atlas\")\n\n walltime_t0 = time.time()\n for ds, fn_pattern, is_mc in datasets:\n filename = glob.glob(fn_pattern)\n print(filename)\n if 
len(filename) == 0:\n            raise Exception(\n                \"Could not find any filenames for dataset={0}: fn_pattern={1}\".format(\n                    ds, fn_pattern\n                )\n            )\n\n        # Define a dataset, given the data structure and a list of filenames\n        dataset = Dataset(ds, filename, datastructures, treename=\"mini\")\n        # Load the ROOT files\n        dataset.load_root(verbose=True)\n        \n        # merge arrays across files into one big array\n        dataset.merge_inplace(verbose=True)\n        \n        # move to GPU if CUDA was specified\n        dataset.move_to_device(nplib, verbose=True)\n        \n        # process data, save output as a json file\n        results = dataset.analyze(\n            analyze_data_function, verbose=True, parameters={\n                \"lep_ptcut\": 10000.0, #MeV units\n                \"leading_lep_ptcut\": 15000.0, #MeV units\n                \"sample\": ds,\n                \"is_mc\": is_mc,\n                \"ha\":backend\n            }\n        )\n        results.save_json(\"data/atlas/{0}.json\".format(ds))\n\n    make_plot(datasets)\n"
] | [
[
"matplotlib.use",
"numpy.logical_or",
"numpy.zeros_like",
"numpy.log",
"matplotlib.pyplot.savefig",
"numpy.sum",
"matplotlib.pyplot.close",
"numpy.logical_and",
"matplotlib.pyplot.figure",
"numpy.diff",
"numpy.where"
]
] |
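The `to_cartesian` and `to_spherical` helpers in atlas_hzz.py above delegate to `backend.spherical_to_cartesian` and `backend.cartesian_to_spherical`, whose implementations are not part of this file. The pure-NumPy sketch below assumes the standard collider conventions for (pt, eta, phi, mass) <-> (px, py, pz, e) rather than quoting hepaccelerate's code, and shows why the script sums the leptons' cartesian components before converting back to get an invariant mass.

import numpy as np

def spherical_to_cartesian(pt, eta, phi, mass):
    # Transverse components from phi, longitudinal from pseudorapidity,
    # energy from the mass-shell relation e^2 = |p|^2 + m^2
    px = pt * np.cos(phi)
    py = pt * np.sin(phi)
    pz = pt * np.sinh(eta)
    e = np.sqrt(px ** 2 + py ** 2 + pz ** 2 + mass ** 2)
    return px, py, pz, e

def cartesian_to_spherical(px, py, pz, e):
    # Inverse mapping; assumes pt > 0 so pseudorapidity is well defined
    pt = np.hypot(px, py)
    phi = np.arctan2(py, px)
    eta = np.arcsinh(pz / pt)
    mass = np.sqrt(np.maximum(e ** 2 - (px ** 2 + py ** 2 + pz ** 2), 0.0))
    return pt, eta, phi, mass

# Two muons with momenta in MeV; the pair mass comes from the summed four-vector
pt = np.array([45000.0, 30000.0])
eta = np.array([0.5, -1.2])
phi = np.array([0.1, 2.8])
mass = np.array([105.658, 105.658])
px, py, pz, e = spherical_to_cartesian(pt, eta, phi, mass)
_, _, _, m_pair = cartesian_to_spherical(px.sum(), py.sum(), pz.sum(), e.sum())
print(m_pair / 1000.0)  # in GeV, matching the /1000. conversion in analyze_data_function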
qiang2100/ParaLS | [
"d4a37d2e19976e20d331dec752b1e4463405fef6"
] | [
"fairseq/modules/transformer_layer.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Dict, List, Optional\n\nimport pdb\nimport torch\nimport torch.nn as nn\nfrom fairseq import utils\nfrom fairseq.modules import LayerNorm, MultiheadAttention\nfrom fairseq.modules.fairseq_dropout import FairseqDropout\nfrom fairseq.modules.quant_noise import quant_noise\nfrom torch import Tensor\nfrom fairseq.models.transformer import (\n TransformerConfig,\n)\n\n\nclass TransformerEncoderLayerBase(nn.Module):\n \"\"\"Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *cfg.encoder.normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n self.embed_dim = cfg.encoder.embed_dim\n self.quant_noise = cfg.quant_noise.pq\n self.quant_noise_block_size = cfg.quant_noise.pq_block_size\n self.self_attn = self.build_self_attention(self.embed_dim, cfg)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)\n self.dropout_module = FairseqDropout(\n cfg.dropout, module_name=self.__class__.__name__\n )\n self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn)\n activation_dropout_p = cfg.activation_dropout\n if activation_dropout_p == 0:\n # for backwards compatibility with models that use cfg.relu_dropout\n activation_dropout_p = cfg.relu_dropout or 0\n self.activation_dropout_module = FairseqDropout(\n float(activation_dropout_p), module_name=self.__class__.__name__\n )\n self.normalize_before = cfg.encoder.normalize_before\n self.fc1 = self.build_fc1(\n self.embed_dim,\n cfg.encoder.ffn_embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n self.fc2 = self.build_fc2(\n cfg.encoder.ffn_embed_dim,\n self.embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)\n\n def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(\n nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size\n )\n\n def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(\n nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size\n )\n\n def build_self_attention(self, embed_dim, cfg):\n return MultiheadAttention(\n embed_dim,\n cfg.encoder.attention_heads,\n dropout=cfg.attention_dropout,\n self_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def residual_connection(self, x, residual):\n return residual + x\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {\"0\": \"self_attn_layer_norm\", \"1\": \"final_layer_norm\"}\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layer_norms.{}.{}\".format(name, old, m)\n if k in state_dict:\n 
state_dict[\"{}.{}.{}\".format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(\n self,\n x,\n encoder_padding_mask: Optional[Tensor],\n attn_mask: Optional[Tensor] = None,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, seq_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,\n where `tgt_len` is the length of output and `src_len` is the\n length of input, though here both are equal to `seq_len`.\n `attn_mask[tgt_i, src_j] = 1` means that when calculating the\n embedding for `tgt_i`, we exclude (mask out) `src_j`. This is\n useful for strided self-attention.\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n # anything in original attn_mask = 1, becomes -1e8\n # anything in original attn_mask = 0, becomes 0\n # Note that we cannot use -inf here, because at some edge cases,\n # the attention weight (before softmax) for some padded element in query\n # will become -inf, which results in NaN in model parameters\n if attn_mask is not None:\n attn_mask = attn_mask.masked_fill(\n attn_mask.to(torch.bool),\n -1e8 if x.dtype == torch.float32 else -1e4\n )\n\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n x, _ = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n need_weights=False,\n attn_mask=attn_mask,\n )\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n return x\n\n\n# backward compatible with the legacy argparse format\nclass TransformerEncoderLayer(TransformerEncoderLayerBase):\n def __init__(self, args):\n super().__init__(TransformerConfig.from_namespace(args))\n self.args = args\n\n def build_self_attention(self, embed_dim, args):\n return super().build_self_attention(\n embed_dim, TransformerConfig.from_namespace(args)\n )\n\n\nclass TransformerDecoderLayerBase(nn.Module):\n \"\"\"Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. 
We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *cfg.decoder.normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(\n self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False\n ):\n super().__init__()\n self.embed_dim = cfg.decoder.embed_dim\n self.dropout_module = FairseqDropout(\n cfg.dropout, module_name=self.__class__.__name__\n )\n self.quant_noise = cfg.quant_noise.pq\n self.quant_noise_block_size = cfg.quant_noise.pq_block_size\n\n self.cross_self_attention = cfg.cross_self_attention\n\n self.self_attn = self.build_self_attention(\n self.embed_dim,\n cfg,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n )\n self.attn_ln = LayerNorm(self.embed_dim) if utils.safe_getattr(cfg, 'scale_attn', False) else None\n self.nh = self.self_attn.num_heads\n self.head_dim = self.self_attn.head_dim\n scale_heads = utils.safe_getattr(cfg, 'scale_heads', False)\n self.c_attn = nn.Parameter(torch.ones((self.nh,)), requires_grad=True) if scale_heads else None\n\n self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn)\n activation_dropout_p = cfg.activation_dropout\n if activation_dropout_p == 0:\n # for backwards compatibility with models that use cfg.relu_dropout\n activation_dropout_p = cfg.relu_dropout or 0\n self.activation_dropout_module = FairseqDropout(\n float(activation_dropout_p), module_name=self.__class__.__name__\n )\n self.normalize_before = cfg.decoder.normalize_before\n\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg)\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)\n\n self.ffn_layernorm = LayerNorm(cfg.decoder.ffn_embed_dim) if utils.safe_getattr(cfg, 'scale_fc', False) else None\n self.w_resid = nn.Parameter(torch.ones(self.embed_dim, ), requires_grad=True) if utils.safe_getattr(cfg, 'scale_resids', False) else None\n\n self.fc1 = self.build_fc1(\n self.embed_dim,\n cfg.decoder.ffn_embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n self.fc2 = self.build_fc2(\n cfg.decoder.ffn_embed_dim,\n self.embed_dim,\n self.quant_noise,\n self.quant_noise_block_size,\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)\n self.need_attn = True\n\n self.onnx_trace = False\n\n def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)\n\n def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)\n\n def build_self_attention(\n self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False\n ):\n return MultiheadAttention(\n embed_dim,\n cfg.decoder.attention_heads,\n dropout=cfg.attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=not cfg.cross_self_attention,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def build_encoder_attention(self, embed_dim, cfg):\n return MultiheadAttention(\n embed_dim,\n cfg.decoder.attention_heads,\n kdim=cfg.encoder.embed_dim,\n vdim=cfg.encoder.embed_dim,\n dropout=cfg.attention_dropout,\n 
encoder_decoder_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n )\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def residual_connection(self, x, residual):\n return residual + x\n\n\n def forward(\n self,\n x,\n encoder_out: Optional[torch.Tensor] = None,\n encoder_padding_mask: Optional[torch.Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n prev_self_attn_state: Optional[List[torch.Tensor]] = None,\n prev_attn_state: Optional[List[torch.Tensor]] = None,\n self_attn_mask: Optional[torch.Tensor] = None,\n self_attn_padding_mask: Optional[torch.Tensor] = None,\n need_attn: bool = False,\n need_head_weights: bool = False,\n attn_len: int = -1,\n tgt_token: int = -1,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n if need_head_weights:\n need_attn = True\n\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n if prev_self_attn_state is not None:\n prev_key, prev_value = prev_self_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": prev_value,\n }\n if len(prev_self_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_self_attn_state[2]\n assert incremental_state is not None\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)\n if self.cross_self_attention and not (\n incremental_state is not None\n and _self_attn_input_buffer is not None\n and \"prev_key\" in _self_attn_input_buffer\n ):\n if self_attn_mask is not None:\n assert encoder_out is not None\n self_attn_mask = torch.cat(\n (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1\n )\n if self_attn_padding_mask is not None:\n if encoder_padding_mask is None:\n assert encoder_out is not None\n encoder_padding_mask = self_attn_padding_mask.new_zeros(\n encoder_out.size(1), encoder_out.size(0)\n )\n self_attn_padding_mask = torch.cat(\n (encoder_padding_mask, self_attn_padding_mask), dim=1\n )\n assert encoder_out is not None\n y = torch.cat((encoder_out, x), dim=0)\n else:\n y = x\n\n #\n x, attn = self.self_attn(\n query=x,\n key=y,\n value=y,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n if self.c_attn is not None:\n tgt_len, bsz = x.size(0), x.size(1)\n x = x.view(tgt_len, bsz, self.nh, self.head_dim)\n x = torch.einsum('tbhd,h->tbhd', x, self.c_attn)\n x = x.reshape(tgt_len, bsz, self.embed_dim)\n if self.attn_ln is not None:\n x = self.attn_ln(x)\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n if self.encoder_attn is not None and encoder_out is not None:\n residual = x\n if self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n if prev_attn_state is not None:\n prev_key, prev_value = prev_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": 
prev_value,\n }\n if len(prev_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_attn_state[2]\n assert incremental_state is not None\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\n\n \n if attn_len == -1:\n x, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=need_attn or (not self.training and self.need_attn),\n need_head_weights=need_head_weights,\n )\n else:\n x, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=need_attn or (not self.training and self.need_attn),\n need_head_weights=need_head_weights,\n attn_len=attn_len,\n tgt_token=tgt_token,\n )\n\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n if self.ffn_layernorm is not None:\n x = self.ffn_layernorm(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n if self.w_resid is not None:\n residual = torch.mul(self.w_resid, residual)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n if self.onnx_trace and incremental_state is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n assert saved_state is not None\n if self_attn_padding_mask is not None:\n self_attn_state = [\n saved_state[\"prev_key\"],\n saved_state[\"prev_value\"],\n saved_state[\"prev_key_padding_mask\"],\n ]\n else:\n self_attn_state = [saved_state[\"prev_key\"], saved_state[\"prev_value\"]]\n return x, attn, self_attn_state\n return x, attn, None\n\n def make_generation_fast_(self, need_attn: bool = False, **kwargs):\n self.need_attn = need_attn\n\n\n# backward compatible with the legacy argparse format\nclass TransformerDecoderLayer(TransformerDecoderLayerBase):\n def __init__(\n self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False\n ):\n super().__init__(\n TransformerConfig.from_namespace(args),\n no_encoder_attn=no_encoder_attn,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n )\n self.args = args\n\n def build_self_attention(\n self, embed_dim, args, add_bias_kv=False, add_zero_attn=False\n ):\n return super().build_self_attention(\n embed_dim,\n TransformerConfig.from_namespace(args),\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n )\n\n def build_encoder_attention(self, embed_dim, args):\n return super().build_encoder_attention(\n embed_dim,\n TransformerConfig.from_namespace(args),\n )\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.mul",
"torch.einsum",
"torch.ones"
]
] |
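The docstrings of `TransformerEncoderLayerBase` and `TransformerDecoderLayerBase` above contrast the post-norm ordering of the original paper (`dropout -> add residual -> layernorm`) with the tensor2tensor pre-norm ordering toggled by `normalize_before`. The toy layer below strips the encoder layer down to just that ordering; it substitutes `torch.nn.MultiheadAttention` for fairseq's `MultiheadAttention` and omits dropout, quant_noise, the FFN block, and masking, so it is illustrative only.

import torch
import torch.nn as nn

class ToyEncoderLayer(nn.Module):
    def __init__(self, embed_dim, num_heads, normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(embed_dim, num_heads)
        self.norm = nn.LayerNorm(embed_dim)
        self.normalize_before = normalize_before

    def forward(self, x):
        residual = x
        if self.normalize_before:      # pre-norm: layernorm -> attn -> add residual
            x = self.norm(x)
        x, _ = self.self_attn(x, x, x, need_weights=False)
        x = residual + x               # the residual_connection step
        if not self.normalize_before:  # post-norm: attn -> add residual -> layernorm
            x = self.norm(x)
        return x

x = torch.randn(7, 2, 16)  # (seq_len, batch, embed_dim), the layout the fairseq layers use
print(ToyEncoderLayer(16, 4, normalize_before=True)(x).shape)  # torch.Size([7, 2, 16])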
snkas/floodns | [
"1c7c4f5e34b279f1ed0f8b0cc134c698f9b0fc5a"
] | [
"simulator/external/analyze.py"
] | [
"##\n# The MIT License (MIT)\n#\n# Copyright (c) 2019 snkas\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n##\n\nimport numpy as np\nimport os\nimport sys\nimport exputil\n\n\ndef analyze_flow_info(logs_floodns_dir, analysis_folder_dir):\n\n # Read in all the columns\n flows_info_csv_columns = exputil.read_csv_direct_in_columns(\n logs_floodns_dir + '/flow_info.csv',\n \"pos_int,pos_int,pos_int,string,pos_int,pos_int,pos_int,pos_float,pos_float,string\"\n )\n flow_id_list = flows_info_csv_columns[0]\n source_id_list = flows_info_csv_columns[1]\n target_id_list = flows_info_csv_columns[2]\n path_list = flows_info_csv_columns[3]\n path_length_list = list(map(lambda x: len(x.split(\">\")) - 1, path_list))\n # start_time_list = flows_info_csv_columns[4]\n # end_time_list = flows_info_csv_columns[5]\n # duration_list = flows_info_csv_columns[6]\n # total_sent_list = flows_info_csv_columns[7]\n avg_throughput_list = flows_info_csv_columns[8]\n # metadata_list = flows_info_csv_columns[9]\n\n # Calculate some statistics\n if len(flow_id_list) == 0:\n statistics = {\n 'all_num_flows': len(flow_id_list)\n }\n \n else:\n statistics = {\n 'all_num_flows': len(flow_id_list),\n 'all_flow_num_unique_sources': len(set(source_id_list)),\n 'all_flow_num_unique_targets': len(set(target_id_list)),\n \n 'all_flow_avg_throughput_sum': sum(avg_throughput_list),\n 'all_flow_avg_throughput_min': np.min(avg_throughput_list),\n 'all_flow_avg_throughput_0.1th': np.percentile(avg_throughput_list, 0.1),\n 'all_flow_avg_throughput_1th': np.percentile(avg_throughput_list, 1),\n 'all_flow_avg_throughput_mean': np.mean(avg_throughput_list),\n 'all_flow_avg_throughput_median': np.median(avg_throughput_list),\n 'all_flow_avg_throughput_99th': np.percentile(avg_throughput_list, 99),\n 'all_flow_avg_throughput_99.9th': np.percentile(avg_throughput_list, 99.9),\n 'all_flow_avg_throughput_max': np.max(avg_throughput_list),\n \n 'all_flow_path_length_min': np.min(path_length_list),\n 'all_flow_path_length_0.1th': np.percentile(path_length_list, 0.1),\n 'all_flow_path_length_1th': np.percentile(path_length_list, 1),\n 'all_flow_path_length_mean': np.mean(path_length_list),\n 'all_flow_path_length_median': np.median(path_length_list),\n 'all_flow_path_length_99th': np.percentile(path_length_list, 99),\n 'all_flow_path_length_99.9th': np.percentile(path_length_list, 99.9),\n 'all_flow_path_length_max': np.max(path_length_list),\n }\n \n # Print results\n output_filename = analysis_folder_dir + '/flow_info.statistics'\n 
print('Writing flow statistics: ' + output_filename)\n with open(output_filename, 'w+') as outfile:\n for key, value in sorted(statistics.items()):\n outfile.write(str(key) + \"=\" + str(value) + \"\\n\")\n\n\ndef analyze_connection_info(logs_floodns_dir, analysis_folder_dir):\n\n # Read in all the columns\n flows_info_csv_columns = exputil.read_csv_direct_in_columns(\n logs_floodns_dir + '/connection_info.csv',\n \"pos_int,pos_int,pos_int,pos_float,pos_float,string,pos_int,pos_int,pos_int,pos_float,string,string\"\n )\n connection_id_list = flows_info_csv_columns[0]\n source_id_list = flows_info_csv_columns[1]\n target_id_list = flows_info_csv_columns[2]\n # total_size_list = flows_info_csv_columns[3]\n # total_sent_list = flows_info_csv_columns[4]\n # flows_string_list = flows_info_csv_columns[5]\n # num_flows_list = list(map(lambda x: len(x.split(\";\")), flows_string_list))\n # start_time_list = flows_info_csv_columns[6]\n # end_time_list = flows_info_csv_columns[7]\n duration_list = flows_info_csv_columns[8]\n avg_throughput_list = flows_info_csv_columns[9]\n completed_string_list = flows_info_csv_columns[10]\n completed_list = []\n count_completed = 0\n count_incomplete = 0\n for c in completed_string_list:\n if c == \"T\":\n completed_list.append(True)\n count_completed += 1\n elif c == \"F\":\n completed_list.append(False)\n count_incomplete += 1\n else:\n raise ValueError(\"Invalid completed value: \" + c)\n # metadata_list = flows_info_csv_columns[11]\n\n # Calculate some statistics\n if len(connection_id_list) == 0:\n statistics = {\n 'all_num_connections': len(connection_id_list),\n }\n \n else:\n\n statistics = {\n 'all_num_connections': len(connection_id_list),\n 'all_num_connections_completed': count_completed,\n 'all_num_connections_incomplete': count_incomplete,\n 'all_num_connections_fraction_completed': float(count_completed) / float(len(connection_id_list)),\n 'all_connection_num_unique_sources': len(set(source_id_list)),\n 'all_connection_num_unique_targets': len(set(target_id_list)),\n\n 'all_connection_avg_throughput_min': np.min(avg_throughput_list),\n 'all_connection_avg_throughput_0.1th': np.percentile(avg_throughput_list, 0.1),\n 'all_connection_avg_throughput_1th': np.percentile(avg_throughput_list, 1),\n 'all_connection_avg_throughput_mean': np.mean(avg_throughput_list),\n 'all_connection_avg_throughput_median': np.median(avg_throughput_list),\n 'all_connection_avg_throughput_99th': np.percentile(avg_throughput_list, 99),\n 'all_connection_avg_throughput_99.9th': np.percentile(avg_throughput_list, 99.9),\n 'all_connection_avg_throughput_max': np.max(avg_throughput_list),\n 'all_connection_avg_throughput_sum': sum(avg_throughput_list),\n }\n\n completion_time = []\n completion_throughput = []\n for i in range(len(connection_id_list)):\n if completed_list[i]:\n completion_time.append(duration_list[i])\n completion_throughput.append(avg_throughput_list[i])\n\n if count_completed > 0:\n statistics.update({\n 'completed_connection_completion_time_min': np.min(completion_time),\n 'completed_connection_completion_time_0.1th': np.percentile(completion_time, 0.1),\n 'completed_connection_completion_time_1th': np.percentile(completion_time, 1),\n 'completed_connection_completion_time_mean': np.mean(completion_time),\n 'completed_connection_completion_time_median': np.median(completion_time),\n 'completed_connection_completion_time_99th': np.percentile(completion_time, 99),\n 'completed_connection_completion_time_99.9th': np.percentile(completion_time, 99.9),\n 
'completed_connection_completion_time_max': np.max(completion_time),\n\n 'completed_connection_throughput_min': np.min(completion_throughput),\n 'completed_connection_throughput_0.1th': np.percentile(completion_throughput, 0.1),\n 'completed_connection_throughput_1th': np.percentile(completion_throughput, 1),\n 'completed_connection_throughput_mean': np.mean(completion_throughput),\n 'completed_connection_throughput_median': np.median(completion_throughput),\n 'completed_connection_throughput_99th': np.percentile(completion_throughput, 99),\n 'completed_connection_throughput_99.9th': np.percentile(completion_throughput, 99.9),\n 'completed_connection_throughput_max': np.max(completion_throughput),\n })\n\n # Print raw results\n output_filename = analysis_folder_dir + '/connection_info.statistics'\n print('Writing connection statistics: %s' % output_filename)\n with open(output_filename, 'w+') as outfile:\n for key, value in sorted(statistics.items()):\n outfile.write(str(key) + \"=\" + str(value) + \"\\n\")\n\n\ndef analyze_link_info(logs_floodns_dir, analysis_folder_dir):\n\n # Read in all the columns\n link_info_csv_columns = exputil.read_csv_direct_in_columns(\n logs_floodns_dir + '/link_info.csv',\n \"pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_float,pos_float,string\"\n )\n link_id_list = link_info_csv_columns[0]\n source_id_list = link_info_csv_columns[1]\n target_id_list = link_info_csv_columns[2]\n # start_time_list = link_info_csv_columns[3]\n # end_time_list = link_info_csv_columns[4]\n # duration_list = link_info_csv_columns[5]\n avg_utilization_list = link_info_csv_columns[6]\n # avg_active_flows_list = link_info_csv_columns[7]\n # metadata_list = link_info_csv_columns[8]\n\n # Count how many links had utilization of zero\n num_link_inactive = 0\n num_link_active = 0\n for u in avg_utilization_list:\n if u == 0:\n num_link_inactive += 1\n else:\n num_link_active += 1\n\n # Calculate some statistics\n if len(link_id_list) == 0:\n statistics = {\n 'all_num_links': len(link_id_list),\n }\n else:\n\n # General statistics\n statistics = {\n 'all_num_links': len(link_id_list),\n 'all_num_links_active': num_link_active,\n 'all_num_links_inactive': num_link_inactive,\n 'all_link_unique_sources': len(set(source_id_list)),\n 'all_link_unique_targets': len(set(target_id_list)),\n\n 'all_link_avg_utilization_min': np.min(avg_utilization_list),\n 'all_link_avg_utilization_0.1th': np.percentile(avg_utilization_list, 0.1),\n 'all_link_avg_utilization_1th': np.percentile(avg_utilization_list, 1),\n 'all_link_avg_utilization_mean': np.mean(avg_utilization_list),\n 'all_link_avg_utilization_median': np.median(avg_utilization_list),\n 'all_link_avg_utilization_std': np.std(avg_utilization_list),\n 'all_link_avg_utilization_99th': np.percentile(avg_utilization_list, 99),\n 'all_link_avg_utilization_99.9th': np.percentile(avg_utilization_list, 99.9),\n 'all_link_avg_utilization_max': np.max(avg_utilization_list),\n }\n\n # Print raw results\n output_filename = analysis_folder_dir + '/link_info.statistics'\n print('Writing link statistics: %s' % output_filename)\n with open(output_filename, 'w+') as outfile:\n for key, value in sorted(statistics.items()):\n outfile.write(str(key) + \"=\" + str(value) + \"\\n\")\n\n\ndef analyze_node_info(logs_floodns_dir, analysis_folder_dir):\n\n # Read in all the columns\n link_info_csv_columns = exputil.read_csv_direct_in_columns(\n logs_floodns_dir + '/node_info.csv',\n \"pos_int,pos_float,string\"\n )\n node_id_list = link_info_csv_columns[0]\n 
avg_active_flows_list = link_info_csv_columns[1]\n # metadata_list = link_info_csv_columns[2]\n\n # Count how many nodes did not see any flows\n num_node_inactive = 0\n num_node_active = 0\n for a in avg_active_flows_list:\n if a == 0:\n num_node_inactive += 1\n else:\n num_node_active += 1\n\n # Calculate some statistics\n if len(node_id_list) == 0:\n statistics = {\n 'all_num_nodes': len(node_id_list),\n }\n else:\n\n # General statistics\n statistics = {\n 'all_num_nodes': len(node_id_list),\n 'all_num_nodes_active': num_node_active,\n 'all_num_nodes_inactive': num_node_inactive,\n\n 'all_node_avg_num_active_flows_min': np.min(avg_active_flows_list),\n 'all_node_avg_num_active_flows_1th': np.percentile(avg_active_flows_list, 1),\n 'all_node_avg_num_active_flows_0.1th': np.percentile(avg_active_flows_list, 0.1),\n 'all_node_avg_num_active_flows_mean': np.mean(avg_active_flows_list),\n 'all_node_avg_num_active_flows_median': np.median(avg_active_flows_list),\n 'all_node_avg_num_active_flows_std': np.std(avg_active_flows_list),\n 'all_node_avg_num_active_flows_99th': np.percentile(avg_active_flows_list, 99),\n 'all_node_avg_num_active_flows_99.9th': np.percentile(avg_active_flows_list, 99.9),\n 'all_node_avg_num_active_flows_max': np.max(avg_active_flows_list),\n }\n\n # Print raw results\n output_filename = analysis_folder_dir + '/node_info.statistics'\n print('Writing node statistics: %s' % output_filename)\n with open(output_filename, 'w+') as outfile:\n for key, value in sorted(statistics.items()):\n outfile.write(str(key) + \"=\" + str(value) + \"\\n\")\n\n\ndef main():\n args = sys.argv[1:]\n if len(args) != 1:\n print(\"Must supply exactly one argument\")\n print(\"Usage: python analyze.py [/path/to/run_folder/logs_floodns]\")\n exit(1)\n else:\n\n # Check run folder path given as first argument\n logs_floodns_dir = sys.argv[1]\n if not os.path.isdir(logs_floodns_dir):\n print(\"The logs_floodns directory does not exist: \" + logs_floodns_dir)\n exit()\n\n # Create analysis folder\n analysis_folder_dir = logs_floodns_dir + '/analysis'\n if not os.path.exists(analysis_folder_dir):\n os.makedirs(analysis_folder_dir)\n print(\"Output directory for analysis: \" + analysis_folder_dir)\n\n # Perform all four analyses\n analyze_flow_info(logs_floodns_dir, analysis_folder_dir)\n analyze_connection_info(logs_floodns_dir, analysis_folder_dir)\n analyze_link_info(logs_floodns_dir, analysis_folder_dir)\n analyze_node_info(logs_floodns_dir, analysis_folder_dir)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.max",
"numpy.median",
"numpy.percentile",
"numpy.min",
"numpy.mean",
"numpy.std"
]
] |
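Each `analyze_*` function in analyze.py above assembles the same dictionary of min / 0.1th / 1th / mean / median / 99th / 99.9th / max by hand. The helper below is a hypothetical refactoring (no `summary_statistics` function exists in floodns) that condenses the repeated pattern; the key naming follows the `<prefix>_<metric>` scheme the file writes to its *.statistics outputs.

import numpy as np

def summary_statistics(prefix, values):
    # One summary dict in the style of analyze.py's statistics blocks
    values = np.asarray(values, dtype=float)
    return {
        prefix + '_min': np.min(values),
        prefix + '_0.1th': np.percentile(values, 0.1),
        prefix + '_1th': np.percentile(values, 1),
        prefix + '_mean': np.mean(values),
        prefix + '_median': np.median(values),
        prefix + '_99th': np.percentile(values, 99),
        prefix + '_99.9th': np.percentile(values, 99.9),
        prefix + '_max': np.max(values),
    }

# Same key=value layout that the analyze_* functions write to *.statistics files
stats = summary_statistics('all_link_avg_utilization', [0.0, 0.2, 0.5, 0.9])
for key, value in sorted(stats.items()):
    print(str(key) + "=" + str(value))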
sanoussi82/turicreate | [
"bcc3166c9ca3a399010130d642c783f75132865a"
] | [
"src/python/turicreate/test/test_activity_classifier.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nfrom __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nimport unittest\nimport turicreate as tc\nimport random\nimport tempfile\nimport math\nimport numpy as np\nfrom numbers import Number\nfrom . import util as test_util\nimport pytest\nfrom turicreate.toolkits._internal_utils import _mac_ver, _read_env_var_cpp\nfrom turicreate.toolkits._main import ToolkitError as _ToolkitError\nimport uuid\n\nUSE_CPP = _read_env_var_cpp('TURI_AC_USE_CPP_PATH')\n\ndef _load_data(self, num_examples = 1000, num_features = 3, max_num_sessions = 4,\n randomize_num_sessions = True, num_labels = 9, prediction_window = 5,\n enforce_all_sessions = False):\n random.seed(42)\n\n self.num_examples = num_examples\n self.num_features = num_features\n self.num_sessions = random.randint(1, max_num_sessions) if randomize_num_sessions else max_num_sessions\n self.num_labels = num_labels\n self.prediction_window = prediction_window\n\n self.features = ['X1-r', 'X2-r', 'X3-r']\n self.target = 'activity_label'\n self.session_id = 'session_id'\n\n if (enforce_all_sessions):\n random_session_ids = _random_session_ids(self.num_examples, self.num_sessions)\n else:\n random_session_ids = sorted([random.randint(0, self.num_sessions - 1) for i in range(self.num_examples)])\n\n random_labels = [random.randint(0, self.num_labels - 1) for i in range(self.num_examples)]\n\n self.data = tc.util.generate_random_sframe(column_codes='r' * self.num_features, num_rows=self.num_examples, random_seed=42)\n self.data[self.session_id] = random_session_ids\n self.data[self.target] = random_labels\n\n'''\n Creates a random session_id column, that guarantees that the number\n of sessions is exactly the requested one.\n'''\ndef _random_session_ids(num_examples, num_sessions):\n examples_per_session = num_examples // num_sessions\n if (examples_per_session == 0):\n raise ValueError(\"Can't divide {} lines into {} sessions.\".format(num_examples, num_sessions))\n\n min_lines_per_session = int(0.85 * examples_per_session)\n max_lines_per_session = int(1.15 * examples_per_session)\n\n lines_in_each_session = [random.randint(min_lines_per_session, max_lines_per_session) for i in range(num_sessions)]\n lines_in_each_session = [(x * (num_examples)) // sum(lines_in_each_session) for x in lines_in_each_session]\n lines_in_each_session[-1] += num_examples - sum(lines_in_each_session)\n\n session_ids = []\n for value, num_lines in enumerate(lines_in_each_session):\n session_ids.extend([value] * num_lines)\n\n return session_ids\n\n\nclass ActivityClassifierCreateStressTests(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n _load_data(self)\n\n def test_create_missing_value(self):\n sf_label = random.randint(0, self.num_labels - 1)\n sf_session_id = max(self.data[self.session_id])\n sf = self.data.append(tc.SFrame({self.features[0]: [None], self.features[1]: [3.14], self.features[2]: [5.23], self.target: [sf_label], self.session_id: [sf_session_id]}))\n with self.assertRaises(_ToolkitError):\n tc.activity_classifier.create(sf, \n features=self.features,\n target=self.target,\n session_id=self.session_id,\n prediction_window=self.prediction_window,\n validation_set=None)\n\n def test_create_none_validation_set(self):\n model = 
tc.activity_classifier.create(self.data,\n features=self.features,\n target=self.target,\n session_id=self.session_id,\n prediction_window=self.prediction_window,\n validation_set=None)\n predictions = model.predict(self.data)\n\n\n def test_create_no_validation_set(self):\n model = tc.activity_classifier.create(self.data,\n features=self.features,\n target=self.target,\n session_id=self.session_id,\n prediction_window=self.prediction_window)\n predictions = model.predict(self.data)\n\n\n def test_create_features_target_session(self):\n model = tc.activity_classifier.create(self.data,\n features=self.features,\n target=self.target,\n session_id=self.session_id)\n predictions = model.predict(self.data)\n\n\n def test_create_target_session(self):\n model = tc.activity_classifier.create(self.data,\n target=self.target,\n session_id=self.session_id)\n predictions = model.predict(self.data)\n\n def test_invalid_model(self):\n \"\"\"\n Verify that creating a model with wrong fields fails\n \"\"\"\n with self.assertRaises(RuntimeError):\n model = tc.activity_classifier.create(self.data,\n features = self.features,\n target ='wrong',\n session_id=self.session_id,\n prediction_window=self.prediction_window,\n validation_set=None)\n\n\nclass ActivityClassifierAutoValdSetTest(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n self.fraction = 0.9\n self.seed = 42\n\n def _create_auto_validation_set(self, is_small=False):\n model = tc.activity_classifier.create(self.data,\n features=self.features,\n target=self.target,\n session_id=self.session_id,\n prediction_window=self.prediction_window,\n validation_set='auto')\n predictions = model.predict(self.data)\n\n # Check the size of the auto validation set\n num_sessions = len(self.data[self.session_id].unique())\n valid_num_sessions = num_sessions - model.num_sessions\n valid_frac = float(valid_num_sessions / num_sessions)\n expected_frac = 0.0 if is_small else 1.0 - self.fraction\n self.assertAlmostEqual(valid_frac, expected_frac, places=1,\n msg=\"Got {} validation sessions out of {}, which is {:.3f}, and not the expected {}\".format(valid_num_sessions, num_sessions, valid_frac, expected_frac))\n\n def test_random_split_by_session(self):\n num_sessions = tc.activity_classifier.util._MIN_NUM_SESSIONS_FOR_SPLIT\n _load_data(self, num_examples=10000, max_num_sessions=num_sessions,\n randomize_num_sessions=False, enforce_all_sessions=True)\n\n train, valid = tc.activity_classifier.util.random_split_by_session(self.data, self.session_id, self.fraction, self.seed)\n\n train_num_sessions = len(train[self.session_id].unique())\n train_frac = float(train_num_sessions / num_sessions)\n expected_frac = self.fraction\n\n self.assertAlmostEqual(train_frac, expected_frac, places=1,\n msg= \"Got {} train sessions out of {}, which is {:.3f}, and not the expected {}\".format(\n train_num_sessions, num_sessions, train_frac, expected_frac))\n\n valid_num_sessions = len(valid[self.session_id].unique())\n valid_frac = float(valid_num_sessions / num_sessions)\n expected_valid_frac = 1.0 - self.fraction\n\n self.assertAlmostEqual(valid_frac, expected_valid_frac, places=1,\n msg= \"Got {} train sessions out of {}, which is {:.3f}, and not the expected {}\".format(\n valid_num_sessions, num_sessions, valid_frac, expected_valid_frac))\n\n train_sessions_set = set(train[self.session_id].unique())\n valid_sessions_set = set(valid[self.session_id].unique())\n\n self.assertTrue(train_sessions_set.isdisjoint(valid_sessions_set),\n \"After train-test split, the train and 
validation sets should not include the same sessions\")\n\n def test_create_auto_validation_set_small(self):\n num_sessions = tc.activity_classifier.util._MIN_NUM_SESSIONS_FOR_SPLIT // 2\n _load_data(self, max_num_sessions=num_sessions, randomize_num_sessions=False, enforce_all_sessions=True)\n\n self._create_auto_validation_set(is_small=True)\n\n def test_create_auto_validation_set_typical(self):\n num_sessions = tc.activity_classifier.util._MIN_NUM_SESSIONS_FOR_SPLIT * 4\n _load_data(self, num_examples=10000, max_num_sessions=num_sessions, randomize_num_sessions=False,\n enforce_all_sessions=True)\n\n self._create_auto_validation_set()\n\n def test_create_auto_validation_set_string_session_id(self):\n num_sessions = tc.activity_classifier.util._MIN_NUM_SESSIONS_FOR_SPLIT * 4\n _load_data(self, num_examples=10000, max_num_sessions=num_sessions, randomize_num_sessions=False,\n enforce_all_sessions=True)\n\n from six.moves import xrange as _xrange\n session_ids_dict = {}\n for i in _xrange(num_sessions):\n session_ids_dict[i] = uuid.uuid4().hex[:6].upper()\n\n self.data[self.session_id] = self.data[self.session_id].apply(lambda x: session_ids_dict[x])\n\n self._create_auto_validation_set()\n\nclass ActivityClassifierTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n The setup class method for the basic test case with all default values.\n \"\"\"\n _load_data(self)\n\n # Create the model\n self.model = tc.activity_classifier.create(self.data,\n features=self.features,\n target=self.target,\n session_id=self.session_id,\n prediction_window=self.prediction_window,\n validation_set=None)\n\n self.def_opts = {\n 'verbose': True,\n 'prediction_window': 100,\n 'max_iterations': 10,\n 'batch_size' : 32\n }\n\n # Answers\n self.opts = self.def_opts.copy()\n self.opts['prediction_window'] = self.prediction_window\n\n self.get_ans = {\n 'features': lambda x: x == self.features,\n 'training_time': lambda x: x > 0,\n 'target': lambda x: x == self.target,\n 'verbose': lambda x: x == True,\n 'session_id': lambda x: x == self.session_id,\n 'prediction_window': lambda x: x == self.prediction_window,\n 'training_accuracy': lambda x: x >= 0 and x <= 1,\n 'training_log_loss': lambda x: isinstance(x, Number),\n 'max_iterations': lambda x: x == self.def_opts['max_iterations'],\n 'num_sessions': lambda x: x == self.num_sessions,\n 'num_features': lambda x: x == self.num_features,\n 'num_examples': lambda x: x == self.num_examples,\n 'num_classes': lambda x: x == self.num_labels,\n 'batch_size' : lambda x: x == self.def_opts['batch_size'],\n 'classes': lambda x: sorted(x) == sorted(self.data[self.target].unique())\n }\n self.exposed_fields_ans = list(self.get_ans.keys())\n if USE_CPP:\n self.fields_ans = self.exposed_fields_ans + ['training_report_by_class',\n 'training_iterations', 'random_seed',\n 'training_precision', 'training_confusion_matrix',\n 'use_data_augmentation', 'training_f1_score',\n 'training_auc', 'training_roc_curve', 'training_recall']\n else:\n self.fields_ans = self.exposed_fields_ans + ['_recalibrated_batch_size',\n '_pred_model', '_id_target_map',\n '_predictions_in_chunk', '_target_id_map']\n\n\n\n def _calc_expected_predictions_length(self, predict_input, top_k = 1):\n\n input_sessions = predict_input.groupby(self.session_id, { 'session_len' : tc.aggregate.COUNT()})\n prediction_window = self.model.prediction_window\n input_sessions['num_predictions_per_session'] = input_sessions['session_len'].apply(\n lambda x: math.ceil(float(x) / prediction_window) )\n 
total_num_of_prediction = sum(input_sessions['num_predictions_per_session']) * top_k\n\n return total_num_of_prediction\n\n def test_predict(self):\n \"\"\"\n Check the predict() function.\n \"\"\"\n model = self.model\n for output_type in ['probability_vector', 'class']:\n preds = model.predict(\n self.data, output_type=output_type, output_frequency='per_window')\n expected_len = self._calc_expected_predictions_length(self.data)\n self.assertEqual(len(preds), expected_len)\n\n def test_export_coreml(self):\n \"\"\"\n Check the export_coreml() function.\n \"\"\"\n import coremltools\n # Save the model as a CoreML model file\n filename = tempfile.mkstemp('ActivityClassifier.mlmodel')[1]\n self.model.export_coreml(filename)\n\n # Load the model back from the CoreML model file\n coreml_model = coremltools.models.MLModel(filename)\n\n rs = np.random.RandomState(1234)\n\n # Create a small dataset, and compare the models' predict() output\n dataset = tc.util.generate_random_sframe(column_codes='r' * 3, num_rows=10)\n dataset['session_id'] = 0\n dataset[self.target] = random_labels = [rs.randint(0, self.num_labels - 1, ) for i in range(10)]\n\n if _mac_ver() >= (10, 13):\n w = self.prediction_window\n if USE_CPP:\n labels = list(map(str, sorted(self.model.classes)))\n else:\n labels = list(map(str, sorted(self.model._target_id_map.keys())))\n\n data_list = [dataset[f].to_numpy()[:, np.newaxis] for f in self.features]\n np_data = np.concatenate(data_list, 1)[np.newaxis]\n\n pred = self.model.predict(dataset, output_type='probability_vector')\n model_time0_values = pred[0]\n model_time1_values = pred[w]\n model_predictions = np.array([model_time0_values, model_time1_values])\n\n ret0 = coreml_model.predict({'features' : np_data[:, :w].copy()})\n\n ret1 = coreml_model.predict({'features' : np_data[:, w:2*w].copy(),\n 'hiddenIn': ret0['hiddenOut'],\n 'cellIn': ret0['cellOut']})\n\n coreml_time0_values = [ret0[self.target + 'Probability'][l] for l in labels]\n coreml_time1_values = [ret1[self.target + 'Probability'][l] for l in labels]\n coreml_predictions = np.array([coreml_time0_values, coreml_time1_values])\n\n np.testing.assert_array_almost_equal(model_predictions, coreml_predictions, decimal=3)\n\n def test_classify(self):\n \"\"\"\n Check the classify() function.\n \"\"\"\n model = self.model\n preds = model.classify(self.data, output_frequency='per_window')\n expected_len = self._calc_expected_predictions_length(self.data)\n self.assertEqual(len(preds), expected_len)\n\n def test_predict_topk(self):\n \"\"\"\n Check the predict_topk function.\n \"\"\"\n model = self.model\n for output_type in ['rank', 'probability']:\n preds = model.predict_topk(\n self.data, output_type=output_type, output_frequency='per_window')\n expected_len = self._calc_expected_predictions_length(self.data, top_k=3)\n self.assertEqual(len(preds), expected_len)\n\n preds = model.predict_topk(\n self.data.head(100), k=5, output_frequency='per_window')\n expected_len = self._calc_expected_predictions_length(self.data.head(100), top_k=5)\n self.assertEqual(len(preds), expected_len)\n\n def test_evaluate_with_incomplete_targets(self):\n \"\"\"\n Check that evaluation does not require the test data to span all labels.\n \"\"\"\n\n # Arbitrarily filter out all rows whose label matches the first row's.\n filtered_label = self.data[self.target][0]\n filtered_data = self.data[self.data[self.target] != filtered_label]\n\n # Run evaluation.\n evaluation = self.model.evaluate(filtered_data)\n\n # Verify that all metrics were computed 
and included in the result.\n for metric in ['accuracy', 'auc', 'precision', 'recall', 'f1_score',\n 'log_loss', 'confusion_matrix', 'roc_curve']:\n self.assertIn(metric, evaluation)\n\n def test__list_fields(self):\n \"\"\"\n Check the list fields function.\n \"\"\"\n model = self.model\n fields = model._list_fields()\n self.assertEqual(set(fields), set(self.fields_ans))\n\n def test_get(self):\n \"\"\"\n Check the get function. Compare with the answer supplied as a lambda\n function for each field.\n \"\"\"\n model = self.model\n for field in self.exposed_fields_ans:\n ans = model._get(field)\n self.assertTrue(self.get_ans[field](ans),\n '''Get failed in field {}. Output was {}.'''.format(field, ans))\n\n def test_summary(self):\n \"\"\"\n Check the summary function.\n \"\"\"\n model = self.model\n model.summary()\n\n def test_repr(self):\n \"\"\"\n Check the repr function.\n \"\"\"\n # Repr after fit\n model = self.model\n self.assertEqual(type(str(model)), str)\n self.assertEqual(type(model.__repr__()), str)\n\n def test_save_and_load(self):\n \"\"\"\n Make sure saving and loading retains everything.\n \"\"\"\n test_methods_list = [func for func in dir(self) if callable(getattr(self, func)) and func.startswith(\"test\")]\n test_methods_list.remove('test_save_and_load')\n\n with test_util.TempDirectory() as filename:\n self.model.save(filename)\n self.model = None\n self.model = tc.load_model(filename)\n\n print (\"Repeating all test cases after model delete and reload\")\n for test_method in test_methods_list:\n try:\n getattr(self, test_method)()\n print(\"Save and Load:\", test_method, \"has passed\")\n except unittest.SkipTest:\n pass\n except Exception as e:\n self.assertTrue(False, \"After model save and load, method \" + test_method +\n \" has failed with error: \" + str(e))\n\n\[email protected](tc.util._num_available_gpus() == 0, 'Requires GPU')\[email protected]\nclass ActivityClassifierGPUTest(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n _load_data(self)\n\n def test_gpu_save_load_export(self):\n old_num_gpus = tc.config.get_num_gpus()\n gpu_options = set([old_num_gpus, 0, 1])\n for in_gpus in gpu_options:\n for out_gpus in gpu_options:\n tc.config.set_num_gpus(in_gpus)\n model = tc.activity_classifier.create(self.data,\n target=self.target,\n session_id=self.session_id)\n with test_util.TempDirectory() as filename:\n model.save(filename)\n tc.config.set_num_gpus(out_gpus)\n model = tc.load_model(filename)\n\n with test_util.TempDirectory() as filename:\n model.export_coreml(filename)\n\n tc.config.set_num_gpus(old_num_gpus)\n"
] | [
[
"numpy.testing.assert_array_almost_equal",
"numpy.array",
"numpy.concatenate",
"numpy.random.RandomState"
]
] |
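
The `test_export_coreml` case in the row above compares the toolkit's in-memory predictions against the exported Core ML model using exactly the four NumPy APIs listed. A minimal sketch of that comparison pattern, independent of Turi Create; `reference_predict` and `exported_predict` are hypothetical stand-ins for `model.predict(...)` and `coreml_model.predict(...)`:

```python
import numpy as np
from numpy.testing import assert_array_almost_equal

# Hypothetical stand-ins for the two prediction paths compared in the test.
def reference_predict(window):
    return window.mean(axis=0)

def exported_predict(window):
    # Tiny numerical drift, as expected between two inference backends.
    return window.mean(axis=0) + 1e-5

rs = np.random.RandomState(1234)                  # seeded RNG, as in the test
feature_columns = [rs.rand(5, 1) for _ in range(3)]
window = np.concatenate(feature_columns, axis=1)  # (timesteps, features)

expected = np.array([reference_predict(window)])
actual = np.array([exported_predict(window)])
# decimal=3 tolerates small cross-backend differences, as in the test above.
assert_array_almost_equal(expected, actual, decimal=3)
```
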
gsy/moonlight | [
"1eca6c903b7334afca7555b9aeb7a212c76fef9d"
] | [
"moonlight/structure/beams.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Detects note beams.\n\nBeams are long, very thick, horizontal or diagonal lines that may intersect with\nthe staves. To detect them, we use staff removal followed by extra binary\nerosion, in case the staves are not completely removed and still have extra\nblack pixels around a beam. We then find all of the connected components,\nbecause each beam should now be detached from the stem, staff, and (typically)\nother beams. We filter beams by minimum width. Further processing and assignment\nof stems to beams is done in `beam_processor.py`.\n\nEach beam halves the duration of each note it is atteched to by a stem.\n\"\"\"\n# TODO(ringw): Make Hough line segments more robust, and then use them here\n# instead of connected components.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom moonlight.structure import components\nfrom moonlight.vision import morphology\n\nCOLUMNS = components.ConnectedComponentsColumns\n\n\nclass Beams(object):\n \"\"\"Note beam detector.\"\"\"\n\n def __init__(self, staff_remover, threshold=127):\n staff_detector = staff_remover.staff_detector\n image = morphology.binary_erosion(\n tf.less(staff_remover.remove_staves, threshold),\n staff_detector.staffline_thickness)\n beams = components.get_component_bounds(image)\n staffline_distance = tf.cond(\n tf.greater(tf.shape(staff_detector.staves)[0], 0),\n lambda: tf.reduce_mean(staff_detector.staffline_distance),\n lambda: tf.constant(0, tf.int32))\n min_length = 2 * staffline_distance\n keep_beam = tf.greater_equal(beams[:, COLUMNS.X1] - beams[:, COLUMNS.X0],\n min_length)\n keep_beam.set_shape([None])\n self.beams = tf.boolean_mask(beams, keep_beam)\n self.data = [self.beams]\n\n\nclass ComputedBeams(object):\n \"\"\"Holder for the computed beams NumPy array.\"\"\"\n\n def __init__(self, beams):\n self.beams = np.asarray(beams, np.int32)\n"
] | [
[
"tensorflow.shape",
"numpy.asarray",
"tensorflow.less",
"tensorflow.greater_equal",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.boolean_mask"
]
] |
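
The core of `Beams.__init__` above is a length filter over connected-component bounds: keep only components whose horizontal extent is at least twice the staffline distance. A minimal sketch of that step with made-up bounds; the `[x0, y0, x1, y1]` column layout is an assumption standing in for `components.ConnectedComponentsColumns`:

```python
import tensorflow as tf

# Assumed bounds layout [x0, y0, x1, y1] (the real code reads indices from
# ConnectedComponentsColumns).
X0, X1 = 0, 2

# Hypothetical connected-component bounding boxes, one row per component.
beams = tf.constant([[0, 10, 50, 14],
                     [5, 20, 12, 23],
                     [30, 40, 90, 45]], dtype=tf.int32)

staffline_distance = tf.constant(10, tf.int32)
min_length = 2 * staffline_distance

# Keep only components at least min_length wide, as in Beams.__init__.
keep_beam = tf.greater_equal(beams[:, X1] - beams[:, X0], min_length)
print(tf.boolean_mask(beams, keep_beam))  # the 7px-wide component is dropped
```
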
charliecb/habitat-sim | [
"1357956e33884ddf1c39811e5d05e7f3c17695d7"
] | [
"tests/test_data_extraction.py"
] | [
"# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nfrom torch import nn as nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, Dataset\n\nimport habitat_sim\nfrom examples.settings import make_cfg\nfrom habitat_sim.utils.data.data_extractor import ImageExtractor\nfrom habitat_sim.utils.data.data_structures import ExtractorLRUCache\nfrom habitat_sim.utils.data.pose_extractor import TopdownView\n\n\nclass TrivialNet(nn.Module):\n def __init__(self):\n super(TrivialNet, self).__init__()\n\n def forward(self, x):\n x = F.relu(x)\n return x\n\n\nclass MyDataset(Dataset):\n def __init__(self, extractor):\n self.extractor = extractor\n\n def __len__(self):\n return len(self.extractor)\n\n def __getitem__(self, idx):\n sample = self.extractor[idx]\n sample[\"label\"] = 0\n return sample\n\n\ndef test_topdown_view(make_cfg_settings):\n with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:\n tdv = TopdownView(sim, height=0.0, meters_per_pixel=0.1)\n topdown_view = tdv.topdown_view\n assert type(topdown_view) == np.ndarray\n\n\ndef test_data_extractor_end_to_end(make_cfg_settings):\n # Path is relative to simulator.py\n with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:\n scene_filepath = \"\"\n extractor = ImageExtractor(\n scene_filepath, labels=[0.0], img_size=(32, 32), sim=sim\n )\n dataset = MyDataset(extractor)\n dataloader = DataLoader(dataset, batch_size=3)\n net = TrivialNet()\n\n # Run data through network\n for sample_batch in dataloader:\n img, _ = sample_batch[\"rgba\"], sample_batch[\"label\"]\n img = img.permute(0, 3, 2, 1).float()\n net(img)\n\n\ndef test_extractor_cache():\n cache = ExtractorLRUCache()\n cache.add(1, \"one\")\n cache.add(2, \"two\")\n cache.add(3, \"three\")\n assert cache[next(reversed(list(cache._order)))] == \"three\"\n accessed_data = cache[2] # noqa : F841\n assert cache[next(reversed(list(cache._order)))] == \"two\"\n cache.remove_from_back()\n assert 1 not in cache\n\n\ndef test_pose_extractors(make_cfg_settings):\n with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:\n scene_filepath = \"\"\n pose_extractor_names = [\"closest_point_extractor\", \"panorama_extractor\"]\n for name in pose_extractor_names:\n extractor = ImageExtractor(\n scene_filepath, img_size=(32, 32), sim=sim, pose_extractor_name=name\n )\n assert len(extractor) > 1\n"
] | [
[
"torch.nn.functional.relu",
"torch.utils.data.DataLoader"
]
] |
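
`test_data_extractor_end_to_end` above wraps an `ImageExtractor` in a torch `Dataset` and batches it through a trivial net. The same pattern with a synthetic in-memory extractor; `FakeExtractor` is a hypothetical stand-in for habitat's `ImageExtractor`, so no simulator is needed:

```python
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset

class FakeExtractor:
    """Hypothetical stand-in for habitat_sim's ImageExtractor."""
    def __len__(self):
        return 9

    def __getitem__(self, idx):
        # 32x32 RGBA sample, mirroring img_size=(32, 32) in the test above.
        return {"rgba": torch.rand(32, 32, 4)}

class MyDataset(Dataset):
    def __init__(self, extractor):
        self.extractor = extractor

    def __len__(self):
        return len(self.extractor)

    def __getitem__(self, idx):
        sample = self.extractor[idx]
        sample["label"] = 0
        return sample

net = nn.Identity()  # trivial net; the test's TrivialNet applies F.relu
loader = DataLoader(MyDataset(FakeExtractor()), batch_size=3)
for batch in loader:
    img = batch["rgba"].permute(0, 3, 2, 1).float()  # NHWC -> NCHW-style
    out = F.relu(net(img))
    assert out.shape == (3, 4, 32, 32)
```
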
janjagusch/meetup-analytics | [
"1d923f6353bd95edbd98dd8ee914606e6f944e77"
] | [
"cloud_functions/meetup-api-to-bigquery/main.py"
] | [
"\"\"\"\nRequests data from Meetup API and inserts it into Google BigQuery.\n\"\"\"\n\nimport datetime\nimport os\nimport warnings\n\nimport pandas as pd\nfrom meetup.client import Client\nfrom meetup.client.errors import RequestError\nfrom meetup.token_manager import TokenCacheGCS, TokenManager\nfrom tqdm import tqdm\n\nfrom cloud_functions_utils import decode, error_reporting, to_table\n\nwarnings.filterwarnings(\n \"ignore\", \"Your application has authenticated using end user credentials\"\n)\n\nTOKEN_MANAGER = TokenManager(\n os.environ[\"CLIENT_ID\"],\n os.environ[\"CLIENT_SECRET\"],\n TokenCacheGCS(os.environ[\"BUCKET_NAME\"], os.environ[\"BLOB_NAME\"]),\n)\n\nCLIENT = Client(access_token=lambda: TOKEN_MANAGER.token().access_token)\n\nDATASET_ID = \"meetup_raw\"\n\n\ndef _merge_location_info(\n df,\n country_col=\"country\",\n city_col=\"city\",\n lon_col=\"lon\",\n lat_col=\"lat\",\n new_col=\"location\",\n):\n df[new_col] = df.apply(\n lambda row: {\n \"country\": row[country_col],\n \"city\": row[city_col],\n \"geo\": {\"lon\": row[lon_col], \"lat\": row[lat_col]},\n },\n axis=1,\n )\n return df\n\n\ndef _access_nested_value(df, keys, new_col):\n def nested_get(dct, keys):\n for key in keys:\n try:\n dct = dct[key]\n except KeyError:\n return None\n return dct\n\n df[new_col] = df.apply(lambda row: nested_get(row, keys), axis=1)\n return df\n\n\ndef _add_column(df, val, new_col):\n df[new_col] = val(df) if callable(val) else val\n return df\n\n\ndef _cast_to_datetime(df, col, new_col=None):\n if not new_col:\n new_col = col\n df[new_col] = pd.to_datetime(df[col] * 10 ** 6)\n return df\n\n\ndef _replace_nan(df):\n return df.replace({pd.NA: None})\n\n\ndef _transform_members(members, requested_at, inplace=False):\n if not inplace:\n members = members.copy()\n return (\n members.pipe(\n _access_nested_value, keys=[\"group_profile\", \"created\"], new_col=\"joined_at\"\n )\n .pipe(\n _access_nested_value,\n keys=[\"group_profile\", \"visited\"],\n new_col=\"visited_at\",\n )\n .pipe(\n _access_nested_value,\n keys=[\"group_profile\", \"updated\"],\n new_col=\"updated_at\",\n )\n .pipe(_access_nested_value, keys=[\"group_profile\", \"role\"], new_col=\"role\")\n .pipe(_cast_to_datetime, col=\"joined_at\")\n .pipe(_cast_to_datetime, col=\"visited_at\")\n .pipe(_cast_to_datetime, col=\"updated_at\")\n .pipe(_cast_to_datetime, col=\"joined\", new_col=\"created_at\")\n .pipe(_merge_location_info)\n .pipe(_add_column, val=requested_at, new_col=\"requested_at\")\n .pipe(_add_column, val=datetime.datetime.now(), new_col=\"inserted_at\")\n .pipe(_replace_nan)[\n [\n \"id\",\n \"created_at\",\n \"joined_at\",\n \"updated_at\",\n \"visited_at\",\n \"role\",\n \"location\",\n \"requested_at\",\n \"inserted_at\",\n ]\n ]\n )\n\n\ndef _transform_rsvps(rsvps, requested_at, inplace=False):\n if not inplace:\n rsvps = rsvps.copy()\n return (\n rsvps.pipe(_access_nested_value, keys=[\"member\", \"id\"], new_col=\"member_id\")\n .pipe(_access_nested_value, keys=[\"event\", \"id\"], new_col=\"event_id\")\n .pipe(_access_nested_value, keys=[\"group\", \"id\"], new_col=\"group_id\")\n .pipe(_cast_to_datetime, col=\"updated\", new_col=\"updated_at\")\n .pipe(_cast_to_datetime, col=\"created\", new_col=\"created_at\")\n .pipe(_add_column, val=requested_at, new_col=\"requested_at\")\n .pipe(_add_column, val=datetime.datetime.now(), new_col=\"inserted_at\")\n .pipe(_replace_nan)[\n [\n \"member_id\",\n \"event_id\",\n \"group_id\",\n \"response\",\n \"guests\",\n \"created_at\",\n \"updated_at\",\n 
\"requested_at\",\n \"inserted_at\",\n ]\n ]\n )\n\n\ndef _transform_events(events, requested_at, inplace=False):\n if not inplace:\n events = events.copy()\n return (\n events.copy()\n .pipe(_cast_to_datetime, col=\"created\", new_col=\"created_at\")\n .pipe(_add_column, val=lambda df: df[\"duration\"] / 1000, new_col=\"duration\")\n .pipe(_cast_to_datetime, col=\"time\", new_col=\"started_at\")\n .pipe(_cast_to_datetime, col=\"updated\", new_col=\"updated_at\")\n .pipe(_access_nested_value, keys=[\"group\", \"id\"], new_col=\"group_id\")\n .pipe(_access_nested_value, keys=[\"venue\", \"country\"], new_col=\"country\")\n .pipe(_access_nested_value, keys=[\"venue\", \"city\"], new_col=\"city\")\n .pipe(_access_nested_value, keys=[\"venue\", \"lon\"], new_col=\"lon\")\n .pipe(_access_nested_value, keys=[\"venue\", \"lat\"], new_col=\"lat\")\n .pipe(_access_nested_value, keys=[\"venue\", \"name\"], new_col=\"venue_name\")\n .pipe(_merge_location_info)\n .pipe(\n _add_column,\n val=lambda df: df.apply(\n lambda row: {\"name\": row[\"venue_name\"], \"location\": row[\"location\"]},\n axis=1,\n ),\n new_col=\"venue\",\n )\n .pipe(_add_column, val=requested_at, new_col=\"requested_at\")\n .pipe(_add_column, val=datetime.datetime.now(), new_col=\"inserted_at\")\n .pipe(_replace_nan)[\n [\n \"id\",\n \"name\",\n \"group_id\",\n \"started_at\",\n \"duration\",\n \"rsvp_limit\",\n \"status\",\n \"yes_rsvp_count\",\n \"waitlist_count\",\n \"venue\",\n \"is_online_event\",\n \"visibility\",\n \"pro_is_email_shared\",\n \"member_pay_fee\",\n \"created_at\",\n \"updated_at\",\n \"requested_at\",\n \"inserted_at\",\n ]\n ]\n )\n\n\ndef _transform_attendances(df, group_id, event_id, requested_at, inplace=False):\n if not inplace:\n df = df.copy()\n df[\"updated\"] = df.get(\"updated\")\n df[\"attendance_id\"] = df.get(\"attendance_id\")\n df[\"status\"] = df.get(\"status\")\n df[\"guests\"] = df.get(\"guests\")\n return (\n df.pipe(_access_nested_value, keys=[\"member\", \"id\"], new_col=\"member_id\")\n .pipe(_cast_to_datetime, col=\"updated\", new_col=\"updated_at\")\n .pipe(_add_column, val=requested_at, new_col=\"requested_at\")\n .pipe(_add_column, val=datetime.datetime.now(), new_col=\"inserted_at\")\n .pipe(_add_column, val=group_id, new_col=\"group_id\")\n .pipe(_add_column, val=event_id, new_col=\"event_id\")\n .rename({\"attendance_id\": \"id\"}, axis=1)\n .pipe(_replace_nan)[\n [\n \"id\",\n \"member_id\",\n \"event_id\",\n \"group_id\",\n \"status\",\n \"guests\",\n \"updated_at\",\n \"requested_at\",\n \"inserted_at\",\n ]\n ]\n )\n\n\ndef _request_members(client, group_id):\n return client.scan(\n url=f\"{group_id}/members\",\n # only=\"id,joined,group_profile.created,group_profile.visited,group_profile.role,group_profile.updated,city,country,lat,lon\"\n )\n\n\ndef _request_events(client, group_id):\n return client.scan(url=f\"/{group_id}/events\", status=\"past,upcoming\")\n\n\ndef _request_rsvps(client, group_id, event_id):\n return client.scan(\n url=f\"{group_id}/events/{event_id}/rsvps\",\n only=\"created,updated,response,guests,event.id,member.id,group.id\",\n )\n\n\ndef _request_attendances(client, group_id, event_id):\n \"\"\"\n Handle RequestError if it happens in the first yield.\n \"\"\"\n i = 0\n try:\n for obj in client.scan(\n url=f\"{group_id}/events/{event_id}/attendance\",\n only=\"member.id,attendance_id,status,updated,guests\",\n ):\n yield obj\n i += 1\n except RequestError as error:\n if not i:\n print(f\"Event {group_id}/{event_id} has not started yet.\")\n return\n 
raise error\n\n\ndef _main(client, group_id, project_id, force_past_events=False):\n \"\"\"\n Requests data from Meetup API and inserts it into Google BigQuery.\n \"\"\"\n requested_at = datetime.datetime.now()\n # request, transform and insert members\n print(\"Processing members.\")\n for page in _request_members(client, group_id):\n to_table(\n _transform_members(page, requested_at).to_dict(orient=\"records\"),\n project_id,\n DATASET_ID,\n \"members\",\n )\n # request, transform and insert events\n # also track event ids for rsvps\n print(\"Processing events.\")\n event_ids = []\n for page in _request_events(client, group_id):\n events_transformed = _transform_events(page, requested_at)\n event_ids.extend(\n events_transformed[\n (\n events_transformed.started_at\n > datetime.datetime.now() - datetime.timedelta(hours=24)\n )\n | force_past_events\n ].id\n )\n to_table(\n events_transformed.to_dict(orient=\"records\"),\n project_id,\n DATASET_ID,\n \"events\",\n )\n # iterate through event ids\n for event_id in tqdm(event_ids):\n # request, transform and insert rsvps per event id\n print(\"Processing rsvps.\")\n for page in _request_rsvps(client, group_id, event_id):\n to_table(\n _transform_rsvps(page, requested_at).to_dict(orient=\"records\"),\n project_id,\n DATASET_ID,\n \"rsvps\",\n )\n # request, transform and insert attendances per event id\n print(\"Processing attendances.\")\n for page in _request_attendances(client, group_id, event_id):\n to_table(\n _transform_attendances(page, group_id, event_id, requested_at).to_dict(\n orient=\"records\"\n ),\n project_id,\n DATASET_ID,\n \"attendances\",\n )\n\n\n@error_reporting\n# pylint: disable=unused-argument\ndef main(event, context):\n # pylint: enable=unused-argument\n \"\"\"\n Requests data from Meetup API and inserts it into Google BigQuery.\n \"\"\"\n data = decode(event[\"data\"])\n group_id = data[\"group_id\"]\n _main(\n CLIENT,\n group_id,\n os.environ[\"PROJECT_ID\"],\n bool(os.environ.get(\"FORCE_PAST_EVENTS\")),\n )\n"
] | [
[
"pandas.to_datetime"
]
] |
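
The transforms in the row above are built from tiny helpers chained with `DataFrame.pipe`; `_cast_to_datetime` is where the single listed API, `pandas.to_datetime`, appears. A minimal sketch of the same pipe pattern using two helpers copied from the file, on a one-row hypothetical payload:

```python
import pandas as pd

def _cast_to_datetime(df, col, new_col=None):
    if not new_col:
        new_col = col
    # Meetup sends epoch milliseconds; * 10**6 converts to nanoseconds,
    # pandas' default unit for integer input to to_datetime.
    df[new_col] = pd.to_datetime(df[col] * 10 ** 6)
    return df

def _add_column(df, val, new_col):
    df[new_col] = val(df) if callable(val) else val
    return df

raw = pd.DataFrame({"created": [1514764800000], "response": ["yes"]})
out = (raw.pipe(_cast_to_datetime, col="created", new_col="created_at")
          .pipe(_add_column, val="requested-now", new_col="requested_at"))
print(out.created_at.iloc[0])  # 2018-01-01 00:00:00
```
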
duncanhobbs/OG-Core | [
"699a7030739e5b3f44ab4dd58fecf4cefbdc24a6"
] | [
"ogcore/parameters.py"
] | [
"import os\nimport numpy as np\nimport scipy.interpolate as si\nimport pkg_resources\nimport paramtools\n\n# import ogcore\nfrom ogcore import elliptical_u_est\nfrom ogcore.utils import rate_conversion\nfrom ogcore.constants import BASELINE_DIR\nCURRENT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Specifications(paramtools.Parameters):\n '''\n Inherits ParamTools Parameters abstract base class.\n '''\n defaults = os.path.join(CURRENT_PATH, \"default_parameters.json\")\n array_first = True\n\n def __init__(self,\n output_base=BASELINE_DIR, baseline_dir=BASELINE_DIR,\n baseline=False, num_workers=1):\n super().__init__()\n\n self.output_base = output_base\n self.baseline_dir = baseline_dir\n self.baseline = baseline\n self.num_workers = num_workers\n\n # put OG-Core version in parameters to save for reference\n self.ogcore_version = pkg_resources.get_distribution(\"ogcore\").version\n\n # does cheap calculations to find parameter values\n self.initialize()\n\n self.parameter_warnings = ''\n self.parameter_errors = ''\n self._ignore_errors = False\n\n def initialize(self):\n '''\n ParametersBase reads JSON file and sets attributes to self\n Next call self.compute_default_params for further initialization\n\n Args:\n None\n\n Returns:\n None\n\n '''\n\n self.compute_default_params()\n\n def compute_default_params(self):\n '''\n Does cheap calculations to return parameter values\n\n Args:\n None\n\n Returns:\n None\n\n '''\n # reshape lambdas\n self.lambdas = self.lambdas.reshape(self.lambdas.shape[0], 1)\n # cast integers as integers\n self.S = int(self.S)\n self.T = int(self.T)\n self.J = len(self.lambdas)\n\n # get parameters of elliptical utility function\n self.b_ellipse, self.upsilon = elliptical_u_est.estimation(\n self.frisch, self.ltilde)\n # determine length of budget window from individual income tax\n # parameters passed in\n self.BW = self.etr_params.shape[0]\n # Find number of economically active periods of life\n self.E = int(\n self.starting_age * (self.S / (self.ending_age -\n self.starting_age)))\n # Find rates in model periods from annualized rates\n self.beta = (\n 1 / (rate_conversion(1 / self.beta_annual - 1,\n self.starting_age, self.ending_age,\n self.S) + 1))\n self.delta = (\n -1 * rate_conversion(-1 * self.delta_annual,\n self.starting_age, self.ending_age,\n self.S))\n self.g_y = rate_conversion(self.g_y_annual, self.starting_age,\n self.ending_age, self.S)\n\n # set fraction of income taxes from payroll to zero initially\n # will be updated when function tax function parameters\n if self.T + self.S > self.BW:\n self.frac_tax_payroll = np.append(\n self.frac_tax_payroll,\n np.ones(self.T + self.S - self.BW)\n * self.frac_tax_payroll[-1])\n\n # Extend parameters that may vary over the time path\n tp_param_list = [\n 'alpha_G', 'alpha_T', 'Z', 'world_int_rate_annual',\n 'delta_tau_annual', 'cit_rate',\n 'adjustment_factor_for_cit_receipts', 'tau_bq',\n 'tau_payroll', 'h_wealth', 'm_wealth', 'p_wealth',\n 'retirement_age', 'replacement_rate_adjust', 'zeta_D',\n 'zeta_K']\n for item in tp_param_list:\n this_attr = getattr(self, item)\n if this_attr.ndim > 1:\n this_attr = np.squeeze(this_attr, axis=1)\n # the next if statement is a quick fix to avoid having to\n # update all these time varying parameters if change T or S\n # ideally, the default json values are read in again and the\n # extension done is done again here with those defaults and\n # the new T and S values...\n if this_attr.size > self.T + self.S:\n this_attr = this_attr[:self.T + self.S]\n 
this_attr = np.concatenate((\n this_attr, np.ones((self.T + self.S - this_attr.size)) *\n this_attr[-1]))\n setattr(self, item, this_attr)\n # Deal with tax parameters that maybe age and time specific\n tax_params_to_TP = [\n 'tau_c', 'etr_params', 'mtrx_params', 'mtry_params']\n for item in tax_params_to_TP:\n tax_to_set = getattr(self, item)\n if tax_to_set.size == 1:\n setattr(self, item, np.ones((self.T + self.S, self.S,\n self.J)) * tax_to_set)\n elif tax_to_set.ndim == 3:\n if tax_to_set.shape[0] > self.T + self.S:\n tax_to_set = tax_to_set[:self.T + self.S, :, :]\n if tax_to_set.shape[0] < self.T + self.S:\n tax_to_set = np.append(\n tax_to_set[:, :, :], np.tile(\n tax_to_set[-1, :, :],\n (self.T + self.S - tax_to_set.shape[0], 1, 1)),\n axis=0)\n if tax_to_set.shape[1] > self.S:\n tax_to_set = tax_to_set[:, :self.S, :]\n if item == 'tau_c':\n if tax_to_set.shape[2] > self.J:\n tax_to_set = tax_to_set[:, :, :self.J]\n setattr(self, item, tax_to_set)\n else:\n print('please give a ' + item +\n ' that is a single element or 3-D array')\n assert False\n\n # Try to deal with size of eta. It may vary by S, J, T, but\n # want to allow user to enter one that varies by only S, S and J,\n # S and T, or T and S and J.\n eta_to_set = getattr(self, 'eta')\n # this is the case that vary only by S\n if eta_to_set.ndim == 1:\n assert eta_to_set.shape[0] == self.S\n eta_to_set = np.tile(\n (np.tile(eta_to_set.reshape(self.S, 1), (1, self.J)) /\n self.J).reshape(1, self.S, self.J),\n (self.T + self.S, 1, 1))\n # this could be where vary by S and J or T and S\n elif eta_to_set.ndim == 2:\n # case if S by J input\n if eta_to_set.shape[0] == self.S:\n eta_to_set = np.tile(\n eta_to_set.reshape(1, self.S, self.J),\n (self.T + self.S, 1, 1))\n eta_to_set = eta_to_set = np.concatenate(\n (eta_to_set,\n np.tile(eta_to_set[-1, :, :].reshape(1, self.S, self.J),\n (self.S, 1, 1))), axis=0)\n # case if T by S input\n elif eta_to_set.shape[0] == self.T:\n eta_to_set = (np.tile(\n eta_to_set.reshape(self.T, self.S, 1),\n (1, 1, self.J)) / self.J)\n eta_to_set = eta_to_set = np.concatenate(\n (eta_to_set,\n np.tile(eta_to_set[-1, :, :].reshape(1, self.S, self.J),\n (self.S, 1, 1))), axis=0)\n else:\n print('Eta dimensions are: ', self.eta.shape)\n print('please give an eta that is either SxJ or TxS')\n assert False\n # this is the case where vary by S, J, T\n elif eta_to_set.ndim == 3:\n eta_to_set = eta_to_set = np.concatenate(\n (eta_to_set,\n np.tile(eta_to_set[-1, :, :].reshape(1, self.S, self.J),\n (self.S, 1, 1))), axis=0)\n setattr(self, 'eta', eta_to_set)\n\n # make sure zeta matrix sums to one (e.g., default off due to rounding)\n self.zeta = self.zeta / self.zeta.sum()\n\n # open economy parameters\n self.world_int_rate = rate_conversion(\n self.world_int_rate_annual, self.starting_age,\n self.ending_age, self.S)\n\n # set period of retirement\n self.retire = (np.round(((self.retirement_age -\n self.starting_age) * self.S) /\n 80.0) - 1).astype(int)\n\n # Calculations for business income taxes\n # at some point, we will want to make Cost of Capital Calculator\n # a dependency to compute tau_b\n # this adjustment factor has as the numerator CIT receipts/GDP\n # and as the denominator CIT receipts/GDP from the\n # model with baseline parameterization and no adjustment to the\n # CIT_rate\n self.tau_b = (self.cit_rate * self.c_corp_share_of_assets *\n self.adjustment_factor_for_cit_receipts)\n self.delta_tau = (\n -1 * rate_conversion(-1 * self.delta_tau_annual,\n self.starting_age, self.ending_age,\n 
self.S))\n\n # for constant demographics\n if self.constant_demographics:\n self.g_n_ss = 0.0\n self.g_n = np.zeros(self.T + self.S)\n surv_rate1 = np.ones((self.S, )) # prob start at age S\n surv_rate1[1:] = np.cumprod(self.surv_rate[:-1], dtype=float)\n # number of each age alive at any time\n omega_SS = np.ones(self.S) * surv_rate1\n self.omega_SS = omega_SS / omega_SS.sum()\n self.imm_rates = np.zeros((self.T + self.S, self.S))\n self.omega = np.tile(np.reshape(self.omega_SS, (1, self.S)),\n (self.T + self.S, 1))\n self.omega_S_preTP = self.omega_SS\n\n # Interpolate chi_n and create omega_SS_80 if necessary\n if self.S < 80 and len(self.chi_n) == 80:\n self.age_midp_80 = np.linspace(20.5, 99.5, 80)\n self.chi_n_interp = si.interp1d(self.age_midp_80,\n np.squeeze(self.chi_n),\n kind='cubic')\n self.newstep = 80.0 / self.S\n self.age_midp_S = np.linspace(20 + 0.5 * self.newstep,\n 100 - 0.5 * self.newstep,\n self.S)\n self.chi_n = self.chi_n_interp(self.age_midp_S)\n\n # Create time series of stationarized UBI transfers\n self.ubi_nom_array = self.get_ubi_nom_objs()\n\n def get_ubi_nom_objs(self):\n '''\n Generate time series of nominal SxJ UBI household matrix and aggregate\n UBI expenditure over necessary time periods. Also generate steady-state\n versions\n\n Args:\n self: OG-Core Specifications class object\n\n Returns:\n ubi_nom_array (array): T+S x S x J array time series of UBI\n transfers in dollars for each type-j age-s household in every\n period t\n '''\n # Get matrices of number of children 0-17, number of dependents 18-20,\n # number of adults 21-64, and number of seniors >= 65 from\n # OG-Core-Calibration package\n ubi_num_017_mat = 1.1 * np.ones((self.S, self.J))\n ubi_num_1864_mat = 0.85 * np.ones((self.S, self.J))\n ubi_num_65p_mat = 0.15 * np.ones((self.S, self.J))\n\n # Calculate the UBI transfers to each household type in the first\n # period t=0\n ubi_nom_init = np.tile(np.reshape(np.minimum(\n (self.ubi_nom_017 * ubi_num_017_mat +\n self.ubi_nom_1864 * ubi_num_1864_mat +\n self.ubi_nom_65p * ubi_num_65p_mat), self.ubi_nom_max),\n (1, self.S, self.J)), (self.T + self.S, 1, 1))\n\n # Calculate steady-state and transition path of stationary individual\n # household UBI payments and stationary aggregate UBI outlays\n if self.ubi_growthadj or self.g_y_annual == 0:\n # If ubi_growthadj=True or if g_y_annual<0, then ubi_arr is\n # just a copy of the initial UBI matrix for T periods.\n ubi_nom_array = ubi_nom_init\n else:\n # If ubi_growthadj=False, and g_y_annual>=0, then must divide\n # by e^{g_y t} every period, then set the steady-state matrix\n # to its value close to zero at t=T\n ubi_nom_array = np.zeros_like(ubi_nom_init)\n discount_factor = np.exp(\n self.g_y * np.linspace(0, self.T, self.T + 1))\n ubi_nom_array[:self.T + 1, :, :] = (\n ubi_nom_init[:self.T + 1, :, :] /\n discount_factor.reshape(discount_factor.shape[0], 1, 1))\n ubi_nom_array[self.T + 1:, :, :] = ubi_nom_array[self.T, :, :]\n\n return ubi_nom_array\n\n def update_specifications(self, revision, raise_errors=True):\n '''\n Updates parameter specification with values in revision\n dictionary.\n\n Args:\n revision (dict): dictionary or JSON string with one or more\n `PARAM: VALUE` pairs\n raise_errors (boolean):\n if True (the default), raises ValueError when\n `parameter_errors` exists;\n if False, does not raise ValueError when\n `parameter_errors` exists and leaves error\n handling to caller of the update_specifications\n method.\n\n Returns:\n None\n\n Raises:\n ValueError: if raise_errors is 
True AND\n `_validate_parameter_names_types` generates errors OR\n `_validate_parameter_values` generates errors.\n\n Notes:\n Given a reform dictionary, typical usage of the\n Specifications class is as follows::\n >>> specs = Specifications()\n >>> specs.update_specifications(revision)\n An example of a multi-parameter specification is as follows::\n >>> revision = {\n frisch: [0.03]\n }\n\n '''\n if not (isinstance(revision, dict) or isinstance(revision, str)):\n raise ValueError(\n 'ERROR: revision is not a dictionary or string')\n self.adjust(revision, raise_errors=raise_errors)\n self.compute_default_params()\n\n\ndef revision_warnings_errors(spec_revision):\n '''\n Generate warnings and errors for OG-Core parameter specifications\n\n Args:\n spec_revision (dict): dictionary suitable for use with the\n `Specifications.update_specifications method`.\n\n Returns:\n rtn_dict (dict): with endpoint specific warning and error messages\n\n '''\n rtn_dict = {'warnings': '', 'errors': ''}\n spec = Specifications()\n spec.update_specifications(spec_revision, raise_errors=False)\n if spec._errors:\n rtn_dict['errors'] = spec._errors\n return rtn_dict\n"
] | [
[
"numpy.zeros_like",
"numpy.cumprod",
"numpy.reshape",
"numpy.zeros",
"numpy.minimum",
"numpy.round",
"numpy.ones",
"numpy.tile",
"numpy.linspace",
"numpy.squeeze"
]
] |
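
A recurring move in `compute_default_params` above is extending a time-varying parameter path to length `T + S` by repeating its terminal value (and trimming if it is too long). A minimal sketch of that rule in isolation, under the assumption that the input is a 1-D (or squeezable) series; the helper name is hypothetical:

```python
import numpy as np

def extend_to_T_plus_S(series, T, S):
    """Pad or trim a 1-D parameter path to length T + S, repeating the
    terminal value, as the tp_param_list loop above does."""
    series = np.squeeze(np.asarray(series, dtype=float))
    if series.size > T + S:
        return series[:T + S]
    pad = np.ones(T + S - series.size) * series[-1]
    return np.concatenate((series, pad))

path = extend_to_T_plus_S([0.02, 0.03, 0.04], T=5, S=3)
print(path)  # [0.02 0.03 0.04 0.04 0.04 0.04 0.04 0.04]
```
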
Howal/pyc_repo | [
"0d50d03967b78a089494f7f98b57c6b6f6fc8e81"
] | [
"pose_ae/symbols/archives_20191202/posenet_v1_hourglass4_relation_cat_sqrtdim.py"
] | [
"import mxnet as mx\nimport numpy as np\nfrom common.lib.utils.symbol import Symbol\nfrom common.backbone.hourglass_v1 import hourglass_v1, conv_sym_wrapper, CurrentBN\nfrom common.gpu_metric import *\nfrom common.operator_py.select_part import *\nfrom common.operator_py.monitor_op import *\n\nclass posenet_v1_hourglass4_relation_cat_sqrtdim(Symbol):\n def __init__(self, FP16=False):\n \"\"\"\n Use __init__ to define parameter network needs\n \"\"\"\n # FP16 is not used for now\n self.FP16 = FP16\n self.init_pre_list = []\n self.init_hourglass_list = []\n self.cfg = None\n\n def get_inside_outside_loss(self, feature, keypoint_visible, keypoint_location, batch_size, max_persons, num_keypoint_cls, prefix):\n # visible_keypoint_num: [N, P, 1]\n visible_keypoint_num = keypoint_visible.sum(axis=2, keepdims=True)\n # visible_person: [N, P, 1]\n visible_person = visible_keypoint_num > 0\n # visible_person_t: [N, 1, P]\n visible_person_t = mx.sym.transpose(visible_person, axes=(0, 2, 1))\n # visible_person_pair: [N, P, P]\n eye_mat = mx.sym.eye(max_persons).reshape(shape=(1, max_persons, max_persons))\n visible_person_pair = mx.sym.broadcast_mul(mx.sym.broadcast_mul(visible_person, visible_person_t), 1-eye_mat)\n # visible_person_num: [N, 1, 1]\n visible_person_num = visible_person.sum(axis=1)\n\n # feature: [N, K, H, W, C]\n keypoint_feats = mx.sym.gather_nd(feature, keypoint_location)\n\n # keypoint_feat: [N, P, K, C]\n keypoint_feats = mx.sym.reshape(keypoint_feats, shape=(batch_size, -1, num_keypoint_cls, 0), name=prefix + \"_keypoint_feats_reshape\")\n\n keypoint_visible_4d = mx.sym.expand_dims(keypoint_visible, axis=3)\n\n # masked unvalid keypoint_feats\n keypoint_feats = mx.sym.broadcast_mul(keypoint_feats, keypoint_visible_4d, name='masked_keypoint_feats')\n\n # mean keypoint_feat: [N, P, C]\n mean_keypoint_feats = mx.sym.broadcast_div(mx.sym.sum(keypoint_feats, axis=2),\n mx.sym.maximum(1, visible_keypoint_num))\n\n # mean keypoint_feat: [N, P, 1, C]\n mean_keypoint_feats = mx.sym.expand_dims(mean_keypoint_feats, axis=2)\n\n # calc outside loss\n # mean_keypoint_feats_t: [N, 1, P, C]\n mean_keypoint_feats_t = mx.sym.transpose(mean_keypoint_feats, axes=(0, 2, 1, 3))\n\n # mean_diff: [N, P, P, C]\n mean_sqr_diff = mx.sym.broadcast_sub(mean_keypoint_feats, mean_keypoint_feats_t, name=prefix + '_braodcast_sub_mean_sqr_diff')\n mean_sqr_diff = mx.sym.square(mean_sqr_diff).sum(axis=3)\n\n # outside_loss: [N, P, P]\n outside_loss = mx.sym.exp(-mean_sqr_diff)\n outside_loss = outside_loss * visible_person_pair\n\n # outside_loss: [N, P*P]\n outside_loss = outside_loss.reshape(shape=(0, -1))\n # outside_loss: [N]\n norm_scale = mx.sym.maximum(1, mx.sym.square(visible_person_num) - visible_person_num).reshape(shape=(-1))\n outside_loss = outside_loss.sum(axis=1) / norm_scale\n # outside_loss = mx.symbol.Custom(op_type='monitor', data=outside_loss, nickname=prefix + '_outside_loss')\n\n # instance_diff_sqr: [N, P, K, 1]\n instance_sqr_diff = mx.sym.broadcast_sub(keypoint_feats, mean_keypoint_feats, name=prefix + '_broadcast_sub_instance_sqr_diff')\n instance_sqr_diff = mx.sym.square(instance_sqr_diff).sum(axis=3)\n\n instance_sqr_diff = instance_sqr_diff * keypoint_visible\n\n # inside loss\n inside_loss = instance_sqr_diff.sum(axis=2, keepdims=True) / mx.sym.maximum(1, visible_keypoint_num)\n inside_loss = inside_loss.sum(axis=1) / mx.sym.maximum(1, visible_person_num)\n\n outside_loss_mean = mx.sym.mean(outside_loss, name=\"outside_loss_mean\")\n inside_loss_mean = mx.sym.mean(inside_loss, 
name=\"inside_loss_mean\")\n\n return outside_loss_mean, inside_loss_mean\n\n # key_data: [N, Dim, num_part, K]\n # query_data: [N, Dim, H, W]\n def relation_module(self, key_data, query_data, affinity_dim, value_dim, output_dim, num_part, top_k,\n prefix=\"\"):\n # query_embd: [N, Aff_Dim, H, W]\n query_embd = mx.sym.Convolution(query_data, kernel=(1, 1), stride=(1, 1), num_filter=affinity_dim,\n no_bias=True, name=prefix + \"_query_embed\")\n\n # key_embd: [N, Aff_Dim, num_part2, K]\n key_embd = mx.sym.Convolution(key_data, kernel=(1, 1), stride=(1, 1), num_filter=affinity_dim,\n no_bias=True, name=prefix + \"_key_embed\")\n\n # value_embd: [N, Val_Dim, num_part2, K]\n value_embd = mx.sym.Convolution(key_data, kernel=(1, 1), stride=(1, 1), num_filter=value_dim,\n no_bias=True, name=prefix + \"_val_embed\")\n\n # query_embd_reshape: [N, H*W, Aff_Dim]\n query_embd_reshape = query_embd.reshape(shape=(0, 0, -1)) # [N, Aff_dim, H*W]\n query_embd_reshape = mx.sym.transpose(query_embd_reshape, axes=(0, 2, 1)) # [N, H*W, Aff_dim]\n\n # key_embd_reshape: [N, num_part2 * K, Aff_Dim]\n key_embd_reshape = mx.sym.transpose(key_embd, axes=(0, 2, 3, 1)) # [N, num_part2, K, Aff_Dim]\n key_embd_reshape = key_embd_reshape.reshape(shape=(0, num_part * top_k, affinity_dim))\n\n # value_embd_reshape: [N * num_part2, K, Val_Dim]\n value_embd_reshape = mx.sym.transpose(value_embd, axes=(0, 2, 3, 1)) # [N, num_part2, K, Val_Dim]\n value_embd_reshape = value_embd_reshape.reshape(shape=(-1, top_k, value_dim),\n name=prefix + \"_value_embd_reshape\")\n\n # aff_mat = [N, H*W, num_part2*K]\n aff_mat = mx.sym.batch_dot(lhs=query_embd_reshape, rhs=key_embd_reshape, transpose_a=False,\n transpose_b=True, name=prefix + '_aff_mat_batch_dot') / (affinity_dim ** 0.5)\n # aff_mat = [N, H*W, num_part2, K]\n aff_mat = aff_mat.reshape(shape=(0, 0, -4, num_part, top_k)) #\n\n # aff_mat = mx.symbol.Custom(op_type='monitor', data=aff_mat, nickname=\"aff_mat\")\n\n # aff_mat_norm = [N, H*W, num_part2, K]\n aff_mat_norm = mx.sym.softmax(aff_mat, axis=3, name=prefix + \"_aff_mat_softmax\")\n # aff_mat_norm = mx.symbol.Custom(op_type='monitor', data=aff_mat_norm, nickname=\"aff_mat_softmax\")\n\n # aff_mat_norm: [N, num_part2, H*W, K]\n aff_mat_norm = mx.sym.transpose(aff_mat_norm, axes=(0, 2, 1, 3))\n # aff_mat_norm: [N* num_part2, H*W, K]\n aff_mat_norm = mx.sym.reshape(aff_mat_norm, shape=(-3, 0, 0))\n\n # relation_feat: [N*num_part2, H*W, Val_Dim]\n relation_feat = mx.sym.batch_dot(lhs=aff_mat_norm, rhs=value_embd_reshape, transpose_a=False,\n transpose_b=False)\n # relation_feat: [N * num_part2, Val_dim, H*W]\n relation_feat = mx.sym.swapaxes(relation_feat, 1, 2)\n\n # relation_feat: [N, num_part2, Val_dim, H*W]\n relation_feat = mx.sym.reshape(relation_feat, shape=(-4, -1, num_part, value_dim, 0))\n # relation_feat: [N, num_part2 * Val_dim, H*W]\n relation_feat = mx.sym.reshape(relation_feat, shape=(0, -3, 0))\n # relation_feat: [N, output_dim, H*W]\n relation_feat = mx.sym.reshape(relation_feat, shape=(0, 0, 0, 1))\n relation_feat = mx.sym.Convolution(relation_feat, kernel=(1, 1), stride=(1, 1), num_filter=output_dim,\n name=prefix + \"_fusion\")\n # relation_feat = mx.symbol.Custom(op_type='monitor', data=relation_feat, nickname=\"relation_feat_fusion\")\n\n relation_feat = mx.sym.Activation(relation_feat, act_type='relu')\n # relation_feat: [N, output_dim, H * W]\n relation_feat = mx.sym.reshape_like(relation_feat, query_data)\n return relation_feat\n\n def get_stacked_hourglass(self, data, num_stack=4, in_dim=256, 
out_dim=68, increase_dim=128,\n bn=CurrentBN(False, 0.9), record=[], num_parts=17, cfg=None, is_train=False):\n\n det_preds = []\n association_preds = []\n for i in range(num_stack):\n body = hourglass_v1(data=data,\n num_stage=4,\n in_dim=in_dim,\n increase_dim=increase_dim,\n bn=bn,\n prefix=\"hg{}\".format(i + 1),\n record=record)\n body = conv_sym_wrapper(data=body, prefix=\"hg{}_out1\".format(i + 1),\n num_filter=in_dim, kernel=(3, 3), stride=(1, 1), pad=(1, 1),\n bn=bn, record=record)\n feature = conv_sym_wrapper(data=body, prefix=\"hg{}_out2\".format(i + 1),\n num_filter=in_dim, kernel=(3, 3), stride=(1, 1), pad=(1, 1),\n bn=bn, record=record)\n out = conv_sym_wrapper(data=feature, prefix=\"hg{}_out3\".format(i + 1),\n num_filter=out_dim, kernel=(1, 1), stride=(1, 1), pad=(0, 0),\n bn=bn, relu=False, record=record)\n #preds.append(out)\n d_pred = mx.sym.slice_axis(data=out, axis=1, begin=0, end=num_parts) # shape, [N, num_stack, num_parts, H, W]\n a_pred = mx.sym.slice_axis(data=out, axis=1, begin=num_parts, end=2*num_parts) # shape, [N, num_stack, num_parts, H, W]\n\n det_preds.append(d_pred)\n association_preds.append(a_pred)\n\n if i != num_stack - 1:\n data_preds = conv_sym_wrapper(data=out, prefix=\"hg{}_merge_preds\".format(i + 1),\n num_filter=in_dim, kernel=(1, 1), stride=(1, 1), pad=(0, 0),\n bn=bn, relu=False, record=record)\n data_feats = conv_sym_wrapper(data=feature, prefix=\"hg{}_merge_feats\".format(i + 1),\n num_filter=in_dim, kernel=(1, 1), stride=(1, 1), pad=(0, 0),\n bn=bn, relu=False, record=record)\n data = data + data_preds + data_feats\n\n\n for i in range(cfg.pose.head_num):\n prefix_name=\"head_{}\".format(i)\n\n top_k = cfg.pose.top_k\n\n pose_sensitive_feature = mx.sym.Convolution(data=feature, kernel=(1, 1), stride=(1, 1),\n num_filter=num_parts * cfg.pose.sensitive_dim,\n name=prefix_name + \"_sensitve_conv\")\n # select_part_indices = self.select_part(det_pred, feat_h, feat_w, 2, 50, prefix=\"head_{}\".format(head_idx))\n select_part_indices = mx.sym.Custom(op_type=\"select_part\", kernel=cfg.pose.nms, top_k=top_k, det_score=d_pred,\n name=prefix_name + \"_select_part\")\n # data_reshape: [N, num_parts, Dim, H, W]\n data_reshape = mx.sym.reshape(pose_sensitive_feature, shape=(0, -4, num_parts, -1, 0, 0))\n # data_reshape: [N, num_parts, H, W, Dim]\n data_reshape = mx.sym.transpose(data_reshape, axes=(0, 1, 3, 4, 2))\n # part_feat: [N, num_part, K, Dim]\n part_feat = mx.sym.gather_nd(data_reshape, indices=select_part_indices, name=prefix_name + \"_gather_nd\")\n # part_feat: [N, Dim, num_part, K]\n part_feat = mx.sym.transpose(part_feat, axes=(0, 3, 1, 2))\n # relation_feat: [N, num_parts*Dim, H, W]\n relation_feat = self.relation_module(key_data=part_feat, query_data=feature,\n affinity_dim=cfg.pose.aff_dim, value_dim=cfg.pose.val_dim,\n output_dim=in_dim,\n num_part=num_parts, top_k=top_k, prefix=prefix_name)\n\n # relation_feat = mx.symbol.Custom(op_type='monitor', data=relation_feat, nickname=\"relation_feat\")\n\n feature = mx.sym.concat(feature, relation_feat, dim=1)\n feature = mx.sym.Convolution(data=feature, kernel=(1,1), stride=(1,1), num_filter=256, name=prefix_name+\"_after_concat_1x1\")\n\n d_pred = mx.sym.Convolution(data=feature, kernel=(1,1), stride=(1,1), num_filter=num_parts, name=prefix_name+\"_det\")\n a_pred = mx.sym.Convolution(data=feature, kernel=(1, 1), stride=(1,1), num_filter=num_parts, name=prefix_name + \"_association\")\n\n det_preds.append(d_pred)\n association_preds.append(a_pred)\n\n return det_preds, 
association_preds\n\n def get_det_loss(self, det_pred, heatmaps, masks):\n det_loss = mx.symbol.square(data=(det_pred - heatmaps))\n masks_4d = mx.symbol.expand_dims(masks, axis=1)\n det_loss = mx.symbol.broadcast_mul(det_loss, masks_4d).mean()\n\n return det_loss\n\n\n def get_symbol(self, cfg, is_train=True):\n # config alias for convenient\n in_dim = 256\n increase_dim = 128\n out_dim = 68\n num_stack = 4\n self.cfg = cfg\n num_parts = cfg.dataset.NUM_PARTS\n max_persons = cfg.dataset.MAX_PERSONS\n\n # input init\n if is_train:\n data = mx.sym.Variable(name=\"data\") # img, [N, 3, H ,W]\n heatmaps = mx.sym.Variable(name=\"heatmaps\") # heatmaps of parts, [N, num_parts, H/4, W/4], REMARK 1/4 scale\n masks = mx.sym.Variable(name=\"masks\") # mask of crowds in coco, [N, H/4, W/4], REMARK 1/4 scale\n # keypoints = mx.sym.Variable(name='keypoints') # coordinates of keypoints, [N, max_persons, num_parts, 2], REMARK 1/4 scale\n keypoint_visible = mx.sym.Variable(name='keypoint_visible') # [N, max_persons, num_parts]\n keypoint_location = mx.sym.Variable(name='keypoint_location') # [N, max_person, num_parts, 4]\n keypoint_location = mx.sym.transpose(keypoint_location, axes=(3, 0, 1, 2), name=\"keypoint_location_transpose\")\n # prepare BN func, this one can be easily replaced\n bn = CurrentBN(cfg.network.use_bn_type, 0.9)\n else:\n data = mx.sym.Variable(name=\"data\") # img, [N, 3, H ,W]\n # prepare BN func, this one can be easily replaced\n bn = CurrentBN(cfg.network.use_bn_type, 0.9, use_global_stats=False)\n\n # pre\n init_pre_list = []\n data = conv_sym_wrapper(data=data, prefix=\"pre1\", num_filter=64, kernel=(7, 7),\n stride=(2, 2), pad=(3, 3), bn=bn, record=init_pre_list)\n data = conv_sym_wrapper(data=data, prefix=\"pre2\", num_filter=128, kernel=(3, 3),\n stride=(1, 1), pad=(1, 1), bn=bn, record=init_pre_list)\n data = mx.symbol.Pooling(data=data, kernel=(2, 2), stride=(2, 2), pad=(0, 0), pool_type='max')\n data = conv_sym_wrapper(data=data, prefix=\"pre3\", num_filter=128, kernel=(3, 3),\n stride=(1, 1), pad=(1, 1), bn=bn, record=init_pre_list)\n data = conv_sym_wrapper(data=data, prefix=\"pre4\", num_filter=in_dim, kernel=(3, 3),\n stride=(1, 1), pad=(1, 1), bn=bn, record=init_pre_list)\n self.init_pre_list = init_pre_list\n\n # hourglass\n # preds->shape [N, num_stack, C=out_dim, H, W]\n init_hourglass_list = []\n det_preds, association_preds = self.get_stacked_hourglass(data=data, num_stack=num_stack, in_dim=in_dim, out_dim=out_dim,\n increase_dim=increase_dim, bn=bn, record=init_hourglass_list,\n num_parts=num_parts, cfg=cfg, is_train=is_train)\n self.init_hourglass_list = init_hourglass_list\n\n # preds = mx.sym.Custom(data=preds, op_type='monitor', nickname='preds')\n # calc_loss\n if is_train:\n # calc detection loss\n d_loss = []\n for i in range(len(det_preds)):\n d_loss.append(self.get_det_loss(det_preds[i], heatmaps, masks))\n\n\n # stack all stage\n d_loss = mx.sym.stack(*d_loss, axis=1) # shape, [N, num_stack]\n\n d_losses = mx.symbol.mean(data=d_loss, axis=0) # shape, [num_stack]\n\n # pick keypoint feats\n outside_loss_list = []\n inside_loss_list = []\n\n\n for i in range(len(association_preds)):\n # a_pred: [N, K, H, W]\n a_pred = association_preds[i]\n # a_pred = mx.sym.Custom(data=a_pred, op_type='monitor', nickname='stack_{}_a_pred'.format(i))\n # a_pred:[N, K, H, W, 1]\n a_pred = a_pred.reshape(shape=(0, 0, 0, 0, 1))\n # a_pred = mx.sym.Custom(data=a_pred, op_type='monitor', nickname='stack_{}_a_pred_reshape'.format(i))\n\n outside_loss, inside_loss = 
self.get_inside_outside_loss(feature=a_pred,\n keypoint_visible=keypoint_visible,\n keypoint_location=keypoint_location,\n batch_size=cfg.TRAIN.BATCH_IMAGES,\n num_keypoint_cls=num_parts,\n max_persons=max_persons,\n prefix=\"stack_{}\".format(i))\n outside_loss_list.append(outside_loss * 0.5)\n inside_loss_list.append(inside_loss)\n\n outside_loss_all_stage = mx.sym.stack(*outside_loss_list)\n\n # stack all stage loss together\n outside_loss_all_stage = outside_loss_all_stage.mean()\n inside_loss_all_stage = mx.sym.stack(*inside_loss_list).mean()\n\n\n outside_loss_all_stage = mx.sym.MakeLoss(outside_loss_all_stage, grad_scale=1e-3, name='outside_loss')\n inside_loss_all_stage = mx.sym.MakeLoss(inside_loss_all_stage, grad_scale=1e-3, name='inside_loss')\n det_loss_all_stage = mx.sym.MakeLoss(mx.symbol.mean(data=d_losses), grad_scale=1.0, name='det_loss')\n\n output_list = [det_loss_all_stage, inside_loss_all_stage, outside_loss_all_stage]\n\n # get gpu metric\n if cfg.TRAIN.GPU_METRIC:\n for i in range(num_stack + cfg.pose.head_num):\n D_Loss = mx.sym.slice_axis(data=d_losses, axis=0, begin=i, end=i+1)\n output_list.extend(get_detection_loss(D_Loss))\n A_Loss_Inside = inside_loss_list[i]\n output_list.extend(get_association_loss_inside(A_Loss_Inside))\n A_Loss_Outside = outside_loss_list[i]\n output_list.extend(get_association_loss_outside(A_Loss_Outside))\n output_list.extend(get_det_max(mx.symbol.squeeze(mx.sym.slice_axis(data=det_preds[-1], axis=1, begin=3, end=4))))\n else:\n raise ValueError('No CPU metric is supported now!')\n\n group = mx.sym.Group(output_list)\n else:\n det_final = mx.symbol.BlockGrad(det_preds[-1], name=\"det_final\")\n association_final = mx.symbol.BlockGrad(association_preds[-1], name=\"association_final\")\n\n group = mx.sym.Group([det_final, association_final])\n\n self.sym = group\n return group\n\n def get_pred_names(self, is_train, gpu_metric=False):\n if is_train:\n pred_names = ['d_loss', 'a_loss_inside', 'a_loss_outside']\n if gpu_metric:\n for i in range(4 + self.cfg.pose.head_num):\n pred_names.append('D_Loss_{}'.format(i))\n pred_names.append('A_Loss_Inside_{}'.format(i))\n pred_names.append('A_Loss_Outside_{}'.format(i))\n\n pred_names.append('Det_Max')\n return pred_names\n else:\n pred_names = ['d_loss', 'a_loss_inside', 'a_loss_outside']\n\n def get_label_names(self):\n return ['preds']\n\n def init_weight_pre(self, cfg, arg_params, aux_params):\n for ele in self.init_pre_list:\n if '_conv' in ele:\n # pytorch's kaiming_uniform_\n weight_shape = self.arg_shape_dict['{}_weight'.format(ele)]\n fan_in = float(weight_shape[1]) * weight_shape[2] * weight_shape[3]\n bound = np.sqrt(6 / ((1 + 5) * fan_in))\n arg_params['{}_weight'.format(ele)] = mx.random.uniform(-bound, bound, shape=weight_shape)\n arg_params['{}_bias'.format(ele)] = mx.random.uniform(-bound, bound, shape=self.arg_shape_dict['{}_bias'.format(ele)])\n\n elif '_relu' in ele:\n continue\n else:\n raise ValueError('Layer {} init not inplemented'.format(ele))\n\n def init_weight_hourglass(self, cfg, arg_params, aux_params):\n for ele in self.init_hourglass_list:\n if '_conv' in ele:\n # pytorch's kaiming_uniform_\n weight_shape = self.arg_shape_dict['{}_weight'.format(ele)]\n fan_in = float(weight_shape[1]) * weight_shape[2] * weight_shape[3]\n bound = np.sqrt(6 / ((1 + 5) * fan_in))\n arg_params['{}_weight'.format(ele)] = mx.random.uniform(-bound, bound, shape=weight_shape)\n arg_params['{}_bias'.format(ele)] = mx.random.uniform(-bound, bound, 
shape=self.arg_shape_dict['{}_bias'.format(ele)])\n\n elif '_relu' in ele:\n continue\n else:\n raise ValueError('Layer {} init not inplemented'.format(ele))\n\n def init_weight_non_local(self, cfg, arg_params, aux_params):\n prefix_name = \"head_{}\"\n weight_names = ['key_embed', 'val_embed', 'query_embed', 'fusion', 'after_concat_1x1', 'det', 'association', 'sensitve_conv']\n bias_names = ['fusion', 'after_concat_1x1', 'det', 'association','sensitve_conv']\n for i in range(cfg.pose.head_num):\n for weight_name in weight_names:\n weight_name = prefix_name + \"_\" + weight_name\n weight_shape = self.arg_shape_dict[(weight_name + '_weight').format(i)]\n fan_in = float(weight_shape[1]) * weight_shape[2] * weight_shape[3]\n bound = np.sqrt(6 / ((1 + 5) * fan_in))\n\n if cfg.pose.param_init == 'normal':\n arg_params[(weight_name + '_weight').format(i)] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[\n (weight_name + '_weight').format(i)])\n else:\n arg_params[(weight_name + '_weight').format(i)] = mx.random.uniform(-bound, bound, shape=weight_shape)\n for bias_name in bias_names:\n bias_name = prefix_name + \"_\" + bias_name\n if cfg.pose.param_init == 'normal':\n arg_params[(bias_name + '_bias').format(i)] = mx.nd.zeros(shape=self.arg_shape_dict[(bias_name + '_bias').format(i)])\n else:\n arg_params[(bias_name + '_bias').format(i)] = mx.random.uniform(-bound, bound, self.arg_shape_dict[(bias_name + '_bias').format(i)])\n\n def init_weight(self, cfg, arg_params, aux_params):\n self.init_weight_pre(cfg, arg_params, aux_params)\n self.init_weight_hourglass(cfg, arg_params, aux_params)\n self.init_weight_non_local(cfg, arg_params, aux_params)\n"
] | [
[
"numpy.sqrt"
]
] |
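
The only listed API for the row above, `numpy.sqrt`, implements the init bound in `init_weight_*`: PyTorch's `kaiming_uniform_` with `a = sqrt(5)`, so `a**2 = 5` and the bound is `sqrt(6 / ((1 + 5) * fan_in)) = 1 / sqrt(fan_in)`. A small worked check of that computation (the helper name is hypothetical):

```python
import numpy as np

def kaiming_uniform_bound(weight_shape, a_squared=5):
    """Uniform init bound matching torch.nn.init.kaiming_uniform_ with
    a = sqrt(5), as used for the conv weights above:
    bound = sqrt(6 / ((1 + a^2) * fan_in))."""
    # fan_in for a conv weight of shape (out_ch, in_ch, kh, kw).
    fan_in = float(weight_shape[1]) * weight_shape[2] * weight_shape[3]
    return np.sqrt(6 / ((1 + a_squared) * fan_in))

# A 3x3 conv over 256 input channels: fan_in = 256 * 3 * 3 = 2304.
print(kaiming_uniform_bound((128, 256, 3, 3)))  # 1/sqrt(2304) ~= 0.0208
```
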
livenson/Cirq | [
"b2fa642895089fba385999d675ab65d57a53e0df"
] | [
"cirq-core/cirq/json_resolver_cache.py"
] | [
"# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom typing import Dict, TYPE_CHECKING\n\nfrom cirq.protocols.json_serialization import ObjectFactory\n\nif TYPE_CHECKING:\n import cirq.ops.pauli_gates\n import cirq.devices.unconstrained_device\n\n\[email protected]_cache()\ndef _class_resolver_dictionary() -> Dict[str, ObjectFactory]:\n import cirq\n from cirq.ops import raw_types\n import pandas as pd\n import numpy as np\n from cirq.devices.noise_model import _NoNoiseModel\n from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer\n from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata\n\n def _identity_operation_from_dict(qubits, **kwargs):\n return cirq.identity_each(*qubits)\n\n def single_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))\n\n def two_qubit_matrix_gate(matrix):\n if not isinstance(matrix, np.ndarray):\n matrix = np.array(matrix, dtype=np.complex128)\n return cirq.MatrixGate(matrix, qid_shape=(2, 2))\n\n def _parallel_gate_op(gate, qubits):\n return cirq.parallel_gate_op(gate, *qubits)\n\n import sympy\n\n return {\n 'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,\n 'AnyIntegerPowerGateFamily': cirq.AnyIntegerPowerGateFamily,\n 'AnyUnitaryGateFamily': cirq.AnyUnitaryGateFamily,\n 'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,\n 'BitFlipChannel': cirq.BitFlipChannel,\n 'BitstringAccumulator': cirq.work.BitstringAccumulator,\n 'BooleanHamiltonian': cirq.BooleanHamiltonian,\n 'CCNotPowGate': cirq.CCNotPowGate,\n 'CCXPowGate': cirq.CCXPowGate,\n 'CCZPowGate': cirq.CCZPowGate,\n 'Circuit': cirq.Circuit,\n 'CircuitOperation': cirq.CircuitOperation,\n 'ClassicallyControlledOperation': cirq.ClassicallyControlledOperation,\n 'CliffordState': cirq.CliffordState,\n 'CliffordTableau': cirq.CliffordTableau,\n 'CNotPowGate': cirq.CNotPowGate,\n 'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,\n 'ControlledGate': cirq.ControlledGate,\n 'ControlledOperation': cirq.ControlledOperation,\n 'CrossEntropyResult': CrossEntropyResult,\n 'CrossEntropyResultDict': CrossEntropyResultDict,\n 'CSwapGate': cirq.CSwapGate,\n 'CXPowGate': cirq.CXPowGate,\n 'CZPowGate': cirq.CZPowGate,\n 'DensePauliString': cirq.DensePauliString,\n 'DepolarizingChannel': cirq.DepolarizingChannel,\n 'DeviceMetadata': cirq.DeviceMetadata,\n 'Duration': cirq.Duration,\n 'FrozenCircuit': cirq.FrozenCircuit,\n 'FSimGate': cirq.FSimGate,\n 'GateFamily': cirq.GateFamily,\n 'GateOperation': cirq.GateOperation,\n 'Gateset': cirq.Gateset,\n 'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,\n 'GlobalPhaseGate': cirq.GlobalPhaseGate,\n 'GlobalPhaseOperation': cirq.GlobalPhaseOperation,\n 'GridDeviceMetadata': cirq.GridDeviceMetadata,\n 'GridInteractionLayer': GridInteractionLayer,\n 'GridParallelXEBMetadata': 
GridParallelXEBMetadata,\n 'GridQid': cirq.GridQid,\n 'GridQubit': cirq.GridQubit,\n 'HPowGate': cirq.HPowGate,\n 'ISwapPowGate': cirq.ISwapPowGate,\n 'IdentityGate': cirq.IdentityGate,\n 'InitObsSetting': cirq.work.InitObsSetting,\n 'KeyCondition': cirq.KeyCondition,\n 'KrausChannel': cirq.KrausChannel,\n 'LinearDict': cirq.LinearDict,\n 'LineQubit': cirq.LineQubit,\n 'LineQid': cirq.LineQid,\n 'LineTopology': cirq.LineTopology,\n 'MatrixGate': cirq.MatrixGate,\n 'MixedUnitaryChannel': cirq.MixedUnitaryChannel,\n 'MeasurementKey': cirq.MeasurementKey,\n 'MeasurementGate': cirq.MeasurementGate,\n '_MeasurementSpec': cirq.work._MeasurementSpec,\n 'Moment': cirq.Moment,\n 'MutableDensePauliString': cirq.MutableDensePauliString,\n 'MutablePauliString': cirq.MutablePauliString,\n '_NoNoiseModel': _NoNoiseModel,\n 'NamedQubit': cirq.NamedQubit,\n 'NamedQid': cirq.NamedQid,\n 'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,\n 'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,\n 'OpIdentifier': cirq.OpIdentifier,\n 'ParamResolver': cirq.ParamResolver,\n 'ParallelGate': cirq.ParallelGate,\n 'ParallelGateFamily': cirq.ParallelGateFamily,\n 'PauliMeasurementGate': cirq.PauliMeasurementGate,\n 'PauliString': cirq.PauliString,\n 'PauliStringPhasor': cirq.PauliStringPhasor,\n 'PauliStringPhasorGate': cirq.PauliStringPhasorGate,\n '_PauliX': cirq.ops.pauli_gates._PauliX,\n '_PauliY': cirq.ops.pauli_gates._PauliY,\n '_PauliZ': cirq.ops.pauli_gates._PauliZ,\n 'PhaseDampingChannel': cirq.PhaseDampingChannel,\n 'PhaseFlipChannel': cirq.PhaseFlipChannel,\n 'PhaseGradientGate': cirq.PhaseGradientGate,\n 'PhasedFSimGate': cirq.PhasedFSimGate,\n 'PhasedISwapPowGate': cirq.PhasedISwapPowGate,\n 'PhasedXPowGate': cirq.PhasedXPowGate,\n 'PhasedXZGate': cirq.PhasedXZGate,\n 'ProductState': cirq.ProductState,\n 'ProjectorString': cirq.ProjectorString,\n 'ProjectorSum': cirq.ProjectorSum,\n 'QasmUGate': cirq.circuits.qasm_output.QasmUGate,\n '_QubitAsQid': raw_types._QubitAsQid,\n 'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,\n 'RandomGateChannel': cirq.RandomGateChannel,\n 'TensoredConfusionMatrices': cirq.TensoredConfusionMatrices,\n 'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,\n 'ResetChannel': cirq.ResetChannel,\n 'Result': cirq.Result,\n 'Rx': cirq.Rx,\n 'Ry': cirq.Ry,\n 'Rz': cirq.Rz,\n 'SingleQubitCliffordGate': cirq.SingleQubitCliffordGate,\n 'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,\n 'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,\n 'StabilizerStateChForm': cirq.StabilizerStateChForm,\n 'StatePreparationChannel': cirq.StatePreparationChannel,\n 'SwapPowGate': cirq.SwapPowGate,\n 'SymmetricalQidPair': cirq.SymmetricalQidPair,\n 'SympyCondition': cirq.SympyCondition,\n 'TaggedOperation': cirq.TaggedOperation,\n 'TiltedSquareLattice': cirq.TiltedSquareLattice,\n 'TrialResult': cirq.Result, # keep support for Cirq < 0.11.\n 'TwoQubitGateTabulation': cirq.TwoQubitGateTabulation,\n '_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,\n 'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,\n 'VirtualTag': cirq.VirtualTag,\n 'WaitGate': cirq.WaitGate,\n # The formatter keeps putting this back\n # pylint: disable=line-too-long\n 'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,\n # pylint: enable=line-too-long\n '_XEigenState': cirq.value.product_state._XEigenState, # type: ignore\n 
'XPowGate': cirq.XPowGate,\n 'XXPowGate': cirq.XXPowGate,\n '_YEigenState': cirq.value.product_state._YEigenState, # type: ignore\n 'YPowGate': cirq.YPowGate,\n 'YYPowGate': cirq.YYPowGate,\n '_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore\n 'ZPowGate': cirq.ZPowGate,\n 'ZZPowGate': cirq.ZZPowGate,\n # Old types, only supported for backwards-compatibility\n 'IdentityOperation': _identity_operation_from_dict,\n 'ParallelGateOperation': _parallel_gate_op, # Removed in v0.14\n 'SingleQubitMatrixGate': single_qubit_matrix_gate,\n 'TwoQubitMatrixGate': two_qubit_matrix_gate,\n # not a cirq class, but treated as one:\n 'pandas.DataFrame': pd.DataFrame,\n 'pandas.Index': pd.Index,\n 'pandas.MultiIndex': pd.MultiIndex.from_tuples,\n 'sympy.Symbol': sympy.Symbol,\n 'sympy.Add': lambda args: sympy.Add(*args),\n 'sympy.Mul': lambda args: sympy.Mul(*args),\n 'sympy.Pow': lambda args: sympy.Pow(*args),\n 'sympy.GreaterThan': lambda args: sympy.GreaterThan(*args),\n 'sympy.StrictGreaterThan': lambda args: sympy.StrictGreaterThan(*args),\n 'sympy.LessThan': lambda args: sympy.LessThan(*args),\n 'sympy.StrictLessThan': lambda args: sympy.StrictLessThan(*args),\n 'sympy.Equality': lambda args: sympy.Equality(*args),\n 'sympy.Unequality': lambda args: sympy.Unequality(*args),\n 'sympy.Float': lambda approx: sympy.Float(approx),\n 'sympy.Integer': sympy.Integer,\n 'sympy.Rational': sympy.Rational,\n 'sympy.pi': lambda: sympy.pi,\n 'sympy.E': lambda: sympy.E,\n 'sympy.EulerGamma': lambda: sympy.EulerGamma,\n 'complex': complex,\n }\n"
] | [
[
"numpy.array"
]
] |
MarionLepert/stable-baselines | [
"f55ac2dec40a46457d00a49548fa8fd6c889569a"
] | [
"stable_baselines/ddpg/ddpg.py"
] | [
"from functools import reduce\nimport os\nimport time\nfrom collections import deque\nimport pickle\nimport warnings\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib as tc\nfrom mpi4py import MPI\n\nfrom stable_baselines import logger\nfrom stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\nfrom stable_baselines.common.vec_env import VecEnv\nfrom stable_baselines.common.mpi_adam import MpiAdam\nfrom stable_baselines.common.buffers import ReplayBuffer\nfrom stable_baselines.common.math_util import unscale_action, scale_action\nfrom stable_baselines.common.mpi_running_mean_std import RunningMeanStd\nfrom stable_baselines.ddpg.policies import DDPGPolicy\n\n\ndef normalize(tensor, stats):\n \"\"\"\n normalize a tensor using a running mean and std\n\n :param tensor: (TensorFlow Tensor) the input tensor\n :param stats: (RunningMeanStd) the running mean and std of the input to normalize\n :return: (TensorFlow Tensor) the normalized tensor\n \"\"\"\n if stats is None:\n return tensor\n return (tensor - stats.mean) / stats.std\n\n\ndef denormalize(tensor, stats):\n \"\"\"\n denormalize a tensor using a running mean and std\n\n :param tensor: (TensorFlow Tensor) the normalized tensor\n :param stats: (RunningMeanStd) the running mean and std of the input to normalize\n :return: (TensorFlow Tensor) the restored tensor\n \"\"\"\n if stats is None:\n return tensor\n return tensor * stats.std + stats.mean\n\n\ndef reduce_std(tensor, axis=None, keepdims=False):\n \"\"\"\n get the standard deviation of a Tensor\n\n :param tensor: (TensorFlow Tensor) the input tensor\n :param axis: (int or [int]) the axis to itterate the std over\n :param keepdims: (bool) keep the other dimensions the same\n :return: (TensorFlow Tensor) the std of the tensor\n \"\"\"\n return tf.sqrt(reduce_var(tensor, axis=axis, keepdims=keepdims))\n\n\ndef reduce_var(tensor, axis=None, keepdims=False):\n \"\"\"\n get the variance of a Tensor\n\n :param tensor: (TensorFlow Tensor) the input tensor\n :param axis: (int or [int]) the axis to itterate the variance over\n :param keepdims: (bool) keep the other dimensions the same\n :return: (TensorFlow Tensor) the variance of the tensor\n \"\"\"\n tensor_mean = tf.reduce_mean(tensor, axis=axis, keepdims=True)\n devs_squared = tf.square(tensor - tensor_mean)\n return tf.reduce_mean(devs_squared, axis=axis, keepdims=keepdims)\n\n\ndef get_target_updates(_vars, target_vars, tau, verbose=0):\n \"\"\"\n get target update operations\n\n :param _vars: ([TensorFlow Tensor]) the initial variables\n :param target_vars: ([TensorFlow Tensor]) the target variables\n :param tau: (float) the soft update coefficient (keep old values, between 0 and 1)\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :return: (TensorFlow Operation, TensorFlow Operation) initial update, soft update\n \"\"\"\n if verbose >= 2:\n logger.info('setting up target updates ...')\n soft_updates = []\n init_updates = []\n assert len(_vars) == len(target_vars)\n for var, target_var in zip(_vars, target_vars):\n if verbose >= 2:\n logger.info(' {} <- {}'.format(target_var.name, var.name))\n init_updates.append(tf.assign(target_var, var))\n soft_updates.append(tf.assign(target_var, (1. 
- tau) * target_var + tau * var))\n assert len(init_updates) == len(_vars)\n assert len(soft_updates) == len(_vars)\n return tf.group(*init_updates), tf.group(*soft_updates)\n\n\ndef get_perturbable_vars(scope):\n \"\"\"\n Get the trainable variables that can be perturbed when using\n parameter noise.\n\n :param scope: (str) tensorflow scope of the variables\n :return: ([tf.Variables])\n \"\"\"\n return [var for var in tf_util.get_trainable_vars(scope) if 'LayerNorm' not in var.name]\n\n\ndef get_perturbed_actor_updates(actor, perturbed_actor, param_noise_stddev, verbose=0):\n \"\"\"\n Get the actor update, with noise.\n\n :param actor: (str) the actor\n :param perturbed_actor: (str) the pertubed actor\n :param param_noise_stddev: (float) the std of the parameter noise\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :return: (TensorFlow Operation) the update function\n \"\"\"\n assert len(tf_util.get_globals_vars(actor)) == len(tf_util.get_globals_vars(perturbed_actor))\n assert len(get_perturbable_vars(actor)) == len(get_perturbable_vars(perturbed_actor))\n\n updates = []\n for var, perturbed_var in zip(tf_util.get_globals_vars(actor), tf_util.get_globals_vars(perturbed_actor)):\n if var in get_perturbable_vars(actor):\n if verbose >= 2:\n logger.info(' {} <- {} + noise'.format(perturbed_var.name, var.name))\n # Add Gaussian noise to the parameter\n updates.append(tf.assign(perturbed_var,\n var + tf.random_normal(tf.shape(var), mean=0., stddev=param_noise_stddev)))\n else:\n if verbose >= 2:\n logger.info(' {} <- {}'.format(perturbed_var.name, var.name))\n updates.append(tf.assign(perturbed_var, var))\n assert len(updates) == len(tf_util.get_globals_vars(actor))\n return tf.group(*updates)\n\n\nclass DDPG(OffPolicyRLModel):\n \"\"\"\n Deep Deterministic Policy Gradient (DDPG) model\n\n DDPG: https://arxiv.org/pdf/1509.02971.pdf\n\n :param policy: (DDPGPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)\n :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)\n :param gamma: (float) the discount factor\n :param memory_policy: (ReplayBuffer) the replay buffer\n (if None, default to baselines.deepq.replay_buffer.ReplayBuffer)\n\n .. 
deprecated:: 2.6.0\n This parameter will be removed in a future version\n\n :param eval_env: (Gym Environment) the evaluation environment (can be None)\n :param nb_train_steps: (int) the number of training steps\n :param nb_rollout_steps: (int) the number of rollout steps\n :param nb_eval_steps: (int) the number of evaluation steps\n :param param_noise: (AdaptiveParamNoiseSpec) the parameter noise type (can be None)\n :param action_noise: (ActionNoise) the action noise type (can be None)\n :param param_noise_adaption_interval: (int) apply param noise every N steps\n :param tau: (float) the soft update coefficient (keep old values, between 0 and 1)\n :param normalize_returns: (bool) should the critic output be normalized\n :param enable_popart: (bool) enable pop-art normalization of the critic output\n (https://arxiv.org/pdf/1602.07714.pdf), normalize_returns must be set to True.\n :param normalize_observations: (bool) should the observation be normalized\n :param batch_size: (int) the size of the batch for learning the policy\n :param observation_range: (tuple) the bounding values for the observation\n :param return_range: (tuple) the bounding values for the critic output\n :param critic_l2_reg: (float) l2 regularizer coefficient\n :param actor_lr: (float) the actor learning rate\n :param critic_lr: (float) the critic learning rate\n :param clip_norm: (float) clip the gradients (disabled if None)\n :param reward_scale: (float) the value the reward should be scaled by\n :param render: (bool) enable rendering of the environment\n :param render_eval: (bool) enable rendering of the evaluation environment\n :param memory_limit: (int) the max number of transitions to store, size of the replay buffer\n\n .. deprecated:: 2.6.0\n Use `buffer_size` instead.\n\n :param buffer_size: (int) the max number of transitions to store, size of the replay buffer\n :param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)\n This is not needed for DDPG normally but can help exploring when using HER + DDPG.\n This hack was present in the original OpenAI Baselines repo (DDPG + HER)\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param full_tensorboard_log: (bool) enable additional logging when using tensorboard\n WARNING: this logging can take a lot of space quickly\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. 
Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n \"\"\"\n def __init__(self, policy, env, gamma=0.99, memory_policy=None, eval_env=None, nb_train_steps=50,\n nb_rollout_steps=100, nb_eval_steps=100, param_noise=None, action_noise=None,\n normalize_observations=False, tau=0.001, batch_size=128, param_noise_adaption_interval=50,\n normalize_returns=False, enable_popart=False, observation_range=(-5., 5.), critic_l2_reg=0.,\n return_range=(-np.inf, np.inf), actor_lr=1e-4, critic_lr=1e-3, clip_norm=None, reward_scale=1.,\n render=False, render_eval=False, memory_limit=None, buffer_size=50000, random_exploration=0.0,\n verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,\n full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1):\n\n super(DDPG, self).__init__(policy=policy, env=env, replay_buffer=None,\n verbose=verbose, policy_base=DDPGPolicy,\n requires_vec_env=False, policy_kwargs=policy_kwargs,\n seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)\n\n # Parameters.\n self.gamma = gamma\n self.tau = tau\n\n # TODO: remove this param in v3.x.x\n if memory_policy is not None:\n warnings.warn(\"memory_policy will be removed in a future version (v3.x.x) \"\n \"it is now ignored and replaced with ReplayBuffer\", DeprecationWarning)\n\n if memory_limit is not None:\n warnings.warn(\"memory_limit will be removed in a future version (v3.x.x) \"\n \"use buffer_size instead\", DeprecationWarning)\n buffer_size = memory_limit\n\n self.normalize_observations = normalize_observations\n self.normalize_returns = normalize_returns\n self.action_noise = action_noise\n self.param_noise = param_noise\n self.return_range = return_range\n self.observation_range = observation_range\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.clip_norm = clip_norm\n self.enable_popart = enable_popart\n self.reward_scale = reward_scale\n self.batch_size = batch_size\n self.critic_l2_reg = critic_l2_reg\n self.eval_env = eval_env\n self.render = render\n self.render_eval = render_eval\n self.nb_eval_steps = nb_eval_steps\n self.param_noise_adaption_interval = param_noise_adaption_interval\n self.nb_train_steps = nb_train_steps\n self.nb_rollout_steps = nb_rollout_steps\n self.memory_limit = memory_limit\n self.buffer_size = buffer_size\n self.tensorboard_log = tensorboard_log\n self.full_tensorboard_log = full_tensorboard_log\n self.random_exploration = random_exploration\n\n # init\n self.graph = None\n self.stats_sample = None\n self.replay_buffer = None\n self.policy_tf = None\n self.target_init_updates = None\n self.target_soft_updates = None\n self.critic_loss = None\n self.critic_grads = None\n self.critic_optimizer = None\n self.sess = None\n self.stats_ops = None\n self.stats_names = None\n self.perturbed_actor_tf = None\n self.perturb_policy_ops = None\n self.perturb_adaptive_policy_ops = None\n self.adaptive_policy_distance = None\n self.actor_loss = None\n self.actor_grads = None\n self.actor_optimizer = None\n self.old_std = None\n self.old_mean = None\n self.renormalize_q_outputs_op = None\n self.obs_rms = None\n self.ret_rms = None\n self.target_policy = None\n self.actor_tf = None\n self.normalized_critic_tf = None\n self.critic_tf = None\n self.normalized_critic_with_actor_tf = None\n self.critic_with_actor_tf = None\n self.target_q = None\n self.obs_train = None\n self.action_train_ph = None\n 
self.obs_target = None\n self.action_target = None\n self.obs_noise = None\n self.action_noise_ph = None\n self.obs_adapt_noise = None\n self.action_adapt_noise = None\n self.terminals_ph = None\n self.rewards = None\n self.actions = None\n self.critic_target = None\n self.param_noise_stddev = None\n self.param_noise_actor = None\n self.adaptive_param_noise_actor = None\n self.params = None\n self.summary = None\n self.tb_seen_steps = None\n\n self.target_params = None\n self.obs_rms_params = None\n self.ret_rms_params = None\n\n if _init_setup_model:\n self.setup_model()\n\n def _get_pretrain_placeholders(self):\n policy = self.policy_tf\n # Rescale\n deterministic_action = unscale_action(self.action_space, self.actor_tf)\n return policy.obs_ph, self.actions, deterministic_action\n\n def setup_model(self):\n with SetVerbosity(self.verbose):\n\n assert isinstance(self.action_space, gym.spaces.Box), \\\n \"Error: DDPG cannot output a {} action space, only spaces.Box is supported.\".format(self.action_space)\n assert issubclass(self.policy, DDPGPolicy), \"Error: the input policy for the DDPG model must be \" \\\n \"an instance of DDPGPolicy.\"\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.set_random_seed(self.seed)\n self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)\n\n self.replay_buffer = ReplayBuffer(self.buffer_size)\n\n with tf.variable_scope(\"input\", reuse=False):\n # Observation normalization.\n if self.normalize_observations:\n with tf.variable_scope('obs_rms'):\n self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)\n else:\n self.obs_rms = None\n\n # Return normalization.\n if self.normalize_returns:\n with tf.variable_scope('ret_rms'):\n self.ret_rms = RunningMeanStd()\n else:\n self.ret_rms = None\n\n self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space, 1, 1, None,\n **self.policy_kwargs)\n\n # Create target networks.\n self.target_policy = self.policy(self.sess, self.observation_space, self.action_space, 1, 1, None,\n **self.policy_kwargs)\n self.obs_target = self.target_policy.obs_ph\n self.action_target = self.target_policy.action_ph\n\n normalized_obs = tf.clip_by_value(normalize(self.policy_tf.processed_obs, self.obs_rms),\n self.observation_range[0], self.observation_range[1])\n normalized_next_obs = tf.clip_by_value(normalize(self.target_policy.processed_obs, self.obs_rms),\n self.observation_range[0], self.observation_range[1])\n\n if self.param_noise is not None:\n # Configure perturbed actor.\n self.param_noise_actor = self.policy(self.sess, self.observation_space, self.action_space, 1, 1,\n None, **self.policy_kwargs)\n self.obs_noise = self.param_noise_actor.obs_ph\n self.action_noise_ph = self.param_noise_actor.action_ph\n\n # Configure separate copy for stddev adoption.\n self.adaptive_param_noise_actor = self.policy(self.sess, self.observation_space,\n self.action_space, 1, 1, None,\n **self.policy_kwargs)\n self.obs_adapt_noise = self.adaptive_param_noise_actor.obs_ph\n self.action_adapt_noise = self.adaptive_param_noise_actor.action_ph\n\n # Inputs.\n self.obs_train = self.policy_tf.obs_ph\n self.action_train_ph = self.policy_tf.action_ph\n self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')\n self.rewards = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')\n self.actions = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape, name='actions')\n self.critic_target = tf.placeholder(tf.float32, shape=(None, 1), 
name='critic_target')\n self.param_noise_stddev = tf.placeholder(tf.float32, shape=(), name='param_noise_stddev')\n\n # Create networks and core TF parts that are shared across setup parts.\n with tf.variable_scope(\"model\", reuse=False):\n self.actor_tf = self.policy_tf.make_actor(normalized_obs)\n self.normalized_critic_tf = self.policy_tf.make_critic(normalized_obs, self.actions)\n self.normalized_critic_with_actor_tf = self.policy_tf.make_critic(normalized_obs,\n self.actor_tf,\n reuse=True)\n # Noise setup\n if self.param_noise is not None:\n self._setup_param_noise(normalized_obs)\n\n with tf.variable_scope(\"target\", reuse=False):\n critic_target = self.target_policy.make_critic(normalized_next_obs,\n self.target_policy.make_actor(normalized_next_obs))\n\n with tf.variable_scope(\"loss\", reuse=False):\n self.critic_tf = denormalize(\n tf.clip_by_value(self.normalized_critic_tf, self.return_range[0], self.return_range[1]),\n self.ret_rms)\n\n self.critic_with_actor_tf = denormalize(\n tf.clip_by_value(self.normalized_critic_with_actor_tf,\n self.return_range[0], self.return_range[1]),\n self.ret_rms)\n\n q_next_obs = denormalize(critic_target, self.ret_rms)\n self.target_q = self.rewards + (1. - self.terminals_ph) * self.gamma * q_next_obs\n\n tf.summary.scalar('critic_target', tf.reduce_mean(self.critic_target))\n if self.full_tensorboard_log:\n tf.summary.histogram('critic_target', self.critic_target)\n\n # Set up parts.\n if self.normalize_returns and self.enable_popart:\n self._setup_popart()\n self._setup_stats()\n self._setup_target_network_updates()\n\n with tf.variable_scope(\"input_info\", reuse=False):\n tf.summary.scalar('rewards', tf.reduce_mean(self.rewards))\n tf.summary.scalar('param_noise_stddev', tf.reduce_mean(self.param_noise_stddev))\n\n if self.full_tensorboard_log:\n tf.summary.histogram('rewards', self.rewards)\n tf.summary.histogram('param_noise_stddev', self.param_noise_stddev)\n if len(self.observation_space.shape) == 3 and self.observation_space.shape[0] in [1, 3, 4]:\n tf.summary.image('observation', self.obs_train)\n else:\n tf.summary.histogram('observation', self.obs_train)\n\n with tf.variable_scope(\"Adam_mpi\", reuse=False):\n self._setup_actor_optimizer()\n self._setup_critic_optimizer()\n tf.summary.scalar('actor_loss', self.actor_loss)\n tf.summary.scalar('critic_loss', self.critic_loss)\n\n self.params = tf_util.get_trainable_vars(\"model\") \\\n + tf_util.get_trainable_vars('noise/') + tf_util.get_trainable_vars('noise_adapt/')\n\n self.target_params = tf_util.get_trainable_vars(\"target\")\n self.obs_rms_params = [var for var in tf.global_variables()\n if \"obs_rms\" in var.name]\n self.ret_rms_params = [var for var in tf.global_variables()\n if \"ret_rms\" in var.name]\n\n with self.sess.as_default():\n self._initialize(self.sess)\n\n self.summary = tf.summary.merge_all()\n\n def _setup_target_network_updates(self):\n \"\"\"\n set the target update operations\n \"\"\"\n init_updates, soft_updates = get_target_updates(tf_util.get_trainable_vars('model/'),\n tf_util.get_trainable_vars('target/'), self.tau,\n self.verbose)\n self.target_init_updates = init_updates\n self.target_soft_updates = soft_updates\n\n def _setup_param_noise(self, normalized_obs):\n \"\"\"\n Setup the parameter noise operations\n\n :param normalized_obs: (TensorFlow Tensor) the normalized observation\n \"\"\"\n assert self.param_noise is not None\n\n with tf.variable_scope(\"noise\", reuse=False):\n self.perturbed_actor_tf = 
self.param_noise_actor.make_actor(normalized_obs)\n\n with tf.variable_scope(\"noise_adapt\", reuse=False):\n adaptive_actor_tf = self.adaptive_param_noise_actor.make_actor(normalized_obs)\n\n with tf.variable_scope(\"noise_update_func\", reuse=False):\n if self.verbose >= 2:\n logger.info('setting up param noise')\n self.perturb_policy_ops = get_perturbed_actor_updates('model/pi/', 'noise/pi/', self.param_noise_stddev,\n verbose=self.verbose)\n\n self.perturb_adaptive_policy_ops = get_perturbed_actor_updates('model/pi/', 'noise_adapt/pi/',\n self.param_noise_stddev,\n verbose=self.verbose)\n self.adaptive_policy_distance = tf.sqrt(tf.reduce_mean(tf.square(self.actor_tf - adaptive_actor_tf)))\n\n def _setup_actor_optimizer(self):\n \"\"\"\n setup the optimizer for the actor\n \"\"\"\n if self.verbose >= 2:\n logger.info('setting up actor optimizer')\n self.actor_loss = -tf.reduce_mean(self.critic_with_actor_tf)\n actor_shapes = [var.get_shape().as_list() for var in tf_util.get_trainable_vars('model/pi/')]\n actor_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in actor_shapes])\n if self.verbose >= 2:\n logger.info(' actor shapes: {}'.format(actor_shapes))\n logger.info(' actor params: {}'.format(actor_nb_params))\n self.actor_grads = tf_util.flatgrad(self.actor_loss, tf_util.get_trainable_vars('model/pi/'),\n clip_norm=self.clip_norm)\n self.actor_optimizer = MpiAdam(var_list=tf_util.get_trainable_vars('model/pi/'), beta1=0.9, beta2=0.999,\n epsilon=1e-08)\n\n def _setup_critic_optimizer(self):\n \"\"\"\n setup the optimizer for the critic\n \"\"\"\n if self.verbose >= 2:\n logger.info('setting up critic optimizer')\n normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms),\n self.return_range[0], self.return_range[1])\n self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))\n if self.critic_l2_reg > 0.:\n critic_reg_vars = [var for var in tf_util.get_trainable_vars('model/qf/')\n if 'bias' not in var.name and 'qf_output' not in var.name and 'b' not in var.name]\n if self.verbose >= 2:\n for var in critic_reg_vars:\n logger.info(' regularizing: {}'.format(var.name))\n logger.info(' applying l2 regularization with {}'.format(self.critic_l2_reg))\n critic_reg = tc.layers.apply_regularization(\n tc.layers.l2_regularizer(self.critic_l2_reg),\n weights_list=critic_reg_vars\n )\n self.critic_loss += critic_reg\n critic_shapes = [var.get_shape().as_list() for var in tf_util.get_trainable_vars('model/qf/')]\n critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])\n if self.verbose >= 2:\n logger.info(' critic shapes: {}'.format(critic_shapes))\n logger.info(' critic params: {}'.format(critic_nb_params))\n self.critic_grads = tf_util.flatgrad(self.critic_loss, tf_util.get_trainable_vars('model/qf/'),\n clip_norm=self.clip_norm)\n self.critic_optimizer = MpiAdam(var_list=tf_util.get_trainable_vars('model/qf/'), beta1=0.9, beta2=0.999,\n epsilon=1e-08)\n\n def _setup_popart(self):\n \"\"\"\n setup pop-art normalization of the critic output\n\n See https://arxiv.org/pdf/1602.07714.pdf for details.\n Preserving Outputs Precisely, while Adaptively Rescaling Targets”.\n \"\"\"\n self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std')\n new_std = self.ret_rms.std\n self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean')\n new_mean = self.ret_rms.mean\n\n self.renormalize_q_outputs_op = []\n for out_vars in [[var for var in 
tf_util.get_trainable_vars('model/qf/') if 'qf_output' in var.name],\n                         [var for var in tf_util.get_trainable_vars('target/qf/') if 'qf_output' in var.name]]:\n            assert len(out_vars) == 2\n            # weight and bias of the last layer\n            weight, bias = out_vars\n            assert 'kernel' in weight.name\n            assert 'bias' in bias.name\n            assert weight.get_shape()[-1] == 1\n            assert bias.get_shape()[-1] == 1\n            self.renormalize_q_outputs_op += [weight.assign(weight * self.old_std / new_std)]\n            self.renormalize_q_outputs_op += [bias.assign((bias * self.old_std + self.old_mean - new_mean) / new_std)]\n\n    def _setup_stats(self):\n        \"\"\"\n        Setup the stat logger for DDPG.\n        \"\"\"\n        ops = [\n            tf.reduce_mean(self.critic_tf),\n            reduce_std(self.critic_tf),\n            tf.reduce_mean(self.critic_with_actor_tf),\n            reduce_std(self.critic_with_actor_tf),\n            tf.reduce_mean(self.actor_tf),\n            reduce_std(self.actor_tf)\n        ]\n        names = [\n            'reference_Q_mean',\n            'reference_Q_std',\n            'reference_actor_Q_mean',\n            'reference_actor_Q_std',\n            'reference_action_mean',\n            'reference_action_std'\n        ]\n\n        if self.normalize_returns:\n            ops += [self.ret_rms.mean, self.ret_rms.std]\n            names += ['ret_rms_mean', 'ret_rms_std']\n\n        if self.normalize_observations:\n            ops += [tf.reduce_mean(self.obs_rms.mean), tf.reduce_mean(self.obs_rms.std)]\n            names += ['obs_rms_mean', 'obs_rms_std']\n\n        if self.param_noise:\n            ops += [tf.reduce_mean(self.perturbed_actor_tf), reduce_std(self.perturbed_actor_tf)]\n            names += ['reference_perturbed_action_mean', 'reference_perturbed_action_std']\n\n        self.stats_ops = ops\n        self.stats_names = names\n\n    def _policy(self, obs, apply_noise=True, compute_q=True):\n        \"\"\"\n        Get the actions and critic output, from a given observation\n\n        :param obs: ([float] or [int]) the observation\n        :param apply_noise: (bool) enable the noise\n        :param compute_q: (bool) compute the critic output\n        :return: ([float], float) the action and critic value\n        \"\"\"\n        obs = np.array(obs).reshape((-1,) + self.observation_space.shape)\n        feed_dict = {self.obs_train: obs}\n        if self.param_noise is not None and apply_noise:\n            actor_tf = self.perturbed_actor_tf\n            feed_dict[self.obs_noise] = obs\n        else:\n            actor_tf = self.actor_tf\n\n        if compute_q:\n            action, q_value = self.sess.run([actor_tf, self.critic_with_actor_tf], feed_dict=feed_dict)\n        else:\n            action = self.sess.run(actor_tf, feed_dict=feed_dict)\n            q_value = None\n\n        action = action.flatten()\n        if self.action_noise is not None and apply_noise:\n            noise = self.action_noise()\n            action += noise\n        action = np.clip(action, -1, 1)\n        return action, q_value\n\n    def _store_transition(self, obs, action, reward, next_obs, done, info):\n        \"\"\"\n        Store a transition in the replay buffer\n\n        :param obs: ([float] or [int]) the last observation\n        :param action: ([float]) the action\n        :param reward: (float) the reward\n        :param next_obs: ([float] or [int]) the current observation\n        :param done: (bool) Whether the episode is over\n        :param info: (dict) extra values used to compute reward when using HER\n        \"\"\"\n        reward *= self.reward_scale\n        self.replay_buffer_add(obs, action, reward, next_obs, done, info)\n        if self.normalize_observations:\n            self.obs_rms.update(np.array([obs]))\n\n    def _train_step(self, step, writer, log=False):\n        \"\"\"\n        run a step of training from batch\n\n        :param step: (int) the current step iteration\n        :param writer: (TensorFlow Summary.writer) the writer for tensorboard\n        :param log: (bool) whether or not to log to metadata\n        :return: (float, float) critic loss, actor loss\n        \"\"\"\n        # Get a batch\n        obs, actions, 
rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=self.batch_size,\n env=self._vec_normalize_env)\n # Reshape to match previous behavior and placeholder shape\n rewards = rewards.reshape(-1, 1)\n terminals = terminals.reshape(-1, 1)\n\n if self.normalize_returns and self.enable_popart:\n old_mean, old_std, target_q = self.sess.run([self.ret_rms.mean, self.ret_rms.std, self.target_q],\n feed_dict={\n self.obs_target: next_obs,\n self.rewards: rewards,\n self.terminals_ph: terminals\n })\n self.ret_rms.update(target_q.flatten())\n self.sess.run(self.renormalize_q_outputs_op, feed_dict={\n self.old_std: np.array([old_std]),\n self.old_mean: np.array([old_mean]),\n })\n\n else:\n target_q = self.sess.run(self.target_q, feed_dict={\n self.obs_target: next_obs,\n self.rewards: rewards,\n self.terminals_ph: terminals\n })\n\n # Get all gradients and perform a synced update.\n ops = [self.actor_grads, self.actor_loss, self.critic_grads, self.critic_loss]\n td_map = {\n self.obs_train: obs,\n self.actions: actions,\n self.action_train_ph: actions,\n self.rewards: rewards,\n self.critic_target: target_q,\n self.param_noise_stddev: 0 if self.param_noise is None else self.param_noise.current_stddev\n }\n if writer is not None:\n # run loss backprop with summary if the step_id was not already logged (can happen with the right\n # parameters as the step value is only an estimate)\n if self.full_tensorboard_log and log and step not in self.tb_seen_steps:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, actor_grads, actor_loss, critic_grads, critic_loss = \\\n self.sess.run([self.summary] + ops, td_map, options=run_options, run_metadata=run_metadata)\n\n writer.add_run_metadata(run_metadata, 'step%d' % step)\n self.tb_seen_steps.append(step)\n else:\n summary, actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run([self.summary] + ops,\n td_map)\n writer.add_summary(summary, step)\n else:\n actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run(ops, td_map)\n\n self.actor_optimizer.update(actor_grads, learning_rate=self.actor_lr)\n self.critic_optimizer.update(critic_grads, learning_rate=self.critic_lr)\n\n return critic_loss, actor_loss\n\n def _initialize(self, sess):\n \"\"\"\n initialize the model parameters and optimizers\n\n :param sess: (TensorFlow Session) the current TensorFlow session\n \"\"\"\n self.sess = sess\n self.sess.run(tf.global_variables_initializer())\n self.actor_optimizer.sync()\n self.critic_optimizer.sync()\n self.sess.run(self.target_init_updates)\n\n def _update_target_net(self):\n \"\"\"\n run target soft update operation\n \"\"\"\n self.sess.run(self.target_soft_updates)\n\n def _get_stats(self):\n \"\"\"\n Get the mean and standard deviation of the model's inputs and outputs\n\n :return: (dict) the means and stds\n \"\"\"\n if self.stats_sample is None:\n # Get a sample and keep that fixed for all further computations.\n # This allows us to estimate the change in value for the same set of inputs.\n obs, actions, rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=self.batch_size,\n env=self._vec_normalize_env)\n self.stats_sample = {\n 'obs': obs,\n 'actions': actions,\n 'rewards': rewards,\n 'next_obs': next_obs,\n 'terminals': terminals\n }\n\n feed_dict = {\n self.actions: self.stats_sample['actions']\n }\n\n for placeholder in [self.action_train_ph, self.action_target, self.action_adapt_noise, self.action_noise_ph]:\n if placeholder is not None:\n 
feed_dict[placeholder] = self.stats_sample['actions']\n\n        for placeholder in [self.obs_train, self.obs_target, self.obs_adapt_noise, self.obs_noise]:\n            if placeholder is not None:\n                feed_dict[placeholder] = self.stats_sample['obs']\n\n        values = self.sess.run(self.stats_ops, feed_dict=feed_dict)\n\n        names = self.stats_names[:]\n        assert len(names) == len(values)\n        stats = dict(zip(names, values))\n\n        if self.param_noise is not None:\n            stats = {**stats, **self.param_noise.get_stats()}\n\n        return stats\n\n    def _adapt_param_noise(self):\n        \"\"\"\n        calculate the adaptation for the parameter noise\n\n        :return: (float) the mean distance for the parameter noise\n        \"\"\"\n        if self.param_noise is None:\n            return 0.\n\n        # Perturb a separate copy of the policy to adjust the scale for the next \"real\" perturbation.\n        obs, *_ = self.replay_buffer.sample(batch_size=self.batch_size, env=self._vec_normalize_env)\n        self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={\n            self.param_noise_stddev: self.param_noise.current_stddev,\n        })\n        distance = self.sess.run(self.adaptive_policy_distance, feed_dict={\n            self.obs_adapt_noise: obs, self.obs_train: obs,\n            self.param_noise_stddev: self.param_noise.current_stddev,\n        })\n\n        mean_distance = MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size()\n        self.param_noise.adapt(mean_distance)\n        return mean_distance\n\n    def _reset(self):\n        \"\"\"\n        Reset internal state after an episode is complete.\n        \"\"\"\n        if self.action_noise is not None:\n            self.action_noise.reset()\n        if self.param_noise is not None:\n            self.sess.run(self.perturb_policy_ops, feed_dict={\n                self.param_noise_stddev: self.param_noise.current_stddev,\n            })\n\n    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name=\"DDPG\",\n              reset_num_timesteps=True, replay_wrapper=None):\n\n        new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n        callback = self._init_callback(callback)\n\n        if replay_wrapper is not None:\n            self.replay_buffer = replay_wrapper(self.replay_buffer)\n\n        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n                as writer:\n            self._setup_learn()\n\n            # a list for tensorboard logging, to prevent logging with the same step number, if it already occurred\n            self.tb_seen_steps = []\n\n            rank = MPI.COMM_WORLD.Get_rank()\n\n            if self.verbose >= 2:\n                logger.log('Using agent with the following configuration:')\n                logger.log(str(self.__dict__.items()))\n\n            eval_episode_rewards_history = deque(maxlen=100)\n            episode_rewards_history = deque(maxlen=100)\n            episode_successes = []\n\n            with self.sess.as_default(), self.graph.as_default():\n                # Prepare everything.\n                self._reset()\n                obs = self.env.reset()\n                # Retrieve unnormalized observation for saving into the buffer\n                if self._vec_normalize_env is not None:\n                    obs_ = self._vec_normalize_env.get_original_obs().squeeze()\n                eval_obs = None\n                if self.eval_env is not None:\n                    eval_obs = self.eval_env.reset()\n                episode_reward = 0.\n                episode_step = 0\n                episodes = 0\n                step = 0\n                total_steps = 0\n\n                start_time = time.time()\n\n                epoch_episode_rewards = []\n                epoch_episode_steps = []\n                epoch_actor_losses = []\n                epoch_critic_losses = []\n                epoch_adaptive_distances = []\n                eval_episode_rewards = []\n                eval_qs = []\n                epoch_actions = []\n                epoch_qs = []\n                epoch_episodes = 0\n                epoch = 0\n\n                callback.on_training_start(locals(), globals())\n\n                while True:\n                    for _ in range(log_interval):\n                        callback.on_rollout_start()\n                        # Perform rollouts.\n                        for _ in range(self.nb_rollout_steps):\n\n                            if 
total_steps >= total_timesteps:\n callback.on_training_end()\n return self\n\n # Predict next action.\n action, q_value = self._policy(obs, apply_noise=True, compute_q=True)\n assert action.shape == self.env.action_space.shape\n\n # Execute next action.\n if rank == 0 and self.render:\n self.env.render()\n\n # Randomly sample actions from a uniform distribution\n # with a probability self.random_exploration (used in HER + DDPG)\n if np.random.rand() < self.random_exploration:\n # actions sampled from action space are from range specific to the environment\n # but algorithm operates on tanh-squashed actions therefore simple scaling is used\n unscaled_action = self.action_space.sample()\n action = scale_action(self.action_space, unscaled_action)\n else:\n # inferred actions need to be transformed to environment action_space before stepping\n unscaled_action = unscale_action(self.action_space, action)\n\n new_obs, reward, done, info = self.env.step(unscaled_action)\n\n self.num_timesteps += 1\n\n if callback.on_step() is False:\n callback.on_training_end()\n return self\n\n step += 1\n total_steps += 1\n if rank == 0 and self.render:\n self.env.render()\n\n # Book-keeping.\n epoch_actions.append(action)\n epoch_qs.append(q_value)\n\n # Store only the unnormalized version\n if self._vec_normalize_env is not None:\n new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()\n reward_ = self._vec_normalize_env.get_original_reward().squeeze()\n else:\n # Avoid changing the original ones\n obs_, new_obs_, reward_ = obs, new_obs, reward\n\n self._store_transition(obs_, action, reward_, new_obs_, done, info)\n obs = new_obs\n # Save the unnormalized observation\n if self._vec_normalize_env is not None:\n obs_ = new_obs_\n\n episode_reward += reward_\n episode_step += 1\n\n if writer is not None:\n ep_rew = np.array([reward_]).reshape((1, -1))\n ep_done = np.array([done]).reshape((1, -1))\n tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done,\n writer, self.num_timesteps)\n\n if done:\n callback.on_episode_end()\n # Episode done.\n epoch_episode_rewards.append(episode_reward)\n episode_rewards_history.append(episode_reward)\n epoch_episode_steps.append(episode_step)\n episode_reward = 0.\n episode_step = 0\n epoch_episodes += 1\n episodes += 1\n\n maybe_is_success = info.get('is_success')\n if maybe_is_success is not None:\n episode_successes.append(float(maybe_is_success))\n\n self._reset()\n if not isinstance(self.env, VecEnv):\n obs = self.env.reset()\n\n callback.on_rollout_end()\n # Train.\n epoch_actor_losses = []\n epoch_critic_losses = []\n epoch_adaptive_distances = []\n for t_train in range(self.nb_train_steps):\n # Not enough samples in the replay buffer\n if not self.replay_buffer.can_sample(self.batch_size):\n break\n\n # Adapt param noise, if necessary.\n if len(self.replay_buffer) >= self.batch_size and \\\n t_train % self.param_noise_adaption_interval == 0:\n distance = self._adapt_param_noise()\n epoch_adaptive_distances.append(distance)\n\n # weird equation to deal with the fact the nb_train_steps will be different\n # to nb_rollout_steps\n step = (int(t_train * (self.nb_rollout_steps / self.nb_train_steps)) +\n self.num_timesteps - self.nb_rollout_steps)\n\n critic_loss, actor_loss = self._train_step(step, writer, log=t_train == 0)\n epoch_critic_losses.append(critic_loss)\n epoch_actor_losses.append(actor_loss)\n self._update_target_net()\n\n # Evaluate.\n eval_episode_rewards = []\n eval_qs = []\n if self.eval_env is not None:\n eval_episode_reward = 
0.\n for _ in range(self.nb_eval_steps):\n if total_steps >= total_timesteps:\n return self\n\n eval_action, eval_q = self._policy(eval_obs, apply_noise=False, compute_q=True)\n unscaled_action = unscale_action(self.action_space, eval_action)\n eval_obs, eval_r, eval_done, _ = self.eval_env.step(unscaled_action)\n if self.render_eval:\n self.eval_env.render()\n eval_episode_reward += eval_r\n\n eval_qs.append(eval_q)\n if eval_done:\n if not isinstance(self.env, VecEnv):\n eval_obs = self.eval_env.reset()\n eval_episode_rewards.append(eval_episode_reward)\n eval_episode_rewards_history.append(eval_episode_reward)\n eval_episode_reward = 0.\n\n mpi_size = MPI.COMM_WORLD.Get_size()\n\n # Not enough samples in the replay buffer\n if not self.replay_buffer.can_sample(self.batch_size):\n continue\n\n # Log stats.\n # XXX shouldn't call np.mean on variable length lists\n duration = time.time() - start_time\n stats = self._get_stats()\n combined_stats = stats.copy()\n combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)\n combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)\n combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)\n combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)\n combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)\n combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)\n combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)\n if len(epoch_adaptive_distances) != 0:\n combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)\n combined_stats['total/duration'] = duration\n combined_stats['total/steps_per_second'] = float(step) / float(duration)\n combined_stats['total/episodes'] = episodes\n combined_stats['rollout/episodes'] = epoch_episodes\n combined_stats['rollout/actions_std'] = np.std(epoch_actions)\n # Evaluation statistics.\n if self.eval_env is not None:\n combined_stats['eval/return'] = np.mean(eval_episode_rewards)\n combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)\n combined_stats['eval/Q'] = np.mean(eval_qs)\n combined_stats['eval/episodes'] = len(eval_episode_rewards)\n\n def as_scalar(scalar):\n \"\"\"\n check and return the input if it is a scalar, otherwise raise ValueError\n\n :param scalar: (Any) the object to check\n :return: (Number) the scalar if x is a scalar\n \"\"\"\n if isinstance(scalar, np.ndarray):\n assert scalar.size == 1\n return scalar[0]\n elif np.isscalar(scalar):\n return scalar\n else:\n raise ValueError('expected scalar, got %s' % scalar)\n\n combined_stats_sums = MPI.COMM_WORLD.allreduce(\n np.array([as_scalar(x) for x in combined_stats.values()]))\n combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}\n\n # Total statistics.\n combined_stats['total/epochs'] = epoch + 1\n combined_stats['total/steps'] = step\n\n for key in sorted(combined_stats.keys()):\n logger.record_tabular(key, combined_stats[key])\n if len(episode_successes) > 0:\n logger.logkv(\"success rate\", np.mean(episode_successes[-100:]))\n logger.dump_tabular()\n logger.info('')\n logdir = logger.get_dir()\n if rank == 0 and logdir:\n if hasattr(self.env, 'get_state'):\n with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as file_handler:\n pickle.dump(self.env.get_state(), file_handler)\n if self.eval_env and hasattr(self.eval_env, 'get_state'):\n with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as file_handler:\n pickle.dump(self.eval_env.get_state(), file_handler)\n\n def 
predict(self, observation, state=None, mask=None, deterministic=True):\n observation = np.array(observation)\n vectorized_env = self._is_vectorized_observation(observation, self.observation_space)\n\n observation = observation.reshape((-1,) + self.observation_space.shape)\n actions, _, = self._policy(observation, apply_noise=not deterministic, compute_q=False)\n actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape\n actions = unscale_action(self.action_space, actions) # scale the output for the prediction\n\n if not vectorized_env:\n actions = actions[0]\n\n return actions, None\n\n def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):\n _ = np.array(observation)\n\n if actions is not None:\n raise ValueError(\"Error: DDPG does not have action probabilities.\")\n\n # here there are no action probabilities, as DDPG does not use a probability distribution\n warnings.warn(\"Warning: action probability is meaningless for DDPG. Returning None\")\n return None\n\n def get_parameter_list(self):\n return (self.params +\n self.target_params +\n self.obs_rms_params +\n self.ret_rms_params)\n\n def save(self, save_path, cloudpickle=False):\n data = {\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"nb_eval_steps\": self.nb_eval_steps,\n \"param_noise_adaption_interval\": self.param_noise_adaption_interval,\n \"nb_train_steps\": self.nb_train_steps,\n \"nb_rollout_steps\": self.nb_rollout_steps,\n \"verbose\": self.verbose,\n \"param_noise\": self.param_noise,\n \"action_noise\": self.action_noise,\n \"gamma\": self.gamma,\n \"tau\": self.tau,\n \"normalize_returns\": self.normalize_returns,\n \"enable_popart\": self.enable_popart,\n \"normalize_observations\": self.normalize_observations,\n \"batch_size\": self.batch_size,\n \"observation_range\": self.observation_range,\n \"return_range\": self.return_range,\n \"critic_l2_reg\": self.critic_l2_reg,\n \"actor_lr\": self.actor_lr,\n \"critic_lr\": self.critic_lr,\n \"clip_norm\": self.clip_norm,\n \"reward_scale\": self.reward_scale,\n \"memory_limit\": self.memory_limit,\n \"buffer_size\": self.buffer_size,\n \"random_exploration\": self.random_exploration,\n \"policy\": self.policy,\n \"n_envs\": self.n_envs,\n \"n_cpu_tf_sess\": self.n_cpu_tf_sess,\n \"seed\": self.seed,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs\n }\n\n params_to_save = self.get_parameters()\n\n self._save_to_file(save_path,\n data=data,\n params=params_to_save,\n cloudpickle=cloudpickle)\n\n @classmethod\n def load(cls, load_path, env=None, custom_objects=None, **kwargs):\n data, params = cls._load_from_file(load_path, custom_objects=custom_objects)\n\n if 'policy_kwargs' in kwargs and kwargs['policy_kwargs'] != data['policy_kwargs']:\n raise ValueError(\"The specified policy kwargs do not equal the stored policy kwargs. 
\"\n \"Stored kwargs: {}, specified kwargs: {}\".format(data['policy_kwargs'],\n kwargs['policy_kwargs']))\n\n model = cls(None, env, _init_setup_model=False)\n model.__dict__.update(data)\n model.__dict__.update(kwargs)\n model.set_env(env)\n model.setup_model()\n # Patch for version < v2.6.0, duplicated keys where saved\n if len(params) > len(model.get_parameter_list()):\n n_params = len(model.params)\n n_target_params = len(model.target_params)\n n_normalisation_params = len(model.obs_rms_params) + len(model.ret_rms_params)\n # Check that the issue is the one from\n # https://github.com/hill-a/stable-baselines/issues/363\n assert len(params) == 2 * (n_params + n_target_params) + n_normalisation_params,\\\n \"The number of parameter saved differs from the number of parameters\"\\\n \" that should be loaded: {}!={}\".format(len(params), len(model.get_parameter_list()))\n # Remove duplicates\n params_ = params[:n_params + n_target_params]\n if n_normalisation_params > 0:\n params_ += params[-n_normalisation_params:]\n params = params_\n model.load_parameters(params)\n\n return model\n"
] | [
[
"numpy.random.rand",
"tensorflow.group",
"numpy.mean",
"tensorflow.clip_by_value",
"tensorflow.global_variables_initializer",
"tensorflow.shape",
"tensorflow.summary.histogram",
"tensorflow.global_variables",
"tensorflow.variable_scope",
"numpy.array",
"tensorflow.summary.scalar",
"numpy.std",
"tensorflow.placeholder",
"numpy.isscalar",
"numpy.clip",
"tensorflow.summary.merge_all",
"tensorflow.RunOptions",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.summary.image",
"tensorflow.assign",
"tensorflow.Graph",
"tensorflow.RunMetadata",
"tensorflow.reduce_mean",
"tensorflow.square"
]
] |
robinycfang/NovelQuant | [
"8e36f2ea6accad435b0b4a43a87fa843e07ff6c4"
] | [
"NovelQuant.py"
] | [
"#!/usr/bin/env python3\n\nimport sys\nimport argparse\nimport subprocess\nimport os\nimport pandas as pd\n\n\ndef findRI():\n\tparser = argparse.ArgumentParser(usage = 'python NovelQuant findRI -a annotated.gtf -n novel.gtf')\n\tparser.add_argument('findRI')\n\trequired = parser.add_argument_group('required arguments')\n\trequired.add_argument('-a', help = 'The gtf file that has annotated transcript information', dest = 'a')\n\trequired.add_argument('-n', help = 'The gtf file that has novel transcript information', dest = 'n')\n\targs = parser.parse_args()\n\n\t# find complementary introns in each annotated gene\n\tsubprocess.call([sys.executable, path + 'complementary_introns.py', args.a])\n\t# find introns that are retained in the novel transcripts\n\tsubprocess.call([sys.executable, path + 'find_retained_introns.py', 'anno_gene_complementary_introns.gtf', args.n])\n\ndef quantRI():\n\tparser = argparse.ArgumentParser(usage = 'python NovelQuant quantRI -r retained_introns.gtf -l sample_list.txt')\n\tparser.add_argument('quantRI')\n\trequired = parser.add_argument_group('required arguments')\n\trequired.add_argument('-r', help = 'The gtf file of introns that are retained in novel transcripts.\\\n\t\t\t\ti.e., the output of findRI, retained_introns.gtf', dest = 'r')\n\trequired.add_argument('-l', help = 'A list of BAM file(s) to be processed. \\\n\t\t\t\tEach line should be the path of each bam file.', dest = 'l')\n\tparser.add_argument('-p', help = 'Path to featureCounts if not in the environmental variables', dest = 'p', default = 'featureCounts')\n\tparser.add_argument('-t', help = 'Threads to use in featureCounts', dest = 't', type = str, default = '1')\n\targs = parser.parse_args()\n\n\t# quantify novel transcripts by retained introns\n\tcmd = [args.p, '-a', args.r, '-o', 'RI_counts.txt', '-t', 'intron', '-g', 'transcript_id', '--minOverlap', '50', '-T', args.t]\n\tfor line in open(args.l):\n\t\tline = line.strip('\\n')\n\t\tcmd.append(line)\n\tsubprocess.call(cmd)\n\ndef findUJ():\n\tparser = argparse.ArgumentParser(usage = 'python NovelQuant findUJ -a annotated.gtf -n novel.gtf')\n\tparser.add_argument('findUJ')\n\trequired = parser.add_argument_group('required arguments')\n\trequired.add_argument('-a', help = 'The gtf file of exons of annotated transcripts', dest = 'a')\n\trequired.add_argument('-n', help = 'The gtf file of exons of novel transcripts', dest = 'n')\n\targs = parser.parse_args()\n\n\t# extract exon-exon junctions from both annotated and novel transcripts\n\tsubprocess.call([sys.executable, path + 'extract_junctions.py', args.a, args.n])\n\t# find the junctions that are unique to the novel transcripts\n\tsubprocess.call([sys.executable, path + 'find_uniq_junctions.py', 'eej.gtf'])\n\ndef quantUJ():\n\tparser = argparse.ArgumentParser(usage = 'python3 NovelQuant quantUJ -n novel.gtf -e uniq_eej.gtf -l sample_list.txt')\n\tparser.add_argument('quantUJ')\n\trequired = parser.add_argument_group('required arguments')\t\n\trequired.add_argument('-n', help = 'The gtf file of exons of novel transcripts', dest = 'n')\n\trequired.add_argument('-e', help = 'The gtf file of unique junctions. i.e., the output of findUJ, uniq_eej.gtf', dest = 'e')\n\trequired.add_argument('-l', help = 'A list of BAM file(s) to be processed. 
\\\n\t\t\t\tEach line should be the path of each bam file.', dest = 'l')\n\tparser.add_argument('-p', help = 'Path to featureCounts if not in the environmental variables', dest = 'p', default = 'featureCounts')\n\tparser.add_argument('-t', help = 'Threads to use in featureCounts', dest = 't', type = str, default = '1')\n\targs = parser.parse_args()\n\n\tsample_num = subprocess.Popen(['wc', '-l', args.l], stdout = subprocess.PIPE)\n\tsample_num = int(sample_num.stdout.read().decode('utf-8').split(' ')[0])\n\tif sample_num <= 10:\n\t\tcmd = [args.p, '-a', args.n, '-o', 'novel_counts', '-J', '-T', args.t]\n\t\tfor line in open(args.l):\n\t\t\tline = line.strip('\\n')\n\t\t\tcmd.append(line)\n\t\tsubprocess.call(cmd)\n\telse:\n\t\tcmd = [args.p, '-a', args.n, '-o', 'novel_counts_1', '-J', '-T', args.t]\n\t\tcounter = 0\n\t\tsuffix_name = 1\n\t\tfor line in open(args.l):\n\t\t\tline = line.strip('\\n')\n\t\t\tcmd.append(line)\n\t\t\tcounter += 1\n\t\t\tif counter % 10 == 0 or counter == sample_num:\n\t\t\t\tsubprocess.call(cmd)\n\t\t\t\tsuffix_name += 1\n\t\t\t\tcmd = [args.p, '-a', args.n, '-o', 'novel_counts_' + str(suffix_name), '-J', '-T', args.t]\n\n\tuniq_eej = pd.read_csv(args.e, sep = '\\t')\n\tuniq_eej = uniq_eej.drop(['source', 'type', 'none1', 'none2', 'info'], axis = 1)\n\tmerged = uniq_eej.copy()\n\tfor i in os.listdir():\n\t\tif 'jcounts' in i:\n\t\t\tall_counts = pd.read_csv(i, sep = '\\t')\n\t\t\tall_counts = all_counts.drop(['PrimaryGene', 'SecondaryGenes', 'Site1_strand', 'Site2_chr', 'Site2_strand'], axis = 1)\n\t\t\tall_counts = all_counts.rename(columns = {'Site1_chr': 'chromosome', 'Site1_location': 'start', 'Site2_location': 'end'})\n\t\t\tall_counts['start'] = all_counts['start'] + 1\n\t\t\tall_counts['end'] = all_counts['end'] - 1\n\t\t\tmerged = pd.merge(merged, all_counts, how = 'left', on = ['chromosome', 'start', 'end'], sort = True)\n\tmerged = merged.fillna(0)\n\tmerged.to_csv('UJ_counts.txt', index = False, sep = '\\t')\n\t# remove redundant featureCounts outputs (using shell = True to avoid the error caused by quotations)\n\tsubprocess.call('rm novel_counts*', shell = True)\n\ndef summarize():\n\tparser = argparse.ArgumentParser(usage = 'python NovelQuant sum -r RI_counts.txt -u UJ_counts.txt -l sample_list.txt')\n\tparser.add_argument('sum')\n\trequired = parser.add_argument_group('required arguments')\n\trequired.add_argument('-r', help = 'Output from quantRI, RI_counts.txt.', dest = 'r')\n\trequired.add_argument('-u', help = 'Output from quantUJ, UJ_counts.txt.', dest = 'u')\n\trequired.add_argument('-l', help = 'A list of BAM file(s) to be processed. 
\\\n\t\t\t\tEach line should be the path of each bam file.', dest = 'l')\n\tparser.add_argument('-st', help = 'Path to samtools if not in the environment variables', dest = 'st', default = 'samtools')\n\tparser.add_argument('--CalExpPerc', default = False, action = 'store_true', dest = 'cal')\n\tparser.add_argument('-a', help = 'The gtf file that has annotated transcript information', dest = 'a')\n\tparser.add_argument('-n', help = 'The gtf file that has novel transcript information', dest = 'n')\n\tparser.add_argument('-fc', help = 'Path to featureCounts if not in the environmental variables', dest = 'fc', default = 'featureCounts')\n\tparser.add_argument('-t', help = 'Threads to use in featureCounts', dest = 't', type = str, default = '1')\n\n\targs = parser.parse_args()\n\n\tif args.cal:\n\t\tsubprocess.call([sys.executable, path + 'summarize.py', args.r, args.u, args.l, args.st, args.a, args.n, args.fc, args.t])\n\telse:\n\t\tsubprocess.call([sys.executable, path + 'summarize.py', args.r, args.u, args.l, args.st])\n\npath = '/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/bin/'\n\nif len(sys.argv) == 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help':\n\tsys.exit('usage: python NovelQuant.py <mode>' + '\\n' + \\\n\t\t\t'modes: findRI, quantRI, findUJ, quantUJ, sum')\nelse:\n\tmode = sys.argv[1]\n\nif mode == 'findRI':\n\tfindRI()\nif mode == 'quantRI':\n\tquantRI()\nif mode == 'findUJ':\n\tfindUJ()\nif mode == 'quantUJ':\n\tquantUJ()\nif mode == 'sum':\n\tsummarize()"
] | [
[
"pandas.read_csv",
"pandas.merge"
]
] |
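A note on the join in the quantUJ code above: the +1 and -1 shifts applied to Site1_location and Site2_location suggest that featureCounts' .jcounts output reports the flanking exon bases, so each junction is moved inward by one base on both sides before a left merge onto the unique-junction table, with unmatched junctions zero-filled. A minimal sketch of that step, using made-up coordinates and a hypothetical sample1.bam count column:

    import pandas as pd

    uniq_eej = pd.DataFrame({'chromosome': ['chr1', 'chr1'],
                             'start': [100, 500],
                             'end': [200, 600]})
    jcounts = pd.DataFrame({'chromosome': ['chr1'],
                            'start': [99],   # Site1_location, before the +1 shift
                            'end': [201],    # Site2_location, before the -1 shift
                            'sample1.bam': [42]})
    jcounts['start'] += 1
    jcounts['end'] -= 1

    # Left merge keeps every annotated junction; junctions with no reads get 0
    merged = pd.merge(uniq_eej, jcounts, how='left',
                      on=['chromosome', 'start', 'end'], sort=True).fillna(0)
    print(merged)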
arjunnlp/hedwig-anlp | [
"b8f6c50d788509bc9e5670caeee3503257d716d0"
] | [
"hedwig/models/reg_lstm/__main__.py"
] | [
"import logging\nimport os\nimport random\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\n\nfrom common.evaluate import EvaluatorFactory\nfrom common.train import TrainerFactory\nfrom datasets.aapd import AAPD\nfrom datasets.imdb import IMDB\nfrom datasets.reuters import Reuters\nfrom datasets.yelp2014 import Yelp2014\nfrom datasets.mbti import MBTI\nfrom models.reg_lstm.args import get_args\nfrom models.reg_lstm.model import RegLSTM\nfrom datasets.sst import SST\n\n\nclass UnknownWordVecCache(object):\n \"\"\"\n Caches the first randomly generated word vector for a certain size to make it is reused.\n \"\"\"\n cache = {}\n\n @classmethod\n def unk(cls, tensor):\n size_tup = tuple(tensor.size())\n if size_tup not in cls.cache:\n cls.cache[size_tup] = torch.Tensor(tensor.size())\n cls.cache[size_tup].uniform_(-0.25, 0.25)\n return cls.cache[size_tup]\n\n\ndef get_logger():\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger\n\n\ndef evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_size, device, is_multilabel):\n saved_model_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, loader, batch_size, device)\n if hasattr(saved_model_evaluator, 'is_multilabel'):\n saved_model_evaluator.is_multilabel = is_multilabel\n\n scores, metric_names = saved_model_evaluator.get_scores()\n print('Evaluation metrics for', split_name)\n print(metric_names)\n print(scores)\n\n\nif __name__ == '__main__':\n # Set default configuration in args.py\n args = get_args()\n logger = get_logger()\n\n # Set random seed for reproducibility\n torch.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n if not args.cuda:\n args.gpu = -1\n if torch.cuda.is_available() and args.cuda:\n print('Note: You are using GPU for training')\n torch.cuda.set_device(args.gpu)\n torch.cuda.manual_seed(args.seed)\n if torch.cuda.is_available() and not args.cuda:\n print('Warning: Using CPU for training')\n\n dataset_map = {\n 'Reuters': Reuters,\n 'AAPD': AAPD,\n 'IMDB': IMDB,\n 'Yelp2014': Yelp2014,\n 'MBTI': MBTI,\n 'SST-2': SST\n }\n\n if args.dataset not in dataset_map:\n raise ValueError('Unrecognized dataset')\n\n else:\n dataset_class = dataset_map[args.dataset]\n train_iter, dev_iter, test_iter = dataset_class.iters(args.data_dir,\n args.word_vectors_file,\n args.word_vectors_dir,\n batch_size=args.batch_size,\n device=args.gpu,\n unk_init=UnknownWordVecCache.unk)\n\n config = deepcopy(args)\n config.dataset = train_iter.dataset\n config.target_class = train_iter.dataset.NUM_CLASSES\n config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab)\n\n print('Dataset:', args.dataset)\n print('No. of target classes:', train_iter.dataset.NUM_CLASSES)\n print('No. of train instances', len(train_iter.dataset))\n print('No. of dev instances', len(dev_iter.dataset))\n print('No. 
of test instances', len(test_iter.dataset))\n\n if args.resume_snapshot:\n if args.cuda:\n model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage.cuda(args.gpu))\n else:\n model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage)\n else:\n model = RegLSTM(config)\n if args.cuda:\n model.cuda()\n\n if not args.trained_model:\n save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME)\n os.makedirs(save_path, exist_ok=True)\n\n parameter = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay)\n\n train_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, train_iter, args.batch_size, args.gpu)\n test_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, test_iter, args.batch_size, args.gpu)\n dev_evaluator = EvaluatorFactory.get_evaluator(dataset_class, model, None, dev_iter, args.batch_size, args.gpu)\n\n if hasattr(train_evaluator, 'is_multilabel'):\n train_evaluator.is_multilabel = dataset_class.IS_MULTILABEL\n if hasattr(test_evaluator, 'is_multilabel'):\n test_evaluator.is_multilabel = dataset_class.IS_MULTILABEL\n if hasattr(dev_evaluator, 'is_multilabel'):\n dev_evaluator.is_multilabel = dataset_class.IS_MULTILABEL\n\n trainer_config = {\n 'optimizer': optimizer,\n 'batch_size': args.batch_size,\n 'log_interval': args.log_every,\n 'patience': args.patience,\n 'model_outfile': args.save_path,\n 'logger': logger,\n 'is_multilabel': dataset_class.IS_MULTILABEL\n }\n\n trainer = TrainerFactory.get_trainer(args.dataset, model, None, train_iter, trainer_config, train_evaluator, test_evaluator, dev_evaluator)\n\n if not args.trained_model:\n trainer.train(args.epochs)\n else:\n if args.cuda:\n model = torch.load(args.trained_model, map_location=lambda storage, location: storage.cuda(args.gpu))\n else:\n model = torch.load(args.trained_model, map_location=lambda storage, location: storage)\n\n model = torch.load(trainer.snapshot_path)\n\n if model.beta_ema > 0:\n old_params = model.get_params()\n model.load_ema_params()\n\n # Calculate dev and test metrics\n evaluate_dataset('dev', dataset_class, model, None, dev_iter, args.batch_size,\n is_multilabel=dataset_class.IS_MULTILABEL,\n device=args.gpu)\n evaluate_dataset('test', dataset_class, model, None, test_iter, args.batch_size,\n is_multilabel=dataset_class.IS_MULTILABEL,\n device=args.gpu)\n\n if model.beta_ema > 0:\n model.load_params(old_params)\n"
] | [
[
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.optim.Adam",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.load"
]
] |
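The UnknownWordVecCache in the file above exists so that every out-of-vocabulary token of a given embedding size receives the same random vector (its docstring's "to make it is reused" reads as "so that it is reused"), which keeps runs reproducible across splits. A stripped-down sketch of the same caching idea, assuming only that torch is available:

    import torch

    _unk_cache = {}

    def unk(tensor):
        # Reuse one uniformly initialized vector per tensor shape, so every
        # unknown token maps to the same embedding instead of a fresh sample.
        size = tuple(tensor.size())
        if size not in _unk_cache:
            _unk_cache[size] = torch.empty(size).uniform_(-0.25, 0.25)
        return _unk_cache[size]

    v1 = unk(torch.zeros(300))
    v2 = unk(torch.zeros(300))
    assert torch.equal(v1, v2)  # same vector both times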
AmazingDD/DeepFM-torch | [
"76daa619e5e6cf70ba55f5180173d2b1e6fdbe1a"
] | [
"main.py"
] | [
"import torch\nimport pandas as pd\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.model_selection import train_test_split\n\nfrom deepfm import DeepFM\n\ntorch.manual_seed(2022)\ndata = pd.read_csv('./temp_data.csv').reset_index(drop=True)\n\ncategory_cols = ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']\ndummy_cols = ['SK_ID_CURR']\ntarget_col = 'TARGET'\nnumeric_cols = list(set(data.columns) - set(category_cols + dummy_cols + [target_col]))\n\ndef data_massage(data, category_cols, numeric_cols):\n feat_cols = category_cols + numeric_cols\n fields = []\n for feat_col in feat_cols:\n if feat_col not in category_cols:\n fields.append(1)\n else:\n fields.append(data[feat_col].nunique())\n start_idx = [0] + np.cumsum(fields)[:-1].tolist()\n\n return feat_cols, start_idx, fields\n\nclass FMDataset(Dataset):\n def __init__(self, data, feat_start_idx, fields_size, feat_cols, target_col):\n self.data = data\n self.label = np.asarray(self.data[target_col])\n\n self.feat_cols = feat_cols\n self.fields = fields_size\n self.start_idx = feat_start_idx\n\n def __getitem__(self, index):\n row = self.data.loc[index, self.feat_cols]\n idxs = list()\n vals = list()\n # label = self.data.loc[index, self.]\n label = self.label[index]\n for i in range(len(row)):\n if self.fields[i] == 1:\n idxs.append(self.start_idx[i])\n vals.append(row[i])\n else:\n idxs.append(int(self.start_idx[i] + row[i]))\n vals.append(1)\n\n label = torch.tensor(label, dtype=torch.float32)\n idxs = torch.tensor(idxs, dtype=torch.long)\n vals = torch.tensor(vals, dtype=torch.float32)\n \n return label, idxs, vals\n\n def __len__(self):\n return len(self.data)\n\nfeat_cols, feat_start_idx, fields_size = data_massage(data, category_cols, numeric_cols)\n\nargs = {\n 'batch_size': 256,\n 'gpuid': '0',\n 'lr': 0.001,\n 'l2_reg': 0.,\n 'epochs': 10,\n 'num_features': len(feat_cols),\n 'embedding_dim': 8,\n 'field_size': fields_size,\n 'dense_size': [32, 32],\n '1o_dropout_p': 1., \n '2o_dropout_p': 1., \n 'deep_dropout_p': 0.5,\n 'batch_norm': True,\n 'deep_layer_act': 'relu',\n 'opt_name': 'adam'\n}\n\ntrain_data, test_data = train_test_split(data, test_size=0.2)\ntrain_data, test_data = train_data.reset_index(drop=True), test_data.reset_index(drop=True)\n\ntrain_dataset = FMDataset(train_data, feat_start_idx, fields_size, feat_cols, target_col)\ntrain_loader = DataLoader(train_dataset, batch_size=args['batch_size'], shuffle=True)\n\ntest_dataset = FMDataset(test_data, feat_start_idx, fields_size, feat_cols, target_col)\ntest_loader = DataLoader(test_dataset, batch_size=len(test_dataset))\n\nmodel = DeepFM(args)\nmodel.fit(train_loader)\nmodel.predict(test_loader)\n\n\n"
] | [
[
"numpy.asarray",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"numpy.cumsum",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
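The data_massage helper above lays out a single global index space for the FM embedding table: each numeric column takes one slot, each categorical column takes one slot per level, and np.cumsum turns the per-field sizes into start offsets. A toy illustration with one hypothetical categorical field and one numeric field:

    import numpy as np

    fields = [3, 1]  # a 3-level categorical field, then a numeric field
    start_idx = [0] + np.cumsum(fields)[:-1].tolist()  # -> [0, 3]

    # A row holding category level 2 and numeric value 0.7 becomes:
    idxs = [start_idx[0] + 2, start_idx[1]]  # global embedding indices [2, 3]
    vals = [1.0, 0.7]  # 1.0 for the one-hot slot, the raw value for the numeric slot
    print(idxs, vals)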
ChandrakantKate/ga-learner-dsmp-repo | [
"e6c53282bbd42c8055c18a2f1203ea76eafa102a"
] | [
"pandas-loan-approval-project/code.py"
] | [
"# --------------\n# code starts here\nmask1 = banks['Self_Employed']=='Yes'\nmask2 = banks['Loan_Status']=='Y'\nloan_approved_se = banks.loc[mask1 & mask2].shape[0]\n#print (loan_approved_se)\nmask3 = banks['Self_Employed']=='No'\nloan_approved_nse = banks.loc[mask2 & mask3].shape[0]\n\npercentage_se = loan_approved_se/614 * 100\npercentage_nse = loan_approved_nse/614 * 100\n\n# code ends here\n\n\n# --------------\n# code starts here\n\nloan_term = banks['Loan_Amount_Term'].apply(lambda x: x/12)\n#print(loan_term)\n\nbig_loan_term = banks.loc[loan_term>=25].shape[0]\n\n# code ends here\n\n\n# --------------\n# Import packages\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import mode \n \nbank = pd.read_csv(path,sep=',')\ncategorical_var = bank.select_dtypes(include = 'object')\nprint (categorical_var)\n# code starts here\nnumerical_var = bank.select_dtypes(include = 'number')\nprint(numerical_var)\n\n\n\n\n# code ends here\n\n\n# --------------\n# code ends here\n#print(banks.head())\nloan_groupby = bank.groupby(['Loan_Status'],axis=0)['ApplicantIncome', 'Credit_History']\n#print(loan_groupby)\nmean_values = loan_groupby.agg([np.mean])\nprint(mean_values)\n\n# code ends here\n\n\n# --------------\n# code starts here\nbanks = bank.drop(columns='Loan_ID')\n\nprint(banks.isnull().sum())\n\nbank_mode = banks.mode()\n\nbanks.fillna(bank_mode.iloc[0],inplace=True)\n#print(bank_mode)\n#code ends here\n\n\n# --------------\n# Code starts here\n\navg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'], values='LoanAmount', aggfunc='mean')\n\n# code ends here\n\n\n\n"
] | [
[
"pandas.read_csv"
]
] |
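The loan-approval snippets above lean on two pandas idioms: combining boolean masks with & before .loc, and pivot_table for grouped means. Both in miniature, on a made-up frame:

    import pandas as pd

    banks = pd.DataFrame({'Self_Employed': ['Yes', 'No', 'No', 'Yes'],
                          'Loan_Status':   ['Y', 'Y', 'N', 'N'],
                          'LoanAmount':    [120, 80, 60, 150]})

    mask1 = banks['Self_Employed'] == 'Yes'
    mask2 = banks['Loan_Status'] == 'Y'
    loan_approved_se = banks.loc[mask1 & mask2].shape[0]  # row count via shape

    avg_loan_amount = banks.pivot_table(index=['Self_Employed'],
                                        values='LoanAmount', aggfunc='mean')
    print(loan_approved_se)
    print(avg_loan_amount)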
edugm94/temporal-feat-emotion-prediction | [
"6548bbf5f5d8969de97c076ebc9b5462d7b8bdd4"
] | [
"code/src/d02_intermediate/SetupDataset.py"
] | [
"# !/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# Author: Eduardo Gutierrez Maestro\n# Date: 2021.11.23\n# email: [email protected]\n#\n# Center for Applied Autonomous Sensor Systems (AASS), Cognitive Robotic Systems Labs\n# University of Orebro, Sweden\nfrom src.d00_utils.constants import RAW_DATA_PATH\nfrom src.d00_utils.filtering import compute_norm, filter_acc_signal, filter_eda_signal\nimport pandas as pd\nimport os\n\n\nclass SetupDataset:\n def __init__(self, participant, n_days, weda):\n self.participant = participant\n self.days = n_days\n self.weda = weda\n self.signals = ['acc', 'eda', 'temp', 'hr']\n self.acc = None\n self.eda = None\n self.temp = None\n self.hr = None\n\n def run(self):\n for day in range(1, self.days+1):\n self.load_signals(day=day)\n self.filter_signals()\n self.emotion_to_signal(day=day)\n\n out_path = \"data/02_intermediate/{}/{}/{}/\".format(\n str(self.weda), self.participant, str(day))\n out_filename_acc = out_path + \"ACC.csv\"\n out_filename_eda = out_path + \"EDA.csv\"\n out_filename_temp = out_path + \"TEMP.csv\"\n out_filename_hr = out_path + \"HR.csv\"\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n self.acc.to_csv(out_filename_acc, header=True, index=False, sep='\\t', mode='w')\n self.eda.to_csv(out_filename_eda, header=True, index=False, sep='\\t', mode='w')\n self.temp.to_csv(out_filename_temp, header=True, index=False, sep='\\t', mode='w')\n self.hr.to_csv(out_filename_hr, header=True, index=False, sep='\\t', mode='w')\n\n def load_signals(self, day): # This function loads into an attribute a corresponding signal\n path = RAW_DATA_PATH + \"{}/{}_Complete/\".format(self.participant, self.participant)\n acc_path = path + \"ACC{}.csv\".format(day)\n eda_path = path + \"EDA{}.csv\".format(day)\n temp_path = path + \"TEMP{}.csv\".format(day)\n hr_path = path + \"HR{}.csv\".format(day)\n\n def get_acc_df(acc_path):\n f = 32\n Ts = 1/f\n acc = pd.read_csv(acc_path, names=['x', 'y', 'z'])\n init_timestamp = acc.iloc[0, :][0]\n acc = acc.drop([0, 1]).reset_index(drop=True).reset_index()\n acc['ts'] = init_timestamp + acc['index'] * Ts\n cols = ['ts', 'x', 'y', 'z']\n acc = acc[cols]\n\n # Synchro with HR signal\n actual_ts = init_timestamp + 10\n acc = acc[acc['ts'] >= actual_ts]\n\n # Get normalized signal\n acc['n'] = compute_norm(acc)\n return acc\n\n def get_eda_df(eda_path):\n f = 4\n Ts = 1 / f\n eda = pd.read_csv(eda_path, names=['eda'])\n init_timestamp = eda.iloc[0, :][0]\n eda = eda.drop([0, 1]).reset_index(drop=True).reset_index()\n eda['ts'] = init_timestamp + eda['index'] * Ts\n cols = ['ts', 'eda']\n eda = eda[cols]\n\n # Synchro with HR signal\n actual_ts = init_timestamp + 10\n eda = eda[eda['ts'] >= actual_ts]\n return eda\n\n def get_temp_df(temp_path):\n f = 4\n Ts = 1 / f\n temp = pd.read_csv(temp_path, names=['temp'])\n init_timestamp = temp.iloc[0, :][0]\n temp = temp.drop([0, 1]).reset_index(drop=True).reset_index()\n temp['ts'] = init_timestamp + temp['index'] * Ts\n cols = ['ts', 'temp']\n temp = temp[cols]\n\n # Synchro with HR signal\n actual_ts = init_timestamp + 10\n temp = temp[temp['ts'] >= actual_ts]\n return temp\n\n def get_hr_df(hr_path):\n f = 1\n Ts = 1 / f\n hr = pd.read_csv(hr_path, names=['hr'])\n init_timestamp = hr.iloc[0, :][0]\n hr = hr.drop([0, 1]).reset_index(drop=True).reset_index()\n hr['ts'] = init_timestamp + hr['index'] * Ts\n cols = ['ts', 'hr']\n hr = hr[cols]\n return hr\n\n self.acc = get_acc_df(acc_path)\n self.eda = get_eda_df(eda_path)\n self.temp = get_temp_df(temp_path)\n 
self.hr = get_hr_df(hr_path)\n\n def filter_signals(self): # To filter each of the signals\n for signal in self.signals:\n if signal == 'acc':\n self.acc['x'] = filter_acc_signal(self.acc['x'])\n self.acc['y'] = filter_acc_signal(self.acc['y'])\n self.acc['z'] = filter_acc_signal(self.acc['z'])\n self.acc['n'] = filter_acc_signal(self.acc['n'])\n elif signal == 'eda':\n self.eda['eda'] = filter_eda_signal(self.eda['eda'])\n else:\n continue\n\n def emotion_to_signal(self, day): # assign to each signal an emotion\n WINDOW = self.weda * 60 # EMA window converted to seconds\n path_EMA = RAW_DATA_PATH + \"{}/{}_Complete/EMAs{}.xlsx\".format(self.participant, self.participant, day)\n # path to EMA\n # Ask for dimension / or compute all dimensions at the same time\n df_ema = pd.read_excel(path_EMA, engine='openpyxl').iloc[:, 0:8].dropna(how='all')\n\n init_ts = self.acc['ts'].iloc[0]\n ts_aux = df_ema.iloc[:, 4]\n timestamps = ts_aux + init_ts\n\n timestamps = timestamps.dropna()\n end_ts = timestamps.iloc[-1]\n timestamps = timestamps[1:-1] # Delete first and last timestamp (reference)\n\n # be aware that EMA excel may be empty\n if timestamps.empty:\n pass\n else:\n # Assign -1 to all labels and dataframes\n self.acc['label_m'] = -1\n self.acc['label_h'] = -1\n self.acc['label_a'] = -1\n\n self.eda['label_m'] = -1\n self.eda['label_h'] = -1\n self.eda['label_a'] = -1\n\n self.temp['label_m'] = -1\n self.temp['label_h'] = -1\n self.temp['label_a'] = -1\n\n self.hr['label_m'] = -1\n self.hr['label_h'] = -1\n self.hr['label_a'] = -1\n\n for idx, ts in enumerate(timestamps):\n # You could try to do all labels at the same time in this script to optimize\n mood = int(df_ema.iloc[idx + 1, 7])\n happ = int(df_ema.iloc[idx + 1, 5])\n act = int(df_ema.iloc[idx + 1, 6])\n\n # En estos if definir el upper y lower bound para cada uno de los casos.\n if idx == 0:\n ts_pos = timestamps.iloc[idx + 1]\n ts_pre = None\n\n l_dis = ts - init_ts\n r_dis = ts_pos - ts\n l_bound = init_ts if l_dis < WINDOW / 2 else ts - WINDOW / 2\n r_bound = ts + r_dis / 2 if r_dis < WINDOW else ts + WINDOW / 2\n\n elif idx == len(timestamps) - 1:\n ts_pre = timestamps.iloc[idx - 1]\n ts_pos = None\n\n l_dis = ts - ts_pre\n r_dis = end_ts - ts\n l_bound = ts - l_dis / 2 if l_dis < WINDOW else ts - WINDOW / 2\n r_bound = end_ts if r_dis < WINDOW / 2 else ts + WINDOW / 2\n\n else:\n ts_pre = timestamps.iloc[idx - 1]\n ts_pos = timestamps.iloc[idx + 1]\n\n l_dis = ts - ts_pre\n r_dis = ts_pos - ts\n l_bound = ts - l_dis / 2 if l_dis < WINDOW else ts - WINDOW / 2\n r_bound = ts + r_dis / 2 if r_dis < WINDOW else ts + WINDOW / 2\n\n # Add label to each colum within the dataframe\n self.acc.loc[self.acc['ts'].between(l_bound, r_bound), 'label_m'] = mood\n self.acc.loc[self.acc['ts'].between(l_bound, r_bound), 'label_h'] = happ\n self.acc.loc[self.acc['ts'].between(l_bound, r_bound), 'label_a'] = act\n\n self.eda.loc[self.eda['ts'].between(l_bound, r_bound), 'label_m'] = mood\n self.eda.loc[self.eda['ts'].between(l_bound, r_bound), 'label_h'] = happ\n self.eda.loc[self.eda['ts'].between(l_bound, r_bound), 'label_a'] = act\n\n self.temp.loc[self.temp['ts'].between(l_bound, r_bound), 'label_m'] = mood\n self.temp.loc[self.temp['ts'].between(l_bound, r_bound), 'label_h'] = happ\n self.temp.loc[self.temp['ts'].between(l_bound, r_bound), 'label_a'] = act\n\n self.hr.loc[self.hr['ts'].between(l_bound, r_bound), 'label_m'] = mood\n self.hr.loc[self.hr['ts'].between(l_bound, r_bound), 'label_h'] = happ\n self.hr.loc[self.hr['ts'].between(l_bound, 
r_bound), 'label_a'] = act\n"
] | [
[
"pandas.read_csv",
"pandas.read_excel"
]
] |
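Two patterns carry the SetupDataset class above: reconstructing per-sample timestamps from a start time plus index * Ts, and stamping an emotion label onto every sample whose timestamp falls inside a window via Series.between. A compact sketch with invented numbers:

    import pandas as pd

    f = 4  # sampling frequency in Hz, as used for the EDA and TEMP signals above
    Ts = 1 / f
    init_timestamp = 1_600_000_000.0

    sig = pd.DataFrame({'eda': [0.1, 0.2, 0.3, 0.4, 0.5]}).reset_index()
    sig['ts'] = init_timestamp + sig['index'] * Ts  # per-sample times

    sig['label_m'] = -1  # -1 marks unlabeled samples
    l_bound, r_bound = init_timestamp + 0.25, init_timestamp + 0.75
    sig.loc[sig['ts'].between(l_bound, r_bound), 'label_m'] = 3
    print(sig)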
maxrowland/LendingClub | [
"ea5d48ba8162774364898e00b8bcb7a86bb12d67"
] | [
"Models/CL_RFC.py"
] | [
"#Author: Max H. Rowland\r\n#Email: [email protected]\r\n#Script uses a random forest classifier to predict loan defaults within the lending Club dataset\r\nimport os, errno, time, smtplib, ssl, pickle\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom pandas.plotting import register_matplotlib_converters\r\nregister_matplotlib_converters()\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.pipeline import Pipeline\r\nimport sklearn.metrics as metrics\r\n#Model Specific Packages\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n#%%Global model output settings\r\n#######################################################################################################################\r\n#######################################################################################################################\r\nmodel_name = 'Random Forest' #Name used in chart titles\r\nsave_path = r'C:\\Users\\mhr19\\Dropbox\\CODE\\CONSUMER_DEBT\\CL_RFC' #Directory for saving model output\r\nscorer = 'roc_auc' #Scoring metric for Grid Search\r\nstart_time = time.time() #start time for execution timer\r\n#######################################################################################################################\r\n#######################################################################################################################\r\n#%%Load Train/Val/Test Data Files\r\n#######################################################################################################################\r\n#######################################################################################################################\r\n#Training CSV files\r\nX_train = pd.read_csv(r'C:\\Users\\mhr19\\Dropbox\\CODE\\CONSUMER_DEBT\\DATA\\TRAIN\\loans_SMOTE_X_train_all.CSV')\r\ny_train = pd.read_csv(r'C:\\Users\\mhr19\\Dropbox\\CODE\\CONSUMER_DEBT\\DATA\\TRAIN\\loans_SMOTE_y_train_all.CSV')\r\n\r\n#Validation CSV files\r\nX_val = pd.read_csv(r'C:\\Users\\mhr19\\Dropbox\\CODE\\CONSUMER_DEBT\\DATA\\VAL\\loans_IS_X_val_all.CSV').set_index('id')\r\ny_val = pd.read_csv(r'C:\\Users\\mhr19\\Dropbox\\CODE\\CONSUMER_DEBT\\DATA\\VAL\\loans_RAW_y_val_all.CSV').set_index('id')\r\n\r\n#Test CSV files\r\nX_test = pd.read_csv(r'C:\\Users\\mhr19\\Dropbox\\CODE\\CONSUMER_DEBT\\DATA\\TEST\\loans_IS_X_test_all.CSV').set_index('id')\r\ny_test = pd.read_csv(r'C:\\Users\\mhr19\\Dropbox\\CODE\\CONSUMER_DEBT\\DATA\\TEST\\loans_RAW_y_test_all.CSV').set_index('id')\r\n#######################################################################################################################\r\n#######################################################################################################################\r\n#%% Model output save folder creation\r\ntimestamp = datetime.today().strftime('%m-%d-%Y_%I-%M %p')\r\ntimestamp_label = 'Analysis Date: ' + datetime.today().strftime('%m/%d/%Y %I:%M %p')\r\ndef filecreation(list, filename):\r\n mydir = os.path.join(\r\n save_path,\r\n timestamp)\r\n try:\r\n os.makedirs(mydir)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise # This was not a \"directory exist\" error..\r\n with open(os.path.join(mydir, filename), 'w') as d:\r\n d.writelines(list)\r\nfilecreation(save_path, model_name)\r\nfolder_path = save_path + \"\\\\\" + timestamp\r\n\r\n#%%Random Forest 
Classifier\r\n#######################################################################################################################\r\n#######################################################################################################################\r\n#######################################################################################################################\r\npipeline = Pipeline([\r\n ('model', RandomForestClassifier(n_estimators=50, #The number of trees in the forest - default = 100\r\n criterion='gini', #The function to measure the quality of a split - default 'gini'\r\n max_depth=10, #The maximum depth of the tree - default = None\r\n min_samples_split=2, #The minimum number of samples required to split an internal node - default = 2\r\n min_samples_leaf=1, #The minimum number of samples required to be at a leaf node - default = 1\r\n min_weight_fraction_leaf=0.0, #The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node - default = 0\r\n max_features='auto', #The number of features to consider when looking for the best split - default = 'auto'\r\n max_leaf_nodes=None, #Grow trees with max_leaf_nodes in best-first fashion - default = None\r\n min_impurity_decrease=0.0, #A node will be split if this split induces a decrease of the impurity greater than or equal to this value - default = 0\r\n bootstrap=False, #Whether bootstrap samples are used when building trees - default = True\r\n oob_score=False, #Whether to use out-of-bag samples to estimate the generalization accuracy - default = False\r\n n_jobs=8, #Number of CPUs to use - default = None\r\n random_state=1989, #The seed of random number generator - default = None\r\n verbose=0, #How often progress messages are printed - default = 0\r\n warm_start=False, #When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest - default = False\r\n class_weight=None, #Weights associated with classes in the form {class_label: weight} - default = None\r\n ccp_alpha=0, #Complexity parameter used for Minimal Cost-Complexity Pruning - default = 0\r\n max_samples=None) #If bootstrap is True, the number of samples to draw from X to train each base estimator - default = none\r\n )\r\n])\r\nparam_grid = {\r\n #'model__n_estimators': [50,100,250],\r\n #'model__criterion': ['gini'],\r\n #'model__max_depth': [None, 5, 10],\r\n #model__min_samples_split': [2,5],\r\n #model__min_samples_leaf': [10,20],\r\n #'model__min_weight_fraction_leaf': [50],\r\n #model__max_features': ['sqrt'],\r\n #'model__max_leaf_nodes': [50],\r\n #'model__min_impurity_decrease': [50],\r\n #'model__min_impurity_split': [50],\r\n #'model__bootstrap': [50],\r\n #'model__oob_score': [50],\r\n #'model__warm_start': [50],\r\n #'model__class_weight': [50],\r\n #'model__ccp_alpha': [50],\r\n #'model__max_samples': [50],\r\n}\r\nscorers = {\r\n 'roc_auc': metrics.make_scorer(metrics.roc_auc_score),\r\n 'precision_score': metrics.make_scorer(metrics.precision_score),\r\n 'recall_score': metrics.make_scorer(metrics.recall_score),\r\n 'accuracy_score': metrics.make_scorer(metrics.accuracy_score)\r\n}\r\nmodel = GridSearchCV(estimator=pipeline, #Model\r\n param_grid=param_grid, #Search grip parameters\r\n scoring=scorers, #evaluate the predictions on the test set - default = None\r\n n_jobs=8, #Number of CPUs to use - default = None\r\n refit=scorer, #Refit an estimator using the best found parameters on the whole dataset. 
- default = True\r\n cv=5, #Determines the cross-validation splitting strategy\r\n verbose=0, #How often progress messages are printed - default = 0\r\n pre_dispatch='2*n_jobs', #Controls the number of jobs that get dispatched during parallel execution - default = '2*n_jobs'\r\n return_train_score=False #If False, the cv_results_ attribute will not include training scores - default = False\r\n )\r\nmodel.fit(X_train, y_train) #Dataset to train the model on\r\ngrid_results = pd.DataFrame(model.cv_results_)\r\ngrid_results = grid_results.sort_values(by='mean_test_roc_auc', ascending=False)\r\npickle.dump(model, open(folder_path + \"\\\\\" + \"MODEL.txt\",'wb'))\r\n#######################################################################################################################\r\n#######################################################################################################################\r\n#######################################################################################################################\r\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"sklearn.metrics.make_scorer",
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"pandas.plotting.register_matplotlib_converters"
]
] |
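One caveat about the scorers dictionary above: wrapping metrics.roc_auc_score in a plain make_scorer computes AUC from hard class predictions rather than probabilities, so the built-in 'roc_auc' scorer string is usually the safer spelling. A self-contained multi-metric grid search on synthetic data, refit on ROC AUC:

    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import GridSearchCV

    X, y = make_classification(n_samples=200, n_features=10, random_state=1989)

    model = GridSearchCV(
        estimator=RandomForestClassifier(random_state=1989),
        param_grid={'n_estimators': [10, 50], 'max_depth': [3, 10]},
        scoring={'roc_auc': 'roc_auc', 'accuracy': 'accuracy'},
        refit='roc_auc',  # refit the best configuration on the full data
        cv=3)
    model.fit(X, y)

    grid_results = (pd.DataFrame(model.cv_results_)
                    .sort_values(by='mean_test_roc_auc', ascending=False))
    print(grid_results[['params', 'mean_test_roc_auc', 'mean_test_accuracy']])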
umayaml/neuprint-python | [
"7c19ce9c97aea63d6c45196a497dce30237586b4"
] | [
"neuprint/plotting.py"
] | [
"\"\"\"\nMiscellaneous plotting functions.\n\n\nNote:\n These functions require additional dependencies,\n which aren't listed by default dependencies of neuprint-python.\n (See docs for each function.)\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom .client import inject_client\nfrom .skeleton import skeleton_df_to_nx\n\ndef plot_soma_projections(neurons_df, color_by='cellBodyFiber'):\n \"\"\"\n Plot the soma locations as XY, XZ, and ZY 2D projections,\n colored by the given column.\n\n Requires ``bokeh``.\n\n Returns a layout which can be displayed\n with ``bokeh.plotting.show()``.\n\n Example:\n\n .. code-block: python\n\n from neuprint import fetch_neurons, NeuronCriteria as NC\n from bokeh.plotting import output_notebook\n output_notebook()\n\n criteria = NC(status='Traced', cropped=False)\n neurons_df, _roi_counts_df = fetch_neurons(criteria)\n p = plot_soma_projections(neurons_df, 'cellBodyFiber')\n show(p)\n\n \"\"\"\n import bokeh\n from bokeh.plotting import figure\n from bokeh.layouts import gridplot\n\n neurons_df = neurons_df[['somaLocation', color_by]].copy()\n\n extract_soma_coords(neurons_df)\n assign_colors(neurons_df, color_by)\n\n neurons_with_soma_df = neurons_df.query('not somaLocation.isnull()')\n def soma_projection(axis1, axis2, flip1, flip2):\n x = neurons_with_soma_df[f'soma_{axis1}'].values\n y = neurons_with_soma_df[f'soma_{axis2}'].values\n p = figure(title=f'{axis1}{axis2}')\n p.scatter(x, y, color=neurons_with_soma_df['color'])\n p.x_range.flipped = flip1\n p.y_range.flipped = flip2\n p.toolbar.logo = None\n return p\n\n p_xy = soma_projection('x', 'y', False, True)\n p_xz = soma_projection('x', 'z', False, True)\n p_zy = soma_projection('z', 'y', True, True)\n\n # This will produce one big plot with a shared toolbar\n layout = gridplot([[p_xy, p_xz], [None, p_zy]])\n\n # Discard the help buttons and bokeh logo\n tbar = layout.children[0].toolbar\n tbar.logo = None\n tbar.tools = [t for t in tbar.tools if not isinstance(t, bokeh.models.tools.HelpTool)]\n\n return layout\n\n\ndef plot_soma_3d(neurons_df, color_by='cellBodyFiber', point_size=1.0):\n \"\"\"\n Plot the soma locations in 3D, colored randomly according\n to the column given in ``color_by``.\n\n Requires ``ipyvolume``.\n If using Jupyterlab, install it like this:\n\n .. code-block: bash\n\n conda install -c conda-forge ipyvolume\n jupyter labextension install ipyvolume\n\n Example:\n\n .. code-block: python\n\n from neuprint import fetch_neurons, NeuronCriteria as NC\n\n criteria = NC(status='Traced', cropped=False)\n neurons_df, _roi_counts_df = fetch_neurons(criteria)\n plot_soma_3d(neurons_df, 'cellBodyFiber')\n \"\"\"\n import ipyvolume.pylab as ipv\n neurons_df = neurons_df[['somaLocation', color_by]].copy()\n\n extract_soma_coords(neurons_df)\n assign_colors(neurons_df, color_by)\n\n neurons_with_soma_df = neurons_df.query('not somaLocation.isnull()')\n assert neurons_with_soma_df.eval('color.isnull()').sum() == 0\n\n soma_x = neurons_with_soma_df['soma_x'].values\n soma_y = neurons_with_soma_df['soma_y'].values\n soma_z = neurons_with_soma_df['soma_z'].values\n\n def color_to_vals(color_string):\n # Convert bokeh color string into float tuples,\n # e.g. 
'#00ff00' -> (0.0, 1.0, 0.0)\n s = color_string\n return (int(s[1:3], 16) / 255,\n int(s[3:5], 16) / 255,\n int(s[5:7], 16) / 255 )\n\n color_vals = neurons_with_soma_df['color'].apply(color_to_vals).tolist()\n\n # DVID coordinate system assumes (0,0,0) is in the upper-left.\n # For consistency with DVID and neuroglancer conventions,\n # we invert the Y and X coordinates.\n ipv.figure()\n ipv.scatter(soma_x, -soma_y, -soma_z, color=color_vals, marker=\"circle_2d\", size=point_size)\n ipv.show()\n\n\n@inject_client\ndef plot_skeleton_3d(skeleton, color='blue', *, client=None):\n \"\"\"\n Plot the given skeleton in 3D.\n\n Args:\n skeleton:\n Either a bodyId or a pre-fetched pandas DataFrame\n\n color:\n See ``ipyvolume`` docs.\n Examples: ``'blue'``, ``'#0000ff'``\n If the skeleton is fragmented, you can give a list\n of colors and each fragment will be shown in a\n different color.\n\n Requires ``ipyvolume``.\n If using Jupyterlab, install it like this:\n\n .. code-block: bash\n\n conda install -c conda-forge ipyvolume\n jupyter labextension install ipyvolume\n \"\"\"\n import ipyvolume.pylab as ipv\n\n if np.issubdtype(type(skeleton), np.integer):\n skeleton = client.fetch_skeleton(skeleton, format='pandas')\n\n assert isinstance(skeleton, pd.DataFrame)\n g = skeleton_df_to_nx(skeleton)\n\n def skel_path(root):\n \"\"\"\n We want to plot each skeleton fragment as a single continuous line,\n but that means we have to backtrack: parent -> leaf -> parent\n to avoid jumping from one branch to another.\n This means that the line will be drawn on top of itself,\n and we'll have 2x as many line segments in the plot,\n but that's not a big deal.\n \"\"\"\n def accumulate_points(n):\n p = (g.nodes[n]['x'], g.nodes[n]['y'], g.nodes[n]['z'])\n points.append(p)\n\n children = [*g.successors(n)]\n if not children:\n return\n for c in children:\n accumulate_points(c)\n points.append(p)\n\n points = []\n accumulate_points(root)\n return np.asarray(points)\n\n # Skeleton may contain multiple fragments,\n # so compute the path for each one.\n def skel_paths(df):\n paths = []\n for root in df.query('link == -1')['rowId']:\n paths.append(skel_path(root))\n return paths\n\n paths = skel_paths(skeleton)\n if isinstance(color, str):\n colors = len(paths)*[color]\n else:\n colors = (1+len(paths)//len(color))*color\n\n ipv.figure()\n for points, color in zip(paths, colors):\n ipv.plot(*points.transpose(), color)\n ipv.show()\n\n\ndef extract_soma_coords(neurons_df):\n \"\"\"\n Expand the ``somaLocation`` column into three separate\n columns for ``soma_x``, ``soma_y``, and ``soma_z``.\n\n If ``somaLocation is None``, then the soma coords will be ``NaN``.\n\n Works in-place.\n \"\"\"\n neurons_df['soma_x'] = neurons_df['soma_y'] = neurons_df['soma_z'] = np.nan\n\n somaloc = neurons_df.query('not somaLocation.isnull()')['somaLocation']\n somaloc_array = np.asarray(somaloc.tolist())\n\n neurons_df.loc[somaloc.index, 'soma_x'] = somaloc_array[:, 0]\n neurons_df.loc[somaloc.index, 'soma_y'] = somaloc_array[:, 1]\n neurons_df.loc[somaloc.index, 'soma_z'] = somaloc_array[:, 2]\n\n\ndef assign_colors(neurons_df, color_by='cellBodyFiber'):\n \"\"\"\n Use a random colortable to assign a color to each row,\n according to the column given in ``color_by``.\n\n NaN values are always black.\n\n Works in-place.\n \"\"\"\n from bokeh.palettes import Turbo256\n colors = list(Turbo256)\n colors[0] = '#000000'\n color_categories = np.sort(neurons_df[color_by].fillna('').unique())\n assert color_categories[0] == ''\n\n 
np.random.seed(0)\n np.random.shuffle(color_categories[1:])\n assert color_categories[0] == ''\n\n while len(colors) < len(color_categories):\n colors.extend(colors[1:])\n\n color_mapping = dict(zip(color_categories, colors))\n neurons_df['color'] = neurons_df[color_by].fillna('').map(color_mapping)\n\n"
] | [
[
"numpy.random.seed",
"numpy.asarray",
"numpy.random.shuffle"
]
] |
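The color_to_vals helper in the plotting module above is plain hex-triplet parsing; isolated, it looks like this:

    def hex_to_rgb_floats(color_string):
        # '#00ff00' -> (0.0, 1.0, 0.0): each channel is a two-digit hex number
        s = color_string
        return (int(s[1:3], 16) / 255,
                int(s[3:5], 16) / 255,
                int(s[5:7], 16) / 255)

    assert hex_to_rgb_floats('#00ff00') == (0.0, 1.0, 0.0)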
narumiruna/efficientnet-pytorch | [
"64b99dfdbbe50a9356f6c61579995bc8ac7857a1"
] | [
"efficientnet/trainer.py"
] | [
"import os\nimport shutil\nfrom abc import ABCMeta, abstractmethod\n\nimport mlconfig\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, optim\nfrom torch.utils import data\nfrom tqdm import tqdm, trange\n\nfrom .metrics import Accuracy, Average\n\n\nclass AbstractTrainer(metaclass=ABCMeta):\n\n @abstractmethod\n def fit(self):\n raise NotImplementedError\n\n @abstractmethod\n def train(self):\n raise NotImplementedError\n\n @abstractmethod\n def evaluate(self):\n raise NotImplementedError\n\n\[email protected]\nclass Trainer(AbstractTrainer):\n\n def __init__(self, model: nn.Module, optimizer: optim.Optimizer, train_loader: data.DataLoader,\n valid_loader: data.DataLoader, scheduler: optim.lr_scheduler._LRScheduler, device: torch.device,\n num_epochs: int, output_dir: str):\n self.model = model\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.train_loader = train_loader\n self.valid_loader = valid_loader\n self.device = device\n self.num_epochs = num_epochs\n self.output_dir = output_dir\n\n self.epoch = 1\n self.best_acc = 0\n\n def fit(self):\n epochs = trange(self.epoch, self.num_epochs + 1, desc='Epoch', ncols=0)\n for self.epoch in epochs:\n self.scheduler.step()\n\n train_loss, train_acc = self.train()\n valid_loss, valid_acc = self.evaluate()\n\n self.save_checkpoint(os.path.join(self.output_dir, 'checkpoint.pth'))\n if valid_acc > self.best_acc:\n self.best_acc = valid_acc.value\n self.save_checkpoint(os.path.join(self.output_dir, 'best.pth'))\n\n epochs.set_postfix_str(f'train loss: {train_loss}, train acc: {train_acc}, '\n f'valid loss: {valid_loss}, valid acc: {valid_acc}, '\n f'best valid acc: {self.best_acc:.2f}')\n\n def train(self):\n self.model.train()\n\n train_loss = Average()\n train_acc = Accuracy()\n\n train_loader = tqdm(self.train_loader, ncols=0, desc='Train')\n for x, y in train_loader:\n x = x.to(self.device)\n y = y.to(self.device)\n\n output = self.model(x)\n loss = F.cross_entropy(output, y)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n train_loss.update(loss.item(), number=x.size(0))\n train_acc.update(output, y)\n\n train_loader.set_postfix_str(f'train loss: {train_loss}, train acc: {train_acc}.')\n\n return train_loss, train_acc\n\n def evaluate(self):\n self.model.eval()\n\n valid_loss = Average()\n valid_acc = Accuracy()\n\n with torch.no_grad():\n valid_loader = tqdm(self.valid_loader, desc='Validate', ncols=0)\n for x, y in valid_loader:\n x = x.to(self.device)\n y = y.to(self.device)\n\n output = self.model(x)\n loss = F.cross_entropy(output, y)\n\n valid_loss.update(loss.item(), number=x.size(0))\n valid_acc.update(output, y)\n\n valid_loader.set_postfix_str(f'valid loss: {valid_loss}, valid acc: {valid_acc}.')\n\n return valid_loss, valid_acc\n\n def save_checkpoint(self, f):\n self.model.eval()\n\n checkpoint = {\n 'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'scheduler': self.scheduler.state_dict(),\n 'epoch': self.epoch,\n 'best_acc': self.best_acc\n }\n\n dirname = os.path.dirname(f)\n if dirname:\n os.makedirs(dirname, exist_ok=True)\n\n torch.save(checkpoint, f)\n\n def resume(self, f):\n checkpoint = torch.load(f, map_location=self.device)\n\n self.model.load_state_dict(checkpoint['model'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n self.epoch = checkpoint['epoch'] + 1\n self.best_acc = checkpoint['best_acc']\n"
] | [
[
"torch.save",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"torch.load"
]
] |
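The trainer above checkpoints everything needed to resume: model weights, optimizer state, scheduler state, the epoch counter, and the best metric. The round trip in isolation, with a throwaway model (a sketch, not the project's API):

    import os
    import torch
    from torch import nn, optim

    model = nn.Linear(4, 2)
    optimizer = optim.SGD(model.parameters(), lr=0.1)

    checkpoint = {'model': model.state_dict(),
                  'optimizer': optimizer.state_dict(),
                  'epoch': 7,
                  'best_acc': 0.91}
    torch.save(checkpoint, 'checkpoint.pth')

    # Resume: restore states, then continue from the next epoch
    ckpt = torch.load('checkpoint.pth', map_location='cpu')
    model.load_state_dict(ckpt['model'])
    optimizer.load_state_dict(ckpt['optimizer'])
    start_epoch = ckpt['epoch'] + 1
    os.remove('checkpoint.pth')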
Sundragon1993/CS231n_2020 | [
"47ebb67491dfedf1d9040866422b6cf369f0fbac"
] | [
"A6/a6_helper.py"
] | [
"import torch\nimport time\nimport math\nimport os\nimport shutil\nimport torch.optim as optim\nfrom torchvision import models, datasets, transforms\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom vae import loss_function\nfrom torch import nn\n\n\ndef hello_helper():\n print(\"Hello from a6_helper.py!\")\n\ndef show_images(images):\n images = torch.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)\n sqrtn = int(math.ceil(math.sqrt(images.shape[0])))\n sqrtimg = int(math.ceil(math.sqrt(images.shape[1])))\n\n fig = plt.figure(figsize=(sqrtn, sqrtn))\n gs = gridspec.GridSpec(sqrtn, sqrtn)\n gs.update(wspace=0.05, hspace=0.05)\n\n for i, img in enumerate(images):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n plt.imshow(img.reshape([sqrtimg,sqrtimg]))\n return \n\ndef count_params(model):\n \"\"\"Count the number of parameters in the model\"\"\"\n param_count = sum([p.numel() for p in model.parameters()])\n return param_count\n\ndef initialize_weights(m):\n \"\"\" Initializes the weights of a torch.nn model using xavier initialization\"\"\"\n if isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose2d):\n nn.init.xavier_uniform_(m.weight.data)\n\n\ndef one_hot(labels, class_size):\n \"\"\"\n Create one hot label matrix of size (N, C)\n\n Inputs:\n - labels: Labels Tensor of shape (N,) representing a ground-truth label\n for each MNIST image\n - class_size: Scalar representing of target classes our dataset \n Outputs:\n - targets: One-hot label matrix of (N, C), where targets[i, j] = 1 when \n the ground truth label for image i is j, and targets[i, :j] & \n targets[i, j + 1:] are equal to 0\n \"\"\"\n targets = torch.zeros(labels.size(0), class_size)\n for i, label in enumerate(labels):\n targets[i, label] = 1\n return targets\n\ndef train_vae(epoch, model, train_loader, cond=False):\n \"\"\"\n Train a VAE or CVAE!\n\n Inputs:\n - epoch: Current epoch number \n - model: VAE model object\n - train_loader: PyTorch Dataloader object that contains our training data\n - cond: Boolean value representing whether we're training a VAE or \n Conditional VAE \n \"\"\"\n model.train()\n train_loss = 0\n num_classes = 10\n loss = None\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n for batch_idx, (data, labels) in enumerate(train_loader):\n data = data.to(device='cuda:0')\n if cond:\n one_hot_vec = one_hot(labels, num_classes).to(device='cuda')\n recon_batch, mu, logvar = model(data, one_hot_vec)\n else:\n recon_batch, mu, logvar = model(data)\n optimizer.zero_grad()\n loss = loss_function(recon_batch, data, mu, logvar)\n loss.backward()\n train_loss += loss.data\n optimizer.step()\n print('Train Epoch: {} \\tLoss: {:.6f}'.format(\n epoch, loss.data))\n\n\n\n"
] | [
[
"torch.reshape",
"torch.nn.init.xavier_uniform_",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot"
]
] |
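The one_hot helper above fills the target matrix with a Python loop; Tensor.scatter_ builds the same matrix in a single call (an alternative sketch, not the assignment's code):

    import torch

    def one_hot_vectorized(labels, class_size):
        # scatter_ writes 1.0 at position (i, labels[i]) for every row i at once
        targets = torch.zeros(labels.size(0), class_size)
        return targets.scatter_(1, labels.unsqueeze(1), 1.0)

    labels = torch.tensor([2, 0, 1])
    print(one_hot_vectorized(labels, 4))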
GeorgeDUT/MetaRLSAS | [
"d50ca936fd116435b871ebd63a82f61cb7ff036f"
] | [
"old_file_maybe_useful/test-my-plus.py"
] | [
"\"\"\"\ntest-my-plus.py\nthis file is based on test-my.py\n\npython test-my-plus.py --config mdp-deterministic/config.json --policy mdp-deterministic/policy.th --output mdp-deterministic/results.npz --meta-batch-size 100 --num-batches 20 --num-workers 8\n\n\"\"\"\n\nimport maml_rl.envs\nimport gym\nimport torch\nimport json\nimport numpy as np\nfrom tqdm import trange\nimport time\n\nfrom maml_rl.metalearners import MAMLTRPO\nfrom maml_rl.baseline import LinearFeatureBaseline\nfrom maml_rl.samplers import MultiTaskSampler\nfrom maml_rl.utils.helpers import get_policy_for_env, get_input_size\nfrom maml_rl.utils.reinforcement_learning import get_returns\n\n\ndef main(args):\n with open(args.config, 'r') as f:\n config = json.load(f)\n\n if args.seed is not None:\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n # env = gym.make(config['env-name'], **config['env-kwargs'])\n env = gym.make(config['env-name'], **config.get('env-kwargs', {}))\n env.close()\n\n # Policy\n policy = get_policy_for_env(env,\n hidden_sizes=config['hidden-sizes'],\n nonlinearity=config['nonlinearity'])\n\n with open(args.policy, 'rb') as f:\n state_dict = torch.load(f, map_location=torch.device(args.device))\n policy.load_state_dict(state_dict)\n policy.share_memory()\n\n\n # Baseline\n baseline = LinearFeatureBaseline(get_input_size(env))\n\n # Sampler\n sampler = MultiTaskSampler(config['env-name'],\n env_kwargs=config.get('env-kwargs', {}),\n batch_size=config['fast-batch-size'],\n policy=policy,\n baseline=baseline,\n env=env,\n seed=args.seed,\n num_workers=args.num_workers)\n\n # TODO: online adaptation step-1\n metalearner = MAMLTRPO(policy,\n fast_lr=config['fast-lr'],\n first_order=config['first-order'],\n device=args.device)\n # endtodo\n\n logs = {'tasks': []}\n train_returns, valid_returns = [], []\n num_iterations = 0\n for batch in trange(args.num_batches):\n \"\"\"old version of test-my.py\"\"\"\n # tasks = sampler.sample_tasks(num_tasks=args.meta_batch_size)\n # train_episodes, valid_episodes = sampler.sample(tasks,\n # num_steps=config['num-steps'],\n # fast_lr=config['fast-lr'],\n # gamma=config['gamma'],\n # gae_lambda=config['gae-lambda'],\n # device=args.device)\n #\n # logs['tasks'].extend(tasks)\n # train_returns.append(get_returns(train_episodes[0]))\n # valid_returns.append(get_returns(valid_episodes))\n\n \"\"\"new version of test-my-plus.py\"\"\"\n # TODO: online adaptation step-2\n tasks = sampler.sample_tasks(num_tasks=config['meta-batch-size'])\n futures = sampler.sample_async(tasks,\n num_steps=config['num-steps'],\n fast_lr=config['fast-lr'],\n gamma=config['gamma'],\n gae_lambda=config['gae-lambda'],\n device=args.device)\n logs = metalearner.step(*futures,\n max_kl=config['max-kl'],\n cg_iters=config['cg-iters'],\n cg_damping=config['cg-damping'],\n ls_max_steps=config['ls-max-steps'],\n ls_backtrack_ratio=config['ls-backtrack-ratio'])\n\n train_episodes, valid_episodes = sampler.sample_wait(futures)\n num_iterations += sum(sum(episode.lengths) for episode in train_episodes[0])\n num_iterations += sum(sum(episode.lengths) for episode in valid_episodes)\n logs.update(tasks=tasks,\n num_iterations=num_iterations,\n train_returns=get_returns(train_episodes[0]),\n valid_returns=get_returns(valid_episodes))\n\n train_returns.append(get_returns(train_episodes[0]))\n valid_returns.append(get_returns(valid_episodes))\n\n # for name,param in policy.layer1.named_parameters():\n # print(name,param)\n # endtodo\n\n logs['train_returns'] = np.concatenate(train_returns, 
axis=0)\n    logs['valid_returns'] = np.concatenate(valid_returns, axis=0)\n    print('name', args.output)\n    with open(args.output, 'wb') as f:\n        np.savez(f, **logs)\n\n\nif __name__ == '__main__':\n    import argparse\n    import os\n    import multiprocessing as mp\n\n    parser = argparse.ArgumentParser(description='Reinforcement learning with '\n        'Model-Agnostic Meta-Learning (MAML) - Test')\n\n    parser.add_argument('--config', type=str, required=True,\n        help='path to the configuration file')\n    parser.add_argument('--policy', type=str, required=True,\n        help='path to the policy checkpoint')\n\n    # Evaluation\n    evaluation = parser.add_argument_group('Evaluation')\n    evaluation.add_argument('--num-batches', type=int, default=10,\n        help='number of batches (default: 10)')\n    evaluation.add_argument('--meta-batch-size', type=int, default=40,\n        help='number of tasks per batch (default: 40)')\n\n    # Miscellaneous\n    misc = parser.add_argument_group('Miscellaneous')\n    misc.add_argument('--output', type=str, required=True,\n        help='name of the output file, e.g. results.npz')\n    misc.add_argument('--seed', type=int, default=1,\n        help='random seed (default: 1)')\n    misc.add_argument('--num-workers', type=int, default=mp.cpu_count() - 1,\n        help='number of workers for trajectories sampling (default: '\n        '{0})'.format(mp.cpu_count() - 1))\n    misc.add_argument('--use-cuda', action='store_true',\n        help='use cuda (default: false, use cpu). WARNING: Full support for cuda '\n        'is not guaranteed. Using CPU is encouraged.')\n\n    args = parser.parse_args()\n    args.device = ('cuda' if (torch.cuda.is_available()\n        and args.use_cuda) else 'cpu')\n\n    main(args)\n"
] | [
[
"numpy.concatenate",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"numpy.savez",
"torch.cuda.is_available"
]
] |
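The evaluation script above persists its logs with np.savez, which stores each keyword argument as a named array inside one .npz archive. The save/load round trip on its own:

    import os
    import numpy as np

    logs = {'train_returns': np.zeros((3, 5)),
            'valid_returns': np.ones((3, 5))}

    with open('results.npz', 'wb') as f:
        np.savez(f, **logs)  # each dict key becomes a named array

    data = np.load('results.npz')
    print(data['train_returns'].shape)  # (3, 5)
    os.remove('results.npz')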
thurinj/mtuq | [
"8c539ac9da4c0e7eb72c5218b7818107c44fecc7"
] | [
"examples/GridSearch.DoubleCouple+Magnitude+Depth.py"
] | [
"#!/usr/bin/env python\n\nimport os\nimport numpy as np\n\nfrom mtuq import read, open_db, download_greens_tensors\nfrom mtuq.event import Origin\nfrom mtuq.graphics import plot_data_greens2, plot_misfit_depth, plot_misfit_dc\nfrom mtuq.grid import DoubleCoupleGridRegular\nfrom mtuq.grid_search import grid_search\nfrom mtuq.misfit import Misfit\nfrom mtuq.process_data import ProcessData\nfrom mtuq.util import fullpath\nfrom mtuq.util.cap import parse_station_codes, Trapezoid\n\n\n\nif __name__=='__main__':\n #\n # Carries out grid search over source orientation, magnitude, and depth\n # \n # USAGE\n # mpirun -n <NPROC> python GridSearch.DoubleCouple+Magnitude+Depth.py\n #\n # This is the most complicated example. For a much simpler one, see\n # SerialGridSearch.DoubleCouple.py\n # \n\n\n #\n # We will investigate the source process of an Mw~4 earthquake using data\n # from a regional seismic array\n #\n\n path_data= fullpath('data/examples/20090407201255351/*.[zrt]')\n path_weights= fullpath('data/examples/20090407201255351/weights.dat')\n event_id= '20090407201255351'\n model= 'ak135'\n\n\n #\n # Body and surface wave measurements will be made separately\n #\n\n process_bw = ProcessData(\n filter_type='Bandpass',\n freq_min= 0.1,\n freq_max= 0.333,\n pick_type='taup',\n taup_model=model,\n window_type='body_wave',\n window_length=15.,\n capuaf_file=path_weights,\n )\n\n process_sw = ProcessData(\n filter_type='Bandpass',\n freq_min=0.025,\n freq_max=0.0625,\n pick_type='taup',\n taup_model=model,\n window_type='surface_wave',\n window_length=150.,\n capuaf_file=path_weights,\n )\n\n\n #\n # For our objective function, we will use a sum of body and surface wave\n # contributions\n #\n\n misfit_bw = Misfit(\n norm='L2',\n time_shift_min=-2.,\n time_shift_max=+2.,\n time_shift_groups=['ZR'],\n )\n\n misfit_sw = Misfit(\n norm='L2',\n time_shift_min=-10.,\n time_shift_max=+10.,\n time_shift_groups=['ZR','T'],\n )\n\n\n #\n # User-supplied weights control how much each station contributes to the\n # objective function\n #\n\n station_id_list = parse_station_codes(path_weights)\n\n\n #\n # We will search over a range of depths about the catalog origin\n #\n\n\n catalog_origin = Origin({\n 'time': '2009-04-07T20:12:55.000000Z',\n 'latitude': 61.454200744628906,\n 'longitude': -149.7427978515625,\n 'depth_in_m': 33033.599853515625,\n 'id': '20090407201255351'\n })\n\n depths = np.array(\n # depth in meters\n [25000, 30000, 35000, 40000, \n 45000, 50000, 55000, 60000])\n\n origins = []\n for depth in depths:\n origins += [catalog_origin.copy()]\n setattr(origins[-1], 'depth_in_m', depth)\n\n\n\n #\n # Next, we specify the moment tensor grid and source-time function\n #\n\n magnitudes = np.array(\n # moment magnitude (Mw)\n [4.3, 4.4, 4.5, \n 4.6, 4.7, 4.8]) \n\n grid = DoubleCoupleGridRegular(\n npts_per_axis=30,\n magnitudes=magnitudes)\n\n wavelet = Trapezoid(\n magnitude=4.5)\n\n\n #\n # The main I/O work starts now\n #\n\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.rank\n nproc = comm.Get_size()\n\n if rank==0:\n print('Reading data...\\n')\n data = read(path_data, format='sac', \n event_id=event_id,\n station_id_list=station_id_list,\n tags=['units:cm', 'type:velocity']) \n\n\n data.sort_by_distance()\n stations = data.get_stations()\n\n\n print('Processing data...\\n')\n data_bw = data.map(process_bw)\n data_sw = data.map(process_sw)\n\n\n print('Reading Greens functions...\\n')\n greens = download_greens_tensors(stations, origins, model)\n\n\n print('Processing Greens 
functions...\\n')\n greens.convolve(wavelet)\n greens_bw = greens.map(process_bw)\n greens_sw = greens.map(process_sw)\n\n\n else:\n stations = None\n data_bw = None\n data_sw = None\n greens_bw = None\n greens_sw = None\n\n\n stations = comm.bcast(stations, root=0)\n data_bw = comm.bcast(data_bw, root=0)\n data_sw = comm.bcast(data_sw, root=0)\n greens_bw = comm.bcast(greens_bw, root=0)\n greens_sw = comm.bcast(greens_sw, root=0)\n\n\n #\n # The main computational work starts now\n #\n\n if rank==0:\n print('Evaluating body wave misfit...\\n')\n\n results_bw = grid_search(\n data_bw, greens_bw, misfit_bw, origins, grid)\n\n if rank==0:\n print('Evaluating surface wave misfit...\\n')\n\n results_sw = grid_search(\n data_sw, greens_sw, misfit_sw, origins, grid)\n\n if rank==0:\n results = results_bw + results_sw\n\n # source index corresponding to minimum misfit\n idx = results.idxmin('source')\n\n best_source = grid.get(idx)\n lune_dict = grid.get_dict(idx)\n\n # origin index corresponding to minimum misfit\n idx = results.idxmin('origin')\n\n best_origin = origins[idx]\n\n\n\n #\n # Saving results\n #\n\n if comm.rank==0:\n print('Saving results...\\n')\n\n plot_data_greens2(event_id+'_waveforms.png',\n data_bw, data_sw, greens_bw, greens_sw, \n process_bw, process_sw, misfit_bw, misfit_sw, \n stations, best_origin, best_source, lune_dict)\n\n plot_misfit_depth(event_id+'_misfit_depth.png',\n results, origins, grid, event_id)\n\n print('\\nFinished\\n')\n"
] | [
[
"numpy.array"
]
] |
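The grid-search example above follows the usual MPI division of labor: rank 0 performs the expensive reads and preprocessing, then comm.bcast hands identical copies to every worker. The skeleton of that pattern, assuming mpi4py is installed (launch with something like mpirun -n 2 python sketch.py):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if rank == 0:
        data = {'stations': ['STA1', 'STA2']}  # expensive I/O happens once
    else:
        data = None

    data = comm.bcast(data, root=0)  # every rank now holds the same object
    print(rank, data)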
Candida18/Job-Portal-with-Automated-Resume-Screening | [
"19d19464ad3d1714da856656753a4afdfe257b31"
] | [
"Job Portal with Automated Resume Screening/form.py"
] | [
"from cProfile import label\nfrom docx2txt.docx2txt import process\nimport streamlit as st\nimport streamlit as st\nimport streamlit.components.v1 as stc\nimport sqlite3 as sql\nimport pandas as pd\nimport datetime\n\n# File Processing Pkgs\nimport pandas as pd\nimport docx2txt\nfrom PIL import Image\nfrom PyPDF2 import PdfFileReader\nimport pdfplumber\nimport os\nimport webbrowser\n\njob_desc_dir = \"Data/JobDesc/\"\njob_description_names = os.listdir(job_desc_dir)\n#print(job_description_names)\n\n\n\n# DB Management\nimport sqlite3\n\nconn = sqlite3.connect(\"form_data.db\")\nc = conn.cursor()\n# DB Functions\ndef create_userinfo():\n\tc.execute(\n\t\t\"CREATE TABLE IF NOT EXISTS userinfo(name TEXT,age NUMBER,email TEXT,phone TEXT,jobs TEXT, FileName TEXT,resume TEXT, gender TEXT, openness NUMBER, neuroticism NUMBER, conscientiousness NUMBER, agreeableness NUMBER, extraversion NUMBER)\"\n\t)\n\n\ndef add_userdata(name, age, email, phone, jobs, filename, resume,gender, openness, neuroticism, conscientiousness, agreeableness, extraversion):\n\tc.execute(\n\t\t\"INSERT INTO userinfo(name,age,email,phone,jobs,FileName,resume, gender, openness, neuroticism, conscientiousness, agreeableness, extraversion) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n\t\t(name, age, email, phone, jobs, filename,resume, gender ,openness, neuroticism, conscientiousness, agreeableness, extraversion),\n\t)\n\tconn.commit()\n\n\n\ndef main():\n\tst.title(\"Job Application\")\n\tJobs = pd.read_csv(\"CSV/JobDesc_data.csv\")\n\t\t\n\tmy_form = st.form(key=\"form1\", clear_on_submit=True)\n\tmy_form.subheader(\"Enter Your Details To Apply For The Job\")\n\tname = my_form.text_input(label=\"Enter your name\")\n\tage = my_form.text_input(label=\"Enter your Age\")\n\temail = my_form.text_input(label=\"Enter your Email Id\")\n\tphone = my_form.text_input(label=\"Enter your Contact Number\",max_chars = 10)\n\t# gender = my_form.radio('Select your Gender:', ['Male', 'Female'])\n\tgender = my_form.text_input(label=\"Enter your gender ( 1 - Male, 0 - Female)\", max_chars = 1)\n\tjobs = my_form.selectbox(\n\tlabel=\"Select the Job Domain\",options=Jobs['name'])\n\n\tdocx_file = my_form.file_uploader(label=\"Upload Your Resume Here\", type=[\"docx\"])\n\tmy_form.markdown(\"---\")\n\tmy_form.subheader(\"Tell Us About Yourself\")\n\topenness = my_form.slider('Do you enjoy new experiences (Openness) ?', 0, 10)\n\tneuroticism = my_form.slider('How often do you feel negativity (Neuroticism) ?', 0, 10)\n\tconscientiousness = my_form.slider('Would you do your work well and thoroughly (Conscientiousness) ? ', 0, 10)\n\tagreeableness = my_form.slider('How much would you like to work with your peers (Agreeableness) ? 
', 0, 10)\n\textraversion = my_form.slider('How outgoing and social interactive are you (Extraversion) ?', 0, 10)\n\t####### Saves Resume in Local Directory #######\n\tif docx_file is not None:\n\n\t\twith open(\n\t\t\tos.path.join(\"C:/Users/Gini/Mini Project/Final_ResumePPV2/Data/Resume\", docx_file.name), \"wb\"\n\t\t) as f:\n\t\t\tf.write((docx_file).getbuffer())\n\n\tsubmit = my_form.form_submit_button(label=\"Submit your application\")\n\tresume = text_resume(docx_file)\n\t\n\tif docx_file is not None:\n\n\t\tfilename = docx_file.name\n\n\tif submit:\n\t\tcreate_userinfo()\n\t\tadd_userdata(name, age, email, phone, jobs, filename, resume, gender, openness, neuroticism, conscientiousness, agreeableness, extraversion)\n\t\tst.success(\"You have successfully submitted the form\")\n\n\n\n\tconnection = sql.connect(\"form_data.db\")\n\tdf = pd.read_sql(sql=\"Select * FROM userinfo\", con=connection)\n\tdf.to_csv(\"CSV/Form_data.csv\", index=False)\n\n\ndef text_resume(docx_file):\n\tif docx_file is not None:\n\n\t\t# Check File Type\n\t\tif docx_file.type == \"text/plain\":\n\n\t\t\tst.text(str(docx_file.read(), \"utf-8\")) # empty\n\t\t\traw_text = str(\n\t\t\t\tdocx_file.read(), \"utf-8\"\n\t\t\t) # works with st.text and st.write,used for further processing\n\t\t\t#print(raw_text)\n\t\t\treturn raw_text\n\n\t\telif docx_file.type == \"application/pdf\":\n\n\t\t\ttry:\n\t\t\t\twith pdfplumber.open(docx_file) as pdf:\n\t\t\t\t\tpage = pdf.pages[0]\n\t\t\t\t\traw_text = page.extract_text()\n\t\t\t\treturn raw_text\n\t\t\texcept:\n\t\t\t\tst.write(\"None\")\n\n\t\telif (\n\t\t\tdocx_file.type\n\t\t\t== \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\"\n\t\t):\n\t\t\t# Use the right file processor ( Docx,Docx2Text,etc)\n\t\t\traw_text = docx2txt.process(docx_file) # Parse in the uploadFile Class\n\t\t\t#print(raw_text)\n\t\t\treturn raw_text\n\n\nif __name__ == \"__main__\":\n\tmain()\n"
] | [
[
"pandas.read_csv",
"pandas.read_sql"
]
] |
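The form code above uses sqlite3's ? placeholders, which let the driver handle quoting and keep user input out of the SQL string. The same pattern in a self-contained form, with an in-memory database and hypothetical columns:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    c = conn.cursor()
    c.execute("CREATE TABLE IF NOT EXISTS userinfo(name TEXT, age NUMBER)")

    # Parameterized INSERT: values are bound, never interpolated into the SQL
    c.execute("INSERT INTO userinfo(name, age) VALUES (?,?)", ("Ada", 36))
    conn.commit()

    print(c.execute("SELECT * FROM userinfo").fetchall())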
ivallesp/numerai | [
"06cff73ee5dd47a7dada123d9c74dfcaafab76f8"
] | [
"src/model_helpers.py"
] | [
"__author__ = \"ivallesp\"\nimport logging\nimport numpy as np\nfrom sklearn.model_selection import GridSearchCV\n\ndef train_model_with_gridsearch(X, y, estimator, param_grid, scoring=\"neg_log_loss\", cv=10, n_jobs=1, verbose=0):\n \"\"\"\n Trains a model using gridsearch and returns the best model trained with all the data and the results dictionary.\n :param estimator: sklearn-like model (sklearn object)\n :param param_grid: dictionary of parameters from which the grid is going to be built (dict)\n :param scoring: either a scoring function or a string (func|str)\n :param cv: either a cv object or an integer indicating the number of folds (sklearn obj|int)\n :param n_jobs: number of jobs (int)\n :param verbose: verbosity level (the greater the number, the more verbose is the model) (int)\n :return: model, results dict (sklearn model|dictionary)\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"Requested gridsearch process\")\n logger.info(\"Training the requested estimator using gridsearch\")\n\n grid_search = GridSearchCV(estimator=estimator, param_grid=param_grid, scoring=scoring, cv=cv, n_jobs=n_jobs,\n refit=True, verbose=verbose, return_train_score=True)\n\n trained_model = grid_search.fit(X, y)\n results = trained_model.cv_results_\n logger.info(\"Gridsearch trained successfully. Generating results JSON\")\n # Fix dictionary for allowing a further JSON conversion\n for key in results:\n if type(results[key]) in [np.ndarray, np.array, np.ma.core.MaskedArray]:\n results[key] = results[key].tolist()\n return trained_model, results\n\n"
] | [
[
"sklearn.model_selection.GridSearchCV"
]
] |
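As a quick orientation for the row above, here is a minimal usage sketch of `train_model_with_gridsearch`; the synthetic data and the logistic-regression grid are illustrative assumptions, not part of the stored file:

```python
# Hypothetical usage of the helper stored above (sketch, not from the repo).
import numpy as np
from sklearn.linear_model import LogisticRegression

from src.model_helpers import train_model_with_gridsearch  # path taken from the row above

rng = np.random.RandomState(0)
X = rng.rand(200, 5)                  # synthetic feature matrix
y = rng.randint(0, 2, size=200)       # synthetic binary target

model, results = train_model_with_gridsearch(
    X, y,
    estimator=LogisticRegression(solver="liblinear"),
    param_grid={"C": [0.1, 1.0, 10.0]},
    cv=3,
)
print(model.best_params_)               # best grid point, refit on all the data
print(max(results["mean_test_score"]))  # results are already plain lists
```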
mvpossum/1010Solver | [
"4cc48465675bbf9071cc57d2ab1485443d749603"
] | [
"utils.py"
] | [
"import numpy as np\nimport os\n\nRAW_SAMPLES_DIR='samples/raw'\nSCREEN_SAMPLES_DIR='samples/screen'\n\ndef get_files(input_dir):\n return [os.path.join(input_dir, file_name) for file_name in os.listdir(input_dir)]\n\ndist = lambda p,q: np.linalg.norm(q-p)\n\ndef ensure_dir(file_path):\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n"
] | [
[
"numpy.linalg.norm"
]
] |
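A couple of one-liners exercising the helpers above (the module path and file name are illustrative assumptions):

```python
# Illustrative calls against utils.py above (not from the repo).
import numpy as np
from utils import dist, ensure_dir

print(dist(np.array([0, 0]), np.array([3, 4])))  # 5.0 -- Euclidean distance
ensure_dir("samples/raw/board.png")              # creates samples/raw/ if missing
```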
AhmedAlaa10/Consistent_Video_Depth_Estimation | [
"1a8868eadcf0b2082cdfea8ed339865f0ba8ea01"
] | [
"third_party/flownet2/networks/correlation_package/correlation.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\nfrom torch.nn.modules.module import Module\nfrom torch.autograd import Function\nimport correlation_cuda\n\n### Note(jkopf): I commented out the lines with triple comments (###) to fix the\n### following error: 'Legacy autograd function with non-static forward method\n### is deprecated. Please use new-style autograd function with static forward\n### method.'\n\n### class CorrelationFunction(Function):\nclass CorrelationFunction(Module):\n\n def __init__(self, pad_size=3, kernel_size=3, max_displacement=20, stride1=1, stride2=2, corr_multiply=1):\n super(CorrelationFunction, self).__init__()\n self.pad_size = pad_size\n self.kernel_size = kernel_size\n self.max_displacement = max_displacement\n self.stride1 = stride1\n self.stride2 = stride2\n self.corr_multiply = corr_multiply\n # self.out_channel = ((max_displacement/stride2)*2 + 1) * ((max_displacement/stride2)*2 + 1)\n\n def forward(self, input1, input2):\n ### self.save_for_backward(input1, input2)\n\n with torch.cuda.device_of(input1):\n rbot1 = input1.new()\n rbot2 = input2.new()\n output = input1.new()\n\n correlation_cuda.forward(input1, input2, rbot1, rbot2, output,\n self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)\n\n return output\n\n def backward(self, grad_output):\n input1, input2 = self.saved_tensors\n\n with torch.cuda.device_of(input1):\n rbot1 = input1.new()\n rbot2 = input2.new()\n\n grad_input1 = input1.new()\n grad_input2 = input2.new()\n\n correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2,\n self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)\n\n return grad_input1, grad_input2\n\n\nclass Correlation(Module):\n def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1):\n super(Correlation, self).__init__()\n self.pad_size = pad_size\n self.kernel_size = kernel_size\n self.max_displacement = max_displacement\n self.stride1 = stride1\n self.stride2 = stride2\n self.corr_multiply = corr_multiply\n\n def forward(self, input1, input2):\n\n result = CorrelationFunction(self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)(input1, input2)\n\n return result\n"
] | [
[
"torch.cuda.device_of"
]
] |
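The `###` comments in the file above refer to PyTorch's removal of legacy autograd functions with a non-static `forward`. For context, here is a minimal sketch of the new-style pattern the deprecation message points to; this is a generic scaling op, not the `correlation_cuda` kernel itself:

```python
# Sketch of a new-style (static-method) autograd Function, as the deprecation
# notice above suggests. Generic example, not the correlation kernel.
import torch
from torch.autograd import Function

class ScaleFunction(Function):
    @staticmethod
    def forward(ctx, input, scale):
        ctx.scale = scale                  # stash non-tensor state on ctx
        return input * scale

    @staticmethod
    def backward(ctx, grad_output):
        # Return one gradient per forward argument; None for the float scale.
        return grad_output * ctx.scale, None

x = torch.randn(3, requires_grad=True)
y = ScaleFunction.apply(x, 2.0)            # new-style functions use .apply()
y.sum().backward()
print(x.grad)                              # tensor([2., 2., 2.])
```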
zongdaoming/Ambiguity | [
"ab6bdda3eceb646f69aa509a73631cfc5620f473"
] | [
"unet_utils.py"
] | [
"# _*_ coding: utf-8 _*_\n# @author : naive dormin\n# @time : 2021/03/17 18:13:38\n\n\n\n\"\"\"Architectural blocks and utility functions of the U-Net.\"\"\"\nimport sonnet as snt\nimport tensorflow.compat.v1 as tf\n\ndef res_block(input_features, n_channels, n_down_channels=None,\n activation_fn=tf.nn.relu, initializers=None, regularizers=None,\n convs_per_block=3):\n \"\"\"A pre-activated residual block.\n Args:\n input_features: A tensor of shape (b, h, w, c).\n n_channels: An integer specifying the number of output channels.\n n_down_channels: An integer specifying the number of intermediate channels.\n activation_fn: A callable activation function.\n initializers: Initializers for the weights and biases.\n regularizers: Regularizers for the weights and biases.\n convs_per_block: An Integer specifying the number of convolutional layers.\n Returns:\n A tensor of shape (b, h, w, c).\n \"\"\"\n # Pre-activate the inputs.\n skip = input_features\n residual = activation_fn(input_features)\n\n # Set the number of intermediate channels that we compress to.\n if n_down_channels is None:\n n_down_channels = n_channels\n\n for c in range(convs_per_block):\n residual = snt.Conv2D(n_down_channels,\n (3, 3),\n padding='SAME',\n initializers=initializers,\n regularizers=regularizers)(residual)\n if c < convs_per_block - 1:\n residual = activation_fn(residual)\n\n incoming_channels = input_features.shape[-1]\n if incoming_channels != n_channels:\n skip = snt.Conv2D(n_channels,\n (1, 1),\n padding='SAME',\n initializers=initializers,\n regularizers=regularizers)(skip)\n if n_down_channels != n_channels:\n residual = snt.Conv2D(n_channels,\n (1, 1),\n padding='SAME',\n initializers=initializers,\n regularizers=regularizers)(residual)\n return skip + residual\n\n\ndef resize_up(input_features, scale=2):\n \"\"\"Nearest neighbor rescaling-operation for the input features.\n Args:\n input_features: A tensor of shape (b, h, w, c).\n scale: An integer specifying the scaling factor.\n Returns: A tensor of shape (b, scale * h, scale * w, c).\n \"\"\"\n assert scale >= 1\n _, size_x, size_y, _ = input_features.shape.as_list()\n new_size_x = int(round(size_x * scale))\n new_size_y = int(round(size_y * scale))\n return tf.image.resize(\n input_features,\n [new_size_x, new_size_y],\n align_corners=True,\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n\ndef resize_down(input_features, scale=2):\n \"\"\"Average pooling rescaling-operation for the input features.\n Args:\n input_features: A tensor of shape (b, h, w, c).\n scale: An integer specifying the scaling factor.\n Returns: A tensor of shape (b, h / scale, w / scale, c).\n \"\"\"\n assert scale >= 1\n return tf.nn.avg_pool2d(\n input_features,\n ksize=(1, scale, scale, 1),\n strides=(1, scale, scale, 1),\n padding='VALID')\n"
] | [
[
"tensorflow.compat.v1.nn.avg_pool2d",
"tensorflow.compat.v1.image.resize"
]
] |
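To show how the blocks above compose, a small wiring sketch; the shapes are illustrative, and it assumes the Sonnet 1.x / TF1-compat environment the file itself imports:

```python
# Illustrative wiring of the U-Net utilities above (not from the repo).
import tensorflow.compat.v1 as tf
from unet_utils import res_block, resize_down, resize_up  # module path assumed

tf.disable_eager_execution()
x = tf.placeholder(tf.float32, [1, 64, 64, 16])  # (b, h, w, c)
h = res_block(x, n_channels=32)                  # pre-activated residual block
h = resize_down(h, scale=2)                      # avg-pool to (1, 32, 32, 32)
h = resize_up(h, scale=2)                        # nearest-neighbor back to 64x64
print(h.shape)                                   # (1, 64, 64, 32)
```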
angelolovatto/deep-rl | [
"9f0c1aafe71852c8973bf1ab732114a3cdbe23ad"
] | [
"proj/common/env_makers.py"
] | [
"\"\"\"\nImplements several factories for both single and vectorized environments.\n\"\"\"\nimport os\nimport gym\nimport numpy as np\nfrom baselines import logger\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv as _DummyVecEnv\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nfrom baselines.common.vec_env.vec_monitor import VecMonitor\nfrom proj.common.env_pool import EnvPool, ShmEnvPool\n\n\nclass EnvMaker:\n \"\"\"\n Used to store an environment id and apply the appropriate wrappers\n when constructing the environment.\n \"\"\"\n\n def __init__(self, env_id):\n self.env_id = env_id\n self.__name__ = repr(self)\n\n def __call__(self):\n if (\n \"AtariEnv\" in gym.spec(self.env_id)._entry_point\n and \"-ram-\" not in self.env_id\n ):\n env = make_atari(self.env_id)\n env = wrap_deepmind(env)\n else:\n env = gym.make(self.env_id)\n\n if len(env.observation_space.shape) == 1 and \"TimeLimit\" in str(env):\n env = AddRelativeTimestep(env)\n\n return env\n\n def __repr__(self):\n return \"EnvMaker('{}')\".format(self.env_id)\n\n\nclass VecEnvMaker:\n \"\"\"\n Used to store an environment id and apply the appropriate vectorized\n environment wrappers when constructing the vectorized environment.\n \"\"\"\n\n def __init__(self, env_id):\n self.env_id = env_id\n self.__name__ = repr(self)\n\n def __call__(self, n_envs=1, *, train=True):\n env_fn = EnvMaker(self.env_id)\n\n if (\n \"AtariEnv\" in gym.spec(self.env_id)._entry_point\n and \"-ram-\" not in self.env_id\n ):\n if n_envs == 1:\n vec_env = DummyVecEnv([env_fn])\n else:\n vec_env = ShmEnvPool(env_fn, n_envs=n_envs)\n vec_env = VecFrameStack(vec_env, 4)\n else:\n if n_envs == 1:\n vec_env = DummyVecEnv([env_fn])\n else:\n vec_env = EnvPool(env_fn, n_envs=n_envs)\n\n monitor_dir = os.path.join(\n logger.get_dir(), (\"train\" if train else \"eval\") + \"_monitor\"\n )\n os.makedirs(monitor_dir, exist_ok=True)\n vec_env = VecMonitor(vec_env, filename=monitor_dir)\n return vec_env\n\n def __repr__(self):\n return \"VecEnvMaker('{}')\".format(self.env_id)\n\n\n# ==============================\n# Reproducible DummyVecEnv\n# ==============================\n\n\nclass DummyVecEnv(_DummyVecEnv):\n \"\"\"\n Extends baselines.common.vec_env.dummy_vec_env.DummyVecEnv to allow seeding\n and perform it on initialization. 
Plus, properly cleans up when closed.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # set initial seeds\n seeds = np.random.randint(\n low=0, high=np.iinfo(np.int32).max, size=self.num_envs\n )\n self.seed(seeds)\n\n def seed(self, seeds):\n for env, seed in zip(self.envs, seeds):\n env.seed(int(seed))\n\n def close_extras(self):\n for env in self.envs:\n env.close()\n\n\n# ==============================\n# Wrappers\n# ==============================\n\n\nclass AddTimestep(gym.ObservationWrapper):\n \"\"\"\n Adds the absolute timestep to the observations of environments with\n observation spaces of type gym.spaces.Box.\n \"\"\"\n\n def __init__(self, env=None):\n super().__init__(env)\n self.observation_space = gym.spaces.Box(\n low=np.append(self.observation_space.low, 0),\n high=np.append(self.observation_space.high, 2 ** 32),\n dtype=self.observation_space.dtype,\n )\n\n def observation(self, observation):\n return np.append(observation, self.env._elapsed_steps)\n\n\nclass AddRelativeTimestep(gym.ObservationWrapper):\n \"\"\"\n Adds the relative timestep (normalized to the range [-1, 1]) to the\n observations of environments with observation spaces of type gym.spaces.Box.\n \"\"\"\n\n def __init__(self, env=None):\n super().__init__(env)\n self.observation_space = gym.spaces.Box(\n low=np.append(self.observation_space.low, -1.0),\n high=np.append(self.observation_space.high, 1.0),\n dtype=self.observation_space.dtype,\n )\n\n def observation(self, observation):\n return np.append(\n observation,\n -1 + (self.env._elapsed_steps / self.spec.max_episode_steps) * 2,\n )\n"
] | [
[
"numpy.iinfo",
"numpy.append"
]
] |
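A hypothetical call pattern for the factories above; the environment id and counts are illustrative, and it assumes the `baselines` logger is configured as the file expects:

```python
# Sketch: building a monitored, vectorized env with the factories above.
from baselines import logger
from proj.common.env_makers import VecEnvMaker  # path taken from the row above

logger.configure()                       # VecEnvMaker writes monitors under logger.get_dir()
vec_env_fn = VecEnvMaker("CartPole-v1")  # stores the id; Atari ids get deepmind-wrapped
vec_env = vec_env_fn(4, train=True)      # EnvPool of 4 envs + VecMonitor
obs = vec_env.reset()
print(obs.shape)                         # (4, obs_dim + 1) with the relative timestep added
vec_env.close()
```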
eddie-chiang/prca | [
"c74c21034a4fcb785faedc8069470a70a74342e6"
] | [
"ghtorrent/BigQueryCsvFileProcessor.py"
] | [
"import logging\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\n\nimport numpy\nimport pandas\nfrom tqdm import tqdm\n\nfrom commentprocessing import LanguageDetector\nfrom ghtorrent import CommentResourceAccess\nfrom github import PullRequestResourceAccess\n\n\nclass BigQueryCsvFileProcessor:\n \"\"\"A file processor that iterate through all rows in the given BigQuery result .csv file and determine whether a comment is truncated.\n If so, will load using :class:`CommentResourceAccess`.\n Also, it determines the language of a comment, and skip rows that are not in English.\n\n Args:\n comment_loader (CommentResourceAccess): An instance of comment loader.\n prra (PullRequestResourceAccess): An instance GitHub Pull Request Helper.\n \"\"\"\n\n def __init__(self, comment_loader: CommentResourceAccess, prra: PullRequestResourceAccess):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.comment_loader = comment_loader\n self.prra = prra\n\n def process(self, csv_file: Path):\n \"\"\"Process the given BigQuery result .csv file.\n\n Args:\n csv_file (Path): File path that points to the .csv file to be processed.\n\n Returns:\n tuple: (\n Path: The file path of the processed file.\n Path: The file containing the processing statistics.\n )\n \"\"\"\n final_csv = Path(csv_file.absolute().as_posix().replace('.csv', '_cleaned.csv'))\n final_stats_csv = Path(csv_file.absolute().as_posix().replace('.csv', '_cleaned_stats.csv'))\n\n if final_csv.exists() and final_stats_csv.exists():\n self.logger.info(f'Processed file already exists, stop further processing: {final_csv}')\n return final_csv, final_stats_csv\n\n self.logger.info(f'Start processing {csv_file}...')\n\n tmp_csv = Path(csv_file.absolute().as_posix().replace('.csv', '_processing.csv'))\n tmp_stats_csv = Path(csv_file.absolute().as_posix().replace('.csv', '_processing_stats.csv'))\n\n data_frame = pandas.read_csv(csv_file)\n total_rows = data_frame.shape[0]\n self.logger.info(f'No. of rows in {csv_file}: {total_rows}')\n\n ctr = truncated_ctr = del_from_mongo_ctr = del_from_github_ctr = non_eng_ctr = skip_ctr = 0\n\n tmp_stats_df = None\n if tmp_csv.exists():\n tmp_total_rows = pandas.read_csv(tmp_csv).shape[0]\n tmp_stats_df = pandas.read_csv(tmp_stats_csv)\n ctr = int(tmp_stats_df['rows_processed'].iat[0])\n truncated_ctr = int(tmp_stats_df['comments_truncated'].iat[0])\n non_eng_ctr = int(tmp_stats_df['non_english'].iat[0])\n del_from_mongo_ctr = int(tmp_stats_df['deleted_from_mongodb'].iat[0])\n del_from_github_ctr = int(tmp_stats_df['deleted_from_github'].iat[0])\n skip_ctr = int(tmp_stats_df['total_skipped'].iat[0])\n\n self.logger.warn(\n f'The file {tmp_csv.name} already exists, no. of rows in the file: {tmp_total_rows}, no. of rows processed: {ctr}. 
Resuming...')\n else:\n stats = {\n 'rows_processed': [0],\n 'comments_truncated': [0],\n 'non_english': [0],\n 'deleted_from_mongodb': [0],\n 'deleted_from_github': [0],\n 'total_skipped': [0]\n }\n tmp_stats_df = pandas.DataFrame(\n data=dict([(key, pandas.Series(value)) for key, value in stats.items()]))\n\n # Set up before the loop\n pbar = tqdm(desc='Process CSV', total=total_rows, initial=ctr)\n commentExecutor = ThreadPoolExecutor(max_workers=4, thread_name_prefix='CommentResourceAccess')\n gitHubExecutor = ThreadPoolExecutor(max_workers=4, thread_name_prefix='PullRequestResourceAccess')\n\n # Skip any previously processed rows, but do not skip the header.\n data_frame = pandas.read_csv(csv_file, chunksize=500, converters={'body': str}, skiprows=range(1, ctr + 1))\n for chunk in data_frame:\n # Get chunk size first before any filtering.\n chunk_size = chunk.shape[0]\n\n # Add new columns\n chunk = self.__get_header_fields(chunk)\n\n # Filter to only English comments.\n chunk['is_eng'] = chunk.apply(\n lambda row: LanguageDetector.is_english(row['body']),\n axis='columns')\n chunk = chunk[chunk['is_eng'] == True]\n non_eng_ctr += chunk_size - chunk.shape[0]\n chunk.drop(columns='is_eng', inplace=True)\n\n # Identify possible truncated comments and load from GHTorrent MongoDB.\n chunk['is_truncated'] = numpy.char.str_len(chunk['body'].to_numpy(dtype=str)) == 255\n truncated_ctr_in_loop = len(chunk[chunk['is_truncated'] == True].index)\n truncated_ctr += truncated_ctr_in_loop\n\n # https://api.github.com/repos/{owner}/{repo}\n chunk.loc[chunk['is_truncated'] == True, 'owner'] = [\n url[url.rfind('/', 0, url.rfind('/')) + 1:url.rfind('/')]\n for url in chunk[chunk['is_truncated'] == True]['project_url']\n ]\n\n chunk.loc[chunk['is_truncated'] == True, 'repo'] = [\n url[url.rfind('/') + 1:]\n for url in chunk[chunk['is_truncated'] == True]['project_url']\n ]\n\n # Loading comment from MongoDB has a lot of IO waits, use threading.\n chunk.loc[chunk['is_truncated'] == True, 'body'] = list(tqdm(\n commentExecutor.map(\n self.comment_loader.load,\n chunk[chunk['is_truncated'] == True]['owner'],\n chunk[chunk['is_truncated'] == True]['repo'],\n chunk[chunk['is_truncated'] == True]['pullreq_id'],\n chunk[chunk['is_truncated'] == True]['comment_id'],\n chunk[chunk['is_truncated'] == True]['body'],\n timeout=600\n ),\n desc='Load comment',\n total=truncated_ctr_in_loop,\n leave=False\n ))\n\n # Filter out comments deleted from MongoDB.\n del_from_mongo_ctr += len(chunk[chunk['body'].isnull()].index)\n chunk = chunk[chunk['body'].notnull()]\n\n # Drop temp columns\n chunk.drop(columns='is_truncated', inplace=True)\n chunk.drop(columns='owner', inplace=True)\n chunk.drop(columns='repo', inplace=True)\n\n # Temp working columns.\n chunk = chunk.assign(comment_user_login='', pr_user_login='')\n\n chunk[['pr_comments_cnt',\n 'pr_review_comments_cnt',\n 'pr_commits_cnt',\n 'pr_additions',\n 'pr_deletions',\n 'pr_changed_files',\n 'pr_user_login',\n 'pr_merged_by_user_id']] = list(tqdm(\n gitHubExecutor.map(\n self.prra.get_pull_request_info,\n chunk['project_url'],\n chunk['pullreq_id'],\n timeout=600\n ),\n desc='Load pull request',\n total=chunk.shape[0],\n leave=False\n ))\n\n # For ones which Pull Request is not found/available, mark commit info as 'Not Available'.\n chunk.loc[\n chunk['pr_commits_cnt'].isin(['Not Available', 'Not Found']),\n [\n 'comment_author_association',\n 'comment_user_login',\n 'comment_updated_at',\n 'comment_html_url',\n 'pr_commits_cnt_prior_to_comment',\n 
'commit_file_status',\n 'commit_file_additions',\n 'commit_file_deletions',\n 'commit_file_changes'\n ]\n ] = ['Not Available'] * 9\n\n # Load commit info.\n chunk = self.__get_comment_info(\n chunk,\n gitHubExecutor,\n (~chunk['pr_commits_cnt'].isin(['Not Available', 'Not Found']))\n )\n\n chunk['comment_is_by_author'] = numpy.char.equal(\n chunk['pr_user_login'].to_numpy(dtype=str),\n chunk['comment_user_login'].to_numpy(dtype=str))\n chunk.drop(columns='pr_user_login', inplace=True)\n chunk.drop(columns='comment_user_login', inplace=True)\n\n # Filter out records not found from GitHub.\n del_from_github_ctr += len(\n chunk[\n (chunk['comment_html_url'] == 'Not Found')\n | (chunk['pr_commits_cnt'] == 'Not Found')\n ].index)\n chunk = chunk[\n (chunk['comment_html_url'] != 'Not Found')\n & (chunk['pr_commits_cnt'] != 'Not Found')\n ]\n\n skip_ctr += chunk_size - chunk.shape[0]\n\n if chunk.shape[0] > 0:\n pbar.write(f'Writing to {tmp_csv}')\n chunk.to_csv(tmp_csv,\n index=False,\n header=False if ctr > 0 else True,\n mode='w' if ctr == 0 else 'a')\n\n ctr += chunk_size\n\n # Save the counters for resume purpose.\n tmp_stats_df['rows_processed'].iat[0] = ctr\n tmp_stats_df['comments_truncated'].iat[0] = truncated_ctr\n tmp_stats_df['non_english'].iat[0] = non_eng_ctr\n tmp_stats_df['deleted_from_mongodb'].iat[0] = del_from_mongo_ctr\n tmp_stats_df['deleted_from_github'].iat[0] = del_from_github_ctr\n tmp_stats_df['total_skipped'].iat[0] = skip_ctr\n\n pbar.write(f'Writing to {tmp_stats_csv}')\n tmp_stats_df.to_csv(tmp_stats_csv,\n index=False, header=True, mode='w')\n\n pbar.update(chunk_size)\n pbar.write(\n f'Comment truncated: {truncated_ctr}, non English: {non_eng_ctr}, deleted from MongoDB/GitHub: {del_from_mongo_ctr}/{del_from_github_ctr}, total skipped: {skip_ctr}')\n\n # Clean up after the loop\n pbar.close()\n commentExecutor.shutdown()\n gitHubExecutor.shutdown()\n\n tmp_csv.rename(final_csv)\n tmp_stats_csv.rename(final_stats_csv)\n self.logger.info(f'Processing completed, output file: {final_csv}')\n\n return final_csv, final_stats_csv\n\n def __get_header_fields(self, df: pandas.DataFrame):\n columns = ['project_id',\n 'project_url',\n 'pull_request_id',\n 'pullreq_id',\n 'user_id',\n 'comment_id',\n 'position',\n 'body',\n 'commit_id',\n 'created_at',\n 'pr_comments_cnt',\n 'pr_review_comments_cnt',\n 'pr_commits_cnt',\n 'pr_additions',\n 'pr_deletions',\n 'pr_changed_files',\n 'pr_merged_by_user_id',\n 'comment_author_association',\n 'comment_is_by_author',\n 'comment_updated_at',\n 'comment_html_url',\n 'pr_commits_cnt_prior_to_comment',\n 'commit_file_status',\n 'commit_file_additions',\n 'commit_file_deletions',\n 'commit_file_changes']\n\n # Remove unused columns.\n for col in (col for col in df if col not in columns):\n df.drop(columns=col, inplace=True)\n\n # Add missing columns\n for col in (col for col in columns if col not in df.columns):\n df[col] = None\n\n return df\n\n def __get_comment_info(self, chunk: pandas.DataFrame, gitHubExecutor: ThreadPoolExecutor, filter_gen_exp):\n # For ones which Pull Request is not found/available, mark commit info as 'Not Available'.\n chunk.loc[\n chunk['pr_commits_cnt'].isin(['Not Available', 'Not Found']),\n [\n 'comment_author_association',\n 'comment_user_login',\n 'comment_updated_at',\n 'comment_html_url',\n 'pr_commits_cnt_prior_to_comment',\n 'commit_file_status',\n 'commit_file_additions',\n 'commit_file_deletions',\n 'commit_file_changes'\n ]\n ] = ['Not Available'] * 9\n\n if chunk[filter_gen_exp].shape[0] > 0:\n # 
Find the commit info, filtered by the given condition.\n chunk.loc[\n filter_gen_exp,\n [\n 'body',\n 'comment_author_association',\n 'comment_user_login',\n 'comment_updated_at',\n 'comment_html_url',\n 'pr_commits_cnt_prior_to_comment',\n 'commit_file_status',\n 'commit_file_additions',\n 'commit_file_deletions',\n 'commit_file_changes'\n ]\n ] = list(tqdm(\n gitHubExecutor.map(\n self.prra.get_pull_request_comment_info,\n chunk.loc[filter_gen_exp, 'project_url'],\n chunk.loc[filter_gen_exp, 'pullreq_id'],\n chunk.loc[filter_gen_exp, 'comment_id'],\n timeout=600\n ),\n desc='Load commit info',\n total=chunk[filter_gen_exp].shape[0],\n leave=False\n ))\n\n return chunk\n"
] | [
[
"pandas.read_csv",
"pandas.Series"
]
] |
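The processor above resumes interrupted runs by counting rows already written and skipping them on the next pass. A stripped-down sketch of that chunk/skip/append pattern (file names are placeholders, not from the repo):

```python
# Minimal sketch of the chunked-resume pattern used above.
import pandas as pd
from pathlib import Path

src, tmp = Path("input.csv"), Path("input_processing.csv")
done = pd.read_csv(tmp).shape[0] if tmp.exists() else 0

# Skip already-processed rows (but never the header), then append new chunks.
for chunk in pd.read_csv(src, chunksize=500, skiprows=range(1, done + 1)):
    chunk.to_csv(tmp, index=False, header=(done == 0), mode="w" if done == 0 else "a")
    done += chunk.shape[0]
```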
gkiar/nilearn | [
"311422d0e55a2d385facb8431a37f4d69f9963e3"
] | [
"nilearn/datasets/func.py"
] | [
"\"\"\"\nDownloading NeuroImaging datasets: functional datasets (task + resting-state)\n\"\"\"\nimport warnings\nimport os\nimport re\nimport json\nimport numpy as np\nimport numbers\n\nimport nibabel\nfrom sklearn.datasets.base import Bunch\nfrom sklearn.utils import deprecated\n\nfrom .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr,\n _read_md5_sum_file, _tree, _filter_columns)\nfrom .._utils import check_niimg\nfrom .._utils.compat import BytesIO, _basestring, _urllib\nfrom .._utils.numpy_conversions import csv_to_array\nfrom .._utils.exceptions import VisibleDeprecationWarning\n\n\n@deprecated(\"fetch_haxby_simple will be removed in future releases. \"\n \"Use 'fetch_haxby' instead.\")\ndef fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load a simple example haxby dataset.\n\n Parameters\n ----------\n data_dir: string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n Returns\n -------\n data: sklearn.datasets.base.Bunch\n Dictionary-like object, interest attributes are:\n 'func': list of string. Path to nifti file with bold data.\n 'session_target': list of string. Path to text file containing session and\n target data.\n 'mask': string. Path to nifti mask file.\n 'session': list of string. Path to text file containing labels\n (can be used for LeaveOneGroupOut cross validation for example).\n\n References\n ----------\n `Haxby, J., Gobbini, M., Furey, M., Ishai, A., Schouten, J.,\n and Pietrini, P. (2001). Distributed and overlapping representations of\n faces and objects in ventral temporal cortex. Science 293, 2425-2430.`\n\n Notes\n -----\n PyMVPA provides a tutorial using this dataset :\n http://www.pymvpa.org/tutorial.html\n\n More informations about its structure :\n http://dev.pymvpa.org/datadb/haxby2001.html\n\n See `additional information\n <http://www.sciencemag.org/content/293/5539/2425>`_\n \"\"\"\n # URL of the dataset. It is optional because a test uses it to test dataset\n # downloading\n if url is None:\n url = 'http://www.pymvpa.org/files/pymvpa_exampledata.tar.bz2'\n\n opts = {'uncompress': True}\n files = [\n (os.path.join('pymvpa-exampledata', 'attributes.txt'), url, opts),\n (os.path.join('pymvpa-exampledata', 'bold.nii.gz'), url, opts),\n (os.path.join('pymvpa-exampledata', 'mask.nii.gz'), url, opts),\n (os.path.join('pymvpa-exampledata', 'attributes_literal.txt'),\n url, opts),\n ]\n\n dataset_name = 'haxby2001_simple'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)\n\n # There is a common file for the two versions of Haxby\n fdescr = _get_dataset_descr('haxby2001')\n\n # List of length 1 are used because haxby_simple is single-subject\n return Bunch(func=[files[1]], session_target=[files[0]], mask=files[2],\n conditions_target=[files[3]], description=fdescr)\n\n\ndef fetch_haxby(data_dir=None, n_subjects=None, subjects=(2,),\n fetch_stimuli=False, url=None, resume=True, verbose=1):\n \"\"\"Download and loads complete haxby dataset\n\n Parameters\n ----------\n data_dir: string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n n_subjects: int, optional\n Number of subjects, from 1 to 6.\n\n NOTE: n_subjects is deprecated from 0.2.6 and will be removed in next\n release. 
Use `subjects` instead.\n\n subjects : list or int, optional\n Either a list of subjects or the number of subjects to load, from 1 to\n 6. By default, 2nd subject will be loaded. Empty list returns no subject\n data.\n\n fetch_stimuli: boolean, optional\n Indicate if stimuli images must be downloaded. They will be presented\n as a dictionary of categories.\n\n Returns\n -------\n data: sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n 'anat': string list. Paths to anatomic images.\n 'func': string list. Paths to nifti file with bold data.\n 'session_target': string list. Paths to text file containing\n session and target data.\n 'mask': string. Path to fullbrain mask file.\n 'mask_vt': string list. Paths to nifti ventral temporal mask file.\n 'mask_face': string list. Paths to nifti ventral temporal mask file.\n 'mask_house': string list. Paths to nifti ventral temporal mask file.\n 'mask_face_little': string list. Paths to nifti ventral temporal\n mask file.\n 'mask_house_little': string list. Paths to nifti ventral temporal\n mask file.\n\n References\n ----------\n `Haxby, J., Gobbini, M., Furey, M., Ishai, A., Schouten, J.,\n and Pietrini, P. (2001). Distributed and overlapping representations of\n faces and objects in ventral temporal cortex. Science 293, 2425-2430.`\n\n Notes\n -----\n PyMVPA provides a tutorial making use of this dataset:\n http://www.pymvpa.org/tutorial.html\n\n More information about its structure:\n http://dev.pymvpa.org/datadb/haxby2001.html\n\n See `additional information\n <http://www.sciencemag.org/content/293/5539/2425>`\n\n Run 8 in subject 5 does not contain any task labels.\n The anatomical image for subject 6 is unavailable.\n \"\"\"\n if n_subjects is not None:\n warn_str = (\"The parameter 'n_subjects' is deprecated from 0.2.6 and \"\n \"will be removed in nilearn next release. Use parameter \"\n \"'subjects' instead.\")\n warnings.warn(warn_str, VisibleDeprecationWarning, stacklevel=2)\n subjects = n_subjects\n\n if isinstance(subjects, numbers.Number) and subjects > 6:\n subjects = 6\n\n if subjects is not None and (isinstance(subjects, list) or\n isinstance(subjects, tuple)):\n for sub_id in subjects:\n if sub_id not in [1, 2, 3, 4, 5, 6]:\n raise ValueError(\"You provided invalid subject id {0} in a \"\n \"list. 
Subjects must be selected in \"\n \"[1, 2, 3, 4, 5, 6]\".format(sub_id))\n\n dataset_name = 'haxby2001'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n\n # Get the mask\n url_mask = 'https://www.nitrc.org/frs/download.php/7868/mask.nii.gz'\n mask = _fetch_files(data_dir, [('mask.nii.gz', url_mask, {})],\n verbose=verbose)[0]\n\n # Dataset files\n if url is None:\n url = 'http://data.pymvpa.org/datasets/haxby2001/'\n md5sums = _fetch_files(data_dir, [('MD5SUMS', url + 'MD5SUMS', {})],\n verbose=verbose)[0]\n md5sums = _read_md5_sum_file(md5sums)\n\n # definition of dataset files\n sub_files = ['bold.nii.gz', 'labels.txt',\n 'mask4_vt.nii.gz', 'mask8b_face_vt.nii.gz',\n 'mask8b_house_vt.nii.gz', 'mask8_face_vt.nii.gz',\n 'mask8_house_vt.nii.gz', 'anat.nii.gz']\n n_files = len(sub_files)\n\n if subjects is None:\n subjects = []\n\n if isinstance(subjects, numbers.Number):\n subject_mask = np.arange(1, subjects + 1)\n else:\n subject_mask = np.array(subjects)\n\n files = [\n (os.path.join('subj%d' % i, sub_file),\n url + 'subj%d-2010.01.14.tar.gz' % i,\n {'uncompress': True,\n 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)})\n for i in subject_mask\n for sub_file in sub_files\n if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 6\n ]\n\n files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)\n\n if ((isinstance(subjects, numbers.Number) and subjects == 6) or\n np.any(subject_mask == 6)):\n files.append(None) # None value because subject 6 has no anat\n\n kwargs = {}\n if fetch_stimuli:\n stimuli_files = [(os.path.join('stimuli', 'README'),\n url + 'stimuli-2010.01.14.tar.gz',\n {'uncompress': True})]\n readme = _fetch_files(data_dir, stimuli_files, resume=resume,\n verbose=verbose)[0]\n kwargs['stimuli'] = _tree(os.path.dirname(readme), pattern='*.jpg',\n dictionary=True)\n\n fdescr = _get_dataset_descr(dataset_name)\n\n # return the data\n return Bunch(\n anat=files[7::n_files],\n func=files[0::n_files],\n session_target=files[1::n_files],\n mask_vt=files[2::n_files],\n mask_face=files[3::n_files],\n mask_house=files[4::n_files],\n mask_face_little=files[5::n_files],\n mask_house_little=files[6::n_files],\n mask=mask,\n description=fdescr,\n **kwargs)\n\n\ndef fetch_nyu_rest(n_subjects=None, sessions=[1], data_dir=None, resume=True,\n verbose=1):\n \"\"\"Download and loads the NYU resting-state test-retest dataset.\n\n Parameters\n ----------\n n_subjects: int, optional\n The number of subjects to load. If None is given, all the\n subjects are used.\n\n sessions: iterable of int, optional\n The sessions to load. Load only the first session by default.\n\n data_dir: string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n Returns\n -------\n data: sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n 'func': string list. Paths to functional images.\n 'anat_anon': string list. Paths to anatomic images.\n 'anat_skull': string. Paths to skull-stripped images.\n 'session': numpy array. 
List of ids corresponding to images sessions.\n\n Notes\n ------\n This dataset is composed of 3 sessions of 26 participants (11 males).\n For each session, three sets of data are available:\n\n - anatomical:\n\n * anonymized data (defaced thanks to BIRN defacer)\n * skullstripped data (using 3DSkullStrip from AFNI)\n\n - functional\n\n For each participant, 3 resting-state scans of 197 continuous EPI\n functional volumes were collected :\n\n - 39 slices\n - matrix = 64 x 64\n - acquisition voxel size = 3 x 3 x 3 mm\n\n Sessions 2 and 3 were conducted in a single scan session, 45 min\n apart, and were 5-16 months after Scan 1.\n\n All details about this dataset can be found here :\n http://cercor.oxfordjournals.org/content/19/10/2209.full\n\n References\n ----------\n :Documentation:\n http://www.nitrc.org/docman/?group_id=274\n\n :Download:\n http://www.nitrc.org/frs/?group_id=274\n\n :Paper to cite:\n `The Resting Brain: Unconstrained yet Reliable\n <http://cercor.oxfordjournals.org/content/19/10/2209>`_\n Z. Shehzad, A.M.C. Kelly, P.T. Reiss, D.G. Gee, K. Gotimer,\n L.Q. Uddin, S.H. Lee, D.S. Margulies, A.K. Roy, B.B. Biswal,\n E. Petkova, F.X. Castellanos and M.P. Milham.\n\n :Other references:\n * `The oscillating brain: Complex and Reliable\n <http://dx.doi.org/10.1016/j.neuroimage.2009.09.037>`_\n X-N. Zuo, A. Di Martino, C. Kelly, Z. Shehzad, D.G. Gee,\n D.F. Klein, F.X. Castellanos, B.B. Biswal, M.P. Milham\n\n * `Reliable intrinsic connectivity networks: Test-retest\n evaluation using ICA and dual regression approach\n <http://dx.doi.org/10.1016/j.neuroimage.2009.10.080>`_,\n X-N. Zuo, C. Kelly, J.S. Adelstein, D.F. Klein,\n F.X. Castellanos, M.P. Milham\n\n \"\"\"\n fa1 = 'http://www.nitrc.org/frs/download.php/1071/NYU_TRT_session1a.tar.gz'\n fb1 = 'http://www.nitrc.org/frs/download.php/1072/NYU_TRT_session1b.tar.gz'\n fa2 = 'http://www.nitrc.org/frs/download.php/1073/NYU_TRT_session2a.tar.gz'\n fb2 = 'http://www.nitrc.org/frs/download.php/1074/NYU_TRT_session2b.tar.gz'\n fa3 = 'http://www.nitrc.org/frs/download.php/1075/NYU_TRT_session3a.tar.gz'\n fb3 = 'http://www.nitrc.org/frs/download.php/1076/NYU_TRT_session3b.tar.gz'\n fa1_opts = {'uncompress': True,\n 'move': os.path.join('session1', 'NYU_TRT_session1a.tar.gz')}\n fb1_opts = {'uncompress': True,\n 'move': os.path.join('session1', 'NYU_TRT_session1b.tar.gz')}\n fa2_opts = {'uncompress': True,\n 'move': os.path.join('session2', 'NYU_TRT_session2a.tar.gz')}\n fb2_opts = {'uncompress': True,\n 'move': os.path.join('session2', 'NYU_TRT_session2b.tar.gz')}\n fa3_opts = {'uncompress': True,\n 'move': os.path.join('session3', 'NYU_TRT_session3a.tar.gz')}\n fb3_opts = {'uncompress': True,\n 'move': os.path.join('session3', 'NYU_TRT_session3b.tar.gz')}\n\n p_anon = os.path.join('anat', 'mprage_anonymized.nii.gz')\n p_skull = os.path.join('anat', 'mprage_skullstripped.nii.gz')\n p_func = os.path.join('func', 'lfo.nii.gz')\n\n subs_a = ['sub05676', 'sub08224', 'sub08889', 'sub09607', 'sub14864',\n 'sub18604', 'sub22894', 'sub27641', 'sub33259', 'sub34482',\n 'sub36678', 'sub38579', 'sub39529']\n subs_b = ['sub45463', 'sub47000', 'sub49401', 'sub52738', 'sub55441',\n 'sub58949', 'sub60624', 'sub76987', 'sub84403', 'sub86146',\n 'sub90179', 'sub94293']\n\n # Generate the list of files by session\n anat_anon_files = [\n [(os.path.join('session1', sub, p_anon), fa1, fa1_opts)\n for sub in subs_a]\n + [(os.path.join('session1', sub, p_anon), fb1, fb1_opts)\n for sub in subs_b],\n [(os.path.join('session2', sub, p_anon), fa2, fa2_opts)\n 
for sub in subs_a]\n + [(os.path.join('session2', sub, p_anon), fb2, fb2_opts)\n for sub in subs_b],\n [(os.path.join('session3', sub, p_anon), fa3, fa3_opts)\n for sub in subs_a]\n + [(os.path.join('session3', sub, p_anon), fb3, fb3_opts)\n for sub in subs_b]]\n\n anat_skull_files = [\n [(os.path.join('session1', sub, p_skull), fa1, fa1_opts)\n for sub in subs_a]\n + [(os.path.join('session1', sub, p_skull), fb1, fb1_opts)\n for sub in subs_b],\n [(os.path.join('session2', sub, p_skull), fa2, fa2_opts)\n for sub in subs_a]\n + [(os.path.join('session2', sub, p_skull), fb2, fb2_opts)\n for sub in subs_b],\n [(os.path.join('session3', sub, p_skull), fa3, fa3_opts)\n for sub in subs_a]\n + [(os.path.join('session3', sub, p_skull), fb3, fb3_opts)\n for sub in subs_b]]\n\n func_files = [\n [(os.path.join('session1', sub, p_func), fa1, fa1_opts)\n for sub in subs_a]\n + [(os.path.join('session1', sub, p_func), fb1, fb1_opts)\n for sub in subs_b],\n [(os.path.join('session2', sub, p_func), fa2, fa2_opts)\n for sub in subs_a]\n + [(os.path.join('session2', sub, p_func), fb2, fb2_opts)\n for sub in subs_b],\n [(os.path.join('session3', sub, p_func), fa3, fa3_opts)\n for sub in subs_a]\n + [(os.path.join('session3', sub, p_func), fb3, fb3_opts)\n for sub in subs_b]]\n\n max_subjects = len(subs_a) + len(subs_b)\n # Check arguments\n if n_subjects is None:\n n_subjects = len(subs_a) + len(subs_b)\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = 25\n\n anat_anon = []\n anat_skull = []\n func = []\n session = []\n for i in sessions:\n if not (i in [1, 2, 3]):\n raise ValueError('NYU dataset session id must be in [1, 2, 3]')\n anat_anon += anat_anon_files[i - 1][:n_subjects]\n anat_skull += anat_skull_files[i - 1][:n_subjects]\n func += func_files[i - 1][:n_subjects]\n session += [i] * n_subjects\n\n dataset_name = 'nyu_rest'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n anat_anon = _fetch_files(data_dir, anat_anon, resume=resume,\n verbose=verbose)\n anat_skull = _fetch_files(data_dir, anat_skull, resume=resume,\n verbose=verbose)\n func = _fetch_files(data_dir, func, resume=resume,\n verbose=verbose)\n\n fdescr = _get_dataset_descr(dataset_name)\n\n return Bunch(anat_anon=anat_anon, anat_skull=anat_skull, func=func,\n session=session, description=fdescr)\n\n\ndef fetch_adhd(n_subjects=30, data_dir=None, url=None, resume=True,\n verbose=1):\n \"\"\"Download and load the ADHD resting-state dataset.\n\n Parameters\n ----------\n n_subjects: int, optional\n The number of subjects to load from maximum of 40 subjects.\n By default, 30 subjects will be loaded. If None is given,\n all 40 subjects will be loaded.\n\n data_dir: string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n url: string, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data). 
Default: None\n\n Returns\n -------\n data: sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n - 'func': Paths to functional resting-state images\n - 'phenotypic': Explanations of preprocessing steps\n - 'confounds': CSV files containing the nuisance variables\n\n References\n ----------\n :Download:\n ftp://www.nitrc.org/fcon_1000/htdocs/indi/adhd200/sites/ADHD200_40sub_preprocessed.tgz\n\n \"\"\"\n\n if url is None:\n url = 'https://www.nitrc.org/frs/download.php/'\n\n # Preliminary checks and declarations\n dataset_name = 'adhd'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n ids = ['0010042', '0010064', '0010128', '0021019', '0023008', '0023012',\n '0027011', '0027018', '0027034', '0027037', '1019436', '1206380',\n '1418396', '1517058', '1552181', '1562298', '1679142', '2014113',\n '2497695', '2950754', '3007585', '3154996', '3205761', '3520880',\n '3624598', '3699991', '3884955', '3902469', '3994098', '4016887',\n '4046678', '4134561', '4164316', '4275075', '6115230', '7774305',\n '8409791', '8697774', '9744150', '9750701']\n nitrc_ids = range(7782, 7822)\n max_subjects = len(ids)\n if n_subjects is None:\n n_subjects = max_subjects\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = max_subjects\n ids = ids[:n_subjects]\n nitrc_ids = nitrc_ids[:n_subjects]\n\n opts = dict(uncompress=True)\n\n # Dataset description\n fdescr = _get_dataset_descr(dataset_name)\n\n # First, get the metadata\n phenotypic = ('ADHD200_40subs_motion_parameters_and_phenotypics.csv',\n url + '7781/adhd40_metadata.tgz', opts)\n\n phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume,\n verbose=verbose)[0]\n\n # Load the csv file\n phenotypic = np.genfromtxt(phenotypic, names=True, delimiter=',',\n dtype=None)\n\n # Keep phenotypic information for selected subjects\n int_ids = np.asarray(ids, dtype=int)\n phenotypic = phenotypic[[np.where(phenotypic['Subject'] == i)[0][0]\n for i in int_ids]]\n\n # Download dataset files\n\n archives = [url + '%i/adhd40_%s.tgz' % (ni, ii)\n for ni, ii in zip(nitrc_ids, ids)]\n functionals = ['data/%s/%s_rest_tshift_RPI_voreg_mni.nii.gz' % (i, i)\n for i in ids]\n confounds = ['data/%s/%s_regressors.csv' % (i, i) for i in ids]\n\n functionals = _fetch_files(\n data_dir, zip(functionals, archives, (opts,) * n_subjects),\n resume=resume, verbose=verbose)\n\n confounds = _fetch_files(\n data_dir, zip(confounds, archives, (opts,) * n_subjects),\n resume=resume, verbose=verbose)\n\n return Bunch(func=functionals, confounds=confounds,\n phenotypic=phenotypic, description=fdescr)\n\n\ndef fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and loads Miyawaki et al. 2008 dataset (153MB)\n\n Returns\n -------\n data: Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'func': string list\n Paths to nifti file with bold data\n - 'label': string list\n Paths to text file containing session and target data\n - 'mask': string\n Path to nifti mask file to define target volume in visual\n cortex\n - 'background': string\n Path to nifti file containing a background image usable as a\n background image for miyawaki images.\n\n References\n ----------\n `Visual image reconstruction from human brain activity\n using a combination of multiscale local image decoders\n <http://www.cell.com/neuron/abstract/S0896-6273%2808%2900958-6>`_,\n Miyawaki, Y., Uchida, H., Yamashita, O., Sato, M. 
A.,\n Morito, Y., Tanabe, H. C., ... & Kamitani, Y. (2008).\n Neuron, 60(5), 915-929.\n\n Notes\n -----\n This dataset is available on the `brainliner website\n <http://brainliner.jp/data/brainliner-admin/Reconstruct>`_\n\n See `additional information\n <http://www.cns.atr.jp/dni/en/downloads/\n fmri-data-set-for-visual-image-reconstruction/>`_\n \"\"\"\n\n url = 'https://www.nitrc.org/frs/download.php' \\\n '/8486/miyawaki2008.tgz?i_agree=1&download_now=1'\n opts = {'uncompress': True}\n\n # Dataset files\n\n # Functional MRI:\n # * 20 random scans (usually used for training)\n # * 12 figure scans (usually used for testing)\n\n func_figure = [(os.path.join('func', 'data_figure_run%02d.nii.gz' % i),\n url, opts) for i in range(1, 13)]\n\n func_random = [(os.path.join('func', 'data_random_run%02d.nii.gz' % i),\n url, opts) for i in range(1, 21)]\n\n # Labels, 10x10 patches, stimuli shown to the subject:\n # * 20 random labels\n # * 12 figure labels (letters and shapes)\n\n label_filename = 'data_%s_run%02d_label.csv'\n label_figure = [(os.path.join('label', label_filename % ('figure', i)),\n url, opts) for i in range(1, 13)]\n\n label_random = [(os.path.join('label', label_filename % ('random', i)),\n url, opts) for i in range(1, 21)]\n\n # Masks\n\n file_mask = [\n 'mask.nii.gz',\n 'LHlag0to1.nii.gz',\n 'LHlag10to11.nii.gz',\n 'LHlag1to2.nii.gz',\n 'LHlag2to3.nii.gz',\n 'LHlag3to4.nii.gz',\n 'LHlag4to5.nii.gz',\n 'LHlag5to6.nii.gz',\n 'LHlag6to7.nii.gz',\n 'LHlag7to8.nii.gz',\n 'LHlag8to9.nii.gz',\n 'LHlag9to10.nii.gz',\n 'LHV1d.nii.gz',\n 'LHV1v.nii.gz',\n 'LHV2d.nii.gz',\n 'LHV2v.nii.gz',\n 'LHV3A.nii.gz',\n 'LHV3.nii.gz',\n 'LHV4v.nii.gz',\n 'LHVP.nii.gz',\n 'RHlag0to1.nii.gz',\n 'RHlag10to11.nii.gz',\n 'RHlag1to2.nii.gz',\n 'RHlag2to3.nii.gz',\n 'RHlag3to4.nii.gz',\n 'RHlag4to5.nii.gz',\n 'RHlag5to6.nii.gz',\n 'RHlag6to7.nii.gz',\n 'RHlag7to8.nii.gz',\n 'RHlag8to9.nii.gz',\n 'RHlag9to10.nii.gz',\n 'RHV1d.nii.gz',\n 'RHV1v.nii.gz',\n 'RHV2d.nii.gz',\n 'RHV2v.nii.gz',\n 'RHV3A.nii.gz',\n 'RHV3.nii.gz',\n 'RHV4v.nii.gz',\n 'RHVP.nii.gz'\n ]\n\n file_mask = [(os.path.join('mask', m), url, opts) for m in file_mask]\n\n file_names = func_figure + func_random + \\\n label_figure + label_random + \\\n file_mask\n\n dataset_name = 'miyawaki2008'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n files = _fetch_files(data_dir, file_names, resume=resume, verbose=verbose)\n\n # Fetch the background image\n bg_img = _fetch_files(data_dir, [('bg.nii.gz', url, opts)], resume=resume,\n verbose=verbose)[0]\n\n fdescr = _get_dataset_descr(dataset_name)\n\n # Return the data\n return Bunch(\n func=files[:32],\n label=files[32:64],\n mask=files[64],\n mask_roi=files[65:],\n background=bg_img,\n description=fdescr)\n\n\ndef fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False,\n get_masks=False, get_anats=False,\n data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load Brainomics/Localizer dataset (94 subjects).\n\n \"The Functional Localizer is a simple and fast acquisition\n procedure based on a 5-minute functional magnetic resonance\n imaging (fMRI) sequence that can be run as easily and as\n systematically as an anatomical scan. This protocol captures the\n cerebral bases of auditory and visual perception, motor actions,\n reading, language comprehension and mental calculation at an\n individual level. Individual functional maps are reliable and\n quite precise. 
The procedure is decribed in more detail on the\n Functional Localizer page.\"\n (see http://brainomics.cea.fr/localizer/)\n\n You may cite Papadopoulos Orfanos, Dimitri, *et al.* when using this\n dataset [1].\n\n Scientific results obtained using this dataset are described in\n Pinel *et al.*, 2007 [2].\n\n Parameters\n ----------\n contrasts: list of str\n The contrasts to be fetched (for all 94 subjects available).\n Allowed values are::\n\n {\"checkerboard\",\n \"horizontal checkerboard\",\n \"vertical checkerboard\",\n \"horizontal vs vertical checkerboard\",\n \"vertical vs horizontal checkerboard\",\n \"sentence listening\",\n \"sentence reading\",\n \"sentence listening and reading\",\n \"sentence reading vs checkerboard\",\n \"calculation (auditory cue)\",\n \"calculation (visual cue)\",\n \"calculation (auditory and visual cue)\",\n \"calculation (auditory cue) vs sentence listening\",\n \"calculation (visual cue) vs sentence reading\",\n \"calculation vs sentences\",\n \"calculation (auditory cue) and sentence listening\",\n \"calculation (visual cue) and sentence reading\",\n \"calculation and sentence listening/reading\",\n \"calculation (auditory cue) and sentence listening vs \"\n \"calculation (visual cue) and sentence reading\",\n \"calculation (visual cue) and sentence reading vs checkerboard\",\n \"calculation and sentence listening/reading vs button press\",\n \"left button press (auditory cue)\",\n \"left button press (visual cue)\",\n \"left button press\",\n \"left vs right button press\",\n \"right button press (auditory cue)\",\n \"right button press (visual cue)\",\n \"right button press\",\n \"right vs left button press\",\n \"button press (auditory cue) vs sentence listening\",\n \"button press (visual cue) vs sentence reading\",\n \"button press vs calculation and sentence listening/reading\"}\n\n or equivalently on can use the original names::\n\n {\"checkerboard\",\n \"horizontal checkerboard\",\n \"vertical checkerboard\",\n \"horizontal vs vertical checkerboard\",\n \"vertical vs horizontal checkerboard\",\n \"auditory sentences\",\n \"visual sentences\",\n \"auditory&visual sentences\",\n \"visual sentences vs checkerboard\",\n \"auditory calculation\",\n \"visual calculation\",\n \"auditory&visual calculation\",\n \"auditory calculation vs auditory sentences\",\n \"visual calculation vs sentences\",\n \"auditory&visual calculation vs sentences\",\n \"auditory processing\",\n \"visual processing\",\n \"visual processing vs auditory processing\",\n \"auditory processing vs visual processing\",\n \"visual processing vs checkerboard\",\n \"cognitive processing vs motor\",\n \"left auditory click\",\n \"left visual click\",\n \"left auditory&visual click\",\n \"left auditory & visual click vs right auditory&visual click\",\n \"right auditory click\",\n \"right visual click\",\n \"right auditory&visual click\",\n \"right auditory & visual click vs left auditory&visual click\",\n \"auditory click vs auditory sentences\",\n \"visual click vs visual sentences\",\n \"auditory&visual motor vs cognitive processing\"}\n\n n_subjects: int or list, optional\n The number or list of subjects to load. If None is given,\n all 94 subjects are used.\n\n get_tmaps: boolean\n Whether t maps should be fetched or not.\n\n get_masks: boolean\n Whether individual masks should be fetched or not.\n\n get_anats: boolean\n Whether individual structural images should be fetched or not.\n\n data_dir: string, optional\n Path of the data directory. 
Used to force data storage in a specified\n location.\n\n url: string, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data).\n\n resume: bool\n Whether to resume download of a partly-downloaded file.\n\n verbose: int\n Verbosity level (0 means no message).\n\n Returns\n -------\n data: Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'cmaps': string list\n Paths to nifti contrast maps\n - 'tmaps' string list (if 'get_tmaps' set to True)\n Paths to nifti t maps\n - 'masks': string list\n Paths to nifti files corresponding to the subjects individual masks\n - 'anats': string\n Path to nifti files corresponding to the subjects structural images\n\n References\n ----------\n [1] Papadopoulos Orfanos, Dimitri, et al.\n \"The Brainomics/Localizer database.\"\n NeuroImage 144.B (2017): 309.\n\n [2] Pinel, Philippe, et al.\n \"Fast reproducible identification and large-scale databasing of\n individual functional cognitive networks.\"\n BMC Neuroscience 8.1 (2007): 91.\n\n See Also\n ---------\n nilearn.datasets.fetch_localizer_calculation_task\n nilearn.datasets.fetch_localizer_button_task\n\n \"\"\"\n if isinstance(contrasts, _basestring):\n raise ValueError('Contrasts should be a list of strings, but '\n 'a single string was given: \"%s\"' % contrasts)\n if n_subjects is None:\n n_subjects = 94 # 94 subjects available\n if (isinstance(n_subjects, numbers.Number) and\n ((n_subjects > 94) or (n_subjects < 1))):\n warnings.warn(\"Wrong value for \\'n_subjects\\' (%d). The maximum \"\n \"value will be used instead (\\'n_subjects=94\\')\")\n n_subjects = 94 # 94 subjects available\n\n # we allow the user to use alternatives to Brainomics contrast names\n contrast_name_wrapper = {\n # Checkerboard\n \"checkerboard\": \"checkerboard\",\n \"horizontal checkerboard\": \"horizontal checkerboard\",\n \"vertical checkerboard\": \"vertical checkerboard\",\n \"horizontal vs vertical checkerboard\":\n \"horizontal vs vertical checkerboard\",\n \"vertical vs horizontal checkerboard\":\n \"vertical vs horizontal checkerboard\",\n # Sentences\n \"sentence listening\": \"auditory sentences\",\n \"sentence reading\": \"visual sentences\",\n \"sentence listening and reading\": \"auditory&visual sentences\",\n \"sentence reading vs checkerboard\": \"visual sentences vs checkerboard\",\n # Calculation\n \"calculation (auditory cue)\": \"auditory calculation\",\n \"calculation (visual cue)\": \"visual calculation\",\n \"calculation (auditory and visual cue)\": \"auditory&visual calculation\",\n \"calculation (auditory cue) vs sentence listening\":\n \"auditory calculation vs auditory sentences\",\n \"calculation (visual cue) vs sentence reading\":\n \"visual calculation vs sentences\",\n \"calculation vs sentences\": \"auditory&visual calculation vs sentences\",\n # Calculation + Sentences\n \"calculation (auditory cue) and sentence listening\":\n \"auditory processing\",\n \"calculation (visual cue) and sentence reading\":\n \"visual processing\",\n \"calculation (visual cue) and sentence reading vs \"\n \"calculation (auditory cue) and sentence listening\":\n \"visual processing vs auditory processing\",\n \"calculation (auditory cue) and sentence listening vs \"\n \"calculation (visual cue) and sentence reading\":\n \"auditory processing vs visual processing\",\n \"calculation (visual cue) and sentence reading vs checkerboard\":\n \"visual processing vs checkerboard\",\n \"calculation and sentence listening/reading vs button press\":\n \"cognitive 
processing vs motor\",\n # Button press\n \"left button press (auditory cue)\": \"left auditory click\",\n \"left button press (visual cue)\": \"left visual click\",\n \"left button press\": \"left auditory&visual click\",\n \"left vs right button press\": \"left auditory & visual click vs \"\n + \"right auditory&visual click\",\n \"right button press (auditory cue)\": \"right auditory click\",\n \"right button press (visual cue)\": \"right visual click\",\n \"right button press\": \"right auditory & visual click\",\n \"right vs left button press\": \"right auditory & visual click \"\n + \"vs left auditory&visual click\",\n \"button press (auditory cue) vs sentence listening\":\n \"auditory click vs auditory sentences\",\n \"button press (visual cue) vs sentence reading\":\n \"visual click vs visual sentences\",\n \"button press vs calculation and sentence listening/reading\":\n \"auditory&visual motor vs cognitive processing\"}\n allowed_contrasts = list(contrast_name_wrapper.values())\n # convert contrast names\n contrasts_wrapped = []\n # get a unique ID for each contrast. It is used to give a unique name to\n # each download file and avoid name collisions.\n contrasts_indices = []\n for contrast in contrasts:\n if contrast in allowed_contrasts:\n contrasts_wrapped.append(contrast)\n contrasts_indices.append(allowed_contrasts.index(contrast))\n elif contrast in contrast_name_wrapper:\n name = contrast_name_wrapper[contrast]\n contrasts_wrapped.append(name)\n contrasts_indices.append(allowed_contrasts.index(name))\n else:\n raise ValueError(\"Contrast \\'%s\\' is not available\" % contrast)\n\n # It is better to perform several small requests than a big one because:\n # - Brainomics server has no cache (can lead to timeout while the archive\n # is generated on the remote server)\n # - Local (cached) version of the files can be checked for each contrast\n opts = {'uncompress': True}\n\n if isinstance(n_subjects, numbers.Number):\n subject_mask = np.arange(1, n_subjects + 1)\n subject_id_max = \"S%02d\" % n_subjects\n else:\n subject_mask = np.array(n_subjects)\n subject_id_max = \"S%02d\" % np.max(n_subjects)\n n_subjects = len(n_subjects)\n subject_ids = [\"S%02d\" % s for s in subject_mask]\n data_types = [\"c map\"]\n if get_tmaps:\n data_types.append(\"t map\")\n rql_types = str.join(\", \", [\"\\\"%s\\\"\" % x for x in data_types])\n root_url = \"http://brainomics.cea.fr/localizer/\"\n\n base_query = (\"Any X,XT,XL,XI,XF,XD WHERE X is Scan, X type XT, \"\n \"X concerns S, \"\n \"X label XL, X identifier XI, \"\n \"X format XF, X description XD, \"\n 'S identifier <= \"%s\", ' % (subject_id_max, ) +\n 'X type IN(%(types)s), X label \"%(label)s\"')\n\n urls = [\"%sbrainomics_data_%d.zip?rql=%s&vid=data-zip\"\n % (root_url, i,\n _urllib.parse.quote(base_query % {\"types\": rql_types,\n \"label\": c},\n safe=',()'))\n for c, i in zip(contrasts_wrapped, contrasts_indices)]\n filenames = []\n for subject_id in subject_ids:\n for data_type in data_types:\n for contrast_id, contrast in enumerate(contrasts_wrapped):\n name_aux = str.replace(\n str.join('_', [data_type, contrast]), ' ', '_')\n file_path = os.path.join(\n \"brainomics_data\", subject_id, \"%s.nii.gz\" % name_aux)\n file_tarball_url = urls[contrast_id]\n filenames.append((file_path, file_tarball_url, opts))\n # Fetch masks if asked by user\n if get_masks:\n urls.append(\"%sbrainomics_data_masks.zip?rql=%s&vid=data-zip\"\n % (root_url,\n _urllib.parse.quote(base_query % {\"types\": '\"boolean mask\"',\n \"label\": \"mask\"},\n 
safe=',()')))\n for subject_id in subject_ids:\n file_path = os.path.join(\n \"brainomics_data\", subject_id, \"boolean_mask_mask.nii.gz\")\n file_tarball_url = urls[-1]\n filenames.append((file_path, file_tarball_url, opts))\n # Fetch anats if asked by user\n if get_anats:\n urls.append(\"%sbrainomics_data_anats.zip?rql=%s&vid=data-zip\"\n % (root_url,\n _urllib.parse.quote(base_query % {\"types\": '\"normalized T1\"',\n \"label\": \"anatomy\"},\n safe=',()')))\n for subject_id in subject_ids:\n file_path = os.path.join(\n \"brainomics_data\", subject_id,\n \"normalized_T1_anat_defaced.nii.gz\")\n file_tarball_url = urls[-1]\n filenames.append((file_path, file_tarball_url, opts))\n # Fetch subject characteristics (separated in two files)\n if url is None:\n url_csv = (\"%sdataset/cubicwebexport.csv?rql=%s&vid=csvexport\"\n % (root_url, _urllib.parse.quote(\"Any X WHERE X is Subject\")))\n url_csv2 = (\"%sdataset/cubicwebexport2.csv?rql=%s&vid=csvexport\"\n % (root_url,\n _urllib.parse.quote(\"Any X,XI,XD WHERE X is QuestionnaireRun, \"\n \"X identifier XI, X datetime \"\n \"XD\", safe=',')\n ))\n else:\n url_csv = \"%s/cubicwebexport.csv\" % url\n url_csv2 = \"%s/cubicwebexport2.csv\" % url\n filenames += [(\"cubicwebexport.csv\", url_csv, {}),\n (\"cubicwebexport2.csv\", url_csv2, {})]\n\n # Actual data fetching\n dataset_name = 'brainomics_localizer'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n fdescr = _get_dataset_descr(dataset_name)\n files = _fetch_files(data_dir, filenames, verbose=verbose)\n anats = None\n masks = None\n tmaps = None\n # combine data from both covariates files into one single recarray\n from numpy.lib.recfunctions import join_by\n ext_vars_file2 = files[-1]\n csv_data2 = np.recfromcsv(ext_vars_file2, delimiter=';')\n files = files[:-1]\n ext_vars_file = files[-1]\n csv_data = np.recfromcsv(ext_vars_file, delimiter=';')\n files = files[:-1]\n # join_by sorts the output along the key\n csv_data = join_by('subject_id', csv_data, csv_data2,\n usemask=False, asrecarray=True)[subject_mask - 1]\n if get_anats:\n anats = files[-n_subjects:]\n files = files[:-n_subjects]\n if get_masks:\n masks = files[-n_subjects:]\n files = files[:-n_subjects]\n if get_tmaps:\n tmaps = files[1::2]\n files = files[::2]\n return Bunch(cmaps=files, tmaps=tmaps, masks=masks, anats=anats,\n ext_vars=csv_data, description=fdescr)\n\n\ndef fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None,\n verbose=1):\n \"\"\"Fetch calculation task contrast maps from the localizer.\n\n Parameters\n ----------\n n_subjects: int, optional\n The number of subjects to load. If None is given,\n all 94 subjects are used.\n\n data_dir: string, optional\n Path of the data directory. Used to force data storage in a specified\n location.\n\n url: string, optional\n Override download URL. 
Used for test only (or if you set up a mirror of\n        the data).\n\n    verbose: int, optional\n        verbosity level (0 means no message).\n\n    Returns\n    -------\n    data: Bunch\n        Dictionary-like object, the interest attributes are:\n        'cmaps': string list, giving paths to nifti contrast maps\n\n    Notes\n    ------\n\n    This function is only a caller for fetch_localizer_contrasts, in order\n    to simplify reading and understanding of the examples.\n    The 'calculation (auditory and visual cue)' contrast is used.\n\n    See Also\n    ---------\n    nilearn.datasets.fetch_localizer_button_task\n    nilearn.datasets.fetch_localizer_contrasts\n\n    \"\"\"\n    data = fetch_localizer_contrasts([\"calculation (auditory and visual cue)\"],\n                                     n_subjects=n_subjects,\n                                     get_tmaps=False, get_masks=False,\n                                     get_anats=False, data_dir=data_dir,\n                                     url=url, resume=True, verbose=verbose)\n    data.pop('tmaps')\n    data.pop('masks')\n    data.pop('anats')\n    return data\n\n\ndef fetch_localizer_button_task(data_dir=None, url=None, verbose=1):\n    \"\"\"Fetch left vs right button press contrast maps from the localizer.\n\n    This function ships only the tmap specific to the 2nd subject (S02) and\n    its normalized T1 image.\n\n    Parameters\n    ----------\n    data_dir: string, optional\n        Path of the data directory. Used to force data storage in a specified\n        location.\n\n    url: string, optional\n        Override download URL. Used for test only (or if you set up a mirror\n        of the data).\n\n    verbose: int, optional\n        verbosity level (0 means no message).\n\n    Returns\n    -------\n    data: Bunch\n        Dictionary-like object, the interest attributes are:\n        'tmap': string, giving paths to nifti contrast maps\n        'anat': string, giving paths to normalized anatomical image\n\n    Notes\n    ------\n\n    This function is only a caller for fetch_localizer_contrasts, in order\n    to simplify reading and understanding of the examples.\n    The 'left vs right button press' contrast is used.\n\n    See Also\n    ---------\n    nilearn.datasets.fetch_localizer_calculation_task\n    nilearn.datasets.fetch_localizer_contrasts\n\n    \"\"\"\n    # The URL can be retrieved from the nilearn account on OSF (Open\n    # Science Framework). Uploaded files specific to S02 from\n    # fetch_localizer_contrasts ['left vs right button press']\n    if url is None:\n        url = 'https://osf.io/dx9jn/download'\n\n    tmap = \"t_map_left_auditory_&_visual_click_vs_right_auditory&visual_click.nii.gz\"\n    anat = \"normalized_T1_anat_defaced.nii.gz\"\n\n    opts = {'uncompress': True}\n\n    options = ('tmap', 'anat')\n    filenames = [(os.path.join('localizer_button_task', name), url, opts)\n                 for name in (tmap, anat)]\n\n    dataset_name = 'brainomics'\n    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n                                verbose=verbose)\n    files = _fetch_files(data_dir, filenames, verbose=verbose)\n\n    fdescr = _get_dataset_descr('brainomics_localizer')\n\n    params = dict([('description', fdescr)] + list(zip(options, files)))\n    return Bunch(**params)\n\n\ndef fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac',\n                    band_pass_filtering=False, global_signal_regression=False,\n                    derivatives=['func_preproc'],\n                    quality_checked=True, url=None, verbose=1, **kwargs):\n    \"\"\" Fetch ABIDE dataset\n\n    Fetch the Autism Brain Imaging Data Exchange (ABIDE) dataset with respect\n    to criteria that can be passed as parameters. Note that this is the\n    preprocessed version of ABIDE provided by the Preprocessed Connectomes\n    Project (PCP).\n\n    Parameters\n    ----------\n\n    data_dir: string, optional\n        Path of the data directory. Used to force data storage in a specified\n        location. 
Default: None\n\n    n_subjects: int, optional\n        The number of subjects to load. If None is given,\n        all available subjects are used (this number depends on the\n        preprocessing pipeline used).\n\n    pipeline: string, optional\n        Possible pipelines are \"ccs\", \"cpac\", \"dparsf\" and \"niak\".\n\n    band_pass_filtering: boolean, optional\n        Due to controversies in the literature, band pass filtering is\n        optional. If true, signal is band filtered between 0.01Hz and 0.1Hz.\n\n    global_signal_regression: boolean, optional\n        Indicates if global signal regression should be applied on the\n        signals.\n\n    derivatives: string list, optional\n        Types of downloaded files. Possible values are: alff, degree_binarize,\n        degree_weighted, dual_regression, eigenvector_binarize,\n        eigenvector_weighted, falff, func_mask, func_mean, func_preproc, lfcd,\n        reho, rois_aal, rois_cc200, rois_cc400, rois_dosenbach160, rois_ez,\n        rois_ho, rois_tt, and vmhc. Please refer to the PCP site for more\n        details.\n\n    quality_checked: boolean, optional\n        If true (default), restrict the list of subjects to the ones that\n        passed quality assessment for all raters.\n\n    kwargs: parameter list, optional\n        Any extra keyword argument will be used to filter downloaded subjects\n        according to the CSV phenotypic file. Some examples of filters are\n        indicated below.\n\n        SUB_ID: list of integers in [50001, 50607], optional\n            Ids of the subjects to be loaded.\n\n        DX_GROUP: integer in {1, 2}, optional\n            1 is autism, 2 is control\n\n        DSM_IV_TR: integer in [0, 4], optional\n            0 is control, 1 is autism, 2 is Asperger, 3 is PDD-NOS,\n            4 is Asperger or PDD-NOS\n\n        AGE_AT_SCAN: float in [6.47, 64], optional\n            Age of the subject\n\n        SEX: integer in {1, 2}, optional\n            1 is male, 2 is female\n\n        HANDEDNESS_CATEGORY: string in {'R', 'L', 'Mixed', 'Ambi'}, optional\n            R = Right, L = Left, Ambi = Ambidextrous\n\n        HANDEDNESS_SCORE: integer in [-100, 100], optional\n            Positive = Right, Negative = Left, 0 = Ambidextrous\n\n    Notes\n    -----\n    Code and description of preprocessing pipelines are provided on the\n    `PCP website <http://preprocessed-connectomes-project.github.io/>`_.\n\n    References\n    ----------\n    Nielsen, Jared A., et al. \"Multisite functional connectivity MRI\n    classification of autism: ABIDE results.\" Frontiers in human neuroscience\n    7 (2013).\n    \"\"\"\n    # People keep getting it wrong and submitting a string instead of a\n    # list of strings. 
We'll make their lives easier\n    if isinstance(derivatives, _basestring):\n        derivatives = [derivatives, ]\n\n    # Parameter check\n    for derivative in derivatives:\n        if derivative not in [\n                'alff', 'degree_binarize', 'degree_weighted',\n                'dual_regression', 'eigenvector_binarize',\n                'eigenvector_weighted', 'falff', 'func_mask', 'func_mean',\n                'func_preproc', 'lfcd', 'reho', 'rois_aal', 'rois_cc200',\n                'rois_cc400', 'rois_dosenbach160', 'rois_ez', 'rois_ho',\n                'rois_tt', 'vmhc']:\n            raise KeyError('%s is not a valid derivative' % derivative)\n\n    strategy = ''\n    if not band_pass_filtering:\n        strategy += 'no'\n    strategy += 'filt_'\n    if not global_signal_regression:\n        strategy += 'no'\n    strategy += 'global'\n\n    # General file: phenotypic information\n    dataset_name = 'ABIDE_pcp'\n    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n                                verbose=verbose)\n    if url is None:\n        url = ('https://s3.amazonaws.com/fcp-indi/data/Projects/'\n               'ABIDE_Initiative')\n\n    if quality_checked:\n        kwargs['qc_rater_1'] = b'OK'\n        kwargs['qc_anat_rater_2'] = [b'OK', b'maybe']\n        kwargs['qc_func_rater_2'] = [b'OK', b'maybe']\n        kwargs['qc_anat_rater_3'] = b'OK'\n        kwargs['qc_func_rater_3'] = b'OK'\n\n    # Fetch the phenotypic file and load it\n    csv = 'Phenotypic_V1_0b_preprocessed1.csv'\n    path_csv = _fetch_files(data_dir, [(csv, url + '/' + csv, {})],\n                            verbose=verbose)[0]\n\n    # Note: the phenotypic file contains strings with commas inside quoted\n    # fields, which messes up numpy's csv loading. This is why we do a pass\n    # to rewrite those in-field commas. This could be\n    # done simply with pandas but we don't want such a dependency ATM\n    # pheno = pandas.read_csv(path_csv).to_records()\n    with open(path_csv, 'r') as pheno_f:\n        pheno = ['i' + pheno_f.readline()]\n\n        # This regexp replaces commas between double quotes\n        for line in pheno_f:\n            pheno.append(re.sub(r',(?=[^\"]*\"(?:[^\"]*\"[^\"]*\")*[^\"]*$)', \";\", line))\n\n    # bytes (encode()) needed for python 2/3 compat with numpy\n    pheno = '\\n'.join(pheno).encode()\n    pheno = BytesIO(pheno)\n    pheno = np.recfromcsv(pheno, comments='$', case_sensitive=True)\n\n    # First, filter subjects with no filename\n    pheno = pheno[pheno['FILE_ID'] != b'no_filename']\n    # Apply user defined filters\n    user_filter = _filter_columns(pheno, kwargs)\n    pheno = pheno[user_filter]\n\n    # Go into specific data folder and url\n    data_dir = os.path.join(data_dir, pipeline, strategy)\n    url = '/'.join([url, 'Outputs', pipeline, strategy])\n\n    # Get the files\n    results = {}\n    file_ids = [file_id.decode() for file_id in pheno['FILE_ID']]\n    if n_subjects is not None:\n        file_ids = file_ids[:n_subjects]\n        pheno = pheno[:n_subjects]\n\n    results['description'] = _get_dataset_descr(dataset_name)\n    results['phenotypic'] = pheno\n    for derivative in derivatives:\n        ext = '.1D' if derivative.startswith('rois') else '.nii.gz'\n        files = []\n        for file_id in file_ids:\n            file_ = [(\n                file_id + '_' + derivative + ext,\n                '/'.join([url, derivative, file_id + '_' + derivative + ext]),\n                {}\n            )]\n            files.append(_fetch_files(data_dir, file_, verbose=verbose)[0])\n        # Load derivatives if needed\n        if ext == '.1D':\n            files = [np.loadtxt(f) for f in files]\n        results[derivative] = files\n    return Bunch(**results)\n\n\ndef _load_mixed_gambles(zmap_imgs):\n    \"\"\"Ravel zmaps (one per subject) along the time axis, resulting in\n    n_subjects * n_trials 3D niimgs, and then make a gain vector y of the\n    same length.\n    \"\"\"\n    X = []\n    y = []\n    mask = []\n    for zmap_img in zmap_imgs:\n        # load subject data\n        this_X = zmap_img.get_data()\n        affine = zmap_img.affine\n        
finite_mask = np.all(np.isfinite(this_X), axis=-1)\n        this_mask = np.logical_and(np.all(this_X != 0, axis=-1),\n                                   finite_mask)\n        this_y = np.array([np.arange(1, 9)] * 6).ravel()\n\n        # gain levels\n        if len(this_y) != this_X.shape[-1]:\n            raise RuntimeError(\"%s: Expecting %i volumes, got %i!\" % (\n                zmap_img, len(this_y), this_X.shape[-1]))\n\n        # standardize subject data\n        this_X -= this_X.mean(axis=-1)[..., np.newaxis]\n        std = this_X.std(axis=-1)\n        std[std == 0] = 1\n        this_X /= std[..., np.newaxis]\n\n        # commit subject data\n        X.append(this_X)\n        y.extend(this_y)\n        mask.append(this_mask)\n    y = np.array(y)\n    X = np.concatenate(X, axis=-1)\n    mask = np.sum(mask, axis=0) > .5 * len(mask)\n    mask = np.logical_and(mask, np.all(np.isfinite(X), axis=-1))\n    X = X[mask, :].T\n    tmp = np.zeros(list(mask.shape) + [len(X)])\n    tmp[mask, :] = X.T\n    # use the builtin int: np.int is deprecated and removed in recent NumPy\n    mask_img = nibabel.Nifti1Image(mask.astype(int), affine)\n    X = nibabel.four_to_three(nibabel.Nifti1Image(tmp, affine))\n    return X, y, mask_img\n\n\ndef fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True,\n                        return_raw_data=False, verbose=0):\n    \"\"\"Fetch Jimura \"mixed gambles\" dataset.\n\n    Parameters\n    ----------\n    n_subjects: int, optional (default 1)\n        The number of subjects to load. If None is given, all the\n        subjects are used.\n\n    data_dir: string, optional (default None)\n        Path of the data directory. Used to force data storage in a specified\n        location. Default: None.\n\n    url: string, optional (default None)\n        Override download URL. Used for test only (or if you set up a mirror\n        of the data).\n\n    resume: bool, optional (default True)\n        If true, try resuming download if possible.\n\n    verbose: int, optional (default 0)\n        Defines the level of verbosity of the output.\n\n    return_raw_data: bool, optional (default False)\n        If false, then the data will be transformed into an (X, y) pair,\n        suitable for machine learning routines. X is a list of\n        n_subjects * 48 Nifti1Image objects (where 48 is the number of\n        trials), and y is an array of shape (n_subjects * 48,).\n\n    Returns\n    -------\n    data: Bunch\n        Dictionary-like object, the interest attributes are:\n        'zmaps': string list\n            Paths to realigned gain betamaps (one nifti per subject).\n        'gain': list of Nifti1Image or None\n            If return_raw_data is false, this is a list of n_subjects * 48\n            Nifti1Image objects, else it is None.\n        'y': array of shape (n_subjects * 48,) or None\n            If return_raw_data is false, then this is an array of shape\n            (n_subjects * 48,), else it is None.\n\n    References\n    ----------\n    [1] K. Jimura and R. Poldrack, \"Analyses of regional-average activation\n        and multivoxel pattern information tell complementary stories\",\n        Neuropsychologia, vol. 
50, page 544, 2012\n \"\"\"\n if n_subjects > 16:\n warnings.warn('Warning: there are only 16 subjects!')\n n_subjects = 16\n if url is None:\n url = (\"https://www.nitrc.org/frs/download.php/7229/\"\n \"jimura_poldrack_2012_zmaps.zip\")\n opts = dict(uncompress=True)\n files = [(\"zmaps%ssub%03i_zmaps.nii.gz\" % (os.sep, (j + 1)), url, opts)\n for j in range(n_subjects)]\n data_dir = _get_dataset_dir('jimura_poldrack_2012_zmaps',\n data_dir=data_dir)\n zmap_fnames = _fetch_files(data_dir, files, resume=resume, verbose=verbose)\n subject_id = np.repeat(np.arange(n_subjects), 6 * 8)\n data = Bunch(zmaps=zmap_fnames,\n subject_id=subject_id)\n if not return_raw_data:\n X, y, mask_img = _load_mixed_gambles(check_niimg(data.zmaps,\n return_iterator=True))\n data.zmaps, data.gain, data.mask_img = X, y, mask_img\n return data\n\n\ndef fetch_megatrawls_netmats(dimensionality=100, timeseries='eigen_regression',\n matrices='partial_correlation', data_dir=None,\n resume=True, verbose=1):\n \"\"\"Downloads and returns Network Matrices data from MegaTrawls release in HCP.\n\n This data can be used to predict relationships between imaging data and\n non-imaging behavioural measures such as age, sex, education, etc.\n The network matrices are estimated from functional connectivity\n datasets of 461 subjects. Full technical details in [1] [2].\n\n .. versionadded:: 0.2.2\n\n Parameters\n ----------\n dimensionality: int, optional\n Valid inputs are 25, 50, 100, 200, 300. By default, network matrices\n estimated using Group ICA brain parcellations of 100 components/dimensions\n will be returned.\n\n timeseries: str, optional\n Valid inputs are 'multiple_spatial_regression' or 'eigen_regression'. By\n default 'eigen_regression', matrices estimated using first principal\n eigen component timeseries signals extracted from each subject data\n parcellations will be returned. Otherwise, 'multiple_spatial_regression'\n matrices estimated using spatial regressor based timeseries signals\n extracted from each subject data parcellations will be returned.\n\n matrices: str, optional\n Valid inputs are 'full_correlation' or 'partial_correlation'. By default,\n partial correlation matrices will be returned otherwise if selected\n full correlation matrices will be returned.\n\n data_dir: str, default is None, optional\n Path of the data directory. Used to force data storage in a specified\n location.\n\n resume: bool, default is True\n This parameter is required if a partially downloaded file is needed\n to be resumed to download again.\n\n verbose: int, default is 1\n This parameter is used to set the verbosity level to print the message\n to give information about the processing.\n 0 indicates no information will be given.\n\n Returns\n -------\n data: Bunch\n dictionary-like object, the attributes are :\n\n - 'dimensions': int, consists of given input in dimensions.\n\n - 'timeseries': str, consists of given input in timeseries method.\n\n - 'matrices': str, consists of given type of specific matrices.\n\n - 'correlation_matrices': ndarray, consists of correlation matrices\n based on given type of matrices. Array size will depend on given\n dimensions (n, n).\n - 'description': data description\n\n References\n ----------\n [1] Stephen Smith et al, HCP beta-release of the Functional Connectivity\n MegaTrawl.\n April 2015 \"HCP500-MegaTrawl\" release.\n https://db.humanconnectome.org/megatrawl/\n\n [2] Smith, S.M. et al. Nat. Neurosci. 18, 1565-1567 (2015).\n\n [3] N.Filippini, et al. 
Distinct patterns of brain activity in young\n        carriers of the APOE-e4 allele.\n        Proc Natl Acad Sci USA (PNAS), 106:7209-7214, 2009.\n\n    [4] S.Smith, et al. Methods for network modelling from high quality rfMRI data.\n        Meeting of the Organization for Human Brain Mapping. 2014\n\n    [5] J.X. O'Reilly et al. Distinct and overlapping functional zones in the\n        cerebellum defined by resting state functional connectivity.\n        Cerebral Cortex, 2009.\n\n    Note: See description for terms & conditions on data usage.\n\n    \"\"\"\n    url = \"http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz\"\n    opts = {'uncompress': True}\n\n    error_message = \"Invalid {0} input is provided: {1}, choose one of them {2}\"\n    # standard dataset terms\n    dimensionalities = [25, 50, 100, 200, 300]\n    if dimensionality not in dimensionalities:\n        raise ValueError(error_message.format('dimensionality', dimensionality,\n                                              dimensionalities))\n    timeseries_methods = ['multiple_spatial_regression', 'eigen_regression']\n    if timeseries not in timeseries_methods:\n        raise ValueError(error_message.format('timeseries', timeseries,\n                                              timeseries_methods))\n    output_matrices_names = ['full_correlation', 'partial_correlation']\n    if matrices not in output_matrices_names:\n        raise ValueError(error_message.format('matrices', matrices,\n                                              output_matrices_names))\n\n    dataset_name = 'Megatrawls'\n    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)\n    description = _get_dataset_descr(dataset_name)\n\n    timeseries_map = dict(multiple_spatial_regression='ts2', eigen_regression='ts3')\n    matrices_map = dict(full_correlation='Znet1.txt', partial_correlation='Znet2.txt')\n    filepath = [(os.path.join(\n        '3T_Q1-Q6related468_MSMsulc_d%d_%s' % (dimensionality, timeseries_map[timeseries]),\n        matrices_map[matrices]), url, opts)]\n\n    # Fetch all the files\n    files = _fetch_files(data_dir, filepath, resume=resume, verbose=verbose)\n\n    # Load the files into arrays\n    correlation_matrices = csv_to_array(files[0])\n\n    return Bunch(\n        dimensions=dimensionality,\n        timeseries=timeseries,\n        matrices=matrices,\n        correlation_matrices=correlation_matrices,\n        description=description)\n\n\ndef fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1):\n    \"\"\"Fetch COBRE datasets preprocessed using NIAK 0.17 under CentOS\n    version 6.3 with Octave version 4.0.2 and the Minc toolkit version 0.3.18.\n\n    Downloads and returns COBRE preprocessed resting state fMRI datasets,\n    covariates and phenotypic information such as demographic and clinical\n    variables, and a measure of frame displacement FD (an average FD over all\n    the time frames left after censoring).\n\n    Each subject `fmri_XXXXXXX.nii.gz` is a 3D+t nifti volume (150 volumes).\n    WARNING: no confounds were actually regressed from the data, so this can\n    be done interactively by the user, who will be able to explore different\n    analytical paths easily.\n\n    For each subject, there is an `fmri_XXXXXXX.tsv` file which contains\n    covariates such as motion parameters and mean CSF signal that should be\n    regressed out of the functional data.\n\n    `keys_confounds.json`: a json file that describes each variable mentioned\n    in the files `fmri_XXXXXXX.tsv.gz`. It also contains a list of time frames\n    that have been removed from the time series by censoring for high motion.\n\n    `phenotypic_data.tsv` contains the data of clinical variables that are\n    explained in `keys_phenotypic_data.json`\n\n    .. 
versionadded:: 0.3\n\n Parameters\n ----------\n n_subjects: int, optional\n The number of subjects to load from maximum of 146 subjects.\n By default, 10 subjects will be loaded. If n_subjects=None,\n all subjects will be loaded.\n\n data_dir: str, optional\n Path to the data directory. Used to force data storage in a\n specified location. Default: None\n\n url: str, optional\n Override download url. Used for test only (or if you setup a\n mirror of the data). Default: None\n\n verbose: int, optional\n Verbosity level (0 means no message).\n\n Returns\n -------\n data: Bunch\n Dictionary-like object, the attributes are:\n\n - 'func': string list\n Paths to Nifti images.\n - 'confounds': string list\n Paths to .tsv files of each subject, confounds.\n - 'phenotypic': numpy.recarray\n Contains data of clinical variables, sex, age, FD.\n - 'description': data description of the release and references.\n - 'desc_con': str\n description of the confounds variables\n - 'desc_phenotypic': str\n description of the phenotypic variables.\n\n Notes\n -----\n See `more information about datasets structure\n <https://figshare.com/articles/COBRE_preprocessed_with_NIAK_0_17_-_lightweight_release/4197885>`_\n \"\"\"\n\n if url is None:\n # Here we use the file that provides URL for all others\n url = 'https://api.figshare.com/v2/articles/4197885'\n dataset_name = 'cobre'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n fdescr = _get_dataset_descr(dataset_name)\n\n # First, fetch the file that references all individual URLs\n files = _fetch_files(data_dir, [(\"4197885\", url, {})],\n verbose=verbose)[0]\n\n files = json.load(open(files, 'r'))\n files = files['files']\n # Index files by name\n files_ = {}\n for f in files:\n files_[f['name']] = f\n files = files_\n\n # Fetch the phenotypic file and load it\n csv_name_gz = 'phenotypic_data.tsv.gz'\n csv_name = os.path.splitext(csv_name_gz)[0]\n csv_file_phen = _fetch_files(\n data_dir, [(csv_name, files[csv_name_gz]['download_url'],\n {'md5': files[csv_name_gz].get('md5', None),\n 'move': csv_name_gz,\n 'uncompress': True})],\n verbose=verbose)[0]\n\n # Load file in filename to numpy arrays\n names = ['ID', 'Current Age', 'Gender', 'Handedness', 'Subject Type',\n 'Diagnosis', 'Frames OK', 'FD', 'FD Scrubbed']\n\n csv_array_phen = np.recfromcsv(csv_file_phen, names=names,\n skip_header=True, delimiter='\\t')\n\n # Check number of subjects\n max_subjects = len(csv_array_phen)\n if n_subjects is None:\n n_subjects = max_subjects\n\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = max_subjects\n\n sz_count = list(csv_array_phen['subject_type']).count(b'Patient')\n ct_count = list(csv_array_phen['subject_type']).count(b'Control')\n\n n_sz = np.round(float(n_subjects) / max_subjects * sz_count).astype(int)\n n_ct = np.round(float(n_subjects) / max_subjects * ct_count).astype(int)\n\n # First, restrict the csv files to the adequate number of subjects\n sz_ids = csv_array_phen[csv_array_phen['subject_type'] ==\n b'Patient']['id'][:n_sz]\n ct_ids = csv_array_phen[csv_array_phen['subject_type'] ==\n b'Control']['id'][:n_ct]\n ids = np.hstack([sz_ids, ct_ids])\n csv_array_phen = csv_array_phen[np.in1d(csv_array_phen['id'], ids)]\n\n # Call fetch_files once per subject.\n\n func = []\n con = []\n for i in ids:\n f = 'fmri_00' + str(i) + '.nii.gz'\n c_gz = 'fmri_00' + str(i) + '.tsv.gz'\n c = os.path.splitext(c_gz)[0]\n\n f, c = _fetch_files(\n data_dir,\n [(f, 
files[f]['download_url'], {'md5': files[f].get('md5', None),\n 'move': f}),\n (c, files[c_gz]['download_url'],\n {'md5': files[c_gz].get('md5', None),\n 'move': c_gz, 'uncompress': True})\n ],\n verbose=verbose)\n func.append(f)\n con.append(c)\n\n # Fetch the the complementary files\n keys_con = \"keys_confounds.json\"\n keys_phen = \"keys_phenotypic_data.json\"\n\n csv_keys_con, csv_keys_phen = _fetch_files(\n data_dir,\n [(keys_con, files[keys_con]['download_url'],\n {'md5': files[keys_con].get('md5', None), 'move': keys_con}),\n (keys_phen, files[keys_phen]['download_url'],\n {'md5': files[keys_phen].get('md5', None), 'move': keys_phen})\n ],\n verbose=verbose)\n\n files_keys_con = open(csv_keys_con, 'r').read()\n files_keys_phen = open(csv_keys_phen, 'r').read()\n\n return Bunch(func=func, confounds=con, phenotypic=csv_array_phen,\n description=fdescr, desc_con=files_keys_con,\n desc_phenotypic=files_keys_phen)\n\n\ndef fetch_surf_nki_enhanced(n_subjects=10, data_dir=None,\n url=None, resume=True, verbose=1):\n \"\"\"Download and load the NKI enhanced resting-state dataset,\n preprocessed and projected to the fsaverage5 space surface.\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n n_subjects: int, optional\n The number of subjects to load from maximum of 102 subjects.\n By default, 10 subjects will be loaded. If None is given,\n all 102 subjects will be loaded.\n\n data_dir: str, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n url: str, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data). Default: None\n\n resume: bool, optional (default True)\n If True, try resuming download if possible.\n\n verbose: int, optional (default 1)\n Defines the level of verbosity of the output.\n\n Returns\n -------\n data: sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n - 'func_left': Paths to Gifti files containing resting state\n time series left hemisphere\n - 'func_right': Paths to Gifti files containing resting state\n time series right hemisphere\n - 'phenotypic': array containing tuple with subject ID, age,\n dominant hand and sex for each subject.\n - 'description': data description of the release and references.\n\n References\n ----------\n :Download: http://fcon_1000.projects.nitrc.org/indi/enhanced/\n\n Nooner et al, (2012). The NKI-Rockland Sample: A model for accelerating the\n pace of discovery science in psychiatry. 
Frontiers in neuroscience 6, 152.\n URL http://dx.doi.org/10.3389/fnins.2012.00152\n\n \"\"\"\n\n if url is None:\n url = 'https://www.nitrc.org/frs/download.php/'\n\n # Preliminary checks and declarations\n dataset_name = 'nki_enhanced_surface'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n ids = ['A00028185', 'A00033747', 'A00035072', 'A00035827', 'A00035840',\n 'A00037112', 'A00037511', 'A00038998', 'A00039391', 'A00039431',\n 'A00039488', 'A00040524', 'A00040623', 'A00040944', 'A00043299',\n 'A00043520', 'A00043677', 'A00043722', 'A00045589', 'A00050998',\n 'A00051063', 'A00051064', 'A00051456', 'A00051457', 'A00051477',\n 'A00051513', 'A00051514', 'A00051517', 'A00051528', 'A00051529',\n 'A00051539', 'A00051604', 'A00051638', 'A00051658', 'A00051676',\n 'A00051678', 'A00051679', 'A00051726', 'A00051774', 'A00051796',\n 'A00051835', 'A00051882', 'A00051925', 'A00051927', 'A00052070',\n 'A00052117', 'A00052118', 'A00052126', 'A00052180', 'A00052197',\n 'A00052214', 'A00052234', 'A00052307', 'A00052319', 'A00052499',\n 'A00052502', 'A00052577', 'A00052612', 'A00052639', 'A00053202',\n 'A00053369', 'A00053456', 'A00053474', 'A00053546', 'A00053576',\n 'A00053577', 'A00053578', 'A00053625', 'A00053626', 'A00053627',\n 'A00053874', 'A00053901', 'A00053927', 'A00053949', 'A00054038',\n 'A00054153', 'A00054173', 'A00054358', 'A00054482', 'A00054532',\n 'A00054533', 'A00054534', 'A00054621', 'A00054895', 'A00054897',\n 'A00054913', 'A00054929', 'A00055061', 'A00055215', 'A00055352',\n 'A00055353', 'A00055542', 'A00055738', 'A00055763', 'A00055806',\n 'A00056097', 'A00056098', 'A00056164', 'A00056372', 'A00056452',\n 'A00056489', 'A00056949']\n\n nitrc_ids = range(8260, 8470)\n max_subjects = len(ids)\n if n_subjects is None:\n n_subjects = max_subjects\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = max_subjects\n ids = ids[:n_subjects]\n nitrc_ids = nitrc_ids[:n_subjects]\n\n # Dataset description\n fdescr = _get_dataset_descr(dataset_name)\n\n # First, get the metadata\n phenotypic_file = 'NKI_enhanced_surface_phenotypics.csv'\n phenotypic = (phenotypic_file, url + '8470/pheno_nki_nilearn.csv',\n {'move': phenotypic_file})\n\n phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume,\n verbose=verbose)[0]\n\n # Load the csv file\n phenotypic = np.genfromtxt(phenotypic, skip_header=True,\n names=['Subject', 'Age',\n 'Dominant Hand', 'Sex'],\n delimiter=',', dtype=['U9', '<f8',\n 'U1', 'U1'])\n\n # Keep phenotypic information for selected subjects\n int_ids = np.asarray(ids)\n phenotypic = phenotypic[[np.where(phenotypic['Subject'] == i)[0][0]\n for i in int_ids]]\n\n # Download subjects' datasets\n func_right = []\n func_left = []\n for i in range(len(ids)):\n\n archive = url + '%i/%s_%s_preprocessed_fsaverage5_fwhm6.gii'\n func = os.path.join('%s', '%s_%s_preprocessed_fwhm6.gii')\n rh = _fetch_files(data_dir,\n [(func % (ids[i], ids[i], 'right'),\n archive % (nitrc_ids[i], ids[i], 'rh'),\n {'move': func % (ids[i], ids[i], 'right')}\n )],\n resume=resume, verbose=verbose)\n lh = _fetch_files(data_dir,\n [(func % (ids[i], ids[i], 'left'),\n archive % (nitrc_ids[i], ids[i], 'lh'),\n {'move': func % (ids[i], ids[i], 'left')}\n )],\n resume=resume, verbose=verbose)\n\n func_right.append(rh[0])\n func_left.append(lh[0])\n\n return Bunch(func_left=func_left, func_right=func_right,\n phenotypic=phenotypic,\n description=fdescr)\n\n\ndef _fetch_development_fmri_participants(data_dir, url, 
verbose):\n \"\"\"Helper function to fetch_development_fmri.\n\n This function helps in downloading and loading participants data from .tsv\n uploaded on Open Science Framework (OSF).\n\n The original .tsv file contains many columns but this function picks only\n those columns that are relevant.\n\n Parameters\n ----------\n data_dir: str\n Path of the data directory. Used to force data storage in a specified\n location. If None is given, data are stored in home directory.\n\n url: str, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data). Default: None\n\n verbose: int\n Defines the level of verbosity of the output.\n\n Returns\n -------\n participants : numpy.ndarray\n Contains data of each subject age, age group, child or adult,\n gender, handedness.\n\n \"\"\"\n dataset_name = 'development_fmri'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n\n if url is None:\n url = 'https://osf.io/yr3av/download'\n\n files = [('participants.tsv', url, {'move': 'participants.tsv'})]\n path_to_participants = _fetch_files(data_dir, files, verbose=verbose)[0]\n\n # Load path to participants\n dtype = [('participant_id', 'U12'), ('Age', '<f8'), ('AgeGroup', 'U6'),\n ('Child_Adult', 'U5'), ('Gender', 'U4'), ('Handedness', 'U4')]\n names = ['participant_id', 'Age', 'AgeGroup', 'Child_Adult', 'Gender',\n 'Handedness']\n participants = csv_to_array(path_to_participants, skip_header=True,\n dtype=dtype, names=names)\n return participants\n\n\ndef _fetch_development_fmri_functional(participants, data_dir, url, verbose):\n \"\"\"Helper function to fetch_development_fmri.\n\n This function helps in downloading functional MRI data in Nifti\n and its confound corresponding to each subject.\n\n The files are downloaded from Open Science Framework (OSF).\n\n Parameters\n ----------\n participants : numpy.ndarray\n Should contain column participant_id which represents subjects id. The\n number of files are fetched based on ids in this column.\n\n data_dir: str\n Path of the data directory. Used to force data storage in a specified\n location. If None is given, data are stored in home directory.\n\n url: str, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data). Default: None\n\n verbose: int\n Defines the level of verbosity of the output.\n\n Returns\n -------\n func: list of str (Nifti files)\n Paths to functional MRI data (4D) for each subject.\n\n regressors: list of str (tsv files)\n Paths to regressors related to each subject.\n \"\"\"\n dataset_name = 'development_fmri'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n\n if url is None:\n url = 'https://osf.io/download/{}'\n\n confounds = '{}_task-pixar_desc-confounds_regressors.tsv'\n func = '{0}_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'\n\n # The gzip contains unique download keys per Nifti file and confound\n # pre-extracted from OSF. 
Required for downloading files.\n package_directory = os.path.dirname(os.path.abspath(__file__))\n dtype = [('participant_id', 'U12'), ('key_regressor', 'U24'),\n ('key_bold', 'U24')]\n names = ['participant_id', 'key_r', 'key_b']\n # csv file contains download information related to OpenScience(osf)\n osf_data = csv_to_array(os.path.join(package_directory, \"data\",\n \"development_fmri.csv\"),\n skip_header=True, dtype=dtype, names=names)\n\n funcs = []\n regressors = []\n\n for participant_id in participants['participant_id']:\n this_osf_id = osf_data[osf_data['participant_id'] == participant_id]\n # Download regressors\n confound_url = url.format(this_osf_id['key_r'][0])\n regressor_file = [(confounds.format(participant_id),\n confound_url,\n {'move': confounds.format(participant_id)})]\n path_to_regressor = _fetch_files(data_dir, regressor_file,\n verbose=verbose)[0]\n regressors.append(path_to_regressor)\n # Download bold images\n func_url = url.format(this_osf_id['key_b'][0])\n func_file = [(func.format(participant_id, participant_id), func_url,\n {'move': func.format(participant_id)})]\n path_to_func = _fetch_files(data_dir, func_file, verbose=verbose)[0]\n funcs.append(path_to_func)\n return funcs, regressors\n\n\ndef fetch_development_fmri(n_subjects=None, data_dir=None, resume=True,\n verbose=0):\n \"\"\"Fetch movie watching based brain development dataset (fMRI)\n\n The data is downsampled to 4mm resolution for convenience. The origin of\n the data is coming from OpenNeuro. See Notes below.\n\n .. versionadded:: 0.5.2\n\n Parameters\n ----------\n n_subjects: int, optional (default None)\n The number of subjects to load. If None, all the subjects are\n loaded. Total 155 subjects.\n\n data_dir: str, optional (default None)\n Path of the data directory. Used to force data storage in a specified\n location. If None, data are stored in home directory.\n\n resume: bool, optional (default True)\n Whether to resume download of a partly-downloaded file.\n\n verbose: int, optional (default 0)\n Defines the level of verbosity of the output.\n\n Returns\n -------\n data: Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'func': list of str (Nifti files)\n Paths to downsampled functional MRI data (4D) for each subject.\n\n - 'confounds': list of str (tsv files)\n Paths to confounds related to each subject.\n\n - 'phenotypic': numpy.ndarray\n Contains each subject age, age group, child or adult, gender,\n handedness.\n\n Notes\n -----\n The original data is downloaded from OpenNeuro\n https://openneuro.org/datasets/ds000228/versions/1.0.0\n\n This fetcher downloads downsampled data that are available on Open\n Science Framework (OSF). Located here: https://osf.io/5hju4/files/\n\n Preprocessing details: https://osf.io/wjtyq/\n\n References\n ----------\n Please cite this paper if you are using this dataset.\n Richardson, H., Lisandrelli, G., Riobueno-Naylor, A., & Saxe, R. 
(2018).\n Development of the social brain from age three to twelve years.\n Nature communications, 9(1), 1027.\n https://www.nature.com/articles/s41467-018-03399-2\n \"\"\"\n\n dataset_name = 'development_fmri'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=1)\n\n # Dataset description\n fdescr = _get_dataset_descr(dataset_name)\n\n # Participants data: ids, demographics, etc\n participants = _fetch_development_fmri_participants(data_dir=data_dir,\n url=None,\n verbose=verbose)\n\n max_subjects = len(participants)\n if n_subjects is None:\n n_subjects = max_subjects\n\n if (isinstance(n_subjects, numbers.Number) and\n ((n_subjects > max_subjects) or (n_subjects < 1))):\n warnings.warn(\"Wrong value for n_subjects={0}. The maximum \"\n \"value will be used instead n_subjects={1}\"\n .format(n_subjects, max_subjects))\n n_subjects = max_subjects\n\n # Download functional and regressors based on participants\n child_count = participants['Child_Adult'].tolist().count('child')\n adult_count = participants['Child_Adult'].tolist().count('adult')\n\n # To keep the proportion of children versus adults\n n_child = np.round(float(n_subjects) / max_subjects * child_count).astype(int)\n n_adult = np.round(float(n_subjects) / max_subjects * adult_count).astype(int)\n\n # First, restrict the csv files to the adequate number of subjects\n child_ids = participants[participants['Child_Adult'] ==\n 'child']['participant_id'][:n_child]\n adult_ids = participants[participants['Child_Adult'] ==\n 'adult']['participant_id'][:n_adult]\n ids = np.hstack([child_ids, adult_ids])\n participants = participants[np.in1d(participants['participant_id'],\n ids)]\n\n funcs, regressors = _fetch_development_fmri_functional(participants,\n data_dir=data_dir,\n url=None,\n verbose=verbose)\n\n return Bunch(func=funcs, confounds=regressors, phenotypic=participants,\n description=fdescr)\n"
] | [
[
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.asarray",
"numpy.sum",
"numpy.genfromtxt",
"sklearn.datasets.base.Bunch",
"numpy.lib.recfunctions.join_by",
"sklearn.utils.deprecated",
"numpy.any",
"numpy.loadtxt",
"numpy.arange",
"numpy.where",
"numpy.isfinite",
"numpy.all",
"numpy.hstack",
"numpy.recfromcsv",
"numpy.in1d"
]
] |
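Aside: the ABIDE fetcher in the row above sanitises its phenotypic CSV with a single regular expression that rewrites commas occurring inside double-quoted fields, so that numpy's csv reader does not split those fields apart. A minimal standalone sketch of just that step, with a sample line invented for illustration:

import re

# A comma sits inside a quoted field exactly when an odd number of
# double quotes remains between it and the end of the line.
QUOTED_COMMA = re.compile(r',(?=[^"]*"(?:[^"]*"[^"]*")*[^"]*$)')

def sanitize(line):
    # Replace in-field commas with semicolons, as fetch_abide_pcp does.
    return QUOTED_COMMA.sub(';', line)

print(sanitize('SUB_ID,DX_GROUP,"autism, moderate, verbal"'))
# -> SUB_ID,DX_GROUP,"autism; moderate; verbal"

The lookahead counts the quotes remaining on the line: an odd count means the comma is inside an open quoted field, an even count means it is a genuine separator and is left alone.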
ceshine/modern_chinese_nlp | [
"e1d5941f381431ac114f440472d3e0f976437777"
] | [
"legacy/cnlp/fastai_extended.py"
] | [
"import random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nimport fastai.text\nfrom fastai.core import BasicModel, to_gpu\nfrom fastai.nlp import RNN_Learner\nfrom fastai.lm_rnn import SequentialRNN\nfrom fastai.dataloader import DataLoader\n\nfrom .transformer_decoder import TransformerEncoder, LayerNorm\n\n\nclass TransformerLearner(RNN_Learner):\n def fit(self, *args, **kwargs):\n return super().fit(*args, **kwargs, seq_first=False)\n\n\nclass LanguageModelLoader:\n \"\"\" Returns a language model iterator that iterates through batches that are of length N(bptt,5)\n The first batch returned is always bptt+25; the max possible width. This is done because of they way that pytorch\n allocates cuda memory in order to prevent multiple buffers from being created as the batch width grows.\n \"\"\"\n\n MAX_PLUS = 25\n\n def __init__(self,\n nums: np.array,\n bs: int,\n bptt: int,\n target_length: int,\n backwards: bool = False,\n batch_first: bool = False,\n randomize_bptt: bool = False):\n self.bs, self.bptt, self.backwards = bs, bptt, backwards\n self.batch_first = batch_first\n self.data = self.batchify(nums)\n self.i, self.iter = 0, 0\n self.n = self.data.size(1) if self.batch_first else self.data.size(0)\n self.randomize_bptt = randomize_bptt\n self.target_length = target_length\n\n @property\n def max_possible_seq_len(self) -> int:\n if self.randomize_bptt is False:\n return self.bptt\n return self.bptt + self.MAX_PLUS\n\n def __iter__(self):\n self.i, self.iter = 0, 0\n while self.i < self.n - 1 and self.iter < len(self):\n if self.randomize_bptt:\n if self.i == 0:\n seq_len = self.bptt + 5 * 5\n else:\n bptt = self.bptt if np.random.random(\n ) < 0.95 else self.bptt / 2.\n seq_len = max(\n 5,\n min(\n int(np.random.normal(bptt, 5)),\n self.max_possible_seq_len))\n else:\n seq_len = self.bptt\n if self.i + seq_len >= self.n:\n # ditch residuals\n break\n res = self.get_batch(self.i, seq_len)\n self.i += seq_len\n self.iter += 1\n yield res\n\n def __len__(self):\n return self.n // self.bptt - 1\n\n def batchify(self, data):\n nb = data.shape[0] // self.bs\n data = np.array(data[:nb * self.bs])\n data = data.reshape(self.bs, -1)\n if self.backwards:\n data = data[:, ::-1]\n if not self.batch_first:\n data = data.T\n return torch.from_numpy(data.astype(\"int64\"))\n\n def get_batch(self, i, seq_len):\n source = self.data\n target_offset = max(0, seq_len - self.target_length)\n if self.batch_first:\n return (source[:, i:(i + seq_len)].contiguous(),\n source[:, (i + 1 + target_offset):(\n i + 1 + seq_len)].contiguous().view(-1))\n else:\n return (source[i:(i + seq_len)].contiguous(),\n source[(i + 1 + target_offset):(\n i + 1 + seq_len)].contiguous().view(-1))\n\n\nclass ShuffledLanguageModelLoader(LanguageModelLoader):\n \"\"\"An alternative algorithm to LanguageModelLoader\n\n Useful for models that do not pass information between batches.\n \"\"\"\n\n def __init__(self,\n nums: np.array,\n bs: int,\n bptt: int,\n target_length: int,\n batch_first: bool = False,\n randomize_bptt: bool = False):\n # We intentional don't invoke super class's initializer\n # as we only want to reuse batchify and get_batch method\n super().__init__(nums, bs, bptt, target_length, False, batch_first,\n randomize_bptt)\n\n def __iter__(self):\n for step in range(len(self)):\n i = random.randint(0, self.n - self.max_possible_seq_len - 1)\n if self.randomize_bptt:\n if step == 0:\n seq_len = self.bptt + 5 * 5\n else:\n bptt = 
self.bptt if np.random.random(\n ) < 0.95 else self.bptt / 2.\n seq_len = max(\n 5,\n min(\n int(np.random.normal(bptt, 5)),\n self.max_possible_seq_len))\n else:\n seq_len = self.bptt\n res = self.get_batch(i, seq_len)\n yield res\n\n\nclass TextDataset(Dataset):\n def __init__(self,\n x,\n y,\n backwards=False,\n sos=None,\n eos=None,\n max_seq_len=-1,\n cut_tail=True):\n self.x, self.y, self.backwards, self.sos, self.eos = x, y, backwards, sos, eos\n self.max_seq_len = max_seq_len\n self.cut_tail = cut_tail\n\n def __getitem__(self, idx):\n x = self.x[idx]\n if self.max_seq_len > 0:\n if self.cut_tail:\n x = x[:self.max_seq_len]\n else:\n x = x[-self.max_seq_len:]\n if self.backwards: x = list(reversed(x))\n if self.eos is not None: x = x + [self.eos]\n if self.sos is not None: x = [self.sos] + x\n return np.array(x), self.y[idx]\n\n def __len__(self):\n return len(self.x)\n\n\nclass FixedLengthDataLoader(DataLoader):\n def __init__(self,\n dataset,\n seq_length,\n batch_size=1,\n shuffle=False,\n sampler=None,\n batch_sampler=None,\n pad_idx=0,\n num_workers=None,\n pin_memory=False,\n drop_last=False,\n pre_pad=True,\n half=False,\n transpose=False,\n transpose_y=False):\n super().__init__(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n sampler=sampler,\n batch_sampler=batch_sampler,\n pad_idx=pad_idx,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=drop_last,\n pre_pad=pre_pad,\n half=half,\n transpose=transpose,\n transpose_y=transpose_y)\n self.seq_length = seq_length\n\n def jag_stack(self, b):\n if len(b[0].shape) not in (1, 2): return np.stack(b)\n ml = self.seq_length\n if min(len(o) for o in b) == ml: return np.stack(b)\n res = np.zeros((len(b), ml), dtype=b[0].dtype) + self.pad_idx\n for i, o in enumerate(b):\n if self.pre_pad: res[i, -len(o):] = o\n else: res[i, :len(o)] = o\n return res\n\n\nclass TransformerLanguageModel(BasicModel):\n def get_layer_groups(self):\n enc = self.model[0]\n dec = self.model[1]\n return [enc.embed, *enc.blocks, dec]\n\n\nclass LanguageModelData(fastai.text.LanguageModelData):\n def get_transformer_model(self, opt_fn, emb_sz, max_seq_len, **kwargs):\n m = get_transformer_language_model(\n self.n_tok,\n max_seq_len,\n self.trn_dl.target_length,\n emb_sz,\n pad_token=self.pad_idx,\n **kwargs)\n model = TransformerLanguageModel(to_gpu(m))\n return TransformerLearner(self, model, opt_fn=opt_fn)\n\n\nclass FlattenPredictions(nn.Module):\n def __init__(self, target_len: int):\n super().__init__()\n self.target_len = target_len\n\n def forward(self, x):\n return x[:, -self.target_len:, :].contiguous().view(-1, x.size(2))\n\n\ndef get_transformer_language_model(n_tok: int,\n max_seq_len: int,\n target_length: int,\n emb_sz: int,\n n_head: int,\n n_layer: int,\n pad_token: int,\n embd_pdrop: float = 0.1,\n attn_pdrop: float = 0.1,\n resid_pdrop: float = 0.1,\n afn: str = 'gelu'):\n enc = TransformerEncoder(\n vocab=n_tok,\n n_ctx=max_seq_len,\n n_embd=emb_sz,\n n_head=n_head,\n n_layer=n_layer,\n pad_token=pad_token,\n embd_pdrop=embd_pdrop,\n attn_pdrop=attn_pdrop,\n resid_pdrop=resid_pdrop,\n afn=afn)\n decoder = nn.Linear(emb_sz, n_tok, bias=False)\n decoder.weight = nn.Parameter(\n enc.embed.weight[:-max_seq_len]) # Tied weights\n return SequentialRNN(enc, decoder, FlattenPredictions(target_length))\n\n\nclass LinearBlock(nn.Module):\n def __init__(self, ni, nf, drop, norm=True):\n super().__init__()\n self.lin = nn.Linear(ni, nf)\n self.drop = nn.Dropout(drop)\n self.norm = norm\n if norm:\n self.ln = nn.LayerNorm(ni)\n # 
self.ln = nn.BatchNorm1d(ni)\n nn.init.kaiming_normal_(self.lin.weight)\n nn.init.constant_(self.lin.bias, 0)\n\n def forward(self, x):\n if self.norm:\n # return self.ln(self.lin(self.drop(x)))\n return self.lin(self.drop(self.ln(x)))\n else:\n return self.lin(self.drop(x))\n\n\nclass PoolingLinearClassifier(nn.Module):\n def __init__(self, layers, drops, batch_first=False):\n super().__init__()\n self.batch_first = batch_first\n self.layers = nn.ModuleList([\n LinearBlock(\n layers[i],\n layers[i + 1],\n drops[i],\n norm=(i != len(layers) - 2)) for i in range(len(layers) - 1)\n ])\n\n def pool(self, x, bs, is_max):\n f = F.adaptive_max_pool1d if is_max else F.adaptive_avg_pool1d\n if self.batch_first:\n return f(x.permute(0, 2, 1), (1, )).squeeze(-1)\n return f(x.permute(1, 2, 0), (1, )).view(bs, -1)\n\n def forward(self, output):\n if self.batch_first:\n sl, bs, _ = output.size()\n else:\n bs, sl, _ = output.size()\n avgpool = self.pool(output, bs, False)\n mxpool = self.pool(output, bs, True)\n if self.batch_first:\n x = torch.cat([output[:, -1, :], mxpool, avgpool], 1)\n else:\n x = torch.cat([output[-1], mxpool, avgpool], 1)\n for l in self.layers:\n l_x = l(x)\n x = F.relu(l_x)\n return l_x\n\n\nclass MLP(nn.Module):\n def __init__(self, layers, drops, batch_first=False):\n super().__init__()\n self.batch_first = batch_first\n self.layers = nn.ModuleList([\n LinearBlock(layers[i], layers[i + 1], drops[i], norm=True)\n # norm=(i != len(layers) - 2))\n for i in range(len(layers) - 1)\n ])\n\n def forward(self, output):\n if self.batch_first:\n x = output[:, -1, :]\n else:\n x = output[-1]\n for l in self.layers:\n l_x = l(x)\n x = F.relu(l_x)\n return l_x\n\n\n# class TruncateSequence(nn.Module):\n# def __init__(self, max_seq_len: int):\n# super().__init__()\n# self.max_seq_len = max_seq_len\n\n# def forward(self, x):\n# # Use the end of the sequences\n# return x[:, -self.max_seq_len:]\n\n# class TruncatedTransformerLearner(RNN_Learner):\n# def save_encoder(self, name):\n# save_model(self.model[0], self.get_model_path(name))\n\n# def load_encoder(self, name):\n# load_model(self.model[0], self.get_model_path(name))\n\n\ndef get_transformer_classifier(n_tok: int,\n n_ctx: int,\n emb_sz: int,\n n_head: int,\n n_layer: int,\n clf_layers: int,\n pad_token: int,\n embd_pdrop: float = 0.1,\n attn_pdrop: float = 0.1,\n resid_pdrop: float = 0.1,\n clf_pdrop: float = 0.1,\n afn: str = 'gelu'):\n enc = TransformerEncoder(\n vocab=n_tok,\n n_ctx=n_ctx,\n n_embd=emb_sz,\n n_head=n_head,\n n_layer=n_layer,\n pad_token=pad_token,\n embd_pdrop=embd_pdrop,\n attn_pdrop=attn_pdrop,\n resid_pdrop=resid_pdrop,\n afn=afn)\n classifier = MLP(clf_layers, clf_pdrop, batch_first=True)\n return SequentialRNN(enc, classifier)\n\n\nclass TransformerTextModel(BasicModel):\n def get_layer_groups(self):\n enc = self.model[0]\n clf = self.model[1]\n return [enc.embed, *enc.blocks, clf]\n"
] | [
[
"torch.nn.Linear",
"numpy.random.normal",
"numpy.array",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.Parameter",
"numpy.stack",
"torch.nn.functional.relu",
"numpy.random.random"
]
] |
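Aside: both loaders in the row above share one randomised-BPTT recipe: with 95% probability the window length is drawn from a normal distribution centred on bptt (otherwise on bptt/2), the result is clamped to [5, bptt + MAX_PLUS], and the very first batch is forced to the maximum width so PyTorch's CUDA allocator creates its largest buffer once. A condensed sketch of just that sampling rule (the function name is ours, not the repo's):

import numpy as np

MAX_PLUS = 25  # same headroom constant as LanguageModelLoader.MAX_PLUS

def sample_seq_len(bptt, first_batch):
    if first_batch:
        # Widest batch first, so later batches reuse the same CUDA buffers.
        return bptt + MAX_PLUS
    # Occasionally halve the window so the model sees varied context sizes.
    center = bptt if np.random.random() < 0.95 else bptt / 2.
    return max(5, min(int(np.random.normal(center, 5)), bptt + MAX_PLUS))

print([sample_seq_len(70, i == 0) for i in range(5)])
# e.g. [95, 72, 66, 38, 69]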
MengHao666/Hand-BMC-pytorch | [
"d9d1a5394ebc84079e9fc25885122c18a9ec1505"
] | [
"weakloss.py"
] | [
"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn.functional as torch_f\n\nimport config as cfg\n\n\ndef plot_hull(theta, hull):\n del_rdp_hull = hull.detach().cpu().numpy()\n theta = theta.detach().cpu().numpy()\n\n fig = plt.figure()\n figManager = plt.get_current_fig_manager()\n figManager.window.showMaximized()\n\n ax = fig.add_subplot(111)\n ax.scatter(theta[:, 0], theta[:, 1], s=10, c='r')\n ax.set_xlabel(\"flexion\")\n ax.set_ylabel(\"abduction\")\n\n plt.plot(del_rdp_hull[:, 0],\n del_rdp_hull[:, 1],\n '-yo', linewidth=2)\n\n plt.xticks(np.arange(-3, 4, 1))\n plt.yticks(np.arange(-2, 2, 0.5))\n plt.show()\n\n\ndef two_norm(a):\n '''\n\n Args:\n a: B*M*2 or B*M*3\n\n Returns:\n\n '''\n return torch.norm(a, dim=-1)\n\n\ndef one_norm(a):\n '''\n\n Args:\n a: B*M*2 or B*M*3\n\n Returns:\n\n '''\n return torch.norm(a, dim=-1, p=1)\n\n\ndef calculate_joint_angle_loss(thetas, hulls):\n '''\n\n Args:\n Theta: B*15*2\n hulls: list\n\n Returns:\n\n '''\n\n loss = torch.Tensor([0]).cuda()\n for i in range(15):\n # print(\"i=\",i)\n hull = hulls[i] # (M*2)\n theta = thetas[:, i] # (B*2)\n hull = torch.cat((hull, hull[0].unsqueeze(0)), dim=0)\n\n v = (hull[1:] - hull[:-1]).unsqueeze(0) # (M-1)*2\n w = - hull[:-1].unsqueeze(0) + theta.unsqueeze(1).repeat(1, hull[:-1].shape[0], 1) # B*(M-1)*2\n\n cross_product_2d = w[:, :, 0] * v[:, :, 1] - w[:, :, 1] * v[:, :, 0]\n tmp = torch.sum(cross_product_2d < 1e-6, dim=-1)\n\n is_outside = (tmp != (hull.shape[0] - 1))\n if not torch.sum(is_outside):\n sub_loss = torch.Tensor([0]).cuda()\n else:\n outside_theta = theta[is_outside]\n outside_theta = outside_theta.unsqueeze(1).repeat(1, hull[:-1].shape[0], 1)\n w_outside = - hull[:-1].unsqueeze(0) + outside_theta # B*(M-1)*2\n t = torch.clamp(inner_product(w_outside, v) / (two_norm(v) ** 2), min=0, max=1).unsqueeze(2)\n p = hull[:-1] + t * v\n\n D = one_norm(torch.cos(outside_theta) - torch.cos(p)) + one_norm(torch.sin(outside_theta) - torch.sin(p))\n sub_loss = torch.sum(torch.min(D, dim=-1)[0])\n\n vis = 0\n if vis:\n print(theta)\n plot_hull(theta, hull)\n\n loss += sub_loss\n\n loss /= (15 * thetas.shape[0])\n\n return loss\n\n\ndef angle_between(v1, v2):\n epsilon = 1e-7\n cos = torch_f.cosine_similarity(v1, v2, dim=-1).clamp(-1 + epsilon, 1 - epsilon) # (B)\n theta = torch.acos(cos) # (B)\n return theta\n\n\ndef normalize(vec):\n return torch_f.normalize(vec, p=2, dim=-1)\n\n\ndef inner_product(x1, x2):\n return torch.sum(x1 * x2, dim=-1)\n\n\ndef cross_product(x1, x2):\n return torch.cross(x1, x2, dim=-1)\n\n\ndef axangle2mat_torch(axis, angle, is_normalized=False):\n \"\"\" Rotation matrix for rotation angle `angle` around `axis`\n Parameters\n ----------\n axis : [B,M, 3] element sequence\n vector specifying axis for rotation.\n angle :[B,M, ] scalar\n angle of rotation in radians.\n is_normalized : bool, optional\n True if `axis` is already normalized (has norm of 1). 
Default False.\n Returns\n -------\n mat : array shape (B, M,3,3)\n rotation matrix for specified rotation\n Notes\n -----\n From: http://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle\n \"\"\"\n B = axis.shape[0]\n M = axis.shape[1]\n\n if not is_normalized:\n norm_axis = axis.norm(p=2, dim=-1, keepdim=True)\n normed_axis = axis / norm_axis\n else:\n normed_axis = axis\n x, y, z = normed_axis[:, :, 0], normed_axis[:, :, 1], normed_axis[:, :, 2]\n c = torch.cos(angle)\n s = torch.sin(angle)\n C = 1 - c\n\n xs = x * s;\n ys = y * s;\n zs = z * s # noqa\n xC = x * C;\n yC = y * C;\n zC = z * C # noqa\n xyC = x * yC;\n yzC = y * zC;\n zxC = z * xC # noqa\n\n TMP = torch.stack([x * xC + c, xyC - zs, zxC + ys, xyC + zs, y * yC + c, yzC - xs, zxC - ys, yzC + xs, z * zC + c],\n dim=-1)\n return TMP.reshape(B, M, 3, 3)\n\n\ndef interval_loss(value, min, max):\n '''\n calculate interval loss\n Args:\n value: B*M\n max: M\n min: M\n\n Returns:\n\n '''\n\n batch_3d_size = value.shape[0]\n\n min = min.repeat(value.shape[0], 1)\n max = max.repeat(value.shape[0], 1)\n\n loss1 = torch.max(min - value, torch.Tensor([0]).cuda())\n loss2 = torch.max(value - max, torch.Tensor([0]).cuda())\n\n loss = (loss1 + loss2).sum()\n\n loss /= (batch_3d_size * value.shape[1])\n\n return loss\n\n\n\n\n\nclass BMCLoss:\n def __init__(\n self,\n lambda_bl=0.,\n lambda_rb=0.,\n lambda_a=0.,\n ):\n self.lambda_bl = lambda_bl\n self.lambda_rb = lambda_rb\n self.lambda_a = lambda_a\n\n # self.lp = \"../BMC\"\n self.lp = \"BMC\"\n\n self.bone_len_max = np.load(os.path.join(self.lp, \"bone_len_max.npy\"))\n self.bone_len_min = np.load(os.path.join(self.lp, \"bone_len_min.npy\"))\n self.rb_curvatures_max = np.load(os.path.join(self.lp, \"curvatures_max.npy\"))\n self.rb_curvatures_min = np.load(os.path.join(self.lp, \"curvatures_min.npy\"))\n self.rb_PHI_max = np.load(os.path.join(self.lp, \"PHI_max.npy\"))\n self.rb_PHI_min = np.load(os.path.join(self.lp, \"PHI_min.npy\"))\n\n self.joint_angle_limit = np.load(os.path.join(self.lp, \"CONVEX_HULLS.npy\"),\n allow_pickle=True)\n LEN_joint_angle_limit = len(self.joint_angle_limit)\n\n self.bl_max = torch.from_numpy(self.bone_len_max).float().cuda()\n self.bl_min = torch.from_numpy(self.bone_len_min).float().cuda()\n self.rb_curvatures_max = torch.from_numpy(self.rb_curvatures_max).float().cuda()\n self.rb_curvatures_min = torch.from_numpy(self.rb_curvatures_min).float().cuda()\n self.rb_PHI_max = torch.from_numpy(self.rb_PHI_max).float().cuda()\n self.rb_PHI_min = torch.from_numpy(self.rb_PHI_min).float().cuda()\n\n self.joint_angle_limit = [torch.from_numpy(self.joint_angle_limit[i]).float().cuda() for i in\n range(LEN_joint_angle_limit)]\n\n def compute_loss(self, joints):\n '''\n\n Args:\n joints: B*21*3\n\n Returns:\n\n '''\n batch_size = joints.shape[0]\n final_loss = torch.Tensor([0]).cuda()\n\n BMC_losses = {\"bmc_bl\": torch.Tensor([0]).cuda(), \"bmc_rb\": torch.Tensor([0]).cuda(), \"bmc_a\": torch.Tensor([0]).cuda()}\n\n if (self.lambda_bl < 1e-6) and (self.lambda_rb < 1e-6) and (self.lambda_a < 1e-6):\n return final_loss, BMC_losses\n\n ALL_bones = [\n (\n joints[:, i, :] -\n joints[:, cfg.SNAP_PARENT[i], :]\n ) for i in range(21)\n ]\n ALL_bones = torch.stack(ALL_bones[1:], dim=1) # (B,20,3)\n ROOT_bones = ALL_bones[:, cfg.ID_ROOT_bone] # (B,5,3)\n PIP_bones = ALL_bones[:, cfg.ID_PIP_bone]\n DIP_bones = ALL_bones[:, cfg.ID_DIP_bone]\n TIP_bones = ALL_bones[:, cfg.ID_TIP_bone]\n\n ALL_Z_axis = normalize(ALL_bones)\n PIP_Z_axis = ALL_Z_axis[:, cfg.ID_ROOT_bone]\n 
DIP_Z_axis = ALL_Z_axis[:, cfg.ID_PIP_bone]\n TIP_Z_axis = ALL_Z_axis[:, cfg.ID_DIP_bone]\n\n normals = normalize(cross_product(ROOT_bones[:, 1:], ROOT_bones[:, :-1]))\n\n # compute loss of bone length\n bl_loss = torch.Tensor([0]).cuda()\n if self.lambda_bl:\n bls = two_norm(ALL_bones) # (B,20,1)\n bl_loss = interval_loss(value=bls, min=self.bl_min, max=self.bl_max)\n final_loss += self.lambda_bl * bl_loss\n BMC_losses[\"bmc_bl\"] = bl_loss\n\n # compute loss of Root bones\n rb_loss = torch.Tensor([0]).cuda()\n if self.lambda_rb:\n edge_normals = torch.zeros_like(ROOT_bones).cuda() # (B,5,3)\n edge_normals[:, [0, 4]] = normals[:, [0, 3]]\n edge_normals[:, 1:4] = normalize(normals[:, 1:4] + normals[:, :3])\n\n curvatures = inner_product(edge_normals[:, 1:] - edge_normals[:, :4],\n ROOT_bones[:, 1:] - ROOT_bones[:, :4]) / \\\n (two_norm(ROOT_bones[:, 1:] - ROOT_bones[:, :4]) ** 2)\n PHI = angle_between(ROOT_bones[:, :4], ROOT_bones[:, 1:]) # (B)\n\n rb_loss = interval_loss(value=curvatures, min=self.rb_curvatures_min, max=self.rb_curvatures_max) + \\\n interval_loss(value=PHI, min=self.rb_PHI_min, max=self.rb_PHI_max)\n final_loss += self.lambda_rb * rb_loss\n BMC_losses[\"bmc_rb\"] = rb_loss\n\n # compute loss of Joint angles\n a_loss = torch.Tensor([0]).cuda()\n if self.lambda_a:\n # PIP bones\n PIP_X_axis = torch.zeros([batch_size, 5, 3]).cuda() # (B,5,3)\n PIP_X_axis[:, [0, 1, 4], :] = -normals[:, [0, 1, 3]]\n PIP_X_axis[:, 2:4] = -normalize(normals[:, 2:4] + normals[:, 1:3]) # (B,2,3)\n PIP_Y_axis = normalize(cross_product(PIP_Z_axis, PIP_X_axis)) # (B,5,3)\n\n PIP_bones_xz = PIP_bones - inner_product(PIP_bones, PIP_Y_axis).unsqueeze(2) * PIP_Y_axis\n PIP_theta_flexion = angle_between(PIP_bones_xz, PIP_Z_axis) # in global coordinate (B)\n PIP_theta_abduction = angle_between(PIP_bones_xz, PIP_bones) # in global coordinate (B)\n # x-component of the bone vector\n tmp = inner_product(PIP_bones, PIP_X_axis)\n PIP_theta_flexion = torch.where(tmp < 1e-6, -PIP_theta_flexion, PIP_theta_flexion)\n # y-component of the bone vector\n tmp = torch.sum((PIP_bones * PIP_Y_axis), dim=-1)\n PIP_theta_abduction = torch.where(tmp < 1e-6, -PIP_theta_abduction, PIP_theta_abduction)\n\n temp_axis = normalize(cross_product(PIP_Z_axis, PIP_bones))\n temp_alpha = angle_between(PIP_Z_axis, PIP_bones) # alpha belongs to [pi/2, pi]\n temp_R = axangle2mat_torch(axis=temp_axis, angle=temp_alpha, is_normalized=True)\n\n # DIP bones\n DIP_X_axis = torch.matmul(temp_R, PIP_X_axis.unsqueeze(3)).squeeze()\n DIP_Y_axis = torch.matmul(temp_R, PIP_Y_axis.unsqueeze(3)).squeeze()\n\n DIP_bones_xz = DIP_bones - inner_product(DIP_bones, DIP_Y_axis).unsqueeze(2) * DIP_Y_axis\n DIP_theta_flexion = angle_between(DIP_bones_xz, DIP_Z_axis) # in global coordinate\n DIP_theta_abduction = angle_between(DIP_bones_xz, DIP_bones) # in global coordinate\n # x-component of the bone vector\n tmp = inner_product(DIP_bones, DIP_X_axis)\n DIP_theta_flexion = torch.where(tmp < 0, -DIP_theta_flexion, DIP_theta_flexion)\n # y-component of the bone vector\n tmp = inner_product(DIP_bones, DIP_Y_axis)\n DIP_theta_abduction = torch.where(tmp < 0, -DIP_theta_abduction, DIP_theta_abduction)\n\n temp_axis = normalize(cross_product(DIP_Z_axis, DIP_bones))\n temp_alpha = angle_between(DIP_Z_axis, DIP_bones) # alpha belongs to [pi/2, pi]\n temp_R = axangle2mat_torch(axis=temp_axis, angle=temp_alpha, is_normalized=True)\n\n # TIP bones\n TIP_X_axis = torch.matmul(temp_R, DIP_X_axis.unsqueeze(3)).squeeze()\n TIP_Y_axis = torch.matmul(temp_R, 
DIP_Y_axis.unsqueeze(3)).squeeze()\n TIP_bones_xz = TIP_bones - inner_product(TIP_bones, TIP_Y_axis).unsqueeze(2) * TIP_Y_axis\n\n TIP_theta_flexion = angle_between(TIP_bones_xz, TIP_Z_axis) # in global coordinate\n TIP_theta_abduction = angle_between(TIP_bones_xz, TIP_bones) # in global coordinate\n # x-component of the bone vector\n tmp = inner_product(TIP_bones, TIP_X_axis)\n TIP_theta_flexion = torch.where(tmp < 1e-6, -TIP_theta_flexion, TIP_theta_flexion)\n # y-component of the bone vector\n tmp = inner_product(TIP_bones, TIP_Y_axis)\n TIP_theta_abduction = torch.where(tmp < 1e-6, -TIP_theta_abduction, TIP_theta_abduction)\n\n # ALL\n ALL_theta_flexion = torch.cat((PIP_theta_flexion, DIP_theta_flexion, TIP_theta_flexion), dim=-1)\n ALL_theta_abduction = torch.cat((PIP_theta_abduction, DIP_theta_abduction, TIP_theta_abduction), dim=-1)\n ALL_theta = torch.stack((ALL_theta_flexion, ALL_theta_abduction), dim=-1)\n\n a_loss = calculate_joint_angle_loss(ALL_theta, self.joint_angle_limit)\n final_loss += self.lambda_a * a_loss\n\n BMC_losses[\"bmc_a\"] = a_loss\n\n return final_loss, BMC_losses\n\n\nif __name__ == '__main__':\n bmc = BMCLoss(lambda_bl=1, lambda_rb=1, lambda_a=1)\n joints = torch.rand(10 * 63).reshape(-1, 21, 3).float().cuda() # (100,21,3)\n loss_total, loss_dict = bmc.compute_loss(joints)\n print(\"loss_total=\", loss_total)\n print(\"loss_dict=\", loss_dict)\n"
] | [
[
"torch.cat",
"torch.acos",
"torch.stack",
"torch.where",
"torch.sum",
"matplotlib.pyplot.get_current_fig_manager",
"torch.norm",
"numpy.arange",
"torch.zeros_like",
"torch.Tensor",
"torch.zeros",
"torch.cos",
"torch.min",
"matplotlib.pyplot.figure",
"torch.cross",
"torch.nn.functional.cosine_similarity",
"matplotlib.pyplot.show",
"torch.nn.functional.normalize",
"torch.rand",
"torch.sin",
"matplotlib.pyplot.plot",
"torch.from_numpy"
]
] |
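For reference between entries: the `interval_loss` in the code above penalizes only the amount by which a value leaves a per-joint [min, max] band. A minimal CPU sketch of the same idea, assuming PyTorch (`torch.clamp` stands in for the original's `torch.max` against a zero tensor, and all shapes/values are illustrative):

    import torch

    def interval_loss(value, lo, hi):
        # zero penalty inside [lo, hi]; linear penalty outside, averaged over entries
        below = torch.clamp(lo - value, min=0.0)   # how far below the lower bound
        above = torch.clamp(value - hi, min=0.0)   # how far above the upper bound
        return (below + above).sum() / value.numel()

    value = torch.tensor([[0.5, 2.0, -1.0]])  # (B=1, M=3)
    lo = torch.tensor([0.0, 0.0, 0.0])
    hi = torch.tensor([1.0, 1.0, 1.0])
    print(interval_loss(value, lo, hi))  # tensor(0.6667): (0 + 1 + 1) / 3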
shabie/vit-pytorch | [
"30b37c4028da42dbed259c309595d8d32fe1acea"
] | [
"vit_pytorch/rvt.py"
] | [
"from math import sqrt, pi, log\n\nimport torch\nfrom torch import nn, einsum\nimport torch.nn.functional as F\n\nfrom einops import rearrange, repeat\nfrom einops.layers.torch import Rearrange\n\n# rotary embeddings\n\ndef rotate_every_two(x):\n x = rearrange(x, '... (d j) -> ... d j', j = 2)\n x1, x2 = x.unbind(dim = -1)\n x = torch.stack((-x2, x1), dim = -1)\n return rearrange(x, '... d j -> ... (d j)')\n\nclass AxialRotaryEmbedding(nn.Module):\n def __init__(self, dim, max_freq = 10):\n super().__init__()\n self.dim = dim\n scales = torch.logspace(1., log(max_freq / 2) / log(2), self.dim // 4, base = 2)\n self.register_buffer('scales', scales)\n\n def forward(self, x):\n device, dtype, n = x.device, x.dtype, int(sqrt(x.shape[-2]))\n\n seq = torch.linspace(-1., 1., steps = n, device = device)\n seq = seq.unsqueeze(-1)\n\n scales = self.scales[(*((None,) * (len(seq.shape) - 1)), Ellipsis)]\n scales = scales.to(x)\n\n seq = seq * scales * pi\n\n x_sinu = repeat(seq, 'i d -> i j d', j = n)\n y_sinu = repeat(seq, 'j d -> i j d', i = n)\n\n sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1)\n cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1)\n\n sin, cos = map(lambda t: rearrange(t, 'i j d -> (i j) d'), (sin, cos))\n sin, cos = map(lambda t: repeat(t, 'n d -> () n (d j)', j = 2), (sin, cos))\n return sin, cos\n\n# helper classes\n\nclass PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n def forward(self, x, **kwargs):\n return self.fn(self.norm(x), **kwargs)\n\nclass GEGLU(nn.Module):\n def forward(self, x):\n x, gates = x.chunk(2, dim = -1)\n return F.gelu(gates) * x\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, hidden_dim, dropout = 0.):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_dim * 2),\n GEGLU(),\n nn.Dropout(dropout),\n nn.Linear(hidden_dim, dim),\n nn.Dropout(dropout)\n )\n def forward(self, x):\n return self.net(x)\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):\n super().__init__()\n inner_dim = dim_head * heads\n\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.attend = nn.Softmax(dim = -1)\n self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim),\n nn.Dropout(dropout)\n )\n\n def forward(self, x, pos_emb):\n b, n, _, h = *x.shape, self.heads\n qkv = self.to_qkv(x).chunk(3, dim = -1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)\n\n # apply 2d rotary embeddings to queries and keys, excluding CLS tokens\n\n sin, cos = pos_emb\n (q_cls, q), (k_cls, k) = map(lambda t: (t[:, :1], t[:, 1:]), (q, k))\n q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))\n\n # concat back the CLS tokens\n\n q = torch.cat((q_cls, q), dim = 1)\n k = torch.cat((k_cls, k), dim = 1)\n\n dots = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n attn = self.attend(dots)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass Transformer(nn.Module):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):\n super().__init__()\n self.layers = nn.ModuleList([])\n self.pos_emb = AxialRotaryEmbedding(dim_head)\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),\n PreNorm(dim, FeedForward(dim, mlp_dim, dropout = 
dropout))\n ]))\n def forward(self, x):\n pos_emb = self.pos_emb(x[:, 1:])\n\n for attn, ff in self.layers:\n x = attn(x, pos_emb = pos_emb) + x\n x = ff(x) + x\n return x\n\n# Rotary Vision Transformer\n\nclass RvT(nn.Module):\n def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):\n super().__init__()\n assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'\n num_patches = (image_size // patch_size) ** 2\n patch_dim = channels * patch_size ** 2\n\n self.to_patch_embedding = nn.Sequential(\n Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),\n nn.Linear(patch_dim, dim),\n )\n\n self.cls_token = nn.Parameter(torch.randn(1, 1, dim))\n self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)\n\n self.mlp_head = nn.Sequential(\n nn.LayerNorm(dim),\n nn.Linear(dim, num_classes)\n )\n\n def forward(self, img):\n x = self.to_patch_embedding(img)\n b, n, _ = x.shape\n\n cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)\n x = torch.cat((cls_tokens, x), dim=1)\n\n x = self.transformer(x)\n\n return self.mlp_head(x)\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.LayerNorm",
"torch.stack",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.einsum",
"torch.nn.ModuleList",
"torch.nn.functional.gelu",
"torch.linspace",
"torch.randn"
]
] |
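A quick smoke test for the `RvT` entry above, assuming the listed file is importable as `vit_pytorch.rvt`; the hyperparameter values here are illustrative, not taken from the source:

    import torch
    from vit_pytorch.rvt import RvT

    model = RvT(
        image_size=256, patch_size=32, num_classes=1000,
        dim=512, depth=6, heads=8, mlp_dim=1024,
    )
    img = torch.randn(1, 3, 256, 256)  # (batch, channels, height, width)
    out = model(img)
    # note: as written above, mlp_head is applied to every token (CLS included),
    # so the output keeps a sequence dimension
    print(out.shape)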
agoel00/LowFER | [
"4723cb12e1d89c58621ec34c4eb5221c1b51d018"
] | [
"model_conv.py"
] | [
"# -*- codingL utf-8 -*-\n\nimport numpy as np\nimport torch.nn as nn\nimport torch\n\nclass LowFER(nn.Module):\n def __init__(self, d, d1, d2, **kwargs):\n super(LowFER, self).__init__()\n \n self.E = nn.Embedding(len(d.entities), d1, padding_idx=0)\n self.R = nn.Embedding(len(d.relations), d2, padding_idx=0)\n k, o = kwargs.get('k', 30), d1\n self.U = nn.Parameter(torch.tensor(np.random.uniform(-1, 1, (d1, k * o)),\n dtype=torch.float, device=\"cuda\", requires_grad=True))\n self.V = nn.Parameter(torch.tensor(np.random.uniform(-1, 1, (d2, k * o)),\n dtype=torch.float, device=\"cuda\", requires_grad=True))\n self.conv = nn.Conv2d(1, 200, (2,2), 1, 0, bias=True)\n self.pool = nn.AvgPool1d(100, 10)\n self.fc = nn.Linear(1971, k*o)\n self.input_dropout = nn.Dropout(kwargs[\"input_dropout\"])\n self.hidden_dropout1 = nn.Dropout(kwargs[\"hidden_dropout1\"])\n self.hidden_dropout2 = nn.Dropout(kwargs[\"hidden_dropout2\"])\n self.bn0 = nn.BatchNorm1d(d1)\n self.bn1 = nn.BatchNorm1d(d1)\n self.k = k\n self.o = o\n self.loss = nn.BCELoss()\n \n def init(self):\n print('Init model params...')\n nn.init.xavier_normal_(self.E.weight.data)\n nn.init.xavier_normal_(self.R.weight.data)\n # nn.init.uniform_(self.U, -1, 1)\n # nn.init.uniform_(self.V, -1, 1)\n self.U = self.U.cuda()\n self.V = self.V.cuda()\n \n def forward(self, e1_idx, r_idx):\n e1 = self.E(e1_idx)\n e1 = self.bn0(e1)\n e1 = self.input_dropout(e1)\n r = self.R(r_idx)\n\n c = torch.cat([e1.view(-1, 1, 1, self.o), r.view(-1, 1, 1, self.o)], 2)\n c = self.conv(c)\n c = c.view(c.shape[0], 1, -1)\n c = self.pool(c)\n c = c.view(c.shape[0], -1)\n c = self.fc(c)\n\n \n ## MFB\n x = torch.mm(e1, self.U) * torch.mm(r, self.V) * c\n x = self.hidden_dropout1(x)\n x = x.view(-1, self.o, self.k)\n x = x.sum(-1)\n x = torch.mul(torch.sign(x), torch.sqrt(torch.abs(x) + 1e-12))\n x = nn.functional.normalize(x, p=2, dim=-1)\n x = self.bn1(x)\n x = self.hidden_dropout2(x)\n x = torch.mm(x, self.E.weight.transpose(1, 0))\n \n pred = torch.sigmoid(x)\n return pred\n"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.nn.Dropout",
"torch.sigmoid",
"torch.nn.AvgPool1d",
"torch.sign",
"torch.mm",
"torch.abs",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"numpy.random.uniform",
"torch.nn.BCELoss",
"torch.nn.init.xavier_normal_"
]
] |
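The `forward` above fuses entity and relation embeddings with MFB-style factorized bilinear pooling: project through U and V, multiply elementwise, sum-pool over the k factors, then apply signed square-root and L2 normalization. That pooling step in isolation, on CPU with illustrative sizes:

    import torch
    import torch.nn.functional as F

    d1, d2, k, o = 8, 6, 4, 8
    U = torch.randn(d1, k * o)
    V = torch.randn(d2, k * o)
    e1 = torch.randn(2, d1)  # entity embeddings (batch of 2)
    r = torch.randn(2, d2)   # relation embeddings

    x = torch.mm(e1, U) * torch.mm(r, V)                  # (B, k*o)
    x = x.view(-1, o, k).sum(-1)                          # sum-pool over k factors -> (B, o)
    x = torch.sign(x) * torch.sqrt(torch.abs(x) + 1e-12)  # signed square-root
    x = F.normalize(x, p=2, dim=-1)                       # L2 normalize
    print(x.shape)  # torch.Size([2, 8])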
HM-SYS/Hackathon2018 | [
"9cac5db855f8ca7c4a65061eba4a2e9ab60721b9"
] | [
"test/performance_test.py"
] | [
"# -*- coding: utf-8 -*-\nimport unittest\nimport time\n\nimport numpy as np\n\nfrom agent import Agent\nfrom functions import BG, FEF, LIP, PFC, Retina, SC, VC, HP, CB\nfrom oculoenv import Environment\nfrom oculoenv import PointToTargetContent, ChangeDetectionContent, OddOneOutContent, VisualSearchContent, \\\n MultipleObjectTrackingContent, RandomDotMotionDiscriminationContent\n\nclass Contents(object):\n POINT_TO_TARGET = 1\n CHANGE_DETECTION = 2\n ODD_ONE_OUT = 3\n VISUAL_SEARCH = 4\n MULTIPLE_OBJECT_TRACKING = 5\n RANDOM_DOT_MOTION_DISCRIMINATION = 6\n\n\nclass TestPerformance(unittest.TestCase):\n def get_content(self, content_type):\n if content_type == Contents.POINT_TO_TARGET:\n content = PointToTargetContent()\n elif content_type == Contents.CHANGE_DETECTION:\n content = ChangeDetectionContent()\n elif content_type == Contents.ODD_ONE_OUT:\n content = OddOneOutContent()\n elif content_type == Contents.VISUAL_SEARCH:\n content = VisualSearchContent()\n elif content_type == Contents.MULTIPLE_OBJECT_TRACKING:\n content = MultipleObjectTrackingContent()\n else:\n content = RandomDotMotionDiscriminationContent()\n return content\n\n def calc_fps(self, content_type, with_agent):\n content = self.get_content(content_type)\n\n if with_agent:\n agent = Agent(\n retina=Retina(),\n lip=LIP(),\n vc=VC(),\n pfc=PFC(),\n fef=FEF(),\n bg=BG(),\n sc=SC(),\n hp=HP(),\n cb=CB()\n )\n \n env = Environment(content)\n obs = env.reset()\n\n reward = 0\n done = False\n \n step_size = 1000\n \n step = 0\n\n start = time.time()\n \n for i in range(step_size):\n if with_agent:\n image, angle = obs['screen'], obs['angle']\n dummy_action = agent(image, angle, reward, done)\n \n dh = np.random.uniform(low=-0.05, high=0.05)\n dv = np.random.uniform(low=-0.05, high=0.05)\n action = np.array([dh, dv])\n \n obs, reward, done, _ = env.step(action)\n step += 1\n if done:\n obs = env.reset()\n\n elapsed_time = time.time() - start\n fps = step_size / elapsed_time\n return fps\n\n\n def test_agent_performance(self):\n print(\"check performance with agent\")\n for i in range(1, 7):\n fps = self.calc_fps(i, with_agent=True)\n print(\" content={0} fps={1:.2f}\".format(i, fps))\n\n def test_environment_performance(self):\n print(\"check performance without agent\")\n for i in range(1, 7):\n fps = self.calc_fps(i, with_agent=False)\n print(\" content={0} fps={1:.2f}\".format(i, fps))\n\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.random.uniform"
]
] |
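The benchmark above reduces to one pattern: run a fixed number of steps and divide by elapsed wall-clock time. The pattern in isolation, with a trivial stand-in for the environment step (the oculoenv API is not assumed here):

    import time

    def measure_fps(step_fn, n_steps=1000):
        # steps completed per second of wall-clock time
        start = time.time()
        for _ in range(n_steps):
            step_fn()
        return n_steps / (time.time() - start)

    print("fps={:.2f}".format(measure_fps(lambda: sum(range(100)))))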
derrickeckardt/mlmagic | [
"85ba9d02fd6d9fb6d5f02a2cc17ddce578b4bee2"
] | [
"data.py"
] | [
"#!/usr/bin/env python3\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\n# create data sets\ndef create_column_class(dataset, classcolumn, headers, index_column):\n \n # Read in Datafile\n missing_values = get_missing_values()\n raw_data = pd.read_csv(dataset, header=headers, na_values=missing_values, index_col=index_column)\n data = basic_clean_data(raw_data,classcolumn)\n class_column = data[classcolumn]\n class_data = data.drop(classcolumn,1)\n return data, class_data, class_column\n\ndef get_missing_values():\n # Eventually add ability to determine which missing values\n missing_values = [\"n/a\", \"N/A\",\"N/a\",\"n/A\",\"na\",\"NA\",\"Na\",\"nA\",\"NAN\",\"-\",\"\", \" \", \" \"]\n # issue warning about other ways it will not catch\n return missing_values\n\ndef drop_sparse_columns(data, row_count, classcolumn, sparse_column_threshold):\n for column, column_na_value in zip(data.columns,data.isna().sum()):\n if column_na_value / row_count > sparse_column_threshold and column != classcolumn :\n print(\"Column '\",column,\"'has \",column_na_value/row_count,\" as NaN. Do you want to drop it? (Y/N)\")\n drop_column_input = input() #commented out for testing\n if drop_column_input.upper() == \"Y\":\n # data = data.drop(columns=column)\n print(\"drop\",column)\n elif drop_column_input.upper() == \"N\":\n print(\"Would you like blanks to be the mode, 0, None, or something else? If something else, just type it out.\")\n blank_input = input()\n if blank_input.lower() == \"mode\":\n data = replace_with_mode(data)\n elif blank_input.lower() == \"0\":\n data = replace_with(data, 0)\n elif blank_input.lower() == \"none\":\n data = replace_with(data, \"none\")\n else:\n #other\n data = replace_with(data,blank_input.lower())\n return data\n\ndef replace_with_mode(data):\n for column in data.columns:\n data[column].fillna(data[column].mode()[0], inplace=True)\n return data\n \ndef replace_with(data,value):\n for column in data.columns:\n data[column].fillna(value, inplace=True)\n return data\n\n# current system is too simplistic, but it's a start.\ndef basic_clean_data(data, classcolumn):\n # presets - make an option in production\n sparse_column_threshold = 0.01\n row_drop_threshold = 0.05\n row_na_count = data.isna().any(axis=1).sum()\n na_values = data.isna().sum()\n row_count = data.shape[0]\n\n if row_na_count <= row_count*row_drop_threshold:\n # just drop the rows\n print('Dropping rows with values that are NaN')\n data = data.dropna()\n else:\n # we can't just drop the rows\n # Removing sparse columns\n print('Removing sparse columns')\n data = drop_sparse_columns(data, row_count, classcolumn, sparse_column_threshold)\n print('Data successfully cleaned')\n\n # preprocessing data by encoding it\n # data = OneHotEncoder().fit_transform(data)\n\n # Documentation Reference:\n # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dropna.html\n\n #### Things to Handle Better\n # sparse columns\n # dropping columns with a lot of na\n # ensure you always have at least two columns\n # sparse row\n # drop row if too many values\n # after doing this, then recheck for threshold to see if it works then. 
\n\n #### Intelligent Items\n # Look for data that is mislabeled, ex a number when should be yes/no\n # Extreme outliers, when the orders of magnitude are off \n # there are some different option PyOD, but htat requires keras and tensorflow\n # https://pyod.readthedocs.io/en/latest/\n # sklearn has some outlier detection options\n # https://scikit-learn.org/stable/auto_examples/plot_anomaly_comparison.html#sphx-glr-auto-examples-plot-anomaly-comparison-py\n \n ##### advanced cleaning\n # Address formatting\n # data formatting\n # date formatting\n # spell checking\n \n return data"
] | [
[
"pandas.read_csv"
]
] |
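`replace_with_mode` above fills each column's NaNs with that column's most frequent value; `mode()` returns a Series (ties are possible), so `[0]` takes the first. A toy check of the pattern, using assignment instead of the entry's `inplace=True`:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"color": ["red", np.nan, "red", "blue"],
                       "size": [1.0, 2.0, np.nan, 2.0]})
    for column in df.columns:
        df[column] = df[column].fillna(df[column].mode()[0])
    print(df)  # the NaNs become "red" and 2.0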
Dranero/NeuroEvolution-CTRNN_new | [
"19751b1511cebe59c7605ba97737530b69861088"
] | [
"neuro_evolution_ctrnn/tools/env_handler.py"
] | [
"import gym\nimport pybullet_envs # unused import is needed to register pybullet envs\nimport gym_memory_environments\nimport logging\nfrom gym.wrappers.atari_preprocessing import AtariPreprocessing\nfrom tools.configurations import EpisodeRunnerCfg, ReacherMemoryEnvAttributesCfg, AtariEnvAttributesCfg\nfrom tools.atari_wrappers import EpisodicLifeEnv\nfrom gym import Wrapper\nfrom bz2 import BZ2Compressor\nfrom typing import Union, Iterable\nimport numpy as np\nimport cv2\nfrom gym.spaces import Box\nfrom tools.ae_wrapper import AEWrapper\nimport copy\n\n\nclass EnvHandler:\n \"\"\"this class creates and modifies openAI-Environment.\"\"\"\n\n def __init__(self, config: EpisodeRunnerCfg):\n self.config = config\n\n def make_env(self, env_id: str, render=False):\n if env_id == \"ReacherMemory-v0\" or env_id == \"ReacherMemoryDynamic-v0\":\n assert isinstance(self.config.environment_attributes, ReacherMemoryEnvAttributesCfg), \\\n \"For the environment 'ReacherMemory-v0' one must provide the ReacherMemoryEnvAttributesCfg\" \\\n \" (config.environment_attributes)\"\n\n env = gym.make(\n env_id,\n observation_frames=self.config.environment_attributes.observation_frames,\n memory_frames=self.config.environment_attributes.memory_frames,\n action_frames=self.config.environment_attributes.action_frames)\n elif env_id.startswith(\"procgen\"):\n logging.debug(\"initiating procgen with memory\")\n env = ProcEnvHandler(env_id, render)\n elif env_id == 'QbertHard-v0':\n logging.debug(\"wrapping QbertNoFrameskip-v4 in QbertGlitchlessWrapper\")\n env = QbertGlitchlessWrapper(gym.make('QbertNoFrameskip-v4'))\n elif env_id == 'ReverseShaped-v0':\n env = gym.make('Reverse-v0')\n # these options are specific to reverse-v0 and aren't important enough to be part of the\n # global configuration file.\n env.env.last = 15\n env.env.min_length = 7\n logging.debug(\"creating env with min_length \" + str(\n env.env.min_length) + \" and also comparing results over the last \" + str(env.env.last) + \" runs.\")\n\n logging.debug(\"wrapping env in ReverseWrapper\")\n env = ReverseWrapper(env)\n else:\n env = gym.make(env_id)\n\n if self.config.use_autoencoder:\n logging.debug(\"wrapping env in AEWrapper\")\n env = AEWrapper(env)\n else:\n if env.spec.id.endswith(\"NoFrameskip-v4\"):\n logging.debug(\"wrapping env in AtariPreprocessing\")\n\n assert isinstance(self.config.environment_attributes, AtariEnvAttributesCfg), \\\n \"For atari environment one must provide the AtariEnvAttributesCfg\" \\\n \" (config.environment_attributes)\"\n\n # terminal_on_life_loss behaves different than EpisodicLifeEnv\n # terminal_on_life_loss resets the env when the first life is loss so the next agent will start fresh\n # EpisodicLifeEnv does not reset the env, so the next agent will continue where the last one died.\n # env = AtariPreprocessing(env, screen_size=32, scale_obs=True, terminal_on_life_loss=False)\n # env = EpisodicLifeEnv(env)\n env = AtariPreprocessing(env,\n screen_size=self.config.environment_attributes.screen_size,\n scale_obs=self.config.environment_attributes.scale_obs,\n terminal_on_life_loss=self.config.environment_attributes.terminal_on_life_loss,\n grayscale_obs=self.config.environment_attributes.grayscale_obs)\n\n if str(env_id).startswith(\"BipedalWalker\"):\n logging.debug(\"wrapping env in Box2DWalkerWrapper\")\n env = Box2DWalkerWrapper(env)\n\n if self.config.novelty:\n if self.config.novelty.behavior_source in ['observation', 'action', 'state']:\n logging.debug(\"wrapping env in BehaviorWrapper\")\n env = 
BehaviorWrapper(env, self.config.novelty.behavior_source,\n self.config.novelty.behavioral_interval,\n self.config.novelty.behavioral_max_length)\n\n if self.config.max_steps_per_run:\n logging.debug(\"wrapping env in MaxStepWrapper\")\n env = MaxStepWrapper(env, max_steps=self.config.max_steps_per_run, penalty=self.config.max_steps_penalty)\n\n return env\n\n\nclass ProcEnvHandler(gym.Env):\n \"\"\"\n This Wrapper scales to observation to values between 0 and 1.\n Additionally it implements a seed method because for reasons unknown it not implemented upstream\n \"\"\"\n\n def __init__(self, env_id, render):\n # todo: maybe add env specific configuration, but only after issue #20 has been implemented\n self.env_id = env_id\n self.render_mode = None\n if render:\n self.render_mode = \"rgb_array\"\n super().__init__()\n self._env = self._make_inner_env(start_level=0)\n self.spec = copy.deepcopy(self._env.spec) # deep copy to avoid references to inner gym\n self.action_space = self._env.action_space # use reference, so action_space.seed() works as expected\n self.obs_dtype = np.float16\n self.input_high = 255\n self.current_level = 0\n assert self.input_high == self._env.observation_space.high.min(), \"unexpected bounds for input space\"\n assert self.input_high == self._env.observation_space.high.max(), \"unexpected bounds for input space\"\n assert 0 == self._env.observation_space.low.min(), \"unexpected bounds for input space\"\n assert 0 == self._env.observation_space.low.max(), \"unexpected bounds for input space\"\n self.observation_space = Box(low=0, high=1,\n shape=self._env.observation_space.shape,\n dtype=self.obs_dtype)\n\n def _make_inner_env(self, start_level):\n self.current_level = start_level\n env = gym.make(self.env_id,\n distribution_mode=\"memory\",\n use_monochrome_assets=False,\n restrict_themes=True,\n use_backgrounds=False,\n num_levels=1,\n start_level=self.current_level,\n render_mode=self.render_mode\n )\n return env\n\n def _transform_ob(self, ob):\n return np.asarray(ob, dtype=self.obs_dtype) / 255.0\n\n def render(self, mode='human', **kwargs):\n frame = self._env.render(mode=self.render_mode, **kwargs)\n cv2.imshow(\"ProcGen Agent\", frame)\n cv2.waitKey(1)\n\n def step(self, action):\n ob, rew, done, info = self._env.step(action)\n return self._transform_ob(ob), rew, done, info\n\n def reset(self):\n del self._env\n self._env = self._make_inner_env(start_level=self.current_level + 1)\n return self._transform_ob(self._env.reset())\n\n def seed(self, seed=0):\n # explicitly delete old env to avoid memory leak\n del self._env\n self._env = self._make_inner_env(start_level=seed)\n\n\nclass MaxStepWrapper(Wrapper):\n def __init__(self, env, max_steps, penalty):\n super().__init__(env)\n self.steps = 0\n self.max_steps = max_steps\n self.penalty = penalty\n\n def reset(self, **kwargs):\n self.steps = 0\n return super(MaxStepWrapper, self).reset(**kwargs)\n\n def step(self, action: Union[int, Iterable[int]]):\n self.steps += 1\n ob, rew, done, info = super(MaxStepWrapper, self).step(action)\n if self.steps > self.max_steps:\n logging.debug(\"step limit reached\")\n done = True\n rew += self.penalty\n return ob, rew, done, info\n\n\nclass QbertGlitchlessWrapper(Wrapper):\n def step(self, action: Union[int, Iterable[int]]):\n ob, rew, done, info = super(QbertGlitchlessWrapper, self).step(action)\n if rew == 500 or rew == 525:\n logging.debug(\"remove reward to avoid luring enemy into abyss\")\n rew = 0\n if rew == 300 or rew == 325:\n logging.debug(\"removed reward from 
fruit to avoid repetitive behavior\")\n rew = 0\n return ob, rew, done, info\n\n\nclass BehaviorWrapper(Wrapper):\n def __init__(self, env, behavior_source, behavioral_interval, behavioral_max_length):\n super().__init__(env)\n self.behavior_source = behavior_source\n self.behavioral_interval = behavioral_interval\n self.behavioral_max_length = behavioral_max_length\n self._reset_compressor()\n\n def _reset_compressor(self):\n self.compressed_behavior = b''\n self.compressor = BZ2Compressor(2)\n self.step_count = 0\n self.aggregate = None\n\n def reset(self, **kwargs):\n return super(BehaviorWrapper, self).reset(**kwargs)\n\n def _aggregate2compressor(self):\n if self.aggregate is not None:\n data_bytes = np.array(self.aggregate).astype(np.float16).tobytes()\n self.compressed_behavior += self.compressor.compress(data_bytes)\n self.aggregate.fill(0)\n\n def _record(self, data):\n if self.behavioral_interval < 0:\n # in this case the actual recording is handled by get_compressed_behavior\n self.aggregate = np.array(data)\n return\n\n if self.aggregate is None:\n self.aggregate = np.array(data, dtype=np.float32)\n self.aggregate.fill(0)\n\n if self.behavioral_interval > 0:\n self.aggregate += np.array(data) / self.behavioral_interval\n\n if self.step_count * self.behavioral_interval < self.behavioral_max_length:\n if self.step_count % self.behavioral_interval == 0:\n self._aggregate2compressor()\n\n def step(self, action: Union[int, Iterable[int]]):\n ob, rew, done, info = super(BehaviorWrapper, self).step(action)\n if self.behavior_source == \"observation\":\n self._record(ob)\n elif self.behavior_source == \"action\":\n self._record(action)\n elif self.behavior_source == \"state\":\n if hasattr(self.env.unwrapped, \"model\") and \"PyMjModel\" in str(type(self.env.unwrapped.model)):\n # since float16.max is only around 65500, we need to make it a little smaller\n data = np.array(self.env.unwrapped.sim.data.qpos.flat) * 10e-3\n self._record(data)\n elif self.env.spec.id.endswith(\"NoFrameskip-v4\"):\n # this is an atari env\n # noinspection PyProtectedMember\n self._record(self.env.unwrapped._get_ram())\n else:\n raise RuntimeError('behavior_source==\"state\" is unsupported for this environment')\n return ob, rew, done, info\n\n def get_compressed_behavior(self):\n if self.behavioral_interval < 0:\n self._aggregate2compressor()\n data = self.compressed_behavior + self.compressor.flush()\n self._reset_compressor()\n return data\n\n\nclass Box2DWalkerWrapper(Wrapper):\n \"\"\" simple speedup for bad agents, because some agents just stand still indefinitely and waste simulation time\"\"\"\n\n def __init__(self, *narg, **kwargs):\n super(Box2DWalkerWrapper, self).__init__(*narg, **kwargs)\n self.consecutive_non_movement = 0\n\n def reset(self, **kwargs):\n self.consecutive_non_movement = 0\n return super(Box2DWalkerWrapper, self).reset(**kwargs)\n\n def step(self, action):\n ob, rew, done, info = super(Box2DWalkerWrapper, self).step(action)\n\n if ob[2] < 0.0001:\n self.consecutive_non_movement = self.consecutive_non_movement + 1\n if self.consecutive_non_movement > 50:\n done = True\n rew = rew - 100\n else:\n self.consecutive_non_movement = 0\n\n return ob, rew, done, info\n\n\nclass ReverseWrapper(Wrapper):\n \"\"\"In reverse-v0 the readhead should be at a specific position when deciding which symbol to write next.\n This Wrapper adds a penalty when the head was in a wrong position, when a symbol was written\"\"\"\n\n def step(self, action):\n ob, rew, done, info = self.env.step(action)\n\n if 
done:\n if rew < 0:\n inp_act, out_act, pred = action\n dist = abs(len(self.unwrapped.target)\n - self.unwrapped.read_head_position\n - self.unwrapped.write_head_position)\n if dist > 0:\n rew -= 1. * dist\n if self.unwrapped.MOVEMENTS[inp_act] != 'left':\n rew -= 1\n\n return ob, rew, done, info\n"
] | [
[
"numpy.array",
"numpy.asarray"
]
] |
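`MaxStepWrapper` above is the standard gym pattern of forcing `done` once a step budget is exceeded and adding a (typically negative) penalty to the reward. A self-contained version of the same idea, assuming classic gym where `step` returns a 4-tuple, as throughout the entry:

    import gym

    class StepBudgetWrapper(gym.Wrapper):
        def __init__(self, env, max_steps, penalty=0.0):
            super().__init__(env)
            self.max_steps = max_steps
            self.penalty = penalty
            self.steps = 0

        def reset(self, **kwargs):
            self.steps = 0
            return self.env.reset(**kwargs)

        def step(self, action):
            self.steps += 1
            ob, rew, done, info = self.env.step(action)
            if self.steps > self.max_steps:
                done = True          # cut the episode short
                rew += self.penalty  # penalty is usually negative
            return ob, rew, done, info

    env = StepBudgetWrapper(gym.make("CartPole-v1"), max_steps=50, penalty=-10.0)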
JiangFeng07/feng-python-apply | [
"1dec2d518ea257467c9b253981cfc281d7ac108a"
] | [
"feng-ml-tf/src/DataSetExample.py"
] | [
"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Author: lionel\n\nimport tensorflow as tf\n\nfilepath = '/tmp/ner_data_test'\n\ndef gen():\n with tf.gfile.GFile(filepath, 'r') as f:\n lines = [line.strip().split(' ') for line in f]\n index = 0\n while True:\n label = lines[index][0]\n features = list(lines[index][1])\n yield (features, label)\n index += 1\n if index == len(lines):\n index = 0\n\n\nif __name__ == '__main__':\n dataset = tf.data.Dataset.from_tensors(tf.constant([['jiang', 'feng'], ['messi', 'henry']]))\n print(dataset.output_shapes)\n print(dataset.output_types)\n\n dataset2 = tf.data.Dataset.from_tensor_slices(tf.constant([['jiang', 'feng'], ['messi', 'henry']]))\n print(dataset.output_shapes)\n print(dataset.output_types)\n\n dataset3 = tf.data.Dataset.from_generator(gen, (tf.string, tf.string),\n (tf.TensorShape([None]), tf.TensorShape([])))\n\n dataset4 = tf.data.Dataset.range(100).map(\n lambda x: x + tf.random_uniform([], -10, 10, tf.int64))\n print(dataset4.output_shapes)\n print(dataset4.output_types)\n\n iterator = dataset3.make_one_shot_iterator()\n next_element = iterator.get_next()\n with tf.Session() as sess:\n for i in range(6):\n try:\n # sess.run(next_element)\n print(sess.run(next_element))\n except tf.errors.OutOfRangeError:\n break\n"
] | [
[
"tensorflow.data.Dataset.range",
"tensorflow.random_uniform",
"tensorflow.Session",
"tensorflow.TensorShape",
"tensorflow.gfile.GFile",
"tensorflow.constant"
]
] |
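The entry's `gen()` loops forever, so iteration only stops because of the `range(6)` bound. A variant with a finite generator, under the same TensorFlow 1.x API the entry uses, shows `OutOfRangeError` firing naturally when the data runs out:

    import tensorflow as tf  # TensorFlow 1.x, as in the entry

    def finite_gen():
        for features, label in [((1.0, 2.0), 0), ((3.0, 4.0), 1)]:
            yield features, label

    dataset = tf.data.Dataset.from_generator(
        finite_gen, (tf.float32, tf.int32),
        (tf.TensorShape([2]), tf.TensorShape([])))

    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    with tf.Session() as sess:
        try:
            while True:
                print(sess.run(next_element))
        except tf.errors.OutOfRangeError:
            pass  # generator exhausted after two elements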
IMvision12/keras-io | [
"44997b0610db078e1109d0dbca58db8319dbc744"
] | [
"examples/vision/object_detection_using_vision_transformer.py"
] | [
"\"\"\"\nTitle: Object detection with Vision Transformers\nAuthor: [Karan V. Dave](https://www.linkedin.com/in/karan-dave-811413164/)\nDate created: 2022/03/27\nLast modified: 2022/03/27\nDescription: A simple Keras implementation of object detection using Vision Transformers.\n\"\"\"\n\n\"\"\"\n## Introduction\n\nThe article\n[Vision Transformer (ViT)](https://arxiv.org/abs/2010.11929)\narchitecture by Alexey Dosovitskiy et al.\ndemonstrates that a pure transformer applied directly to sequences of image\npatches can perform well on object detection tasks.\n\nIn this Keras example, we implement an object detection ViT\nand we train it on the\n[Caltech 101 dataset](http://www.vision.caltech.edu/datasets/)\nto detect an airplane in the given image.\n\nThis example requires TensorFlow 2.4 or higher, and\n[TensorFlow Addons](https://www.tensorflow.org/addons/overview),\nfrom which we import the `AdamW` optimizer.\n\nTensorFlow Addons can be installed via the following command:\n\n```\npip install -U tensorflow-addons\n```\n\"\"\"\n\n\"\"\"\n## Imports and setup\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport tensorflow_addons as tfa\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport scipy.io\nimport shutil\n\n\"\"\"\n## Prepare dataset\n\nWe use the [Caltech 101 Dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech101/).\n\"\"\"\n\n# Path to images and annotations\npath_images = \"/101_ObjectCategories/airplanes/\"\npath_annot = \"/Annotations/Airplanes_Side_2/\"\n\npath_to_downloaded_file = keras.utils.get_file(\n fname=\"caltech_101_zipped\",\n origin=\"https://data.caltech.edu/tindfiles/serve/e41f5188-0b32-41fa-801b-d1e840915e80/\",\n extract=True,\n archive_format=\"zip\", # downloaded file format\n cache_dir=\"/\", # cache and extract in current directory\n)\n\n# Extracting tar files found inside main zip file\nshutil.unpack_archive(\"/datasets/caltech-101/101_ObjectCategories.tar.gz\", \"/\")\nshutil.unpack_archive(\"/datasets/caltech-101/Annotations.tar\", \"/\")\n\n# list of paths to images and annotations\nimage_paths = [\n f for f in os.listdir(path_images) if os.path.isfile(os.path.join(path_images, f))\n]\nannot_paths = [\n f for f in os.listdir(path_annot) if os.path.isfile(os.path.join(path_annot, f))\n]\n\nimage_paths.sort()\nannot_paths.sort()\n\nimage_size = 224 # resize input images to this size\n\nimages, targets = [], []\n\n# loop over the annotations and images, preprocess them and store in lists\nfor i in range(0, len(annot_paths)):\n # Access bounding box coordinates\n annot = scipy.io.loadmat(path_annot + annot_paths[i])[\"box_coord\"][0]\n\n top_left_x, top_left_y = annot[2], annot[0]\n bottom_right_x, bottom_right_y = annot[3], annot[1]\n\n image = keras.utils.load_img(\n path_images + image_paths[i],\n )\n (w, h) = image.size[:2]\n\n # resize train set images\n if i < int(len(annot_paths) * 0.8):\n # resize image if it is for training dataset\n image = image.resize((image_size, image_size))\n\n # convert image to array and append to list\n images.append(keras.utils.img_to_array(image))\n\n # apply relative scaling to bounding boxes as per given image and append to list\n targets.append(\n (\n float(top_left_x) / w,\n float(top_left_y) / h,\n float(bottom_right_x) / w,\n float(bottom_right_y) / h,\n )\n )\n\n# Convert the list to numpy array, split to train and test dataset\n(x_train), (y_train) = (\n np.asarray(images[: int(len(images) * 
0.8)]),\n np.asarray(targets[: int(len(targets) * 0.8)]),\n)\n(x_test), (y_test) = (\n np.asarray(images[int(len(images) * 0.8) :]),\n np.asarray(targets[int(len(targets) * 0.8) :]),\n)\n\n\"\"\"\n## Implement multilayer-perceptron (MLP)\n\nWe use the code from the Keras example\n[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/)\nas a reference.\n\"\"\"\n\n\ndef mlp(x, hidden_units, dropout_rate):\n for units in hidden_units:\n x = layers.Dense(units, activation=tf.nn.gelu)(x)\n x = layers.Dropout(dropout_rate)(x)\n return x\n\n\n\"\"\"\n## Implement the patch creation layer\n\"\"\"\n\n\nclass Patches(layers.Layer):\n def __init__(self, patch_size):\n super(Patches, self).__init__()\n self.patch_size = patch_size\n\n # Override function to avoid error while saving model\n def get_config(self):\n config = super().get_config().copy()\n config.update(\n {\n \"input_shape\": input_shape,\n \"patch_size\": patch_size,\n \"num_patches\": num_patches,\n \"projection_dim\": projection_dim,\n \"num_heads\": num_heads,\n \"transformer_units\": transformer_units,\n \"transformer_layers\": transformer_layers,\n \"mlp_head_units\": mlp_head_units,\n }\n )\n return config\n\n def call(self, images):\n batch_size = tf.shape(images)[0]\n patches = tf.image.extract_patches(\n images=images,\n sizes=[1, self.patch_size, self.patch_size, 1],\n strides=[1, self.patch_size, self.patch_size, 1],\n rates=[1, 1, 1, 1],\n padding=\"VALID\",\n )\n # return patches\n return tf.reshape(patches, [batch_size, -1, patches.shape[-1]])\n\n\n\"\"\"\n## Display patches for an input image\n\"\"\"\n\npatch_size = 32 # Size of the patches to be extracted from the input images\n\nplt.figure(figsize=(4, 4))\nplt.imshow(x_train[0].astype(\"uint8\"))\nplt.axis(\"off\")\n\npatches = Patches(patch_size)(tf.convert_to_tensor([x_train[0]]))\nprint(f\"Image size: {image_size} X {image_size}\")\nprint(f\"Patch size: {patch_size} X {patch_size}\")\nprint(f\"{patches.shape[1]} patches per image \\n{patches.shape[-1]} elements per patch\")\n\n\nn = int(np.sqrt(patches.shape[1]))\nplt.figure(figsize=(4, 4))\nfor i, patch in enumerate(patches[0]):\n ax = plt.subplot(n, n, i + 1)\n patch_img = tf.reshape(patch, (patch_size, patch_size, 3))\n plt.imshow(patch_img.numpy().astype(\"uint8\"))\n plt.axis(\"off\")\n\n\"\"\"\n## Implement the patch encoding layer\n\nThe `PatchEncoder` layer linearly transforms a patch by projecting it into a\nvector of size `projection_dim`. 
It also adds a learnable position\nembedding to the projected vector.\n\"\"\"\n\n\nclass PatchEncoder(layers.Layer):\n def __init__(self, num_patches, projection_dim):\n super(PatchEncoder, self).__init__()\n self.num_patches = num_patches\n self.projection = layers.Dense(units=projection_dim)\n self.position_embedding = layers.Embedding(\n input_dim=num_patches, output_dim=projection_dim\n )\n\n # Override function to avoid error while saving model\n def get_config(self):\n config = super().get_config().copy()\n config.update(\n {\n \"input_shape\": input_shape,\n \"patch_size\": patch_size,\n \"num_patches\": num_patches,\n \"projection_dim\": projection_dim,\n \"num_heads\": num_heads,\n \"transformer_units\": transformer_units,\n \"transformer_layers\": transformer_layers,\n \"mlp_head_units\": mlp_head_units,\n }\n )\n return config\n\n def call(self, patch):\n positions = tf.range(start=0, limit=self.num_patches, delta=1)\n encoded = self.projection(patch) + self.position_embedding(positions)\n return encoded\n\n\n\"\"\"\n## Build the ViT model\n\nThe ViT model has multiple Transformer blocks.\nThe `MultiHeadAttention` layer is used for self-attention,\napplied to the sequence of image patches. The encoded patches (skip connection)\nand self-attention layer outputs are normalized and fed into a\nmultilayer perceptron (MLP).\nThe model outputs four dimensions representing\nthe bounding box coordinates of an object.\n\"\"\"\n\n\ndef create_vit_object_detector(\n input_shape,\n patch_size,\n num_patches,\n projection_dim,\n num_heads,\n transformer_units,\n transformer_layers,\n mlp_head_units,\n):\n inputs = layers.Input(shape=input_shape)\n # Create patches\n patches = Patches(patch_size)(inputs)\n # Encode patches\n encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)\n\n # Create multiple layers of the Transformer block.\n for _ in range(transformer_layers):\n # Layer normalization 1.\n x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)\n # Create a multi-head attention layer.\n attention_output = layers.MultiHeadAttention(\n num_heads=num_heads, key_dim=projection_dim, dropout=0.1\n )(x1, x1)\n # Skip connection 1.\n x2 = layers.Add()([attention_output, encoded_patches])\n # Layer normalization 2.\n x3 = layers.LayerNormalization(epsilon=1e-6)(x2)\n # MLP\n x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)\n # Skip connection 2.\n encoded_patches = layers.Add()([x3, x2])\n\n # Create a [batch_size, projection_dim] tensor.\n representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)\n representation = layers.Flatten()(representation)\n representation = layers.Dropout(0.3)(representation)\n # Add MLP.\n features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.3)\n\n bounding_box = layers.Dense(4)(\n features\n ) # Final four neurons that output bounding box\n\n # return Keras model.\n return keras.Model(inputs=inputs, outputs=bounding_box)\n\n\n\"\"\"\n## Run the experiment\n\"\"\"\n\n\ndef run_experiment(model, learning_rate, weight_decay, batch_size, num_epochs):\n\n optimizer = tfa.optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay\n )\n\n # Compile model.\n model.compile(optimizer=optimizer, loss=keras.losses.MeanSquaredError())\n\n checkpoint_filepath = \"logs/\"\n checkpoint_callback = keras.callbacks.ModelCheckpoint(\n checkpoint_filepath,\n monitor=\"val_loss\",\n save_best_only=True,\n save_weights_only=True,\n )\n\n history = model.fit(\n x=x_train,\n y=y_train,\n 
batch_size=batch_size,\n epochs=num_epochs,\n validation_split=0.1,\n callbacks=[\n checkpoint_callback,\n keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=10),\n ],\n )\n\n return history\n\n\ninput_shape = (image_size, image_size, 3) # input image shape\nlearning_rate = 0.001\nweight_decay = 0.0001\nbatch_size = 32\nnum_epochs = 100\nnum_patches = (image_size // patch_size) ** 2\nprojection_dim = 64\nnum_heads = 4\n# Size of the transformer layers\ntransformer_units = [\n projection_dim * 2,\n projection_dim,\n]\ntransformer_layers = 4\nmlp_head_units = [2048, 1024, 512, 64, 32] # Size of the dense layers\n\n\nhistory = []\nnum_patches = (image_size // patch_size) ** 2\n\nvit_object_detector = create_vit_object_detector(\n input_shape,\n patch_size,\n num_patches,\n projection_dim,\n num_heads,\n transformer_units,\n transformer_layers,\n mlp_head_units,\n)\n\n# Train model\nhistory = run_experiment(\n vit_object_detector, learning_rate, weight_decay, batch_size, num_epochs\n)\n\n\n\"\"\"\n## Evaluate the model\n\"\"\"\n\nimport matplotlib.patches as patches\n\n# Saves the model in current path\nvit_object_detector.save(\"vit_object_detector.h5\", save_format=\"h5\")\n\n# To calculate IoU (intersection over union, given two bounding boxes)\ndef bounding_box_intersection_over_union(box_predicted, box_truth):\n # get (x, y) coordinates of intersection of bounding boxes\n top_x_intersect = max(box_predicted[0], box_truth[0])\n top_y_intersect = max(box_predicted[1], box_truth[1])\n bottom_x_intersect = min(box_predicted[2], box_truth[2])\n bottom_y_intersect = min(box_predicted[3], box_truth[3])\n\n # calculate area of the intersection bb (bounding box)\n intersection_area = max(0, bottom_x_intersect - top_x_intersect + 1) * max(\n 0, bottom_y_intersect - top_y_intersect + 1\n )\n\n # calculate area of the prediction bb and ground-truth bb\n box_predicted_area = (box_predicted[2] - box_predicted[0] + 1) * (\n box_predicted[3] - box_predicted[1] + 1\n )\n box_truth_area = (box_truth[2] - box_truth[0] + 1) * (\n box_truth[3] - box_truth[1] + 1\n )\n\n # calculate intersection over union by taking intersection\n # area and dividing it by the sum of predicted bb and ground truth\n # bb areas subtracted by the interesection area\n\n # return ioU\n return intersection_area / float(\n box_predicted_area + box_truth_area - intersection_area\n )\n\n\ni, mean_iou = 0, 0\n\n# Compare results for 10 images in the test set\nfor input_image in x_test[:10]:\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 15))\n im = input_image\n\n # Display the image\n ax1.imshow(im.astype(\"uint8\"))\n ax2.imshow(im.astype(\"uint8\"))\n\n input_image = cv2.resize(\n input_image, (image_size, image_size), interpolation=cv2.INTER_AREA\n )\n input_image = np.expand_dims(input_image, axis=0)\n preds = vit_object_detector.predict(input_image)[0]\n\n (h, w) = (im).shape[0:2]\n\n top_left_x, top_left_y = int(preds[0] * w), int(preds[1] * h)\n\n bottom_right_x, bottom_right_y = int(preds[2] * w), int(preds[3] * h)\n\n box_predicted = [top_left_x, top_left_y, bottom_right_x, bottom_right_y]\n # Create the bounding box\n rect = patches.Rectangle(\n (top_left_x, top_left_y),\n bottom_right_x - top_left_x,\n bottom_right_y - top_left_y,\n facecolor=\"none\",\n edgecolor=\"red\",\n linewidth=1,\n )\n # Add the bounding box to the image\n ax1.add_patch(rect)\n ax1.set_xlabel(\n \"Predicted: \"\n + str(top_left_x)\n + \", \"\n + str(top_left_y)\n + \", \"\n + str(bottom_right_x)\n + \", \"\n + str(bottom_right_y)\n 
)\n\n top_left_x, top_left_y = int(y_test[i][0] * w), int(y_test[i][1] * h)\n\n bottom_right_x, bottom_right_y = int(y_test[i][2] * w), int(y_test[i][3] * h)\n\n box_truth = top_left_x, top_left_y, bottom_right_x, bottom_right_y\n\n mean_iou += bounding_box_intersection_over_union(box_predicted, box_truth)\n # Create the bounding box\n rect = patches.Rectangle(\n (top_left_x, top_left_y),\n bottom_right_x - top_left_x,\n bottom_right_y - top_left_y,\n facecolor=\"none\",\n edgecolor=\"red\",\n linewidth=1,\n )\n # Add the bounding box to the image\n ax2.add_patch(rect)\n ax2.set_xlabel(\n \"Target: \"\n + str(top_left_x)\n + \", \"\n + str(top_left_y)\n + \", \"\n + str(bottom_right_x)\n + \", \"\n + str(bottom_right_y)\n + \"\\n\"\n + \"IoU\"\n + str(bounding_box_intersection_over_union(box_predicted, box_truth))\n )\n i = i + 1\n\nprint(\"mean_iou: \" + str(mean_iou / len(x_test[:10])))\nplt.show()\n\n\"\"\"\nThis example demonstrates that a pure Transformer can be trained\nto predict the bounding boxes of an object in a given image,\nthus extending the use of Transformers to object detection tasks.\nThe model can be improved further by tuning hyper-parameters and pre-training.\n\"\"\"\n"
] | [
[
"tensorflow.keras.layers.Add",
"tensorflow.keras.utils.get_file",
"tensorflow.reshape",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.shape",
"matplotlib.pyplot.subplots",
"tensorflow.image.extract_patches",
"numpy.sqrt",
"tensorflow.keras.layers.MultiHeadAttention",
"numpy.expand_dims",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"tensorflow.range",
"tensorflow.keras.losses.MeanSquaredError",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dropout",
"matplotlib.pyplot.show",
"tensorflow.keras.utils.load_img",
"tensorflow.convert_to_tensor",
"tensorflow.keras.utils.img_to_array",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.callbacks.EarlyStopping"
]
] |
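A numeric sanity check for `bounding_box_intersection_over_union` above; boxes are `(x1, y1, x2, y2)` and the `+ 1` terms follow the entry's inclusive-pixel convention. This assumes the function from the entry is in scope:

    box_a = (0, 0, 9, 9)   # 10x10 box
    box_b = (5, 0, 14, 9)  # same size, shifted right by 5
    # intersection is 5*10 = 50; union is 100 + 100 - 50 = 150
    print(bounding_box_intersection_over_union(box_a, box_b))  # ~0.3333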
kevinhkhsu/DA_detection | [
"6859cf3f195b3831c1899625122cc0487f60d05f"
] | [
"lib/layer_utils/proposal_layer.py"
] | [
"# --------------------------------------------------------\n# Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom model.config import cfg\nfrom model.bbox_transform import bbox_transform_inv, clip_boxes\nfrom model.nms_wrapper import nms\n\nimport torch\nfrom torch.autograd import Variable\n\n\ndef proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):\n \"\"\"A simplified version compared to fast/er RCNN\n For details please see the technical report\n \"\"\"\n if type(cfg_key) == bytes:\n cfg_key = cfg_key.decode('utf-8')\n pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n\n # Get the scores and bounding boxes\n scores = rpn_cls_prob[:, :, :, num_anchors:]\n rpn_bbox_pred = rpn_bbox_pred.view((-1, 4))\n scores = scores.contiguous().view(-1, 1)\n proposals = bbox_transform_inv(anchors, rpn_bbox_pred)\n proposals = clip_boxes(proposals, im_info[:2])\n\n # Pick the top region proposals\n scores, order = scores.view(-1).sort(descending=True)\n if pre_nms_topN > 0:\n order = order[:pre_nms_topN]\n scores = scores[:pre_nms_topN].view(-1, 1)\n proposals = proposals[order.data, :]\n\n # Non-maximal suppression\n keep = nms(torch.cat((proposals, scores), 1).data, nms_thresh) #error\n\n # Pick the top region proposals after NMS\n if post_nms_topN > 0:\n keep = keep[:post_nms_topN]\n proposals = proposals[keep, :]\n scores = scores[keep,]\n\n # Only support single image as input\n batch_inds = Variable(proposals.data.new(proposals.size(0), 1).zero_())\n blob = torch.cat((batch_inds, proposals), 1)\n\n return blob, scores\n\ndef proposal_layer_fpn(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):\n \"\"\"A simplified version compared to fast/er RCNN\n For details please see the technical report\n \"\"\"\n if type(cfg_key) == bytes:\n cfg_key = cfg_key.decode('utf-8')\n pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n\n proposals_total = []\n scores_total = []\n for idx in range(len(rpn_cls_prob)):\n # Get the scores and bounding boxes\n scores = rpn_cls_prob[idx][:, :, :, num_anchors:]\n rpn_bbox_pred[idx] = rpn_bbox_pred[idx].view((-1, 4))\n scores = scores.contiguous().view(-1, 1)\n proposals = bbox_transform_inv(anchors[idx], rpn_bbox_pred[idx])\n proposals = clip_boxes(proposals, im_info[:2])\n \n # Pick the top region proposals\n scores, order = scores.view(-1).sort(descending=True)\n if pre_nms_topN > 0:\n order = order[:pre_nms_topN]\n scores = scores[:pre_nms_topN].view(-1, 1)\n proposals = proposals[order.data, :]\n\n proposals_total.append(proposals)\n scores_total.append(scores)\n\n proposals = torch.cat(proposals_total)\n scores = torch.cat(scores_total)\n\n # Non-maximal suppression\n keep = nms(torch.cat((proposals, scores), 1).data, nms_thresh)\n\n # Pick th top region proposals after NMS\n if post_nms_topN > 0:\n keep = keep[:post_nms_topN]\n proposals = proposals[keep, :]\n scores = scores[keep,]\n\n # Only support single image as input\n batch_inds = Variable(proposals.data.new(proposals.size(0), 1).zero_())\n blob = torch.cat((batch_inds, proposals), 1)\n\n return 
blob, scores"
] | [
[
"torch.cat"
]
] |
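The pre-NMS step above sorts every proposal score in descending order and keeps only the top N boxes before NMS runs. The indexing pattern on toy tensors (plain tensors here; the entry's `.data` access is a leftover of the old `Variable` API):

    import torch

    scores = torch.tensor([0.1, 0.9, 0.4, 0.7])
    proposals = torch.arange(16, dtype=torch.float32).view(4, 4)  # (N, 4) boxes

    scores, order = scores.view(-1).sort(descending=True)
    pre_nms_topN = 2
    order = order[:pre_nms_topN]                 # indices of the best-scoring boxes
    scores = scores[:pre_nms_topN].view(-1, 1)
    proposals = proposals[order, :]
    print(proposals)  # rows 1 and 3 of the original tensor, highest score first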
JoeBuzh/DeepWater | [
"9d01167517c91fb2024d2abbcbaa53b072c4fdbf"
] | [
"src/DataSimulation/run.py"
] | [
"# -*- encoding: utf-8 -*-\n'''\n@Filename : main.py\n@Datetime : 2020/08/19 16:12:29\n@Author : Joe-Bu\n@version : 1.0\n'''\n\nimport os\nimport sys\nfrom copy import deepcopy\nsys.path.append('../../')\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy import between\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\nfrom src.DataSimulation.model import get_cols_data, train_split_std\nfrom src.DataSimulation.model import model_load, model_save, poly_features\nfrom src.DataSimulation.model import model_train, model_predict, model_evaluate\n\nfrom config.common import simulate_params\nfrom dao.orm.SQLite_dynamic import orm\nfrom dao.orm.SQLite_dynamic import ObsDataRaw, ObsDataQcNonLinear\nfrom lib.machineL.NonLinearModel import XGB, GBRT, RF\nfrom utils.SimulateUtils import wipe_anomaly\nfrom utils.ThreadUtils import WorkThread\nfrom utils.ConstantUtils import MonitorItem\n\n\ndef query_data(session, station_name: str, start: datetime, end: datetime) -> pd.DataFrame:\n \"\"\"\n 读取 & 解析数据.\n \"\"\"\n time_format = \"%Y-%m-%d %H:00:00\"\n resp = session.query(\n ObsDataRaw.time,\n ObsDataRaw.watertemp,\n ObsDataRaw.pH,\n ObsDataRaw.DO,\n ObsDataRaw.conductivity,\n ObsDataRaw.turbidity,\n ObsDataRaw.codmn,\n ObsDataRaw.nh3n,\n ObsDataRaw.tp,\n ObsDataRaw.tn) \\\n .filter_by(name=station_name) \\\n .filter(between(ObsDataRaw.time, start.strftime(time_format), end.strftime(time_format))) \\\n .all()\n data = pd.DataFrame(resp)\n \n return data.replace([-999.0, 999.0], [np.nan, np.nan])\n\n\ndef insert_data(session, station_name: str, model_name,data: pd.DataFrame):\n \"\"\"\n 数据写入.\n \"\"\"\n inserts = []\n for i, row in data.iterrows():\n ins = ObsDataQcNonLinear(\n time=row['time'],\n name=station_name,\n method=model_name,\n watertemp=row['watertemp'],\n pH=row['pH'],\n DO=row['DO'],\n conductivity=row['conductivity'],\n turbidity=row['turbidity'],\n codmn=row['codmn_hat'],\n nh3n=row['nh3n_hat'],\n tp=row['tp_hat'],\n tn=row['tn_hat'])\n inserts.append(ins)\n \n session.add_all(inserts)\n session.flush()\n\n\ndef train(model_name: str, index_name: str, train_data: pd.DataFrame) -> bool:\n \"\"\" \n 模型训练过程.\n \"\"\" \n if model_name == 'RF':\n model_in = RF(n_estimators=300, max_features=0.7, oob_score=True)\n elif model_name == 'GBRT':\n model_in = GBRT(n_estimators=300, learning_rate=1, subsample=0.7, max_features=0.6)\n elif model_name == 'XGB':\n model_in = XGB(n_estimators=300, subsample=0.7)\n else:\n print('Bad Model Selection')\n sys.exit()\n\n dataset = get_cols_data(train_data, columns=MonitorItem.ITEMS+[index_name], lower=.05, upper=.95)\n X_train, X_test, y_train, y_test, std_x, std_y = train_split_std(dataset, index=index_name, method='STD')\n poly = poly_features(degree=3)\n print(\"{0}:: Train Size: {1}\".format(index_name, X_train.shape))\n\n # sys.exit()\n\n model_out = model_train(model_in, poly.fit_transform(X_train), y_train)\n train_err = model_evaluate(\n std_y.inverse_transform(y_train), \n std_y.inverse_transform(model_predict(model_out, poly.fit_transform(X_train))))\n test_err = model_evaluate(\n std_y.inverse_transform(y_test), \n std_y.inverse_transform(model_predict(model_out, poly.fit_transform(X_test))))\n print(\"{0}:: Train Error:{1}\\nTest Error:{2}\".format(index_name, train_err, test_err))\n model_save(model_out, \n os.path.join(simulate_params['savedir'], '{0}_{1}.pkl'.format(model_name, index_name)))\n\n if 
os.path.exists(os.path.join(simulate_params['savedir'], '{0}_{1}.pkl'.format(model_name, index_name))):\n        return True\n    else:\n        return False\n\n\ndef predict(model_name: str, index_name: str, pred_data: pd.DataFrame):\n    \"\"\"\n    Model prediction procedure.\n    \"\"\"\n    temp_data = deepcopy(pred_data)\n    data_qc = wipe_anomaly(data=temp_data, lower=.05, upper=.95)\n    model = model_load(os.path.join(simulate_params['savedir'], '{0}_{1}.pkl'.format(model_name, index_name)))\n\n    data = data_qc[MonitorItem.ITEMS+[index_name]].interpolate(method='linear').fillna(method='bfill')\n    test_std_x = StandardScaler().fit(data[MonitorItem.ITEMS])\n    test_std_y = StandardScaler().fit(data[[index_name]])\n    poly = poly_features(degree=3)\n\n    data['{}_hat'.format(index_name)] = test_std_y.inverse_transform(\n        model.predict(poly.fit_transform(test_std_x.transform(data[MonitorItem.ITEMS])))\n    ).reshape(-1, 1)\n    print(data.shape)\n\n    return abs(data[['{}_hat'.format(index_name)]])\n\n\ndef test():\n    \"\"\"\n    Test.\n    \"\"\"\n    session = orm.create_session()\n    # data query\n    train_start = datetime(2018, 5, 1, 0)\n    train_end = datetime(2020, 8, 31, 20)\n    predict_start = datetime(2018, 5, 1, 0)\n    predict_end = datetime(2020, 8, 30, 20)\n    station = \"龙门大桥\"\n    data_train = query_data(session=session, station_name=station, start=train_start, end=train_end)\n    data_predict = query_data(session=session, station_name=station, start=predict_start, end=predict_end)\n    print(\"Train Size: {}\".format(data_train.shape))\n    print(\"Predict Size: {}\".format(data_predict.shape))\n    # model param\n    indexs = simulate_params['indexs']\n    model = simulate_params['model']\n    modes = simulate_params['modes']\n\n    if modes == 'train':\n        # multiprocessing\n        threads = []\n        for _, index in enumerate(indexs):\n            worker = WorkThread(train, (model,index,data_train), 'train_{}'.format(index))\n            threads.append(worker)\n        for i in range(len(indexs)):\n            threads[i].start()\n        for i in range(len(indexs)):\n            threads[i].join()\n\n    elif modes == 'predict':\n        # multiprocessing\n        threads = []\n        results = []\n        for _, index in enumerate(indexs):\n            worker = WorkThread(predict, (model,index,data_predict), 'predict_{}'.format(index))\n            threads.append(worker)\n        for i in range(len(indexs)):\n            threads[i].start()\n        for i in range(len(indexs)):\n            threads[i].join(3)\n            results.append(threads[i].get_result()) \n\n#        print(data_predict)\n        predictions = pd.concat(results, axis=1)\n        data_insert = pd.concat([data_predict, predictions], axis=1)\n        # insert\n        print(data_insert)\n        insert_data(session=session, station_name=station, model_name=model, data=data_insert)\n        \n        session.commit()\n\n    else:\n        print('Wrong Modes With [ {} ]'.format(modes))\n        sys.exit()\n    \n    session.close()\n\n\nif __name__ == \"__main__\":\n    test()"
] | [
[
"pandas.concat",
"pandas.DataFrame",
"sklearn.preprocessing.StandardScaler"
]
] |
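The predict path above standardizes inputs, expands them with degree-3 polynomial features, predicts, then maps the target back through `inverse_transform`. A compact round-trip sketch of that scale → expand → unscale pipeline, with a linear model standing in for the pickled regressor (all names illustrative):

    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures, StandardScaler

    X = np.random.rand(50, 3)
    y = X.sum(axis=1, keepdims=True) ** 2

    std_x = StandardScaler().fit(X)
    std_y = StandardScaler().fit(y)
    poly = PolynomialFeatures(degree=3)

    model = LinearRegression().fit(
        poly.fit_transform(std_x.transform(X)), std_y.transform(y))
    y_hat = std_y.inverse_transform(
        model.predict(poly.fit_transform(std_x.transform(X))))  # back to original units
    print(y_hat[:3].ravel())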
xfzlun/WinUIAutomation | [
"0f94388671cf0aacbc8499293b7dd31ddfa205fa"
] | [
"cutAutomation.py"
] | [
"#-*- coding:utf-8 -*-\n\nimport cv2\nimport os\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport imutils\n\n'''\n1. 辨识BIOS光标位置 两个方案:1. 颜色识别,2. 轮廓识别\n'''\n\npicPath = \"C:\\\\Users\\\\Laurence_SZH\\\\Pictures\\\\Camera Roll\\\\201124183637.BMP\"\nfullPath = os.path.expanduser(picPath)\nprint(fullPath)\n\npic = cv2.imread(fullPath)\nprint(pic)\n#pic2 = pic[:,:,[2,1,0]] \npic2 = cv2.cvtColor(pic, cv2.COLOR_BGR2RGB)\n#plt.subplot(20, 20, 10)\n#以下这段主要是show出我们读到的图片\n\n#用plt.show的好处是可以读得到threshod的值\nplt.imshow(pic2)\nplt.title('BGR')\n#plt.show() # 用这个会一直显示\nplt.ion() #搭配plt.pause & plt.close()可以让窗口显示5秒就关闭\nplt.pause(5) \n#plt.waitforbuttonpress(4)\nplt.close() #关闭图像窗口\n\n# 利用阀值_绘制长方形的BIOS光标轮廓\nimg_gray = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)\n# ret, thresh = cv2.threshold(img_gray, 16, 18, 0)\nret, thresh = cv2.threshold(img_gray, 33, 0, 0)\nplt.imshow(img_gray)\nplt.title('thresh')\nplt.ion() \nplt.pause(35)\nplt.close()\n\n#检测图像的连通区(输入为二值化图像)\nthreshold, contours, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n# 绘制寻找到的轮廓线\nimg_contours = cv2.drawContours(pic, contours, -1, (255,255,255),3)\n\ncv2.imshow('contours', img_contours)\ncv2.waitKey(0) & 0xFF == ord('q')\n\n# 测试一下那个阀值的参数更适合我们的BIOS介面\n#import numpy as np\n#from matplotlib import pyplot as plt\n\n#img = cv2.imread(r'/Users/Documents/image01.jpg',0) # 读入灰度图\nret,thresh1 = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY)\nret,thresh2 = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY_INV)\nret,thresh3 = cv2.threshold(img_gray,127,255,cv2.THRESH_TRUNC)\nret,thresh4 = cv2.threshold(img_gray,127,255,cv2.THRESH_TOZERO)\nret,thresh5 = cv2.threshold(img_gray,127,255,cv2.THRESH_TOZERO_INV)\n\ntitles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']\nimages = [img_gray, thresh1, thresh2, thresh3, thresh4, thresh5]\n\nfor i in range(6):\n plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\n\nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.imshow"
]
] |
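One portability note on the entry above: `cv2.findContours` returns a 3-tuple on OpenCV 3.x but a 2-tuple on 4.x, so the `threshold, contours, heirarchy = ...` unpacking fails on newer builds. Since the script already imports `imutils`, a version-agnostic form could look like this (synthetic image, so it runs standalone):

    import cv2
    import imutils
    import numpy as np

    img = np.zeros((100, 100), dtype=np.uint8)
    cv2.rectangle(img, (20, 20), (60, 60), 255, -1)  # filled white square
    _, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

    # grab_contours extracts the contour list from either return convention
    cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(cnts)
    print(len(contours))  # 1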
doncat99/zvt | [
"689aed45bab9e691566b308d2778170a0b3950ec"
] | [
"zvt/analysis/models/drl_agent_models.py"
] | [
"# common library\nimport pandas as pd\nimport numpy as np\nimport time\nimport gym\n\n# RL models from stable-baselines\n#from stable_baselines3 import SAC\n#from stable_baselines3 import TD3\n\nfrom stable_baselines3.dqn import MlpPolicy\nfrom stable_baselines3.common.vec_env import DummyVecEnv\n\nfrom zvt import zvt_env\n\n\nA2C_PARAMS = {'n_steps':5, \n 'ent_coef':0.01, \n 'learning_rate':0.0007,\n 'verbose':0,\n 'timesteps':20000}\nPPO_PARAMS = {'n_steps':128, \n 'ent_coef':0.01, \n 'learning_rate':0.00025, \n 'nminibatches':4,\n 'verbose':0,\n 'timesteps':20000}\nDDPG_PARAMS = {'batch_size':128, \n 'buffer_size':50000,\n 'verbose':0,\n 'timesteps':20000}\nTD3_PARAMS = {'batch_size':128, \n 'buffer_size':50000,\n 'learning_rate':1e-4,\n 'verbose':0,\n 'timesteps':20000}\nSAC_PARAMS = {'batch_size': 64,\n 'buffer_size': 100000,\n 'learning_rate': 0.0001,\n 'learning_starts':100,\n 'ent_coef':'auto_0.1',\n 'timesteps': 50000,\n 'verbose': 0}\n\n\nclass DRLAgent:\n \"\"\"Provides implementations for DRL algorithms\n\n Attributes\n ----------\n env: gym environment class\n user-defined class\n\n Methods\n -------\n train_PPO()\n the implementation for PPO algorithm\n train_A2C()\n the implementation for A2C algorithm\n train_DDPG()\n the implementation for DDPG algorithm\n train_TD3()\n the implementation for TD3 algorithm \n train_SAC()\n the implementation for SAC algorithm \n DRL_prediction() \n make a prediction in a test dataset and get results\n \"\"\"\n def __init__(self, env):\n self.env = env\n\n def train_A2C(self, model_name, model_params = A2C_PARAMS):\n \"\"\"A2C model\"\"\"\n from stable_baselines3 import A2C\n env_train = self.env\n start = time.time()\n model = A2C('MlpPolicy', env_train, \n n_steps = model_params['n_steps'],\n ent_coef = model_params['ent_coef'],\n learning_rate = model_params['learning_rate'],\n verbose = model_params['verbose'],\n tensorboard_log = f\"{zvt_env['log_path']}/{model_name}\"\n )\n model.learn(total_timesteps=model_params['timesteps'], tb_log_name = \"A2C_run\")\n end = time.time()\n\n model.save(f\"{zvt_env['model_path']}/{model_name}\")\n print('Training time (A2C): ', (end-start)/60,' minutes')\n return model\n\n\n def train_DDPG(self, model_name, model_params = DDPG_PARAMS):\n \"\"\"DDPG model\"\"\"\n from stable_baselines3.ddpg.ddpg import DDPG\n # from stable_baselines3.ddpg.policies import DDPGPolicy\n from stable_baselines3.common.noise import OrnsteinUhlenbeckActionNoise\n\n\n env_train = self.env\n\n n_actions = env_train.action_space.shape[-1]\n # param_noise = None\n action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5)*np.ones(n_actions))\n\n start = time.time()\n model = DDPG('MlpPolicy', \n env_train,\n batch_size=model_params['batch_size'],\n buffer_size=model_params['buffer_size'],\n # param_noise=param_noise,\n action_noise=action_noise,\n verbose=model_params['verbose'],\n tensorboard_log = f\"{zvt_env['log_path']}/{model_name}\"\n )\n model.learn(total_timesteps=model_params['timesteps'], tb_log_name = \"DDPG_run\")\n end = time.time()\n\n model.save(f\"{zvt_env['model_path']}/{model_name}\")\n print('Training time (DDPG): ', (end-start)/60,' minutes')\n return model\n\n\n def train_TD3(self, model_name, model_params = TD3_PARAMS):\n \"\"\"TD3 model\"\"\"\n from stable_baselines3 import TD3\n from stable_baselines3.common.noise import NormalActionNoise\n\n env_train = self.env\n\n n_actions = env_train.action_space.shape[-1]\n action_noise = NormalActionNoise(mean=np.zeros(n_actions), 
sigma=0.1*np.ones(n_actions))\n\n        start = time.time()\n        model = TD3('MlpPolicy', env_train,\n                    batch_size=model_params['batch_size'],\n                    buffer_size=model_params['buffer_size'],\n                    learning_rate = model_params['learning_rate'],\n                    action_noise = action_noise,\n                    verbose=model_params['verbose'],\n                    tensorboard_log = f\"{zvt_env['log_path']}/{model_name}\"\n                    )\n        model.learn(total_timesteps=model_params['timesteps'], tb_log_name = \"TD3_run\")\n        end = time.time()\n\n        model.save(f\"{zvt_env['model_path']}/{model_name}\")\n        print('Training time (TD3): ', (end-start)/60,' minutes')\n        return model\n\n    def train_SAC(self, model_name, model_params = SAC_PARAMS):\n        \"\"\"SAC model\"\"\"\n        from stable_baselines3 import SAC\n\n        env_train = self.env\n\n        start = time.time()\n        model = SAC('MlpPolicy', env_train,\n                    batch_size=model_params['batch_size'],\n                    buffer_size=model_params['buffer_size'],\n                    learning_rate = model_params['learning_rate'],\n                    learning_starts=model_params['learning_starts'],\n                    ent_coef=model_params['ent_coef'],\n                    verbose=model_params['verbose'],\n                    tensorboard_log = f\"{zvt_env['log_path']}/{model_name}\"\n                    )\n        model.learn(total_timesteps=model_params['timesteps'], tb_log_name = \"SAC_run\")\n        end = time.time()\n\n        model.save(f\"{zvt_env['model_path']}/{model_name}\")\n        print('Training time (SAC): ', (end-start)/60,' minutes')\n        return model\n\n\n    def train_PPO(self, model_name, model_params = PPO_PARAMS):\n        \"\"\"PPO model\"\"\"\n        from stable_baselines3 import PPO\n        env_train = self.env\n\n        start = time.time()\n        model = PPO('MlpPolicy', env_train,\n                    n_steps = model_params['n_steps'],\n                    ent_coef = model_params['ent_coef'],\n                    learning_rate = model_params['learning_rate'],\n                    # nminibatches = model_params['nminibatches'],\n                    verbose = model_params['verbose'],\n                    tensorboard_log = f\"{zvt_env['log_path']}/{model_name}\"\n                    )\n        model.learn(total_timesteps=model_params['timesteps'], tb_log_name = \"PPO_run\")\n        end = time.time()\n\n        model.save(f\"{zvt_env['model_path']}/{model_name}\")\n        print('Training time (PPO): ', (end-start)/60,' minutes')\n        return model\n\n    @staticmethod\n    def DRL_prediction(model, test_data, test_env, test_obs):\n        \"\"\"make a prediction\"\"\"\n        # start = time.time()\n        account_memory = []\n        for i in range(len(test_data.index.unique())):\n            action, _states = model.predict(test_obs)\n            test_obs, rewards, dones, info = test_env.step(action)\n            if i == (len(test_data.index.unique()) - 2):\n                account_memory = test_env.env_method(method_name = 'save_asset_memory')\n                actions_memory = test_env.env_method(method_name = 'save_action_memory')\n        # end = time.time()\n        return account_memory[0], actions_memory[0]"
] | [
[
"numpy.ones",
"numpy.zeros"
]
] |
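A hedged usage sketch for `DRLAgent` above: wrap an environment in `DummyVecEnv` (already imported in the entry) and call one of the `train_*` methods. `Pendulum-v1` is a stand-in with the continuous action space SAC needs (older gym releases name it `Pendulum-v0`); the entry's own trading environment and the `zvt_env` log/model paths are assumed to be configured:

    import gym
    from stable_baselines3.common.vec_env import DummyVecEnv

    env = DummyVecEnv([lambda: gym.make("Pendulum-v1")])  # placeholder env
    agent = DRLAgent(env)                                  # class from the entry above
    model = agent.train_SAC(model_name="sac_demo")         # uses SAC_PARAMS defaults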