repo_name: string (length 6–130)
hexsha: list
file_path: list
code: list
apis: list
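Each record below pairs a repository with the source files sampled from it and the library calls found in each file. As a minimal sketch (assuming the records are serialized one per line as JSON objects with the fields above; the filename is hypothetical), they can be iterated like this:

import json

def iter_records(path="code_api_records.jsonl"):  # hypothetical filename
    """Yield one (repo_name, hexsha, file_path, code, apis) tuple per record."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            rec = json.loads(line)
            # hexsha, file_path, and code are parallel lists (one entry per file);
            # apis is a list of lists, one API-name list per file.
            yield (rec["repo_name"], rec["hexsha"], rec["file_path"],
                   rec["code"], rec["apis"])

if __name__ == "__main__":
    for repo, shas, paths, codes, api_lists in iter_records():
        print(repo, paths[0], len(api_lists[0]), "library calls")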
hutao965/lightseq
[ "9a617306fa711a3d6a25ef3eab9bfbe408692189" ]
[ "lightseq/training/ops/pytorch/transformer_decoder_layer.py" ]
[ "import math\nfrom dataclasses import dataclass\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\n\nfrom lightseq.training.ops.pytorch import transformer_cuda_module\nfrom lightseq.training.ops.pytorch.builder import TransformerBuilder\nfrom lightseq.training.ops.pytorch.util import (\n copy_para,\n state_dict,\n MODEL_ARCH,\n check_config,\n calc_offset,\n)\nfrom lightseq.training.ops.pytorch.layer_base import TransformerDecoderLayerBase\n\n\n_all_layer_grads = dict()\n_shared_encdec_attn_kv_params = dict()\n\n\nclass LSTransformerDecoderFunc(Function):\n @staticmethod\n def forward(\n ctx,\n decoder_states,\n encoder_out,\n encoder_padding_mask,\n parameters,\n config,\n cache,\n ):\n cuda_module = transformer_cuda_module\n forward_func = (\n cuda_module.transformer_decoder_layer_fw_fp16\n if config.fp16\n else cuda_module.transformer_decoder_layer_fw_fp32\n )\n\n (output,) = forward_func(\n config.layer_id,\n decoder_states,\n encoder_out,\n encoder_padding_mask,\n config.training,\n config.pre_layer_norm,\n cache,\n )\n\n if config.is_grad_enabled and config.training:\n ctx.save_for_backward(\n output,\n decoder_states,\n encoder_out,\n encoder_padding_mask,\n )\n ctx.config = config\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n cuda_module = transformer_cuda_module\n backward_func = (\n cuda_module.transformer_decoder_layer_bw_fp16\n if ctx.config.fp16\n else cuda_module.transformer_decoder_layer_bw_fp32\n )\n assert ctx.config.training\n (\n output,\n decoder_states,\n encoder_out,\n encoder_padding_mask,\n ) = ctx.saved_tensors\n\n if ctx.config.fp16:\n grad_output = grad_output.to(torch.half)\n output = output.to(torch.half)\n decoder_states = decoder_states.to(torch.half)\n encoder_out = encoder_out.to(torch.half)\n encoder_padding_mask = encoder_padding_mask.to(torch.half)\n\n bw_res = backward_func(\n ctx.config.layer_id,\n grad_output,\n output,\n decoder_states,\n encoder_out,\n encoder_padding_mask,\n )\n if ctx.config.layer_id == 0:\n grad_input, grad_enc_out = bw_res\n else:\n grad_input = bw_res[0]\n grad_enc_out = None\n\n grad = _all_layer_grads[ctx.config.layer_id]\n return (grad_input, grad_enc_out, None, grad, None, None)\n\n\nclass LSTransformerDecoderLayer(TransformerDecoderLayerBase):\n \"\"\"Initialize the Lightseq Transformer Decoder Layer.\n\n Static variable:\n layer_id: The layer-index counter starting from 0 and incrementing by 1 every time a layer object is instantiated,\n e.g. 
if a model has 24 transformer layers, layer_id goes from 0 to 23.\n Arguments:\n config: An object of LSTransformerDecoderLayer config, see get_config\n\n initial_weights: Optional: Only used for unit test\n\n initial_biases: Optional: Only used for unit test\n \"\"\"\n\n layer_id = 0\n\n def __init__(self, config, initial_weights=None, initial_biases=None):\n super(LSTransformerDecoderLayer, self).__init__()\n\n self.config = config\n self.config.layer_id = LSTransformerDecoderLayer.layer_id\n LSTransformerDecoderLayer.layer_id = LSTransformerDecoderLayer.layer_id + 1\n\n print(\"Lightseq Transformer config is \", self.config.__dict__)\n\n if self.config.local_rank >= 0:\n torch.cuda.set_device(self.config.local_rank)\n\n # create the layer in cuda kernels.\n cuda_module = transformer_cuda_module\n create_layer_func = (\n cuda_module.create_transformer_decoder_layer_fp16\n if self.config.fp16\n else cuda_module.create_transformer_decoder_layer_fp32\n )\n\n create_layer_func(\n self.config.layer_id,\n self.config.max_batch_tokens,\n self.config.max_seq_len,\n self.config.hidden_size,\n self.config.nhead,\n self.config.intermediate_size,\n self.config.attn_prob_dropout_ratio,\n self.config.activation_dropout_ratio,\n self.config.hidden_dropout_ratio,\n self.config.pre_layer_norm,\n self.config.activation_fn,\n )\n\n hs = self.config.hidden_size\n ims = self.config.intermediate_size\n\n self.para_offset = LSTransformerDecoderLayer.gen_offset(\n hs, ims, self.config.nlayer\n )\n if self.config.layer_id != 0:\n self.para_offset = self.para_offset[:-2]\n self.para = nn.Parameter(torch.Tensor(self.para_offset[-1]))\n\n if initial_weights is None and initial_biases is None:\n # enc-dec kv weights and bias\n self.init_transformer_weights()\n return\n\n # For testing only.\n attn_qkvw = [ele.detach().clone() for ele in initial_weights[:3]]\n attn_qkvw = torch.cat(attn_qkvw, dim=0)\n weights = [attn_qkvw] + [copy_para(ele) for ele in initial_weights[3:]]\n\n attn_qkvb = [ele.detach().clone() for ele in initial_biases[:3]]\n attn_qkvb = torch.cat(attn_qkvb, dim=0)\n biases = [attn_qkvb] + [copy_para(ele) for ele in initial_biases[3:]]\n\n idx = 0\n for w, b in zip(weights, biases):\n cur_para = self._get_weights(idx)\n assert cur_para.numel() == w.numel()\n cur_para.copy_(w.view(-1))\n idx += 1\n\n cur_para = self._get_weights(idx)\n assert cur_para.numel() == b.numel()\n cur_para.copy_(b.view(-1))\n idx += 1\n\n @staticmethod\n def gen_offset(hidden_size, intermediate_size, nlayer):\n hs, ims = hidden_size, intermediate_size\n sizes = [\n hs * hs * 3, # attn_qkvw\n hs * 3, # attn_qkvb\n hs * hs, # attn_ow\n hs, # attn_ob\n hs, # attn_nw\n hs, # attn_nb\n hs * hs, # encdec_attn_qw\n hs, # encdec_attn_qb\n hs * hs, # encdec_attn_ow\n hs, # encdec_attn_ob\n hs, # encdec_attn_nw\n hs, # encdec_attn_nb\n hs * ims, # inter_w\n ims, # inter_b\n hs * ims, # output_w\n hs, # output_b\n hs, # ffn_nw\n hs, # ffn_nb\n hs * hs * 2 * nlayer, # encdec_attn_kvw\n hs * 2 * nlayer, # encdec_attn_kvb\n ]\n offsets = calc_offset(sizes)\n return offsets\n\n def _get_weights(self, i):\n return self.para.data.narrow(\n 0, self.para_offset[i], self.para_offset[i + 1] - self.para_offset[i]\n )\n\n def calc_bound(self, w):\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(w)\n bound = 1.0 / math.sqrt(fan_in)\n return bound\n\n def init_transformer_weights(self):\n \"\"\"\n 0 attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb,\n 6 encdec_attn_qw, encdec_attn_qb, encdec_attn_ow, encdec_attn_ob, encdec_attn_nw, 
encdec_attn_nb,\n 12 inter_w, inter_b, output_w, output_b, ffn_nw, ffn_nb\n 18 encdec_attn_kvw, encdec_attn_kvb,\n \"\"\"\n hs = self.config.hidden_size\n ims = self.config.intermediate_size\n attn_qkvw = self._get_weights(0).view(-1, hs)\n nn.init.xavier_uniform_(attn_qkvw, 1.0 / math.sqrt(2.0))\n bound = self.calc_bound(attn_qkvw)\n nn.init.uniform_(self._get_weights(1), -bound, bound)\n\n encdec_attn_qw = self._get_weights(6).view(hs, hs)\n nn.init.xavier_uniform_(encdec_attn_qw, 1.0 / math.sqrt(2.0))\n bound = self.calc_bound(encdec_attn_qw)\n nn.init.uniform_(self._get_weights(7), -bound, bound)\n\n nn.init.xavier_uniform_(self._get_weights(2).view(hs, hs), 1.0)\n nn.init.zeros_(self._get_weights(3))\n nn.init.xavier_uniform_(self._get_weights(8).view(hs, hs), 1.0)\n nn.init.zeros_(self._get_weights(9))\n\n inter_w = self._get_weights(12).view(ims, hs)\n nn.init.kaiming_uniform_(inter_w, math.sqrt(5.0))\n bound = self.calc_bound(inter_w)\n nn.init.uniform_(self._get_weights(13), -bound, bound)\n\n output_w = self._get_weights(14).view(hs, ims)\n nn.init.kaiming_uniform_(output_w, math.sqrt(5.0))\n bound = self.calc_bound(output_w)\n nn.init.uniform_(self._get_weights(15), -bound, bound)\n\n nn.init.ones_(self._get_weights(4))\n nn.init.zeros_(self._get_weights(5))\n nn.init.ones_(self._get_weights(10))\n nn.init.zeros_(self._get_weights(11))\n nn.init.ones_(self._get_weights(16))\n nn.init.zeros_(self._get_weights(17))\n\n if self.config.layer_id == 0:\n encdec_attn_kvw = self._get_weights(18).view(-1, hs)\n nn.init.xavier_uniform_(encdec_attn_kvw, 1.0 / math.sqrt(2.0))\n bound = self.calc_bound(encdec_attn_kvw)\n nn.init.uniform_(self._get_weights(19), -bound, bound)\n\n def __assign_layer_weight_grad(self):\n param = (\n self.para_16\n if self.config.fp16 and self.para.dtype != torch.half\n else self.para\n )\n\n if self.config.layer_id in _all_layer_grads:\n return\n grad = torch.empty_like(param)\n cuda_module = transformer_cuda_module\n if self.config.fp16:\n func = cuda_module.assign_layer_weight_grad_fp16\n else:\n func = cuda_module.assign_layer_weight_grad_fp32\n func(param, grad, \"TransformerDecoderLayer\", self.config.layer_id)\n _all_layer_grads[self.config.layer_id] = grad\n\n def split_weights(self):\n weights = [self._get_weights(i) for i in range(18)]\n\n if self.config.layer_id == 0:\n _shared_encdec_attn_kv_params[\"w\"] = self._get_weights(18)\n _shared_encdec_attn_kv_params[\"b\"] = self._get_weights(19)\n encdec_kvw = _shared_encdec_attn_kv_params[\"w\"]\n encdec_kvb = _shared_encdec_attn_kv_params[\"b\"]\n\n hs = self.config.hidden_size\n offset = hs * hs * 2 * self.config.layer_id\n encdec_kvw = encdec_kvw.data.narrow(0, offset, hs * hs * 2)\n offset = hs * 2 * self.config.layer_id\n encdec_kvb = encdec_kvb.data.narrow(0, offset, hs * 2)\n weights += [encdec_kvw, encdec_kvb]\n weights[0] = weights[0].view(-1, hs)\n weights[2] = weights[2].view(-1, hs)\n weights[6] = weights[6].view(-1, hs)\n weights[8] = weights[8].view(-1, hs)\n weights[12] = weights[12].view(-1, hs)\n weights[14] = weights[14].view(hs, -1)\n weights[18] = weights[18].view(-1, hs)\n return weights\n\n def state_dict(self, destination=None, prefix=\"\", keep_vars=False):\n destination = state_dict(\n self, destination=destination, prefix=prefix, keep_vars=keep_vars\n )\n return destination\n\n def forward(\n self, decoder_states, encoder_out, encoder_padding_mask, cache, **kwargs\n ):\n \"\"\"\n decoder_states, [batch_size, trg_len, hidden_size] or [batch_size * beam_size, 1, hidden_size]\n 
encoder_out, [src_len, batch_size, hidden_size]\n encoder_padding_mask, [batch_size, src_len], 0 for non-pad, 1 for padding\n cache, dict, {\"dec_self_k\": [batch*beam, nh, step, hd],\n \"dec_self_v\": [batch*beam, nh, step, hd],\n \"encdec_kv\": [n_dec_layer * 2, batch_size, nhead, src_seq_len, head_dim]\n }\n \"\"\"\n self.config.training = self.training\n self.config.is_grad_enabled = torch.is_grad_enabled()\n decoder_states = decoder_states.contiguous()\n # [s, b, h] -> [b, s, h]\n encoder_out = encoder_out.transpose(0, 1).contiguous()\n encoder_padding_mask = (\n (encoder_padding_mask * -1e8).type_as(decoder_states).contiguous()\n )\n\n if self.config.fp16 and self.para.dtype != torch.half:\n if hasattr(self, \"para_16\"):\n self.para_16.copy_(self.para.to(torch.half))\n else:\n self.register_buffer(\"para_16\", self.para.clone().detach().half())\n\n if self.config.fp16:\n decoder_states = decoder_states.to(torch.half)\n encoder_out = encoder_out.to(torch.half)\n encoder_padding_mask = encoder_padding_mask.to(torch.half)\n\n self.__assign_layer_weight_grad()\n cache_list = []\n if cache is not None:\n # predict\n batch_beams = decoder_states.shape[0]\n if cache:\n # non-empty dict, step 1-n\n step = cache[\"dec_self_k\"].shape[2]\n # Thanks to [email protected]\n # for helping us find this bug.\n cache_list = [\n cache[\"dec_self_k\"].contiguous(),\n cache[\"dec_self_v\"].contiguous(),\n ]\n else:\n # empty dict, step 0\n step = 0\n if self.config.layer_id == 0:\n if step == 0:\n shape = (\n self.config.nlayer * 2,\n encoder_out.shape[0],\n encoder_out.shape[1] * self.config.hidden_size,\n )\n encdec_kv = torch.zeros(\n shape, dtype=decoder_states.dtype, device=decoder_states.device\n ).contiguous()\n cache[\"encdec_kv\"] = encdec_kv\n cache_list.append(cache[\"encdec_kv\"])\n head_dim = int(self.config.hidden_size / self.config.nhead)\n shape = (batch_beams, self.config.nhead, step + 1, head_dim)\n new_k = torch.zeros(\n shape, dtype=decoder_states.dtype, device=decoder_states.device\n ).contiguous()\n new_v = torch.zeros(\n shape, dtype=decoder_states.dtype, device=decoder_states.device\n ).contiguous()\n cache_list = [new_k, new_v] + cache_list\n cache[\"dec_self_k\"] = new_k\n cache[\"dec_self_v\"] = new_v\n self.config.training = False\n bs, sl, dim = decoder_states.size()\n if bs * sl > self.config.max_batch_tokens:\n raise ValueError(\n f\"Batch token numbers {bs * sl} exceeds the limit\"\n f\" {self.config.max_batch_tokens}.\"\n )\n if sl > self.config.max_seq_len:\n raise ValueError(\n f\"Sequence length {sl} exceeds the limit {self.config.max_seq_len}.\"\n )\n if len(encoder_padding_mask.size()) == 1:\n assert encoder_out.size(0) == 1 and encoder_out.size(\n 1\n ) == encoder_padding_mask.size(0)\n else:\n assert encoder_out.size(0) == encoder_padding_mask.size(\n 0\n ) and encoder_out.size(1) == encoder_padding_mask.size(1)\n if cache is None:\n assert bs == encoder_out.size(0)\n else:\n assert bs % encoder_out.size(0) == 0\n output = LSTransformerDecoderFunc.apply(\n decoder_states,\n encoder_out,\n encoder_padding_mask,\n self.para,\n self.config,\n cache_list,\n )\n return output.to(self.para)\n" ]
[ [ "torch.empty_like", "torch.cuda.set_device", "torch.cat", "torch.Tensor", "torch.zeros", "torch.nn.init._calculate_fan_in_and_fan_out", "torch.is_grad_enabled" ] ]
0mza987/azureml-examples
[ "2abb872f1278d4b4e65587e033f38a058512b2e3" ]
[ "cli/jobs/single-step/pytorch/word-language-model/src/generate.py" ]
[ "# Copyright (c) 2017 Facebook, Inc. All rights reserved.\n# BSD 3-Clause License\n#\n# Example adapted from: https://github.com/pytorch/examples/tree/master/word_language_model\n# ==============================================================================\n\n###############################################################################\n# Language Modeling on Wikitext-2\n#\n# This file generates new sentences sampled from the language model\n#\n###############################################################################\n\nimport argparse\n\nimport torch\n\nimport data\n\nparser = argparse.ArgumentParser(description=\"PyTorch Wikitext-2 Language Model\")\n\n# Model parameters.\nparser.add_argument(\n \"--data\", type=str, default=\"./data/wikitext-2\", help=\"location of the data corpus\"\n)\nparser.add_argument(\n \"--checkpoint\", type=str, default=\"./model.pt\", help=\"model checkpoint to use\"\n)\nparser.add_argument(\n \"--outf\", type=str, default=\"generated.txt\", help=\"output file for generated text\"\n)\nparser.add_argument(\n \"--words\", type=int, default=\"1000\", help=\"number of words to generate\"\n)\nparser.add_argument(\"--seed\", type=int, default=1111, help=\"random seed\")\nparser.add_argument(\"--cuda\", action=\"store_true\", help=\"use CUDA\")\nparser.add_argument(\n \"--temperature\",\n type=float,\n default=1.0,\n help=\"temperature - higher will increase diversity\",\n)\nparser.add_argument(\"--log-interval\", type=int, default=100, help=\"reporting interval\")\nargs = parser.parse_args()\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\nif args.temperature < 1e-3:\n parser.error(\"--temperature has to be greater or equal 1e-3\")\n\nwith open(args.checkpoint, \"rb\") as f:\n model = torch.load(f).to(device)\nmodel.eval()\n\ncorpus = data.Corpus(args.data)\nntokens = len(corpus.dictionary)\n\nis_transformer_model = (\n hasattr(model, \"model_type\") and model.model_type == \"Transformer\"\n)\nif not is_transformer_model:\n hidden = model.init_hidden(1)\ninput = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)\n\nwith open(args.outf, \"w\") as outf:\n with torch.no_grad(): # no tracking history\n for i in range(args.words):\n if is_transformer_model:\n output = model(input, False)\n word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n word_tensor = torch.Tensor([[word_idx]]).long().to(device)\n input = torch.cat([input, word_tensor], 0)\n else:\n output, hidden = model(input, hidden)\n word_weights = output.squeeze().div(args.temperature).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n input.fill_(word_idx)\n\n word = corpus.dictionary.idx2word[word_idx]\n\n outf.write(word + (\"\\n\" if i % 20 == 19 else \" \"))\n\n if i % args.log_interval == 0:\n print(\"| Generated {}/{} words\".format(i, args.words))\n" ]
[ [ "torch.randint", "torch.Tensor", "torch.load", "torch.cat", "torch.manual_seed", "torch.multinomial", "torch.no_grad", "torch.cuda.is_available", "torch.device" ] ]
Wolfgang9999/image-super-resolution
[ "6e22da94711e9fc95d012cf84b0944a1000faebf" ]
[ "modules/data.py" ]
[ "import os\r\nimport random\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\n# DATA_PATH = \"/media/shareef/MLDev/Datasets/DIV2K/DIV2K_train_HR\"\r\nDATA_PATH=\"data/DIV2K_valid_HR/SampleDataISR\"\r\n\r\ndef scale_input_image(img):\r\n #img/ 255.\r\n return tf.image.convert_image_dtype(img, dtype=tf.float32)\r\n\r\ndef unscale_output_image(img):\r\n #img * 255\r\n return tf.image.convert_image_dtype(img, dtype=tf.uint8, saturate=True)\r\n\r\ndef random_crop_and_flip(img, random_crop_size):\r\n assert img.shape[2] == 3\r\n height, width = img.shape[0], img.shape[1]\r\n dy, dx = random_crop_size\r\n x = np.random.randint(0, width - dx + 1)\r\n y = np.random.randint(0, height - dy + 1)\r\n image = img[y:(y+dy), x:(x+dx), :]\r\n flip_case = tf.random.uniform([1], 0, 2, dtype=tf.int32)\r\n if(tf.equal(flip_case, 0)):\r\n image = tf.image.flip_left_right(image)\r\n return image\r\n\r\ndef load_and_preprocess_image(image_path, hr_height, hr_width, crop_per_image, ext):\r\n assert ext in ['.png', '.jpg', '.jpeg', '.JPEG']\r\n image = tf.io.read_file(image_path)\r\n if ext == '.png':\r\n image = tf.image.decode_png(image, channels=3)\r\n else:\r\n image = tf.image.decode_jpeg(image, channels=3)\r\n\r\n image = scale_input_image(image)\r\n cropped_images = [ random_crop_and_flip(image, (hr_height, hr_width)) for _ in range(crop_per_image)] \r\n\r\n return cropped_images\r\n\r\n\r\n\r\ndef load_dataset(hr_height, hr_width, scale, crop_per_image=20, ext='.png'):\r\n image_paths = []\r\n for root, _, files in os.walk(DATA_PATH):\r\n for file in files:\r\n if f'{ext}' in file:\r\n image_paths.append(os.path.join(root, file))\r\n \r\n random.shuffle(image_paths)\r\n images = []\r\n for img_path in image_paths:\r\n images += load_and_preprocess_image(img_path, hr_height, hr_width, crop_per_image, ext)\r\n\r\n random.shuffle(images)\r\n hr_images = []\r\n lr_images = []\r\n for img in images:\r\n hr_image = img\r\n lr_shape = [int(hr_image.shape[0]/scale), int(hr_image.shape[1]/scale)]\r\n lr_image = tf.image.resize(hr_image, lr_shape, method=tf.image.ResizeMethod.BICUBIC)\r\n #lr_image = lr_image / 255\r\n lr_image = tf.clip_by_value(\r\n lr_image, 0, 1, name=None\r\n )\r\n hr_images.append(hr_image)\r\n lr_images.append(lr_image)\r\n\r\n lr_dataset = tf.data.Dataset.from_tensor_slices(lr_images)\r\n hr_dataset = tf.data.Dataset.from_tensor_slices(hr_images)\r\n\r\n dataset = tf.data.Dataset.zip((lr_dataset, hr_dataset))\r\n\r\n return dataset\r\n\r\n \r\n\r\n\r\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.image.decode_jpeg", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.random.uniform", "tensorflow.equal", "tensorflow.image.decode_png", "tensorflow.image.flip_left_right", "tensorflow.data.Dataset.zip", "tensorflow.image.resize", "tensorflow.image.convert_image_dtype", "tensorflow.io.read_file", "numpy.random.randint" ] ]
sega-hsj/Video_Feature
[ "4cb6d9a91504df9877c73d3fa73ee1a5adce14c0" ]
[ "videocnn/TSD/mmdet/models/detectors/two_stage.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom mmdet.core import (\n bbox2result,\n bbox2roi,\n bbox_mapping,\n build_assigner,\n build_sampler,\n merge_aug_bboxes,\n multiclass_nms,\n)\nfrom .. import builder\nfrom ..registry import DETECTORS\nfrom .base import BaseDetector\nfrom .test_mixins import BBoxTestMixin, MaskTestMixin, RPNTestMixin\n\n\[email protected]_module\nclass TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin, MaskTestMixin):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(\n self,\n backbone,\n neck=None,\n shared_head=None,\n rpn_head=None,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n ):\n super(TwoStageDetector, self).__init__()\n self.backbone = builder.build_backbone(backbone)\n\n if neck is not None:\n self.neck = builder.build_neck(neck)\n\n if shared_head is not None:\n self.shared_head = builder.build_shared_head(shared_head)\n\n if rpn_head is not None:\n self.rpn_head = builder.build_head(rpn_head)\n\n if bbox_head is not None:\n self.bbox_roi_extractor = builder.build_roi_extractor(bbox_roi_extractor)\n self.bbox_head = builder.build_head(bbox_head)\n self.use_TSD = \"TSD\" in bbox_head[\"type\"]\n\n if mask_head is not None:\n if mask_roi_extractor is not None:\n self.mask_roi_extractor = builder.build_roi_extractor(\n mask_roi_extractor\n )\n self.share_roi_extractor = False\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n self.mask_head = builder.build_head(mask_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_rpn(self):\n return hasattr(self, \"rpn_head\") and self.rpn_head is not None\n\n def init_weights(self, pretrained=None):\n super(TwoStageDetector, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_shared_head:\n self.shared_head.init_weights(pretrained=pretrained)\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_bbox:\n self.bbox_roi_extractor.init_weights()\n self.bbox_head.init_weights()\n if self.with_mask:\n self.mask_head.init_weights()\n if not self.share_roi_extractor:\n self.mask_roi_extractor.init_weights()\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck\n \"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs,)\n proposals = torch.randn(1000, 4).to(device=img.device)\n # bbox head\n rois = bbox2roi([proposals])\n if self.with_bbox:\n bbox_feats = self.bbox_roi_extractor(\n x[: self.bbox_roi_extractor.num_inputs], rois\n )\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n outs = outs + (cls_score, bbox_pred)\n # mask head\n if self.with_mask:\n mask_rois = rois[:100]\n mask_feats = self.mask_roi_extractor(\n x[: self.mask_roi_extractor.num_inputs], mask_rois\n )\n if 
self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n mask_pred = self.mask_head(mask_feats)\n outs = outs + (mask_pred,)\n return outs\n\n def forward_train(\n self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n ):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n rpn_loss_inputs = rpn_outs + (gt_bboxes, img_metas, self.train_cfg.rpn)\n rpn_losses = self.rpn_head.loss(\n *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore\n )\n losses.update(rpn_losses)\n\n proposal_cfg = self.train_cfg.get(\"rpn_proposal\", self.test_cfg.rpn)\n proposal_inputs = rpn_outs + (img_metas, proposal_cfg)\n proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)\n else:\n proposal_list = proposals\n\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)\n bbox_sampler = build_sampler(self.train_cfg.rcnn.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]\n )\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x],\n )\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n if self.with_bbox:\n rois = bbox2roi([res.bboxes for res in sampling_results])\n # TODO: a more flexible way to decide which feature maps to use\n bbox_feats = self.bbox_roi_extractor(\n x[: self.bbox_roi_extractor.num_inputs], rois\n )\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n if self.use_TSD:\n cls_score, bbox_pred, TSD_cls_score, TSD_bbox_pred, delta_c, delta_r = self.bbox_head(\n bbox_feats, x[: self.bbox_roi_extractor.num_inputs], rois\n )\n\n bbox_targets = self.bbox_head.get_target(\n rois,\n sampling_results,\n gt_bboxes,\n gt_labels,\n delta_c,\n delta_r,\n cls_score,\n bbox_pred,\n TSD_cls_score,\n TSD_bbox_pred,\n self.train_cfg.rcnn,\n img_metas,\n )\n\n loss_bbox = self.bbox_head.loss(\n cls_score, bbox_pred, TSD_cls_score, TSD_bbox_pred, *bbox_targets\n )\n losses.update(loss_bbox)\n\n else:\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n bbox_targets = self.bbox_head.get_target(\n sampling_results, 
gt_bboxes, gt_labels, self.train_cfg.rcnn\n )\n loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets)\n losses.update(loss_bbox)\n\n # mask head forward and loss\n if self.with_mask:\n if not self.share_roi_extractor:\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_feats = self.mask_roi_extractor(\n x[: self.mask_roi_extractor.num_inputs], pos_rois\n )\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0], device=device, dtype=torch.uint8\n )\n )\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0], device=device, dtype=torch.uint8\n )\n )\n pos_inds = torch.cat(pos_inds)\n mask_feats = bbox_feats[pos_inds]\n\n if mask_feats.shape[0] > 0:\n mask_pred = self.mask_head(mask_feats)\n mask_targets = self.mask_head.get_target(\n sampling_results, gt_masks, self.train_cfg.rcnn\n )\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_pred, mask_targets, pos_labels)\n losses.update(loss_mask)\n\n return losses\n\n async def async_simple_test(self, img, img_meta, proposals=None, rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, \"Bbox head must be implemented.\"\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.async_test_rpn(x, img_meta, self.test_cfg.rpn)\n else:\n proposal_list = proposals\n\n det_bboxes, det_labels = await self.async_test_bboxes(\n x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale\n )\n bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = await self.async_test_mask(\n x,\n img_meta,\n det_bboxes,\n det_labels,\n rescale=rescale,\n mask_test_cfg=self.test_cfg.get(\"mask\"),\n )\n return bbox_results, segm_results\n\n def tsd_simple_test_bboxes(\n self, x, img_metas, proposals, rcnn_test_cfg, rescale=False\n ):\n \"\"\"Test only det bboxes without augmentation.\"\"\"\n rois = bbox2roi(proposals)\n roi_feats = self.bbox_roi_extractor(\n x[: len(self.bbox_roi_extractor.featmap_strides)], rois\n )\n if self.with_shared_head:\n roi_feats = self.shared_head(roi_feats)\n cls_score, bbox_pred, TSD_cls_score, TSD_bbox_pred, delta_c, delta_r = self.bbox_head(\n roi_feats, x[: self.bbox_roi_extractor.num_inputs], rois\n )\n img_shape = img_metas[0][\"img_shape\"]\n scale_factor = img_metas[0][\"scale_factor\"]\n\n w = rois[:, 3] - rois[:, 1] + 1\n h = rois[:, 4] - rois[:, 2] + 1\n scale = 0.1\n rois_r = rois.new_zeros(rois.shape[0], rois.shape[1])\n rois_r[:, 0] = rois[:, 0]\n delta_r = delta_r.to(dtype=rois_r.dtype)\n rois_r[:, 1] = rois[:, 1] + delta_r[:, 0] * scale * w\n rois_r[:, 2] = rois[:, 2] + delta_r[:, 1] * scale * h\n rois_r[:, 3] = rois[:, 3] + delta_r[:, 0] * scale * w\n rois_r[:, 4] = rois[:, 4] + delta_r[:, 1] * scale * h\n\n det_bboxes, det_labels = self.bbox_head.get_det_bboxes(\n rois_r,\n TSD_cls_score,\n TSD_bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg,\n )\n return det_bboxes, det_labels\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, \"Bbox head must be implemented.\"\n\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = self.simple_test_rpn(x, img_metas, self.test_cfg.rpn)\n 
else:\n proposal_list = proposals\n if self.use_TSD:\n det_bboxes, det_labels = self.tsd_simple_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg.rcnn, rescale=rescale\n )\n else:\n det_bboxes, det_labels = self.simple_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg.rcnn, rescale=rescale\n )\n \n bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n \n return bbox_results, x\n # if not self.with_mask:\n # return bbox_results\n # else:\n # segm_results = self.simple_test_mask(\n # x, img_metas, det_bboxes, det_labels, rescale=rescale\n # )\n # return bbox_results, segm_results\n\n # support for TSD mstest\n def tsd_aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(feats, img_metas):\n # only one image in the batch\n img_shape = img_meta[0][\"img_shape\"]\n scale_factor = img_meta[0][\"scale_factor\"]\n flip = img_meta[0][\"flip\"]\n # TODO more flexible\n proposals = bbox_mapping(\n proposal_list[0][:, :4], img_shape, scale_factor, flip\n )\n rois = bbox2roi([proposals])\n # recompute feature maps to save GPU memory\n roi_feats = self.bbox_roi_extractor(\n x[: len(self.bbox_roi_extractor.featmap_strides)], rois\n )\n cls_score, bbox_pred, TSD_cls_score, TSD_bbox_pred, delta_c, delta_r = self.bbox_head(\n roi_feats, x[: self.bbox_roi_extractor.num_inputs], rois\n )\n\n w = rois[:, 3] - rois[:, 1] + 1\n h = rois[:, 4] - rois[:, 2] + 1\n scale = 0.1\n rois_r = rois.new_zeros(rois.shape[0], rois.shape[1])\n rois_r[:, 0] = rois[:, 0]\n delta_r = delta_r.to(dtype=rois_r.dtype)\n rois_r[:, 1] = rois[:, 1] + delta_r[:, 0] * scale * w\n rois_r[:, 2] = rois[:, 2] + delta_r[:, 1] * scale * h\n rois_r[:, 3] = rois[:, 3] + delta_r[:, 0] * scale * w\n rois_r[:, 4] = rois[:, 4] + delta_r[:, 1] * scale * h\n\n bboxes, scores = self.bbox_head.get_det_bboxes(\n rois_r,\n TSD_cls_score,\n TSD_bbox_pred,\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None,\n )\n\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg\n )\n det_bboxes, det_labels = multiclass_nms(\n merged_bboxes,\n merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img,\n )\n return det_bboxes, det_labels\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # recompute feats to save memory\n proposal_list = self.aug_test_rpn(\n self.extract_feats(imgs), img_metas, self.test_cfg.rpn\n )\n\n if self.use_TSD:\n det_bboxes, det_labels = self.tsd_aug_test_bboxes(\n self.extract_feats(imgs), img_metas, proposal_list, self.test_cfg.rcnn\n )\n else:\n det_bboxes, det_labels = self.aug_test_bboxes(\n self.extract_feats(imgs), img_metas, proposal_list, self.test_cfg.rcnn\n )\n\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= img_metas[0][0][\"scale_factor\"]\n bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(\n self.extract_feats(imgs), img_metas, det_bboxes, det_labels\n )\n return bbox_results, segm_results\n else:\n return bbox_results\n" ]
[ [ "torch.randn", "torch.zeros", "torch.ones", "torch.cat" ] ]
gmum/lcw-generator
[ "fde1128505194bd04f04bbddcbe7fcec453b0052" ]
[ "src/common/math.py" ]
[ "import torch\r\n\r\n\r\ndef pairwise_distances(x: torch.Tensor, y: torch.Tensor = None) -> torch.Tensor:\r\n if y is None:\r\n y = x\r\n return torch.cdist(x, y)**2\r\n\r\n\r\ndef euclidean_norm_squared(X: torch.Tensor, axis: int) -> torch.Tensor:\r\n return torch.linalg.norm(X, 2, axis)**2\r\n" ]
[ [ "torch.cdist", "torch.linalg.norm" ] ]
Rabbit1010/TensorFlow2.0-Tutorial-2019
[ "def2ec0a93d73d81b9d95e60639ebe6bed383579" ]
[ "Topic2/4_UNet.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 8 20:13:52 2019\n\n@author: Wei-Hsiang, Shen\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n\ndef downsample(filters, size, apply_batchnorm=True):\n result = tf.keras.Sequential()\n result.add(layers.Conv2D(filters, size, strides=2, padding='same', use_bias=False))\n\n if apply_batchnorm:\n result.add(layers.BatchNormalization())\n\n result.add(layers.LeakyReLU())\n\n return result\n\ndef upsample(filters, size, apply_dropout=False):\n result = tf.keras.Sequential()\n result.add(layers.Conv2DTranspose(filters, size, strides=2, padding='same', use_bias=False))\n\n result.add(layers.BatchNormalization())\n\n if apply_dropout:\n result.add(layers.Dropout(0.5))\n\n result.add(layers.ReLU())\n\n return result\n\ndef Net():\n input_tensor = tf.keras.layers.Input(shape=[256,256,3])\n\n # a list of down sample blocks\n down_stack = [\n downsample(64, 4),\n downsample(128, 4),\n downsample(256, 4),\n downsample(512, 4),\n ]\n\n # a list of up sample blocks\n up_stack = [\n upsample(512, 4),\n upsample(256, 4),\n upsample(128, 4),\n upsample(64, 4),\n ]\n\n # Downsampling through the model\n x = input_tensor\n skips = []\n for down_block in down_stack:\n x = down_block(x)\n skips.append(x)\n\n # Upsampling and establishing the skip connections\n for up_block, skip_tensor in zip(up_stack, reversed(skips[:-1])):\n x = up_block(x)\n x = layers.Concatenate(axis=-1)([x, skip_tensor])\n\n x = layers.Conv2DTranspose(3, 4, strides=2, padding='same', activation='tanh')(x)\n\n return tf.keras.Model(inputs=input_tensor, outputs=x)\n\nif __name__ == '__main__':\n model = Net()\n\n # Plot and inspect the model\n model.summary()\n tf.keras.utils.plot_model(model, '4_UNet.png', show_shapes=True)" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.layers.ReLU", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.utils.plot_model", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.Model", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Input" ] ]
maksym-taranukhin/lightning-transformers
[ "aa7202657973b5b65c3c36eb745621043859ebc4" ]
[ "lightning_transformers/core/callback.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nimport torch\nfrom pytorch_lightning import Callback\nfrom pytorch_lightning.utilities import rank_zero_info\n\n\nclass CUDACallback(Callback):\n\n def on_train_epoch_start(self, trainer, pl_module):\n # Reset the memory use counter\n torch.cuda.reset_peak_memory_stats(trainer.root_gpu)\n torch.cuda.synchronize(trainer.root_gpu)\n self.start_time = time.time()\n\n def on_train_epoch_end(self, trainer, pl_module, outputs):\n torch.cuda.synchronize(trainer.root_gpu)\n max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2**20\n epoch_time = time.time() - self.start_time\n\n max_memory = trainer.training_type_plugin.reduce(max_memory)\n epoch_time = trainer.training_type_plugin.reduce(epoch_time)\n\n rank_zero_info(f\"Average Epoch time: {epoch_time:.2f} seconds\")\n rank_zero_info(f\"Average Peak memory {max_memory:.2f}MiB\")\n" ]
[ [ "torch.cuda.max_memory_allocated", "torch.cuda.synchronize", "torch.cuda.reset_peak_memory_stats" ] ]
Mistobaan/tensor2tensor
[ "91d4e1c83f9abb1ca8fcd94a65d6b74aaa3458da" ]
[ "tensor2tensor/layers/common_layers.py" ]
[ "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Layers common to multiple models.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport contextlib\nimport math\nimport random\n\n# Dependency imports\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom tensor2tensor.utils import expert_utils as eu\n\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\n\n# This is a global setting. When turned off, no @function.Defun is used.\nallow_defun = False\n\n\ndef saturating_sigmoid(x):\n \"\"\"Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].\"\"\"\n with tf.name_scope(\"saturating_sigmoid\", [x]):\n y = tf.sigmoid(x)\n return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))\n\n\ndef hard_sigmoid(x, saturation_limit=0.9):\n saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))\n x_shifted = 0.5 * x + 0.5\n return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost\n\n\ndef hard_tanh(x, saturation_limit=0.9):\n saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))\n return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost\n\n\ndef inverse_exp_decay(max_step, min_value=0.01):\n \"\"\"Inverse-decay exponentially from 0.01 to 1.0 reached at max_step.\"\"\"\n inv_base = tf.exp(tf.log(min_value) / float(max_step))\n step = tf.to_float(tf.train.get_global_step())\n return inv_base**tf.maximum(float(max_step) - step, 0.0)\n\n\ndef inverse_lin_decay(max_step, min_value=0.01):\n \"\"\"Inverse-decay linearly from 0.01 to 1.0 reached at max_step.\"\"\"\n step = tf.to_float(tf.train.get_global_step())\n progress = tf.minimum(step / float(max_step), 1.0)\n return progress * (1.0 - min_value) + min_value\n\n\ndef shakeshake2_py(x, y, equal=False, individual=False):\n \"\"\"The shake-shake sum of 2 tensors, python version.\"\"\"\n if equal:\n alpha = 0.5\n if individual:\n alpha = tf.random_uniform(tf.get_shape(x)[:1])\n else:\n alpha = tf.random_uniform([])\n\n return alpha * x + (1.0 - alpha) * y\n\n\[email protected]()\ndef shakeshake2_grad(x1, x2, dy):\n \"\"\"Overriding gradient for shake-shake of 2 tensors.\"\"\"\n y = shakeshake2_py(x1, x2)\n dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])\n return dx\n\n\[email protected]()\ndef shakeshake2_indiv_grad(x1, x2, dy):\n \"\"\"Overriding gradient for shake-shake of 2 tensors.\"\"\"\n y = shakeshake2_py(x1, x2, individual=True)\n dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])\n return dx\n\n\[email protected]()\ndef shakeshake2_equal_grad(x1, x2, dy):\n \"\"\"Overriding gradient for shake-shake of 2 tensors.\"\"\"\n y = shakeshake2_py(x1, x2, equal=True)\n dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])\n return dx\n\n\[email protected](grad_func=shakeshake2_grad)\ndef shakeshake2(x1, x2):\n \"\"\"The 
shake-shake function with a different alpha for forward/backward.\"\"\"\n return shakeshake2_py(x1, x2)\n\n\[email protected](grad_func=shakeshake2_indiv_grad)\ndef shakeshake2_indiv(x1, x2):\n return shakeshake2_py(x1, x2, individual=True)\n\n\[email protected](grad_func=shakeshake2_equal_grad)\ndef shakeshake2_eqgrad(x1, x2):\n \"\"\"The shake-shake function with a different alpha for forward/backward.\"\"\"\n return shakeshake2_py(x1, x2)\n\n\ndef shakeshake(xs, equal_grad=False):\n \"\"\"Multi-argument shake-shake, currently approximated by sums of 2.\"\"\"\n if len(xs) == 1:\n return xs[0]\n div = (len(xs) + 1) // 2\n arg1 = shakeshake(xs[:div], equal_grad=equal_grad)\n arg2 = shakeshake(xs[div:], equal_grad=equal_grad)\n if equal_grad:\n return shakeshake2_eqgrad(arg1, arg2)\n return shakeshake2(arg1, arg2)\n\n\ndef standardize_images(x):\n \"\"\"Image standardization on batches (tf.image.per_image_standardization).\"\"\"\n with tf.name_scope(\"standardize_images\", [x]):\n x = tf.to_float(x)\n x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keep_dims=True)\n x_variance = tf.reduce_mean(\n tf.square(x - x_mean), axis=[1, 2, 3], keep_dims=True)\n num_pixels = tf.to_float(tf.shape(x)[1] * tf.shape(x)[2] * 3)\n x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))\n # TODO(lukaszkaiser): remove hack below, needed for greedy decoding for now.\n if x.shape and len(x.shape) == 4 and x.shape[3] == 1:\n x = tf.concat([x, x, x], axis=3) # Not used, just a dead tf.cond branch.\n x.set_shape([None, None, None, 3])\n return x\n\n\ndef image_augmentation(images, do_colors=False):\n \"\"\"Image augmentation: cropping, flipping, and color transforms.\"\"\"\n images = tf.random_crop(images, [299, 299, 3])\n images = tf.image.random_flip_left_right(images)\n if do_colors: # More augmentation, but might be slow.\n images = tf.image.random_brightness(images, max_delta=32. / 255.)\n images = tf.image.random_saturation(images, lower=0.5, upper=1.5)\n images = tf.image.random_hue(images, max_delta=0.2)\n images = tf.image.random_contrast(images, lower=0.5, upper=1.5)\n return images\n\n\ndef cifar_image_augmentation(images):\n \"\"\"Image augmentation suitable for CIFAR-10/100.\n\n As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).\n\n Args:\n images: a Tensor.\n Returns:\n Tensor of the same shape as images.\n \"\"\"\n images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)\n images = tf.random_crop(images, [32, 32, 3])\n images = tf.image.random_flip_left_right(images)\n return images\n\n\ndef flatten4d3d(x):\n \"\"\"Flatten a 4d-tensor into a 3d-tensor by joining width and height.\"\"\"\n xshape = tf.shape(x)\n result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])\n # Preserve static shapes when available.\n xshape_static = x.get_shape()\n result.set_shape([xshape_static[0], None, xshape_static[3]])\n return result\n\n\ndef embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0):\n \"\"\"Embed x of type int64 into dense vectors, reducing to max 4 dimensions.\"\"\"\n with tf.variable_scope(\n name, default_name=\"embedding\", values=[x], reuse=reuse):\n embedding_var = tf.get_variable(\"kernel\", [vocab_size, dense_size])\n # On the backwards pass, we want to convert the gradient from\n # an indexed-slices to a regular tensor before sending it back to the\n # parameter server. 
This avoids excess computation on the parameter server.\n embedding_var = eu.convert_gradient_to_tensor(embedding_var)\n emb_x = tf.gather(embedding_var, x)\n if multiplier != 1.0:\n emb_x *= multiplier\n shape, static_shape = tf.shape(emb_x), emb_x.shape.as_list()\n if not static_shape or len(static_shape) < 5:\n return emb_x\n # If we had extra channel dimensions, assume it's 1, i.e. shape[3] == 1.\n assert len(static_shape) == 5\n return tf.reshape(emb_x, [shape[0], shape[1], shape[2], static_shape[4]])\n\n\ndef shift_right(x, pad_value=None):\n \"\"\"Shift the second dimension of x right by one.\"\"\"\n if pad_value is None:\n shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]\n else:\n shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]\n return shifted_targets\n\n\ndef shift_right_3d(x, pad_value=None):\n \"\"\"Shift the second dimension of x right by one.\"\"\"\n if pad_value is None:\n shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n else:\n shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]\n return shifted_targets\n\n\ndef conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):\n \"\"\"Use a strided convolution to downsample x by 2, `nbr_steps` times.\n\n We use stride and filter size 2 to avoid the checkerboard problem of deconvs.\n As detailed in http://distill.pub/2016/deconv-checkerboard/.\n\n Args:\n x: a `Tensor` with shape `[batch, spatial, depth]` or\n `[batch, spatial_1, spatial_2, depth]`\n nbr_steps: number of halving downsample rounds to apply\n output_filters: an int specifying the filter count for the convolutions\n name: a string\n reuse: a boolean\n\n Returns:\n a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or\n `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),\n output_filters]`\n \"\"\"\n with tf.variable_scope(\n name, default_name=\"conv_stride2_multistep\", values=[x], reuse=reuse):\n if nbr_steps == 0:\n out = conv(x, output_filters, (1, 1))\n return out, [out]\n hidden_layers = [x]\n for i in xrange(nbr_steps):\n hidden_layers.append(\n conv(\n hidden_layers[-1],\n output_filters, (2, 2),\n strides=2,\n activation=tf.nn.relu,\n name=\"conv\" + str(i)))\n return hidden_layers[-1], hidden_layers\n\n\ndef deconv_stride2_multistep(x,\n nbr_steps,\n output_filters,\n name=None,\n reuse=None):\n \"\"\"Use a deconvolution to upsample x by 2**`nbr_steps`.\n\n Args:\n x: a `Tensor` with shape `[batch, spatial, depth]` or\n `[batch, spatial_1, spatial_2, depth]`\n nbr_steps: an int specifying the number of doubling upsample rounds to\n apply.\n output_filters: an int specifying the filter count for the deconvolutions\n name: a string\n reuse: a boolean\n\n Returns:\n a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or\n `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),\n output_filters]`\n \"\"\"\n with tf.variable_scope(\n name, default_name=\"deconv_stride2_multistep\", values=[x], reuse=reuse):\n\n def deconv1d(cur, i):\n cur_shape = tf.shape(cur)\n thicker = conv(\n cur,\n output_filters * 2, (1, 1),\n padding=\"SAME\",\n activation=tf.nn.relu,\n name=\"deconv1d\" + str(i))\n return tf.reshape(thicker,\n [cur_shape[0], cur_shape[1] * 2, 1, output_filters])\n\n def deconv2d(cur, i):\n thicker = conv(\n cur,\n output_filters * 4, (1, 1),\n padding=\"SAME\",\n activation=tf.nn.relu,\n name=\"deconv2d\" + str(i))\n return tf.depth_to_space(thicker, 2)\n\n cur = x\n for i in xrange(nbr_steps):\n if 
cur.get_shape()[2] == 1:\n cur = deconv1d(cur, i)\n else:\n cur = tf.cond(\n tf.equal(tf.shape(cur)[2], 1),\n lambda idx=i: deconv1d(cur, idx),\n lambda idx=i: deconv2d(cur, idx))\n return cur\n\n\ndef conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):\n \"\"\"Conditional conv_fn making kernel 1d or 2d depending on inputs shape.\"\"\"\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4. \"\n \"Shape: \" + str(static_shape))\n # Add support for left padding.\n if \"padding\" in kwargs and kwargs[\"padding\"] == \"LEFT\":\n dilation_rate = (1, 1)\n if \"dilation_rate\" in kwargs:\n dilation_rate = kwargs[\"dilation_rate\"]\n assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1\n height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]\n cond_padding = tf.cond(\n tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding)\n # Set middle two dimensions to None to prevent convolution from complaining\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n kwargs[\"padding\"] = \"VALID\"\n\n def conv2d_kernel(kernel_size_arg, name_suffix):\n \"\"\"Call conv2d but add suffix to name.\"\"\"\n if \"name\" in kwargs:\n original_name = kwargs[\"name\"]\n name = kwargs.pop(\"name\") + \"_\" + name_suffix\n else:\n original_name = None\n name = \"conv_\" + name_suffix\n original_force2d = None\n if \"force2d\" in kwargs:\n original_force2d = kwargs.pop(\"force2d\")\n result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)\n if original_name is not None:\n kwargs[\"name\"] = original_name # Restore for other calls.\n if original_force2d is not None:\n kwargs[\"force2d\"] = original_force2d\n return result\n\n return conv2d_kernel(kernel_size, \"single\")\n\n\ndef conv(inputs, filters, kernel_size, dilation_rate=1, **kwargs):\n return conv_internal(\n tf.layers.conv2d,\n inputs,\n filters,\n kernel_size,\n dilation_rate=dilation_rate,\n **kwargs)\n\n\ndef conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):\n return tf.squeeze(\n conv(\n tf.expand_dims(inputs, 2),\n filters, (kernel_size, 1),\n dilation_rate=(dilation_rate, 1),\n **kwargs), 2)\n\n\ndef separable_conv(inputs, filters, kernel_size, **kwargs):\n return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size,\n **kwargs)\n\n\ndef subseparable_conv(inputs, filters, kernel_size, **kwargs):\n \"\"\"Sub-separable convolution. 
If separability == 0 it's a separable_conv.\"\"\"\n\n def conv_fn(inputs, filters, kernel_size, **kwargs):\n \"\"\"Sub-separable convolution, splits into separability-many blocks.\"\"\"\n separability = None\n if \"separability\" in kwargs:\n separability = kwargs.pop(\"separability\")\n if separability:\n parts = []\n abs_sep = separability if separability > 0 else -1 * separability\n for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):\n with tf.variable_scope(\"part_%d\" % split_idx):\n if separability > 0:\n parts.append(\n tf.layers.conv2d(split, filters // separability, kernel_size, **\n kwargs))\n else:\n parts.append(\n tf.layers.separable_conv2d(split, filters // abs_sep,\n kernel_size, **kwargs))\n if separability > 1:\n result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))\n elif abs_sep == 1: # If we have just one block, return it.\n assert len(parts) == 1\n result = parts[0]\n else:\n result = tf.concat(parts, axis=3)\n else:\n result = tf.layers.separable_conv2d(inputs, filters, kernel_size,\n **kwargs)\n if separability is not None:\n kwargs[\"separability\"] = separability\n return result\n\n return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)\n\n\ndef layer_norm_vars(filters):\n \"\"\"Create Variables for layer norm.\"\"\"\n scale = tf.get_variable(\n \"layer_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"layer_norm_bias\", [filters], initializer=tf.zeros_initializer())\n return scale, bias\n\n\ndef layer_norm_compute_python(x, epsilon, scale, bias):\n \"\"\"Layer norm raw computation.\"\"\"\n mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)\n variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True)\n norm_x = (x - mean) * tf.rsqrt(variance + epsilon)\n return norm_x * scale + bias\n\n\[email protected](compiled=True)\ndef layer_norm_compute_grad(x, epsilon, scale, bias, dy):\n y = layer_norm_compute_python(x, epsilon, scale, bias)\n dx = tf.gradients(ys=[y], xs=[x, epsilon, scale, bias], grad_ys=[dy])\n return dx\n\n\[email protected](\n compiled=True,\n separate_compiled_gradients=True,\n grad_func=layer_norm_compute_grad)\ndef layer_norm_compute(x, epsilon, scale, bias):\n return layer_norm_compute_python(x, epsilon, scale, bias)\n\n\ndef layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):\n \"\"\"Layer normalize the tensor x, averaging over the last dimension.\"\"\"\n if filters is None:\n filters = x.get_shape()[-1]\n with tf.variable_scope(\n name, default_name=\"layer_norm\", values=[x], reuse=reuse):\n scale = tf.get_variable(\n \"layer_norm_scale\", [filters], initializer=tf.ones_initializer())\n bias = tf.get_variable(\n \"layer_norm_bias\", [filters], initializer=tf.zeros_initializer())\n if allow_defun:\n result = layer_norm_compute(x, tf.constant(epsilon), scale, bias)\n result.set_shape(x.get_shape())\n else:\n result = layer_norm_compute_python(x, epsilon, scale, bias)\n return result\n\n\ndef noam_norm(x, epsilon=1.0, name=None):\n \"\"\"One version of layer normalization.\"\"\"\n with tf.name_scope(name, default_name=\"noam_norm\", values=[x]):\n shape = x.get_shape()\n ndims = len(shape)\n return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) *\n tf.sqrt(tf.to_float(shape[-1])))\n\n\ndef apply_norm(x, norm_type, depth, epsilon):\n \"\"\"Apply Normalization.\"\"\"\n if norm_type == \"layer\":\n return layer_norm(x, filters=depth, epsilon=epsilon)\n if norm_type == \"batch\":\n return tf.layers.batch_normalization(x, 
epsilon=epsilon)\n if norm_type == \"noam\":\n return noam_norm(x, epsilon)\n if norm_type == \"none\":\n return x\n raise ValueError(\"Parameter normalizer_fn must be one of: 'layer', 'batch',\"\n \"'noam', 'none'.\")\n\n\ndef layer_prepostprocess(previous_value,\n x,\n sequence,\n dropout_rate,\n norm_type,\n depth,\n epsilon,\n default_name,\n name=None):\n \"\"\"Apply a sequence of functions to the input or output of a layer.\n\n The sequence is specified as a string which may contain the following\n characters:\n a: add previous_value\n n: apply normalization\n d: apply dropout\n\n For example, if sequence==\"dna\", then the output is\n previous_value + normalize(dropout(x))\n\n Args:\n previous_value: A Tensor, to be added as a residual connection ('a')\n x: A Tensor to be transformed.\n sequence: a string.\n dropout_rate: a float\n norm_type: a string (see apply_norm())\n depth: an integer (size of last dimension of x).\n epsilon: a float (parameter for normalization)\n default_name: a string\n name: a string\n\n Returns:\n a Tensor\n \"\"\"\n with tf.variable_scope(name, default_name=default_name):\n if sequence == \"none\":\n return x\n for c in sequence:\n if c == \"a\":\n x += previous_value\n elif c == \"n\":\n x = apply_norm(x, norm_type, depth, epsilon)\n else:\n assert c == \"d\", (\"Unknown sequence step %s\" % c)\n x = tf.nn.dropout(x, 1.0 - dropout_rate)\n return x\n\n\ndef layer_preprocess(layer_input, hparams):\n \"\"\"Apply layer preprocessing.\n\n See layer_prepostprocess() for details.\n\n A hyperparemeters object is passed for convenience. The hyperparameters\n that may be used are:\n\n layer_preprocess_sequence\n layer_prepostprocess_dropout\n norm_type\n hidden_size\n norm_epsilon\n\n Args:\n layer_input: a Tensor\n hparams: a hyperparameters object.\n\n Returns:\n a Tensor\n \"\"\"\n assert \"a\" not in hparams.layer_preprocess_sequence, (\n \"No residual connections allowed in hparams.layer_preprocess_sequence\")\n return layer_prepostprocess(\n None,\n layer_input,\n sequence=hparams.layer_preprocess_sequence,\n dropout_rate=hparams.layer_prepostprocess_dropout,\n norm_type=hparams.norm_type,\n depth=hparams.hidden_size,\n epsilon=hparams.norm_epsilon,\n default_name=\"layer_prepostprocess\")\n\n\ndef layer_postprocess(layer_input, layer_output, hparams):\n \"\"\"Apply layer postprocessing.\n\n See layer_prepostprocess() for details.\n\n A hyperparemeters object is passed for convenience. The hyperparameters\n that may be used are:\n\n layer_postprocess_sequence\n layer_prepostprocess_dropout\n norm_type\n hidden_size\n norm_epsilon\n\n Args:\n layer_input: a Tensor\n layer_output: a Tensor\n hparams: a hyperparameters object.\n\n Returns:\n a Tensor\n \"\"\"\n return layer_prepostprocess(\n layer_input,\n layer_output,\n sequence=hparams.layer_postprocess_sequence,\n dropout_rate=hparams.layer_prepostprocess_dropout,\n norm_type=hparams.norm_type,\n depth=hparams.hidden_size,\n epsilon=hparams.norm_epsilon,\n default_name=\"layer_postprocess\")\n\n\ndef conv_block_internal(conv_fn,\n inputs,\n filters,\n dilation_rates_and_kernel_sizes,\n first_relu=True,\n use_elu=False,\n separabilities=None,\n **kwargs):\n \"\"\"A block of convolutions.\n\n Args:\n conv_fn: convolution function, e.g. 
conv or separable_conv.\n inputs: a Tensor\n filters: an Integer\n dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))\n first_relu: whether to do a relu at start (defaults to True)\n use_elu: whether to use ELUs instead of ReLUs (defaults to False)\n separabilities: list of separability factors (per-layer).\n **kwargs: additional arguments (e.g., pooling)\n\n Returns:\n a Tensor.\n \"\"\"\n\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n mask = kwargs.pop(\"mask\") if \"mask\" in kwargs else None\n\n # Usage for normalize_fn kwarg:\n # if not specified, use layer norm\n # if given normalize_fn=None, don't use any normalization\n # if given normalize_fn=norm, use the specified norm function\n\n use_layer_norm = \"normalizer_fn\" not in kwargs\n norm = kwargs.pop(\"normalizer_fn\", None)\n use_normalizer_fn = use_layer_norm or norm\n\n if use_layer_norm:\n norm = lambda x, name: layer_norm(x, filters, name=name)\n\n with tf.variable_scope(name, \"conv_block\", [inputs]):\n cur, counter = inputs, -1\n for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:\n counter += 1\n if first_relu or counter > 0:\n cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)\n if mask is not None:\n cur *= mask\n if separabilities:\n cur = conv_fn(\n cur,\n filters,\n kernel_size,\n dilation_rate=dilation_rate,\n name=\"conv_block_%d\" % counter,\n use_bias=norm is None,\n separability=separabilities[counter],\n **kwargs)\n else:\n cur = conv_fn(\n cur,\n filters,\n kernel_size,\n dilation_rate=dilation_rate,\n name=\"conv_block_%d\" % counter,\n use_bias=norm is None,\n **kwargs)\n if use_normalizer_fn:\n cur = norm(cur, name=\"conv_block_norm_%d\" % counter)\n return cur\n\n\ndef conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):\n \"\"\"A block of standard 2d convolutions.\"\"\"\n return conv_block_internal(conv, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):\n \"\"\"A block of standard 1d convolutions.\"\"\"\n return conv_block_internal(conv1d, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,\n **kwargs):\n \"\"\"A block of separable convolutions.\"\"\"\n return conv_block_internal(separable_conv, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,\n **kwargs):\n \"\"\"A block of separable convolutions.\"\"\"\n return conv_block_internal(subseparable_conv, inputs, filters,\n dilation_rates_and_kernel_sizes, **kwargs)\n\n\ndef pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):\n \"\"\"Pooling (supports \"LEFT\").\"\"\"\n with tf.name_scope(\"pool\", [inputs]):\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4.\")\n # Add support for left padding.\n if padding == \"LEFT\":\n assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1\n if len(static_shape) == 3:\n width_padding = 2 * (window_size[1] // 2)\n padding_ = [[0, 0], [width_padding, 0], [0, 0]]\n else:\n height_padding = 2 * (window_size[0] // 2)\n cond_padding = tf.cond(\n tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (window_size[1] // 2)))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding_ = [[0, 0], [height_padding, 0], 
[width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding_)\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n padding = \"VALID\"\n\n return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides)\n\n\ndef conv_block_downsample(x,\n kernel,\n strides,\n padding,\n separability=0,\n name=None,\n reuse=None):\n \"\"\"Implements a downwards-striding conv block, like Xception exit flow.\"\"\"\n with tf.variable_scope(\n name, default_name=\"conv_block_downsample\", values=[x], reuse=reuse):\n hidden_size = int(x.get_shape()[-1])\n res = conv_block(\n x,\n int(1.25 * hidden_size), [((1, 1), kernel)],\n padding=padding,\n strides=strides,\n name=\"res_conv\")\n\n x = subseparable_conv_block(\n x,\n hidden_size, [((1, 1), kernel)],\n padding=padding,\n separability=separability,\n name=\"conv0\")\n x = subseparable_conv_block(\n x,\n int(1.25 * hidden_size), [((1, 1), kernel)],\n padding=padding,\n separability=separability,\n name=\"conv1\")\n x = pool(x, kernel, \"MAX\", padding, strides=strides)\n\n x += res\n\n x = subseparable_conv_block(\n x,\n 2 * hidden_size, [((1, 1), kernel)],\n first_relu=False,\n padding=padding,\n separability=separability,\n name=\"conv2\")\n x = subseparable_conv_block(\n x,\n int(2.5 * hidden_size), [((1, 1), kernel)],\n padding=padding,\n separability=separability,\n name=\"conv3\")\n return x\n\n\ndef decompress_seqcnn(x,\n targets,\n targets_vocab_size,\n dilations_and_kernels,\n block_size,\n is_2d=False,\n embedding_var=None,\n name=None,\n reuse=None):\n \"\"\"Decompress x into targets size using a Sequence CNN at every element.\"\"\"\n with tf.variable_scope(\n name,\n default_name=\"decompress_batch_seqcnn\",\n values=[x, targets],\n reuse=reuse):\n # We assume targets are [batch x block_size * N x block_size * N x C] if\n # is_2d=True or [batch, block_size * N, 1, C] otherwise, and C is static.\n # Let's shift targets to depth and embed.\n targets_shape, targets_shape_static = tf.shape(targets), targets.get_shape()\n channels = int(targets_shape_static[-1])\n hidden_size = int(x.get_shape()[-1])\n if is_2d:\n depth_targets = tf.space_to_depth(targets, block_size)\n factor = channels * block_size * block_size\n else:\n depth_targets = tf.reshape(targets, [\n targets_shape[0], targets_shape[1] // block_size, 1,\n channels * block_size\n ])\n factor = channels * block_size\n if embedding_var is None:\n embedding_var = tf.get_variable(\"targets_embedding\",\n [targets_vocab_size, hidden_size])\n targets_emb = tf.gather(embedding_var, depth_targets)\n # Flatten x and embedded targets. 
Flat targets are factor* larger on axis=1.\n flat_x = tf.reshape(x, [-1, 1, 1, hidden_size])\n flat_targets = tf.reshape(targets_emb, [-1, factor, 1, hidden_size])\n shifted_targets = shift_right(flat_targets)\n # Run a SeqCNN large-batch to produce factor outputs out of every target.\n flat_x += tf.zeros_like(shifted_targets) # Broadcast on axis=1.\n flat_outputs = conv_block(\n tf.concat([flat_x, shifted_targets], axis=3),\n hidden_size,\n dilations_and_kernels,\n padding=\"LEFT\")\n # Reshape back to embedded targets shape.\n outputs = tf.reshape(flat_outputs, [\n tf.shape(targets_emb)[0],\n tf.shape(targets_emb)[1],\n tf.shape(targets_emb)[2], factor * hidden_size\n ])\n # Move depth back to target space.\n if is_2d:\n outputs = tf.depth_to_space(outputs, 2)\n else:\n outputs = tf.reshape(outputs, [\n tf.shape(outputs)[0], block_size * tf.shape(outputs)[1], 1,\n hidden_size\n ])\n # Final reshape before prediction to ensure target size.\n outputs = tf.reshape(outputs, [\n targets_shape[0], targets_shape[1], targets_shape[2], channels,\n hidden_size\n ])\n return tf.layers.dense(outputs, targets_vocab_size)\n\n\ndef simple_attention(target, source, bias=None):\n \"\"\"A simple attention function.\n\n Args:\n target: a `Tensor` with shape `[batch, target_timesteps, depth]` or\n `[batch, target_timesteps_1, target_timesteps_2, depth]`\n source: a `Tensor` with shape `[batch, source_timesteps, depth]` or\n `[batch, source_timesteps_1, source_timesteps_2, depth]`\n bias: an optional `Tensor` with shape `[batch, timesteps, 1, 1]` used\n to mask the attention to not attend to padding of input.\n\n Returns:\n a `Tensor` with same shape as `target`\n \"\"\"\n with tf.name_scope(\"simple_attention\", [target, source]):\n target_shape = tf.shape(target)\n source_shape = tf.shape(source)\n target = tf.reshape(target, [\n target_shape[0], target_shape[1] * target_shape[2], target_shape[3]\n ])\n source = tf.reshape(source, [\n source_shape[0], source_shape[1] * source_shape[2], source_shape[3]\n ])\n attention = tf.matmul(target, source, transpose_b=True)\n attention *= tf.rsqrt(tf.to_float(tf.shape(target)[2]))\n if bias is not None:\n attention += tf.expand_dims(tf.squeeze(bias, axis=[2, 3]), axis=1)\n attention = tf.nn.softmax(attention)\n if not tf.get_variable_scope().reuse:\n tf.summary.image(\"attention\", tf.expand_dims(attention, 3), max_outputs=5)\n attended = tf.matmul(attention, source)\n return tf.reshape(attended, target_shape)\n\n\ndef multiscale_conv_sum(inputs, output_size, dilation_rates_and_kernel_sizes,\n pooling_type, **kwargs):\n \"\"\"Sum of several dilated convolutions.\n\n For all convolutions with dilation_rate > 1, we first pool the input with\n width dilation_rate.\n\n Args:\n inputs: a Tensor\n output_size: an Integer\n dilation_rates_and_kernel_sizes: a list of pairs (dilation, kernel_size)\n pooling_type: \"AVG\" or \"MAX\"\n **kwargs: additional\n\n Returns:\n a Tensor.\n \"\"\"\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n with tf.variable_scope(name, \"multiscale_conv_sum\", [inputs]):\n padding = kwargs[\"padding\"]\n results, counter = [], -1\n for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:\n counter += 1\n if dilation_rate[0] > 1:\n pooled = pool(inputs, kernel_size, pooling_type, padding)\n else:\n pooled = inputs\n results.append(\n conv(\n pooled,\n output_size,\n kernel_size,\n dilation_rate=dilation_rate,\n name=\"conv_layer%d\" % counter,\n **kwargs))\n return tf.add_n(results) * (len(results)**-0.5)\n\n\ndef 
multiscale_conv_and_attention(x, padding, hparams, source=None):\n \"\"\"A common part of t2t layers.\n\n First, do a linear multiscale convolution\n Second, do attention (if source is not None)\n\n Applies residuals and normalization on both steps.\n\n Args:\n x: a Tensor.\n padding: a padding type\n hparams: hyperparameters for model\n source: optional source tensor for attention. (encoder output)\n\n Returns:\n a Tensor.\n \"\"\"\n # TODO(noam): The number of different scales should be a hyperparameter.\n conv_sum = multiscale_conv_sum(\n x,\n hparams.hidden_size, [((hparams.kernel_height**i, hparams.kernel_width**\n i), (hparams.kernel_height, hparams.kernel_width))\n for i in xrange(3)],\n \"AVG\",\n padding=padding)\n # For residuals a rescale if necessary if channels differ.\n if x.get_shape().as_list()[-1] != conv_sum.get_shape().as_list()[-1]:\n x = conv(x, hparams.hidden_size, (1, 1))\n x = noam_norm(x + conv_sum)\n if source is not None:\n x = noam_norm(x + simple_attention(x, source))\n return x\n\n\ndef conv_with_pools(inputs, output_size, kernel_size, pool_sizes, pooling_type,\n **kwargs):\n \"\"\"Convolution plus 1x1 convolution applied to specified pools.\n\n For example we might do a regular convolution with kernel size (3, 1),\n and pools of sizes [(9, 1), (27, 1)].\n\n Args:\n inputs: a Tensor\n output_size: an Integer\n kernel_size: a tuple of integers\n pool_sizes: a list of tuples of integers.\n pooling_type: \"AVG\" or \"MAX\"\n **kwargs: additional keyword args for conv\n\n Returns:\n a Tensor.\n \"\"\"\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n with tf.variable_scope(name, \"conv_with_pools\", [inputs]):\n padding = kwargs[\"padding\"]\n results = []\n results.append(conv(inputs, output_size, kernel_size, **kwargs))\n for i, pool_size in enumerate(pool_sizes):\n pooled = pool(inputs, pool_size, pooling_type, padding)\n results.append(\n conv(pooled, output_size, (1, 1), name=\"pool_%d\" % i, **kwargs))\n return tf.add_n(results) * (len(results)**-0.5)\n\n\ndef conv_with_pools_and_attention(x, padding, hparams, source=None):\n \"\"\"A common part of t2t layers.\n\n First, do conv_with_pools\n Second, do attention (if source is not None)\n\n Applies residuals and normalization on both steps.\n\n Args:\n x: a Tensor.\n padding: a padding type\n hparams: hyperparameters for model\n source: optional source tensor for attention. (encoder output)\n\n Returns:\n a Tensor.\n \"\"\"\n conv_sum = conv_with_pools(\n x,\n hparams.hidden_size, (hparams.kernel_height, hparams.kernel_width),\n hparams.pool_sizes,\n \"AVG\",\n padding=padding)\n if x.get_shape().as_list()[-1] == conv_sum.get_shape().as_list()[-1]:\n conv_sum += x\n x = noam_norm(conv_sum)\n if source is not None:\n x = noam_norm(x + simple_attention(x, source))\n return x\n\n\ndef get_timing_signal(length,\n min_timescale=1,\n max_timescale=1e4,\n num_timescales=16):\n \"\"\"Create Tensor of sinusoids of different frequencies.\n\n Args:\n length: Length of the Tensor to create, i.e. 
Number of steps.\n min_timescale: a float\n max_timescale: a float\n num_timescales: an int\n\n Returns:\n Tensor of shape (length, 2*num_timescales)\n \"\"\"\n positions = tf.to_float(tf.range(length))\n log_timescale_increment = (math.log(max_timescale / min_timescale) /\n (num_timescales - 1))\n inv_timescales = min_timescale * tf.exp(\n tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\n scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)\n return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\n\n\ndef add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):\n \"\"\"Adds a bunch of sinusoids of different frequencies to a Tensor.\n\n This allows attention to learn to use absolute and relative positions.\n The timing signal should be added to some precursor of both the source\n and the target of the attention.\n\n The use of relative position is possible because sin(x+y) and cos(x+y) can be\n experessed in terms of y, sin(x) and cos(x).\n\n In particular, we use a geometric sequence of timescales starting with\n min_timescale and ending with max_timescale. For each timescale, we\n generate the two sinusoidal signals sin(timestep/timescale) and\n cos(timestep/timescale). All of these sinusoids are concatenated in\n the depth dimension, padded with zeros to be the same depth as the input,\n and added into input.\n\n Args:\n x: a Tensor with shape [?, length, ?, depth]\n min_timescale: a float\n max_timescale: a float\n num_timescales: an int <= depth/2\n\n Returns:\n a Tensor the same shape as x.\n \"\"\"\n length = tf.shape(x)[1]\n depth = tf.shape(x)[3]\n signal = get_timing_signal(length, min_timescale, max_timescale,\n num_timescales)\n padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])\n return x + tf.reshape(padded_signal, [1, length, 1, depth])\n\n\ndef mask_from_embedding(emb):\n \"\"\"Input embeddings -> padding mask.\n\n We have hacked symbol_modality to return all-zero embeddings for padding.\n Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.\n\n Args:\n emb: a Tensor with shape [batch, width, height, depth].\n Returns:\n a 0.0/1.0 Tensor with shape [batch, width, height, 1].\n \"\"\"\n return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keep_dims=True))\n\n\ndef mask_leq(target_length, source_length):\n \"\"\"A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.\n\n Args:\n target_length: an integer\n source_length: an integer\n Returns:\n a Tensor with shape [1, target_length, source_length]\n \"\"\"\n return tf.expand_dims(\n tf.matrix_band_part(tf.ones([target_length, source_length]), -1, 0), 0)\n\n\ndef attention_1d_v0(source,\n target,\n attention_size,\n output_size,\n num_heads,\n mask=None,\n transform_source=True,\n transform_target=True,\n transform_output=True,\n name=None):\n \"\"\"multi-headed attention.\n\n TODO(noam): this could probably be extended to 2d.\n\n Args:\n source: a Tensor of shape [batch, source_length, source_depth]\n target: a Tensor of shape [batch, target_length, target_depth]\n attention_size: an integer\n output_size: an integer\n num_heads: an integer divisor of attention_size\n mask: a float32 Tensor of shape [batch, target_length, source_length]\n 1.0 means can-see; 0.0 means can't-see.\n Any dimension can be 1 (supports broadcasting).\n transform_source: a boolean\n transform_target: a boolean\n transform_output: a boolean\n name: an optional string\n\n Returns:\n a Tensor of shape [batch, length, 
output_size]\n \"\"\"\n with tf.variable_scope(name, default_name=\"attention\", values=[target]):\n source_length = tf.shape(source)[1]\n target_length = tf.shape(target)[1]\n batch = tf.shape(source)[0]\n\n def _maybe_transform(t, size, should_transform, name):\n if should_transform:\n return conv1d(t, size, 1, name=name)\n else:\n assert t.get_shape()[-1] == size\n return t\n\n source_attention = _maybe_transform(source, attention_size,\n transform_source, \"source_attention\")\n target_attention = _maybe_transform(target, attention_size,\n transform_target, \"target_attention\")\n assert attention_size % num_heads == 0\n size_per_head = attention_size // num_heads\n source_attention = tf.reshape(\n source_attention, [batch, source_length, num_heads, size_per_head])\n target_attention = tf.reshape(\n target_attention, [batch, target_length, num_heads, size_per_head])\n # [batch, num_heads, length, size_per_head]\n source_attention = tf.transpose(source_attention, [0, 2, 1, 3])\n target_attention = tf.transpose(target_attention, [0, 2, 1, 3])\n\n # [batch, num_heads, target_length, source_length]\n attention = tf.matmul(target_attention, source_attention, transpose_b=True)\n attention *= size_per_head**-0.5\n\n if mask is not None:\n mask = tf.expand_dims(mask, 1)\n mask = (1.0 - mask) * -1e9\n attention += mask\n attention = tf.nn.softmax(attention)\n if not tf.get_variable_scope().reuse:\n # Compute a color image summary.\n image = tf.reshape(attention,\n [batch, num_heads, target_length, source_length])\n image = tf.transpose(image, [0, 2, 3, 1])\n image = tf.pow(image, 0.2) # for high-dynamic-range\n # Each head will correspond to one of RGB.\n # pad the heads to be a multiple of 3\n extra_heads = -num_heads % 3\n image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, -num_heads % 3]])\n image = tf.reshape(image, [\n batch, target_length, source_length, 3, (num_heads + extra_heads) // 3\n ])\n image = tf.reduce_max(image, 4)\n tf.summary.image(\"local_attention\", image, max_outputs=1)\n # output: [batch, num_heads, target_length, size_per_head]\n output = tf.matmul(attention, source_attention)\n output = tf.transpose(output, [0, 2, 1, 3])\n output = tf.reshape(output, [batch, target_length, attention_size])\n output = _maybe_transform(output, output_size, transform_output,\n \"attention_output\")\n return output\n\n\ndef relu_density_logit(x, reduce_dims):\n \"\"\"logit(density(x)).\n\n Useful for histograms.\n\n Args:\n x: a Tensor, typilcally the output of tf.relu\n reduce_dims: a list of dimensions\n\n Returns:\n a Tensor\n \"\"\"\n frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)\n scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))\n return scaled\n\n\ndef conv_hidden_relu(inputs,\n hidden_size,\n output_size,\n kernel_size=(1, 1),\n second_kernel_size=(1, 1),\n dropout=0.0,\n **kwargs):\n \"\"\"Hidden layer with RELU activation followed by linear projection.\"\"\"\n name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n with tf.variable_scope(name, \"conv_hidden_relu\", [inputs]):\n if inputs.get_shape().ndims == 3:\n is_3d = True\n inputs = tf.expand_dims(inputs, 2)\n else:\n is_3d = False\n conv_f1 = conv if kernel_size == (1, 1) else separable_conv\n h = conv_f1(\n inputs,\n hidden_size,\n kernel_size,\n activation=tf.nn.relu,\n name=\"conv1\",\n **kwargs)\n if dropout != 0.0:\n h = tf.nn.dropout(h, 1.0 - dropout)\n if not tf.get_variable_scope().reuse:\n tf.summary.histogram(\"hidden_density_logit\",\n relu_density_logit(\n h, 
list(range(inputs.shape.ndims - 1))))\n conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv\n ret = conv_f2(h, output_size, second_kernel_size, name=\"conv2\", **kwargs)\n if is_3d:\n ret = tf.squeeze(ret, 2)\n return ret\n\n\ndef conv_gru(x,\n kernel_size,\n filters,\n padding=\"SAME\",\n dilation_rate=(1, 1),\n name=None,\n reuse=None):\n \"\"\"Convolutional GRU in 1 dimension.\"\"\"\n\n # Let's make a shorthand for conv call first.\n def do_conv(args, name, bias_start, padding):\n return conv(\n args,\n filters,\n kernel_size,\n padding=padding,\n dilation_rate=dilation_rate,\n bias_initializer=tf.constant_initializer(bias_start),\n name=name)\n\n # Here comes the GRU gate.\n with tf.variable_scope(\n name, default_name=\"conv_gru\", values=[x], reuse=reuse):\n reset = saturating_sigmoid(do_conv(x, \"reset\", 1.0, padding))\n gate = saturating_sigmoid(do_conv(x, \"gate\", 1.0, padding))\n candidate = tf.tanh(do_conv(reset * x, \"candidate\", 0.0, padding))\n return gate * x + (1 - gate) * candidate\n\n\ndef conv_lstm(x,\n kernel_size,\n filters,\n padding=\"SAME\",\n dilation_rate=(1, 1),\n name=None,\n reuse=None):\n \"\"\"Convolutional LSTM in 1 dimension.\"\"\"\n with tf.variable_scope(\n name, default_name=\"conv_lstm\", values=[x], reuse=reuse):\n gates = conv(\n x,\n 4 * filters,\n kernel_size,\n padding=padding,\n dilation_rate=dilation_rate)\n g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)\n new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])\n return tf.sigmoid(g[2]) * tf.tanh(new_cell)\n\n\ndef diagonal_conv_gru(x,\n kernel_size,\n filters,\n dropout=0.0,\n name=None,\n reuse=None):\n \"\"\"Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727.\"\"\"\n\n # Let's make a shorthand for conv call first.\n def do_conv(args, name, bias_start):\n return conv(\n args,\n filters,\n kernel_size,\n padding=\"SAME\",\n bias_initializer=tf.constant_initializer(bias_start),\n name=name)\n\n # Here comes the GRU gate.\n with tf.variable_scope(\n name, default_name=\"diagonal_conv_gru\", values=[x], reuse=reuse):\n reset, reset_cost = hard_sigmoid(do_conv(x, \"reset\", 0.5))\n gate, gate_cost = hard_sigmoid(do_conv(x, \"gate\", 0.7))\n candidate = tf.tanh(do_conv(reset * x, \"candidate\", 0.0))\n\n if dropout > 0.0:\n candidate = tf.nn.dropout(candidate, 1.0 - dropout)\n\n # Diagonal shift.\n shift_filters = filters // 3\n base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +\n [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)\n shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)\n shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)\n x_shifted = tf.nn.depthwise_conv2d(\n x, shift_filter, [1, 1, 1, 1], padding=\"SAME\")\n\n # Return the gated result and cost.\n total_cost_avg = 0.5 * (reset_cost + gate_cost)\n return gate * x_shifted + (1 - gate) * candidate, total_cost_avg\n\n\ndef pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):\n \"\"\"Pad tensors x and y on axis 1 so that they have the same length.\"\"\"\n if axis not in [1, 2]:\n raise ValueError(\"Only axis=1 and axis=2 supported for now.\")\n with tf.name_scope(\"pad_to_same_length\", [x, y]):\n x_length = tf.shape(x)[axis]\n y_length = tf.shape(y)[axis]\n max_length = tf.maximum(x_length, y_length)\n if final_length_divisible_by > 1:\n # Find the nearest larger-or-equal integer divisible by given number.\n max_length += final_length_divisible_by - 1\n max_length //= final_length_divisible_by\n max_length *= 
final_length_divisible_by\n length_diff1 = max_length - x_length\n length_diff2 = max_length - y_length\n\n def padding_list(length_diff, arg):\n if axis == 1:\n return [[[0, 0], [0, length_diff]],\n tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]\n return [[[0, 0], [0, 0], [0, length_diff]],\n tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]\n\n paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)\n paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)\n res_x = tf.pad(x, paddings1)\n res_y = tf.pad(y, paddings2)\n # Static shapes are the same except for axis=1.\n x_shape = x.shape.as_list()\n x_shape[axis] = None\n res_x.set_shape(x_shape)\n y_shape = y.shape.as_list()\n y_shape[axis] = None\n res_y.set_shape(y_shape)\n return res_x, res_y\n\n\ndef pad_with_zeros(logits, labels):\n \"\"\"Pad labels on the length dimension to match logits length.\"\"\"\n with tf.name_scope(\"pad_with_zeros\", [logits, labels]):\n logits, labels = pad_to_same_length(logits, labels)\n if len(labels.shape.as_list()) == 3: # 2-d labels.\n logits, labels = pad_to_same_length(logits, labels, axis=2)\n return logits, labels\n\n\ndef weights_nonzero(labels):\n \"\"\"Assign weight 1.0 to all labels except for padding (id=0).\"\"\"\n return tf.to_float(tf.not_equal(labels, 0))\n\n\ndef weights_prepend_inputs_to_targets(labels):\n \"\"\"Assign weight 1.0 to only the \"targets\" portion of the labels.\n\n Weight 1.0 is assigned to all nonzero labels past the first zero.\n See prepend_mode in common_hparams.py\n\n Args:\n labels: A Tensor of int32s.\n\n Returns:\n A Tensor of floats.\n \"\"\"\n past_first_zero = tf.cumsum(tf.to_float(tf.equal(labels, 0)), axis=1)\n nonzero = tf.to_float(labels)\n return tf.to_float(tf.not_equal(past_first_zero * nonzero, 0))\n\n\ndef weights_all(labels):\n \"\"\"Assign weight 1.0 to all labels.\"\"\"\n return tf.ones_like(labels, dtype=tf.float32)\n\n\ndef weights_concatenated(labels):\n \"\"\"Assign weight 1.0 to the \"target\" part of the concatenated labels.\n\n The labels look like:\n source English I love you . ID1 target French Je t'aime . ID1 source\n English the cat ID1 target French le chat ID1 source English ...\n\n We want to assign weight 1.0 to all words in the target text (including the\n ID1 end symbol), but not to the source text or the boilerplate. In the\n above example, the target words that get positive weight are:\n Je t'aime . 
ID1 le chat ID1\n\n Args:\n labels: a Tensor\n Returns:\n a Tensor\n \"\"\"\n eos_mask = tf.to_int32(tf.equal(labels, 1))\n sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)\n in_target = tf.equal(tf.mod(sentence_num, 2), 1)\n # first two tokens of each sentence are boilerplate.\n sentence_num_plus_one = sentence_num + 1\n shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0],\n [0, 0]])[:, :-2, :, :]\n nonboilerplate = tf.equal(sentence_num_plus_one, shifted)\n ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))\n return ret\n\n\ndef padded_cross_entropy(logits,\n labels,\n label_smoothing,\n weights_fn=weights_nonzero,\n reduce_sum=True):\n \"\"\"Compute cross-entropy assuming 0s are padding.\n\n Computes a loss numerator (the sum of losses), and loss denominator\n (the number of non-padding tokens).\n\n Args:\n logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.\n optionally a FactoredTensor.\n labels: an integer `Tensor` with shape `[batch, timesteps]`.\n label_smoothing: a floating point `Scalar`.\n weights_fn: A function from labels to weights.\n reduce_sum: a Boolean, whether to sum at the end or not.\n\n Returns:\n loss_numerator: a `Scalar`. Sum of losses.\n loss_denominator: a `Scalar. The number of non-padding target tokens.\n \"\"\"\n if isinstance(logits, FactoredTensor):\n return padded_cross_entropy_factored(\n logits,\n labels,\n label_smoothing,\n weights_fn=weights_fn,\n reduce_sum=reduce_sum)\n confidence = 1.0 - label_smoothing\n vocab_size = tf.shape(logits)[-1]\n with tf.name_scope(\"padded_cross_entropy\", [logits, labels]):\n pad_logits, pad_labels = pad_with_zeros(logits, labels)\n xent = smoothing_cross_entropy(pad_logits, pad_labels, vocab_size,\n confidence)\n weights = weights_fn(pad_labels)\n if not reduce_sum:\n return xent * weights, weights\n return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)\n\n\ndef smoothing_cross_entropy(logits, labels, vocab_size, confidence):\n \"\"\"Cross entropy with label smoothing to limit over-confidence.\"\"\"\n with tf.name_scope(\"smoothing_cross_entropy\", [logits, labels]):\n # Low confidence is given to all non-true labels, uniformly.\n low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)\n # Normalizing constant is the best cross-entropy value with soft targets.\n # We subtract it just for readability, makes no difference on learning.\n normalizing = -(confidence * tf.log(confidence) + tf.to_float(\n vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20))\n # Soft targets.\n soft_targets = tf.one_hot(\n tf.cast(labels, tf.int32),\n depth=vocab_size,\n on_value=confidence,\n off_value=low_confidence)\n xentropy = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=soft_targets)\n return xentropy - normalizing\n\n\ndef global_pool_1d(inputs, pooling_type=\"MAX\", mask=None):\n \"\"\"Pool elements across the last dimension.\n\n Useful to convert a list of vectors into a single vector so as\n to get a representation of a set.\n\n Args:\n inputs: A tensor of dimensions batch_size x sequence_length x input_dims\n containing the sequences of input vectors.\n pooling_type: the pooling type to use, MAX or AVR\n mask: A tensor of dimensions batch_size x sequence_length containing a\n mask for the inputs with 1's for existing elements, and 0's elsewhere.\n\n Returns:\n output: A tensor of dimensions batch_size x input_dims\n dimension containing the sequences of transformed vectors.\n \"\"\"\n with tf.name_scope(\"global_pool\", [inputs]):\n if 
mask is not None:\n mask = tf.expand_dims(mask, axis=2)\n inputs = tf.multiply(inputs, mask)\n\n if pooling_type == \"MAX\":\n # A tf.pool can be used here, but reduce is cleaner\n output = tf.reduce_max(inputs, axis=1)\n elif pooling_type == \"AVR\":\n if mask is not None:\n # Some elems are dummy elems so we can't just reduce the average.\n output = tf.reduce_sum(inputs, axis=1)\n num_elems = tf.reduce_sum(mask, axis=1, keep_dims=True)\n output = tf.div(output, tf.maximum(num_elems, 1))\n else:\n output = tf.reduce_mean(inputs, axis=1)\n\n return output\n\n\ndef running_global_pool_1d(inputs, pooling_type=\"MAX\"):\n \"\"\"Same global pool, but only for the elements up to the current element.\n\n Useful for outputs where the state of future elements is not known.\n Takes no mask as all elements up to the current element are assumed to exist.\n Currently only supports maximum. Equivalent to using a lower triangle bias.\n\n Args:\n inputs: A tensor of dimensions batch_size x sequence_length x input_dims\n containing the sequences of input vectors.\n pooling_type: Pooling type to use. Currently only supports 'MAX'.\n\n Returns:\n output: A tensor of dimensions batch_size x sequence_length x input_dims\n dimension containing the running 'totals'.\n \"\"\"\n del pooling_type\n with tf.name_scope(\"running_global_pool\", [inputs]):\n scan_fct = tf.maximum\n # Permute inputs so seq_length is first.\n elems = tf.transpose(inputs, [1, 0, 2])\n # Perform scan.\n cumulatives = tf.scan(scan_fct, elems, swap_memory=True)\n # Permute output to get back to original order.\n output = tf.transpose(cumulatives, [1, 0, 2])\n return output\n\n\ndef linear_set_layer(layer_size,\n inputs,\n context=None,\n activation_fn=tf.nn.relu,\n dropout=0.0,\n name=None):\n \"\"\"Basic layer type for doing funky things with sets.\n\n Applies a linear transformation to each element in the input set.\n If a context is supplied, it is concatenated with the inputs.\n e.g. 
One can use global_pool_1d to get a representation of the set which\n can then be used as the context for the next layer.\n\n TODO: Add bias add (or control the biases used).\n\n Args:\n layer_size: Dimension to transform the input vectors to.\n inputs: A tensor of dimensions batch_size x sequence_length x input_dims\n containing the sequences of input vectors.\n context: A tensor of dimensions batch_size x context_dims\n containing a global statistic about the set.\n activation_fn: The activation function to use.\n dropout: Dropout probability.\n name: name.\n\n Returns:\n output: A tensor of dimensions batch_size x sequence_length x output_dims\n dimension containing the sequences of transformed vectors.\n \"\"\"\n with tf.variable_scope(\n name, default_name=\"linear_set_layer\", values=[inputs]):\n # Apply 1D convolution to apply linear filter to each element\n # along the 2nd dimension.\n outputs = conv1d(inputs, layer_size, 1, activation=None, name=\"set_conv\")\n\n # Apply the context if it exists.\n if context is not None:\n # Unfortunately tf doesn't support broadcasting via concat, but we can\n # simply add the transformed context to get the same effect.\n if len(context.get_shape().as_list()) == 2:\n context = tf.expand_dims(context, axis=1)\n cont_tfm = conv1d(\n context, layer_size, 1, activation=None, name=\"cont_conv\")\n outputs += cont_tfm\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n if dropout != 0.0:\n outputs = tf.nn.dropout(outputs, 1.0 - dropout)\n\n return outputs\n\n\ndef ravanbakhsh_set_layer(layer_size,\n inputs,\n mask=None,\n sequential=False,\n activation_fn=tf.nn.tanh,\n dropout=0.0,\n name=None):\n \"\"\"Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .\n\n More parameter-efficient verstion of a linear-set-layer with context.\n\n Args:\n layer_size: Dimension to transform the input vectors to.\n inputs: A tensor of dimensions batch_size x sequence_length x vector\n containing the sequences of input vectors.\n mask: A tensor of dimensions batch_size x sequence_length containing a\n mask for the inputs with 1's for existing elements, and 0's elsewhere.\n sequential: If true, will use a running global pool so each element will\n only depend on those before it. 
Set true if this layer is being used in\n an output sequence.\n activation_fn: The activation function to use.\n dropout: dropout.\n name: name.\n\n Returns:\n output: A tensor of dimensions batch_size x sequence_length x vector\n dimension containing the sequences of transformed vectors.\n \"\"\"\n del dropout\n with tf.variable_scope(name, \"ravanbakhsh_set_layer\", [inputs]):\n if sequential:\n return linear_set_layer(\n layer_size,\n inputs - running_global_pool_1d(inputs),\n activation_fn=activation_fn,\n name=name)\n return linear_set_layer(\n layer_size,\n inputs - tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1),\n activation_fn=activation_fn,\n name=name)\n\n\ndef fn_device_dependency_dict():\n \"\"\"State container for fn_device_dependency.\"\"\"\n if not hasattr(tf.get_default_graph(), \"dependency_dict\"):\n setattr(tf.get_default_graph(), \"dependency_dict\", defaultdict(list))\n return tf.get_default_graph().dependency_dict\n\n\[email protected]\ndef fn_device_dependency(name, device=\"\"):\n \"\"\"Add control deps for name and device.\"\"\"\n key = name + \"_\" + device\n outs = []\n\n def body():\n with tf.control_dependencies(fn_device_dependency_dict()[key]):\n yield outs\n assert outs\n\n deps = outs\n if isinstance(outs[0], list) or isinstance(outs[0], tuple):\n assert len(outs) == 1\n deps = outs[0]\n fn_device_dependency_dict()[key] = deps\n\n if device:\n with tf.device(device):\n return body()\n else:\n return body()\n\n\ndef underlying_variable_ref(t):\n \"\"\"Find the underlying variable ref.\n\n Traverses through Identity, ReadVariableOp, and Enter ops.\n Stops when op type has Variable or VarHandle in name.\n\n Args:\n t: a Tensor\n\n Returns:\n a Tensor that is a variable ref, or None on error.\n \"\"\"\n while t.op.type in [\"Identity\", \"ReadVariableOp\", \"Enter\"]:\n t = t.op.inputs[0]\n\n op_type = t.op.type\n if \"Variable\" in op_type or \"VarHandle\" in op_type:\n return t\n else:\n return None\n\n\ndef underlying_variable(t):\n \"\"\"Find the underlying tf.Variable object.\n\n Args:\n t: a Tensor\n\n Returns:\n a tf.Varaible object.\n \"\"\"\n t = underlying_variable_ref(t)\n assert t is not None\n # make sure that the graph has a variable index and that it is up-to-date\n if not hasattr(tf.get_default_graph(), \"var_index\"):\n tf.get_default_graph().var_index = {}\n var_index = tf.get_default_graph().var_index\n for v in tf.global_variables()[len(var_index):]:\n var_index[v.name] = v\n return var_index[t.name]\n\n\ndef approximate_split(x, num_splits, axis=0):\n \"\"\"Split approximately equally into num_splits parts.\n\n Args:\n x: a Tensor\n num_splits: an integer\n axis: an integer.\n\n Returns:\n a list of num_splits Tensors.\n \"\"\"\n size = tf.shape(x)[axis]\n size_splits = [tf.div(size + i, num_splits) for i in xrange(num_splits)]\n return tf.split(x, size_splits, axis=axis)\n\n\nclass FactoredTensor(object):\n \"\"\"A concise factored representation of Tensor as two tensors.\n\n This class represents the tensor tf.matmul(a, b, transpose_b=True)\n by storing the values of Tensors a and b.\n\n The reason for this is that the product may be too big to fully realize at\n once, so it can be realized a part at a time.\n\n \"a\" may have extra leading dimensions, in which case they are flattened out\n before computing the matrix product, then re-expanded afterwards.\n \"\"\"\n\n def __init__(self, a, b):\n self._a = a\n self._b = b\n\n @property\n def a(self):\n return self._a\n\n @property\n def b(self):\n return self._b\n\n def 
to_tensor(self):\n inner_dim = tf.shape(self.b)[1]\n result_dim = tf.shape(self.b)[0]\n flat_a = tf.reshape(self.a, [-1, inner_dim])\n product = tf.matmul(flat_a, self.b, transpose_b=True)\n product_shape = tf.concat([tf.shape(self.a)[:-1], [result_dim]], 0)\n product = tf.reshape(product, product_shape)\n product.set_shape(self.a.get_shape().as_list()[:-1] +\n [self.b.get_shape()[0]])\n return product\n\n\ndef _convert_factored_tensor_to_tensor(value, *args, **kwargs):\n # call ops.convert_to_tensor to handle optional arguments appropriately\n return ops.internal_convert_to_tensor(value.to_tensor(), *args, **kwargs)\n\n\ntf.register_tensor_conversion_function(FactoredTensor,\n _convert_factored_tensor_to_tensor)\n\n\ndef smoothing_cross_entropy_factored_grad(op, dy):\n \"\"\"Gradient function for smoothing_cross_entropy_factored.\"\"\"\n a = op.inputs[0]\n b = op.inputs[1]\n labels = op.inputs[2]\n confidence = op.inputs[3]\n num_splits = 16\n vocab_size = tf.shape(b)[0]\n labels = approximate_split(labels, num_splits)\n a = approximate_split(a, num_splits)\n dy = approximate_split(dy, num_splits)\n b_grad = None\n a_grad_parts = []\n deps = []\n for part in xrange(num_splits):\n with tf.control_dependencies(deps):\n logits = tf.matmul(a[part], b, transpose_b=True)\n output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,\n confidence)\n a_grad_part, b_grad_part = tf.gradients(\n ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])\n a_grad_parts.append(a_grad_part)\n if part > 0:\n b_grad += b_grad_part\n else:\n b_grad = b_grad_part\n deps = [b_grad, a_grad_part]\n a_grad = tf.concat(a_grad_parts, 0)\n return a_grad, b_grad, None, None\n\n\[email protected](\n noinline=True,\n python_grad_func=smoothing_cross_entropy_factored_grad,\n compiled=True,\n separate_compiled_gradients=True)\ndef smoothing_cross_entropy_factored(a, b, labels, confidence):\n \"\"\"Memory-efficient computation of smoothing cross-entropy.\n\n Avoids realizing the entire logits matrix at once.\n\n Args:\n a: a Tensor with shape [batch, inner_dim]\n b: a Tensor with shape [vocab_size, inner_dim]\n labels: an integer Tensor with shape [batch]\n confidence: a float\n\n Returns:\n A Tensor with shape [batch]\n \"\"\"\n num_splits = 16\n vocab_size = tf.shape(b)[0]\n labels = approximate_split(labels, num_splits)\n a = approximate_split(a, num_splits)\n parts = []\n for part in xrange(num_splits):\n with tf.control_dependencies(parts[-1:]):\n logits = tf.matmul(a[part], b, transpose_b=True)\n parts.append(\n smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))\n return tf.concat(parts, 0)\n\n\ndef padded_cross_entropy_factored(factored_logits,\n labels,\n label_smoothing,\n weights_fn=weights_nonzero,\n reduce_sum=True):\n \"\"\"Memory-efficient computation of smoothing cross-entropy.\n\n Avoids realizing the entire logits matrix at once.\n\n Args:\n factored_logits: a `FactoredTensor` representing a Tensor\n with shape `[batch, timesteps, vocab_size]`.\n labels: an integer `Tensor` with shape `[batch, timesteps]`.\n label_smoothing: a floating point `Scalar`.\n weights_fn: A function from labels to weights.\n reduce_sum: a Boolean, whether to sum at the end or not.\n\n Returns:\n loss_numerator: a `Scalar`. Sum of losses.\n loss_denominator: a `Scalar. 
The number of non-padding target tokens.\n \"\"\"\n a = factored_logits.a\n b = factored_logits.b\n confidence = 1.0 - label_smoothing\n with tf.name_scope(\"padded_cross_entropy_factored\", [a, b, labels]):\n labels_flat = tf.reshape(labels, [-1])\n a_flat = tf.reshape(a, [-1, tf.shape(b)[1]])\n xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,\n tf.convert_to_tensor(confidence))\n xent = tf.reshape(xent, tf.shape(labels))\n weights = weights_fn(labels)\n if not reduce_sum:\n return xent * weights, weights\n return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)\n\n\ndef fn_with_custom_grad(grad_fn, use_global_vars=False):\n \"\"\"Decorator to create a subgraph with a custom gradient function.\n\n The subgraph created by the decorated function is NOT put in a Defun and so\n does not suffer from the limitations of the Defun (all subgraph ops on the\n same device, no summaries).\n\n Args:\n grad_fn: function with signature\n (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),\n all of which are lists of Tensors.\n use_global_vars: if True, variables will be the global variables created.\n If False, will be the trainable variables.\n\n Returns:\n Decorator for function such that the gradient is defined by grad_fn.\n \"\"\"\n\n def dec(fn):\n\n def wrapped(*args):\n return _fn_with_custom_grad(\n fn, args, grad_fn, use_global_vars=use_global_vars)\n\n return wrapped\n\n return dec\n\n\ndef _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):\n \"\"\"Create a subgraph with a custom gradient.\n\n Args:\n fn: function that takes inputs as arguments and produces 1 or more Tensors.\n inputs: list<Tensor>, will be passed as fn(*inputs).\n grad_fn: function with signature\n (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),\n all of which are lists of Tensors.\n use_global_vars: if True, variables will be the global variables created.\n If False, will be the trainable variables.\n\n Returns:\n fn(*inputs)\n \"\"\"\n vs = tf.get_variable_scope()\n get_vars_fn = (vs.global_variables if use_global_vars else\n vs.trainable_variables)\n len_before_vars = len(get_vars_fn())\n inputs = list(inputs)\n outputs = fn(*inputs)\n train_vars = get_vars_fn()[len_before_vars:]\n\n if grad_fn is None:\n return outputs\n else:\n if not (isinstance(outputs, tuple) or isinstance(outputs, list)):\n outputs = [outputs]\n outputs = list(outputs)\n\n in_types = [t.dtype for t in inputs]\n out_types = [t.dtype for t in outputs]\n var_types = [t.dtype for t in train_vars]\n\n def custom_grad_fn(op, *dys):\n \"\"\"Custom grad fn applying grad_fn for identity Defun.\"\"\"\n dys = list(dys)\n fn_inputs = op.inputs[:len(inputs)]\n fn_vars = op.inputs[len(inputs):len(inputs) + len(train_vars)]\n fn_outputs = op.inputs[len(inputs) + len(train_vars):]\n assert len(fn_outputs) == len(outputs)\n assert len(fn_outputs) == len(dys)\n\n grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)\n grad_outputs = [None] * len(fn_outputs)\n return tuple(grad_inputs + grad_vars + grad_outputs)\n\n # The Defun takes as input the original inputs, the trainable variables\n # created in fn, and the outputs. In the forward it passes through the\n # outputs. 
In the backwards, it produces gradients for the original inputs\n # and the trainable variables.\n @function.Defun(\n *(in_types + var_types + out_types),\n func_name=\"identity_custom_grad%d\" % random.randint(1, 10**9),\n python_grad_func=custom_grad_fn,\n shape_func=lambda _: [t.get_shape() for t in outputs])\n def identity(*args):\n outs = args[len(inputs) + len(train_vars):]\n return tuple([tf.identity(t) for t in outs])\n\n id_out = identity(*(inputs + train_vars + outputs))\n return id_out\n\n\n_function_cache = {}\n\n\ndef conv_hidden_relu_memory_efficient(x,\n filter_size,\n epsilon=1e-6,\n forget=True,\n test_vars=None,\n name=None):\n \"\"\"LayerNorm, Conv, ReLU, Conv.\n\n All convolutions have kernel size 1.\n\n returns conv(relu(conv(layer_norm(x))))\n\n Args:\n x: input Tensor with shape [batch, length, io_size]\n filter_size: an integer - size of the hidden layer.\n epsilon: a float (for layer norm)\n forget: a boolean - forget forwards activations and recompute on backprop\n test_vars: optional tuple of variables for testing purposes\n name: an optional string\n\n Returns:\n a Tensor with shape [batch, length, io_size]\n \"\"\"\n io_size = x.get_shape().as_list()[-1]\n\n def forward_internal(x, f1, f2, scale, bias):\n \"\"\"Forward function.\"\"\"\n # split batch-wise to avoid exhausting memory in cast the batch is large\n # and the hidden layer is large.\n num_splits = 4\n x_flat = tf.reshape(x, [-1, 1, tf.shape(x)[2]])\n xs = approximate_split(x_flat, num_splits)\n ys = []\n for i in xrange(num_splits):\n with tf.control_dependencies(ys[-1:]):\n n = layer_norm_compute_python(xs[i], epsilon, scale, bias)\n y = tf.nn.conv1d(n, f1, 1, \"SAME\")\n y = tf.nn.relu(y)\n y = tf.nn.conv1d(y, f2, 1, \"SAME\")\n ys.append(y)\n y = tf.concat(ys, 0)\n y = tf.reshape(y, tf.shape(x))\n return y\n key = (\"conv_hidden_relu_memory_efficient %s\" % epsilon)\n if not forget:\n forward_fn = forward_internal\n elif key in _function_cache:\n forward_fn = _function_cache[key]\n else:\n @function.Defun(compiled=True)\n def grad_fn(x, f1, f2, scale, bias, dy):\n with tf.control_dependencies([dy]):\n num_splits = 4\n x_shape = tf.shape(x)\n flat_shape = [-1, 1, x_shape[2]]\n x = tf.reshape(x, flat_shape)\n dy = tf.reshape(dy, flat_shape)\n xs = approximate_split(x, num_splits)\n dys = approximate_split(dy, num_splits)\n dxs = []\n df1 = 0\n df2 = 0\n dscale = 0\n dbias = 0\n deps = []\n for i in xrange(num_splits):\n with tf.control_dependencies(deps):\n n = layer_norm_compute_python(xs[i], epsilon, scale, bias)\n y = tf.nn.conv1d(n, f1, 1, \"SAME\")\n y = tf.nn.relu(y)\n y = tf.nn.conv1d(y, f2, 1, \"SAME\")\n dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(\n ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])\n df1 += pdf1\n df2 += pdf2\n dscale += pdscale\n dbias += pdbias\n dxs.append(dxi)\n deps = [dxi, df1, df2, dscale, dbias]\n with tf.control_dependencies(deps):\n dx = tf.concat(dxs, 0)\n dx = tf.reshape(dx, x_shape)\n return dx, df1, df2, dscale, dbias\n\n @function.Defun(grad_func=grad_fn, compiled=True,\n separate_compiled_gradients=True)\n def forward_fn(x, f1, f2, scale, bias):\n return forward_internal(x, f1, f2, scale, bias)\n\n with tf.variable_scope(name, default_name=\"ffn2\", values=[x]):\n # TODO(noam): it would be nice to save memory by casting x to float16\n # here, but this causes problems with the gradients. 
Figure out if there\n # is a way to leave the gradients as float32.\n if test_vars is not None:\n f1, f2, scale, bias = list(test_vars)\n else:\n f1 = tf.get_variable(\"f1\", [1, io_size, filter_size])\n f2 = tf.get_variable(\"f2\", [1, filter_size, io_size])\n scale, bias = layer_norm_vars(io_size)\n if forget:\n y = forward_fn(x, f1, f2, scale, bias)\n else:\n y = forward_internal(x, f1, f2, scale, bias)\n y.set_shape(x.get_shape())\n return y\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "tensorflow.tanh", "tensorflow.image.random_flip_left_right", "tensorflow.summary.image", "tensorflow.layers.dense", "tensorflow.train.get_global_step", "tensorflow.square", "tensorflow.matmul", "tensorflow.image.random_brightness", "tensorflow.nn.elu", "tensorflow.zeros_like", "tensorflow.split", "tensorflow.reduce_mean", "tensorflow.space_to_depth", "tensorflow.ones_like", "tensorflow.ones", "tensorflow.layers.separable_conv2d", "tensorflow.random_uniform", "tensorflow.concat", "tensorflow.global_variables", "tensorflow.pad", "tensorflow.get_default_graph", "tensorflow.nn.conv1d", "tensorflow.layers.batch_normalization", "tensorflow.div", "tensorflow.gather", "tensorflow.register_tensor_conversion_function", "tensorflow.pow", "tensorflow.identity", "tensorflow.not_equal", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.nn.pool", "tensorflow.expand_dims", "tensorflow.random_crop", "tensorflow.log", "tensorflow.rsqrt", "tensorflow.get_variable_scope", "tensorflow.get_variable", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.cast", "tensorflow.image.random_saturation", "tensorflow.rank", "tensorflow.add_n", "tensorflow.python.framework.function.Defun", "tensorflow.squeeze", "tensorflow.nn.l2_normalize", "tensorflow.shape", "numpy.transpose", "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.sqrt", "tensorflow.abs", "tensorflow.logical_and", "tensorflow.device", "tensorflow.image.random_contrast", "tensorflow.scan", "tensorflow.equal", "tensorflow.nn.depthwise_conv2d", "tensorflow.cumsum", "tensorflow.get_shape", "tensorflow.image.random_hue", "tensorflow.gradients", "tensorflow.to_float", "tensorflow.name_scope", "tensorflow.nn.dropout", "tensorflow.depth_to_space", "tensorflow.layers.conv2d", "tensorflow.zeros_initializer", "tensorflow.nn.relu", "tensorflow.sin", "tensorflow.cos", "tensorflow.maximum", "tensorflow.sigmoid", "tensorflow.mod", "tensorflow.ones_initializer" ] ]
DXYyang/shenNeng_gasAnalysis
[ "d94e2451d1938c090d1377dfbd487d0c6a649188" ]
[ "app/main/analysis/gas_kmeans_plt.py" ]
[ "def gas_kmeans_pit(dist,list,clusters):\n import matplotlib.pyplot as plt\n from sklearn.manifold import MDS\n plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\n plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n MDS()\n mds = MDS(n_components=2, dissimilarity=\"precomputed\", random_state=1)\n pos = mds.fit_transform(dist) # 将1-余弦距离矩阵降维转化为二维数组\n xs, ys = pos[:, 0], pos[:, 1]#所有点在二维图上的坐标\n clusters_name={}\n for index,data in enumerate(list):\n clusters_name[index]=','.join(data[:3])\n import pandas as pd\n df = pd.DataFrame(dict(x=xs, y=ys, label=clusters))\n groups = df.groupby('label')\n # 设置绘图\n fig, ax = plt.subplots(figsize=(17, 9)) # 设置大小\n ax.margins(0.05) # 可选项,只添加 5% 的填充(padding)来自动缩放(auto scaling)。\n # 对聚类进行迭代并分布在绘图上\n # 我用到了 cluster_name 和 cluster_color 字典的“name”项,这样会返回相应的 color 和 label\n for name, group in groups:\n ax.plot(group.x, group.y, marker='o', linestyle='', ms=12,\n label=clusters_name[name],\n mec='none')\n ax.set_aspect('auto')\n ax.tick_params(\n axis='x', # 使用 x 坐标轴\n which='both', # 同时使用主刻度标签(major ticks)和次刻度标签(minor ticks)\n bottom='off', # 取消底部边缘(bottom edge)标签\n top='off', # 取消顶部边缘(top edge)标签\n labelbottom='off')\n ax.tick_params(\n axis='y', # 使用 y 坐标轴\n which='both', # 同时使用主刻度标签(major ticks)和次刻度标签(minor ticks)\n left='off', # 取消底部边缘(bottom edge)标签\n top='off', # 取消顶部边缘(top edge)标签\n labelleft='off')\n ax.legend(numpoints=1) # 图例(legend)中每项只显示一个点\n return plt\n" ]
[ [ "sklearn.manifold.MDS", "matplotlib.pyplot.subplots" ] ]
GregHilston/Google-Trends-Scraper
[ "fa5ccd0443bccffa99299748759491acf040e561" ]
[ "google_trends_scraper/google_trends_scraper.py" ]
[ "import sys\nimport os\nimport time\nimport pandas as pd\nfrom selenium import webdriver\n\nprint(f\"before path: {sys.path}\")\n\n# Adding geckodriver to our path so whoever imports our library can run correctly\nsys.path.insert(0, \"google_trends_scraper\")\n\nprint(f\"after path: {sys.path}\")\n\nclass GoogleTrendsScraper:\n original_output_file_name = \"multiTimeline.csv\" # the name of the output CSV file from Google Trends\n\n \"\"\"Grabs weekly data from start to end for given query\n \"\"\"\n def __init__(self, query, start_date, end_date, output_file_name=\"output.csv\", seconds_delay=15, weekly_granularity=False):\n \"\"\"\n\n :param query: the query we're scraping\n :param start_date: the start date of the range we're scraping for in format (YYYY-MM-DDD)\n :param end_date: the start date of the range we're scraping for in format (YYYY-MM-DDD)\n :param output_file_name: the name of the output csv\n :param seconds_delay: how long to wait between delays (caution don't set this too low out of fear of being banned)\n :param weekly_granularity: whether Google Trends data should be broken up to many weeks\n \"\"\"\n\n self.query = query.replace(' ', \"%20\")\n self.start_date = start_date\n self.end_date = end_date\n self.output_file_name = output_file_name\n self.seconds_delay = seconds_delay\n self.weekly_granularity = weekly_granularity\n\n def generate_url(self, start_date, end_date):\n \"\"\"Generates a Google Trends URL for a given range\n\n :param str start_date: the start date\n :param str end_date: the end date\n\n :return: the formatted Google Trends URL from start to end\n :rtype: str\n \"\"\"\n\n base = \"https://trends.google.com/trends/explore\"\n date = f\"date={start_date}%20{end_date}\"\n query = \"q=it%20is%20wednesday%20my%20dudes\"\n url = f\"{base}?{date}&{query}\"\n\n return url\n\n def fetch_week_trends(self, url, output_file_name=original_output_file_name):\n \"\"\"Fetch the trends for a given week, in daily granularity\n\n :param str url: URL to fetch the CSV from\n :param str output_file_name: file path for where to save the CSV file\n\n :return: None\n \"\"\"\n\n # Accept the save dialogue\n fp = webdriver.FirefoxProfile()\n fp.set_preference(\"browser.download.folderList\", 2)\n fp.set_preference(\"browser.download.manager.showWhenStarting\", False)\n fp.set_preference(\"browser.download.dir\", os.getcwd())\n fp.set_preference(\"browser.helperApps.neverAsk.openFile\",\n \"text/csv,application/x-msexcel,application/excel,application/x-excel,application/vnd.ms-excel,image/png,image/jpeg,text/html,text/plain,application/msword,application/xml\")\n fp.set_preference(\"browser.helperApps.neverAsk.saveToDisk\",\n \"text/csv,application/x-msexcel,application/excel,application/x-excel,application/vnd.ms-excel,image/png,image/jpeg,text/html,text/plain,application/msword,application/xml\")\n fp.set_preference(\"browser.helperApps.alwaysAsk.force\", False)\n fp.set_preference(\"browser.download.manager.alertOnEXEOpen\", False)\n fp.set_preference(\"browser.download.manager.focusWhenStarting\", False)\n fp.set_preference(\"browser.download.manager.useWindow\", False)\n fp.set_preference(\"browser.download.manager.showAlertOnComplete\", False)\n fp.set_preference(\"browser.download.manager.closeWhenDone\", False)\n\n # Download the CSV file\n driver = webdriver.Firefox(fp, executable_path=\"google_trends_scraper/geckodriver\")\n driver.get(url)\n driver.implicitly_wait(5) # may need to implicitly wait longer on slow connections\n button = 
driver.find_element_by_class_name('export')\n button.click()\n\n # wait for the file to download\n while not os.path.exists(self.original_output_file_name):\n print(\"waiting 1 second, perpetually, for file to be downloaded\")\n time.sleep(1)\n\n print(f\"about to rename {self.original_output_file_name} to {output_file_name}\")\n os.rename(self.original_output_file_name, output_file_name)\n\n driver.close()\n\n def generate_weeks(self, start_date, end_date):\n \"\"\"Generates all start of the weeks between the start and end, specifically with the same day as Start\n\n :param str start_date: The start of the range\n :param str end_date: The end of the range\n\n :return: list of weeks within range\n :rtype: list\n \"\"\"\n\n print(f\"\\tstart:\\t{start_date}\")\n print(f\"\\tend:\\t{end_date}\")\n\n # Generate every week from start to finish\n dr = pd.date_range(start=start_date, end=end_date, freq=\"7D\")\n print(f\"dr {dr}\")\n weeks = dr + pd.Timedelta(weeks=1)\n print(f\"weeks {weeks}\")\n weeks_str = []\n\n # Converting to a str representation\n for week in list(weeks):\n print(f\"week {week}\")\n weeks_str.append(str(week.date()))\n\n for i in range(0, len(weeks_str), 1):\n try:\n start_week = weeks_str[i]\n end_week = weeks_str[i + 1]\n except IndexError as e:\n print(\"Warning: End date wasn't at end of week, missing a most recent few days\")\n continue\n\n print(f\"week {i}:\")\n print(f\"\\tstart:\\t{start_week}\")\n print(f\"\\tend:\\t{end_week}\")\n print()\n\n return weeks_str\n\n def combine_csv_files(self, file_names, output=None):\n \"\"\"Combines all given csv file names, of the same structure, to a single one\n\n :param list file_names: a list of all file names to combine\n :param str output: the filename of the output we'll be making\n\n :return: None\n \"\"\"\n\n # How you're supposed to set a default value to a class variable, weird but you can't reference self in the\n # function definition\n if output is None:\n output = self.output_file_name\n\n dfs = []\n for filename in sorted(file_names):\n dfs.append(pd.read_csv(filename, skiprows=2))\n full_df = pd.concat(dfs)\n\n full_df.to_csv(output, index=False) # removes the useless index column\n\n def weekly_scrape(self):\n weeks = self.generate_weeks(self.start_date, self.end_date)\n\n for i in range(0, len(weeks), 1):\n start_day = weeks[i]\n end_day = weeks[i + 1]\n\n url = self.generate_url(start_day, end_day)\n self.fetch_week_trends(url, f\"{start_day}_to_{end_day}.csv\")\n\n print(f\"Waiting {self.seconds_delay} to avoid IP banning\")\n time.sleep(self.seconds_delay)\n\n self.combine_csv_files([\"data/multiTimeline1.csv\", \"data/multiTimeline2.csv\"])\n\n def total_scrape(self):\n url = self.generate_url(self.start_date, self.end_date)\n self.fetch_week_trends(url, f\"{self.start_date}_to_{self.end_date}.csv\")\n\n return pd.read_csv(f\"{self.start_date}_to_{self.end_date}.csv\")\n\n def scrape(self):\n \"\"\"Begin the scrape, returning a DataFrame of the scraped data and writing the output to a CSV\n\n :return: the scraped data\n :rtype: DataFrame\n \"\"\"\n\n print(os.getcwd())\n\n if self.weekly_granularity:\n return self.weekly_scrape()\n else:\n return self.total_scrape()\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Timedelta", "pandas.date_range" ] ]
dapatil211/Jacinle
[ "37117de4abf1774548786e9534c90977d67091d8" ]
[ "jaclearn/vision/coco/setup.py" ]
[ "from setuptools import setup, Extension\nimport numpy as np\n\n# To compile and install locally run \"python setup.py build_ext --inplace\"\n# To install library to Python site-packages run \"python setup.py build_ext install\"\n\next_modules = [\n Extension(\n 'pycocotools._mask',\n sources=['src/maskApi.c', 'pycocotools/_mask.pyx'],\n include_dirs = [np.get_include(), 'src'],\n extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],\n )\n]\n\nif __name__ == '__main__':\n setup(\n name='pycocotools',\n packages=['pycocotools'],\n package_dir = {'pycocotools': 'pycocotools'},\n install_requires=[\n 'setuptools>=18.0',\n 'cython>=0.27.3',\n 'matplotlib>=2.1.0'\n ],\n version='2.0',\n ext_modules= ext_modules\n )\n\n" ]
[ [ "numpy.get_include" ] ]
cmla-psu/dpgen
[ "f9ba8bd140cc8978f20c52de175ed52cb870fe09" ]
[ "dpgen/algorithms/adaptive_svt_private.py" ]
[ "import time\n\nimport numba\nimport numpy as np\nimport pyswarms as ps\nimport sympy as sp\n\nLENGTH = 100\nEPSILON = 1\n\n\[email protected]\ndef my_assert(cond):\n if not cond:\n return 1\n else:\n return 0\n\n\[email protected]\ndef unpack_inputs(all_inputs):\n q, dq = all_inputs[:LENGTH], all_inputs[LENGTH:2 * LENGTH]\n noise, T, N = all_inputs[2 * LENGTH: 4 * LENGTH + 1], all_inputs[4 * LENGTH + 1], int(all_inputs[4 * LENGTH + 2])\n return q, dq, noise, T, N\n\n\[email protected]\ndef unpack_alignments(alignments: np.ndarray):\n proof, holes = alignments[:17], alignments[17:]\n\n return proof.astype(np.int8), holes.astype(np.int8)\n\n\ndef convert_alignment_str(alignments):\n proof, holes = unpack_alignments(alignments)\n eta2_true_dist = sp.simplify(\n f\"{proof[1]} + {proof[2] * proof[0]} + {proof[3]} * q_i_dist\")\n\n eta2_false_dist = sp.simplify(\n f\"{proof[4]} + {proof[5] * proof[0]} + {proof[6]} * q_i_dist\")\n\n eta2 = sp.simplify(\n f\"{holes[1]} + {holes[2]} * c\"\n )\n\n eta3_true_dist = sp.simplify(\n f\"{proof[7]} + {proof[8] * proof[0]} + {proof[9]} * q_i_dist\")\n\n eta3_false_dist = sp.simplify(\n f\"{proof[10]} + {proof[11] * proof[0]} + {proof[12]} * q_i_dist\")\n\n eta3 = sp.simplify(\n f\"{holes[3]} + {holes[4]} * c\"\n )\n\n while_cond = sp.simplify(\n f\"Max(Abs({proof[13]}) * EPSILON / ({eta2}), Abs({proof[14]}) * EPSILON / ({eta2}), Abs({proof[15]}) * EPSILON / ({eta3}), Abs({proof[16]}) * EPSILON / ({eta3}))\"\n )\n\n return f\"alignments: eta1: {proof[0]}, eta2: Omega_top ? {eta2_true_dist} : {eta2_false_dist}, eta3: Omega_middle ? {eta3_true_dist} : {eta3_false_dist}\", f\"eta1: {holes[0]} | eta2: {eta2} | eta3: {eta3} | while: {while_cond}\"\n\n\[email protected](fastmath=True)\ndef adaptivesvt_original(all_inputs, alignments):\n # unpack the specific inputs from all_inputs\n q, dq, noise, T, c = unpack_inputs(all_inputs)\n proof, holes = unpack_alignments(alignments)\n\n cost = 0\n failures = 0\n T_bar = T + np.random.laplace(0, (holes[0] / EPSILON)) if proof[0] != 0 else 0\n dist_T_bar = (proof[0])\n cost += np.abs(dist_T_bar) * (EPSILON / holes[0])\n i = 0\n sigma = 10\n true_positives, false_positives = 0, 0\n # [1, 1, 0, -1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 2, 0, 8, 0, 4]\n while i < LENGTH and cost <= EPSILON - max(\n np.abs(proof[13]) * EPSILON / (holes[1] + holes[2] * c),\n np.abs(proof[14]) * EPSILON / (holes[1] + holes[2] * c),\n np.abs(proof[15]) * EPSILON / (holes[3] + holes[4] * c),\n np.abs(proof[16]) * EPSILON / (holes[3] + holes[4] * c)\n ) + 1e-3:\n eta2 = np.random.laplace(0, (holes[1] + holes[2] * c) / EPSILON)\n if q[i] + eta2 - T_bar >= sigma: # NOTICE: should be sigma\n cost += np.abs(proof[13]) * EPSILON / (holes[1] + holes[2] * c)\n if q[i] >= T:\n true_positives += 1\n else:\n false_positives += 1\n else:\n cost += np.abs(proof[14]) * EPSILON / (holes[1] + holes[2] * c)\n eta3 = np.random.laplace(0, (holes[3] + holes[4] * c) / EPSILON)\n if q[i] + eta3 - T_bar >= 0:\n if q[i] >= T:\n true_positives += 1\n else:\n false_positives += 1\n cost += np.abs(proof[15]) * EPSILON / (holes[3] + holes[4] * c)\n else:\n cost += np.abs(proof[16]) * EPSILON / (holes[3] + holes[4] * c)\n i += 1\n return true_positives, false_positives\n\n\[email protected](fastmath=True)\ndef adaptivesvt(all_inputs, alignments):\n # unpack the specific inputs from all_inputs\n q, dq, noise, T, c = unpack_inputs(all_inputs)\n proof, holes = unpack_alignments(alignments)\n\n # TODO: avoiding zero division problem\n if holes[0] < 1e-5 or holes[1] + holes[2] * c < 1e-5 
or holes[3] + holes[4] * c < 1e-5:\n return 1000 * EPSILON, 10000, 0\n\n cost = 0\n failures = 0\n idx = 0\n T_bar = T + noise[idx]\n idx += 1\n dist_T_bar = (proof[0])\n cost += np.abs(dist_T_bar) * (EPSILON / holes[0])\n i = 0\n sigma = 5\n # top_queries = 0\n # [1, 1, 0, -1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 8, 0, 4]\n # [1, 1, 0, -1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 2, 0, 8, 0, 4]\n # [0, 0, 0, -1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 1, 0, 3, 6, 5, 5, 4]\n # [1, 1, 0, -1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 2, 0]\n while i < LENGTH and cost <= EPSILON - max(\n np.abs(proof[13]) * EPSILON / (holes[1] + holes[2] * c),\n np.abs(proof[14]) * EPSILON / (holes[1] + holes[2] * c),\n np.abs(proof[15]) * EPSILON / (holes[3] + holes[4] * c),\n np.abs(proof[16]) * EPSILON / (holes[3] + holes[4] * c)\n ) + 1e-3:\n eta2 = noise[idx]\n idx += 1\n if q[i] + eta2 - T_bar >= sigma: # NOTICE: should be sigma\n eta2_dist = (proof[1] + proof[2] * dist_T_bar + proof[3] * dq[i])\n if np.abs(proof[13]) < np.abs(eta2_dist):\n return 1000 * EPSILON, 10000, 0\n failures += my_assert(q[i] + dq[i] + eta2 + eta2_dist - (T_bar + dist_T_bar) >= sigma)\n\n cost += np.abs(proof[13]) * EPSILON / (holes[1] + holes[2] * c)\n failures += my_assert(np.abs(dq[i] + eta2_dist - dist_T_bar) < 1e-5)\n # top_queries += 1\n else:\n eta2_dist = (proof[4] + proof[5] * dist_T_bar + proof[6] * dq[i])\n if np.abs(proof[14]) < np.abs(eta2_dist):\n return 1000 * EPSILON, 10000, 0\n failures += my_assert(q[i] + dq[i] + eta2 + eta2_dist - (T_bar + dist_T_bar) < sigma)\n cost += np.abs(proof[14]) * EPSILON / (holes[1] + holes[2] * c)\n eta3 = noise[idx]\n idx += 1\n if q[i] + eta3 - T_bar >= 0:\n eta3_dist = (proof[7] + proof[8] * dist_T_bar + proof[9] * dq[i])\n if np.abs(proof[15]) < np.abs(eta3_dist):\n return 1000 * EPSILON, 10000, 0\n failures += my_assert(q[i] + dq[i] + eta3 + eta3_dist - (T_bar + dist_T_bar) >= 0)\n cost += np.abs(proof[15]) * EPSILON / (holes[3] + holes[4] * c)\n failures += my_assert(np.abs(dq[i] + eta3_dist - dist_T_bar) < 1e-5)\n # top_queries += 1\n else:\n eta3_dist = (proof[10] + proof[11] * dist_T_bar + proof[12] * dq[i])\n if np.abs(proof[16]) < np.abs(eta3_dist):\n return 1000 * EPSILON, 10000, 0\n failures += my_assert(q[i] + dq[i] + eta3 + eta3_dist - (T_bar + dist_T_bar) < 0)\n cost += np.abs(proof[16]) * EPSILON / (holes[3] + holes[4] * c)\n i += 1\n\n if i == 0:\n failures += 1\n\n if cost - EPSILON > 1e-3:\n failures += 1\n\n return cost, failures, (np.square(holes[0] / EPSILON) + np.square((holes[1] + holes[2] * c) / EPSILON) + np.square(\n (holes[3] + holes[4] * c) / EPSILON)) / (np.square(9 / EPSILON) + 2 * np.square((9 + 9 * c) / EPSILON))\n\n\[email protected](parallel=True, fastmath=True)\ndef find_inputs(all_inputs, alignments):\n # bootstrap the process\n # for each particle\n results = np.empty(all_inputs.shape[0])\n for i in numba.prange(all_inputs.shape[0]):\n # TODO: this is the same as setting a constraint stating N <= LENGTH / 5\n N = int(all_inputs[i][4 * LENGTH + 2])\n dq = all_inputs[i][LENGTH:2 * LENGTH]\n if N > int(LENGTH / 5) or np.linalg.norm(dq,\n ord=1) < 0.5 * LENGTH: # NOTICE: this constrains norm(dq) > 0.8 * LENGTH\n results[i] = 1e12 * N\n else:\n cost, failures, variance = adaptivesvt(all_inputs[i], alignments)\n results[i] = -failures\n\n return results\n\n\[email protected](parallel=True, fastmath=True)\ndef find_alignments(alignments: np.ndarray, all_inputs: np.ndarray):\n # bootstrap the process\n # for each particle\n c = int(all_inputs[0][4 * LENGTH + 2])\n\n 
results = np.zeros(alignments.shape[0])\n # alignments[0] = np.array([1, 1, 0, -1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 8, 0, 4])\n for i in numba.prange(alignments.shape[0]):\n for cex in all_inputs:\n cost, failures, variance = adaptivesvt(cex, alignments[i])\n # hard requirement\n if failures > 0:\n results[i] = 1e8 * EPSILON * failures\n break\n\n # passed all privacy checks, we run accuracy check\n if np.abs(results[i]) < 1e-3:\n # run the original svt multiple times to get estimates of #true positive and #false positive\n total = 5000\n true_positive, false_positive, penalty = 0, 0, 0\n for _ in range(total):\n local_true, local_false = adaptivesvt_original(all_inputs[0], alignments[i])\n true_positive += local_true\n false_positive += local_false\n # if local_true + local_false < 0.8 * c:\n # results[i] += 1e8 * (c - (local_true + local_false))\n # break\n\n results[i] += -true_positive / total + false_positive / total\n # results[i] = variance #+ np.abs(cost - EPSILON) #- (count_false / LENGTH) + np.abs(count_true - N) / max(N, LENGTH - N)# - 20 * (count_true / LENGTH)\n return results\n\n\ndef main():\n \"\"\"\n This *tries* to find the vanilla SVT, which is | eta1: 2 and eta2: 3c |, however, sometimes it finds | eta1: 5 and eta2: (2c + 2) |, which violates the privacy cost, so the LENGTH should be set higher\n also, we need to set N <= size / 5, moreover, we add a initial counterexample with dq = -1 and c = 15, to maximize the privacy cost, so that we can avoid the wrong solution.\n also, we set the bounds for eta1 to be (0, 5), so that we don't need to extend query length further\n with the above setting, we can reliably find ('alignments: eta1: 1, eta2: Omega ? 1 - q_i_dist : 0', 'eta1: 3 | eta2: 3*c')\n \"\"\"\n start = time.time()\n # Initialize swarm\n options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}\n min_bounds = (np.array(\n [-10 for _ in range(LENGTH)] + [-1 for _ in range(LENGTH)] + [-100 for _ in range(2 * LENGTH + 1)] + [-10, 1]))\n max_bounds = (np.array(\n [10 for _ in range(LENGTH)] + [1 for _ in range(LENGTH)] + [100 for _ in range(2 * LENGTH + 1)] + [10, LENGTH]))\n bounds = (min_bounds, max_bounds)\n\n align_min_bounds = [-2 for _ in range(17)] + [0, 0, 0, 0, 0]\n align_max_bounds = [3 for _ in range(17)] + [5, 10, 10, 10, 10]\n alignment_bounds = (align_min_bounds, align_max_bounds)\n # q, dq, noise, T, N\n alignment = np.array([0 for _ in range(17)] + [1, 1, 0, 1, 0])\n # alignment = np.array([0, 0, 0, -1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 1, 0, 3, 6, 5, 5, 4])\n # print(convert_alignment_str(alignment))\n # alignment = np.array([1, 1, 0, -1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 8, 0, 4])\n # alignment = np.array([1, 1, 0, -1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 2, 0, 2, 0, 4, 9, 9, 7, 9])\n # alignment = np.array([1, 0, 1, -1, 0, 0, 0, 0, 1, -1, 0, 0, 0, 2, 0, 2, 0, 3, 3, 9, 5, 9])\n # alignment = np.array([1, 1, 0, -1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 2, 0, 2, 0, 3, 0, 6, 0, 3])\n # alignment = np.array([1, 0, 1, -1, 0, 0, 0, 1, 0, -1, -1, 1, 0, 2, 0, 2, 0, 3, 7, 4, 6, 3])\n # counterexamples = [(np.array([0 for _ in range(LENGTH)] + [-1 for _ in range(LENGTH)] + [0 for _ in range(2 * LENGTH + 1)] + [0, 15]))]\n q = np.array([-1000 for _ in range(int(0.75 * LENGTH))] + [1000 for _ in range(int(0.1 * LENGTH))] + [50 for _ in\n range(\n int(0.15 * LENGTH))])\n counterexamples = [\n np.concatenate((q, np.array([0 for _ in range(LENGTH)] + [0 for _ in range(2 * LENGTH + 1)] + [0, 20]))).astype(\n float)]\n iterations = 0\n tp, fp = 
adaptivesvt_original(counterexamples[0], alignment)\n print(tp, fp)\n\n alignment_options = {'c1': 0.5, 'c2': 0.3, 'w': 2}\n oh_strategy = {\"w\": 'exp_decay', \"c1\": 'nonlin_mod', \"c2\": 'lin_variation'}\n while True:\n print(f'{iterations} | Searching for cex')\n optimizer = ps.single.GlobalBestPSO(n_particles=10000, dimensions=4 * LENGTH + 3, options=options,\n bounds=bounds, ftol=0.1, ftol_iter=50)\n cost, pos = optimizer.optimize(lambda x: find_inputs(x, alignment), iters=500)\n counterexamples.append(np.array(pos))\n if cost > -1e-03:\n print(f'Final Alignment: {alignment}')\n print(f'Final alignment: {convert_alignment_str(alignment)}')\n for cex in counterexamples:\n cost, failures, variance = adaptivesvt(cex, alignment)\n print(cost, failures)\n break\n iterations += 1\n print(f'{iterations} | Searching for alignment')\n # q, dq, noise, T, N = unpack_inputs(pos)\n # print(f'q+noise: {q + noise[1:]} | dq+noise: {q + dq + noise[1:]} | T+noise: {T + noise[0]} | N: {N}')\n optimizer = ps.single.GlobalBestPSO(n_particles=50000, dimensions=len(alignment), options=options,\n oh_strategy=oh_strategy, bounds=alignment_bounds, ftol=0.1, ftol_iter=30)\n cost, pos = optimizer.optimize(lambda x: find_alignments(x, all_inputs=np.asarray(counterexamples)), iters=500)\n proof, holes = unpack_alignments(pos)\n alignment = np.concatenate((proof, holes))\n proof_str, hole_str = convert_alignment_str(alignment)\n print(proof_str, hole_str)\n iterations += 1\n print(f'Total Time: {time.time() - start}s')\n" ]
[ [ "numpy.square", "numpy.abs", "numpy.asarray", "numpy.linalg.norm", "numpy.concatenate", "numpy.random.laplace", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
gdaisukesuzuki/cudf
[ "aa5c8b686b1513dba7bce168200c1259f1eda908" ]
[ "python/cudf/cudf/core/column/lists.py" ]
[ "# Copyright (c) 2020-2021, NVIDIA CORPORATION.\n\nimport pickle\nfrom typing import Sequence\n\nimport numpy as np\nimport pyarrow as pa\n\nimport cudf\nfrom cudf._lib.copying import segmented_gather\nfrom cudf._lib.lists import (\n concatenate_list_elements,\n concatenate_rows,\n contains_scalar,\n count_elements,\n drop_list_duplicates,\n extract_element,\n sort_lists,\n)\nfrom cudf._lib.table import Table\nfrom cudf._typing import BinaryOperand, ColumnLike, Dtype, ScalarLike\nfrom cudf.core.buffer import Buffer\nfrom cudf.core.column import ColumnBase, as_column, column\nfrom cudf.core.column.methods import ColumnMethods, ParentType\nfrom cudf.core.dtypes import ListDtype\nfrom cudf.utils.dtypes import _is_non_decimal_numeric_dtype, is_list_dtype\n\n\nclass ListColumn(ColumnBase):\n dtype: ListDtype\n\n def __init__(\n self, size, dtype, mask=None, offset=0, null_count=None, children=(),\n ):\n super().__init__(\n None,\n size,\n dtype,\n mask=mask,\n offset=offset,\n null_count=null_count,\n children=children,\n )\n\n def __sizeof__(self):\n if self._cached_sizeof is None:\n n = 0\n if self.nullable:\n n += cudf._lib.null_mask.bitmask_allocation_size_bytes(\n self.size\n )\n\n child0_size = (self.size + 1) * self.base_children[\n 0\n ].dtype.itemsize\n current_base_child = self.base_children[1]\n current_offset = self.offset\n n += child0_size\n while type(current_base_child) is ListColumn:\n child0_size = (\n current_base_child.size + 1 - current_offset\n ) * current_base_child.base_children[0].dtype.itemsize\n current_offset = current_base_child.base_children[0][\n current_offset\n ]\n n += child0_size\n current_base_child = current_base_child.base_children[1]\n\n n += (\n current_base_child.size - current_offset\n ) * current_base_child.dtype.itemsize\n\n if current_base_child.nullable:\n n += cudf._lib.null_mask.bitmask_allocation_size_bytes(\n current_base_child.size\n )\n self._cached_sizeof = n\n\n return self._cached_sizeof\n\n def __setitem__(self, key, value):\n if isinstance(value, list):\n value = cudf.Scalar(value)\n if isinstance(value, cudf.Scalar):\n if value.dtype != self.dtype:\n raise TypeError(\"list nesting level mismatch\")\n elif value is cudf.NA:\n value = cudf.Scalar(value, dtype=self.dtype)\n else:\n raise ValueError(f\"Can not set {value} into ListColumn\")\n super().__setitem__(key, value)\n\n @property\n def base_size(self):\n # in some cases, libcudf will return an empty ListColumn with no\n # indices; in these cases, we must manually set the base_size to 0 to\n # avoid it being negative\n return max(0, len(self.base_children[0]) - 1)\n\n def binary_operator(\n self, binop: str, other: BinaryOperand, reflect: bool = False\n ) -> ColumnBase:\n \"\"\"\n Calls a binary operator *binop* on operands *self*\n and *other*.\n\n Parameters\n ----------\n self, other : list columns\n\n binop : binary operator\n Only \"add\" operator is currently being supported\n for lists concatenation functions\n\n reflect : boolean, default False\n If ``reflect`` is ``True``, swap the order of\n the operands.\n\n Returns\n -------\n Series : the output dtype is determined by the\n input operands.\n\n Examples\n --------\n >>> import cudf\n >>> gdf = cudf.DataFrame({'val': [['a', 'a'], ['b'], ['c']]})\n >>> gdf\n val\n 0 [a, a]\n 1 [b]\n 2 [c]\n >>> gdf['val'] + gdf['val']\n 0 [a, a, a, a]\n 1 [b, b]\n 2 [c, c]\n Name: val, dtype: list\n\n \"\"\"\n\n if isinstance(other.dtype, ListDtype):\n if binop == \"add\":\n return concatenate_rows(Table({0: self, 1: other}))\n else:\n 
raise NotImplementedError(\n \"Lists concatenation for this operation is not yet\"\n \"supported\"\n )\n else:\n raise TypeError(\"can only concatenate list to list\")\n\n @property\n def elements(self):\n \"\"\"\n Column containing the elements of each list (may itself be a\n ListColumn)\n \"\"\"\n return self.children[1]\n\n @property\n def offsets(self):\n \"\"\"\n Integer offsets to elements specifying each row of the ListColumn\n \"\"\"\n return self.children[0]\n\n def to_arrow(self):\n offsets = self.offsets.to_arrow()\n elements = (\n pa.nulls(len(self.elements))\n if len(self.elements) == self.elements.null_count\n else self.elements.to_arrow()\n )\n pa_type = pa.list_(elements.type)\n\n if self.nullable:\n nbuf = self.mask.to_host_array().view(\"int8\")\n nbuf = pa.py_buffer(nbuf)\n buffers = (nbuf, offsets.buffers()[1])\n else:\n buffers = offsets.buffers()\n return pa.ListArray.from_buffers(\n pa_type, len(self), buffers, children=[elements]\n )\n\n def set_base_data(self, value):\n if value is not None:\n raise RuntimeError(\n \"ListColumn's do not use data attribute of Column, use \"\n \"`set_base_children` instead\"\n )\n else:\n super().set_base_data(value)\n\n def serialize(self):\n header = {}\n frames = []\n header[\"type-serialized\"] = pickle.dumps(type(self))\n header[\"null_count\"] = self.null_count\n header[\"size\"] = self.size\n header[\"dtype\"], dtype_frames = self.dtype.serialize()\n header[\"dtype_frames_count\"] = len(dtype_frames)\n frames.extend(dtype_frames)\n\n sub_headers = []\n\n for item in self.children:\n sheader, sframes = item.serialize()\n sub_headers.append(sheader)\n frames.extend(sframes)\n\n if self.null_count > 0:\n frames.append(self.mask)\n\n header[\"subheaders\"] = sub_headers\n header[\"frame_count\"] = len(frames)\n\n return header, frames\n\n @classmethod\n def deserialize(cls, header, frames):\n\n # Get null mask\n if header[\"null_count\"] > 0:\n mask = Buffer(frames[-1])\n else:\n mask = None\n\n # Deserialize dtype\n dtype = pickle.loads(header[\"dtype\"][\"type-serialized\"]).deserialize(\n header[\"dtype\"], frames[: header[\"dtype_frames_count\"]]\n )\n\n # Deserialize child columns\n children = []\n f = header[\"dtype_frames_count\"]\n for h in header[\"subheaders\"]:\n fcount = h[\"frame_count\"]\n child_frames = frames[f : f + fcount]\n column_type = pickle.loads(h[\"type-serialized\"])\n children.append(column_type.deserialize(h, child_frames))\n f += fcount\n\n # Materialize list column\n return column.build_column(\n data=None,\n dtype=dtype,\n mask=mask,\n children=tuple(children),\n size=header[\"size\"],\n )\n\n @property\n def __cuda_array_interface__(self):\n raise NotImplementedError(\n \"Lists are not yet supported via `__cuda_array_interface__`\"\n )\n\n def _with_type_metadata(\n self: \"cudf.core.column.ListColumn\", dtype: Dtype\n ) -> \"cudf.core.column.ListColumn\":\n if isinstance(dtype, ListDtype):\n return column.build_list_column(\n indices=self.base_children[0],\n elements=self.base_children[1]._with_type_metadata(\n dtype.element_type\n ),\n mask=self.base_mask,\n size=self.size,\n offset=self.offset,\n null_count=self.null_count,\n )\n\n return self\n\n def leaves(self):\n if isinstance(self.elements, ListColumn):\n return self.elements.leaves()\n else:\n return self.elements\n\n @classmethod\n def from_sequences(\n cls, arbitrary: Sequence[ColumnLike]\n ) -> \"cudf.core.column.ListColumn\":\n \"\"\"\n Create a list column for list of column-like sequences\n \"\"\"\n data_col = column.column_empty(0)\n 
mask_col = []\n offset_col = [0]\n offset = 0\n\n # Build Data, Mask & Offsets\n for data in arbitrary:\n if cudf._lib.scalar._is_null_host_scalar(data):\n mask_col.append(False)\n offset_col.append(offset)\n else:\n mask_col.append(True)\n data_col = data_col.append(as_column(data))\n offset += len(data)\n offset_col.append(offset)\n\n offset_col = column.as_column(offset_col, dtype=\"int32\")\n\n # Build ListColumn\n res = cls(\n size=len(arbitrary),\n dtype=cudf.ListDtype(data_col.dtype),\n mask=cudf._lib.transform.bools_to_mask(as_column(mask_col)),\n offset=0,\n null_count=0,\n children=(offset_col, data_col),\n )\n return res\n\n\nclass ListMethods(ColumnMethods):\n \"\"\"\n List methods for Series\n \"\"\"\n\n _column: ListColumn\n\n def __init__(self, parent: ParentType):\n if not is_list_dtype(parent.dtype):\n raise AttributeError(\n \"Can only use .list accessor with a 'list' dtype\"\n )\n super().__init__(parent=parent)\n\n def get(self, index: int) -> ParentType:\n \"\"\"\n Extract element at the given index from each component\n\n Extract element from lists, tuples, or strings in\n each element in the Series/Index.\n\n Parameters\n ----------\n index : int\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s = cudf.Series([[1, 2, 3], [3, 4, 5], [4, 5, 6]])\n >>> s.list.get(-1)\n 0 3\n 1 5\n 2 6\n dtype: int64\n \"\"\"\n min_col_list_len = self.len().min()\n if -min_col_list_len <= index < min_col_list_len:\n return self._return_or_inplace(\n extract_element(self._column, index)\n )\n else:\n raise IndexError(\"list index out of range\")\n\n def contains(self, search_key: ScalarLike) -> ParentType:\n \"\"\"\n Returns boolean values indicating whether the specified scalar\n is an element of each row.\n\n Parameters\n ----------\n search_key : scalar\n element being searched for in each row of the list column\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s = cudf.Series([[1, 2, 3], [3, 4, 5], [4, 5, 6]])\n >>> s.list.contains(4)\n Series([False, True, True])\n dtype: bool\n \"\"\"\n search_key = cudf.Scalar(search_key)\n try:\n res = self._return_or_inplace(\n contains_scalar(self._column, search_key)\n )\n except RuntimeError as e:\n if (\n \"Type/Scale of search key does not\"\n \"match list column element type\" in str(e)\n ):\n raise TypeError(\n \"Type/Scale of search key does not\"\n \"match list column element type\"\n ) from e\n raise\n else:\n return res\n\n @property\n def leaves(self) -> ParentType:\n \"\"\"\n From a Series of (possibly nested) lists, obtain the elements from\n the innermost lists as a flat Series (one value per row).\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> a = cudf.Series([[[1, None], [3, 4]], None, [[5, 6]]])\n >>> a.list.leaves\n 0 1\n 1 <NA>\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n \"\"\"\n return self._return_or_inplace(\n self._column.leaves(), retain_index=False\n )\n\n def len(self) -> ParentType:\n \"\"\"\n Computes the length of each element in the Series/Index.\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s = cudf.Series([[1, 2, 3], None, [4, 5]])\n >>> s\n 0 [1, 2, 3]\n 1 None\n 2 [4, 5]\n dtype: list\n >>> s.list.len()\n 0 3\n 1 <NA>\n 2 2\n dtype: int32\n \"\"\"\n return self._return_or_inplace(count_elements(self._column))\n\n def take(self, lists_indices: ColumnLike) -> ParentType:\n \"\"\"\n Collect list elements based on given indices.\n\n Parameters\n ----------\n lists_indices: Series-like of lists\n Specifies what to collect from each 
row\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s = cudf.Series([[1, 2, 3], None, [4, 5]])\n >>> s\n 0 [1, 2, 3]\n 1 None\n 2 [4, 5]\n dtype: list\n >>> s.list.take([[0, 1], [], []])\n 0 [1, 2]\n 1 None\n 2 []\n dtype: list\n \"\"\"\n\n lists_indices_col = as_column(lists_indices)\n if not isinstance(lists_indices_col, ListColumn):\n raise ValueError(\"lists_indices should be list type array.\")\n if not lists_indices_col.size == self._column.size:\n raise ValueError(\n \"lists_indices and list column is of different \" \"size.\"\n )\n if not _is_non_decimal_numeric_dtype(\n lists_indices_col.children[1].dtype\n ) or not np.issubdtype(\n lists_indices_col.children[1].dtype, np.integer\n ):\n raise TypeError(\n \"lists_indices should be column of values of index types.\"\n )\n\n try:\n res = self._return_or_inplace(\n segmented_gather(self._column, lists_indices_col)\n )\n except RuntimeError as e:\n if \"contains nulls\" in str(e):\n raise ValueError(\"lists_indices contains null.\") from e\n raise\n else:\n return res\n\n def unique(self) -> ParentType:\n \"\"\"\n Returns the unique elements in each list.\n The ordering of elements is not guaranteed.\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s = cudf.Series([[1, 1, 2, None, None], None, [4, 4], []])\n >>> s\n 0 [1.0, 1.0, 2.0, nan, nan]\n 1 None\n 2 [4.0, 4.0]\n 3 []\n dtype: list\n >>> s.list.unique() # Order of list element is not guaranteed\n 0 [1.0, 2.0, nan]\n 1 None\n 2 [4.0]\n 3 []\n dtype: list\n \"\"\"\n\n if is_list_dtype(self._column.children[1].dtype):\n raise NotImplementedError(\"Nested lists unique is not supported.\")\n\n return self._return_or_inplace(\n drop_list_duplicates(\n self._column, nulls_equal=True, nans_all_equal=True\n )\n )\n\n def sort_values(\n self,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool = False,\n ) -> ParentType:\n \"\"\"\n Sort each list by the values.\n\n Sort the lists in ascending or descending order by some criterion.\n\n Parameters\n ----------\n ascending : bool, default True\n If True, sort values in ascending order, otherwise descending.\n na_position : {'first', 'last'}, default 'last'\n 'first' puts nulls at the beginning, 'last' puts nulls at the end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, ..., n - 1.\n\n Returns\n -------\n Series or Index with each list sorted\n\n Notes\n -----\n Difference from pandas:\n * Not supporting: `inplace`, `kind`\n\n Examples\n --------\n >>> s = cudf.Series([[4, 2, None, 9], [8, 8, 2], [2, 1]])\n >>> s.list.sort_values(ascending=True, na_position=\"last\")\n 0 [2.0, 4.0, 9.0, nan]\n 1 [2.0, 8.0, 8.0]\n 2 [1.0, 2.0]\n dtype: list\n \"\"\"\n if inplace:\n raise NotImplementedError(\"`inplace` not currently implemented.\")\n if kind != \"quicksort\":\n raise NotImplementedError(\"`kind` not currently implemented.\")\n if na_position not in {\"first\", \"last\"}:\n raise ValueError(f\"Unknown `na_position` value {na_position}\")\n if is_list_dtype(self._column.children[1].dtype):\n raise NotImplementedError(\"Nested lists sort is not supported.\")\n\n return self._return_or_inplace(\n sort_lists(self._column, ascending, na_position),\n retain_index=not ignore_index,\n )\n\n def concat(self, dropna=True) -> ParentType:\n \"\"\"\n For a column with at least one level of nesting, concatenate the\n lists in each row.\n\n Parameters\n ----------\n dropna: bool, optional\n If 
True (default), ignores top-level null elements in each row.\n If False, and top-level null elements are present, the resulting\n row in the output is null.\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s1\n 0 [[1.0, 2.0], [3.0, 4.0, 5.0]]\n 1 [[6.0, None], [7.0], [8.0, 9.0]]\n dtype: list\n >>> s1.list.concat()\n 0 [1.0, 2.0, 3.0, 4.0, 5.0]\n 1 [6.0, None, 7.0, 8.0, 9.0]\n dtype: list\n\n Null values at the top-level in each row are dropped by default:\n\n >>> s2\n 0 [[1.0, 2.0], None, [3.0, 4.0, 5.0]]\n 1 [[6.0, None], [7.0], [8.0, 9.0]]\n dtype: list\n >>> s2.list.concat()\n 0 [1.0, 2.0, 3.0, 4.0, 5.0]\n 1 [6.0, None, 7.0, 8.0, 9.0]\n dtype: list\n\n Use ``dropna=False`` to produce a null instead:\n\n >>> s2.list.concat(dropna=False)\n 0 None\n 1 [6.0, nan, 7.0, 8.0, 9.0]\n dtype: list\n \"\"\"\n try:\n result = concatenate_list_elements(self._column, dropna=dropna)\n except RuntimeError as e:\n if \"Rows of the input column must be lists.\" in str(e):\n raise ValueError(\n \"list.concat() can only be called on \"\n \"list columns with at least one level \"\n \"of nesting\"\n )\n return self._return_or_inplace(result)\n" ]
[ [ "numpy.issubdtype" ] ]
netneurolab/markello_transcriptome
[ "3abbc85596a5baacd93e5e9e56c906c9dbb080f3" ]
[ "scripts/generate_parameters.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGenerates CSVs containing all combinations of processing parameters to test\n\"\"\"\n\nimport itertools\nfrom pathlib import Path\nimport uuid\n\nimport pandas as pd\n\nDATA_DIR = Path('./data/derivatives').resolve()\n\n# generate giant list of lists of dict with different parameter combinations\n# that we're going to run `abagen` with\nOPTS = [\n [{key: val} for val in vals] for key, vals in [\n ('ibf_threshold', [0, 0.25, 0.5]),\n ('probe_selection', ['average', 'max_intensity', 'corr_variance',\n 'corr_intensity', 'diff_stability', 'rnaseq']),\n ('donor_probes', ['aggregate', 'independent', 'common']),\n ('lr_mirror', [None, 'bidirectional', 'leftright']),\n ('missing', [None, 'centroids']),\n ('tolerance', [0, 1, 2]),\n ('sample_norm', ['srs', 'zscore', None]),\n ('gene_norm', ['srs', 'zscore', None]),\n ('norm_matched', [True, False]),\n ('norm_structures', [True, False]),\n ('region_agg', ['donors', 'samples']),\n ('agg_metric', ['mean', 'median']),\n ('corrected_mni', [True, False]),\n ('reannotated', [True, False]),\n ]\n]\n# `probe_selection` methods for which `donor_probes` MUST be 'aggregate'\nAGG_METHODS = ['average', 'diff_stability', 'rnaseq']\n\n\ndef gen_params():\n \"\"\" Generates parameters.csv files for running abagen pipelines\n \"\"\"\n\n fnames = set()\n for atlas in ('dk', 'dksurf'):\n data = []\n for args in itertools.product(*OPTS):\n # make a dictionary with the given parameter combination\n kwargs = {k: v for d in args for k, v in d.items()}\n kwargs['atlas_name'] = atlas\n # this combination would raise an error\n if (kwargs['donor_probes'] != 'aggregate'\n and kwargs['probe_selection'] in AGG_METHODS):\n continue\n # get a unique filename\n while True:\n fname = f'{str(uuid.uuid4())}.h5'\n if fname not in fnames:\n fnames.add(fname)\n break\n kwargs['filename'] = fname\n data.append(kwargs)\n df = pd.DataFrame(data)\n df.to_csv(DATA_DIR / atlas / 'parameters.csv', index=False)\n\n\nif __name__ == '__main__':\n gen_params()\n" ]
[ [ "pandas.DataFrame" ] ]
AliGhadirzadeh/yumi_follow_trajectory
[ "d30b05c979d6dc4d79f92bb207da47d1d527f9f5" ]
[ "scripts/waypoint_to_trajectory.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nfrom time import sleep\nfrom scipy import interpolate\nimport argparse\nimport os\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--file-name\", type=str, default=None, help=\"Filename of the waypoint file with .npy extention\")\nparser.add_argument(\"--time-step\", type=float, default=0.5, help=\"The timestep between the waypoints\")\nparser.add_argument(\"--sample-rate\", type=float, default=20, help=\"The control sample rate (Hz)\")\n\nargs = parser.parse_args()\n\nassert(os.path.isfile(args.file_name))\n\nwaypoints = np.load(args.file_name)\n\nnpoints = waypoints.shape[0]\n\ntime_waypoints = np.arange(npoints) * args.time_step\nsampling_time = np.arange(time_waypoints[0], time_waypoints[npoints-1], 1.0/args.sample_rate)\nnsamples = sampling_time.shape[0]\ntraj_jpos = np.zeros((nsamples,7))\ntraj_jvel = np.zeros((nsamples,7))\ntraj_gripper = np.zeros((nsamples,1))\n\nfor j in range(7):\n tck = interpolate.CubicSpline(time_waypoints, waypoints[:,j],bc_type='clamped')\n traj_jpos[:, j] = tck(sampling_time)\n traj_jvel[:, j] = tck(sampling_time,1)\n\n plt.subplot(311)\n plt.plot(sampling_time, traj_jpos[:,j],'-*')\n #plt.plot(time_waypoints, waypoints[:,j], '*')\n plt.subplot(312)\n plt.plot(sampling_time, traj_jvel[:,j])\n\ntck = interpolate.CubicSpline(time_waypoints, (waypoints[:,7]-0.5)*10.0, bc_type='periodic')\ntraj_gripper = tck(sampling_time)\nplt.subplot(313)\nplt.plot(sampling_time, traj_gripper)\nplt.plot(time_waypoints, (waypoints[:,7]-0.5)*10.0, '*')\nplt.show()\n\ntraj = np.append(sampling_time.reshape(nsamples,1), traj_jpos, axis = 1)\ntraj = np.append(traj, traj_jvel, axis = 1)\ntraj = np.append(traj, traj_gripper.reshape(nsamples,1), axis = 1)\n\nnp.save('traj.npy', traj)\nnp.savetxt('traj.txt', traj)\n" ]
[ [ "numpy.arange", "numpy.save", "matplotlib.pyplot.plot", "numpy.append", "matplotlib.pyplot.subplot", "scipy.interpolate.CubicSpline", "numpy.savetxt", "numpy.load", "matplotlib.pyplot.show", "numpy.zeros" ] ]
tranlethaison/NumpyNeuralNet
[ "8a22784348b07e9414c70bdc3674d9a51dd81641" ]
[ "numpynn/losses.py" ]
[ "import numpy as np\n\n\nclass MSE:\n @staticmethod\n def f(y, a):\n return np.mean(0.5 * np.sum(np.square(y - a), axis=0))\n\n @staticmethod\n def df_da(y, a):\n \"\"\"Return partial derivative wrt `a` (element-wise).\"\"\"\n return a - y\n\n\nclass CrossEntropy:\n @staticmethod\n def f(y, a):\n return np.mean(-np.sum(y * np.log(a) + (1 - y) * np.log(1 - a), axis=0))\n\n @staticmethod\n def df_da(y, a):\n \"\"\"Return partial derivative wrt `a` (element-wise).\"\"\"\n return -(y / a + (y - 1) / (1 - a))\n\n\nclass LogLikelihood:\n @staticmethod\n def f(y, a):\n j = np.argmax(y, axis=0)\n losses = np.zeros(j.shape)\n\n for sid in range(len(losses)):\n losses[sid] = -np.log(a[j[sid], sid])\n return np.mean(losses)\n\n @staticmethod\n def df_da(y, a):\n \"\"\"Return partial derivative wrt `a` (element-wise).\"\"\"\n j = np.argmax(y, axis=0)\n r = np.zeros(a.shape)\n\n for sid in range(r.shape[-1]):\n r[:, sid] = -1 / a[j[sid], sid]\n return r\n" ]
[ [ "numpy.square", "numpy.log", "numpy.argmax", "numpy.mean", "numpy.zeros" ] ]
poc1673/ML-for-Networks
[ "201ca30ab51954a7b1471740eb404b98f1d26213" ]
[ "gcn-master/gcn-master/gcn/Forced Implementation.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 6 16:45:47 2020\r\n\r\n@author: USER\r\n\"\"\"\r\n\r\nimport os \r\n\r\nos.chdir(\"C://Users//USER//Dropbox//Projects//Work on Graphs//gcn-master//gcn-master//gcn\")\r\n\r\nimport setup_for_forced_procedures\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport time\r\nimport tensorflow as tf\r\n\r\n#from gcn.utils import *\r\n#from gcn.models import GCN, MLP\r\n\r\n\r\n\r\n#from utils import *\r\n#from models import GCN, MLP\r\n\r\n# Set random seed\r\nseed = 123\r\nnp.random.seed(seed)\r\ntf.set_random_seed(seed)\r\n\r\n# Settings\r\nflags = tf.app.flags\r\ndel FLAGS\r\nFLAGS = flags.FLAGS\r\nflags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed'\r\nflags.DEFINE_string('model', 'gcn', 'Model string.') # 'gcn', 'gcn_cheby', 'dense'\r\nflags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\r\nflags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')\r\nflags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')\r\nflags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')\r\nflags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')\r\nflags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')\r\nflags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')\r\n\r\n# Load data\r\nadj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)\r\n\r\n# Some preprocessing\r\nfeatures = preprocess_features(features)\r\nif FLAGS.model == 'gcn':\r\n support = [preprocess_adj(adj)]\r\n num_supports = 1\r\n model_func = GCN\r\nelif FLAGS.model == 'gcn_cheby':\r\n support = chebyshev_polynomials(adj, FLAGS.max_degree)\r\n num_supports = 1 + FLAGS.max_degree\r\n model_func = GCN\r\nelif FLAGS.model == 'dense':\r\n support = [preprocess_adj(adj)] # Not used\r\n num_supports = 1\r\n model_func = MLP\r\nelse:\r\n raise ValueError('Invalid argument for model: ' + str(FLAGS.model))\r\n\r\n# Define placeholders\r\nplaceholders = {\r\n 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],\r\n 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),\r\n 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\r\n 'labels_mask': tf.placeholder(tf.int32),\r\n 'dropout': tf.placeholder_with_default(0., shape=()),\r\n 'num_features_nonzero': tf.placeholder(tf.int32) # helper variable for sparse dropout\r\n}\r\n\r\n# Create model\r\nmodel = model_func(placeholders, input_dim=features[2][1], logging=True)\r\n\r\n# Initialize session\r\nsess = tf.Session()\r\n\r\n\r\n# Define model evaluation function\r\ndef evaluate(features, support, labels, mask, placeholders):\r\n t_test = time.time()\r\n feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)\r\n outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)\r\n return outs_val[0], outs_val[1], (time.time() - t_test)\r\n\r\n\r\n# Init variables\r\nsess.run(tf.global_variables_initializer())\r\n\r\ncost_val = []\r\n\r\n# Train model\r\nfor epoch in range(FLAGS.epochs):\r\n\r\n t = time.time()\r\n # Construct feed dictionary\r\n feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)\r\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\r\n\r\n # Training step\r\n outs = sess.run([model.opt_op, model.loss, model.accuracy], 
feed_dict=feed_dict)\r\n\r\n # Validation\r\n cost, acc, duration = evaluate(features, support, y_val, val_mask, placeholders)\r\n cost_val.append(cost)\r\n\r\n # Print results\r\n print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]),\r\n \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost),\r\n \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t))\r\n\r\n if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):\r\n print(\"Early stopping...\")\r\n break\r\n\r\nprint(\"Optimization Finished!\")\r\n\r\n# Testing\r\ntest_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, placeholders)\r\nprint(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost),\r\n \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration))\r\n" ]
[ [ "tensorflow.sparse_placeholder", "tensorflow.constant", "tensorflow.placeholder_with_default", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.set_random_seed" ] ]
vaibhav02498/NumberPlateDetection
[ "043183f5e7c0cf31ddfcf5179799c4d99f413ed4" ]
[ "tools.py" ]
[ "\"\"\"\nAuthor : Vaibhav Goyal : Automatic licence plate detection and recognition\n\n\"\"\"\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.externals import joblib\nfrom matplotlib import pyplot as plt\nimport scipy.ndimage\nimport numpy as np\nimport cv2\nimport os\n\n\ndef square(img):\n \"\"\"\n This function resize non square image to square one (height == width)\n :param img: input image as numpy array\n :return: numpy array\n \"\"\"\n\n # image after making height equal to width\n squared_image = img\n\n # Get image height and width\n h = img.shape[0]\n w = img.shape[1]\n\n # In case height superior than width\n if h > w:\n diff = h-w\n if diff % 2 == 0:\n x1 = np.zeros(shape=(h, diff//2))\n x2 = x1\n else:\n x1 = np.zeros(shape=(h, diff//2))\n x2 = np.zeros(shape=(h, (diff//2)+1))\n\n squared_image = np.concatenate((x1, img, x2), axis=1)\n\n # In case height inferior than width\n if h < w:\n diff = w-h\n if diff % 2 == 0:\n x1 = np.zeros(shape=(diff//2, w))\n x2 = x1\n else:\n x1 = np.zeros(shape=(diff//2, w))\n x2 = np.zeros(shape=((diff//2)+1, w))\n\n squared_image = np.concatenate((x1, img, x2), axis=0)\n\n return squared_image\n\n\ndef histogram_of_pixel_projection(img):\n \"\"\"\n This method is responsible for licence plate segmentation with histogram of pixel projection approach\n :param img: input image\n :return: list of image, each one contain a digit\n \"\"\"\n # list that will contains all digits\n caracrter_list_image = list()\n\n # img = crop(img)\n\n # Add black border to the image\n BLACK = [0, 0, 0]\n img = cv2.copyMakeBorder(img, 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=BLACK)\n\n # change to gray\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Change to numpy array format\n nb = np.array(gray)\n\n # Binarization\n nb[nb > 120] = 255\n nb[nb < 120] = 0\n\n # compute the sommation\n x_sum = cv2.reduce(nb, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)\n y_sum = cv2.reduce(nb, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)\n\n # rotate the vector x_sum\n x_sum = x_sum.transpose()\n\n # get height and weight\n x = gray.shape[1]\n y = gray.shape[0]\n\n # division the result by height and weight\n x_sum = x_sum / y\n y_sum = y_sum / x\n\n # x_arr and y_arr are two vector weight and height to plot histogram projection properly\n x_arr = np.arange(x)\n y_arr = np.arange(y)\n\n # convert x_sum to numpy array\n z = np.array(x_sum)\n\n # convert y_arr to numpy array\n w = np.array(y_sum)\n\n # convert to zero small details\n z[z < 15] = 0\n z[z > 15] = 1\n\n # convert to zero small details and 1 for needed details\n w[w < 20] = 0\n w[w > 20] = 1\n\n # vertical segmentation\n test = z.transpose() * nb\n\n # horizontal segmentation\n test = w * test\n\n # plot histogram projection result using pyplot\n horizontal = plt.plot(w, y_arr)\n vertical = plt.plot(x_arr ,z)\n\n plt.show(horizontal)\n plt.show(vertical)\n\n f = 0\n ff = z[0]\n t1 = list()\n t2 = list()\n for i in range(z.size):\n if z[i] != ff:\n f += 1\n ff = z[i]\n t1.append(i)\n rect_h = np.array(t1)\n\n f = 0\n ff = w[0]\n for i in range(w.size):\n if w[i] != ff:\n f += 1\n ff = w[i]\n t2.append(i)\n rect_v = np.array(t2)\n\n # take the appropriate height\n rectv = []\n rectv.append(rect_v[0])\n rectv.append(rect_v[1])\n max = int(rect_v[1]) - int(rect_v[0])\n for i in range(len(rect_v) - 1):\n diff2 = int(rect_v[i + 1]) - int(rect_v[i])\n\n if diff2 > max:\n rectv[0] = rect_v[i]\n rectv[1] = rect_v[i + 1]\n max = diff2\n\n # extract caracter\n for i in 
range(len(rect_h) - 1):\n\n # eliminate slice that can't be a digit, a digit must have width bigger then 8\n diff1 = int(rect_h[i + 1]) - int(rect_h[i])\n\n if (diff1 > 5) and (z[rect_h[i]] == 1):\n # cutting nb (image) and adding each slice to the list caracrter_list_image\n caracrter_list_image.append(nb[int(rectv[0]):int(rectv[1]), rect_h[i]:rect_h[i + 1]])\n\n # draw rectangle on digits\n cv2.rectangle(img, (rect_h[i], rectv[0]), (rect_h[i + 1], rectv[1]), (0, 255, 0), 1)\n\n # Show segmentation result\n image = plt.imshow(img)\n plt.show(image)\n\n return caracrter_list_image\n\n\ndef load_dataset(input_path):\n \"\"\"\n This method load images and their labels from a folder, each folder name is label for all images that contain\n the folder\n :param input_path: Folder path where all data exist\n :return: two list contains images and their labels\n \"\"\"\n\n # List that will contain images\n features_list = []\n\n # List that will contain labels\n features_label = []\n\n # Load all directory\n for root, dirs, files in os.walk(input_path):\n\n # Filter through every folder\n for dir in dirs:\n\n # Filter all files in the folder\n for filename in os.listdir(input_path + \"/\" + dir):\n\n # Load image\n training_digit_image = cv2.imread(input_path + \"/\" + dir + \"/\" + filename)\n\n # BGR to Gray\n gray = cv2.cvtColor(training_digit_image, cv2.COLOR_BGR2GRAY)\n\n # convert to one dim vector\n df = np.array(gray).ravel()\n\n # Append image and it's label to training list\n features_list.append(df)\n features_label.append(dir)\n\n return features_list, features_label\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.arange", "matplotlib.pyplot.plot", "numpy.concatenate", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show" ] ]
jhonore/jesse
[ "5b54e7abd20e3d5d5461dc0714e00bd64da468ac" ]
[ "tests/test_indicators.py" ]
[ "import numpy as np\n\nimport jesse.indicators as ta\nfrom jesse.factories import fake_range_candle_from_range_prices\nfrom .data.test_candles_indicators import *\n\n\ndef test_acosc():\n candles = np.array(test_candles_19)\n single = ta.acosc(candles)\n seq = ta.acosc(candles, sequential=True)\n\n assert type(single).__name__ == 'AC'\n assert round(single.osc, 2) == -21.97\n assert round(single.change, 2) == -9.22\n\n assert seq.osc[-1] == single.osc\n assert len(seq.osc) == len(candles)\n\n\ndef test_ad():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.ad(candles)\n seq = ta.ad(candles, sequential=True)\n assert round(single, 0) == 6346031\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_adosc():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.adosc(candles, fast_period=3, slow_period=10)\n seq = ta.adosc(candles, fast_period=3, slow_period=10, sequential=True)\n\n assert round(single / 1000000, 3) == -1.122\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_adx():\n candles = np.array(test_candles_10)\n\n single = ta.adx(candles)\n seq = ta.adx(candles, sequential=True)\n\n assert round(single) == 26\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_adxr():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.adxr(candles, period=14)\n seq = ta.adxr(candles, period=14, sequential=True)\n\n assert round(single, 0) == 36\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_alligator():\n candles = np.array(test_candles_19)\n single = ta.alligator(candles)\n seq = ta.alligator(candles, sequential=True)\n\n assert type(single).__name__ == 'AG'\n assert round(single.teeth, 0) == 236\n assert round(single.jaw, 0) == 233\n assert round(single.lips, 0) == 222\n\n assert seq.teeth[-1] == single.teeth\n assert len(seq.teeth) == len(candles)\n\n\ndef test_alma():\n candles = np.array(test_candles_19)\n single = ta.alma(candles)\n seq = ta.alma(candles, sequential=True)\n\n assert round(single, 2) == 179.17\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ao():\n candles = np.array(test_candles_19)\n single = ta.ao(candles)\n seq = ta.ao(candles, sequential=True)\n\n assert round(single.osc, 0) == -46\n assert len(seq[-1]) == len(candles)\n assert seq.osc[-1] == single.osc\n\n\ndef test_apo():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.apo(candles, fast_period=12, slow_period=26, matype=1)\n seq = ta.apo(candles, fast_period=12, slow_period=26, matype=1, sequential=True)\n\n assert round(single, 2) == -15.32\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_aroon():\n candles = np.array(test_candles_19)\n\n aroon = ta.aroon(candles, period=14)\n assert type(aroon).__name__ == 'AROON'\n assert round(aroon.down, 2) == 100\n assert round(aroon.up, 2) == 64.29\n\n seq_aroon = ta.aroon(candles, period=14, sequential=True)\n assert seq_aroon.down[-1] == aroon.down\n assert len(seq_aroon.down) == len(candles)\n assert len(seq_aroon.up) == len(candles)\n\n\ndef test_aroon_osc():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.aroonosc(candles, period=14)\n seq = ta.aroonosc(candles, period=14, sequential=True)\n\n assert round(single, 2) == -35.71\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef 
test_atr():\n candles = np.array(test_candles_2)\n\n single = ta.atr(candles)\n seq = ta.atr(candles, sequential=True)\n\n assert round(single, 1) == 2.8\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_avgprice():\n candles = np.array(test_candles_19)\n\n single = ta.avgprice(candles)\n seq = ta.avgprice(candles, sequential=True)\n\n assert round(single, 1) == 149.8\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_bandpass():\n candles = np.array(test_candles_19)\n\n bp = ta.bandpass(candles)\n assert type(bp).__name__ == 'BandPass'\n assert round(bp.bp, 2) == -7.56\n assert round(bp.bp_normalized, 2) == -0.29\n assert bp.signal == 1\n assert round(bp.trigger, 2) == -0.27\n\n seq_bp = ta.bandpass(candles, sequential=True)\n assert seq_bp.bp[-1] == bp.bp\n assert len(seq_bp.bp) == len(candles)\n assert len(seq_bp.bp_normalized) == len(candles)\n assert len(seq_bp.signal) == len(candles)\n assert len(seq_bp.trigger) == len(candles)\n\n\ndef test_beta():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.beta(candles)\n seq = ta.beta(candles, sequential=True)\n\n assert round(single, 2) == -0.31\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_bollinger_bands():\n candles = np.array(test_candles_11)\n\n bb = ta.bollinger_bands(candles)\n u, m, l = bb\n assert type(bb).__name__ == 'BollingerBands'\n assert round(u, 1) == 145.8\n assert round(m, 1) == 141.2\n assert round(l, 1) == 136.7\n\n seq_bb = ta.bollinger_bands(candles, sequential=True)\n assert seq_bb.upperband[-1] == u\n assert len(seq_bb.upperband) == len(candles)\n assert len(seq_bb.middleband) == len(candles)\n assert len(seq_bb.lowerband) == len(candles)\n\n\ndef test_bollinger_bands_width():\n candles = np.array(test_candles_12)\n\n single = ta.bollinger_bands_width(candles)\n seq = ta.bollinger_bands_width(candles, sequential=True)\n\n assert round(single, 4) == 0.0771\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_bop():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.bop(candles)\n seq = ta.bop(candles, sequential=True)\n\n assert round(single, 2) == -0.92\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_cc():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.cc(candles)\n seq = ta.cc(candles, sequential=True)\n\n assert round(single, 0) == -41\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_cci():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.cci(candles, period=14)\n seq = ta.cci(candles, period=14, sequential=True)\n\n assert round(single, 2) == -285.29\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_cfo():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.cfo(candles)\n seq = ta.cfo(candles, sequential=True)\n\n assert round(single, 2) == -66.53\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_cg():\n candles = np.array(test_candles_19)\n single = ta.cg(candles)\n seq = ta.cg(candles, sequential=True)\n assert round(single, 2) == -5.37\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_chande():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single_long = ta.chande(candles)\n seq_long = ta.chande(candles, sequential=True)\n\n 
single_short = ta.chande(candles, direction=\"short\")\n seq_short = ta.chande(candles, direction=\"short\", sequential=True)\n\n assert round(single_long, 0) == 213\n assert round(single_short, 0) == 165\n\n assert len(seq_short) == len(candles)\n assert len(seq_long) == len(candles)\n assert seq_long[-1] == single_long\n assert seq_short[-1] == single_short\n\n\ndef test_chop():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.chop(candles)\n seq = ta.chop(candles, sequential=True)\n\n assert round(single, 2) == 28.82\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_cksp():\n candles = np.array(test_candles_19)\n\n single = ta.cksp(candles)\n assert type(single).__name__ == 'CKSP'\n assert round(single.long, 2) == 247.62\n assert round(single.short, 2) == 127.89\n\n seq = ta.cksp(candles, sequential=True)\n assert seq.long[-1] == single.long\n assert seq.short[-1] == single.short\n assert len(seq.long) == len(candles)\n assert len(seq.short) == len(candles)\n\n\ndef test_cmo():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.cmo(candles, period=9)\n seq = ta.cmo(candles, period=9, sequential=True)\n\n assert round(single, 0) == -70\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_correl():\n candles = np.array(test_candles_19)\n\n single = ta.correl(candles)\n seq = ta.correl(candles, sequential=True)\n\n assert round(single, 2) == 0.58\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_correlation_cycle():\n candles = np.array(test_candles_19)\n\n single = ta.correlation_cycle(candles)\n assert type(single).__name__ == 'CC'\n assert round(single.real, 2) == 0.23\n assert round(single.imag, 2) == 0.38\n assert round(single.angle, 2) == -55.87\n assert round(single.state, 2) == -1\n\n seq = ta.correlation_cycle(candles, sequential=True)\n assert seq.real[-1] == single.real\n assert seq.imag[-1] == single.imag\n assert seq.angle[-1] == single.angle\n assert seq.state[-1] == single.state\n assert len(seq.real) == len(candles)\n assert len(seq.imag) == len(candles)\n assert len(seq.angle) == len(candles)\n assert len(seq.state) == len(candles)\n\n\ndef test_cvi():\n candles = np.array(test_candles_19)\n\n single = ta.cvi(candles)\n seq = ta.cvi(candles, sequential=True)\n\n assert round(single, 2) == 196.8\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\ndef test_cwma():\n candles = np.array(test_candles_19)\n\n single = ta.cwma(candles)\n seq = ta.cwma(candles, sequential=True)\n\n assert round(single, 2) == 182.8\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\ndef test_damiani_volatmeter():\n candles = np.array(test_candles_19)\n\n single = ta.damiani_volatmeter(candles)\n assert type(single).__name__ == 'DamianiVolatmeter'\n assert round(single.vol, 2) == 1.39\n assert round(single.anti, 2) == 0.93\n\n seq = ta.damiani_volatmeter(candles, sequential=True)\n assert seq.vol[-1] == single.vol\n assert seq.anti[-1] == single.anti\n assert len(seq.vol) == len(candles)\n assert len(seq.anti) == len(candles)\n\n\ndef test_dec_osc():\n candles = np.array(test_candles_19)\n single = ta.dec_osc(candles)\n seq = ta.dec_osc(candles, sequential=True)\n assert round(single, 0) == -20\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_decycler():\n candles = np.array(test_candles_19)\n single = ta.decycler(candles)\n seq = ta.decycler(candles, sequential=True)\n assert round(single, 
0) == 233\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_dema():\n candles = np.array(test_candles_18)\n\n single = ta.dema(candles, 9)\n seq = ta.dema(candles, 9, sequential=True)\n\n assert round(single, 0) == 165\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_devstop():\n candles = np.array(test_candles_19)\n\n single = ta.devstop(candles)\n seq = ta.devstop(candles, sequential=True)\n\n assert round(single, 0) == 248.0\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_di():\n candles = np.array(test_candles_19)\n\n single = ta.di(candles, period=14)\n assert type(single).__name__ == 'DI'\n assert round(single.plus, 2) == 10.80\n assert round(single.minus, 1) == 45.3\n\n seq = ta.di(candles, period=14, sequential=True)\n assert seq.plus[-1] == single.plus\n assert seq.minus[-1] == single.minus\n assert len(seq.plus) == len(candles)\n assert len(seq.minus) == len(candles)\n\n\ndef test_dm():\n candles = np.array(test_candles_19)\n\n single = ta.dm(candles, period=14)\n assert type(single).__name__ == 'DM'\n assert round(single.plus, 2) == 36.78\n assert round(single.minus, 1) == 154.1\n\n seq = ta.dm(candles, period=14, sequential=True)\n assert seq.plus[-1] == single.plus\n assert seq.minus[-1] == single.minus\n assert len(seq.plus) == len(candles)\n assert len(seq.minus) == len(candles)\n\n\ndef test_donchian():\n candles = np.array(test_candles_19)\n\n single = ta.donchian(candles, period=20)\n seq = ta.donchian(candles, period=20, sequential=True)\n\n assert type(single).__name__ == 'DonchianChannel'\n assert round(single.upperband, 2) == 277.20\n assert round(single.middleband, 2) == 189.20\n assert round(single.lowerband, 2) == 101.20\n\n assert seq.middleband[-1] == single.middleband\n assert len(seq.upperband) == len(candles)\n assert len(seq.middleband) == len(candles)\n assert len(seq.lowerband) == len(candles)\n\n\ndef test_dpo():\n candles = np.array(test_candles_18)\n\n single = ta.dpo(candles)\n seq = ta.dpo(candles, sequential=True)\n\n assert round(single, 0) == 22\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_dti():\n candles = np.array(test_candles_19)\n\n single = ta.dti(candles)\n seq = ta.dti(candles, sequential=True)\n\n assert round(single, 2) == -32.6\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_dx():\n candles = np.array(test_candles_18)\n\n single = ta.dx(candles)\n seq = ta.dx(candles, sequential=True)\n\n assert round(single, 0) == 67\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_edcf():\n candles = np.array(test_candles_19)\n single = ta.edcf(candles)\n seq = ta.edcf(candles, sequential=True)\n\n assert round(single, 2) == 197.49\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_efi():\n candles = np.array(test_candles_19)\n single = ta.efi(candles)\n seq = ta.efi(candles, sequential=True)\n assert round(single, 0) == -51628073\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ema():\n close_prices = [\n 204.23, 205.01, 196.9, 197.33, 198.7, 199.86, 202.23, 200.3, 212.3, 210.82603059, 220.84, 218.99,\n 212.71, 211.01, 213.19, 212.99724894,\n 212.67760477, 209.85, 187.2, 184.15, 176.99, 175.9, 178.99, 150.96, 133.85, 138.18, 126.32, 125.23,\n 114.79,\n 118.73, 110.74409879, 111.72, 124.04, 118.52, 113.64, 119.65, 117.11129288, 109.23, 110.77, 102.65,\n 91.99\n ]\n candles = fake_range_candle_from_range_prices(close_prices)\n\n single = 
ta.ema(candles, 8)\n seq = ta.ema(candles, 8, sequential=True)\n\n assert round(single, 3) == 108.546\n assert len(seq) == len(candles)\n assert seq[-1] == single\n assert np.isnan(ta.ema(candles, 400))\n\n\ndef test_emd():\n candles = np.array(test_candles_19)\n\n single = ta.emd(candles)\n seq = ta.emd(candles, sequential=True)\n\n assert type(single).__name__ == 'EMD'\n assert round(single.middleband, 2) == 3.12\n assert round(single.upperband, 2) == 1.21\n assert round(single.lowerband, 2) == -0.28\n\n assert seq.middleband[-1] == single.middleband\n assert seq.upperband[-1] == single.upperband\n assert seq.lowerband[-1] == single.lowerband\n assert len(seq.middleband) == len(candles)\n assert len(seq.upperband) == len(candles)\n assert len(seq.lowerband) == len(candles)\n\n\ndef test_emv():\n candles = np.array(test_candles_19)\n single = ta.emv(candles)\n seq = ta.emv(candles, sequential=True)\n assert round(single, 0) == -11\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\ndef test_epma():\n candles = np.array(test_candles_19)\n\n single = ta.epma(candles)\n seq = ta.epma(candles, sequential=True)\n\n assert round(single, 2) == 175.31\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\ndef test_er():\n candles = np.array(test_candles_19)\n single = ta.er(candles)\n seq = ta.er(candles, sequential=True)\n assert round(single, 2) == 0.02\n assert len(seq) == len(candles)\n assert round(seq[-1], 2) == round(single, 2)\n\n\ndef test_eri():\n candles = np.array(test_candles_19)\n single = ta.eri(candles)\n seq = ta.eri(candles, sequential=True)\n\n assert type(single).__name__ == 'ERI'\n assert round(single.bull, 2) == -7.14\n assert round(single.bear, 2) == -101.49\n\n assert seq.bull[-1] == single.bull\n assert len(seq.bull) == len(candles)\n assert len(seq.bear) == len(candles)\n\n\ndef test_fisher():\n candles = np.array(test_candles_19)\n single = ta.fisher(candles, period=9)\n seq = ta.fisher(candles, period=9, sequential=True)\n\n assert type(single).__name__ == 'FisherTransform'\n assert round(single.fisher, 2) == -1.77\n assert round(single.signal, 2) == -1.31\n\n assert seq.fisher[-1] == single.fisher\n assert len(seq.fisher) == len(candles)\n assert len(seq.signal) == len(candles)\n\n\ndef test_fosc():\n candles = np.array(test_candles_19)\n single = ta.fosc(candles)\n seq = ta.fosc(candles, sequential=True)\n assert round(single, 0) == -69\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_frama():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.frama(candles, window=10, SC=200, FC=10, )\n seq = ta.frama(candles, window=10, SC=200, FC=10, sequential=True)\n\n assert round(single, 0) == 219\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_fwma():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.fwma(candles)\n seq = ta.fwma(candles, sequential=True)\n\n assert round(single, 0) == 161\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_gator():\n candles = np.array(test_candles_19)\n single = ta.gatorosc(candles)\n seq = ta.gatorosc(candles, sequential=True)\n\n assert type(single).__name__ == 'GATOR'\n assert round(single.upper, 2) == 2.39\n assert round(single.upper_change, 2) == 0.98\n assert round(single.lower, 2) == -13.44\n assert round(single.lower_change, 2) == 5.06\n\n assert seq.upper[-1] == single.upper\n assert len(seq.upper) == len(candles)\n\n\ndef test_gauss():\n candles = 
np.array(test_candles_19)\n single = ta.gauss(candles)\n seq = ta.gauss(candles, sequential=True)\n assert round(single, 0) == 190\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_high_pass():\n candles = np.array(test_candles_19)\n single = ta.high_pass(candles)\n seq = ta.high_pass(candles, sequential=True)\n assert round(single, 0) == -106\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_high_pass_2_pole():\n candles = np.array(test_candles_19)\n single = ta.high_pass_2_pole(candles)\n seq = ta.high_pass_2_pole(candles, sequential=True)\n assert round(single, 0) == -101\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_hma():\n candles = np.array(test_candles_19)\n single = ta.hma(candles)\n seq = ta.hma(candles, sequential=True)\n\n assert round(single, 0) == 134\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ht_dcperiod():\n candles = np.array(test_candles_19)\n single = ta.ht_dcperiod(candles)\n seq = ta.ht_dcperiod(candles, sequential=True)\n\n assert round(single, 0) == 24\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ht_dcphase():\n candles = np.array(test_candles_19)\n single = ta.ht_dcphase(candles)\n seq = ta.ht_dcphase(candles, sequential=True)\n\n assert round(single, 0) == 10\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ht_phasor():\n candles = np.array(test_candles_19)\n single = ta.ht_phasor(candles)\n seq = ta.ht_phasor(candles, sequential=True)\n\n assert type(single).__name__ == 'IQ'\n assert round(single.inphase, 0) == 11\n assert round(single.quadrature, 0) == -52\n\n assert seq.inphase[-1] == single.inphase\n assert seq.quadrature[-1] == single.quadrature\n assert len(seq.inphase) == len(candles)\n assert len(seq.quadrature) == len(candles)\n\n\ndef test_ht_sine():\n candles = np.array(test_candles_19)\n single = ta.ht_sine(candles)\n seq = ta.ht_sine(candles, sequential=True)\n\n assert type(single).__name__ == 'SINEWAVE'\n assert round(single.sine, 2) == 0.18\n assert round(single.lead, 2) == 0.82\n\n assert seq.sine[-1] == single.sine\n assert seq.lead[-1] == single.lead\n assert len(seq.sine) == len(candles)\n assert len(seq.lead) == len(candles)\n\n\ndef test_ht_trendline():\n candles = np.array(test_candles_19)\n single = ta.ht_trendline(candles)\n seq = ta.ht_trendline(candles, sequential=True)\n\n assert round(single, 0) == 236\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ht_trendmode():\n candles = np.array(test_candles_19)\n single = ta.ht_trendmode(candles)\n seq = ta.ht_trendmode(candles, sequential=True)\n\n assert single == 1\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_hurst():\n candles = np.array(test_candles_19)\n\n try:\n import numba\n no_numba = False\n except ImportError:\n no_numba = True\n\n if not no_numba:\n rs = ta.hurst_exponent(candles, method=0)\n assert round(rs, 2) == 0.51\n\n dma = ta.hurst_exponent(candles, method=1)\n dsod = ta.hurst_exponent(candles, method=2)\n\n assert round(dma, 2) == 0.26\n assert round(dsod, 2) == 0.5\n\n\ndef test_hwma():\n candles = np.array(test_candles_19)\n single = ta.hwma(candles)\n seq = ta.hwma(candles, sequential=True)\n\n assert round(single, 2) == 159.8\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ichimoku_cloud():\n candles = np.array(test_candles_15)\n\n ic = ta.ichimoku_cloud(candles)\n\n current_conversion_line, current_base_line, span_a, span_b 
= ic\n\n assert type(ic).__name__ == 'IchimokuCloud'\n\n assert (current_conversion_line, current_base_line, span_a, span_b) == (8861.59, 8861.59, 8466.385, 8217.45)\n\n\ndef test_ichimoku_cloud_seq():\n candles = np.array(test_candles_15)\n\n conversion_line, base_line, span_a, span_b, lagging_line, future_span_a, future_span_b = ta.ichimoku_cloud_seq(\n candles)\n seq = ta.ichimoku_cloud_seq(candles, sequential=True)\n\n assert type(seq).__name__ == 'IchimokuCloud'\n assert (conversion_line, base_line, span_a, span_b, lagging_line, future_span_a, future_span_b) == (\n seq.conversion_line[-1], seq.base_line[-1], seq.span_a[-1], seq.span_b[-1], seq.lagging_line[-1],\n seq.future_span_a[-1], seq.future_span_b[-1])\n assert (conversion_line, base_line, span_a, span_b, lagging_line, future_span_a, future_span_b) == (\n 8861.59, 8861.59, 8465.25, 8204.715, 8730.0, 8861.59, 8579.49)\n assert len(seq.conversion_line) == len(candles)\n\n\ndef test_ift_rsi():\n # use the same candles as dema_candles\n candles = np.array(test_candles_19)\n\n single = ta.ift_rsi(candles)\n seq = ta.ift_rsi(candles, sequential=True)\n\n assert round(single, 2) == 0.89\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_itrend():\n candles = np.array(test_candles_19)\n single = ta.itrend(candles)\n seq = ta.itrend(candles, sequential=True)\n\n assert type(single).__name__ == 'ITREND'\n assert round(single.it, 0) == 223\n assert round(single.trigger, 0) == 182\n assert single.signal == -1\n\n assert seq.it[-1] == single.it\n assert seq.signal[-1] == single.signal\n assert seq.trigger[-1] == single.trigger\n assert len(seq.it) == len(candles)\n\n\ndef test_jma():\n # use the same candles as dema_candles\n candles = np.array(test_candles_19)\n\n single = ta.jma(candles)\n seq = ta.jma(candles, sequential=True)\n\n assert round(single, 2) == 156.72\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\ndef test_jsa():\n # use the same candles as dema_candles\n candles = np.array(test_candles_19)\n\n single = ta.jsa(candles)\n seq = ta.jsa(candles, sequential=True)\n\n assert round(single, 2) == 172.26\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\ndef test_kama():\n # use the same candles as dema_candles\n candles = np.array(test_candles_18)\n\n single = ta.kama(candles, 10)\n seq = ta.kama(candles, 10, sequential=True)\n\n assert round(single, 0) == 202\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_kaufmanstop():\n # use the same candles as dema_candles\n candles = np.array(test_candles_18)\n\n single = ta.kaufmanstop(candles)\n seq = ta.kaufmanstop(candles, sequential=True)\n\n assert round(single, 0) == 57\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_kdj():\n candles = np.array(test_candles_19)\n\n kd = ta.kdj(candles)\n k, d, j = kd\n assert type(kd).__name__ == 'KDJ'\n assert round(k, 1) == 13.3\n assert round(d, 1) == 15.7\n assert round(j, 1) == 8.6\n\n seq_kd = ta.kdj(candles, sequential=True)\n assert seq_kd.k[-1] == k\n assert len(seq_kd.k) == len(candles)\n assert len(seq_kd.d) == len(candles)\n assert len(seq_kd.j) == len(candles)\n\n\ndef test_kelner_channels():\n candles = np.array(test_candles_13)\n\n kc = ta.keltner(candles)\n u, m, l = kc\n assert type(kc).__name__ == 'KeltnerChannel'\n assert round(u, 1) == 145.0\n assert round(m, 1) == 139.7\n assert round(l, 1) == 134.4\n\n seq_kc = ta.keltner(candles, sequential=True)\n assert seq_kc.upperband[-1] == u\n assert len(seq_kc.upperband) == 
len(candles)\n assert len(seq_kc.middleband) == len(candles)\n assert len(seq_kc.lowerband) == len(candles)\n\n\ndef test_kst():\n candles = np.array(test_candles_19)\n\n single = ta.kst(candles)\n seq = ta.kst(candles, sequential=True)\n\n assert type(single).__name__ == 'KST'\n assert round(single.line, 2) == -93.38\n assert round(single.signal, 2) == 31.1\n\n assert seq.line[-1] == single.line\n assert seq.signal[-1] == single.signal\n assert len(seq.line) == len(candles)\n assert len(seq.signal) == len(candles)\n\n\ndef test_kurtosis():\n candles = np.array(test_candles_19)\n\n single = ta.kurtosis(candles)\n seq = ta.kurtosis(candles, sequential=True)\n\n assert round(single, 2) == -0.22\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_kvo():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.kvo(candles)\n seq = ta.kvo(candles, sequential=True)\n\n assert round(single / 10000000, 2) == -5.52\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_linearreg():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.linearreg(candles)\n seq = ta.linearreg(candles, sequential=True)\n\n assert round(single, 2) == 179.56\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_linearreg_angle():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.linearreg_angle(candles)\n seq = ta.linearreg_angle(candles, sequential=True)\n\n assert round(single, 2) == -78.42\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_linearreg_intercept():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.linearreg_intercept(candles)\n seq = ta.linearreg_intercept(candles, sequential=True)\n\n assert round(single, 2) == 242.98\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_linearreg_slope():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.linearreg_slope(candles)\n seq = ta.linearreg_slope(candles, sequential=True)\n\n assert round(single, 2) == -4.88\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_lrsi():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.lrsi(candles)\n seq = ta.lrsi(candles, sequential=True)\n\n assert round(single, 2) == 0.1\n assert round(seq[-2], 2) == 0.04\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ma():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.ma(candles, matype=9)\n seq = ta.ma(candles, matype=9, sequential=True)\n\n seq_average = ta.ma(seq, matype=9, sequential=True)\n\n assert round(single, 2) == 166.99\n assert round(seq[-2], 2) == 203.56\n assert round(seq_average[-2], 2) == 212.12\n assert len(seq) == len(candles)\n assert len(seq_average) == len(candles)\n assert seq[-1] == single\n\n\ndef test_maaq():\n candles = np.array(test_candles_19)\n\n single = ta.maaq(candles)\n seq = ta.maaq(candles, sequential=True)\n\n assert round(single, 2) == 205.95\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_mab():\n candles = np.array(test_candles_11)\n\n bb = ta.mab(candles)\n u, m, l = bb\n assert type(bb).__name__ == 'MAB'\n assert round(u, 1) == 142.8\n assert round(m, 1) == 140.0\n assert round(l, 1) == 130.1\n\n seq = ta.mab(candles, sequential=True)\n assert seq.upperband[-1] == 
u\n assert len(seq.upperband) == len(candles)\n assert len(seq.middleband) == len(candles)\n assert len(seq.lowerband) == len(candles)\n\n\ndef test_macd():\n candles = np.array(test_candles_19)\n\n single = ta.macd(candles, fast_period=12, slow_period=26, signal_period=9)\n seq = ta.macd(candles, fast_period=12, slow_period=26, signal_period=9, sequential=True)\n\n assert type(single).__name__ == 'MACD'\n assert round(single.macd, 2) == -15.32\n assert round(single.signal, 2) == -4.10\n assert round(single.hist, 2) == -11.22\n\n assert seq.macd[-1] == single.macd\n assert len(seq.macd) == len(candles)\n assert len(seq.signal) == len(candles)\n assert len(seq.hist) == len(candles)\n\n\ndef test_macdext():\n candles = np.array(test_candles_19)\n\n single = ta.macdext(candles, fast_period=12, fast_matype=0, slow_period=26, slow_matype=0, signal_period=9,\n signal_matype=0)\n seq = ta.macdext(candles, fast_period=12, fast_matype=0, slow_period=26, slow_matype=0, signal_period=9,\n signal_matype=0,\n sequential=True)\n\n assert type(single).__name__ == 'MACDEXT'\n assert round(single.macd, 2) == -23.12\n assert round(single.signal, 2) == -18.51\n assert round(single.hist, 2) == -4.61\n\n assert seq.macd[-1] == single.macd\n assert len(seq.macd) == len(candles)\n assert len(seq.signal) == len(candles)\n assert len(seq.hist) == len(candles)\n\n\ndef test_mama():\n candles = np.array(test_candles_19)\n\n mama = ta.mama(candles, 0.5, 0.05)\n assert type(mama).__name__ == 'MAMA'\n assert round(mama.mama, 2) == 206.78\n assert round(mama.fama, 2) == 230.26\n\n seq_mama = ta.mama(candles, 0.5, 0.05, sequential=True)\n assert seq_mama.mama[-1] == mama.mama\n assert len(seq_mama.mama) == len(candles)\n assert len(seq_mama.fama) == len(candles)\n\n\ndef test_marketfi():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.marketfi(candles)\n seq = ta.marketfi(candles, sequential=True)\n\n assert round(single * 100000, 2) == 2.47\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_mass():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.mass(candles)\n seq = ta.mass(candles, sequential=True)\n\n assert round(single, 2) == 5.76\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_mcginley_dynamic():\n candles = np.array(test_candles_19)\n\n single = ta.mcginley_dynamic(candles)\n seq = ta.mcginley_dynamic(candles, sequential=True)\n assert round(single, 2) == 107.82\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_mean_ad():\n candles = np.array(test_candles_19)\n\n single = ta.mean_ad(candles)\n seq = ta.mean_ad(candles, sequential=True)\n\n assert round(single, 2) == 23.82\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_median_ad():\n candles = np.array(test_candles_19)\n\n single = ta.median_ad(candles)\n seq = ta.median_ad(candles, sequential=True)\n\n assert round(single, 2) == 6.86\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_medprice():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.medprice(candles)\n seq = ta.medprice(candles, sequential=True)\n\n assert round(single, 1) == 148.4\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_mfi():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.mfi(candles, period=9)\n seq = ta.mfi(candles, period=9, sequential=True)\n\n assert 
round(single, 1) == 31.2\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_midpoint():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.midpoint(candles)\n seq = ta.midpoint(candles, sequential=True)\n\n assert round(single, 1) == 176.4\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_midprice():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.midprice(candles)\n seq = ta.midprice(candles, sequential=True)\n\n assert round(single, 1) == 176.6\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_minmax():\n candles = np.array(test_candles_19)\n single = ta.minmax(candles)\n seq = ta.minmax(candles, sequential=True)\n\n assert type(single).__name__ == 'EXTREMA'\n assert round(seq.is_max[-6], 2) == 251.93\n assert round(seq.is_min[-15], 2) == 210\n assert round(single.last_max, 2) == 251.93\n assert round(single.last_min, 2) == 210\n\n assert seq.last_max[-1] == single.last_max\n assert seq.last_min[-1] == single.last_min\n assert len(seq.is_min) == len(candles)\n\n\ndef test_mom():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.mom(candles, period=9)\n seq = ta.mom(candles, period=9, sequential=True)\n\n assert round(single, 2) == -116.09\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_msw():\n candles = np.array(test_candles_19)\n single = ta.msw(candles)\n seq = ta.msw(candles, sequential=True)\n\n assert type(single).__name__ == 'MSW'\n assert round(single.lead, 2) == -0.66\n assert round(single.sine, 2) == -1.0\n\n assert seq.lead[-1] == single.lead\n assert seq.sine[-1] == single.sine\n assert len(seq.sine) == len(candles)\n\n\ndef test_mwdx():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.mwdx(candles)\n seq = ta.mwdx(candles, sequential=True)\n\n assert round(single, 2) == 192.11\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_natr():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.natr(candles, period=14)\n seq = ta.natr(candles, period=14, sequential=True)\n\n assert round(single, 2) == 22.55\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_nma():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.nma(candles)\n seq = ta.nma(candles, sequential=True)\n\n assert round(single, 2) == 185.39\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_nvi():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.nvi(candles)\n seq = ta.nvi(candles, sequential=True)\n\n assert round(single, 2) == 722.58\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_obv():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.obv(candles)\n seq = ta.obv(candles, sequential=True)\n\n assert round(single / 1000000, 0) == -6\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_pattern_recognizion():\n candles = np.array(test_candles_6)\n res = ta.pattern_recognition(candles, pattern_type=\"CDLINVERTEDHAMMER\")\n seq = ta.pattern_recognition(candles, pattern_type=\"CDLINVERTEDHAMMER\", sequential=True)\n assert len(seq) == len(candles)\n assert res == 0\n\n candles = np.array(test_candles_9)\n res = ta.pattern_recognition(candles, 
pattern_type=\"CDLENGULFING\")\n assert res == 0\n\n candles = np.array(test_candles_8)\n res = ta.pattern_recognition(candles, pattern_type=\"CDLENGULFING\")\n assert res == 0\n\n candles = np.array(test_candles_7)\n res = ta.pattern_recognition(candles, pattern_type=\"CDLHAMMER\")\n assert res == 0\n\n candles = np.array(test_candles_5)\n res = ta.pattern_recognition(candles, pattern_type=\"CDLDOJI\")\n assert res == 1\n\n\ndef test_pfe():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.pfe(candles)\n seq = ta.pfe(candles, sequential=True)\n\n assert round(single, 2) == -211.85\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_pivot():\n candles = np.array(test_candles_19)\n single = ta.pivot(candles, mode=0)\n seq = ta.pivot(candles, mode=0, sequential=True)\n\n assert type(single).__name__ == 'PIVOT'\n\n assert seq.r1[-1] == single.r1\n assert len(seq.r1) == len(candles)\n assert len(seq.r2) == len(candles)\n assert len(seq.r3) == len(candles)\n assert len(seq.r4) == len(candles)\n assert len(seq.pp) == len(candles)\n assert len(seq.s1) == len(candles)\n assert len(seq.s2) == len(candles)\n assert len(seq.s3) == len(candles)\n assert len(seq.s4) == len(candles)\n\n\ndef test_pivot1():\n candles = np.array(test_candles_19)\n single = ta.pivot(candles, mode=1)\n seq = ta.pivot(candles, mode=1, sequential=True)\n\n assert type(single).__name__ == 'PIVOT'\n\n assert seq.r1[-1] == single.r1\n assert len(seq.r1) == len(candles)\n assert len(seq.r2) == len(candles)\n assert len(seq.r3) == len(candles)\n assert len(seq.r4) == len(candles)\n assert len(seq.pp) == len(candles)\n assert len(seq.s1) == len(candles)\n assert len(seq.s2) == len(candles)\n assert len(seq.s3) == len(candles)\n assert len(seq.s4) == len(candles)\n\n\ndef test_pivot2():\n candles = np.array(test_candles_19)\n single = ta.pivot(candles, mode=2)\n seq = ta.pivot(candles, mode=2, sequential=True)\n\n assert type(single).__name__ == 'PIVOT'\n\n assert seq.r1[-1] == single.r1\n assert len(seq.r1) == len(candles)\n assert len(seq.r2) == len(candles)\n assert len(seq.r3) == len(candles)\n assert len(seq.r4) == len(candles)\n assert len(seq.pp) == len(candles)\n assert len(seq.s1) == len(candles)\n assert len(seq.s2) == len(candles)\n assert len(seq.s3) == len(candles)\n assert len(seq.s4) == len(candles)\n\n\ndef test_pivot3():\n candles = np.array(test_candles_19)\n single = ta.pivot(candles, mode=3)\n seq = ta.pivot(candles, mode=3, sequential=True)\n\n assert type(single).__name__ == 'PIVOT'\n\n assert seq.r1[-1] == single.r1\n assert len(seq.r1) == len(candles)\n assert len(seq.r2) == len(candles)\n assert len(seq.r3) == len(candles)\n assert len(seq.r4) == len(candles)\n assert len(seq.pp) == len(candles)\n assert len(seq.s1) == len(candles)\n assert len(seq.s2) == len(candles)\n assert len(seq.s3) == len(candles)\n assert len(seq.s4) == len(candles)\n\n\ndef test_pivot4():\n candles = np.array(test_candles_19)\n single = ta.pivot(candles, mode=4)\n seq = ta.pivot(candles, mode=4, sequential=True)\n\n assert type(single).__name__ == 'PIVOT'\n\n assert seq.r1[-1] == single.r1\n assert len(seq.r1) == len(candles)\n assert len(seq.r2) == len(candles)\n assert len(seq.r3) == len(candles)\n assert len(seq.r4) == len(candles)\n assert len(seq.pp) == len(candles)\n assert len(seq.s1) == len(candles)\n assert len(seq.s2) == len(candles)\n assert len(seq.s3) == len(candles)\n assert len(seq.s4) == len(candles)\n\ndef test_pma():\n candles = 
np.array(test_candles_19)\n\n single = ta.pma(candles)\n assert type(single).__name__ == 'PMA'\n assert round(single.predict, 2) == 171.05\n assert round(single.trigger, 2) == 190.91\n\n seq = ta.pma(candles, sequential=True)\n assert seq.predict[-1] == single.predict\n assert seq.trigger[-1] == single.trigger\n assert len(seq.predict) == len(candles)\n assert len(seq.trigger) == len(candles)\n\n\ndef test_ppo():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.ppo(candles, fast_period=12, slow_period=26, matype=1)\n seq = ta.ppo(candles, fast_period=12, slow_period=26, matype=1, sequential=True)\n\n assert round(single, 0) == -7\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_pvi():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.pvi(candles)\n seq = ta.pvi(candles, sequential=True)\n\n assert round(single, 0) == 661\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_pwma():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.pwma(candles)\n seq = ta.pwma(candles, sequential=True)\n\n assert round(single, 2) == 193.82\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_qstick():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.qstick(candles)\n seq = ta.qstick(candles, sequential=True)\n\n assert round(single, 0) == -26.0\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_reflex():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.reflex(candles)\n seq = ta.reflex(candles, sequential=True)\n\n assert round(single, 2) == -0.55\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_roc():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.roc(candles, period=14)\n seq = ta.roc(candles, period=14, sequential=True)\n\n assert round(single, 2) == -52.67\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_rocp():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.rocp(candles, period=14)\n seq = ta.rocp(candles, period=14, sequential=True)\n\n assert round(single, 2) == -0.53\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_rocr():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.rocr(candles, period=14)\n seq = ta.rocr(candles, period=14, sequential=True)\n\n assert round(single, 2) == 0.47\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_rocr100():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.rocr100(candles, period=14)\n seq = ta.rocr100(candles, period=14, sequential=True)\n\n assert round(single, 2) == 47.33\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_roofing():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.roofing(candles)\n seq = ta.roofing(candles, sequential=True)\n\n assert round(single, 0) == -36\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_rsi():\n candles = np.array(test_candles_14)\n\n single = ta.rsi(candles)\n seq = ta.rsi(candles, sequential=True)\n\n assert round(single, 2) == 57.84\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_rsmk():\n candles = 
np.array(test_candles_4)\n candles2 = np.array(test_candles_19)\n\n rsmk = ta.rsmk(candles, candles2)\n assert type(rsmk).__name__ == 'RSMK'\n assert round(rsmk.indicator, 2) == 2.1\n assert round(rsmk.signal, 2) == -31.56\n\n rsmk_seq = ta.rsmk(candles, candles2, sequential=True)\n assert rsmk_seq.indicator[-1] == rsmk.indicator\n assert rsmk_seq.signal[-1] == rsmk.signal\n assert len(rsmk_seq.indicator) == len(candles)\n assert len(rsmk_seq.signal) == len(candles)\n\n\ndef test_rsx():\n candles = np.array(test_candles_19)\n\n single = ta.rsx(candles)\n seq = ta.rsx(candles, sequential=True)\n\n assert round(single, 2) == 27.81\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_rvi():\n candles = np.array(test_candles_19)\n\n single = ta.rvi(candles)\n seq = ta.rvi(candles, sequential=True)\n\n assert round(single, 2) == 27.99\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_safezonestop():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.safezonestop(candles)\n seq = ta.safezonestop(candles, sequential=True)\n\n assert round(single, 2) == -39.15\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_sar():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.sar(candles, acceleration=0.02, maximum=0.2)\n seq = ta.sar(candles, acceleration=0.02, maximum=0.2, sequential=True)\n\n assert round(single, 2) == 243.15\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_sar_ext():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.sarext(candles, start_value=0.02, offset_on_reverse=0, acceleration_init_long=0.02,\n acceleration_long=0.02,\n acceleration_max_long=0.2, acceleration_init_short=0.02, acceleration_short=0.02,\n acceleration_max_short=0.2)\n seq = ta.sarext(candles, start_value=0.02, offset_on_reverse=0, acceleration_init_long=0.02, acceleration_long=0.02,\n acceleration_max_long=0.2, acceleration_init_short=0.02, acceleration_short=0.02,\n acceleration_max_short=0.2,\n sequential=True)\n\n assert round(single, 2) == -243.15\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_sinwma():\n candles = np.array(test_candles_19)\n\n single = ta.sinwma(candles)\n seq = ta.sinwma(candles, sequential=True)\n\n assert round(single, 2) == 218.86\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_skew():\n candles = np.array(test_candles_19)\n\n single = ta.skew(candles)\n seq = ta.skew(candles, sequential=True)\n\n assert round(single, 2) == -1.05\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_sma():\n close_prices = [22.27, 22.19, 22.08, 22.17, 22.18, 22.13, 22.23, 22.43, 22.24, 22.29]\n candles = fake_range_candle_from_range_prices(close_prices)\n\n single = ta.sma(candles, 10)\n seq = ta.sma(candles, 10, sequential=True)\n\n assert round(single, 2) == 22.22\n assert len(seq) == len(candles)\n assert seq[-1] == single\n assert np.isnan(ta.sma(candles, 30))\n\n\ndef test_smma():\n candles = np.array(test_candles_19)\n single = ta.smma(candles)\n seq = ta.smma(candles, sequential=True)\n\n assert round(single, 0) == 192\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_sqwma():\n candles = np.array(test_candles_19)\n single = ta.sqwma(candles)\n seq = ta.sqwma(candles, sequential=True)\n\n assert round(single, 2) == 191.02\n assert len(seq) == len(candles)\n assert seq[-1] == 
single\n\n\ndef test_srsi():\n candles = np.array(test_candles_4)\n period = 14\n\n srsi = ta.srsi(candles)\n k, d = srsi\n assert type(srsi).__name__ == 'StochasticRSI'\n assert round(k, 2) == 21.36\n assert round(d, 2) == 12.4\n\n srsi = ta.srsi(candles, period=period, sequential=True)\n assert srsi.d[-1] == d\n assert srsi.k[-1] == k\n assert len(srsi.d) == len(candles)\n assert len(srsi.k) == len(candles)\n\n\ndef test_srwma():\n candles = np.array(test_candles_19)\n single = ta.srwma(candles)\n seq = ta.srwma(candles, sequential=True)\n\n assert round(single, 2) == 205.38\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_stc():\n candles = np.array(test_candles_19)\n single = ta.stc(candles)\n seq = ta.stc(candles, sequential=True)\n\n assert round(single, 2) == 0.0\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_stddev():\n candles = np.array(test_candles_19)\n single = ta.stddev(candles)\n seq = ta.stddev(candles, sequential=True)\n\n assert round(single, 0) == 37\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_stoch():\n candles = np.array(test_candles_3)\n\n stoch = ta.stoch(candles, fastk_period=14, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)\n k, d = stoch\n assert type(stoch).__name__ == 'Stochastic'\n assert round(k, 2) == 53.68\n assert round(d, 2) == 49.08\n\n stoch = ta.stoch(candles, fastk_period=14, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0,\n sequential=True)\n assert stoch.d[-1] == d\n assert stoch.k[-1] == k\n assert len(stoch.d) == len(candles)\n assert len(stoch.k) == len(candles)\n\n\ndef test_stochf():\n candles = np.array(test_candles_19)\n\n single = ta.stochf(candles, fastk_period=5, fastd_period=3, fastd_matype=0)\n seq = ta.stochf(candles, fastk_period=5, fastd_period=3, fastd_matype=0, sequential=True)\n\n assert type(single).__name__ == 'StochasticFast'\n assert round(single.k, 2) == 4.87\n assert round(single.d, 2) == 13.5\n\n assert seq.k[-1] == single.k\n assert len(seq.k) == len(candles)\n assert len(seq.d) == len(candles)\n\n\ndef test_supersmoother():\n candles = np.array(test_candles_19)\n single = ta.supersmoother(candles)\n seq = ta.supersmoother(candles, sequential=True)\n assert round(single, 0) == 201\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_supersmoother_3_pole():\n candles = np.array(test_candles_19)\n single = ta.supersmoother_3_pole(candles)\n seq = ta.supersmoother_3_pole(candles, sequential=True)\n assert round(single, 0) == 207\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_supertrend():\n candles = np.array(test_candles_19)\n\n single = ta.supertrend(candles, period=10, factor=3)\n seq = ta.supertrend(candles, period=10, factor=3, sequential=True)\n\n assert type(single).__name__ == 'SuperTrend'\n assert round(single.trend, 2) == 228.45\n assert seq.changed[-16] == True\n assert seq.changed[-1] == False\n\n assert seq.trend[-1] == single.trend\n assert len(seq.trend) == len(candles)\n assert len(seq.changed) == len(candles)\n\n\ndef test_swma():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.swma(candles)\n seq = ta.swma(candles, sequential=True)\n\n assert round(single, 2) == 189.35\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_t3():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.t3(candles, period=5, vfactor=0.7)\n seq = ta.t3(candles, 
period=5, vfactor=0.7, sequential=True)\n\n assert round(single, 0) == 194\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_tema():\n # use the same candles as trix_candles\n candles = np.array(test_candles_17)\n\n single = ta.tema(candles)\n seq = ta.tema(candles, sequential=True)\n\n assert round(single, 2) == 213.2\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_trange():\n candles = np.array(test_candles_19)\n\n single = ta.trange(candles)\n seq = ta.trange(candles, sequential=True)\n\n assert round(single, 2) == 94.35\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_trendflex():\n candles = np.array(test_candles_19)\n\n single = ta.trendflex(candles)\n seq = ta.trendflex(candles, sequential=True)\n\n assert round(single, 2) == -1.48\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_trima():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.trima(candles, period=9)\n seq = ta.trima(candles, period=9, sequential=True)\n\n assert round(single, 0) == 211\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_trix():\n candles = np.array(test_candles_17)\n\n single = ta.trix(candles)\n seq = ta.trix(candles, sequential=True)\n\n assert round(single, 2) == 30.87\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_tsf():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.tsf(candles)\n seq = ta.tsf(candles, sequential=True)\n\n assert round(single, 1) == 174.7\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_tsi():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.tsi(candles)\n seq = ta.tsi(candles, sequential=True)\n\n assert round(single, 1) == -20.5\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ttm_trend():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.ttm_trend(candles)\n seq = ta.ttm_trend(candles, sequential=True)\n\n assert single == False\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_typprice():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.typprice(candles)\n seq = ta.typprice(candles, sequential=True)\n\n assert round(single, 1) == 134.9\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ui():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.ui(candles)\n seq = ta.ui(candles, sequential=True)\n\n assert round(single, 1) == 23.7\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_ultosc():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.ultosc(candles, timeperiod1=7, timeperiod2=14, timeperiod3=28)\n seq = ta.ultosc(candles, timeperiod1=7, timeperiod2=14, timeperiod3=28, sequential=True)\n\n assert round(single, 2) == 31.37\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_var():\n candles = np.array(test_candles_16)\n single = ta.var(candles)\n seq = ta.var(candles, sequential=True)\n\n assert round(single, 2) == 69.96\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_vi():\n candles = np.array(test_candles_19)\n\n single = ta.vi(candles)\n seq = ta.vi(candles, sequential=True)\n\n assert type(single).__name__ == 'VI'\n assert 
round(single.plus, 2) == 0.66\n assert round(single.minus, 2) == 1.13\n\n assert seq.plus[-1] == single.plus\n assert len(seq.plus) == len(candles)\n assert len(seq.minus) == len(candles)\n\n\ndef test_vidya():\n candles = np.array(test_candles_16)\n single = ta.vidya(candles)\n seq = ta.vidya(candles, sequential=True)\n\n assert round(single, 2) == 194.75\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_vlma():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.vlma(candles)\n seq = ta.vlma(candles, sequential=True)\n\n assert round(single, 2) == 208.1\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_vosc():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.vosc(candles)\n seq = ta.vosc(candles, sequential=True)\n\n assert round(single, 2) == 38.18\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_voss():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.voss(candles)\n seq = ta.voss(candles, sequential=True)\n\n assert type(single).__name__ == 'VossFilter'\n assert round(single.voss, 2) == -30.71\n assert round(single.filt, 2) == -5.98\n\n assert seq.voss[-1] == single.voss\n assert seq.filt[-1] == single.filt\n assert len(seq.voss) == len(candles)\n assert len(seq.filt) == len(candles)\n\n\ndef test_vpci():\n candles = np.array(test_candles_19)\n single = ta.vpci(candles)\n seq = ta.vpci(candles, sequential=True)\n\n assert round(single.vpci, 2) == -29.46\n assert round(single.vpcis, 2) == -14.4\n assert len(seq.vpci) == len(candles)\n assert seq.vpci[-1] == single.vpci\n\n\ndef test_vpt():\n candles = np.array(test_candles_19)\n single = ta.vpt(candles)\n seq = ta.vpt(candles, sequential=True)\n\n assert round(single, 2) == -1733928.99\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_vpwma():\n candles = np.array(test_candles_19)\n single = ta.vpwma(candles)\n seq = ta.vpwma(candles, sequential=True)\n\n assert round(single, 2) == 206.52\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_vwap():\n candles = np.array(test_candles_19)\n single = ta.vwap(candles)\n seq = ta.vwap(candles, sequential=True)\n\n assert round(single, 2) == 134.86\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_vwma():\n candles = np.array(test_candles_16)\n single = ta.vwma(candles)\n seq = ta.vwma(candles, sequential=True)\n\n assert round(single, 2) == 195.86\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_vwmacd():\n candles = np.array(test_candles_19)\n\n single = ta.vwmacd(candles, fast_period=12, slow_period=26, signal_period=9)\n seq = ta.vwmacd(candles, fast_period=12, slow_period=26, signal_period=9, sequential=True)\n\n assert type(single).__name__ == 'VWMACD'\n assert round(single.macd, 2) == -31.37\n assert round(single.signal, 2) == -19.64\n assert round(single.hist, 2) == -11.73\n\n assert seq.macd[-1] == single.macd\n assert len(seq.macd) == len(candles)\n assert len(seq.signal) == len(candles)\n assert len(seq.hist) == len(candles)\n\n\ndef test_wad():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.wad(candles)\n seq = ta.wad(candles, sequential=True)\n\n assert round(single, 2) == -122.14\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_wclprice():\n # use the same candles as mama_candles\n candles = 
np.array(test_candles_19)\n\n single = ta.wclprice(candles)\n seq = ta.wclprice(candles, sequential=True)\n\n assert round(single, 2) == 128.1\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_wilders():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.wilders(candles)\n seq = ta.wilders(candles, sequential=True)\n\n assert round(single, 2) == 192.11\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_willr():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.willr(candles, period=9)\n seq = ta.willr(candles, period=9, sequential=True)\n\n assert round(single, 2) == -95.61\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_wma():\n # use the same candles as mama_candles\n candles = np.array(test_candles_19)\n\n single = ta.wma(candles, period=9)\n seq = ta.wma(candles, period=9, sequential=True)\n\n assert round(single, 2) == 189.13\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_zlema():\n candles = np.array(test_candles_19)\n single = ta.zlema(candles)\n seq = ta.zlema(candles, sequential=True)\n\n assert round(single, 0) == 189\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_zscore():\n candles = np.array(test_candles_19)\n single = ta.zscore(candles)\n seq = ta.zscore(candles, sequential=True)\n\n assert round(single, 1) == -3.2\n assert len(seq) == len(candles)\n assert seq[-1] == single\n\n\ndef test_wt():\n candles = np.array(wavetrend_candles)\n single = ta.wt(candles)\n seq = ta.wt(candles, sequential=True)\n\n assert type(single).__name__ == 'Wavetrend'\n assert round(single.wt1, 2) == -27.25\n assert round(single.wt2, 2) == -15.51\n assert not single.wtCrossUp\n assert single.wtCrossDown\n assert single.wtCrossUp != single.wtCrossDown\n assert not single.wtOversold\n assert not single.wtOverbought\n assert round(single.wtVwap, 2) == -11.74\n\n assert seq.wt1[-1] == single.wt1\n assert seq.wt2[-1] == single.wt2\n assert seq.wtCrossUp[-1] is single.wtCrossUp\n assert seq.wtCrossDown[-1] is single.wtCrossDown\n assert seq.wtOversold[-1] is single.wtOversold\n assert seq.wtOverbought[-1] is single.wtOverbought\n assert seq.wtVwap[-1] == single.wtVwap\n assert len(seq.wt1) == len(candles)\n" ]
[ [ "numpy.array" ] ]
expeditiona/expeditiona.github.io
[ "85c6cbddf724d87ab1c7f924d717a6aadc23286e" ]
[ "Activity327Folder/327 q2.py" ]
[ "import matplotlib.pyplot as plt\nf = open(\"3.2.7 Investigating Data - Ques 2.csv\", 'r') #Open file\n\nspot = [] #Create empty lists\npct = []\nshotpct = []\ncombined = [] \n\nlineNumber = 1\n\nfor line in f: #Iterate through lines of file\n line = line.strip()\n if lineNumber > 1 and lineNumber < 9: \n spot.append(line.split(\",\")[0]) #Append data to lists\n pct.append(line.split(\",\")[1])\n shotpct.append(line.split(\",\")[2])\n lineNumber += 1 \n\nfor x in range(7):\n combined.append(spot[x] + ': ' + shotpct[x] + ' FG pct.') #Format labels\n \n\n\ncolors = ['#fdb927', '#006bb6', '#fdb927','#006bb6', '#fdb927', '#006bb6','#ffffff'] #Warriors colors\n\n\n\nfig, ax = plt.subplots(1, 1)\nexplode = (0.1,0.1,0.1,0.1,0.1,0.1,0.1) #Explode pie chart\n\nax.pie(pct, labels=combined, colors=colors, autopct='%.0f%%', explode = explode)\n\nax.set_aspect(1) #Square axes for round plot\n\nax.set_title('Shooting Percentage and Distribution of Stephen Curry\\'s Shots in 2015-16')\n\nfig.show() #Display pie chart" ]
[ [ "matplotlib.pyplot.subplots" ] ]
ACasey13/senpy
[ "00b5403dc95a0741abfc56c3a3e2a1e6247f15d4" ]
[ "senpy/logistic_funcs.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 28 15:29:17 2020\n\n@author: alexc\n\"\"\"\n\nimport numpy as np\nfrom scipy.special import expit\n\nSTABILITY = 1E-8\n\ndef z(x, mu, sigma):\n \"\"\"\n Returns the z-location of the given stimulus levels\n \"\"\"\n return (x - mu) / sigma\n \ndef prob(z):\n \"\"\"\n Cumulative distribution function (CDF) for a logistic distribution\n \"\"\"\n return expit(z)\n \n\ndef pred(pts, theta, inverted):\n \"\"\"\n Returns the predictive probability at given pts under an assumed latent logistic distribution.\n \"\"\"\n mu, sigma = theta\n P = prob(z(pts, mu, sigma))\n if inverted:\n return np.maximum(1-P, 0.0)\n else:\n return P\n \n\ndef cost(theta, X, Y):\n \"\"\"\n Cost function under an assumed latent logistic distribution.\n\n Parameters\n ----------\n theta : array\n theta[0] is mu\n theta[1] is sigma\n X : column vector\n The tested stimulus level(s)\n Y : column vector with values [0 or 1]\n The response\n\n Returns\n -------\n numeric\n The cost, or negative log likelihood, of the data under theta.\n\n \"\"\"\n mu = theta[0]\n sigma = theta[1]\n p_values = prob(z(X, mu, sigma))\n term_1 = -1 * Y * np.log(p_values + STABILITY)\n term_2 = -1 * (1 - Y) * np.log(1 - p_values + STABILITY)\n return np.sum(term_1 + term_2)\n\ndef cost_deriv(theta, X, Y):\n \"\"\"\n Derivative with respect to theta of the cost function under an assumed latent logistic distribution.\n\n Parameters\n ----------\n theta : array-like [1x2]\n theta[0] is mu\n theta[1] is sigma\n X : column vector\n The tested stimulus level(s)\n Y : column vector with values [0 or 1]\n The response\n\n Returns\n -------\n array-like\n The gradient vector [1x2] of cost with respect to theta.\n\n \"\"\"\n mu = theta[0]\n sigma = theta[1]\n z_values = z(X, mu, sigma)\n p_values = prob(z_values)\n q_values = 1-p_values\n term_1 = Y * q_values / sigma\n term_2 = -1 * (1-Y) * p_values / sigma\n term_3 = Y * q_values * z_values / sigma\n term_4 = -1 * (1-Y) * p_values * z_values / sigma\n return np.sum(np.hstack((term_1 + term_2, term_3 + term_4)), axis=0)\n\ndef opt_config(X):\n low = np.min(X)\n high = np.max(X)\n sigma_0 = (high - low)/4\n sigma_low = max(sigma_0/2, .00001)\n sigma_high = sigma_0 + sigma_0/2\n bounds = [(None, None), (.00001, None)]\n return [low, high, sigma_low, sigma_high, bounds]\n\ndef estimate_names(latex=False):\n if latex:\n return ['$\\mu$', '$\\sigma$']\n else:\n return ['mu', 'sigma']\n\ndef Hessian(X, y, mu, sigma):\n z_values = z(X, mu, sigma)\n P = prob(z_values)\n Q = 1 - P\n exp = np.nan_to_num(np.exp(-z_values))\n \n t = -1 * exp * (P/sigma)**2\n t1 = y*Q - (1-y)*P\n \n a_11 = np.sum(t)\n a_12 = np.sum(t*z_values + (1/sigma**2) * t1)\n a_22 = np.sum(t*z_values**2 + (2*z_values/sigma**2) * t1)\n \n return np.array([[a_11, a_12], [a_12, a_22]])\n\ndef cdf_deriv(x_pts, mu, sigma):\n \"\"\"\n Derivative of the latent distribution cdf with respect to mu and sigma.\n\n Parameters\n ----------\n x_pts : array [n_pts x 1]\n An array of the levels at which to compute the gradient.\n mu : numeric\n The center of the normal distribution.\n sigma : numeric (strictly positive)\n The standard deviation of the normal distribution.\n\n Returns\n -------\n array [n_pts x 2]\n The gradient of the cdf at n points with respect to mu and sigma.\n\n \"\"\"\n z_values = z(x_pts, mu, sigma)\n exp = np.nan_to_num(np.exp(-z_values))\n P = prob(z_values)\n t = exp * P**2 * (-1/sigma)\n dmu = t\n dsig = t * z_values\n return np.hstack((dmu, dsig))\n\ndef expected_info(X, mu, 
sigma):\n z_values = z(X, mu, sigma)\n exp = np.nan_to_num(np.exp(-z_values))\n p = prob(z_values)\n \n a_11 = (p/sigma)**2 * exp\n a_12 = np.sum(a_11 * z_values)\n a_22 = np.sum(a_11 * z_values**2)\n info = np.array([[np.sum(a_11), a_12], [a_12, a_22]])\n return info\n\nfunction_dictionary = {'cost': cost,\n 'cost_deriv': cost_deriv,\n 'opt_config': opt_config,\n 'pred': pred,\n 'estimate_names': estimate_names,\n 'Hessian': Hessian,\n 'cdf_deriv': cdf_deriv,\n 'info': expected_info}\n" ]
[ [ "numpy.hstack", "numpy.log", "numpy.maximum", "scipy.special.expit", "numpy.min", "numpy.max", "numpy.exp", "numpy.array", "numpy.sum" ] ]
GT-AcerZhang/paddle-voice
[ "b243144a86e9d34cabe8a5def9e8a2dae013b3fa" ]
[ "src/train.py" ]
[ "get_ipython().system('pip install paddlex -i https://mirror.baidu.com/pypi/simple')\n\n#开始模型的训练\n\n# 设置使用0号GPU卡\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport paddlex as pdx\n\n# 图像预处理+数据增强\nfrom paddlex.det import transforms\ntrain_transforms = transforms.Compose([\n transforms.MixupImage(mixup_epoch=250),\n transforms.RandomDistort(),\n transforms.RandomExpand(),\n transforms.RandomCrop(),\n transforms.Resize(target_size=500, interp='RANDOM'),\n transforms.RandomHorizontalFlip(),\n transforms.Normalize(),\n])\n\neval_transforms = transforms.Compose([\n transforms.Resize(target_size=500, interp='CUBIC'),\n transforms.Normalize(),\n])\n\n# 数据迭代器的定义\ntrain_dataset = pdx.datasets.VOCDetection(\n data_dir='dataset',\n file_list='dataset/train_list.txt',\n label_list='dataset/labels.txt',\n transforms=train_transforms,\n shuffle=True)\neval_dataset = pdx.datasets.VOCDetection(\n data_dir='dataset',\n file_list='dataset/val_list.txt',\n label_list='dataset/labels.txt',\n transforms=eval_transforms)\n\n# 开始训练\nnum_classes = len(train_dataset.labels)\nmodel = pdx.det.YOLOv3(num_classes=num_classes, backbone='DarkNet53')\nmodel.train(\n num_epochs=200,\n train_dataset=train_dataset,\n train_batch_size=16,\n eval_dataset=eval_dataset,\n learning_rate=0.0001,\n warmup_steps = 500,\n lr_decay_epochs=[50, 170],\n save_interval_epochs=10,\n save_dir='output/yolov3_darknet53')\n\n# 开始预测\nimport matplotlib.pyplot as plt\nimport cv2\n\nimg1 = cv2.imread('test.jpg')\nb,g,r = cv2.split(img1)\nimg1 = cv2.merge([r,g,b])\nget_ipython().run_line_magic('matplotlib', 'inline')\nplt.imshow(img1)\n\n#加载模型\nimage_name = 'test.jpg'\nresult = model.predict(image_name)\npdx.det.visualize(image_name, result, threshold=0.5, save_dir='PrePicture')\n\nimg2 = cv2.imread('PrePicture/visualize_test.jpg')\nb,g,r = cv2.split(img2)\nimg2 = cv2.merge([r,g,b])\nget_ipython().run_line_magic('matplotlib', 'inline')\nplt.imshow(img2)\n" ]
[ [ "matplotlib.pyplot.imshow" ] ]
anotherjoshsmith/NovoNordisk_Capstone
[ "a39adb2ae68f001bdf0e4b2200d7b8f923f27c2f" ]
[ "ndac/predict.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Flatten\nfrom keras.layers import Dropout\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\n\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.wrappers.scikit_learn import KerasClassifier\n\n\ndef _clstm(categorical=False, vocab_size=False, seq_len=200,\n embedding_length=10, cnn_filters=128, filter_length=3,\n pool_size=2, nodes=100, lstm_drop=0.2, dropout=0.5):\n model = Sequential()\n model.add(Embedding(input_dim=vocab_size,\n output_dim=embedding_length,\n input_length=seq_len))\n model.add(Conv1D(filters=cnn_filters, kernel_size=filter_length,\n padding='same', activation='selu'))\n # if user requests no embedding, replace w/ CNN only\n if not embedding_length:\n model.pop()\n model.pop()\n model.add(Conv1D(filters=cnn_filters, kernel_size=filter_length,\n input_shape=(seq_len, vocab_size),\n padding='same', activation='selu'))\n model.add(MaxPooling1D(pool_size=pool_size))\n model.add(LSTM(nodes, dropout=dropout, recurrent_dropout=lstm_drop))\n if not categorical:\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n else:\n model.add(Dense(categorical, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model\n\n\ndef _lstm(categorical=False, vocab_size=False, seq_len=200,\n embedding_length=10, nodes=100, lstm_drop=0.2,\n dropout=0.5):\n model = Sequential()\n model.add(Embedding(input_dim=vocab_size,\n output_dim=embedding_length,\n input_length=seq_len))\n model.add(LSTM(nodes, dropout=dropout, recurrent_dropout=lstm_drop))\n # if user requests no embedding, replace w/ LSTM only\n if not embedding_length:\n model.pop()\n model.pop()\n model.add(LSTM(nodes, dropout=dropout,\n recurrent_dropout=lstm_drop,\n input_shape=(seq_len, vocab_size)))\n if not categorical:\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n else:\n model.add(Dense(categorical, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n return model\n\n\ndef _cnn(categorical=False, vocab_size=False, seq_len=150,\n embedding_length=10, cnn_filters=128, filter_length=3,\n pool_size=2, nodes=100, dropout=0.5):\n model = Sequential()\n model.add(Embedding(input_dim=vocab_size,\n output_dim=embedding_length,\n input_length=seq_len))\n model.add(Conv1D(filters=cnn_filters, kernel_size=filter_length,\n padding='same', activation='selu'))\n # if user requests no embedding, replace w/ CNN only\n if not embedding_length:\n model.pop()\n model.pop()\n model.add(Conv1D(filters=cnn_filters, kernel_size=filter_length,\n input_shape=(seq_len, vocab_size),\n padding='same', activation='selu'))\n model.add(MaxPooling1D(pool_size=pool_size))\n model.add(Flatten())\n model.add(Dense(nodes))\n model.add(Dropout(dropout))\n if not categorical:\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n else:\n model.add(Dense(categorical, activation='softmax'))\n 
model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n return model\n\n\ndef train_model(x, y, architecture='clstm', test_fraction=0,\n skip_embedding=False, batch_size=100, epochs=5,\n verbose=1, save_file=None, **kwargs):\n # fix random seed for reproducibility\n np.random.seed(7)\n\n if test_fraction:\n # create test-train split\n x, x_test, y, y_test = train_test_split(\n x, y, test_size=test_fraction\n )\n\n kwargs['vocab_size'] = int(x.max() + 1)\n # convert None embedding value to False to prevent error\n if skip_embedding:\n kwargs['embedding_length'] = False\n x = to_categorical(x)\n if test_fraction:\n x_test = to_categorical(x_test, num_classes=x.shape[-1])\n\n # get embedding parameters from x matrix\n kwargs['seq_len'] = int(x.shape[1])\n\n if not np.isscalar(y[0]):\n kwargs['categorical'] = y.shape[1]\n\n # select model architecture\n if architecture == 'clstm':\n model = _clstm(**kwargs)\n elif architecture == 'lstm':\n model = _lstm(**kwargs)\n elif architecture == 'cnn':\n model = _cnn(**kwargs)\n\n print(model.summary())\n\n fit_args = {\n 'epochs': epochs,\n 'batch_size': batch_size,\n 'verbose': verbose\n }\n\n if test_fraction:\n fit_args['validation_data'] = (x_test, y_test)\n\n model.fit(x, y, **fit_args)\n\n # report the test accuracy if we performed a train_test_split\n if test_fraction:\n # Final evaluation of the model\n scores = model.evaluate(x_test, y_test, verbose=0)\n print(\"Accuracy: %.2f%%\" % (scores[1] * 100))\n\n if save_file:\n model.save(save_file)\n\n return model\n\n\ndef cross_validate(x, y, architecture='clstm', save_file=None,\n skip_embedding=False, batch_size=100, epochs=35,\n verbose=10, k=3, params=None):\n # fix random seed for reproducibility\n np.random.seed(7)\n params['vocab_size'] = [int(x.max() + 1)]\n\n if skip_embedding:\n params['embedding_length'] = [False]\n x = to_categorical(x)\n\n # get embedding parameters from x matrix\n params['seq_len'] = [int(x.shape[1])]\n\n if not np.isscalar(y[0]):\n params['categorical'] = [y.shape[1]]\n\n # print(model.summary())\n if architecture == 'clstm':\n model = KerasClassifier(build_fn=_clstm, batch_size=batch_size,\n epochs=epochs, verbose=verbose)\n if architecture == 'lstm':\n model = KerasClassifier(build_fn=_lstm, batch_size=batch_size,\n epochs=epochs, verbose=verbose)\n if architecture == 'cnn':\n model = KerasClassifier(build_fn=_cnn, batch_size=batch_size,\n epochs=epochs, verbose=verbose)\n\n grid = GridSearchCV(estimator=model, param_grid=params, cv=k) # , n_jobs=28)\n grid_result = grid.fit(x, y)\n # summarize results\n print(\"Best: %f using %s\" % (grid_result.best_score_, grid_result.best_params_))\n\n if save_file:\n grid_df = pd.DataFrame(grid_result.cv_results_['params'])\n grid_df['means'] = grid_result.cv_results_['mean_test_score']\n grid_df['stddev'] = grid_result.cv_results_['std_test_score']\n # print results to csv file\n grid_df.to_csv(save_file)\n\n return\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "numpy.random.seed", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "numpy.isscalar" ] ]
alexjones85/aXeleRate
[ "52437fc0b1d6cd9de2ccd6071f5fb489dc84e99d" ]
[ "example_scripts/arm_nn/yolov2.py" ]
[ "# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.\r\n# SPDX-License-Identifier: MIT\r\n\r\n\"\"\"\r\nContains functions specific to decoding and processing inference results for YOLO V3 Tiny models.\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom box import BoundBox, nms_boxes, boxes_to_array, to_minmax, draw_boxes\r\n\r\n\r\ndef yolo_processing(netout):\r\n anchors = [1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025]\r\n nms_threshold=0.2\r\n \"\"\"Convert Yolo network output to bounding box\r\n\r\n # Args\r\n netout : 4d-array, shape of (grid_h, grid_w, num of boxes per grid, 5 + n_classes)\r\n YOLO neural network output array\r\n\r\n # Returns\r\n boxes : array, shape of (N, 4)\r\n coordinate scale is normalized [0, 1]\r\n probs : array, shape of (N, nb_classes)\r\n \"\"\"\r\n netout = netout[0].reshape(7,7,5,6)\r\n grid_h, grid_w, nb_box = netout.shape[:3]\r\n boxes = []\r\n\r\n # decode the output by the network\r\n netout[..., 4] = _sigmoid(netout[..., 4])\r\n netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])\r\n netout[..., 5:] *= netout[..., 5:] > 0.3\r\n\r\n for row in range(grid_h):\r\n for col in range(grid_w):\r\n for b in range(nb_box):\r\n # from 4th element onwards are confidence and class classes\r\n classes = netout[row,col,b,5:]\r\n \r\n if np.sum(classes) > 0:\r\n # first 4 elements are x, y, w, and h\r\n x, y, w, h = netout[row,col,b,:4]\r\n\r\n x = (col + _sigmoid(x)) / grid_w # center position, unit: image width\r\n y = (row + _sigmoid(y)) / grid_h # center position, unit: image height\r\n w = anchors[2 * b + 0] * np.exp(w) / grid_w # unit: image width\r\n h = anchors[2 * b + 1] * np.exp(h) / grid_h # unit: image height\r\n confidence = netout[row,col,b,4]\r\n box = BoundBox(x, y, w, h, confidence, classes)\r\n boxes.append(box)\r\n\r\n boxes = nms_boxes(boxes, len(classes), nms_threshold, 0.3)\r\n boxes, probs = boxes_to_array(boxes)\r\n #print(boxes)\r\n predictions = []\r\n def _to_original_scale(boxes):\r\n minmax_boxes = to_minmax(boxes)\r\n minmax_boxes[:,0] *= 224\r\n minmax_boxes[:,2] *= 224\r\n minmax_boxes[:,1] *= 224\r\n minmax_boxes[:,3] *= 224\r\n return minmax_boxes.astype(np.int)\r\n\r\n if len(boxes) > 0:\r\n boxes = _to_original_scale(boxes)\r\n\r\n for i in range(len(boxes)):\r\n predictions.append([0, boxes[i], probs[i][0]])\r\n\r\n return predictions\r\n\r\ndef _sigmoid(x):\r\n return 1. / (1. + np.exp(-x))\r\n\r\ndef _softmax(x, axis=-1, t=-100.):\r\n x = x - np.max(x)\r\n if np.min(x) < t:\r\n x = x/np.min(x)*t\r\n e_x = np.exp(x)\r\n return e_x / e_x.sum(axis, keepdims=True)\r\n\r\ndef yolo_resize_factor(video: cv2.VideoCapture, input_binding_info: tuple):\r\n \"\"\"\r\n Gets a multiplier to scale the bounding box positions to\r\n their correct position in the frame.\r\n\r\n Args:\r\n video: Video capture object, contains information about data source.\r\n input_binding_info: Contains shape of model input layer.\r\n\r\n Returns:\r\n Resizing factor to scale box coordinates to output frame size.\r\n \"\"\"\r\n frame_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)\r\n frame_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)\r\n model_height, model_width = list(input_binding_info[1].GetShape())[1:3]\r\n return max(frame_height, frame_width) / max(model_height, model_width)\r\n" ]
[ [ "numpy.max", "numpy.exp", "numpy.sum", "numpy.min" ] ]
fangyuchu/rethinking-network-pruning
[ "3d3726e1277b5d9bd12b2b26d3c9bf1730709a42", "3d3726e1277b5d9bd12b2b26d3c9bf1730709a42", "3d3726e1277b5d9bd12b2b26d3c9bf1730709a42", "3d3726e1277b5d9bd12b2b26d3c9bf1730709a42" ]
[ "cifar/soft-filter-pruning/pruning_cifar10_pretrain.py", "imagenet/l1-norm-pruning/main_finetune.py", "cifar/weight-level/cifar_finetune.py", "cifar/soft-filter-pruning/pruning_resnet_longer_scratch.py" ]
[ "from __future__ import division\n\nimport os, sys, shutil, time, random\nimport argparse\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nfrom utils import AverageMeter, RecorderMeter, time_string, convert_secs2time\nimport models\nimport numpy as np\n\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='Trains ResNeXt on CIFAR or ImageNet', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('data_path', type=str, help='Path to dataset')\nparser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'imagenet', 'svhn', 'stl10'], help='Choose between Cifar10/100 and ImageNet.')\nparser.add_argument('--arch', metavar='ARCH', default='resnet18', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnext29_8_64)')\n# Optimization options\nparser.add_argument('--epochs', type=int, default=300, help='Number of epochs to train.')\nparser.add_argument('--batch_size', type=int, default=128, help='Batch size.')\nparser.add_argument('--learning_rate', type=float, default=0.1, help='The Learning Rate.')\nparser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')\nparser.add_argument('--decay', type=float, default=0.0005, help='Weight decay (L2 penalty).')\nparser.add_argument('--schedule', type=int, nargs='+', default=[150, 225], help='Decrease learning rate at these epochs.')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1], help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')\n# Checkpoints\nparser.add_argument('--print_freq', default=200, type=int, metavar='N', help='print frequency (default: 200)')\nparser.add_argument('--save_path', type=str, default='./', help='Folder to save checkpoints and log.')\nparser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')\nparser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')\n# Acceleration\nparser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')\nparser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n# random seed\nparser.add_argument('--manualSeed', type=int, help='manual seed')\n#compress rate\nparser.add_argument('--rate', type=float, default=0.9, help='compress rate of model')\nparser.add_argument('--layer_begin', type=int, default=1, help='compress layer of model')\nparser.add_argument('--layer_end', type=int, default=1, help='compress layer of model')\nparser.add_argument('--layer_inter', type=int, default=1, help='compress layer of model')\nparser.add_argument('--epoch_prune', type=int, default=1, help='compress layer of model')\nparser.add_argument('--use_state_dict', dest='use_state_dict', action='store_true', help='use state dcit or not')\n\n\nargs = parser.parse_args()\nargs.use_cuda = args.ngpu>0 and torch.cuda.is_available()\n\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif args.use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\ncudnn.benchmark = True\n\ndef 
main():\n # Init logger\n if not os.path.isdir(args.save_path):\n os.makedirs(args.save_path)\n log = open(os.path.join(args.save_path, 'log_seed_{}.txt'.format(args.manualSeed)), 'w')\n print_log('save path : {}'.format(args.save_path), log)\n state = {k: v for k, v in args._get_kwargs()}\n print_log(state, log)\n print_log(\"Random Seed: {}\".format(args.manualSeed), log)\n print_log(\"python version : {}\".format(sys.version.replace('\\n', ' ')), log)\n print_log(\"torch version : {}\".format(torch.__version__), log)\n print_log(\"cudnn version : {}\".format(torch.backends.cudnn.version()), log)\n print_log(\"Compress Rate: {}\".format(args.rate), log)\n print_log(\"Layer Begin: {}\".format(args.layer_begin), log)\n print_log(\"Layer End: {}\".format(args.layer_end), log)\n print_log(\"Layer Inter: {}\".format(args.layer_inter), log)\n print_log(\"Epoch prune: {}\".format(args.epoch_prune), log)\n # Init dataset\n if not os.path.isdir(args.data_path):\n os.makedirs(args.data_path)\n\n if args.dataset == 'cifar10':\n mean = [x / 255 for x in [125.3, 123.0, 113.9]]\n std = [x / 255 for x in [63.0, 62.1, 66.7]]\n elif args.dataset == 'cifar100':\n mean = [x / 255 for x in [129.3, 124.1, 112.4]]\n std = [x / 255 for x in [68.2, 65.4, 70.4]]\n else:\n assert False, \"Unknow dataset : {}\".format(args.dataset)\n\n train_transform = transforms.Compose(\n [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n test_transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize(mean, std)])\n\n if args.dataset == 'cifar10':\n train_data = dset.CIFAR10(args.data_path, train=True, transform=train_transform, download=True)\n test_data = dset.CIFAR10(args.data_path, train=False, transform=test_transform, download=True)\n num_classes = 10\n elif args.dataset == 'cifar100':\n train_data = dset.CIFAR100(args.data_path, train=True, transform=train_transform, download=True)\n test_data = dset.CIFAR100(args.data_path, train=False, transform=test_transform, download=True)\n num_classes = 100\n elif args.dataset == 'svhn':\n train_data = dset.SVHN(args.data_path, split='train', transform=train_transform, download=True)\n test_data = dset.SVHN(args.data_path, split='test', transform=test_transform, download=True)\n num_classes = 10\n elif args.dataset == 'stl10':\n train_data = dset.STL10(args.data_path, split='train', transform=train_transform, download=True)\n test_data = dset.STL10(args.data_path, split='test', transform=test_transform, download=True)\n num_classes = 10\n elif args.dataset == 'imagenet':\n assert False, 'Do not finish imagenet code'\n else:\n assert False, 'Do not support dataset : {}'.format(args.dataset)\n\n train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n print_log(\"=> creating model '{}'\".format(args.arch), log)\n # Init model, criterion, and optimizer\n net = models.__dict__[args.arch](num_classes)\n print_log(\"=> network :\\n {}\".format(net), log)\n\n net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))\n\n # define loss function (criterion) and optimizer\n criterion = torch.nn.CrossEntropyLoss()\n\n optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],\n weight_decay=state['decay'], 
nesterov=True)\n\n if args.use_cuda:\n net.cuda()\n criterion.cuda()\n\n recorder = RecorderMeter(args.epochs)\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print_log(\"=> loading checkpoint '{}'\".format(args.resume), log)\n checkpoint = torch.load(args.resume)\n if args.use_state_dict:\n net.load_state_dict(checkpoint['state_dict'])\n else:\n net = checkpoint['state_dict']\n\n print_log(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume, checkpoint['epoch']), log)\n else:\n print_log(\"=> no checkpoint found at '{}'\".format(args.resume), log)\n else:\n print_log(\"=> do not use any checkpoint for {} model\".format(args.arch), log)\n\n if args.evaluate:\n time1 = time.time()\n validate(test_loader, net, criterion, log)\n time2 = time.time()\n print ('function took %0.3f ms' % ((time2-time1)*1000.0))\n return\n\n comp_rate = args.rate\n print(\"-\"*10+\"one epoch begin\"+\"-\"*10)\n print(\"the compression rate now is %f\" % comp_rate)\n\n val_acc_1, val_los_1 = validate(test_loader, net, criterion, log)\n\n print(\" accu before is: %.3f %%\" % val_acc_1)\n\n # Main loop\n start_time = time.time()\n epoch_time = AverageMeter()\n for epoch in range(args.start_epoch, args.epochs):\n current_learning_rate = adjust_learning_rate(optimizer, epoch, args.gammas, args.schedule)\n\n need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs-epoch))\n need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)\n\n print_log('\\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f}]'.format(time_string(), epoch, args.epochs, need_time, current_learning_rate) \\\n + ' [Best : Accuracy={:.2f}, Error={:.2f}]'.format(recorder.max_accuracy(False), 100-recorder.max_accuracy(False)), log)\n\n # train for one epoch\n train_acc, train_los = train(train_loader, net, criterion, optimizer, epoch, log)\n\n # evaluate on validation set\n val_acc_1, val_los_1 = validate(test_loader, net, criterion, log)\n \n is_best = recorder.update(epoch, train_los, train_acc, val_los_1, val_acc_1)\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': net,\n 'recorder': recorder,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, args.save_path, 'checkpoint.pth.tar')\n\n # measure elapsed time\n epoch_time.update(time.time() - start_time)\n start_time = time.time()\n\n log.close()\n\n# train function (forward, backward, update)\ndef train(train_loader, model, criterion, optimizer, epoch, log):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.use_cuda:\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data[0], input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n 
print_log(' Epoch: [{:03d}][{:03d}/{:03d}] '\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n 'Loss {loss.val:.4f} ({loss.avg:.4f}) '\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f}) '\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f}) '.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5) + time_string(), log)\n print_log(' **Train** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg), log)\n return top1.avg, losses.avg\n\ndef validate(val_loader, model, criterion, log):\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n for i, (input, target) in enumerate(val_loader):\n if args.use_cuda:\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input, volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data[0], input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n print_log(' **Test** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg), log)\n\n return top1.avg, losses.avg\n\ndef print_log(print_string, log):\n print(\"{}\".format(print_string))\n log.write('{}\\n'.format(print_string))\n log.flush()\n\ndef save_checkpoint(state, is_best, save_path, filename):\n filename = os.path.join(save_path, filename)\n torch.save(state, filename)\n if is_best:\n bestname = os.path.join(save_path, 'model_best.pth.tar')\n shutil.copyfile(filename, bestname)\n\ndef adjust_learning_rate(optimizer, epoch, gammas, schedule):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.learning_rate\n assert len(gammas) == len(schedule), \"length of gammas and schedule should be equal\"\n for (gamma, step) in zip(gammas, schedule):\n if (epoch >= step):\n lr = lr * gamma\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass Mask:\n def __init__(self,model):\n self.model_size = {}\n self.model_length = {}\n self.compress_rate = {}\n self.mat = {}\n self.model = model\n self.mask_index = []\n \n \n def get_codebook(self, weight_torch,compress_rate,length):\n weight_vec = weight_torch.view(length)\n weight_np = weight_vec.cpu().numpy()\n \n weight_abs = np.abs(weight_np)\n weight_sort = np.sort(weight_abs)\n \n threshold = weight_sort[int (length * (1-compress_rate) )]\n weight_np [weight_np <= -threshold ] = 1\n weight_np [weight_np >= threshold ] = 1\n weight_np [weight_np !=1 ] = 0\n \n print(\"codebook done\")\n return weight_np\n\n def get_filter_codebook(self, weight_torch,compress_rate,length):\n codebook = np.ones(length)\n if len( weight_torch.size())==4:\n filter_pruned_num = 
int(weight_torch.size()[0]*(1-compress_rate))\n weight_vec = weight_torch.view(weight_torch.size()[0],-1)\n norm2 = torch.norm(weight_vec,2,1)\n norm2_np = norm2.cpu().numpy()\n filter_index = norm2_np.argsort()[:filter_pruned_num]\n# norm1_sort = np.sort(norm1_np)\n# threshold = norm1_sort[int (weight_torch.size()[0] * (1-compress_rate) )]\n kernel_length = weight_torch.size()[1] *weight_torch.size()[2] *weight_torch.size()[3]\n for x in range(0,len(filter_index)):\n codebook [filter_index[x] *kernel_length : (filter_index[x]+1) *kernel_length] = 0\n\n print(\"filter codebook done\")\n else:\n pass\n return codebook\n \n def convert2tensor(self,x):\n x = torch.FloatTensor(x)\n return x\n \n def init_length(self):\n for index, item in enumerate(self.model.parameters()):\n self.model_size [index] = item.size()\n \n for index1 in self.model_size:\n for index2 in range(0,len(self.model_size[index1])):\n if index2 ==0:\n self.model_length[index1] = self.model_size[index1][0]\n else:\n self.model_length[index1] *= self.model_size[index1][index2]\n \n def init_rate(self, layer_rate):\n for index, item in enumerate(self.model.parameters()):\n self.compress_rate [index] = 1\n for key in range(args.layer_begin, args.layer_end + 1, args.layer_inter):\n self.compress_rate[key]= layer_rate\n #different setting for different architecture\n if args.arch == 'resnet20':\n last_index = 57\n elif args.arch == 'resnet32':\n last_index = 93\n elif args.arch == 'resnet56':\n last_index = 165\n elif args.arch == 'resnet110':\n last_index = 327\n self.mask_index = [x for x in range (0,last_index,3)]\n \n def init_mask(self,layer_rate):\n self.init_rate(layer_rate)\n for index, item in enumerate(self.model.parameters()):\n if(index in self.mask_index):\n self.mat[index] = self.get_filter_codebook(item.data, self.compress_rate[index],self.model_length[index] )\n self.mat[index] = self.convert2tensor(self.mat[index])\n if args.use_cuda:\n self.mat[index] = self.mat[index].cuda()\n print(\"mask Ready\")\n\n def do_mask(self):\n for index, item in enumerate(self.model.parameters()):\n if(index in self.mask_index):\n a = item.data.view(self.model_length[index])\n b = a * self.mat[index]\n item.data = b.view(self.model_size[index])\n print(\"mask Done\")\n\n def if_zero(self):\n for index, item in enumerate(self.model.parameters()):\n# if(index in self.mask_index):\n if(index ==0):\n a = item.data.view(self.model_length[index])\n b = a.cpu().numpy()\n \n print(\"number of nonzero weight is %d, zero is %d\" %( np.count_nonzero(b),len(b)- np.count_nonzero(b)))\n \nif __name__ == '__main__':\n main()", "import argparse\nimport os\nimport numpy as np\nimport shutil\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom resnet import resnet34\n\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--arch', '-a', metavar='ARCH', default='resnet34',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: 
resnet18)')\nparser.add_argument('-j', '--workers', default=25, type=int, metavar='N',\n help='number of data loading workers (default: 25)')\nparser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=1, type=int,\n help='number of distributed processes')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='gloo', type=str,\n help='distributed backend')\nparser.add_argument('--s', type=float, default=0,\n help='scale sparse rate (default: 0)')\nparser.add_argument('--save', default='.', type=str, metavar='PATH',\n help='path to save prune model (default: current directory)')\nparser.add_argument('--refine', default='', type=str, metavar='PATH',\n help='the PATH to pruned model')\n\nbest_prec1 = 0\n\ndef main():\n global args, best_prec1\n args = parser.parse_args()\n\n args.distributed = args.world_size > 1\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n\n if args.distributed:\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size)\n\n if args.refine:\n checkpoint = torch.load(args.refine)\n model = resnet34(cfg=checkpoint['cfg'])\n\n if not args.distributed:\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n else:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n if args.refine:\n model.load_state_dict(checkpoint['state_dict'])\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda()\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = 
True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion)\n return\n\n history_score = np.zeros((args.epochs + 1, 1))\n np.savetxt(os.path.join(args.save, 'record.txt'), history_score, fmt = '%10.5f', delimiter=',')\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch)\n\n # evaluate on validation set\n prec1 = validate(val_loader, model, criterion)\n history_score[epoch] = prec1\n np.savetxt(os.path.join(args.save, 'record.txt'), history_score, fmt = '%10.5f', delimiter=',')\n\n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, args.save)\n\n history_score[-1] = best_prec1\n np.savetxt(os.path.join(args.save, 'record.txt'), history_score, fmt = '%10.5f', delimiter=',')\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n target = target.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data[0], input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, 
top5=top5))\n\ndef validate(val_loader, model, criterion):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n target = target.cuda()\n input_var = torch.autograd.Variable(input, volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data[0], input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\ndef save_checkpoint(state, is_best, filepath):\n torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))\n if is_best:\n shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar'))\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\nif __name__ == '__main__':\n main()", "from __future__ import print_function\n\nimport argparse\nimport os\nimport random\nimport shutil\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nimport models.cifar as models\nfrom utils.misc import get_conv_zero_param\nfrom utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig\n\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10/100 Training')\n# Datasets\nparser.add_argument('-d', '--dataset', default='cifar10', type=str)\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n# Optimization 
options\nparser.add_argument('--epochs', default=40, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--train-batch', default=64, type=int, metavar='N',\n help='train batchsize')\nparser.add_argument('--test-batch', default=50, type=int, metavar='N',\n help='test batchsize')\nparser.add_argument('--lr', '--learning-rate', default=0.001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--drop', '--dropout', default=0, type=float,\n metavar='Dropout', help='Dropout ratio')\nparser.add_argument('--schedule', type=int, nargs='+', default=[41, 42],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n# Architecture\nparser.add_argument('--arch', '-a', metavar='ARCH', default='resnet20',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('--depth', type=int, default=29, help='Model depth.')\nparser.add_argument('--cardinality', type=int, default=8, help='Model cardinality (group).')\nparser.add_argument('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')\nparser.add_argument('--growthRate', type=int, default=12, help='Growth rate for DenseNet.')\nparser.add_argument('--compressionRate', type=int, default=1, help='Compression Rate (theta) for DenseNet.')\n# Miscs\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n\nparser.add_argument('--save_dir', default='test_checkpoint/', type=str)\n#Device options\nparser.add_argument('--percent', default=0.6, type=float)\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\n\n# Validate dataset\nassert args.dataset == 'cifar10' or args.dataset == 'cifar100', 'Dataset can only be cifar10 or cifar100.'\n\n# Use CUDA\nuse_cuda = torch.cuda.is_available()\n\n# Random seed\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\n\nbest_acc = 0 # best test accuracy\n\ndef main():\n global best_acc\n start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch\n\n if not os.path.isdir(args.save_dir):\n mkdir_p(args.save_dir)\n\n # Data\n print('==> Preparing dataset %s' % args.dataset)\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n if args.dataset == 'cifar10':\n dataloader = datasets.CIFAR10\n num_classes = 10\n else:\n dataloader = datasets.CIFAR100\n num_classes = 100\n\n\n trainset = 
dataloader(root='./data', train=True, download=True, transform=transform_train)\n trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)\n\n testset = dataloader(root='./data', train=False, download=False, transform=transform_test)\n testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)\n\n # Model\n print(\"==> creating model '{}'\".format(args.arch))\n if args.arch.startswith('resnext'):\n model = models.__dict__[args.arch](\n cardinality=args.cardinality,\n num_classes=num_classes,\n depth=args.depth,\n widen_factor=args.widen_factor,\n dropRate=args.drop,\n )\n elif args.arch.startswith('densenet'):\n model = models.__dict__[args.arch](\n num_classes=num_classes,\n depth=args.depth,\n growthRate=args.growthRate,\n compressionRate=args.compressionRate,\n dropRate=args.drop,\n )\n elif args.arch.startswith('wrn'):\n model = models.__dict__[args.arch](\n num_classes=num_classes,\n depth=args.depth,\n widen_factor=args.widen_factor,\n dropRate=args.drop,\n )\n elif args.arch.endswith('resnet'):\n model = models.__dict__[args.arch](\n num_classes=num_classes,\n depth=args.depth,\n )\n else:\n model = models.__dict__[args.arch](num_classes=num_classes)\n\n model = torch.nn.DataParallel(model).cuda()\n cudnn.benchmark = True\n print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) # default is 0.001\n\n # Resume\n title = 'cifar-10-' + args.arch\n if args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n\n logger = Logger(os.path.join(args.save_dir, 'log_finetune.txt'), title=title)\n logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])\n\n # Train and val\n for epoch in range(start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch)\n\n print('\\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))\n num_parameters = get_conv_zero_param(model)\n print('Zero parameters: {}'.format(num_parameters))\n num_parameters = sum([param.nelement() for param in model.parameters()])\n print('Parameters: {}'.format(num_parameters))\n\n train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)\n test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)\n\n # append logger file\n logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])\n\n # save model\n is_best = test_acc > best_acc\n best_acc = max(test_acc, best_acc)\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'acc': test_acc,\n 'best_acc': best_acc,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, checkpoint=args.save_dir)\n\n logger.close()\n\n print('Best acc:')\n print(best_acc)\n\ndef train(trainloader, model, criterion, optimizer, epoch, use_cuda):\n # switch to train mode\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n end = time.time()\n\n bar = Bar('Processing', max=len(trainloader))\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n # measure data loading time\n data_time.update(time.time() - 
end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)\n\n # compute output\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))\n losses.update(loss.data[0], inputs.size(0))\n top1.update(prec1[0], inputs.size(0))\n top5.update(prec5[0], inputs.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n #-----------------------------------------\n for k, m in enumerate(model.modules()):\n # print(k, m)\n if isinstance(m, nn.Conv2d):\n weight_copy = m.weight.data.abs().clone()\n mask = weight_copy.gt(0).float().cuda()\n m.weight.grad.data.mul_(mask)\n #-----------------------------------------\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(\n batch=batch_idx + 1,\n size=len(trainloader),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n top5=top5.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\ndef test(testloader, model, criterion, epoch, use_cuda):\n global best_acc\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n bar = Bar('Processing', max=len(testloader))\n for batch_idx, (inputs, targets) in enumerate(testloader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)\n\n # compute output\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))\n losses.update(loss.data[0], inputs.size(0))\n top1.update(prec1[0], inputs.size(0))\n top5.update(prec5[0], inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(\n batch=batch_idx + 1,\n size=len(testloader),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n top5=top5.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\ndef save_checkpoint(state, is_best, checkpoint, filename='finetuned.pth.tar'):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n\ndef adjust_learning_rate(optimizer, epoch):\n global state\n if epoch in args.schedule:\n state['lr'] *= args.gamma\n for param_group in optimizer.param_groups:\n param_group['lr'] = state['lr']\n\nif __name__ == '__main__':\n main()\n", "from __future__ import division\n\nimport argparse, math, os, sys, shutil, time, random\nimport numpy as np\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\n\nfrom utils import 
AverageMeter, RecorderMeter, time_string, convert_secs2time\n\nimport models\n\nfrom compute_flops import count_model_param_flops\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='Trains ResNeXt on CIFAR or ImageNet', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('data_path', type=str, help='Path to dataset')\nparser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'imagenet', 'svhn', 'stl10'], help='Choose between Cifar10/100 and ImageNet.')\nparser.add_argument('--arch', metavar='ARCH', default='resnet18', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnext29_8_64)')\n# Optimization options\nparser.add_argument('--epochs', type=int, default=300, help='Number of epochs to train.')\nparser.add_argument('--batch_size', type=int, default=128, help='Batch size.')\nparser.add_argument('--learning_rate', type=float, default=0.1, help='The Learning Rate.')\nparser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')\nparser.add_argument('--decay', type=float, default=0.0005, help='Weight decay (L2 penalty).')\nparser.add_argument('--schedule', type=int, nargs='+', default=[150, 225], help='Decrease learning rate at these epochs.')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1], help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')\n# Checkpoints\nparser.add_argument('--print_freq', default=200, type=int, metavar='N', help='print frequency (default: 200)')\nparser.add_argument('--save_path', type=str, default='./', help='Folder to save checkpoints and log.')\nparser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')\nparser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')\n# Acceleration\nparser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')\nparser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n# random seed\nparser.add_argument('--manualSeed', type=int, help='manual seed')\n#compress rate\nparser.add_argument('--rate', type=float, default=0.9, help='compress rate of model')\nparser.add_argument('--layer_begin', type=int, default=1, help='compress layer of model')\nparser.add_argument('--layer_end', type=int, default=1, help='compress layer of model')\nparser.add_argument('--layer_inter', type=int, default=1, help='compress layer of model')\nparser.add_argument('--epoch_prune', type=int, default=1, help='compress layer of model')\nparser.add_argument('--use_state_dict', dest='use_state_dict', action='store_true', help='use state dcit or not')\n\n\nargs = parser.parse_args()\nargs.use_cuda = args.ngpu>0 and torch.cuda.is_available()\n\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif args.use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\ncudnn.benchmark = True\n\ndef main():\n # Init logger\n if not os.path.isdir(args.save_path):\n os.makedirs(args.save_path)\n log = open(os.path.join(args.save_path, 'log_seed_{}.txt'.format(args.manualSeed)), 'w')\n print_log('save path : 
{}'.format(args.save_path), log)\n state = {k: v for k, v in args._get_kwargs()}\n print_log(state, log)\n print_log(\"Random Seed: {}\".format(args.manualSeed), log)\n print_log(\"python version : {}\".format(sys.version.replace('\\n', ' ')), log)\n print_log(\"torch version : {}\".format(torch.__version__), log)\n print_log(\"cudnn version : {}\".format(torch.backends.cudnn.version()), log)\n print_log(\"Compress Rate: {}\".format(args.rate), log)\n print_log(\"Layer Begin: {}\".format(args.layer_begin), log)\n print_log(\"Layer End: {}\".format(args.layer_end), log)\n print_log(\"Layer Inter: {}\".format(args.layer_inter), log)\n print_log(\"Epoch prune: {}\".format(args.epoch_prune), log)\n # Init dataset\n if not os.path.isdir(args.data_path):\n os.makedirs(args.data_path)\n\n if args.dataset == 'cifar10':\n mean = [x / 255 for x in [125.3, 123.0, 113.9]]\n std = [x / 255 for x in [63.0, 62.1, 66.7]]\n elif args.dataset == 'cifar100':\n mean = [x / 255 for x in [129.3, 124.1, 112.4]]\n std = [x / 255 for x in [68.2, 65.4, 70.4]]\n else:\n assert False, \"Unknow dataset : {}\".format(args.dataset)\n\n train_transform = transforms.Compose(\n [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n test_transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize(mean, std)])\n\n if args.dataset == 'cifar10':\n train_data = dset.CIFAR10(args.data_path, train=True, transform=train_transform, download=True)\n test_data = dset.CIFAR10(args.data_path, train=False, transform=test_transform, download=True)\n num_classes = 10\n elif args.dataset == 'cifar100':\n train_data = dset.CIFAR100(args.data_path, train=True, transform=train_transform, download=True)\n test_data = dset.CIFAR100(args.data_path, train=False, transform=test_transform, download=True)\n num_classes = 100\n elif args.dataset == 'svhn':\n train_data = dset.SVHN(args.data_path, split='train', transform=train_transform, download=True)\n test_data = dset.SVHN(args.data_path, split='test', transform=test_transform, download=True)\n num_classes = 10\n elif args.dataset == 'stl10':\n train_data = dset.STL10(args.data_path, split='train', transform=train_transform, download=True)\n test_data = dset.STL10(args.data_path, split='test', transform=test_transform, download=True)\n num_classes = 10\n elif args.dataset == 'imagenet':\n assert False, 'Do not finish imagenet code'\n else:\n assert False, 'Do not support dataset : {}'.format(args.dataset)\n\n train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n print_log(\"=> creating model '{}'\".format(args.arch), log)\n # Init model, criterion, and optimizer\n net = models.__dict__[args.arch](num_classes)\n net_ref = models.__dict__[args.arch](num_classes)\n print_log(\"=> network :\\n {}\".format(net), log)\n\n net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))\n net_ref = torch.nn.DataParallel(net_ref, device_ids=list(range(args.ngpu)))\n\n\n # define loss function (criterion) and optimizer\n criterion = torch.nn.CrossEntropyLoss()\n\n optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],\n weight_decay=state['decay'], nesterov=True)\n\n if args.use_cuda:\n net.cuda()\n criterion.cuda()\n\n # optionally 
resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print_log(\"=> loading checkpoint '{}'\".format(args.resume), log)\n checkpoint = torch.load(args.resume)\n net_ref = checkpoint['state_dict']\n print_log(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume, checkpoint['epoch']), log)\n else:\n print_log(\"=> no checkpoint found at '{}'\".format(args.resume), log)\n else:\n print_log(\"=> do not use any checkpoint for {} model\".format(args.arch), log)\n\n flops_std = count_model_param_flops(net, 32)\n flops_small = count_model_param_flops(net_ref, 32)\n\n ratio = flops_std / flops_small\n args.epochs = int(400 * ratio)\n print(\"Total epochs %d\"%args.epochs)\n schedule = args.schedule\n args.schedule = [1, int(schedule[1] * ratio), int(schedule[2] * ratio), int(schedule[3] * ratio)]\n print(args.schedule)\n\n recorder = RecorderMeter(args.epochs)\n ###################################################################################################################\n for m, m_ref in zip(net.modules(), net_ref.modules()):\n if isinstance(m, nn.Conv2d):\n weight_copy = m_ref.weight.data.abs().clone()\n mask = weight_copy.gt(0).float().cuda()\n n = mask.sum() / float(m.in_channels)\n m.weight.data.normal_(0, math.sqrt(2. / n))\n m.weight.data.mul_(mask)\n ###################################################################################################################\n\n if args.evaluate:\n time1 = time.time()\n validate(test_loader, net, criterion, log)\n time2 = time.time()\n print ('function took %0.3f ms' % ((time2-time1)*1000.0))\n return\n\n m=Mask(net)\n \n m.init_length()\n\n comp_rate = args.rate\n print(\"-\"*10+\"one epoch begin\"+\"-\"*10)\n print(\"the compression rate now is %f\" % comp_rate)\n\n val_acc_1, val_los_1 = validate(test_loader, net, criterion, log)\n\n print(\" accu before is: %.3f %%\" % val_acc_1)\n\n if args.use_cuda:\n net = net.cuda() \n val_acc_2, val_los_2 = validate(test_loader, net, criterion, log)\n print(\" accu after is: %s %%\" % val_acc_2)\n\n # Main loop\n start_time = time.time()\n epoch_time = AverageMeter()\n for epoch in range(args.start_epoch, args.epochs):\n current_learning_rate = adjust_learning_rate(optimizer, epoch, args.gammas, args.schedule)\n\n need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs-epoch))\n need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)\n\n print_log('\\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f}]'.format(time_string(), epoch, args.epochs, need_time, current_learning_rate) \\\n + ' [Best : Accuracy={:.2f}, Error={:.2f}]'.format(recorder.max_accuracy(False), 100-recorder.max_accuracy(False)), log)\n\n num_parameters = get_conv_zero_param(net)\n print_log('Zero parameters: {}'.format(num_parameters), log)\n num_parameters = sum([param.nelement() for param in net.parameters()])\n print_log('Parameters: {}'.format(num_parameters), log)\n\n # train for one epoch\n train_acc, train_los = train(train_loader, net, criterion, optimizer, epoch, log)\n\n # evaluate on validation set\n val_acc_1, val_los_1 = validate(test_loader, net, criterion, log)\n\n is_best = recorder.update(epoch, train_los, train_acc, val_los_2, val_acc_2)\n\n save_checkpoint({\n 'arch': args.arch,\n 'state_dict': net.state_dict(),\n 'recorder': recorder,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, args.save_path, 'checkpoint.pth.tar')\n\n # measure elapsed time\n epoch_time.update(time.time() - start_time)\n start_time = 
time.time()\n\n log.close()\n\ndef get_conv_zero_param(model):\n total = 0\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n total += torch.sum(m.weight.data.eq(0))\n return total\n\n# train function (forward, backward, update)\ndef train(train_loader, model, criterion, optimizer, epoch, log):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.use_cuda:\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data[0], input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n\n for k, m in enumerate(model.modules()):\n if isinstance(m, nn.Conv2d):\n weight_copy = m.weight.data.abs().clone()\n mask = weight_copy.gt(0).float().cuda()\n m.weight.grad.data.mul_(mask)\n\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print_log(' Epoch: [{:03d}][{:03d}/{:03d}] '\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n 'Loss {loss.val:.4f} ({loss.avg:.4f}) '\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f}) '\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f}) '.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5) + time_string(), log)\n print_log(' **Train** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg), log)\n return top1.avg, losses.avg\n\ndef validate(val_loader, model, criterion, log):\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n for i, (input, target) in enumerate(val_loader):\n if args.use_cuda:\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input, volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data[0], input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n print_log(' **Test** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg), log)\n\n return top1.avg, losses.avg\n\ndef print_log(print_string, log):\n print(\"{}\".format(print_string))\n log.write('{}\\n'.format(print_string))\n log.flush()\n\ndef save_checkpoint(state, is_best, save_path, filename):\n filename = os.path.join(save_path, filename)\n torch.save(state, filename)\n if is_best:\n bestname = os.path.join(save_path, 'model_best.pth.tar')\n shutil.copyfile(filename, bestname)\n\ndef adjust_learning_rate(optimizer, epoch, gammas, schedule):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = 
args.learning_rate\n assert len(gammas) == len(schedule), \"length of gammas and schedule should be equal\"\n for (gamma, step) in zip(gammas, schedule):\n if (epoch >= step):\n lr = lr * gamma\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass Mask:\n def __init__(self,model):\n self.model_size = {}\n self.model_length = {}\n self.compress_rate = {}\n self.mat = {}\n self.model = model\n self.mask_index = []\n \n \n def get_codebook(self, weight_torch,compress_rate,length):\n weight_vec = weight_torch.view(length)\n weight_np = weight_vec.cpu().numpy()\n \n weight_abs = np.abs(weight_np)\n weight_sort = np.sort(weight_abs)\n \n threshold = weight_sort[int (length * (1-compress_rate) )]\n weight_np [weight_np <= -threshold ] = 1\n weight_np [weight_np >= threshold ] = 1\n weight_np [weight_np !=1 ] = 0\n \n print(\"codebook done\")\n return weight_np\n\n def get_filter_codebook(self, weight_torch,compress_rate,length):\n codebook = np.ones(length)\n if len( weight_torch.size())==4:\n filter_pruned_num = int(weight_torch.size()[0]*(1-compress_rate))\n weight_vec = weight_torch.view(weight_torch.size()[0],-1)\n norm2 = torch.norm(weight_vec,2,1)\n norm2_np = norm2.cpu().numpy()\n filter_index = norm2_np.argsort()[:filter_pruned_num]\n kernel_length = weight_torch.size()[1] *weight_torch.size()[2] *weight_torch.size()[3]\n for x in range(0,len(filter_index)):\n codebook [filter_index[x] *kernel_length : (filter_index[x]+1) *kernel_length] = 0\n\n print(\"filter codebook done\")\n else:\n pass\n return codebook\n \n def convert2tensor(self,x):\n x = torch.FloatTensor(x)\n return x\n \n def init_length(self):\n for index, item in enumerate(self.model.parameters()):\n self.model_size [index] = item.size()\n \n for index1 in self.model_size:\n for index2 in range(0,len(self.model_size[index1])):\n if index2 ==0:\n self.model_length[index1] = self.model_size[index1][0]\n else:\n self.model_length[index1] *= self.model_size[index1][index2]\n \n def init_rate(self, layer_rate):\n for index, item in enumerate(self.model.parameters()):\n self.compress_rate [index] = 1\n for key in range(args.layer_begin, args.layer_end + 1, args.layer_inter):\n self.compress_rate[key]= layer_rate\n #different setting for different architecture\n if args.arch == 'resnet20':\n last_index = 57\n elif args.arch == 'resnet32':\n last_index = 93\n elif args.arch == 'resnet56':\n last_index = 165\n elif args.arch == 'resnet110':\n last_index = 327\n self.mask_index = [x for x in range (0,last_index,3)]\n \n def init_mask(self,layer_rate):\n self.init_rate(layer_rate)\n for index, item in enumerate(self.model.parameters()):\n if(index in self.mask_index):\n self.mat[index] = self.get_filter_codebook(item.data, self.compress_rate[index],self.model_length[index] )\n self.mat[index] = self.convert2tensor(self.mat[index])\n if args.use_cuda:\n self.mat[index] = self.mat[index].cuda()\n print(\"mask Ready\")\n\n def do_mask(self):\n for index, item in enumerate(self.model.parameters()):\n if(index in self.mask_index):\n a = 
item.data.view(self.model_length[index])\n b = a * self.mat[index]\n item.data = b.view(self.model_size[index])\n print(\"mask Done\")\n\n def if_zero(self):\n for index, item in enumerate(self.model.parameters()):\n if(index ==0):\n a = item.data.view(self.model_length[index])\n b = a.cpu().numpy()\n \n print(\"number of nonzero weight is %d, zero is %d\" %( np.count_nonzero(b),len(b)- np.count_nonzero(b)))\n \nif __name__ == '__main__':\n main()" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.norm", "numpy.abs", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.backends.cudnn.version", "numpy.sort", "numpy.ones", "torch.autograd.Variable", "torch.FloatTensor", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "numpy.count_nonzero", "torch.save" ], [ "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.load", "torch.utils.data.DataLoader", "torch.nn.DataParallel", "numpy.zeros", "torch.nn.parallel.DistributedDataParallel", "torch.autograd.Variable" ], [ "torch.nn.CrossEntropyLoss", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.autograd.Variable", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.nn.DataParallel", "torch.save" ], [ "torch.nn.CrossEntropyLoss", "torch.norm", "numpy.abs", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.backends.cudnn.version", "numpy.sort", "numpy.ones", "torch.autograd.Variable", "torch.FloatTensor", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "numpy.count_nonzero", "torch.save" ] ]
devenxu1985/onnx-tensorflow
[ "4fe611422ad3236973c498c5bff51fdd55657a4e" ]
[ "onnx_tf/common/data_type.py" ]
[ "from numbers import Number\n\nimport numpy as np\nfrom onnx import mapping\nfrom onnx import TensorProto\nimport tensorflow as tf\n\n\ndef tf2onnx(dtype):\n if isinstance(dtype, Number):\n tf_dype = tf.as_dtype(dtype)\n elif isinstance(dtype, tf.DType):\n tf_dype = dtype\n elif isinstance(dtype, list):\n return [tf2onnx(t) for t in dtype]\n else:\n raise RuntimeError(\"dtype should be number or tf.DType.\")\n \n # Usually, tf2onnx is done via tf_type->numpy_type->onnx_type\n # to leverage existing type conversion infrastructure;\n # However, we need to intercept the string type early because \n # lowering tf.string type to numpy dtype results in loss of \n # information. <class 'object'> is returned instead of the \n # numpy string type desired.\n if tf_dype is tf.string:\n return TensorProto.STRING\n \n return mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(tf_dype.as_numpy_dtype)]\n\n\ndef onnx2tf(dtype):\n return tf.as_dtype(mapping.TENSOR_TYPE_TO_NP_TYPE[_onnx_dtype(dtype)])\n\n\ndef onnx2field(dtype):\n return mapping.STORAGE_TENSOR_TYPE_TO_FIELD[_onnx_dtype(dtype)]\n\n\ndef _onnx_dtype(dtype):\n if isinstance(dtype, Number):\n onnx_dype = dtype\n elif isinstance(dtype, str):\n onnx_dype = TensorProto.DataType.Value(dtype)\n else:\n raise RuntimeError(\"dtype should be number or str.\")\n return onnx_dype\n" ]
[ [ "numpy.dtype", "tensorflow.as_dtype" ] ]
HansBug/dgdvapp
[ "f3142d2c265afda427bbeee46c8073e1126eeef5" ]
[ "app/process/log.py" ]
[ "import os\nfrom operator import itemgetter\nfrom typing import Tuple, Iterator\n\nimport numpy as np\nimport pandas as pd\n\nfrom .exp_center import find_expdata_in_directory, exp_center_file_in_directory, exp_center_trans\nfrom .simudata import find_simudata_in_directory, simudata_file_in_directory, simudata_trans\n\n\ndef is_log_directory(directory: str) -> bool:\n try:\n find_simudata_in_directory(directory)\n find_expdata_in_directory(directory)\n except FileNotFoundError:\n return False\n else:\n return True\n\n\ndef walk_log_directories(root: str) -> Iterator[str]:\n for directory, _, _ in os.walk(root, followlinks=True):\n if is_log_directory(directory):\n yield directory\n\n\ndef log_trans(directory: str) -> Tuple[pd.DataFrame, pd.DataFrame]:\n simudata_df = simudata_trans(find_simudata_in_directory(directory))\n exp_center_df = exp_center_trans(find_expdata_in_directory(directory))\n\n records = {}\n for lineno, row in simudata_df.iterrows():\n time_ = row['time']\n x, y = row['x'], row['y']\n height = row['height']\n if time_ not in records:\n records[time_] = []\n records[time_].append((x, y, height))\n\n means = {}\n for time_, items in records.items():\n x_array = np.asarray(list(map(itemgetter(0), items)))\n y_array = np.asarray(list(map(itemgetter(1), items)))\n h_array = np.asarray(list(map(itemgetter(2), items)))\n means[time_] = (np.mean(x_array), np.mean(y_array), np.mean(h_array))\n\n xs, ys, hs = [], [], []\n for lineno, row in exp_center_df.iterrows():\n time_ = row['time']\n rx, ry, rh = means.get(time_, (-1, -1, -1))\n xs.append(rx)\n ys.append(ry)\n hs.append(rh)\n\n exp_center_df['r_x'] = xs\n exp_center_df['r_y'] = ys\n exp_center_df['r_h'] = hs\n\n return simudata_df, exp_center_df\n\n\ndef log_process(directory: str, force: bool = False):\n simudata_file = simudata_file_in_directory(directory)\n exp_center_file = exp_center_file_in_directory(directory)\n if not force and os.path.exists(simudata_file) and os.path.exists(exp_center_file):\n return\n\n simudata_pd, exp_center_pd = log_trans(directory)\n simudata_pd.to_csv(simudata_file)\n exp_center_pd.to_csv(exp_center_file)\n" ]
[ [ "numpy.mean" ] ]
aspratyush/neural-structured-learning
[ "6cb6b22174ba5f5d6b621443eb2b147831be320d" ]
[ "neural_structured_learning/tools/pack_nbrs.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Tool that prepares input for graph-based Neural Structured Learning.\n\nIn particular, this tool merges into each labeled training example the features\nfrom its out-edge neighbor examples according to a supplied *similarity graph*.\n\nUSAGE:\n\n`python pack_nbrs.py` [*flags*] *labeled.tfr unlabeled.tfr graph.tsv output.tfr*\n\nThe *labeled.tfr* command-line argument is expected to name a TFRecord file\ncontaining labeled `tf.train.Examples`, while the *unlabeled.tfr* command-line\nargument is expected to name a TFRecord file containing unlabeled examples.\nThe *unlabeled.tfr* argument can be an empty string ('' or \"\" as the shell\ncommand-line argument) if there are no unlabeled examples. Each example read\nfrom either of those files is expected to have a feature that contains its ID\n(represented as a singleton `bytes_list` value); the name of this feature is\nspecified by the value of the `--id_feature_name` flag (default: 'id').\n\nThe *graph.tsv* command-line argument is expected to name a TSV file that\nspecifies a graph as a set of edges representing similarity relationships\nbetween the labeled and unlabeled `Example`s. Each graph edge is identified by a\nsource instance ID, a target instance ID, and an optional edge weight. These\nedges are specified by TSV lines of the following form:\n\n```\nsource_id<TAB>target_id[<TAB>edge_weight]\n```\n\nIf no `edge_weight` is specified, it defaults to 1.0. If your input graph is\nnot symmetric and you'd like all edges in it to be treated as bi-directional,\nyou can use the `--add_undirected_edges` flag to accomplish that. To build a\ngraph based on the similarity of your instances' dense embeddings, you can use\nthe `build_graph.py` tool included in the Neural Structured Learning\npackage.\n\nThis program merges into each labeled example the features of that example's\nout-edge neighbors according to that instance's in-edges in the graph. If a\nvalue is specified for the `--max_nbrs` flag, then at most that many neighbors'\nfeatures are merged into each labeled instance (based on which neighbors have\nthe largest edge weights, with ties broken using instance IDs).\n\nHere's how the merging process works. For each labeled example, the features of\nits `i`'th out-edge neighbor will be prefixed by `NL_nbr_<i>_`, with indexes `i`\nin the half-open interval `[0, K)`, where K is the minimum of `--max_nbrs` and\nthe number of the labeled example's out-edges in the graph. A feature named\n`NL_nbr_<i>_weight` will also be merged into the labeled example whose value\nwill be the neighbor's corresponding edge weight. The top neighbors to use in\nthis process are selected by consulting the input graph and selecting the\nlabeled example's out-edge neighbors with the largest edge weight; ties are\nbroken by preferring neighbor IDs with larger lexicographic order. 
Finally, a\nfeature named `NL_num_nbrs` is set on the result (a singleton `int64_list`)\ndenoting the number of neighbors `K` merged into the labeled example.\n\nFinally, the merged examples are written to a TFRecord file named by the\n*output.tfr* command-line argument.\n\nFor details about this program's flags, run `python pack_nbrs.py --help`.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport time\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom neural_structured_learning.tools import graph_utils\nimport six\nimport tensorflow as tf\n\n\ndef _read_tfrecord_examples(filename, id_feature_name):\n \"\"\"Returns a dict containing the Examples read from a TFRecord file.\n\n Args:\n filename: Name of the TFRecord file to read. Each `tensorflow.Example` in\n the input is expected to have a feature named `id` that maps to a\n singleton `bytes_list` value.\n id_feature_name: Name of the singleton `bytes_list` feature in each input\n `Example` whose value is the Example's ID.\n\n Returns:\n A dictionary that maps the ID of each Example to that Example.\n \"\"\"\n def parse_example(raw_record):\n \"\"\"Parses and returns a single record of a TFRecord file.\"\"\"\n example = tf.train.Example()\n example.ParseFromString(raw_record.numpy())\n return example\n\n def get_id(tf_example):\n \"\"\"Returns the (singleton) value of the Example's `id_feature_name` feature.\n\n Args:\n tf_example: The `tensorflow.Example` from which to extract the ID feature.\n This is expected to contain a singleton bytes_list value.\n\n Returns: The ID feature value as a (decoded) string.\n \"\"\"\n id_feature = tf_example.features.feature[id_feature_name].bytes_list\n return id_feature.value[0].decode('utf-8')\n\n start_time = time.time()\n logging.info('Reading tf.train.Examples from TFRecord file: %s...', filename)\n result = {}\n for raw_record in tf.data.TFRecordDataset([filename]):\n tf_example = parse_example(raw_record)\n result[get_id(tf_example)] = tf_example\n logging.info('Done reading %d tf.train.Examples from: %s (%.2f seconds).',\n len(result), filename, (time.time() - start_time))\n return result\n\n\ndef _join_examples(seed_exs, nbr_exs, graph, max_nbrs):\n r\"\"\"Joins the `seeds` and `nbrs` Examples using the edges in `graph`.\n\n This generator joins and augments each labeled Example in `seed_exs` with the\n features of at most `max_nbrs` of the seed's neighbors according to the given\n `graph`, and yields each merged result.\n\n Args:\n seed_exs: A `dict` mapping node IDs to labeled Examples.\n nbr_exs: A `dict` mapping node IDs to unlabeled Examples.\n graph: A `dict`: source -> (target, weight).\n max_nbrs: The maximum number of neighbors to merge into each seed Example,\n or `None` if the number of neighbors per node is unlimited.\n\n Yields:\n The result of merging each seed Example with the features of its neighbors,\n as described by the module comment.\n \"\"\"\n # A histogram of the out-degrees of all seed Examples. 
The keys of this dict\n # range from 0 to 'max_nbrs' (inclusive) if 'max_nbrs' is finite.\n out_degree_count = collections.Counter()\n\n def has_ex(node_id):\n \"\"\"Returns true iff 'node_id' is in the 'seed_exs' or 'nbr_exs dict'.\"\"\"\n result = (node_id in seed_exs) or (node_id in nbr_exs)\n if not result:\n logging.warning('No tf.train.Example found for edge target ID: \"%s\"',\n node_id)\n return result\n\n def lookup_ex(node_id):\n \"\"\"Returns the Example from `seed_exs` or `nbr_exs` with the given ID.\"\"\"\n return seed_exs[node_id] if node_id in seed_exs else nbr_exs[node_id]\n\n def join_seed_to_nbrs(seed_id):\n \"\"\"Joins the seed with ID `seed_id` to its out-edge graph neighbors.\n\n This also has the side-effect of maintaining the `out_degree_count`.\n\n Args:\n seed_id: The ID of the seed Example to start from.\n\n Returns:\n A list of (nbr_wt, nbr_id) pairs (in decreasing weight order) of the\n seed Example's top `max_nbrs` neighbors. So the resulting list will have\n size at most `max_nbrs`, but it may be less (or even empty if the seed\n Example has no out-edges).\n \"\"\"\n nbr_dict = graph[seed_id] if seed_id in graph else {}\n nbr_wt_ex_list = [(nbr_wt, nbr_id)\n for (nbr_id, nbr_wt) in six.iteritems(nbr_dict)\n if has_ex(nbr_id)]\n result = sorted(nbr_wt_ex_list, reverse=True)[:max_nbrs]\n out_degree_count[len(result)] += 1\n return result\n\n def merge_examples(seed_ex, nbr_wt_ex_list):\n \"\"\"Merges neighbor Examples into the given seed Example `seed_ex`.\n\n Args:\n seed_ex: A labeled Example.\n nbr_wt_ex_list: A list of (nbr_wt, nbr_id) pairs (in decreasing nbr_wt\n order) representing the neighbors of 'seed_ex'.\n\n Returns:\n The Example that results from merging the features of the neighbor\n Examples (as well as creating a feature for each neighbor's edge weight)\n into `seed_ex`. 
See the `join()` description above for how the neighbor\n features are named in the result.\n \"\"\"\n # Make a deep copy of the seed Example to augment.\n merged_ex = tf.train.Example()\n merged_ex.CopyFrom(seed_ex)\n\n # Add a feature for the number of neighbors.\n merged_ex.features.feature['NL_num_nbrs'].int64_list.value.append(\n len(nbr_wt_ex_list))\n\n # Enumerate the neighbors, and merge in the features of each.\n for index, (nbr_wt, nbr_id) in enumerate(nbr_wt_ex_list):\n prefix = 'NL_nbr_{}_'.format(index)\n # Add the edge weight value as a new singleton float feature.\n weight_feature = prefix + 'weight'\n merged_ex.features.feature[weight_feature].float_list.value.append(nbr_wt)\n # Copy each of the neighbor Examples features, prefixed with 'prefix'.\n nbr_ex = lookup_ex(nbr_id)\n for (feature_name, feature_val) in six.iteritems(nbr_ex.features.feature):\n new_feature = merged_ex.features.feature[prefix + feature_name]\n new_feature.CopyFrom(feature_val)\n return merged_ex\n\n start_time = time.time()\n logging.info(\n 'Joining seed and neighbor tf.train.Examples with graph edges...')\n for (seed_id, seed_ex) in six.iteritems(seed_exs):\n yield merge_examples(seed_ex, join_seed_to_nbrs(seed_id))\n logging.info(\n 'Done creating and writing %d merged tf.train.Examples (%.2f seconds).',\n len(seed_exs), (time.time() - start_time))\n logging.info('Out-degree histogram: %s', sorted(out_degree_count.items()))\n\n\ndef _main(argv):\n \"\"\"Main function for running the pack_nbrs program.\"\"\"\n flag = flags.FLAGS\n flag.showprefixforinfo = False\n start_time = time.time()\n # Check that the correct number of arguments have been provided.\n if len(argv) != 5:\n raise app.UsageError(\n 'Invalid number of arguments; expected 4, got %d' % (len(argv) - 1))\n\n # Read seed and neighbor TFRecord input files.\n seed_exs = _read_tfrecord_examples(argv[1], flag.id_feature_name)\n # Unlabeled neighbor input instances are optional. If not provided, all\n # neighbors used will be labeled instances.\n nbr_exs = _read_tfrecord_examples(argv[2],\n flag.id_feature_name) if argv[2] else {}\n\n # Read the input graph in TSV format, and conditionally reverse all its edges.\n graph = graph_utils.read_tsv_graph(argv[3])\n if flag.add_undirected_edges: graph_utils.add_undirected_edges(graph)\n\n # Join the edges with the seed and neighbor Examples, and write out the\n # results to the output TFRecord file.\n output_tfr = argv[4]\n with tf.io.TFRecordWriter(output_tfr) as writer:\n for merged_ex in _join_examples(seed_exs, nbr_exs, graph, flag.max_nbrs):\n writer.write(merged_ex.SerializeToString())\n logging.info('Output written to TFRecord file: %s.', output_tfr)\n logging.info('Total running time: %.2f minutes.',\n (time.time() - start_time) / 60.0)\n\n\nif __name__ == '__main__':\n flags.DEFINE_integer(\n 'max_nbrs', None,\n 'The maximum number of neighbors to merge into each labeled Example.')\n flags.DEFINE_string(\n 'id_feature_name', 'id',\n \"\"\"Name of the singleton bytes_list feature in each input Example\n whose value is the Example's ID.\"\"\"\n )\n flags.DEFINE_bool(\n 'add_undirected_edges', False,\n \"\"\"By default, the set of neighbors of a node S are\n only those nodes T such that there is an edge S-->T in the input graph. 
If\n this flag is True, all edges of the graph will be made symmetric before\n determining each node's neighbors (and in the case where edges S-->T and\n T-->S exist in the input graph with weights w1 and w2, respectively, the\n weight of the symmetric edge will be max(w1, w2)).\"\"\")\n\n # Ensure TF 2.0 behavior even if TF 1.X is installed.\n tf.compat.v1.enable_v2_behavior()\n app.run(_main)\n" ]
[ [ "tensorflow.io.TFRecordWriter", "tensorflow.data.TFRecordDataset", "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.train.Example" ] ]
mhd53/ssd-from-torch
[ "1ae6eaab87afd6ef243b2fe444cbb5b15a12cfc7" ]
[ "trainer/trainer.py" ]
[ "import numpy as np\nimport torch\nfrom torchvision.utils import make_grid\nfrom base import BaseTrainer\nfrom utils import inf_loop, MetricTracker\n\n\nclass Trainer(BaseTrainer):\n \"\"\"\n Trainer class\n \"\"\"\n\n def __init__(\n self,\n model,\n criterion,\n metric_ftns,\n optimizer,\n config,\n data_loader,\n valid_data_loader=None,\n lr_scheduler=None,\n len_epoch=None,\n ):\n super().__init__(model, criterion, metric_ftns, optimizer, config, data_loader)\n self.config = config\n self.data_loader = data_loader\n if len_epoch is None:\n # epoch-based training\n self.len_epoch = len(self.data_loader)\n else:\n # iteration-based training\n self.data_loader = inf_loop(data_loader)\n self.len_epoch = len_epoch\n self.valid_data_loader = valid_data_loader\n self.do_validation = self.valid_data_loader is not None\n self.lr_scheduler = lr_scheduler\n self.log_step = int(np.sqrt(data_loader.batch_size))\n\n self.train_metrics = MetricTracker(\n \"loss\", *[m.__name__ for m in self.metric_ftns], writer=self.writer\n )\n self.valid_metrics = MetricTracker(\n \"loss\", *[m.__name__ for m in self.metric_ftns], writer=self.writer\n )\n\n def _train_epoch(self, epoch):\n \"\"\"\n Training logic for an epoch\n\n :param epoch: Integer, current training epoch.\n :return: A log that contains average loss and metric in this epoch.\n \"\"\"\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n loss = self.criterion(output, target)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(\n \"Train Epoch: {} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), loss.item()\n )\n )\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log\n\n def _valid_epoch(self, epoch):\n \"\"\"\n Validate after training an epoch\n\n :param epoch: Integer, current training epoch.\n :return: A log that contains information about validation\n \"\"\"\n self.model.eval()\n self.valid_metrics.reset()\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(self.valid_data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n output = self.model(data)\n loss = self.criterion(output, target)\n\n self.writer.set_step(\n (epoch - 1) * len(self.valid_data_loader) + batch_idx, \"valid\"\n )\n self.valid_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.valid_metrics.update(met.__name__, met(output, target))\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n # add histogram of model parameters to the tensorboard\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(name, p, bins=\"auto\")\n return self.valid_metrics.result()\n\n def _progress(self, batch_idx):\n base = \"[{}/{} ({:.0f}%)]\"\n if hasattr(self.data_loader, \"n_samples\"):\n current = batch_idx * 
self.data_loader.batch_size\n total = self.data_loader.n_samples\n else:\n current = batch_idx\n total = self.len_epoch\n return base.format(current, total, 100.0 * current / total)\n" ]
[ [ "torch.no_grad", "numpy.sqrt" ] ]
Lnaden/openmmtools
[ "7a9c61cea5c657e333f433dabbd7c87624f8227f" ]
[ "openmmtools/multistate/sams.py" ]
[ "#!/usr/local/bin/env python\n\n# ==============================================================================\n# MODULE DOCSTRING\n# ==============================================================================\n\n\"\"\"\nSamsSampler\n===========\n\nSelf-adjusted mixture sampling (SAMS), also known as optimally-adjusted mixture sampling.\n\nThis implementation uses stochastic approximation to allow one or more replicas to sample the whole range of thermodynamic states\nfor rapid online computation of free energies.\n\nCOPYRIGHT\n\nWritten by John D. Chodera <[email protected]> while at Memorial Sloan Kettering Cancer Center.\n\nLICENSE\n\nThis code is licensed under the latest available version of the MIT License.\n\n\"\"\"\n\nimport logging\nimport numpy as np\nimport openmmtools as mmtools\nfrom scipy.special import logsumexp\n\nfrom openmmtools import multistate, utils\nfrom openmmtools.multistate.multistateanalyzer import MultiStateSamplerAnalyzer\nimport mpiplus\n\n\nlogger = logging.getLogger(__name__)\n\n# ==============================================================================\n# PARALLEL TEMPERING\n# ==============================================================================\n\n\nclass SAMSSampler(multistate.MultiStateSampler):\n \"\"\"Self-adjusted mixture sampling (SAMS), also known as optimally-adjusted mixture sampling.\n\n This class provides a facility for self-adjusted mixture sampling simulations.\n One or more replicas use the method of expanded ensembles [1] to sample multiple thermodynamic states within each replica,\n with log weights for each thermodynamic state adapted on the fly [2] to achieve the desired target probabilities for each state.\n\n Attributes\n ----------\n log_target_probabilities : array-like\n log_target_probabilities[state_index] is the log target probability for state ``state_index``\n state_update_scheme : str\n Thermodynamic state sampling scheme. One of ['global-jump', 'local-jump', 'restricted-range']\n locality : int\n Number of neighboring states on either side to consider for local update schemes\n update_stages : str\n Number of stages to use for update. One of ['one-stage', 'two-stage']\n weight_update_method : str\n Method to use for updating log weights in SAMS. One of ['optimal', 'rao-blackwellized']\n adapt_target_probabilities : bool\n If True, target probabilities will be adapted to achieve minimal thermodynamic length between terminal thermodynamic states.\n gamma0 : float, optional, default=0.0\n Initial weight adaptation rate.\n logZ_guess : array-like of shape [n_states] of floats, optional, default=None\n Initial guess for logZ for all states, if available.\n\n References\n ----------\n [1] Lyubartsev AP, Martsinovski AA, Shevkunov SV, and Vorontsov-Velyaminov PN. New approach to Monte Carlo calculation of the free energy: Method of expanded ensembles. JCP 96:1776, 1992\n http://dx.doi.org/10.1063/1.462133\n\n [2] Tan, Z. 
Optimally adjusted mixture sampling and locally weighted histogram analysis, Journal of Computational and Graphical Statistics 26:54, 2017.\n http://dx.doi.org/10.1080/10618600.2015.1113975\n\n Examples\n --------\n SAMS simulation of alanine dipeptide in implicit solvent at different temperatures.\n\n Create the system:\n\n >>> import math\n >>> from openmm import unit\n >>> from openmmtools import testsystems, states, mcmc\n >>> testsystem = testsystems.AlanineDipeptideVacuum()\n >>> import os\n >>> import tempfile\n\n Create thermodynamic states for parallel tempering with exponentially-spaced schedule:\n\n >>> n_replicas = 3 # Number of temperature replicas.\n >>> T_min = 298.0 * unit.kelvin # Minimum temperature.\n >>> T_max = 600.0 * unit.kelvin # Maximum temperature.\n >>> temperatures = [T_min + (T_max - T_min) * (math.exp(float(i) / float(n_replicas-1)) - 1.0) / (math.e - 1.0)\n ... for i in range(n_replicas)]\n >>> thermodynamic_states = [states.ThermodynamicState(system=testsystem.system, temperature=T)\n ... for T in temperatures]\n\n Initialize simulation object with options. Run with a GHMC integrator:\n\n >>> move = mcmc.GHMCMove(timestep=2.0*unit.femtoseconds, n_steps=50)\n >>> simulation = SAMSSampler(mcmc_moves=move, number_of_iterations=2,\n ... state_update_scheme='global-jump', locality=5,\n ... update_stages='two-stage', flatness_criteria='logZ-flatness',\n ... flatness_threshold=0.2, weight_update_method='rao-blackwellized',\n ... adapt_target_probabilities=False)\n\n\n Create a single-replica SAMS simulation bound to a storage file and run:\n\n >>> storage_path = tempfile.NamedTemporaryFile(delete=False).name + '.nc'\n >>> reporter = multistate.MultiStateReporter(storage_path, checkpoint_interval=1)\n >>> simulation.create(thermodynamic_states=thermodynamic_states,\n ... sampler_states=[states.SamplerState(testsystem.positions)],\n ... storage=reporter)\n Please cite the following:\n <BLANKLINE>\n Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209\n Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27\n Eastman P and Pande VS. Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413\n Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w\n >>> simulation.run() # This runs for a maximum of 2 iterations.\n >>> simulation.iteration\n 2\n >>> simulation.run(n_iterations=1)\n >>> simulation.iteration\n 2\n\n To resume a simulation from an existing storage file and extend it beyond\n the original number of iterations.\n\n >>> del simulation\n >>> simulation = SAMSSampler.from_storage(reporter)\n Please cite the following:\n <BLANKLINE>\n Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209\n Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27\n Eastman P and Pande VS. 
Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413\n Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w\n >>> simulation.extend(n_iterations=1)\n >>> simulation.iteration\n 3\n\n You can extract several information from the NetCDF file using the Reporter\n class while the simulation is running. This reads the SamplerStates of every\n run iteration.\n\n >>> reporter = multistate.MultiStateReporter(storage=storage_path, open_mode='r', checkpoint_interval=1)\n >>> sampler_states = reporter.read_sampler_states(iteration=3)\n >>> len(sampler_states)\n 1\n >>> sampler_states[-1].positions.shape # Alanine dipeptide has 22 atoms.\n (22, 3)\n\n Clean up.\n\n >>> os.remove(storage_path)\n\n See Also\n --------\n ReplicaExchangeSampler\n\n \"\"\"\n\n _TITLE_TEMPLATE = ('Self-adjusted mixture sampling (SAMS) simulation using SAMSSampler '\n 'class of yank.multistate on {}')\n\n def __init__(self,\n number_of_iterations=1,\n log_target_probabilities=None,\n state_update_scheme='global-jump',\n locality=5,\n update_stages='two-stage',\n flatness_criteria='logZ-flatness',\n flatness_threshold=0.2,\n weight_update_method='rao-blackwellized',\n adapt_target_probabilities=False,\n gamma0=1.0,\n logZ_guess=None,\n **kwargs):\n \"\"\"Initialize a SAMS sampler.\n\n Parameters\n ----------\n log_target_probabilities : array-like or None\n ``log_target_probabilities[state_index]`` is the log target probability for thermodynamic state ``state_index``\n When converged, each state should be sampled with the specified log probability.\n If None, uniform probabilities for all states will be assumed.\n state_update_scheme : str, optional, default='global-jump'\n Specifies the scheme used to sample new thermodynamic states given fixed sampler states.\n One of ['global-jump', 'local-jump', 'restricted-range-jump']\n ``global_jump`` will allow the sampler to access any thermodynamic state\n ``local-jump`` will propose a move to one of the local neighborhood states, and accept or reject.\n ``restricted-range`` will compute the probabilities for each of the states in the local neighborhood, increasing jump probability\n locality : int, optional, default=1\n Number of neighboring states on either side to consider for local update schemes.\n update_stages : str, optional, default='two-stage'\n One of ['one-stage', 'two-stage']\n ``one-stage`` will use the asymptotically optimal scheme throughout the entire simulation (not recommended due to slow convergence)\n ``two-stage`` will use a heuristic first stage to achieve flat histograms before switching to the asymptotically optimal scheme\n flatness_criteria : string, optiona, default='logZ-flatness'\n Method of assessing when to switch to asymptotically optimal scheme\n One of ['logZ-flatness','minimum-visits','histogram-flatness']\n flatness_threshold : float, optional, default=0.2\n Histogram relative flatness threshold to use for first stage of two-stage scheme.\n weight_update_method : str, optional, default='rao-blackwellized'\n Method to use for updating log weights in SAMS. 
One of ['optimal', 'rao-blackwellized']\n ``rao-blackwellized`` will update log free energy estimate for all states for which energies were computed\n ``optimal`` will use integral counts to update log free energy estimate of current state only\n adapt_target_probabilities : bool, optional, default=False\n If True, target probabilities will be adapted to achieve minimal thermodynamic length between terminal thermodynamic states.\n (EXPERIMENTAL)\n gamma0 : float, optional, default=0.0\n Initial weight adaptation rate.\n logZ_guess : array-like of shape [n_states] of floats, optiona, default=None\n Initial guess for logZ for all states, if available.\n \"\"\"\n # Initialize multi-state sampler\n super(SAMSSampler, self).__init__(number_of_iterations=number_of_iterations, **kwargs)\n # Options\n self.log_target_probabilities = log_target_probabilities\n self.state_update_scheme = state_update_scheme\n self.locality = locality\n self.update_stages = update_stages\n self.flatness_criteria = flatness_criteria\n self.flatness_threshold = flatness_threshold\n self.weight_update_method = weight_update_method\n self.adapt_target_probabilities = adapt_target_probabilities\n self.gamma0 = gamma0\n self.logZ_guess = logZ_guess\n # Private variables\n # self._replica_neighbors[replica_index] is a list of states that form the neighborhood of ``replica_index``\n self._replica_neighbors = None\n self._cached_state_histogram = None\n\n class _StoredProperty(multistate.MultiStateSampler._StoredProperty):\n\n @staticmethod\n def _state_update_scheme_validator(instance, scheme):\n supported_schemes = ['global-jump', 'local-jump', 'restricted-range-jump']\n supported_schemes = ['global-jump'] # TODO: Eliminate this after release\n if scheme not in supported_schemes:\n raise ValueError(\"Unknown update scheme '{}'. Supported values \"\n \"are {}.\".format(scheme, supported_schemes))\n return scheme\n\n @staticmethod\n def _update_stages_validator(instance, scheme):\n supported_schemes = ['one-stage', 'two-stage']\n if scheme not in supported_schemes:\n raise ValueError(\"Unknown update scheme '{}'. Supported values \"\n \"are {}.\".format(scheme, supported_schemes))\n return scheme\n\n @staticmethod\n def _flatness_criteria_validator(instance, scheme):\n supported_schemes = ['minimum-visits', 'logZ-flatness', 'histogram-flatness']\n if scheme not in supported_schemes:\n raise ValueError(\"Unknown update scheme '{}'. Supported values \"\n \"are {}.\".format(scheme, supported_schemes))\n return scheme\n\n @staticmethod\n def _weight_update_method_validator(instance, scheme):\n supported_schemes = ['optimal', 'rao-blackwellized']\n if scheme not in supported_schemes:\n raise ValueError(\"Unknown update scheme '{}'. Supported values \"\n \"are {}.\".format(scheme, supported_schemes))\n return scheme\n\n @staticmethod\n def _adapt_target_probabilities_validator(instance, scheme):\n supported_schemes = [False]\n if scheme not in supported_schemes:\n raise ValueError(\"Unknown update scheme '{}'. 
Supported values \"\n \"are {}.\".format(scheme, supported_schemes))\n return scheme\n\n log_target_probabilities = _StoredProperty('log_target_probabilities', validate_function=None)\n state_update_scheme = _StoredProperty('state_update_scheme', validate_function=_StoredProperty._state_update_scheme_validator)\n locality = _StoredProperty('locality', validate_function=None)\n update_stages = _StoredProperty('update_stages', validate_function=_StoredProperty._update_stages_validator)\n flatness_criteria = _StoredProperty('flatness_criteria', validate_function=_StoredProperty._flatness_criteria_validator)\n flatness_threshold = _StoredProperty('flatness_threshold', validate_function=None)\n weight_update_method = _StoredProperty('weight_update_method', validate_function=_StoredProperty._weight_update_method_validator)\n adapt_target_probabilities = _StoredProperty('adapt_target_probabilities', validate_function=_StoredProperty._adapt_target_probabilities_validator)\n gamma0 = _StoredProperty('gamma0', validate_function=None)\n logZ_guess = _StoredProperty('logZ_guess', validate_function=None)\n\n def _initialize_stage(self):\n self._t0 = 0 # reference iteration to subtract\n if self.update_stages == 'one-stage':\n self._stage = 1 # start with asymptotically-optimal stage\n elif self.update_stages == 'two-stage':\n self._stage = 0 # start with rapid heuristic adaptation initial stage\n\n def _pre_write_create(self, thermodynamic_states: list, sampler_states: list, storage,\n **kwargs):\n \"\"\"Initialize SAMS sampler.\n\n Parameters\n ----------\n thermodynamic_states : list of openmmtools.states.ThermodynamicState\n Thermodynamic states to simulate, where one replica is allocated per state.\n Each state must have a system with the same number of atoms.\n sampler_states : list of openmmtools.states.SamplerState\n One or more sets of initial sampler states.\n The number of replicas is determined by the number of sampler states provided,\n and does not need to match the number of thermodynamic states.\n Most commonly, a single sampler state is provided.\n storage : str or Reporter\n If str: path to the storage file, checkpoint options are default\n If Reporter: Instanced :class:`Reporter` class, checkpoint information is read from\n In the future this will be able to take a Storage class as well.\n initial_thermodynamic_states : None or list or array-like of int of length len(sampler_states), optional,\n default: None.\n Initial thermodynamic_state index for each sampler_state.\n If no initial distribution is chosen, ``sampler_states`` are distributed between the\n ``thermodynamic_states`` following these rules:\n\n * If ``len(thermodynamic_states) == len(sampler_states)``: 1-to-1 distribution\n\n * If ``len(thermodynamic_states) > len(sampler_states)``: First and last state distributed first\n remaining ``sampler_states`` spaced evenly by index until ``sampler_states`` are depleted.\n If there is only one ``sampler_state``, then the only first ``thermodynamic_state`` will be chosen\n\n * If ``len(thermodynamic_states) < len(sampler_states)``, each ``thermodynamic_state`` receives an\n equal number of ``sampler_states`` until there are insufficient number of ``sampler_states`` remaining\n to give each ``thermodynamic_state`` an equal number. 
Then the rules from the previous point are\n followed.\n metadata : dict, optional\n Simulation metadata to be stored in the file.\n \"\"\"\n # Initialize replica-exchange simulation.\n super()._pre_write_create(thermodynamic_states, sampler_states, storage=storage, **kwargs)\n\n if self.state_update_scheme == 'global-jump':\n self.locality = None # override locality to be global\n if self.locality is not None:\n if self.locality < 1:\n raise Exception('locality must be >= 1')\n elif self.locality >= self.n_states:\n self.locality = None\n\n # Record current weight update stage\n self._initialize_stage()\n\n # Update log target probabilities\n if self.log_target_probabilities is None:\n self.log_target_probabilities = np.zeros([self.n_states], np.float64) - np.log(self.n_states) # log(1/n_states)\n #logger.debug('Setting log target probabilities: %s' % str(self.log_target_probabilities))\n #logger.debug('Target probabilities: %s' % str(np.exp(self.log_target_probabilities)))\n\n # Record initial logZ estimates\n self._logZ = np.zeros([self.n_states], np.float64)\n if self.logZ_guess is not None:\n if len(self.logZ_guess) != self.n_states:\n raise Exception('Initial logZ_guess (dim {}) must have same number of states as n_states ({})'.format(\n len(self.logZ_guess), self.n_states))\n self._logZ = np.array(self.logZ_guess, np.float64)\n\n # Update log weights\n self._update_log_weights()\n\n def _restore_sampler_from_reporter(self, reporter):\n super()._restore_sampler_from_reporter(reporter)\n self._cached_state_histogram = self._compute_state_histogram(reporter=reporter)\n logger.debug('Restored state histogram: {}'.format(self._cached_state_histogram))\n data = reporter.read_online_analysis_data(self._iteration, 'logZ', 'stage', 't0')\n self._logZ = data['logZ']\n self._stage = int(data['stage'][0])\n self._t0 = int(data['t0'][0])\n\n # Compute log weights from log target probability and logZ estimate\n self._update_log_weights()\n\n # Determine t0\n self._update_stage()\n\n @mpiplus.on_single_node(rank=0, broadcast_result=False, sync_nodes=False)\n @mpiplus.delayed_termination\n def _report_iteration_items(self):\n super(SAMSSampler, self)._report_iteration_items()\n\n self._reporter.write_online_data_dynamic_and_static(self._iteration, logZ=self._logZ, stage=self._stage, t0=self._t0)\n # Split into which states and how many samplers are in each state\n # Trying to do histogram[replica_thermo_states] += 1 does not correctly handle multiple\n # replicas in the same state.\n states, counts = np.unique(self._replica_thermodynamic_states, return_counts=True)\n if self._cached_state_histogram is None:\n self._cached_state_histogram = np.zeros(self.n_states, dtype=int)\n self._cached_state_histogram[states] += counts\n\n @mpiplus.on_single_node(0, broadcast_result=True)\n def _mix_replicas(self):\n \"\"\"Update thermodynamic states according to user-specified scheme.\"\"\"\n logger.debug(\"Updating thermodynamic states using %s scheme...\" % self.state_update_scheme)\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Perform swap attempts according to requested scheme.\n # TODO: We may be able to refactor this to simply have different update schemes compute neighborhoods differently.\n # TODO: Can we allow \"plugin\" addition of new update schemes that can be registered externally?\n with mmtools.utils.time_it('Mixing of replicas'):\n # Initialize statistics. 
This matrix is modified by the jump function and used when updating the logZ estimates.\n replicas_log_P_k = np.zeros([self.n_replicas, self.n_states], np.float64)\n if self.state_update_scheme == 'global-jump':\n self._global_jump(replicas_log_P_k)\n elif self.state_update_scheme == 'local-jump':\n self._local_jump(replicas_log_P_k)\n elif self.state_update_scheme == 'restricted-range-jump':\n self._restricted_range_jump(replicas_log_P_k)\n else:\n raise Exception('Programming error: Unreachable code')\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n # TODO drop casting to float when dropping Python 2 support.\n swap_fraction_accepted = float(n_swaps_accepted) / n_swaps_proposed\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n\n # Update logZ estimates\n self._update_logZ_estimates(replicas_log_P_k)\n\n # Update log weights based on target probabilities\n self._update_log_weights()\n\n def _local_jump(self, replicas_log_P_k):\n n_replica, n_states, locality = self.n_replicas, self.n_states, self.locality\n for (replica_index, current_state_index) in enumerate(self._replica_thermodynamic_states):\n u_k = np.zeros([n_states], np.float64)\n log_P_k = np.zeros([n_states], np.float64)\n # Determine current neighborhood.\n neighborhood = self._neighborhood()\n neighborhood_size = len(neighborhood)\n # Propose a move from the current neighborhood.\n proposed_state_index = np.random.choice(neighborhood, p=np.ones([neighborhood_size], np.float64) / float(neighborhood_size))\n # Determine neighborhood for proposed state.\n proposed_neighborhood = self._neighborhood(proposed_state_index)\n proposed_neighborhood_size = len(proposed_neighborhood)\n # Compute state log weights.\n log_Gamma_j_L = - float(proposed_neighborhood_size) # log probability of proposing return\n log_Gamma_L_j = - float(neighborhood_size) # log probability of proposing new state\n L = current_state_index\n # Compute potential for all states in neighborhood\n for j in neighborhood:\n u_k[j] = self._energy_thermodynamic_states[replica_index, j]\n # Compute log of probability of selecting each state in neighborhood\n for j in neighborhood:\n if j != L:\n log_P_k[j] = log_Gamma_L_j + min(0.0, log_Gamma_j_L - log_Gamma_L_j + (self.log_weights[j] - u_k[j]) - (self.log_weights[L] - u_k[L]))\n P_k = np.zeros([n_states], np.float64)\n P_k[neighborhood] = np.exp(log_P_k[neighborhood])\n # Compute probability to return to current state L\n P_k[L] = 0.0\n P_k[L] = 1.0 - P_k[neighborhood].sum()\n log_P_k[L] = np.log(P_k[L])\n # Update context.\n new_state_index = np.random.choice(neighborhood, p=P_k[neighborhood])\n self._replica_thermodynamic_states[replica_index] = new_state_index\n # Accumulate statistics\n replicas_log_P_k[replica_index,:] = log_P_k[:]\n self._n_proposed_matrix[current_state_index, neighborhood] += 1\n self._n_accepted_matrix[current_state_index, new_state_index] += 1\n\n def _global_jump(self, replicas_log_P_k):\n \"\"\"\n Global jump scheme.\n This method is described after Eq. 
3 in [2]\n \"\"\"\n n_replica, n_states = self.n_replicas, self.n_states\n for replica_index, current_state_index in enumerate(self._replica_thermodynamic_states):\n neighborhood = self._neighborhood(current_state_index)\n\n # Compute unnormalized log probabilities for all thermodynamic states.\n log_P_k = np.zeros([n_states], np.float64)\n for state_index in neighborhood:\n u_k = self._energy_thermodynamic_states[replica_index, :]\n log_P_k[state_index] = - u_k[state_index] + self.log_weights[state_index]\n log_P_k -= logsumexp(log_P_k)\n\n # Update sampler Context to current thermodynamic state.\n P_k = np.exp(log_P_k[neighborhood])\n new_state_index = np.random.choice(neighborhood, p=P_k)\n self._replica_thermodynamic_states[replica_index] = new_state_index\n\n # Accumulate statistics.\n replicas_log_P_k[replica_index,:] = log_P_k[:]\n self._n_proposed_matrix[current_state_index, neighborhood] += 1\n self._n_accepted_matrix[current_state_index, new_state_index] += 1\n\n def _restricted_range_jump(self, replicas_log_P_k):\n # TODO: This has an major bug in that we also need to compute energies in `proposed_neighborhood`.\n # I'm working on a way to make this work.\n n_replica, n_states, locality = self.n_replicas, self.n_states, self.locality\n logger.debug('Using restricted range jump with locality %s' % str(self.locality))\n for (replica_index, current_state_index) in enumerate(self._replica_thermodynamic_states):\n u_k = self._energy_thermodynamic_states[replica_index, :]\n log_P_k = np.zeros([n_states], np.float64)\n # Propose new state from current neighborhood.\n neighborhood = self._neighborhood(current_state_index)\n logger.debug(' Current state : %d' % current_state_index)\n logger.debug(' Neighborhood : %s' % str(neighborhood))\n logger.debug(' Relative u_k : %s' % str(u_k[neighborhood] - u_k[current_state_index]))\n log_P_k[neighborhood] = self.log_weights[neighborhood] - u_k[neighborhood]\n log_P_k[neighborhood] -= logsumexp(log_P_k[neighborhood])\n logger.debug(' Neighborhood log_P_k: %s' % str(log_P_k[neighborhood]))\n P_k = np.exp(log_P_k[neighborhood])\n logger.debug(' Neighborhood P_k : %s' % str(P_k))\n proposed_state_index = np.random.choice(neighborhood, p=P_k)\n logger.debug(' Proposed state : %d' % proposed_state_index)\n # Determine neighborhood of proposed state.\n proposed_neighborhood = self._neighborhood(proposed_state_index)\n logger.debug(' Proposed neighborhood : %s' % str(proposed_neighborhood))\n # Accept or reject.\n log_P_accept = logsumexp(self.log_weights[neighborhood] - u_k[neighborhood]) - logsumexp(self.log_weights[proposed_neighborhood] - u_k[proposed_neighborhood])\n logger.debug(' log_P_accept : %f' % log_P_accept)\n logger.debug(' logsumexp(g[forward] - u[forward]) : %f' % logsumexp(self.log_weights[neighborhood] - u_k[neighborhood]))\n logger.debug(' logsumexp(g[reverse] - u[reverse]) : %f' % logsumexp(self.log_weights[proposed_neighborhood] - u_k[proposed_neighborhood]))\n new_state_index = current_state_index\n if (log_P_accept >= 0.0) or (np.random.rand() < np.exp(log_P_accept)):\n new_state_index = proposed_state_index\n logger.debug(' new_state_index : %d' % new_state_index)\n self._replica_thermodynamic_states[replica_index] = new_state_index\n # Accumulate statistics\n replicas_log_P_k[replica_index,:] = log_P_k[:]\n self._n_proposed_matrix[current_state_index, neighborhood] += 1\n self._n_accepted_matrix[current_state_index, new_state_index] += 1\n\n @property\n def _state_histogram(self):\n \"\"\"\n Compute the histogram for the number 
of times each state has been visited.\n\n Returns\n -------\n N_k : array-like of shape [n_states] of int\n N_k[state_index] is the number of times a replica has visited state ``state_index``\n \"\"\"\n if self._cached_state_histogram is None:\n self._cached_state_histogram = self._compute_state_histogram()\n return self._cached_state_histogram\n\n def _compute_state_histogram(self, reporter=None):\n \"\"\" Compute state histogram from disk\"\"\"\n if reporter is None:\n reporter = self._reporter\n replica_thermodynamic_states = reporter.read_replica_thermodynamic_states()\n logger.debug('Read replica thermodynamic states: {}'.format(replica_thermodynamic_states))\n n_k, _ = np.histogram(replica_thermodynamic_states, bins=np.arange(-0.5, self.n_states + 0.5))\n return n_k\n\n def _update_stage(self):\n \"\"\"\n Determine which adaptation stage we're in by checking histogram flatness.\n\n \"\"\"\n # TODO: Make minimum_visits a user option\n minimum_visits = 1\n N_k = self._state_histogram\n logger.debug(' state histogram counts ({} total): {}'.format(self._cached_state_histogram.sum(), self._cached_state_histogram))\n if (self.update_stages == 'two-stage') and (self._stage == 0):\n advance = False\n if N_k.sum() == 0:\n # No samples yet; don't do anything.\n return\n\n if self.flatness_criteria == 'minimum-visits':\n # Advance if every state has been visited at least once\n if np.all(N_k >= minimum_visits):\n advance = True\n elif self.flatness_criteria == 'histogram-flatness':\n # Check histogram flatness\n empirical_pi_k = N_k[:] / N_k.sum()\n pi_k = np.exp(self.log_target_probabilities)\n relative_error_k = np.abs(pi_k - empirical_pi_k) / pi_k\n if np.all(relative_error_k < self.flatness_threshold):\n advance = True\n elif self.flatness_criteria == 'logZ-flatness':\n # TODO: Advance to asymptotically optimal scheme when logZ update fractional counts per state exceed threshold\n # for all states.\n criteria = abs(self._logZ / self.gamma0) > self.flatness_threshold\n logger.debug('logZ-flatness criteria met (%d total): %s' % (np.sum(criteria), str(np.array(criteria, 'i1'))))\n if np.all(criteria):\n advance = True\n else:\n raise ValueError(\"Unknown flatness_criteria %s\" % flatness_criteria)\n\n if advance or ((self._t0 > 0) and (self._iteration > self._t0)):\n # Histograms are sufficiently flat; switch to asymptotically optimal scheme\n self._stage = 1 # asymptotically optimal\n # TODO: On resuming, we need to recompute or restore t0, or use some other way to compute it\n self._t0 = self._iteration - 1\n\n def _update_logZ_estimates(self, replicas_log_P_k):\n \"\"\"\n Update the logZ estimates according to selected SAMS update method\n\n References\n ----------\n [1] http://www.stat.rutgers.edu/home/ztan/Publication/SAMS_redo4.pdf\n\n \"\"\"\n logger.debug('Updating logZ estimates...')\n\n # Store log weights used at the beginning of this iteration\n self._reporter.write_online_analysis_data(self._iteration, log_weights=self.log_weights)\n\n # Retrieve target probabilities\n log_pi_k = self.log_target_probabilities\n pi_k = np.exp(self.log_target_probabilities)\n #logger.debug(' log target probabilities log_pi_k: %s' % str(log_pi_k))\n #logger.debug(' target probabilities pi_k: %s' % str(pi_k))\n\n # Update which stage we're in, checking histogram flatness\n self._update_stage()\n\n logger.debug(' stage: %s' % self._stage)\n\n # Update logZ estimates from all replicas\n for (replica_index, state_index) in enumerate(self._replica_thermodynamic_states):\n logger.debug(' Replica %d state 
%d' % (replica_index, state_index))\n # Compute attenuation factor gamma\n beta_factor = 0.8\n pi_star = pi_k.min()\n t = float(self._iteration)\n if self._stage == 0: # initial stage\n gamma = self.gamma0 * min(pi_star, t**(-beta_factor)) # Eq. 15 of [1]\n elif self._stage == 1:\n gamma = self.gamma0 * min(pi_star, (t - self._t0 + self._t0**beta_factor)**(-1)) # Eq. 15 of [1]\n else:\n raise Exception('stage {} unknown'.format(self._stage))\n\n #logger.debug(' gamma: %s' % gamma)\n\n # Update online logZ estimate\n if self.weight_update_method == 'optimal':\n # Based on Eq. 9 of Ref. [1]\n logZ_update = gamma * np.exp(-log_pi_k[state_index])\n #logger.debug(' optimal logZ increment: %s' % str(logZ_update))\n self._logZ[state_index] += logZ_update\n elif self.weight_update_method == 'rao-blackwellized':\n # Based on Eq. 12 of Ref [1]\n # TODO: This has to be the previous state index and log_P_k used before update; store neighborhood?\n # TODO: Can we use masked arrays for this purpose?\n log_P_k = replicas_log_P_k[replica_index,:]\n neighborhood = np.where(self._neighborhoods[replica_index,:])[0] # compact list of states defining neighborhood\n #logger.debug(' using neighborhood: %s' % str(neighborhood))\n #logger.debug(' using log_P_k : %s' % str(log_P_k[neighborhood]))\n #logger.debug(' using log_pi_k: %s' % str(log_pi_k[neighborhood]))\n logZ_update = gamma * np.exp(log_P_k[neighborhood] - log_pi_k[neighborhood])\n #logger.debug(' Rao-Blackwellized logZ increment: %s' % str(logZ_update))\n self._logZ[neighborhood] += logZ_update\n else:\n raise Exception('Programming error: Unreachable code')\n\n # Subtract off logZ[0] to prevent logZ from growing without bound once we reach the asymptotically optimal stage\n if self._stage == 1: # asymptotically optimal or one-stage\n self._logZ[:] -= self._logZ[0]\n\n # Format logZ\n msg = ' logZ: ['\n for i, val in enumerate(self._logZ):\n if i > 0: msg += ', '\n msg += '%6.1f' % val\n msg += ']'\n logger.debug(msg)\n\n # Store gamma\n self._reporter.write_online_analysis_data(self._iteration, gamma=gamma)\n\n def _update_log_weights(self):\n \"\"\"\n Update the log weights based on current online logZ estimates\n\n \"\"\"\n # TODO: Add option to adapt target probabilities as well\n # TODO: If target probabilities are adapted, we need to store them as well\n\n self.log_weights = self.log_target_probabilities[:] - self._logZ[:]\n\n\nclass SAMSAnalyzer(MultiStateSamplerAnalyzer):\n \"\"\"\n The SAMSAnalyzer is the analyzer for a simulation generated from a SAMSSampler simulation.\n\n See Also\n --------\n ReplicaExchangeAnalyzer\n PhaseAnalyzer\n\n \"\"\"\n pass\n\n# ==============================================================================\n# MAIN AND TESTS\n# ==============================================================================\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n" ]
[ [ "numpy.log", "numpy.sum", "numpy.abs", "numpy.unique", "numpy.random.choice", "numpy.arange", "numpy.ones", "numpy.all", "numpy.where", "numpy.random.rand", "numpy.exp", "numpy.array", "numpy.zeros", "scipy.special.logsumexp" ] ]
tu-rbo/concarne
[ "0e9ae1fa21e132bd240b23e116e7f21e8c45735b" ]
[ "example/simple_multiview.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis example illustrates how simple it is to train a classifier using\nside information.\n\nIt illustrates the exemplary use of the multi-view pattern; for more info\non how to use other patterns, check out synthetic.py.\n\nFor a realistic example with real data check out handwritten.py. \n\nFor more details see the documentation and the paper\nhttp://arxiv.org/abs/1511.06429 \n\"\"\"\n\nfrom __future__ import print_function\n\nimport concarne\nimport concarne.patterns\nimport concarne.training\n\nimport lasagne\nimport theano.tensor as T\n\ntry:\n import sklearn.linear_model as sklm\nexcept:\n print (\n\"\"\"You don't have scikit-learn installed; install it to compare\nlearning with side information to simple supervised learning\"\"\")\n sklm = None\n\nimport numpy as np\n\n\n\nif __name__ == \"__main__\":\n \n #-------------------------------------------------------- \n # Generate the data\n \n num_samples = 300\n \n input_dim = 50\n side_dim = 50\n \n # generate some random data with 100 samples and 5 dimensions\n X = np.random.randn(num_samples, input_dim)\n \n # select the third dimension as the relevant for our classification\n # task\n S = X[:, 2:3]\n \n # The labels are simply the sign of S \n # (note the downcast to int32 - this is required by theano)\n y = np.asarray(S > 0, dtype='int32').reshape( (-1,) )\n # This means we have 2 classes - we will use that later for building\n # the pattern\n num_classes = 2\n \n # Now let's define some side information: we simulate an additional sensor\n # which contains S, but embedded into a different space\n Z = np.random.randn(num_samples, side_dim)\n # set second dimension of Z to correspond to S\n Z[:, 1] = S[:,0]\n \n # let's make it harder to find S in X and Z by applying a random rotations\n # to both data sets\n R = np.linalg.qr(np.random.randn(input_dim, input_dim))[0] # random linear rotation\n X = X.dot(R)\n\n Q = np.linalg.qr(np.random.randn(side_dim, side_dim))[0] # random linear rotation\n Z = Z.dot(Q)\n \n #--------------------------------------------------------\n # Define the pattern\n \n # now that we have some data, we can use a pattern to learn\n # from it. \n # since X and Z are two different \"views\" of the relevant data S,\n # the multi-view pattern is the most natural choice.\n \n # The pattern needs three functions: phi(X) which maps X to an intermediate\n # representation (that should somewhat correspond to S); psi which \n # performs classification using phi(X); and beta(Z) which maps Z to S.\n # The goal of the multi-view pattern is to find phi and beta, s.t.\n # phi(X)~beta(X) and psi s.t. that psi(phi(X))~Y\n \n # Let's first define the theano variables which will represent our data\n input_var = T.matrix('inputs') # for X\n target_var = T.ivector('targets') # for Y\n side_var = T.matrix('sideinfo') # for Z\n \n # Size of the intermediate representation phi(X); since S is 1-dim,\n # phi(X) can also map to a 1-dim vector\n representation_dim = 1 \n\n # Now define the functions - we choose linear functions\n # there are two ways to do it. 
the first way is to define the \n # lasagne network (in our case only on layer) yourself.\n# phi = lasagne.layers.DenseLayer(input_layer, representation_dim, nonlinearity=None, b=None, name=\"phi\")\n\n # the easier way of doing it is to pass a list of tuples with a layer\n # class and the instantion parameters in a dictionary (layer, layer_params).\n # This has the benefit that you don't have to worry about the definition\n # of input layers and the correct wiring of phi, psi and beta - this is \n # all taken care of by the pattern.\n\n # Also, users of the nolearn library might be familiar with this type\n # of specifying a neural network.\n \n # optionally you can pass an input layer, but it is not required and\n # will automatically be inferred by the pattern\n #phi = [ \n #(lasagne.layers.InputLayer, {'shape': (None, input_dim), 'input_var': input_var}),\n #(lasagne.layers.DenseLayer, { 'num_units': concarne.patterns.PairwisePredictTransformationPattern.PHI_OUTPUT_SHAPE,\n # 'nonlinearity':None, 'b':None })]\n \n phi = [ (lasagne.layers.DenseLayer, \n { \n # for the variable of your layer that denotes the output of the\n # network you should use the markers PHI_OUTPUT_SHAPE,\n # PSI_OUTPUT_SHAPE and BETA_OUTPUT_SHAPE, so that the pattern\n # can automatically infer the correct shape\n 'num_units': concarne.patterns.Pattern.PHI_OUTPUT_SHAPE,\n 'nonlinearity':None, 'b':None })]\n psi = [(lasagne.layers.DenseLayer, \n { 'num_units': concarne.patterns.Pattern.PSI_OUTPUT_SHAPE, \n 'nonlinearity':lasagne.nonlinearities.softmax, 'b':None })]\n beta = [(lasagne.layers.DenseLayer, \n { 'num_units': concarne.patterns.Pattern.BETA_OUTPUT_SHAPE, \n 'nonlinearity':None, 'b':None })]\n \n # now that we have figured our all functions, we can pass them to the pattern\n pattern = concarne.patterns.MultiViewPattern(phi=phi, psi=psi, beta=beta,\n # the following parameters are required to automatically\n # build the functions and the losses\n input_var=input_var, \n target_var=target_var, \n side_var=side_var,\n input_shape=input_dim,\n target_shape=num_classes,\n side_shape=side_dim,\n representation_shape=representation_dim,\n # we have to define two loss functions: \n # the target loss deals with optimizing psi and phi wrt. X & Y\n target_loss=lasagne.objectives.categorical_crossentropy,\n # the side loss deals with optimizing beta and phi wrt. X & Z,\n # for multi-view it is beta(Z)~phi(X)\n side_loss=lasagne.objectives.squared_error)\n\n #--------------------------------------------------------\n # Training \n \n # first split our data into training, test, and validation data\n split = int(num_samples/3)\n\n X_train = X[:split]\n X_val = X[split:2*split]\n X_test = X[2*split:]\n \n y_train = y[:split]\n y_val = y[split:2*split]\n y_test = y[2*split:]\n\n Z_train = Z[:split]\n Z_val = Z[split:2*split]\n Z_test = Z[2*split:]\n \n \n # instantiate the PatternTrainer which trains the pattern via stochastic\n # gradient descent\n trainer = concarne.training.PatternTrainer(pattern,\n procedure='simultaneous',\n num_epochs=500,\n batch_size=10,\n update=lasagne.updates.nesterov_momentum,\n update_learning_rate=0.01,\n update_momentum=0.9,\n )\n \n # we use the fit_XYZ method because our X, Y and Z data all have the same\n # size. 
Also note the [] our Z_train - because it is possible to pass\n # multiple side information to some patterns, you have to pass side information\n # in a list.\n # We can also pass validation data to the fit method, however it only\n # has an effect if we set the verbose switch to true to give us\n # information about the learning progress\n trainer.fit_XYZ(X_train, y_train, [Z_train], \n X_val=X_val, y_val=y_val, \n verbose=True)\n\n # print some statistics\n print(\"=================\")\n print(\"Test score...\")\n trainer.score(X_test, y_test, verbose=True) \n \n # Let's compare to supervised learning!\n if sklm is not None:\n # let's try different regularizations\n for c in [1e-5, 1e-1, 1, 10, 100, 1e5]:\n lr = sklm.LogisticRegression(C=c)\n lr.fit(X_train, y_train)\n print (\"Logistic Regression (C=%f) accuracy = %.3f %%\" % (c, 100*lr.score(X_test, y_test)))\n" ]
[ [ "numpy.asarray", "numpy.random.randn", "sklearn.linear_model.LogisticRegression" ] ]
jacr20/pax
[ "d64d0ae4e4ec3e9bb3e61065ed92e9ea23328940" ]
[ "tests/test_posrec_neuralnet.py" ]
[ "import unittest\nimport numpy as np\n\nfrom pax import core, plugin\nfrom pax.datastructure import Event, Peak\n\n\nclass TestPosRecNeuralNet(unittest.TestCase):\n\n def setUp(self):\n self.pax = core.Processor(config_names='XENON100', just_testing=True, config_dict={'pax': {\n 'plugin_group_names': ['test'],\n 'test': 'NeuralNet.PosRecNeuralNet'}})\n self.plugin = self.pax.get_plugin_by_name('PosRecNeuralNet')\n\n def tearDown(self):\n delattr(self, 'pax')\n delattr(self, 'plugin')\n\n @staticmethod\n def example_event(channels_with_something):\n bla = np.zeros(243)\n bla[np.array(channels_with_something)] = 1\n e = Event.empty_event()\n e.peaks.append(Peak({'left': 5,\n 'right': 9,\n 'type': 'S2',\n 'detector': 'tpc',\n 'area_per_channel': bla}))\n return e\n\n def test_get_nn_plugin(self):\n self.assertIsInstance(self.plugin, plugin.TransformPlugin)\n self.assertEqual(self.plugin.__class__.__name__, 'PosRecNeuralNet')\n\n def test_posrec(self):\n e = self.example_event([40, 41, 42])\n e = self.plugin.transform_event(e)\n self.assertIsInstance(e, Event)\n self.assertEqual(len(e.peaks), 1)\n self.assertEqual(len(e.S2s()), 1)\n self.assertEqual(len(e.peaks[0].reconstructed_positions), 1)\n rp = e.peaks[0].reconstructed_positions[0]\n self.assertEqual(rp.algorithm, self.plugin.name)\n self.assertEqual(rp.x, 11.076582570681966)\n self.assertEqual(rp.y, 6.831207460290031)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
drholera/olx-parser
[ "837166bca48b39e03bc1987c9ebb2511697fe3fd" ]
[ "parser.py" ]
[ "import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport webbrowser\nimport os\nimport io\n\n\nclass Parser(object):\n __url = ''\n __results = []\n\n def __init__(self):\n search_query = input(\"Please, enter your search query \\n\")\n self.__url = 'https://www.olx.ua/list/q-' + search_query.replace(' ', '-')\n self.__page = requests.get(self.__url)\n\n def parse(self):\n soup = BeautifulSoup(self.__page.content, 'html.parser')\n pager_exists = soup.find('div', class_='pager')\n if pager_exists:\n max_page_number = soup.find('div', class_='pager').find_all('span', class_='item')[-1].getText().strip()\n if int(max_page_number) > 10:\n max_page_number = 10\n\n self.fill_data(soup)\n\n # Extract other pages.\n if 'max_page_number' in locals():\n for p in range(2, int(max_page_number)):\n url = self.__url + '/?page=' + str(p)\n pages = requests.get(url)\n soups = BeautifulSoup(pages.content, 'html.parser')\n self.fill_data(soups)\n\n # Distinct list values, exclude duplicate links.\n self.__results = list({v['link']: v for v in self.__results}.values())\n # Sort by price.\n self.__results = sorted(self.__results, key=lambda i: i['price'])\n # Store result to HTML file and open it in prowser.\n self.html_dump()\n\n def fill_data(self, soup: BeautifulSoup):\n ads = soup.find_all('table', class_='fixed')\n\n for ad in ads:\n try:\n title = ad.find('h3')\n price = ad.find('p', class_='price')\n link = ad.find('a', class_='detailsLink')\n image = ad.find('img')\n if title.text and price.text and link.get('href'):\n link_href = link.get('href').strip()\n product_list = {\n 'product': title.text.strip(),\n 'price': int(price.text.strip().replace(' ', '').replace('грн.', '')),\n 'link': '<a target=\"_blank\" href={0}>{0}</a>'.format(link_href),\n 'image': '<img src={} />'.format(image.get('src').strip())\n }\n\n self.__results.append(product_list)\n except Exception as e:\n continue\n\n def make_clickable(val):\n # target _blank to open new window\n return '<a target=\"_blank\" href=\"{}\">{}</a>'.format(val, val)\n\n def html_dump(self):\n html_string = '''\n <html>\n <head><title>HTML Pandas Dataframe with CSS</title></head>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"assets/css/df_style.css\"/>\n <body>\n {table}\n </body>\n </html>.\n '''\n\n df = pd.DataFrame(self.__results)\n df.set_index('price')\n\n # Write html to file\n with io.open('results.html', 'w', encoding='UTF-8') as f:\n f.write(html_string.format(table=df.to_html(escape=False, classes='result-table')))\n\n webbrowser.open('file://' + os.path.realpath('results.html'))\n\n\nParser().parse()\n" ]
[ [ "pandas.DataFrame" ] ]
gdevenyi/gabriel.devenyi.ca
[ "cc001f1dc6ed07ff46c3b5cca66865b977710acc" ]
[ "markdown_generator/talks.py" ]
[ "# coding: utf-8\n\n# # Talks markdown generator for academicpages\n#\n# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.\n#\n# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.\n\n# In[1]:\n\nimport pandas as pd\nimport os\n\n\n# ## Data format\n#\n# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.\n#\n# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to \"Talk\"\n# - `date` must be formatted as YYYY-MM-DD.\n# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.\n# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`\n# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames\n#\n\n\n# ## Import TSV\n#\n# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\\t`.\n#\n# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.\n\n# In[3]:\n\ntalks = pd.read_csv(\"talks.tsv\", sep=\"\\t\", header=0)\ntalks\n\n\n# ## Escape special characters\n#\n# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.\n\n# In[4]:\n\nhtml_escape_table = {\"&\": \"&amp;\", '\"': \"&quot;\", \"'\": \"&apos;\"}\n\n\ndef html_escape(text):\n if type(text) is str:\n return \"\".join(html_escape_table.get(c, c) for c in text)\n else:\n return \"False\"\n\n\n# ## Creating the markdown files\n#\n# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. 
It does the YAML metadata first, then does the description for the individual page.\n\n# In[5]:\n\nloc_dict = {}\n\nfor row, item in talks.iterrows():\n\n md_filename = str(item.date) + \"-\" + item.url_slug + \".md\"\n html_filename = str(item.date) + \"-\" + item.url_slug\n year = item.date[:4]\n\n md = '---\\ntitle: \"' + item.title + '\"\\n'\n md += \"collection: talks\" + \"\\n\"\n\n if len(str(item.type)) > 3:\n md += 'type: \"' + item.type + '\"\\n'\n else:\n md += 'type: \"Talk\"\\n'\n\n md += \"permalink: /talks/\" + html_filename + \"\\n\"\n\n if len(str(item.venue)) > 3:\n md += 'venue: \"' + item.venue + '\"\\n'\n\n if len(str(item.location)) > 3:\n md += \"date: \" + str(item.date) + \"\\n\"\n\n if len(str(item.location)) > 3:\n md += 'location: \"' + str(item.location) + '\"\\n'\n\n md += \"---\\n\"\n\n if len(str(item.talk_url)) > 3:\n md += \"\\n[More information here](\" + item.talk_url + \")\\n\"\n\n if len(str(item.description)) > 3:\n md += \"\\n\" + html_escape(item.description) + \"\\n\"\n\n md_filename = os.path.basename(md_filename)\n # print(md)\n\n with open(\"../_talks/\" + md_filename, \"w\") as f:\n f.write(md)\n\n\n# These files are in the talks directory, one directory below where we're working from.\n\n" ]
[ [ "pandas.read_csv" ] ]
saegersven/robocup
[ "3ce18d68d99da43ab12c19417c988bdad38d7373" ]
[ "scripts/calibrate_camera.py" ]
[ "import numpy as np\r\nimport cv2\r\nimport glob\r\nimport array\r\nimport time\r\nimport json\r\n\r\nimage_folder = \"img\" # str(input(\"Input image folder: \"))\r\nout_file = \"test.json\" # str(input(\"Input output file: \"))\r\n\r\nX = 5\r\nY = 7\r\n# Termination criteria\r\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\nobjp = np.zeros((X*Y, 3), np.float32)\r\nobjp[:, :2] = np.mgrid[0:X, 0:Y].T.reshape(-1, 2)\r\n\r\n# Arrays to store object points and image points from all the images.\r\nobjpoints = [] # 3d point in real world space\r\nimgpoints = [] # 2d points in image plane.\r\n\r\ng = None\r\n\r\nimages = glob.glob(image_folder + '/*.jpg')\r\nfor fname in images:\r\n print(fname)\r\n img = cv2.imread(fname)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n g = gray\r\n cv2.imshow(\"g\", g)\r\n cv2.waitKey(500)\r\n # Find the chess board corners\r\n ret, corners = cv2.findChessboardCorners(gray, (X,Y), None)\r\n # If found, add object points, image points (after refining them)\r\n if ret:\r\n print(\"Found\")\r\n objpoints.append(objp)\r\n corners2 = cv2.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)\r\n imgpoints.append(corners2)\r\n # Draw and display the corners\r\n cv2.drawChessboardCorners(img, (X,Y), corners2, ret)\r\n cv2.imshow('img', img)\r\n cv2.waitKey(500)\r\n\r\n\r\ncv2.destroyAllWindows()\r\n\r\nprint(\"Calibrating\")\r\n\r\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,\r\n g.shape[::-1], None, None)\r\n\r\nout = {\r\n 'fx': mtx[0][0],\r\n 'fy': mtx[1][1],\r\n 'cx': mtx[0][2],\r\n 'cy': mtx[2][1],\r\n\r\n 'k': [dist[0][0], dist[0][1], dist[0][2], dist[0][3], dist[0][4]]\r\n}\r\nj = json.dumps(out, indent=4)\r\n\r\noutput_file = open(out_file, 'w')\r\noutput_file.write(j)\r\noutput_file.close()\r\n\r\nprint(\"Written camera parameters to output file\")\r\nprint(\"Calculating error\")\r\n\r\nmean_error = 0\r\nfor i, objpoint in enumerate(objpoints):\r\n imgpoints2, _ = cv2.projectPoints(objpoint, rvecs[i], tvecs[i], mtx, dist)\r\n error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)\r\n mean_error = mean_error + error\r\n\r\nprint(f\"Total error: {mean_error/len(objpoints)}\")\r\n" ]
[ [ "numpy.zeros" ] ]
DavidDePauw1/dairlib
[ "3c75c8f587927b12a58f2e88dda61cc0e7dc82a3" ]
[ "bindings/pydairlib/dircon_trajectory_plotter.py" ]
[ "import sys\nimport matplotlib.pyplot as plt\nimport pydairlib.lcm_trajectory\nfrom pydairlib.common import FindResourceOrThrow\nfrom pydrake.trajectories import PiecewisePolynomial\nimport numpy as np\n\n\ndef main():\n # Default filename for the example\n filename = FindResourceOrThrow(\"examples/Cassie/saved_trajectories/jumping_0.2h_0.25d\")\n if len(sys.argv) == 2:\n filename = sys.argv[1]\n dircon_traj = pydairlib.lcm_trajectory.DirconTrajectory(filename)\n\n # Reconstructing state and input trajectory as piecewise polynomials\n state_traj = dircon_traj.ReconstructStateTrajectory()\n input_traj = dircon_traj.ReconstructInputTrajectory()\n state_datatypes = dircon_traj.GetTrajectory(\"state_traj0\").datatypes\n input_datatypes = dircon_traj.GetTrajectory(\"input_traj\").datatypes\n\n force_traj = PiecewisePolynomial.ZeroOrderHold(dircon_traj.GetForceBreaks(0), dircon_traj.GetForceSamples(0))\n force_datatypes = dircon_traj.GetTrajectory(\"force_vars0\").datatypes\n\n collocation_force_points = dircon_traj.GetCollocationForceSamples(0)\n\n n_points = 500\n t = np.linspace(state_traj.start_time(), state_traj.end_time(), n_points)\n state_samples = np.zeros((n_points, state_traj.value(0).shape[0]))\n input_samples = np.zeros((n_points, input_traj.value(0).shape[0]))\n force_samples = np.zeros((n_points, force_traj.value(0).shape[0]))\n for i in range(n_points):\n state_samples[i] = state_traj.value(t[i])[:, 0]\n input_samples[i] = input_traj.value(t[i])[:, 0]\n force_samples[i] = force_traj.value(t[i])[:, 0]\n\n # Plotting reconstructed state trajectories\n plt.figure(\"state trajectory\")\n plt.plot(t, state_samples)\n plt.legend(state_datatypes)\n\n plt.figure(\"input trajectory\")\n plt.plot(t, input_samples)\n plt.legend(input_datatypes)\n\n plt.figure(\"force trajectory\")\n plt.plot(t, force_samples)\n plt.legend(force_datatypes)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
jason-sunjiankang/tensorflow_object_detection
[ "dd53b458cb8809b9ec804f31aabdf04c76893977" ]
[ "generate_tfrecord.py" ]
[ "\"\"\"\nUsage:\n # From tensorflow/models/\n # Create train data:\n python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record\n\n # Create test data:\n python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=test.record\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport io\nimport pandas as pd\nimport tensorflow as tf\n\nfrom PIL import Image\nfrom object_detection.utils import dataset_util\nfrom collections import namedtuple, OrderedDict\n\n# flags = tf.app.flags\n# flags.DEFINE_string('csv_input', '', 'Path to the CSV input')\n# flags.DEFINE_string('output_path', '', 'Path to output TFRecord')\n# FLAGS = flags.FLAGS\n\n\n# TO-DO replace this with label map\ndef class_text_to_int(row_label):\n if row_label == 'raccoon':\n return 1\n else:\n None\n\n\ndef split(df, group):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby(group)\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]\n\n\ndef create_tf_example(group, path):\n with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n\n filename = group.filename.encode('utf8')\n image_format = b'jpg'\n xmins = []\n xmaxs = []\n ymins = []\n ymaxs = []\n classes_text = []\n classes = []\n\n for index, row in group.object.iterrows():\n xmins.append(row['xmin'] / width)\n xmaxs.append(row['xmax'] / width)\n ymins.append(row['ymin'] / height)\n ymaxs.append(row['ymax'] / height)\n classes_text.append(row['class'].encode('utf8'))\n classes.append(class_text_to_int(row['class']))\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(filename),\n 'image/source_id': dataset_util.bytes_feature(filename),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return tf_example\n\n\ndef main(_):\n\n # csv_input_path = \"./data/train_labels.csv\" \n # tfrecord_output_path = \"./data/train.record\"\n # images_path = \"./images/train\"\n\n csv_input_path = \"./data/test_labels.csv\" \n tfrecord_output_path = \"./data/test.record\"\n images_path = \"./images/test\"\n\n\n writer = tf.python_io.TFRecordWriter(tfrecord_output_path)\n examples = pd.read_csv(csv_input_path)\n grouped = split(examples, 'filename')\n for group in grouped:\n tf_example = create_tf_example(group, images_path)\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n print('Successfully created the TFRecords: {}'.format(tfrecord_output_path))\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.app.run", "pandas.read_csv", "tensorflow.python_io.TFRecordWriter" ] ]
HashGehlot03/HeartDiseasePrediction
[ "22a24b113d26e2fd776d28fc3c038474fb93741f" ]
[ "model.py" ]
[ "import pandas as pd\r\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn import metrics\r\nimport pickle\r\nimport plotly.express as px\r\n\r\ndf = pd.read_csv('C:\\\\Users\\\\user\\\\Downloads\\\\heart.csv')\r\nfeatures = df.iloc[:,:-1]\r\nlabels = df.iloc[:,-1]\r\nx_train,x_test,y_train,y_test = train_test_split(features,labels,test_size = 0.8,stratify = labels)\r\ndef scores(x_train,x_test,y_train,y_test):\r\n acc_metrics = pd.DataFrame(columns = ['Models','Accuracies'])\r\n acc_metrics.Models = ['Logistic Regression','Decision Tree Classifier','Random Forest Classifier','ADA Boost Classifier','Gradient Boost Classifier','Support Vector Classifier']\r\n logreg = LogisticRegression()\r\n logreg.fit(x_train,y_train)\r\n logreg_pred = logreg.predict(x_test)\r\n dt = DecisionTreeClassifier()\r\n dt.fit(x_train,y_train)\r\n dt_pred = dt.predict(x_test)\r\n rf = RandomForestClassifier()\r\n rf.fit(x_train,y_train)\r\n rf_pred = rf.predict(x_test)\r\n ada = AdaBoostClassifier()\r\n ada.fit(x_train,y_train)\r\n ada_pred = ada.predict(x_test)\r\n gb = GradientBoostingClassifier()\r\n gb.fit(x_train,y_train)\r\n gb_pred = gb.predict(x_test)\r\n sv = SVC()\r\n sv.fit(x_train,y_train)\r\n sv_pred = sv.predict(x_test)\r\n acc_metrics.Accuracies = [metrics.accuracy_score(y_test,logreg_pred),metrics.accuracy_score(y_test,dt_pred),metrics.accuracy_score(y_test,rf_pred),metrics.accuracy_score(y_test,ada_pred),metrics.accuracy_score(y_test,gb_pred),metrics.accuracy_score(y_test,sv_pred)]\r\n return acc_metrics\r\n# accuracy_scores = scores(x_train,x_test,y_train,y_test)\r\n# accuracy_scores # Random Forest works best so lets hypertune it\r\n\r\nparam_grid = [\r\n{'n_estimators': [10, 25], 'max_features': [5, 10], \r\n 'max_depth': [10, 50, None], 'bootstrap': [True, False]}\r\n]\r\n\r\n#rfc = RandomForestClassifier()\r\n\r\n#gridsv = GridSearchCV(rfc,param_grid,scoring='accuracy')\r\n#gridsv.fit(x_train,y_train)\r\n#final_model = RandomForestClassifier(max_depth = 10,max_features = 5,n_estimators = 10,bootstrap = False)\r\n#final_model.fit(x_train,y_train)\r\n#final_model.predict(x_test)\r\n#pickle.dump(final_model,open('HeartDiseasePredictor','wb'))" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.AdaBoostClassifier", "sklearn.svm.SVC", "sklearn.ensemble.GradientBoostingClassifier", "sklearn.metrics.accuracy_score" ] ]
feloundou/safe-experts
[ "9592bd48ce7eed721a36cb688dd10dc7f527a13b", "9592bd48ce7eed721a36cb688dd10dc7f527a13b" ]
[ "algos/train_expert_ppo_penalized.py", "algos/training_regimes.py" ]
[ "# Main entrance of GAIL\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom adabelief_pytorch import AdaBelief\nimport gym\nimport safety_gym\nimport time\n\n\nfrom neural_nets import ActorCritic, count_vars\n\nfrom utils import BufferActor\nfrom utils import mpi_fork, proc_id, num_procs, EpochLogger,\\\n average_gradients, sync_all_params, setup_pytorch_for_mpi, sync_params, mpi_avg_grads\n # compute_loss_policy\n\nimport wandb\n\ndef ppo_penalized(env_fn,\n actor_critic=ActorCritic,\n ac_kwargs=dict(),\n seed=0,\n episodes_per_epoch=40,\n epochs=500,\n gamma=0.99,\n lam=0.98,\n pi_lr=3e-4,\n vf_lr=1e-3,\n train_v_iters=80,\n train_pi_iters=1, ## NOTE: Incredibly Important That This Be Low For Penalized Learning\n max_ep_len=1000,\n logger_kwargs=dict(),\n clip_ratio = 0.2, # tuned????\n # Cost constraints / penalties:\n cost_lim=25,\n penalty_init=1.,\n penalty_lr=5e-3,\n config_name='standard',\n save_freq=10):\n\n # W&B Logging\n wandb.login()\n\n composite_name = 'new_ppo_penalized_' + config_name\n wandb.init(project=\"LearningCurves\", group=\"PPO Expert\", name=composite_name)\n\n # Special function to avoid certain slowdowns from PyTorch + MPI combo.\n setup_pytorch_for_mpi()\n\n # Set up logger and save configuration\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n seed += 10000 * proc_id()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # Instantiate environment\n env = env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape\n\n ac_kwargs['action_space'] = env.action_space\n\n # Models\n # Create actor-critic module and monitor it\n ac = actor_critic(input_dim=obs_dim[0], **ac_kwargs)\n\n # Set up model saving\n logger.setup_pytorch_saver(ac)\n\n # Sync params across processes\n sync_params(ac)\n\n # Buffers\n local_episodes_per_epoch = int(episodes_per_epoch / num_procs())\n buf = BufferActor(obs_dim[0], act_dim[0], local_episodes_per_epoch, max_ep_len)\n\n # Count variables\n var_counts = tuple(count_vars(module) for module in [ac.pi, ac.v])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t v: %d\\n' % var_counts)\n\n # Optimizers\n pi_optimizer = torch.optim.Adam(ac.pi.parameters(), lr=pi_lr)\n vf_optimizer = torch.optim.Adam(ac.v.parameters(), lr=vf_lr)\n\n # pi_optimizer = AdaBelief(ac.pi.parameters(), betas=(0.9, 0.999), eps=1e-8)\n # vf_optimizer = AdaBelief(ac.v.parameters(), betas=(0.9, 0.999), eps=1e-8)\n\n\n # # Parameters Sync\n # sync_all_params(ac.parameters())\n\n # Set up function for computing PPO policy loss\n def compute_loss_pi(obs, act, adv, logp_old):\n # Policy loss # policy gradient term + entropy term\n # Policy loss with clipping (without clipping, loss_pi = -(logp*adv).mean()).\n # TODO: Think about removing clipping\n _, logp, _ = ac.pi(obs, act)\n\n ratio = torch.exp(logp - logp_old)\n clip_adv = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv\n loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()\n\n return loss_pi\n\n\n def penalty_update(cur_penalty):\n cur_cost = logger.get_stats('EpCost')[0]\n cur_rew = logger.get_stats('EpRet')[0]\n\n # Penalty update\n cur_penalty = max(0, cur_penalty + penalty_lr * (cur_cost - cost_lim))\n return cur_penalty\n\n def update(e):\n obs, act, adv, ret, logp_old = [torch.Tensor(x) for x in buf.retrieve_all()]\n\n # Policy\n _, logp, _ = ac.pi(obs, act)\n entropy = (-logp).mean()\n\n # Train policy with multiple steps of gradient descent\n for _ in range(train_pi_iters):\n pi_optimizer.zero_grad()\n loss_pi = 
compute_loss_pi(obs, act, adv, ret)\n loss_pi.backward()\n # average_gradients(train_pi.param_groups)\n # mpi_avg_grads(pi_optimizer.param_groups)\n mpi_avg_grads(ac.pi)\n pi_optimizer.step()\n\n # Value function training\n v = ac.v(obs)\n v_l_old = F.mse_loss(v, ret) # old loss\n\n for _ in range(train_v_iters):\n v = ac.v(obs)\n v_loss = F.mse_loss(v, ret) # how well did our value function predict loss?\n\n # Value function train\n vf_optimizer.zero_grad()\n v_loss.backward()\n # average_gradients(vf_optimizer.param_groups)\n mpi_avg_grads(ac.v) # average gradients across MPI processes\n vf_optimizer.step()\n\n # Log the changes\n _, logp, _, v = ac(obs, act)\n # entropy_new = (-logp).mean()\n pi_loss_new = -(logp * adv).mean()\n v_loss_new = F.mse_loss(v, ret)\n kl = (logp_old - logp).mean()\n logger.store(LossPi=loss_pi, LossV=v_l_old, DeltaLossPi=(pi_loss_new - loss_pi),\n DeltaLossV=(v_loss_new - v_l_old), Entropy=entropy, KL=kl)\n\n # Prepare for interaction with the environment\n start_time = time.time()\n o, r, d, ep_ret, ep_cost, ep_len = env.reset(), 0, False, 0, 0, 0\n total_t = 0\n\n # Initialize penalty\n cur_penalty = np.log(max(np.exp(penalty_init) - 1, 1e-8))\n\n for epoch in range(epochs):\n ac.eval() # eval mode\n # Policy rollout\n for _ in range(local_episodes_per_epoch):\n for _ in range(max_ep_len):\n\n # obs =\n a, _, lopg_t, v_t = ac(torch.Tensor(o.reshape(1, -1)))\n\n logger.store(VVals=v_t)\n\n o, r, d, info = env.step(a.detach().numpy()[0])\n\n c = info.get('cost', 0) # Include penalty on cost\n\n r_total = r - cur_penalty * c\n r_total /= (1 + cur_penalty)\n\n # store\n buf.store(o, a.detach().numpy(), r_total, v_t.item(), lopg_t.detach().numpy())\n\n ep_ret += r\n ep_cost += c\n ep_len += 1\n total_t += 1\n\n terminal = d or (ep_len == max_ep_len)\n if terminal:\n # buf.end_episode()\n buf.finish_path()\n logger.store(EpRet=ep_ret, EpCost=ep_cost, EpLen=ep_len)\n\n print(\"end of episode return: \", ep_ret)\n\n\n\n episode_metrics = {'average ep ret': ep_ret, 'average ep cost': ep_cost}\n wandb.log(episode_metrics)\n\n o, r, d, ep_ret, ep_cost, ep_len = env.reset(), 0, False, 0, 0, 0\n\n if (epoch % save_freq == 0) or (epoch == epochs - 1):\n # logger._torch_save(ac, fname=\"expert_torch_save.pt\")\n # logger._torch_save(ac, fname=\"model.pt\")\n logger.save_state({'env': env}, None, None)\n\n # Update\n ac.train()\n\n # update penalty\n cur_penalty = penalty_update(cur_penalty)\n\n # update networks\n update(epoch)\n\n # Log\n logger.log_tabular('Epoch', epoch)\n # logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpRet', average_only=True)\n logger.log_tabular('EpCost', average_only=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('VVals', average_only=True)\n # logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('TotalEnvInteracts', total_t)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('DeltaLossPi', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('DeltaLossV', average_only=True)\n logger.log_tabular('Entropy', average_only=True)\n # logger.log_tabular('KL', average_only=True)\n logger.log_tabular('Time', time.time() - start_time)\n logger.dump_tabular()\n\n wandb.finish()\n\n\n\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='Safexp-PointGoal1-v0')\n parser.add_argument('--hid', type=int, default=128)\n parser.add_argument('--l', 
type=int, default=4)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--lam', type=float, default=0.95)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--cpu', type=int, default=2)\n parser.add_argument('--episodes-per-epoch', type=int, default=20)\n parser.add_argument('--cost-lim', type=int, default=25)\n # parser.add_argument('--episodes-per-epoch', type=int, default=40)\n parser.add_argument('--epochs', type=int, default=1000)\n parser.add_argument('--exp_name', type=str, default='test-pen-ppo')\n args = parser.parse_args()\n\n mpi_fork(args.cpu)\n\n from utils import setup_logger_kwargs\n\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n ppo_penalized(lambda: gym.make(args.env), actor_critic=ActorCritic, ac_kwargs=dict(hidden_dims=[args.hid]*args.l),\n gamma=args.gamma, lam=args.lam, seed=args.seed, episodes_per_epoch=args.episodes_per_epoch,\n epochs=args.epochs, logger_kwargs=logger_kwargs, cost_lim = args.cost_lim)\n", "# Main entrance of GAIL\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport gym\nimport safety_gym\nimport time\nimport os.path as osp\n\nfrom torch.distributions.categorical import Categorical\n\nfrom neural_nets import Discriminator, ActorCritic, ValorDiscriminator, count_vars\n\nfrom utils import BufferS, BufferT, BufferActor, VALORBuffer\nfrom utils import mpi_fork, proc_id, num_procs, EpochLogger,\\\n average_gradients, sync_all_params\n\n\n\ndef policyg(env_fn,\n actor_critic=ActorCritic,\n ac_kwargs=dict(),\n seed=0,\n episodes_per_epoch=40,\n epochs=500,\n gamma=0.99,\n lam=0.97,\n pi_lr=3e-4,\n vf_lr=1e-3,\n train_v_iters=80,\n max_ep_len=1000,\n logger_kwargs=dict(),\n save_freq=10):\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n seed += 10000 * proc_id()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n env = env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape\n\n ac_kwargs['action_space'] = env.action_space\n\n # Models\n ac = actor_critic(input_dim=obs_dim[0], **ac_kwargs)\n\n # Set up model saving\n logger.setup_pytorch_saver(ac)\n\n # Buffers\n local_episodes_per_epoch = int(episodes_per_epoch / num_procs())\n buf = BufferActor(obs_dim[0], act_dim[0], local_episodes_per_epoch, max_ep_len)\n\n # Count variables\n var_counts = tuple(count_vars(module) for module in [ac.policy, ac.value_f])\n print(\"POLICY GRADIENT\")\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t v: %d\\n' % var_counts)\n\n # Optimizers\n train_pi = torch.optim.Adam(ac.policy.parameters(), lr=pi_lr)\n train_v = torch.optim.Adam(ac.value_f.parameters(), lr=vf_lr)\n\n # Parameters Sync\n sync_all_params(ac.parameters())\n\n def update(e):\n obs, act, adv, ret, lgp_old = [torch.Tensor(x) for x in buf.retrieve_all()]\n\n # Policy\n _, lgp, _ = ac.policy(obs, act)\n entropy = (-lgp).mean()\n\n # Policy loss # policy gradient term + entropy term\n pi_loss = -(lgp * adv).mean()\n\n # Train policy\n train_pi.zero_grad()\n pi_loss.backward()\n average_gradients(train_pi.param_groups)\n train_pi.step()\n\n # Value function\n v = ac.value_f(obs)\n v_l_old = F.mse_loss(v, ret)\n for _ in range(train_v_iters):\n v = ac.value_f(obs)\n v_loss = F.mse_loss(v, ret)\n\n # Value function train\n train_v.zero_grad()\n v_loss.backward()\n average_gradients(train_v.param_groups)\n train_v.step()\n\n # Log the changes\n _, lgp, _, v = ac(obs, act)\n entropy_new = (-lgp).mean()\n pi_loss_new = -(lgp * adv).mean()\n v_loss_new = 
F.mse_loss(v, ret)\n kl = (lgp_old - lgp).mean()\n logger.store(LossPi=pi_loss, LossV=v_l_old, DeltaLossPi=(pi_loss_new - pi_loss),\n DeltaLossV=(v_loss_new - v_l_old), Entropy=entropy, KL=kl)\n\n start_time = time.time()\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n total_t = 0\n\n for epoch in range(epochs):\n ac.eval()\n # Policy rollout\n for _ in range(local_episodes_per_epoch):\n for _ in range(max_ep_len):\n obs = torch.Tensor(o.reshape(1, -1))\n a, _, lopg_t, v_t = ac(obs)\n\n buf.store(o, a.detach().numpy(), r, v_t.item(), lopg_t.detach().numpy())\n logger.store(VVals=v_t)\n\n o, r, d, _ = env.step(a.detach().numpy()[0])\n ep_ret += r\n ep_len += 1\n total_t += 1\n\n terminal = d or (ep_len == max_ep_len)\n if terminal:\n buf.end_episode()\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n if (epoch % save_freq == 0) or (epoch == epochs - 1):\n # logger._torch_save(ac, fname=\"expert_torch_save.pt\")\n # logger._torch_save(ac, fname=\"model.pt\")\n logger.save_state({'env': env}, None, None)\n\n # Update\n ac.train()\n\n update(epoch)\n\n # Log\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('TotalEnvInteracts', total_t)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('DeltaLossPi', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('DeltaLossV', average_only=True)\n logger.log_tabular('Entropy', average_only=True)\n logger.log_tabular('KL', average_only=True)\n logger.log_tabular('Time', time.time() - start_time)\n logger.dump_tabular()\n\n\n\n\n\n\n\ndef gail(env_fn, actor_critic=ActorCritic, ac_kwargs=dict(),\n disc=Discriminator,\n dc_kwargs=dict(), seed=0,\n episodes_per_epoch=40,\n epochs=500,\n gamma=0.99, lam=0.97,\n pi_lr=3e-3, vf_lr=3e-3, dc_lr=5e-4, train_v_iters=80, train_dc_iters=80,\n max_ep_len=1000, logger_kwargs=dict(), save_freq=10):\n l_lam = 0 # balance two loss term\n\n print(\"starting now\")\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n seed += 10000 * proc_id()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n env = env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape\n\n ac_kwargs['action_space'] = env.action_space\n\n # Models\n ac = actor_critic(input_dim=obs_dim[0], **ac_kwargs)\n disc = disc(input_dim=obs_dim[0], **dc_kwargs)\n\n # Set up model saving\n logger.setup_pytorch_saver([ac, disc])\n\n # TODO: Load expert policy here\n expert = actor_critic(input_dim=obs_dim[0], **ac_kwargs)\n # expert_name = \"expert_torch_save.pt\"\n expert_name = \"model.pt\"\n # expert = torch.load(osp.join(logger_kwargs['output_dir'],'pyt_save' , expert_name))\n expert = torch.load('/home/tyna/Documents/openai/research-project/data/anonymous-expert/anonymous-expert_s0/pyt_save/model.pt')\n\n print('RUNNING GAIL')\n\n # Buffers\n local_episodes_per_epoch = int(episodes_per_epoch / num_procs())\n buff_s = BufferS(obs_dim[0], act_dim[0], local_episodes_per_epoch, max_ep_len)\n buff_t = BufferT(obs_dim[0], act_dim[0], local_episodes_per_epoch, max_ep_len)\n\n # Count variables\n var_counts = tuple(count_vars(module) for module in [ac.policy, ac.value_f, disc.policy])\n print(\"GAIL\")\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t v: %d, \\t d: %d\\n' % var_counts)\n\n\n # Optimizers\n train_pi = 
torch.optim.Adam(ac.policy.parameters(), lr=pi_lr)\n train_v = torch.optim.Adam(ac.value_f.parameters(), lr=vf_lr)\n train_dc = torch.optim.Adam(disc.policy.parameters(), lr=dc_lr)\n\n # Parameters Sync\n sync_all_params(ac.parameters())\n sync_all_params(disc.parameters())\n\n def update(e):\n obs_s, act, adv, ret, lgp_old = [torch.Tensor(x) for x in buff_s.retrieve_all()]\n obs_t, _ = [torch.Tensor(x) for x in buff_t.retrieve_all()]\n\n # Policy\n _, lgp, _ = ac.policy(obs_s, act)\n entropy = (-lgp).mean()\n\n # Policy loss\n # policy gradient term + entropy term\n pi_loss = -(lgp * adv).mean() - l_lam * entropy\n\n # Train policy\n if e > 10:\n train_pi.zero_grad()\n pi_loss.backward()\n average_gradients(train_pi.param_groups)\n train_pi.step()\n\n # Value function\n v = ac.value_f(obs_s)\n v_l_old = F.mse_loss(v, ret)\n\n for _ in range(train_v_iters):\n v = ac.value_f(obs_s)\n v_loss = F.mse_loss(v, ret)\n\n # Value function train\n train_v.zero_grad()\n v_loss.backward()\n average_gradients(train_v.param_groups)\n train_v.step()\n\n # Discriminator\n gt1 = torch.ones(obs_s.size()[0], dtype=torch.int)\n gt2 = torch.zeros(obs_t.size()[0], dtype=torch.int)\n _, lgp_s, _ = disc(obs_s, gt=gt1)\n _, lgp_t, _ = disc(obs_t, gt=gt2)\n dc_loss_old = - lgp_s.mean() - lgp_t.mean()\n\n for _ in range(train_dc_iters):\n _, lgp_s, _ = disc(obs_s, gt=gt1)\n _, lgp_t, _ = disc(obs_t, gt=gt2)\n dc_loss = - lgp_s.mean() - lgp_t.mean()\n\n # Discriminator train\n train_dc.zero_grad()\n dc_loss.backward()\n average_gradients(train_dc.param_groups)\n train_dc.step()\n\n _, lgp_s, _ = disc(obs_s, gt=gt1)\n _, lgp_t, _ = disc(obs_t, gt=gt2)\n dc_loss_new = - lgp_s.mean() - lgp_t.mean()\n\n # Log the changes\n _, lgp, _, v = ac(obs, act)\n entropy_new = (-lgp).mean()\n pi_loss_new = -(lgp * adv).mean() - l_lam * entropy\n v_loss_new = F.mse_loss(v, ret)\n kl = (lgp_old - lgp).mean()\n logger.store(LossPi=pi_loss, LossV=v_l_old, LossDC=dc_loss_old, DeltaLossPi=(pi_loss_new - pi_loss),\n DeltaLossV=(v_loss_new - v_l_old), DeltaLossDC=(dc_loss_new - dc_loss_old),\n DeltaEnt=(entropy_new - entropy),\n Entropy=entropy, KL=kl)\n\n start_time = time.time()\n o, r, sdr, d, ep_ret, ep_sdr, ep_len = env.reset(), 0, 0, False, 0, 0, 0\n total_t = 0\n\n ep_len_t = 0\n for epoch in range(epochs):\n ac.eval()\n disc.eval()\n # We recognize the probability term of index [0] correspond to the teacher's policy\n\n # Student's policy rollout\n for _ in range(local_episodes_per_epoch):\n for _ in range(max_ep_len):\n obs = torch.Tensor(o.reshape(1, -1))\n a, _, lopg_t, v_t = ac(obs)\n\n buff_s.store(o, a.detach().numpy(), r, sdr, v_t.item(), lopg_t.detach().numpy())\n logger.store(VVals=v_t)\n\n o, r, d, _ = env.step(a.detach().numpy()[0])\n _, sdr, _ = disc(torch.Tensor(o.reshape(1, -1)), gt=torch.Tensor([0]))\n if sdr < -4: # Truncate rewards\n sdr = -4\n ep_ret += r\n ep_sdr += sdr\n ep_len += 1\n total_t += 1\n\n terminal = d or (ep_len == max_ep_len)\n if terminal:\n buff_s.end_episode()\n logger.store(EpRetS=ep_ret, EpLenS=ep_len, EpSdrS=ep_sdr)\n print(\"Student Episode Return: \\t\", ep_ret)\n o, r, sdr, d, ep_ret, ep_sdr, ep_len = env.reset(), 0, 0, False, 0, 0, 0\n\n # Teacher's policy rollout\n for _ in range(local_episodes_per_epoch):\n for _ in range(max_ep_len):\n obs = torch.Tensor(o.reshape(1, -1))\n a, _, _, _ = expert(obs)\n\n buff_t.store(o, a.detach().numpy(), r)\n\n o, r, d, _ = env.step(a.detach().numpy()[0])\n ep_ret += r\n ep_len += 1\n total_t += 1\n\n terminal = d or (ep_len == max_ep_len)\n if 
terminal:\n buff_t.end_episode()\n logger.store(EpRetT=ep_ret, EpLenT=ep_len)\n print(\"Teacher Episode Return: \\t\", ep_ret)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n if (epoch % save_freq == 0) or (epoch == epochs - 1):\n logger.save_state({'env': env}, [ac, disc], None)\n\n # Update\n ac.train()\n disc.train()\n\n update(epoch)\n\n # Log\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRetS', with_min_and_max=True)\n logger.log_tabular('EpSdrS', with_min_and_max=True)\n logger.log_tabular('EpLenS', average_only=True)\n logger.log_tabular('EpRetT', with_min_and_max=True)\n logger.log_tabular('EpLenT', average_only=True)\n logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('TotalEnvInteracts', total_t)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('DeltaLossPi', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('DeltaLossV', average_only=True)\n logger.log_tabular('LossDC', average_only=True)\n logger.log_tabular('DeltaLossDC', average_only=True)\n logger.log_tabular('Entropy', average_only=True)\n logger.log_tabular('DeltaEnt', average_only=True)\n logger.log_tabular('KL', average_only=True)\n logger.log_tabular('Time', time.time() - start_time)\n logger.dump_tabular()\n\n\ndef valor(env_fn, actor_critic=ActorCritic, ac_kwargs=dict(),\n disc=Discriminator, dc_kwargs=dict(), seed=0,\n episodes_per_epoch=40,\n epochs=50, gamma=0.99, pi_lr=3e-4, vf_lr=1e-3, dc_lr=5e-4,\n train_v_iters=80, train_dc_iters=10,\n train_dc_interv=10,\n lam=0.97, max_ep_len=1000, logger_kwargs=dict(), con_dim=5, save_freq=10, k=1):\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n seed += 10000 * proc_id()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n env = env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape\n\n ac_kwargs['action_space'] = env.action_space\n\n # Model\n ac = actor_critic(input_dim=obs_dim[0] + con_dim, **ac_kwargs)\n disc = disc(input_dim=obs_dim[0], context_dim=con_dim, **dc_kwargs)\n\n # Set up model saving\n logger.setup_pytorch_saver([ac, disc])\n\n # Buffer\n local_episodes_per_epoch = int(episodes_per_epoch / num_procs())\n buffer = VALORBuffer(con_dim, obs_dim[0], act_dim[0], local_episodes_per_epoch, max_ep_len, train_dc_interv)\n\n # Count variables\n var_counts = tuple(count_vars(module) for module in\n [ac.policy, ac.value_f, disc.policy])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t v: %d, \\t d: %d\\n' % var_counts)\n\n # Optimizers\n train_pi = torch.optim.Adam(ac.policy.parameters(), lr=pi_lr)\n train_v = torch.optim.Adam(ac.value_f.parameters(), lr=vf_lr)\n train_dc = torch.optim.Adam(disc.policy.parameters(), lr=dc_lr)\n\n # Parameters Sync\n sync_all_params(ac.parameters())\n sync_all_params(disc.parameters())\n\n def update(e):\n obs, act, adv, pos, ret, logp_old = [torch.Tensor(x) for x in buffer.retrieve_all()]\n\n # Policy\n _, logp, _ = ac.policy(obs, act)\n entropy = (-logp).mean()\n\n # Policy loss\n pi_loss = -(logp * (k * adv + pos)).mean()\n\n # Train policy\n train_pi.zero_grad()\n pi_loss.backward()\n average_gradients(train_pi.param_groups)\n train_pi.step()\n\n # Value function\n v = ac.value_f(obs)\n v_l_old = F.mse_loss(v, ret)\n for _ in range(train_v_iters):\n v = ac.value_f(obs)\n v_loss = F.mse_loss(v, ret)\n\n # Value function train\n train_v.zero_grad()\n v_loss.backward()\n average_gradients(train_v.param_groups)\n train_v.step()\n\n # Discriminator\n if (e + 1) % 
train_dc_interv == 0:\n print('Discriminator Update!')\n con, s_diff = [torch.Tensor(x) for x in buffer.retrieve_dc_buff()]\n _, logp_dc, _ = disc(s_diff, con)\n d_l_old = -logp_dc.mean()\n\n # Discriminator train\n for _ in range(train_dc_iters):\n _, logp_dc, _ = disc(s_diff, con)\n d_loss = -logp_dc.mean()\n train_dc.zero_grad()\n d_loss.backward()\n average_gradients(train_dc.param_groups)\n train_dc.step()\n\n _, logp_dc, _ = disc(s_diff, con)\n dc_l_new = -logp_dc.mean()\n else:\n d_l_old = 0\n dc_l_new = 0\n\n # Log the changes\n _, logp, _, v = ac(obs, act)\n pi_l_new = -(logp * (k * adv + pos)).mean()\n v_l_new = F.mse_loss(v, ret)\n kl = (logp_old - logp).mean()\n logger.store(LossPi=pi_loss, LossV=v_l_old, KL=kl, Entropy=entropy, DeltaLossPi=(pi_l_new - pi_loss),\n DeltaLossV=(v_l_new - v_l_old), LossDC=d_l_old, DeltaLossDC=(dc_l_new - d_l_old))\n # logger.store(Adv=adv.reshape(-1).numpy().tolist(), Pos=pos.reshape(-1).numpy().tolist())\n\n start_time = time.time()\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n context_dist = Categorical(logits=torch.Tensor(np.ones(con_dim)))\n total_t = 0\n\n for epoch in range(epochs):\n ac.eval()\n disc.eval()\n for _ in range(local_episodes_per_epoch):\n c = context_dist.sample()\n c_onehot = F.one_hot(c, con_dim).squeeze().float()\n for _ in range(max_ep_len):\n concat_obs = torch.cat([torch.Tensor(o.reshape(1, -1)), c_onehot.reshape(1, -1)], 1)\n a, _, logp_t, v_t = ac(concat_obs)\n\n buffer.store(c, concat_obs.squeeze().detach().numpy(), a.detach().numpy(), r, v_t.item(),\n logp_t.detach().numpy())\n logger.store(VVals=v_t)\n\n o, r, d, _ = env.step(a.detach().numpy()[0])\n ep_ret += r\n ep_len += 1\n total_t += 1\n\n terminal = d or (ep_len == max_ep_len)\n if terminal:\n dc_diff = torch.Tensor(buffer.calc_diff()).unsqueeze(0)\n con = torch.Tensor([float(c)]).unsqueeze(0)\n _, _, log_p = disc(dc_diff, con)\n buffer.end_episode(log_p.detach().numpy())\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n if (epoch % save_freq == 0) or (epoch == epochs - 1):\n logger.save_state({'env': env}, [ac, disc], None)\n\n # Update\n ac.train()\n disc.train()\n\n update(epoch)\n\n # Log\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('TotalEnvInteracts', total_t)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('DeltaLossPi', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('DeltaLossV', average_only=True)\n logger.log_tabular('LossDC', average_only=True)\n logger.log_tabular('DeltaLossDC', average_only=True)\n logger.log_tabular('Entropy', average_only=True)\n logger.log_tabular('KL', average_only=True)\n logger.log_tabular('Time', time.time() - start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='Safexp-PointGoal1-v0')\n parser.add_argument('--hid', type=int, default=64)\n parser.add_argument('--l', type=int, default=2)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--lam', type=float, default=0.97)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--cpu', type=int, default=1)\n parser.add_argument('--episodes-per-epoch', type=int, default=5)\n # 
parser.add_argument('--episodes-per-epoch', type=int, default=40)\n parser.add_argument('--epochs', type=int, default=1000)\n parser.add_argument('--exp_name', type=str, default='valor-anonymous-expert')\n parser.add_argument('--con', type=int, default=5)\n args = parser.parse_args()\n\n mpi_fork(args.cpu)\n\n from utils import setup_logger_kwargs\n\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n # policyg(lambda: gym.make(args.env), actor_critic=ActorCritic, ac_kwargs=dict(hidden_dims=[args.hid]*args.l),\n # gamma=args.gamma, lam=args.lam, seed=args.seed, episodes_per_epoch=args.episodes_per_epoch,\n # epochs=args.epochs, logger_kwargs=logger_kwargs)\n\n # gail(lambda: gym.make(args.env), actor_critic=ActorCritic, ac_kwargs=dict(hidden_dims=[args.hid] * args.l),\n # disc=Discriminator, dc_kwargs=dict(hidden_dims=[args.hid] * args.l), gamma=args.gamma, lam=args.lam,\n # seed=args.seed, episodes_per_epoch=args.episodes_per_epoch, epochs=args.epochs, logger_kwargs=logger_kwargs)\n\n\n valor(lambda: gym.make(args.env), actor_critic=ActorCritic, ac_kwargs=dict(hidden_dims=[args.hid] * args.l),\n disc=ValorDiscriminator, dc_kwargs=dict(hidden_dims=args.hid),\n gamma=args.gamma, seed=args.seed, episodes_per_epoch=args.episodes_per_epoch, epochs=args.epochs,\n logger_kwargs=logger_kwargs, con_dim=args.con)\n\n" ]
[ [ "numpy.random.seed", "torch.Tensor", "torch.manual_seed", "torch.min", "torch.exp", "torch.nn.functional.mse_loss", "torch.clamp", "numpy.exp" ], [ "torch.Tensor", "numpy.random.seed", "torch.load", "torch.manual_seed", "numpy.ones", "torch.nn.functional.mse_loss", "torch.nn.functional.one_hot" ] ]
Johannes0Horn/Cooperative-Deep-RL-Multi-Agents
[ "fd30246d33a91ae488c3c093a0de55825a43f8b9" ]
[ "SingleAgentProfiling/TD3.py" ]
[ "# Library Imports\nimport numpy as np\nimport tensorflow as tf\n\nclass ReplayBuffer:\n \"\"\"Defines the Buffer dataset from which the agent learns\"\"\"\n def __init__(self, max_size, input_shape, dim_actions):\n self.mem_size = max_size\n self.mem_cntr = 0\n self.state_memory = np.zeros((self.mem_size, input_shape), dtype=np.float32)\n self.new_state_memory = np.zeros((self.mem_size, input_shape), dtype=np.float32)\n self.action_memory = np.zeros((self.mem_size, dim_actions), dtype=np.float32)\n self.reward_memory = np.zeros(self.mem_size, dtype=np.float32)\n self.terminal_memory = np.zeros(self.mem_size, dtype=np.bool)\n \n def store_transition(self, state, action, reward, new_state, done):\n index = self.mem_cntr % self.mem_size\n self.state_memory[index] = state\n self.new_state_memory[index] = new_state\n self.action_memory[index] = action\n self.reward_memory[index] = reward\n self.terminal_memory[index] = done\n self.mem_cntr +=1\n \n def sample_buffer(self, batch_size):\n max_mem = min(self.mem_cntr, self.mem_size)\n batch = np.random.choice(max_mem, batch_size, replace=False)\n states = self.state_memory[batch]\n _states = self.new_state_memory[batch]\n actions = self.action_memory[batch]\n rewards = self.reward_memory[batch]\n dones = self.terminal_memory[batch]\n return states, actions, rewards, _states, dones\n \nclass Critic(tf.keras.Model):\n \"\"\"Defines a Critic Deep Learning Network\"\"\"\n def __init__(self, dim_actions, H1_dim=512, H2_dim=512, name='critic'):\n super(Critic, self).__init__()\n self.H1_dim = H1_dim\n self.H2_dim = H2_dim\n self.dim_actions = dim_actions\n self.model_name = name\n self.checkpoint = self.model_name+'.h5'\n self.H1 = tf.keras.layers.Dense(self.H1_dim, activation='relu')\n self.H2 = tf.keras.layers.Dense(self.H2_dim, activation='relu') \n self.Q = tf.keras.layers.Dense(1, activation=None)\n \n def call(self, state, action):\n action = self.H1(tf.concat([state,action], axis=1))\n action = self.H2(action)\n Q = self.Q(action)\n return Q\n \nclass Actor(tf.keras.Model):\n \"\"\"Defines a Actor Deep Learning Network\"\"\"\n def __init__(self, dim_actions, H1_dim=512, H2_dim=512, name='actor'):\n super(Actor, self).__init__()\n self.H1_dim = H1_dim\n self.H2_dim = H2_dim\n self.dim_actions = dim_actions\n self.model_name = name\n self.checkpoint = self.model_name+'.h5'\n self.H1 = tf.keras.layers.Dense(self.H1_dim, activation='relu')\n self.H2 = tf.keras.layers.Dense(self.H2_dim, activation='relu') \n self.mu = tf.keras.layers.Dense(self.dim_actions, activation='tanh')\n \n def call(self, state):\n action_prob = self.H1(state)\n action_prob = self.H2(action_prob)\n mu = self.mu(action_prob)\n return mu\n \nclass Agent:\n \"\"\"Defines a RL Agent based on Actor-Critc method\"\"\"\n def __init__(self, env, alpha=0.001, beta=0.002,\n gamma=0.99, max_size=1000000, tau=0.005,\n H1=512, H2=256, batch_size=64, noise=0.1):\n self.gamma = gamma\n self.tau = tau\n self.obs_shape = env.observation_space.shape[0]\n self.n_actions = env.action_space.shape[0]\n self.memory = ReplayBuffer(max_size, self.obs_shape, self.n_actions)\n self.batch_size = batch_size\n self.noise = noise\n self.max_action = env.action_space.high\n self.min_action = env.action_space.low\n \n self.actor = Actor(self.n_actions, name='actor')\n self.critic = Critic(self.n_actions, name='critic')\n self.target_actor = Actor(self.n_actions, name='target_actor')\n self.target_critic = Critic(self.n_actions, name='target_critic')\n \n 
self.actor.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=alpha))\n self.critic.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=beta))\n self.target_actor.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=alpha))\n self.target_critic.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=beta))\n self.update_networks(tau=1)\n \n def update_networks(self, tau=None):\n if tau is None:\n tau = self.tau\n \n weights=[]\n targets = self.target_actor.weights\n for i, weight in enumerate(self.actor.weights):\n weights.append(weight*tau + targets[i]*(1-tau))\n self.target_actor.set_weights(weights)\n \n weights=[]\n targets = self.target_critic.weights\n for i, weight in enumerate(self.critic.weights):\n weights.append(weight*tau + targets[i]*(1-tau))\n self.target_critic.set_weights(weights)\n \n def store_exp(self, state, action, reward, new_state, done):\n self.memory.store_transition(state, action, reward, new_state, done)\n \n def save_models(self):\n self.actor.save_weights(self.actor.checkpoint)\n self.critic.save_weights(self.critic.checkpoint)\n self.target_actor.save_weights(self.target_actor.checkpoint)\n self.target_critic.save_weights(self.target_critic.checkpoint)\n \n def load_models(self):\n self.actor.load_weights(self.actor.checkpoint)\n self.critic.load_weights(self.critic.checkpoint)\n self.target_actor.load_weights(self.target_actor.checkpoint)\n self.target_critic.load_weights(self.target_critic.checkpoint)\n \n def choose_action(self, observation):\n evaluate=False\n state = tf.convert_to_tensor([observation], dtype=tf.float32)\n actions = self.actor(state)\n if not evaluate:\n actions += tf.random.normal(shape=[self.n_actions], mean=0.0, stddev=self.noise)\n actions = tf.clip_by_value(actions, self.min_action, self.max_action)\n return actions[0]\n \n def learn(self):\n if self.memory.mem_cntr < self.batch_size:\n return\n\n state, action, reward, new_state, done = self.memory.sample_buffer(self.batch_size)\n\n states = tf.convert_to_tensor(state, dtype=tf.float32)\n states_ = tf.convert_to_tensor(new_state, dtype=tf.float32)\n rewards = tf.convert_to_tensor(reward, dtype=tf.float32)\n actions = tf.convert_to_tensor(action, dtype=tf.float32)\n\n with tf.GradientTape() as tape:\n target_actions = self.target_actor(states_)\n critic_value_ = tf.squeeze(self.target_critic(states_, target_actions), 1)\n critic_value = tf.squeeze(self.critic(states, actions), 1)\n target = reward + self.gamma*critic_value_*(1-done)\n critic_loss = tf.keras.losses.MSE(target, critic_value)\n\n critic_network_gradient = tape.gradient(critic_loss,self.critic.trainable_variables)\n self.critic.optimizer.apply_gradients(zip(critic_network_gradient, self.critic.trainable_variables))\n\n with tf.GradientTape() as tape:\n new_policy_actions = self.actor(states)\n actor_loss = -self.critic(states, new_policy_actions)\n actor_loss = tf.math.reduce_mean(actor_loss)\n\n actor_network_gradient = tape.gradient(actor_loss, self.actor.trainable_variables)\n self.actor.optimizer.apply_gradients(zip(actor_network_gradient, self.actor.trainable_variables))\n self.update_networks()" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.clip_by_value", "tensorflow.concat", "numpy.random.choice", "tensorflow.random.normal", "tensorflow.keras.layers.Dense", "tensorflow.keras.losses.MSE", "tensorflow.math.reduce_mean", "tensorflow.keras.optimizers.Adam", "numpy.zeros", "tensorflow.GradientTape" ] ]
czielinski/facerecognition
[ "2ddd9b74a96e3e7eef3dbab52e5eaf7669d33dc4" ]
[ "facerecognition/facerecognition.py" ]
[ "#!/usr/bin/python\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2015 Christian Zielinski\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULtAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport os\nimport sys\nimport datetime\nimport config\n\n\ndef main():\n # Display splash screen\n print(config.splash_screen)\n\n # Set up the video capture\n video_capture = cv2.VideoCapture(0)\n if not video_capture.isOpened():\n print(\"ERROR: Could not open video capture device\")\n sys.exit(1)\n\n # Set up the cascade classifier\n if not os.path.isfile(config.CLASSIFIER_PATH):\n print(\"ERROR: Cannot open classifier file\")\n sys.exit(2)\n\n classifier = cv2.CascadeClassifier(config.CLASSIFIER_PATH)\n \n # Create a resizable window\n cv2.namedWindow(config.MAIN_WINDOW, cv2.cv.CV_WINDOW_NORMAL)\n\n # Capture loop\n while video_capture.isOpened():\n success, image = video_capture.read()\n if not success:\n print(\"ERROR: Could not read from video capture device\")\n break\n\n # Rescale to IMAGE_WIDTH\n aspect_ratio = image.shape[0] / float(image.shape[1])\n image_size = (config.IMAGE_WIDTH, int(aspect_ratio * config.IMAGE_WIDTH))\n image = cv2.resize(image, image_size)\n\n # Detect\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n objects = classifier.detectMultiScale(gray, **config.DETECT_ARGS)\n time_string = str(datetime.datetime.now())\n\n # Extract zoomed images\n zoom_images = []\n for x, y, w, h in objects[:config.NUM_ZOOMS]:\n zoom_image = image[y : y + h, x : x + w, :].copy()\n zoom_image = cv2.resize(zoom_image, (config.ZOOM_SIZE, config.ZOOM_SIZE))\n zoom_images.append(zoom_image)\n \n # Draw markers\n num_objects = len(objects)\n for num, box in enumerate(objects, start=1):\n x, y, w, h = box\n \n # Draw circle\n center = (int(x + w/2), int(y + h/2))\n scale = config.MARKER_SCALE\n radius = int(scale * min(w, h))\n\n cv2.circle(image, center, radius, config.MARKER_COLOR, config.MARKER_THICK)\n\n # Write text\n text_pos = (int(center[0] + scale * w), int(center[1] + scale * h))\n text_msg = \"{}\".format(num)\n\n cv2.putText(image, text_msg, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 1, config.MARKER_COLOR, 3)\n\n # Status message\n format_args = (time_string, num, num_objects, center[0], center[1])\n print(\"{}: Detected object {}/{} at (x, y) = ({}, {})\".format(*format_args))\n\n if num_objects > 0:\n print()\n\n # Display zoom bar\n if len(zoom_images) > 0:\n zoom_bar = np.hstack(zoom_images)\n zoom_h, zoom_w = zoom_bar.shape[:2]\n image[:zoom_h, 
-zoom_w:] = zoom_bar\n\n # Display the resulting image\n cv2.imshow(config.MAIN_WINDOW, image)\n\n # Waiting for escape key\n if cv2.waitKey(1) == config.ESC_KEY:\n break\n\n # Clean up\n video_capture.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.hstack" ] ]
michaelhall28/clone-competition-simulation
[ "deaa68ce020fa3c1b8fa499c91c829bad4f0def6", "deaa68ce020fa3c1b8fa499c91c829bad4f0def6" ]
[ "clone_competition_simulation/general_sim_class.py", "clone_competition_simulation/fitness_classes.py" ]
[ "import numpy as np\nimport math\n# import matplotlib as mpl\n# mpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport itertools\nimport bisect\nfrom collections import Counter\nimport pickle\nfrom clone_competition_simulation.useful_functions import mean_clone_size, mean_clone_size_fit, surviving_clones_fit, \\\n incomplete_moment, add_incom_to_plot\nfrom clone_competition_simulation.animator import NonSpatialToGridAnimator, HexAnimator, HexFitnessAnimator\nimport warnings\nfrom scipy.sparse import lil_matrix, SparseEfficiencyWarning\nimport gzip\nfrom treelib import Tree\nwarnings.simplefilter('ignore',SparseEfficiencyWarning)\n\n\nclass GeneralSimClass(object):\n \"\"\"\n Common functions for all simulation algorithms.\n Functions for setting up simulations and for plotting results\n \"\"\"\n def __init__(self, parameters):\n # Get attributes from the parameters\n self.total_pop = parameters.initial_cells\n self.initial_size_array = parameters.initial_size_array\n self.initial_clones = len(self.initial_size_array)\n self.mutation_rates = parameters.mutation_rates\n self.mutation_generator = parameters.mutation_generator\n self.division_rate = parameters.division_rate\n self.max_time = parameters.max_time\n self.times = parameters.times\n # To make sure the floating point errors do not lead to incorrect times when searching adjust by small value.\n # Generally not used - finds the closest time instead.\n if len(self.times) > 1:\n min_diff = np.diff(self.times).min()\n else:\n min_diff = self.times[0]\n self._search_times = self.times - min_diff/100\n self.sample_points = parameters.sample_points\n self.non_zero_calc = parameters.non_zero_calc\n self.label_times = parameters.label_times\n self.label_frequencies = parameters.label_frequencies\n self.label_values = parameters.label_values\n self.label_fitness = parameters.label_fitness\n self.label_genes = parameters.label_genes\n\n self.label_times = self._adjust_raw_times(self.label_times)\n if self.label_times is not None:\n self.next_label_time = self.label_times[0]\n else:\n self.next_label_time = np.inf\n self.label_count = 0\n\n self.current_fitness_multiplier = None # The effect of the current treatment\n self.treatment_count = -1\n if parameters.treatment_timings is None:\n # No treatment applied. But this set up means the initial fitness is set correctly then not changed.\n self.treatment_timings = [0, np.inf]\n self.treatment_effects = [1, 1] # Always neutral\n self.next_treatment_time = 0\n self.treatment_replace_fitness = False\n else:\n self.treatment_timings = self._adjust_raw_times(parameters.treatment_timings)\n self.treatment_timings = list(self.treatment_timings) + [np.inf]\n self.treatment_effects = parameters.treatment_effects\n self.next_treatment_time = self.treatment_timings[0] # First value will always be zero\n self.treatment_replace_fitness = parameters.treatment_replace_fitness\n\n self.parameters = parameters\n\n self.sim_length = len(self.times)\n\n self.raw_fitness_array = parameters.fitness_array\n self.clones_array = None # Will store the information about the clones. One row per clone.\n # A clone here will contain exactly the same combination of mutations.\n self.population_array = None # Will store the clone sizes. One row per clone. One column per sample.\n\n # Include indices here for later use. These are the columns of self.clones_array\n self.id_idx = 0 # Unique integer id for each clone. Int.\n self.label_idx = 1 # The type of the clone. Inherited label does not change. 
Int. Represents GFP or similar.\n self.fitness_idx = 2 # The fitness of the clone. Float.\n self.generation_born_idx = 3 # The sample the clone first appeared in. Int.\n self.parent_idx = 4 # The id of the clone that this clone emerged from. Int.\n self.gene_mutated_idx = 5 # The gene (index) the mutation appears in (or similar info). Int.\n # Could encode gene and/or nonsense/missense/... depending on how genes are defined\n\n self.s_muts = set() # Synonymous mutations. Indices of the first clone they appear in\n self.ns_muts = set() # Non-synonymous mutations. Indices of the first clone they appear in\n self.label_muts = set() # Labelled clones. Indices of the clones that get a labeled (after init)\n\n self.plot_idx = 0 # Keeping track of x-coordinate of the plot\n\n self.new_mutation_count = 0\n\n # We can calculate the number of mutations added in each generation beforehand and make the arrays the correct\n # size. This should speed things up for long, mutation heavy simulations.\n self._precalculate_mutations()\n self.total_clone_count = self.initial_clones + self.new_mutation_count\n if parameters.progress:\n print(self.new_mutation_count, 'mutations to add', flush=True)\n\n # Make the arrays the correct size.\n self.tree = Tree()\n self.tree.create_node(str(-1), -1) # Make a root node that isn't a clone.\n self.trimmed_tree = None # Used for mutant clone arrays.\n self._init_arrays(parameters.label_array, parameters.gene_label_array)\n self.next_mutation_index = self.initial_clones # Keeping track of how many mutations added\n\n # Details for plotting\n self.figsize = parameters.figsize\n self.descendant_counts = {}\n self.colourscales = parameters.colourscales\n self.progress = parameters.progress # Prints update every n samples\n self.i = 0\n self.colours = None\n\n # Stores the sizes of clones containing particular mutants.\n self.mutant_clone_array = None\n\n self.tmp_store = parameters.tmp_store\n self.store_rotation = 0 # Alternates between two tmp stores (0, 1) in case error occurs during pickle dump.\n self.is_lil = True # Is the population array stored in scipy.sparse.lil_matrix (True) or numpy array (False)\n self.finished = False\n self.random_state = None # For storing the state of the random sequence for continuing\n\n ############ Functions for running simulations ############\n\n ##### Functions for setting up the simulations\n def _adjust_raw_times(self, array):\n # Takes an array of time points and converts to number of simulation steps\n # This is for the Moran simulations. Overwrite for the other cases\n if array is not None:\n array = np.array(array) * self.division_rate * self.total_pop\n return array\n\n def _precalculate_mutations(self):\n # To be overwritten. Will calculate the number and timing of all mutations in the simulation\n self.new_mutation_count = 0\n\n def _init_arrays(self, labels_array, gene_label_array):\n \"\"\"\n Defines self.clones_array, self.population_array and self.raw_fitness_array\n Fills the self.clones_array with any information given about the initial cells.\n \"\"\"\n self.clones_array = np.zeros((self.total_clone_count, 6))\n self.clones_array[:, self.id_idx] = np.arange(len(self.clones_array)) # Give clone an identifier\n\n if labels_array is None:\n labels_array = 0\n self.clones_array[:self.initial_clones, self.label_idx] = labels_array # Give each intial cell a type\n\n if gene_label_array is None:\n gene_label_array = -1\n # Give each initial cell mutation type. 
-1 if no mutation\n self.clones_array[:self.initial_clones, self.gene_mutated_idx] = gene_label_array\n\n self.clones_array[:self.initial_clones, self.generation_born_idx] = 0\n self.clones_array[:self.initial_clones, self.parent_idx] = -1\n\n # For each clone, need an raw fitness array as long as the number of genes\n # Needs to be dtype=float, which is the default of np.zeros\n if self.mutation_generator.multi_gene_array:\n num_cols_genes = len(self.mutation_generator.genes)+1\n if self.mutation_generator.epistatics is not None:\n num_cols = num_cols_genes + len(self.mutation_generator.epistatics)\n else:\n num_cols = num_cols_genes\n blank_fitness_array = np.full((self.total_clone_count, num_cols),\n np.nan, dtype=float)\n blank_fitness_array[:, 0] = self.parameters.default_fitness\n blank_fitness_array[:self.initial_clones, :num_cols_genes] = self.raw_fitness_array\n self.raw_fitness_array = blank_fitness_array\n # self.clones_array[:, self.fitness_idx] = self._apply_treatment(fitness_arrays=self.raw_fitness_array)\n else:\n blank_fitness_array = np.full((self.total_clone_count, 1), self.parameters.default_fitness, dtype=float)\n blank_fitness_array[:self.initial_clones, 0] = self.raw_fitness_array\n self.raw_fitness_array = blank_fitness_array\n # self.clones_array[:, self.fitness_idx] = self._apply_treatment(fitness_values=self.raw_fitness_array)\n\n self.population_array = lil_matrix((self.total_clone_count,\n self.sim_length)) # Will store the population counts\n\n # Start with the initial_quantities\n if self.times[0] == 0:\n self.population_array[:self.initial_clones, 0] = self.initial_size_array.reshape(\n len(self.initial_size_array), 1)\n self.plot_idx = 1\n\n # Make any initial clones roots of the clone tree\n for i in range(self.initial_clones):\n self.tree.create_node(str(i), i, parent=-1) # Directly descended from the root node\n\n ##### Functions for running the simulation\n def run_sim(self, continue_sim=False):\n # Functions which runs any of the simulation types.\n # self.sim_step will include the differences between the methods.\n\n if self.i > 0:\n # Not the first time it has been run\n if self.finished:\n print('Simulation already run')\n return\n elif continue_sim:\n print('Continuing from step', self.i)\n else:\n print('Simulation already started but incomplete')\n return\n\n current_population = np.zeros(len(self.clones_array), dtype=int)\n current_population[:self.initial_clones] = self.initial_size_array\n if self.non_zero_calc: # Faster for the non-spatial simulations to only track the current surviving clones\n non_zero_clones = np.where(current_population > 0)[0]\n current_population = current_population[non_zero_clones]\n else:\n non_zero_clones = None\n\n # Change treatment if required (can change fitness of clones)\n if self._check_treatment_time():\n self._change_treatment(initial=True)\n\n # Add a label (similar to a lineage tracing label) if requested\n if self._check_label_time():\n current_population, non_zero_clones = self._add_label(current_population,\n non_zero_clones,\n self.label_frequencies[self.label_count],\n self.label_values[self.label_count],\n self.label_fitness[self.label_count],\n self.label_genes[self.label_count])\n if self.progress:\n print('Steps completed:')\n\n while self.plot_idx < self.sim_length:\n # Run step of the simulation\n # Each step can be a generation (Wright-Fisher), a single birth-death-mutation event (Moran) or\n # a single birth or death event (Branching)\n current_population, non_zero_clones = 
self._sim_step(self.i, current_population,\n non_zero_clones)\n self.i += 1\n self._record_results(self.i, current_population, non_zero_clones) # Record the current state\n\n # Add a label (similar to a lineage tracing label) if requested\n if self._check_label_time():\n current_population, non_zero_clones = self._add_label(current_population,\n non_zero_clones,\n self.label_frequencies[self.label_count],\n self.label_values[self.label_count],\n self.label_fitness[self.label_count],\n self.label_genes[self.label_count])\n\n # Change treatment if required (can change fitness of clones)\n if self._check_treatment_time():\n self._change_treatment()\n\n if self.progress:\n print('Finished', self.i, 'steps')\n\n # Clean up the results arrays\n self._finish_up()\n self.finished = True\n\n def continue_sim(self):\n if self.random_state is not None:\n np.random.set_state(self.random_state)\n self.run_sim(continue_sim=True)\n\n def _sim_step(self, i, current_population, non_zero_clones): # Overwrite\n return current_population, non_zero_clones\n\n def _finish_up(self):\n \"\"\"\n Some of the simulations may required some tidying up at the end,\n for example, removing unused rows in the arrays.\n :return:\n \"\"\"\n pass\n\n ##### Functions for storing population counts.\n def _record_results(self, i, current_population, non_zero_clones):\n \"\"\"\n Check if the current step is one of the sample points\n Record the results at the point the simulation is up to.\n Report progress if required\n :param i:\n :param current_population:\n :return:\n \"\"\"\n if i == self.sample_points[self.plot_idx]: # Regularly take a sample for the plot\n self._take_sample(current_population, non_zero_clones)\n\n if self.progress:\n if i % self.progress == 0:\n print(i, end=', ', flush=True)\n\n def _take_sample(self, current_population, non_zero_clones):\n if self.non_zero_calc:\n self.population_array[non_zero_clones, self.plot_idx] = current_population\n else:\n non_zero = np.where(current_population > 0)[0]\n self.population_array[non_zero, self.plot_idx] = current_population[non_zero]\n self.plot_idx += 1\n if self.tmp_store is not None: # Store current state of the simulation.\n if self.store_rotation == 0:\n self.pickle_dump(self.tmp_store)\n self.store_rotation = 1\n else:\n self.pickle_dump(self.tmp_store + '1')\n self.store_rotation = 0\n\n def pickle_dump(self, filename):\n self.random_state = np.random.get_state()\n with gzip.open(filename, 'wb') as f:\n pickle.dump(self, f, protocol=4)\n\n ##### Functions for changing treatment (changes clone fitness)\n def _check_treatment_time(self):\n if self.i >= self.next_treatment_time:\n return True\n return False\n\n def _change_treatment(self, initial=False):\n self.treatment_count += 1\n self.current_fitness_multiplier = self.treatment_effects[self.treatment_count]\n self.next_treatment_time = self.treatment_timings[self.treatment_count+1]\n if initial:\n if self.mutation_generator.multi_gene_array:\n self.clones_array[:self.initial_clones, self.fitness_idx] = self._apply_treatment(fitness_arrays=self.raw_fitness_array[:self.initial_clones])\n else:\n self.clones_array[:self.initial_clones, self.fitness_idx] = self._apply_treatment(fitness_values=self.raw_fitness_array[:self.initial_clones])\n else:\n if self.mutation_generator.multi_gene_array:\n self.clones_array[:, self.fitness_idx] = self._apply_treatment(fitness_arrays=self.raw_fitness_array)\n else:\n self.clones_array[:, self.fitness_idx] = self._apply_treatment(fitness_values=self.raw_fitness_array)\n\n def 
_apply_treatment(self, fitness_values=None, fitness_arrays=None):\n # Apply the treatment affects to an array of fitnesses\n # fitness_values is 1D array of overall fitness for each clone\n # fitness_arrays is a 2D array with one row per clone and one column per gene plus a column for wild type\n if self.mutation_generator.multi_gene_array:\n # Apply the treatment to the genes, then calculate the overall fitness.\n if not self.treatment_replace_fitness: # Multiply the fitness by the treatment effect\n adjusted_fitness_array = fitness_arrays * self.current_fitness_multiplier\n combined_fitness_array, _ = self.mutation_generator.combine_vectors(adjusted_fitness_array)\n else:\n # Replace the fitness with the new fitness effect\n adjusted_fitness_array = np.tile(self.current_fitness_multiplier, (len(fitness_arrays), 1))\n adjusted_fitness_array[np.isnan(fitness_arrays)] = np.nan # Leave all unmutated genes unmutated\n\n # Leave all untreated genes (nan multiplier) as they were.\n adjusted_fitness_array[:,\n np.isnan(self.current_fitness_multiplier)] = fitness_arrays[:,\n np.isnan(self.current_fitness_multiplier)]\n combined_fitness_array, _ = self.mutation_generator.combine_vectors(adjusted_fitness_array)\n return combined_fitness_array\n else:\n # Applies per clone.\n if not self.treatment_replace_fitness:\n # Multiply the overall fitness of each clone.\n return fitness_values.T * self.current_fitness_multiplier\n else:\n # Replace fitness per clone with the new fitness (if not nan).\n new_fitness_values = self.current_fitness_multiplier\n new_fitness_values[np.isnan(new_fitness_values)] = fitness_values[np.isnan(new_fitness_values), 0]\n return new_fitness_values\n\n ##### Functions for adding labelled clones (similar to lineage tracing experiments)\n def _check_label_time(self):\n if self.i >= self.next_label_time:\n return True\n return False\n\n def _add_label(self, current_population, non_zero_clones, label_frequency, label, label_fitness, label_gene):\n \"\"\"\n Add some labelling at the current label frequency.\n The labelling is not exact, so each cell has same chance.\n Use a Poisson distribution of events for each clone.\n \"\"\"\n # Random draw for each clone base on clone size\n labels_per_clone = np.random.binomial(current_population, label_frequency)\n assert not np.any(labels_per_clone - current_population > 0), (labels_per_clone, current_population)\n\n num_labels = np.sum(labels_per_clone)\n self._extend_arrays_fixed_amount(num_labels)\n\n # Add the new clones to the current population. 
Cells will be removed from the old clones below.\n current_population = np.concatenate([current_population, np.ones(num_labels, dtype=int)])\n non_zero_clones = np.concatenate([non_zero_clones,\n np.arange(self.next_mutation_index,\n self.next_mutation_index + num_labels)])\n\n for i, (c, n) in enumerate(zip(labels_per_clone, non_zero_clones)):\n for j in range(c):\n current_population[i] -= 1\n self._add_labelled_clone(n, label, label_fitness, label_gene)\n\n gr_z = np.where(current_population > 0)[0] # The indices of clones alive at this point in the current pop\n non_zero_clones = non_zero_clones[gr_z] # Convert to the original clone numbers\n current_population = current_population[gr_z] # Only keep the currently alive clones in current pop\n\n self.label_count += 1\n if len(self.label_times) > self.label_count:\n self.next_label_time = self.label_times[self.label_count]\n else:\n self.next_label_time = np.inf\n\n return current_population, non_zero_clones\n\n def _add_labelled_clone(self, parent_idx, label, label_fitness, label_gene):\n \"\"\"Select a fitness for the new mutation and the cell in which the mutation occurs\n parent_idx = the id of the clone in which the mutation occurs\n \"\"\"\n selected_clone = self.clones_array[parent_idx]\n old_fitness = selected_clone[self.fitness_idx]\n old_mutation_array = self.raw_fitness_array[parent_idx]\n new_fitness_array = old_mutation_array.copy()\n if label_gene is None:\n gene_mutated = -1 # Not a gene mutation. Any fitness change will be on wild type\n label_gene = 0\n else:\n gene_mutated = label_gene\n if label_fitness is not None: # Fitness will replace what went before for that gene/wild type\n new_fitness_array[label_gene] = label_fitness\n new_fitness, self.raw_fitness_array[self.next_mutation_index] \\\n = self.mutation_generator.combine_vectors(np.atleast_2d(new_fitness_array))\n else:\n new_fitness = old_fitness\n\n self.label_muts.add(self.next_mutation_index)\n\n # Add the new clone to the clone_array\n self.clones_array[self.next_mutation_index] = self.next_mutation_index, label, new_fitness, \\\n self.plot_idx, parent_idx, gene_mutated\n\n # Update ancestors and descendants. Note, all clones already have themselves as ancestor and descendant.\n self.tree.create_node(str(self.next_mutation_index), self.next_mutation_index, parent=parent_idx)\n\n # Update the mutation_array\n self.raw_fitness_array[self.next_mutation_index] = new_fitness_array\n\n self.next_mutation_index += 1\n\n def _extend_arrays_fixed_amount(self, extension):\n \"\"\"\n Add new rows to the population and clones arrays. 
For when the labels are added.\n \"\"\"\n s = self.population_array.shape[0]\n new_pop_array = lil_matrix((s + extension, self.sim_length))\n new_pop_array[:s] = self.population_array\n self.population_array = new_pop_array\n\n self.clones_array = np.concatenate([self.clones_array, np.zeros((extension, 6))], axis=0)\n\n self.raw_fitness_array = np.concatenate([self.raw_fitness_array,\n np.full((extension, self.raw_fitness_array.shape[1]), np.nan)],\n axis=0)\n\n ##### Functions for adding mutations\n def _draw_mutations_for_single_cell(self, parent_idxs):\n \"\"\"\n For the case where a single or multiple mutations are added to the same cell.\n If multiple, they must be added one at a time so that they combine fitness with each other correctly\n\n :param parent_idxs:\n :return:\n \"\"\"\n for p in parent_idxs:\n self._draw_multiple_mutations_and_add_to_array([p])\n\n def _draw_multiple_mutations_and_add_to_array(self, parent_idxs):\n \"\"\"Select a fitness for the new mutation and the cell in which the mutation occurs\n parent_idx = the id of the clone in which the mutation occurs\n\n For multiple mutations at once. Need the new mutation generator\n \"\"\"\n selected_clones = self.clones_array[parent_idxs]\n new_types = selected_clones[:, self.label_idx] # Are the new clones labelled or not\n old_fitnesses = selected_clones[:, self.fitness_idx]\n old_mutation_arrays = self.raw_fitness_array[parent_idxs]\n\n # Get a fitness value for the new clone.\n new_fitness_values, new_fitness_arrays, \\\n synonymous, genes_mutated = self.mutation_generator.get_new_fitnesses(old_fitnesses, old_mutation_arrays)\n\n mutation_indices = np.arange(self.next_mutation_index, self.next_mutation_index + len(parent_idxs))\n\n s = synonymous == 1\n ns = synonymous == 0\n self.s_muts.update(mutation_indices[s])\n self.ns_muts.update(mutation_indices[ns])\n\n # Add the new clone to the clone_array\n new_fitness_values = self._apply_treatment(new_fitness_values, new_fitness_arrays)\n new_array = np.array([mutation_indices, new_types, new_fitness_values,\n np.full(len(parent_idxs), self.plot_idx), parent_idxs, genes_mutated]).T\n self.clones_array[self.next_mutation_index:self.next_mutation_index + len(parent_idxs)] = new_array\n\n # Update clone tree\n for m, p in zip(mutation_indices, parent_idxs):\n self.tree.create_node(str(m), m, parent=p)\n\n # Update the mutation_array\n self.raw_fitness_array[self.next_mutation_index:self.next_mutation_index + len(parent_idxs)] = new_fitness_arrays\n\n self.next_mutation_index += len(parent_idxs)\n\n def _store_any_extras(self, new_growth_rate, synonymous, gene_mutated, parent_idx):\n # A function to be used if more information needs storing after a mutation\n pass\n\n ############ Functions for post-processing simulations ############\n def change_sparse_to_csr(self):\n # Converts to a different type of sparse matrix.\n # Required for some of the post-processing and plotting functions.\n if self.is_lil:\n self.population_array = self.population_array.tocsr() # Convert to CSR matrix\n self.is_lil = False\n\n def _convert_time_to_index(self, t, nearest=True):\n if nearest: # Find nearest point to the time of interest\n return self._find_nearest(t)\n else: # Find the index at or just before the time of interest\n i = bisect.bisect_right(self._search_times, t)\n if i:\n return i - 1\n raise ValueError\n\n def _find_nearest(self, t):\n # From stackoverflow, Demitri, https://stackoverflow.com/a/26026189\n array = self.times\n idx = np.searchsorted(array, t, side=\"left\")\n if idx 
> 0 and (idx == len(array) or math.fabs(t - array[idx - 1]) < math.fabs(t - array[idx])):\n return idx - 1\n else:\n return idx\n\n def get_clone_sizes_array_for_non_mutation(self, t=None, index_given=False, label=None, exclude_zeros=True):\n \"\"\"\n Gets array of all clone sizes.\n Clones here are defined by a unique set of mutations, not per mutation.\n Therefore this is only really suitable for a simulation without mutations, where we want to track the sizes of\n a number of initial clones.\n :param t: time or index of the sample to get the distribution for.\n :param index_given: True if t is the index\n :return:\n \"\"\"\n if self.is_lil:\n self.change_sparse_to_csr()\n\n if t is None:\n index_given = True\n t = -1\n if not index_given:\n i = self._convert_time_to_index(t)\n else:\n i = t\n if label is not None:\n clones_to_select = np.where(self.clones_array[:, self.label_idx] == label)\n clones = self.population_array[clones_to_select, i]\n else:\n clones = self.population_array[:, i]\n\n clones = clones.toarray().astype(int).flatten() # Must convert to 1D array to use bincount\n if exclude_zeros:\n clones = clones[clones > 0]\n\n return clones\n\n def get_clone_size_distribution_for_non_mutation(self, t=None, index_given=False, label=None, exclude_zeros=False):\n \"\"\"\n Gets the clone size frequencies. Not normalised.\n Clones here are defined by a unique set of mutations, not per mutation.\n Therefore this is only really suitable for a simulation without mutations, where we want to track the sizes of\n a number of initial clones.\n :param t: time or index of the sample to get the distribution for.\n :param index_given: True if t is the index\n :return:\n \"\"\"\n clones = self.get_clone_sizes_array_for_non_mutation(t=t, index_given=index_given, label=label,\n exclude_zeros=exclude_zeros)\n counts = np.bincount(clones)\n return counts\n\n def get_surviving_clones_for_non_mutation(self, times=None, label=None):\n \"\"\"\n Follows the surviving clones based on of each row in the clone array. 
This is a clone defined by a unique set of\n mutations, not be a particular mutation.\n Therefore, this function is only suitable for tracking the progress of clones growing without any mutations.\n For comparing to single progenitor model in lineage tracing experiments.\n \"\"\"\n if times is None:\n times = self.times\n surviving_clones = []\n if label is not None:\n clones_to_select = np.where(self.clones_array[:, self.label_idx] == label)\n pop_array = self.population_array[clones_to_select]\n else:\n pop_array = self.population_array\n for t in times:\n idx = self._convert_time_to_index(t)\n surviving_clones.append(pop_array[:, idx].count_nonzero())\n\n return surviving_clones, times\n\n def get_clone_ancestors(self, clone_idx):\n return [n for n in self.tree.rsearch(clone_idx)]\n\n def get_clone_descendants(self, clone_idx):\n return list(self.tree.subtree(clone_idx).nodes.keys()) # Might be better way to do this\n\n def _trim_tree(self):\n # Some clones may have appeared and died between sampling points.\n # These won't affect the results but can slow down the processing\n # Make new tree just from sampled clones.\n\n if self.trimmed_tree is None:\n\n non_zero_sampled_clones = np.unique((self.population_array.nonzero()[0]))\n sampled_clones_set = set()\n for clone in non_zero_sampled_clones[::-1]:\n if clone not in sampled_clones_set:\n ancestors = self.get_clone_ancestors(clone)\n sampled_clones_set.update(ancestors)\n\n sampled_clones_set.remove(-1)\n self.trimmed_tree = Tree()\n self.trimmed_tree.create_node(\"-1\", -1)\n self.sampled_clones = sorted(sampled_clones_set)\n for n in sorted(self.sampled_clones): # For every clone that is alive at a sampling time\n for n2 in self.tree.rsearch(n): # Find the first ancestor that was sampled. This is the new parent.\n if n != n2 and (n2 == -1 or n2 in sampled_clones_set):\n self.trimmed_tree.create_node(str(n), n, parent=n2)\n break\n\n def _get_clone_descendants_trimmed(self, clone_idx):\n \"\"\"Must run trim tree first\"\"\"\n return list(self.trimmed_tree.subtree(clone_idx).nodes.keys())\n\n def track_mutations(self, selection='all'):\n \"\"\"\n Get a dictionary of the clones which contain each mutation.\n :param selection: 'all', 'ns', 's'. All/non-synonymous only/synonymous only.\n :return: Dict. 
Key: mutation id (id of first clone which contains the mutation),\n value: set of clone ids which contain that mutation\n \"\"\"\n if selection == 's':\n mutant_clones = {k: self.get_clone_descendants(k) for k in self.s_muts}\n elif selection == 'ns':\n mutant_clones = {k: self.get_clone_descendants(k) for k in self.ns_muts}\n elif selection == 'all':\n mutant_clones = {k: self.get_clone_descendants(k) for k in range(len(self.clones_array))}\n elif selection == 'label':\n mutant_clones = {k: self.get_clone_descendants(k) for k in self.label_muts}\n elif selection == 'mutations':\n mutant_clones = {k: self.get_clone_descendants(k) for k in self.ns_muts.union(self.s_muts)}\n elif selection == 'non_zero':\n self._trim_tree()\n mutant_clones = {k: self._get_clone_descendants_trimmed(k) for k in self.sampled_clones}\n else:\n print(\"Please select from 'all', 's', 'ns', 'label', 'mutations' or 'non_zero'\")\n raise ValueError(\"Please select from 'all', 's', 'ns', 'label', 'mutations' or 'non_zero'\")\n\n return mutant_clones\n\n def _create_mutant_clone_array(self):\n \"\"\"\n Create an array with the clone sizes for each mutant across the entire simulation.\n The populations will usually add up to more than the total since many clones will have multiple mutations\n \"\"\"\n mutant_clones = self.track_mutations(selection='non_zero')\n self.mutant_clone_array = lil_matrix(self.population_array.shape)\n for mutant in mutant_clones:\n self.mutant_clone_array[mutant] = self.population_array[mutant_clones[mutant]].sum(axis=0)\n\n def get_idx_of_gene_mutated(self, gene_mutated):\n \"\"\"\n Returns a set of all clones with gene_mutated given\n \"\"\"\n gene_num = self.mutation_generator.get_gene_number(gene_mutated)\n return set(np.where(self.clones_array[:, self.gene_mutated_idx] == gene_num)[0])\n\n def get_mutant_clone_sizes(self, t=None, selection='mutations', index_given=False, gene_mutated=None, non_zero_only=False):\n \"\"\"\n Get an array of mutant clone sizes at a particular time\n WARNING: This may not work exactly as expected if there were multiple initial clones!\n :param t: time/sample index\n :param selection: 'all', 'ns', 's'. All/non-synonymous only/synonymous only.\n :param index_given: True if t is an index of the sample, False if t is a time.\n :param gene_mutated: Gene name. 
Only return clone sizes for a particular additional label.\n For example to only get mutations for a single gene.\n :param non_zero_only: Only return mutants with a positive cell count.\n :return: np.array of ints\n \"\"\"\n if t is None:\n t = self.max_time\n index_given = False\n if not index_given:\n i = self._convert_time_to_index(t)\n else:\n i = t\n if self.mutant_clone_array is None:\n # If the mutant clone array has not been created yet, create it.\n self._create_mutant_clone_array()\n # We now find all rows in the mutant clone array that we want to keep\n if selection == 'all':\n muts = set(range(self.initial_clones, len(self.clones_array))) # Get all rows except the initial clones\n elif selection == 'mutations':\n muts = self.ns_muts.union(self.s_muts)\n elif selection == 'ns':\n muts = self.ns_muts\n elif selection == 's':\n muts = self.s_muts\n elif selection == 'label':\n muts = self.label_muts\n if gene_mutated is not None:\n muts = list(muts.intersection(self.get_idx_of_gene_mutated(gene_mutated)))\n else:\n muts = list(muts)\n\n mutant_clones = self.mutant_clone_array[muts][:, i].toarray().astype(int).flatten()\n\n if non_zero_only:\n return mutant_clones[mutant_clones > 0]\n else:\n return mutant_clones\n\n def get_mutant_clone_size_distribution(self, t=None, selection='mutations', index_given=False, gene_mutated=None):\n \"\"\"\n Get the frequencies of mutant clone sizes. Not normalised.\n :param t: time/sample index\n :param selection: 'mutations', 'ns', 's'. All/non-synonymous only/synonymous only.\n :param index_given: True if t is an index of the sample, False if t is a time.\n :param gene_mutated: Int. Only return clone sizes for a particular additional label.\n For example to only get mutations for a single gene.\n :return: np.array of ints.\n \"\"\"\n if t is None:\n t = self.max_time\n index_given = False\n if not index_given:\n i = self._convert_time_to_index(t)\n else:\n i = t\n if selection == 'mutations':\n if self.ns_muts and not self.s_muts:\n selection = 'ns'\n elif self.s_muts and not self.ns_muts:\n selection = 's'\n elif not self.s_muts and not self.ns_muts:\n print('No mutations at all')\n return None\n elif selection == 'ns' and not self.ns_muts:\n print('No non-synonymous mutations')\n return None\n elif selection == 's' and not self.s_muts:\n print('No synonymous mutations')\n return None\n\n clones = self.get_mutant_clone_sizes(i, selection=selection, index_given=True,\n gene_mutated=gene_mutated)\n\n counts = np.bincount(clones)\n return counts\n\n def get_dnds(self, t=None, min_size=1, gene=None):\n \"\"\"\n Returns the dN/dS at a particular time.\n :param t: Time. If None, will be the end of the simulation.\n :param min_size: Int. The minimum size of clones to include.\n :param gene: Int. The type of the mutation. E.g. 
For getting dN/dS for a particular gene.\n :return:\n \"\"\"\n if t is None:\n t = self.max_time\n ns_mut = self.get_mutant_clone_sizes(t, selection='ns', gene_mutated=gene)\n s_mut = self.get_mutant_clone_sizes(t, selection='s', gene_mutated=gene)\n ns_mut_measured = ns_mut[ns_mut >= min_size]\n total_ns = len(ns_mut_measured)\n s_mut_measured = s_mut[s_mut >= min_size]\n total_s = len(s_mut_measured)\n\n gene_num = self.mutation_generator.get_gene_number(gene) # If gene is None, will get the overall ns\n expected_ns = total_s * (1 / self.mutation_generator.get_synonymous_proportion(gene_num) - 1)\n try:\n dnds = total_ns / expected_ns\n return dnds\n except ZeroDivisionError as e:\n return np.nan\n\n def get_labeled_population(self, label=None):\n \"\"\"\n If label is None, will return the total population (not interesting for the fixed population models)\n :param label:\n :return: Array of population at all time points\n \"\"\"\n if label is not None:\n clones_to_select = np.where(self.clones_array[:, self.label_idx] == label)\n pop = self.population_array[clones_to_select]\n else:\n pop = self.population_array\n\n return pop.toarray().sum(axis=0)\n\n def get_mean_clone_size(self, t=None, selection='mutations', index_given=False, gene_mutated=None):\n clone_sizes = self.get_mutant_clone_sizes(t=t, selection=selection, index_given=index_given,\n gene_mutated=gene_mutated)\n mean_ = clone_sizes[clone_sizes > 0].mean()\n return mean_\n\n def get_mean_clone_sizes_syn_and_non_syn(self, t=None, index_given=False, gene_mutated=None):\n\n mean_syn = self.get_mean_clone_size(t=t, selection='s', index_given=index_given, gene_mutated=gene_mutated)\n mean_non_syn = self.get_mean_clone_size(t=t, selection='ns', index_given=index_given, gene_mutated=gene_mutated)\n\n return mean_syn, mean_non_syn\n\n def get_average_fitness(self, t=None):\n self.change_sparse_to_csr()\n if t is None:\n idx = -1\n else:\n idx = self._convert_time_to_index(t)\n\n fitnesses = self.clones_array[:, self.fitness_idx]\n weights = np.squeeze(self.population_array[:, idx].toarray()) * fitnesses\n global_average_fitness = weights.sum() / self.population_array[:, idx].sum()\n return global_average_fitness\n\n ############ Plotting functions ############\n def _get_colour(self, scaled_fitness, clone_label, ns, initial, last_mutated_gene, genes_mutated):\n \"\"\"\n Gets the colour for a clone to be plotted in.\n The colour will depend on the chosen colourscale and the attributes of the clone.\n :param scaled_fitness: The growth_rate/fitness of the clone\n :param clone_label: The type of the clone\n :param ns: Whether the (last) mutation is non-synonymous\n :param initial: Whether the clone is one from the start of the simuation (True), or whether it was created by\n a mutation during the simulation (False)\n :param last_mutated_gene:\n :param genes_mutated: all genes mutated in the clone\n :return:\n \"\"\"\n if self.colourscales is not None:\n return self.colourscales._get_colour(scaled_fitness, clone_label, ns, initial, last_mutated_gene,\n genes_mutated)\n else:\n return cm.YlOrBr(scaled_fitness / 2) # Range of yellow/brown/orange\n\n def _get_colours(self, clones_array, force_regenerate=False):\n # Generate the colours for the clones plot. 
Colour depends on type (wild type/A), relative fitness and s/ns\n if not self.colours or force_regenerate:\n rates = clones_array[:, self.fitness_idx]\n min_ = rates.min() - 0.1\n max_ = rates.max()\n self.colours = {}\n for i, clone in enumerate(clones_array):\n scaled_fitness = (clone[self.fitness_idx] - min_) / (max_ - min_)\n if clone[self.id_idx] in self.ns_muts:\n ns = True\n else:\n ns = False\n if clone[self.generation_born_idx] == 0:\n initial = True\n else:\n initial = False\n self.colours[clone[self.id_idx]] = self._get_colour(scaled_fitness, clone[self.label_idx], ns, initial,\n clone[self.gene_mutated_idx],\n tuple(np.where(~np.isnan(self.raw_fitness_array[i]))[0]))\n\n def muller_plot(self, plot_file=None, plot_against_time=True, quick=False, min_size=1,\n allow_y_extension=False, plot_order=None, figsize=None, force_new_colours=False, ax=None):\n \"\"\"\n Plots the results of the simulation over time.\n Mutations marked with X\n The clones will appear as growing and shrinking sideways tear drops.\n Sub-clones emerge from their parent clones\n :param plot_file: File name to save the plot. If none, the plot will be displayed.\n If a file name, include the file type, e.g. \"output_plot.pdf\"\n :param prepare_only: Bool. Does not display plot.\n Runs some of the function to prepare data for plotting with the animation (although not implemented yet).\n :param plot_against_time: Bool. Use the time from the simulation instead of index of the sample for x-axis\n :param quick: Bool. Runs a faster version of the plotting which looks worse\n :param min_size: 0<= Float <= 1. Show only clones which reach this proportion of the total population.\n Showing less clones speeds up the plotting and can make the plot clearer.\n :param allow_y_extension: If the population is not constant, allows the y-axis to extend beyond the initial pop\n :return: None\n \"\"\"\n if self.is_lil:\n self.change_sparse_to_csr()\n\n self._get_colours(self.clones_array, force_new_colours) # Get the colours used for the plots\n\n if min_size > 0: # Have to keep as >0 as some algorithms (e.g. 
relative fitness may have fractional counts)\n # Removes clones too small to plot by absorbing them into their parent clones\n clone_array, populations = self._absorb_small_clones(min_size)\n else:\n clone_array, populations = self.clones_array, self.population_array\n\n # Break up the populations so subclones appear from their parent clone\n split_pops_for_plotting, plot_order = self._split_populations_for_muller_plot(clone_array,\n populations, plot_order)\n if ax is None:\n if figsize is None:\n figsize = self.figsize\n fig, ax = plt.subplots(figsize=figsize)\n\n if quick:\n self._make_quick_stackplot(ax, split_pops_for_plotting, plot_order, plot_against_time)\n else:\n cumulative_array = np.cumsum(split_pops_for_plotting, axis=0)\n self._make_stackplot(ax, cumulative_array, plot_order, plot_against_time) # Add the clone populations to the plots\n\n # Add the clone births to the plot as X's\n x = []\n y = []\n c = []\n for clone in clone_array:\n gen = clone[self.generation_born_idx]\n if gen > 0:\n plot_gen = int(gen - 1) # Puts the mutation mark so it appears at the start of the clone region\n pops = np.where(plot_order == clone[self.id_idx])[0][0]\n if plot_against_time:\n x.append(self.times[int(plot_gen)])\n else:\n x.append(plot_gen)\n y.append(split_pops_for_plotting[:pops][:, plot_gen].sum())\n if clone[self.id_idx] in self.ns_muts:\n c.append('r') # Plot non-synonymous mutations with a red X\n elif clone[self.id_idx] in self.s_muts:\n c.append('b') # Plot synonymous mutations with a blue X\n else:\n c.append('k') # Plot a labelling event with a black X\n\n ax.scatter(x, y, c=c, marker='x')\n if allow_y_extension:\n plt.gca().set_ylim(bottom=0)\n else:\n plt.ylim([0, self.total_pop])\n if plot_against_time:\n plt.xlim([0, self.max_time])\n else:\n plt.xlim([0, self.sim_length - 1])\n\n if plot_file:\n plt.savefig('{0}'.format(plot_file))\n\n return ax\n\n def _absorb_small_clones(self, min_size=1):\n \"\"\"Creates a new clones_array and population_array removing clones that never get larger than the\n minimum proportion min_prop.\n Clones which are too small are absorbed into their parent clone so the total population remains the same.\n \"\"\"\n clones_to_remove = set()\n new_pop_array = self.population_array.copy()\n parent_set = Counter(self.clones_array[:, self.parent_idx])\n for i in range(len(self.clones_array) - 1, -1, -1): # Start from the youngest clones\n if new_pop_array[i].max() < min_size: # If smaller than minimum proportion\n if parent_set[i] == 0: # If clone does not have any large descendants.\n parent = int(self.clones_array[i, self.parent_idx]) # Find the parent of this small clone\n new_pop_array[parent] += new_pop_array[i] # Add the population of the small clone to the parent\n new_pop_array[i] = 0 # Remove the population of the small clone\n clones_to_remove.add(i)\n parent_set[parent] -= 1\n clones_to_keep = sorted(set(range(len(self.clones_array))).difference(clones_to_remove))\n return self.clones_array[clones_to_keep], new_pop_array[clones_to_keep]\n\n def _get_children(self, clones_array, idx):\n # Return the ids of immediate subclones of the given clone idx\n return clones_array[clones_array[:, self.parent_idx] == idx][:, self.id_idx]\n\n def _get_descendants_for_muller_plot(self, clones_array, idx, order):\n # Find the subclones of the given clone. 
Runs iteratively until found all descendants\n # Adds clone ids to order list.\n # order will be used to make the stackplot so that the subclones appear from their parent clone\n # Uses the clones array rather than the tree since it may be filtered to remove small clones.\n order.append(idx)\n children = self._get_children(clones_array, idx) # Immediate subclones of the clone idx\n np.random.shuffle(children)\n self.descendant_counts[idx] = len(children)\n for ch in children: # Find the subclones of the subclones.\n if ch != idx:\n self._get_descendants_for_muller_plot(clones_array, ch, order)\n order.append(idx)\n\n def _split_populations_for_muller_plot(self, clones_array, population_array, plot_order=None):\n # Breaks up the populations so subclones appear from their parent clone\n\n original_clones = clones_array[clones_array[:, self.parent_idx] == -1]\n\n # Will put labelled clones together if plot_order given or if the labels are in the original clones.\n if plot_order is None:\n all_types = np.unique(original_clones[:, self.label_idx])\n else:\n all_types = plot_order\n\n orders = []\n for t in all_types:\n order_t = []\n originators = original_clones[original_clones[:, self.label_idx] == t]\n for orig in originators[:, self.id_idx]:\n self._get_descendants_for_muller_plot(clones_array, orig, order_t)\n orders.append(order_t)\n\n split_pops_for_plotting = np.concatenate([np.concatenate([\n population_array[clones_array[:, self.id_idx] == o].toarray() / (self.descendant_counts[o] + 1) \\\n for o in order]) for order in orders], axis=0)\n\n plot_order = list(itertools.chain.from_iterable(orders))\n return split_pops_for_plotting, plot_order\n\n def _make_stackplot(self, ax, cumulative_array, plot_order, plot_against_time=True):\n # Make the stackplot using fill between. Prevents gaps in the plot that appear with using matplotlib stackplot\n for i in range(len(plot_order) - 1, -1, -1): # Start from the end/top\n colour = self.colours[plot_order[i]]\n array = cumulative_array[i]\n if i > 0:\n next_array = cumulative_array[i - 1]\n else:\n next_array = 0\n\n if plot_against_time:\n x = self.times\n else:\n x = list(range(self.sim_length))\n ax.fill_between(x, array, 0, where=array > next_array, facecolor=colour,\n interpolate=True, linewidth=0) # Fill all the way from the top of the clone to the x-axis\n\n def _make_quick_stackplot(self, ax, split_pops_for_plotting, plot_order, plot_against_time=True):\n # Make the stackplot using matplotlib stackplot\n if plot_against_time:\n x = self.times\n else:\n x = list(range(self.sim_length))\n ax.stackplot(x, split_pops_for_plotting, colors=[self.colours[i] for i in plot_order])\n\n def plot_incomplete_moment(self, t=None, selection='mutations', xlim=None, ylim=None, plt_file=None, sem=False,\n show_fit=False, show_legend=True, fit_prop=1,\n min_size=1, errorevery=1, clear_previous=True, show_plot=False, max_size=None,\n fit_style='m--', label='InMo', ax=None):\n \"\"\"\n Plots the incomplete moment\n :param t: The time to plot the incomplete moment for. If None, will use the end of the simulation\n :param selection: 'mutations', 'ns' or 's' for all mutations, non-synonymous only or synonymous only\n :param xlim: Tuple/list for the x-limits of the plot\n :param ylim: Tuple/list for the y-limits of the plot\n :param plt_file: File to output the plot - include the file type e.g. incom_plot.pdf.\n :param sem: Will display the SEM on the plot\n :param show_fit: Adds a straight line fit to the log plot. 
The intercept will be fixed at the (min_size, 1).\n Will be fitted to a proportion of the data specified by fit_prop\n :param show_legend: Shows a legend with the R^2 coefficient of the straight line fit.\n :param fit_prop: The proportion of the data to fit the straight line on.\n Starts from the smallest included sizes. Will be the clone sizes that together contain fit_prop proportion\n of the clones.\n :param min_size: The smallest clone size to include. All smaller clones will be ignored.\n :param errorevery: If showing the SEM, will only show the errorbar every errorevery points.\n :param clear_previous: If wanting to show more on the same plot, set to false and plot the other traces\n before running this function\n :param show_plot: If needing to show the plot rather than adding more traces after\n :return:\n \"\"\"\n if t is None:\n t = self.max_time\n clone_size_dist = self.get_mutant_clone_size_distribution(t, selection)\n if clone_size_dist is not None:\n if min_size > 0:\n clone_size_dist[:min_size] = 0\n if max_size is not None:\n clone_size_dist = clone_size_dist[:max_size + 1]\n incom = incomplete_moment(clone_size_dist)\n if clear_previous and ax is None:\n plt.close('all')\n fig, ax = plt.subplots()\n if incom is not None:\n add_incom_to_plot(incom, clone_size_dist, sem=sem, show_fit=show_fit, fit_prop=fit_prop,\n min_size=min_size, label=label, errorevery=errorevery, fit_style=fit_style, ax=ax)\n\n ax.set_yscale(\"log\")\n if xlim is not None:\n ax.xlim(xlim)\n if ylim is not None:\n ax.ylim(ylim)\n if show_legend:\n ax.legend()\n\n ax.set_xlabel('Clone size (cells)')\n ax.set_ylabel('First incomplete moment')\n\n if plt_file is not None:\n plt.savefig('{0}'.format(plt_file))\n elif show_plot:\n plt.show()\n\n def _expected_incomplete_moment(self, t, max_n):\n \"\"\"The expected incomplete moment if the simulation is neutral and all clones are measured accurately\"\"\"\n return np.exp(-np.arange(1, max_n + 1) / (self.division_rate * t))\n\n def plot_dnds(self, plt_file=None, min_size=1, gene=None, clear_previous=True, legend_label=None, ax=None):\n if clear_previous and ax is None:\n plt.close('all')\n fig, ax = plt.subplots()\n elif ax is None:\n ax = plt.gca()\n dndss = [self.get_dnds(t, min_size, gene) for t in self.times]\n ax.plot(self.times, dndss, label=legend_label)\n if plt_file is not None:\n plt.savefig('{0}'.format(plt_file))\n\n def plot_overall_population(self, label=None, legend_label=None, ax=None):\n \"\"\"\n With no label, plots for simulations without a fixed total population\n (will also run for the fixed population, but will not be interesting)\n\n With a label, will plot the labelled population\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n pop = self.get_labeled_population(label=label)\n ax.plot(self.times, pop, label=legend_label)\n\n def plot_average_fitness_over_time(self, legend_label=None, ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n avg_fit = [self.get_average_fitness(t) for t in self.times]\n ax.plot(self.times, avg_fit, label=legend_label)\n\n def animate(self, animation_file, grid_size=None, generations_per_frame=1, starting_clones=1,\n figsize=None, figxsize=5, bitrate=500, min_prop=0, external_call=False, dpi=100, fps=5,\n fitness=False, fitness_cmap=cm.Reds, fixed_label_text=None, fixed_label_loc=(0, 0),\n fixed_label_kwargs=None, show_time_label=False, time_label_units=None,\n time_label_decimal_places=0, time_label_loc=(0, 0), time_label_kwargs=None):\n \"\"\"\n Output an animation of the simulation on a 2D grid\n 
:param external_call: Only for 2D grids. Will run a version which is cruder and may run faster\n \"\"\"\n if self.is_lil:\n self.change_sparse_to_csr()\n\n if self.parameters.algorithm in self.parameters.spatial_algorithms:\n if fitness:\n animator = HexFitnessAnimator(self, cmap=fitness_cmap, figxsize=figxsize, figsize=figsize, dpi=dpi,\n bitrate=bitrate, fps=fps)\n else:\n animator = HexAnimator(self, figxsize=figxsize, figsize=figsize, dpi=dpi, bitrate=bitrate,\n fps=fps, external_call=external_call, fixed_label_text=fixed_label_text,\n fixed_label_loc=fixed_label_loc, fixed_label_kwargs=fixed_label_kwargs,\n show_time_label=show_time_label, time_label_units=time_label_units,\n time_label_decimal_places=time_label_decimal_places,\n time_label_loc=time_label_loc, time_label_kwargs=time_label_kwargs)\n\n else:\n if fitness:\n print('Cannot currently animate fitness for non-spatial simulations')\n animator = NonSpatialToGridAnimator(self, grid_size=grid_size, generations_per_frame=generations_per_frame,\n starting_clones=starting_clones, figsize=figsize, bitrate=bitrate, min_prop=min_prop,\n dpi=dpi, fps=fps)\n\n animator.animate(animation_file)\n\n ## Plots for lineage tracing experiments\n # These assume no mutations occurred during the simulation,\n # but all mutations (or labelled clones) are induced at the start.\n def plot_mean_clone_size_graph_for_non_mutation(self, times=None, label=None, plot_fit=True, fit_rate=None,\n legend_label=None, legend_label_fit=None, ax=None):\n \"\"\"\n Follows the mean clone sizes of each row in the clone array. This is a clone defined by a unique set of\n mutations, not be a particular mutation.\n Therefore, this function is only suitable for tracking the progress of clones growing without any mutations.\n For comparing to single progenitor model in lineage tracing experiments.\n \"\"\"\n if times is None:\n times = self.times\n\n means = []\n for t in times:\n means.append(mean_clone_size(self.get_clone_size_distribution_for_non_mutation(t, label=label)))\n\n if ax is None:\n fig, ax = plt.subplots()\n if plot_fit:\n if fit_rate is None:\n fit_rate = self.division_rate\n # Plot the theoretical mean clone size from the single progenitor model\n ax.plot(times, mean_clone_size_fit(times, fit_rate), 'r--', label=legend_label_fit)\n ax.set_xlabel('Time')\n ax.set_ylabel('Mean clone size of surviving clones')\n ax.scatter(times, means, label=legend_label)\n\n def plot_surviving_clones_for_non_mutation(self, times=None, ax=None, label=None, show_spm_fit=False):\n \"\"\"\n Follows the surviving clones based on of each row in the clone array. This is a clone defined by a unique set of\n mutations, not be a particular mutation.\n Therefore, this function is only suitable for tracking the progress of clones growing without any mutations.\n For comparing to single progenitor model in lineage tracing experiments.\n \"\"\"\n surviving_clones, times = self.get_surviving_clones_for_non_mutation(times=times, label=label)\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # Plot the theoretical number of surviving clones from the single progenitor model\n if show_spm_fit: # Assumes the Moran model. 
Timing will be wrong for the WF models.\n ax.plot(times, surviving_clones_fit(times, self.division_rate,\n self.get_surviving_clones_for_non_mutation(times=[0], label=label)),\n 'r--')\n ax.scatter(times, surviving_clones)\n ax.set_xlabel('Time')\n ax.set_ylabel('Surviving clones')\n ax.set_yscale(\"log\")\n\n def plot_clone_size_distribution_for_non_mutation(self, t=None, label=None, legend_label=None, ax=None,\n as_bar=False):\n \"\"\"\n Plots the clone size distribution, with the clones defined by the clones_array - i.e. not one clone per\n mutation, one clone per unique set of mutations.\n WARNING - Only really suitable for the case of no mutations, where we want to track the growth of a number of\n initial clones over time.\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n if t is None:\n t = self.max_time\n csd = self.get_clone_size_distribution_for_non_mutation(t, label=label)\n csd = csd / csd[1:].sum()\n if as_bar:\n ax.bar(range(1, len(csd)), csd[1:], label=legend_label)\n else:\n ax.scatter(range(1, len(csd)), csd[1:], label=legend_label)\n ax.set_ylim([0, csd[1:].max() * 1.1])\n\n def plot_clone_size_scaling_for_non_mutation(self, times, markersize=2, label=None, legend_label=\"\", ax=None):\n \"\"\"Mostly useful for simulations without any mutations. For comparing to single progenitor model.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n for t in times:\n csd = self.get_clone_size_distribution_for_non_mutation(t, label=label)\n mean_ = mean_clone_size(csd)\n csd = csd / csd[1:].sum()\n revcumsum = np.cumsum(csd[::-1])[::-1]\n x = np.arange(1, len(csd)) / mean_\n ax.scatter(x, revcumsum[1:], alpha=0.5, s=markersize, label=legend_label + str(t))\n ax.legend()\n\n\ndef pickle_load(filename, change_sparse_to_csr=True):\n \"\"\"\n Load a simulation from a gzipped pickle\n :param filename:\n :return:\n \"\"\"\n with gzip.open(filename, 'rb') as f:\n sim = pickle.load(f)\n\n if change_sparse_to_csr:\n sim.change_sparse_to_csr()\n\n return sim\n", "# Functions/classes to calculate the fitness of clones from the random mutation fitnesses\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n\n# Probability distributions for drawing the fitness of new mutations.\n# Set up so can be called like functions without argument, but can print the attributes\nclass NormalDist(object):\n \"\"\"Will draw again if the value is below zero.\"\"\"\n def __init__(self, var, mean=1.):\n self.var = var\n self.mean = mean\n\n def __str__(self):\n return 'Normal distribution(mean {0}, variance {1})'.format(self.mean, self.var)\n\n def __call__(self):\n g = np.random.normal(self.mean, self.var)\n if g < 0:\n # print('growth rate below zero! 
Redrawing a new rate')\n return self()\n return g\n\n def get_mean(self):\n return self.mean\n\n\nclass FixedValue(object):\n def __init__(self, value):\n self.mean = value\n\n def __str__(self):\n return 'Fixed value {0}'.format(self.mean)\n\n def __call__(self):\n return self.mean\n\n def get_mean(self):\n return self.mean\n\n\nclass ExponentialDist(object):\n def __init__(self, mean, offset=1):\n self.mean = mean\n self.offset = offset # Offset of 1 means the mutations will start from neutral.\n\n def __str__(self):\n return 'Exponential distribution(mean {0}, offset {1})'.format(self.mean, self.offset)\n\n def __call__(self):\n g = self.offset + np.random.exponential(self.mean - self.offset)\n return g\n\n def get_mean(self):\n return self.mean\n\n\nclass UniformDist(object):\n def __init__(self, low, high):\n self.low = low\n self.high = high\n\n def __str__(self):\n return 'Uniform distribution(low {0}, high {1})'.format(self.low, self.high)\n\n def __call__(self):\n g = np.random.uniform(self.low, self.high)\n return g\n\n def get_mean(self):\n return (self.high + self.low)/2\n\n\n##################\n# Classes for diminishing returns or other transformations of the raw fitness\nclass UnboundedFitness:\n # No bound or transformation on the fitness.\n def __str__(self):\n return 'UnboundedFitness'\n\n def fitness(self, x):\n return x\n\n def inverse(self, x):\n return x\n\n\nclass BoundedLogisticFitness:\n \"\"\"\n The effect of new (beneficial) mutations tails off as the clone gets stronger.\n There is a maximum fitness.\n \"\"\"\n def __init__(self, a, b=math.exp(1)):\n \"\"\"\n fitness = a/(1+c*b**(-x)) where x is the product of all mutation effects\n c is picked so that fitness(1) = 1\n \"\"\"\n assert (a > 1)\n assert (b > 1)\n self.a = a\n self.b = b\n self.c = (a - 1) * self.b\n\n def __str__(self):\n return 'Bounded Logistic: a {0}, b {1}, c {2}'.format(self.a, self.b, self.c)\n\n def fitness(self, x):\n return self.a / (1 + self.c * (self.b ** (-x)))\n\n def inverse(self, y):\n return math.log(self.c / (self.a / y - 1), self.b)\n\n\n################\n# Class for storing information about a gene\nclass Gene(object):\n \"\"\"Determines the mutations which are create for a gene\"\"\"\n\n def __init__(self, name, mutation_distribution, synonymous_proportion, weight=1.):\n self.name = name\n self.mutation_distribution = mutation_distribution\n self.synonymous_proportion = synonymous_proportion\n self.weight = weight\n\n def __str__(self):\n return \"Gene. 
Name: {0}, MutDist: {1}, SynProp: {2}, Weight: {3}\".format(self.name,\n self.mutation_distribution.__str__(),\n self.synonymous_proportion,\n self.weight)\n\n\n##################\n# Class to put it all together\n\nclass MutationGenerator(object):\n \"\"\"\n This class determines the effects of mutations and how they are combined to define the fitness of a clone.\n\n New mutations are drawn at random from the given genes\n Each gene in the model can have a different mutation fitness distribution and synonymous proportion.\n - If multi_gene_array=False, the effects of mutations are simply combined according to the combine_mutations option\n - If multi_gene_array=True, the effects of mutations within each gene are calculated first according to the\n combine_mutations option, then the effects of each gene are combined using the combine_array option.\n This is useful for cases such as where a second mutation in a gene will not have a further fitness effect.\n\n combine_mutations options:\n multiply - multiplies the fitness effect of all mutations in a gene to get a new fitness\n add - multiplies the fitness effect of all mutations in a gene to get a new fitness\n replace - a new mutation will define the fitness of a gene and any previous effects are ignored\n max - the new gene fitness is the max of the old and the new\n min - the new gene fitness is the min of the old and the new\n\n combine_array options (only if multi_gene_array=True):\n multiply - multiplies the fitness effects on each gene to get a new fitness for the cell\n add - adds the fitness effects on each gene to get a new fitness for the cell\n max - the cell fitness is given by the gene with the highest fitness\n min - the cell fitness is given by the gene with the minimum fitness\n priority - the cell fitness is given by the last gene in the list given\n (or by the last epistatic effect in any are used)\n\n epistatics can be used to define more complex relationships between genes.\n This is a list of epistatic relationships which define a set of genes and the distribution of fitness effects if\n all of those genes are mutated (which replaces the fitness effects of those individual genes).\n Those epistatic effects are then combined (along with any genes not in a triggered epistatic effect)\n according to the combine_array option.\n Each item in the list is (name for epistatic, gene_name1, gene_name2, ..., epistatic fitness distribution)\n Only intended for quite simple combinations of a few genes.\n\n Fitness changes imposed by labelling events will be applied elsewhere.\n Any effects due to treatment are applied elsewhere.\n There are many options here and when applying treatment and labels, so be careful that the clone fitnesses are\n combining as intended, especially if defining epistatic effects.\n Can use MutationGenerator.plot_fitness_combinations to check the fitness combinations are as intended.\n \"\"\"\n # Options for combining mutations in the same gene or when multi_gene_array=False\n combine_options = ('multiply', 'add', 'replace', 'max', 'min')\n\n # Options for combining mutations in different genes or epistatic effects.\n combine_array_options = ('multiply', 'add', 'max', 'min', 'priority')\n\n def __init__(self, combine_mutations='multiply', multi_gene_array=False, combine_array='multiply',\n genes=(Gene('all', NormalDist(1.1), synonymous_proportion=0.5, weight=1),),\n mutation_combination_class=UnboundedFitness(), epistatics=None):\n if combine_mutations not in self.combine_options:\n raise ValueError(\n 
\"'{0}' is not a valid option for 'combine_mutations'. Pick from {1}\".format(combine_mutations,\n self.combine_options))\n if combine_array not in self.combine_array_options:\n raise ValueError(\"'{0}' is not a valid option for 'combine_array'. Pick from {1}\".format(combine_array,\n self.combine_array_options))\n\n self.combine_mutations = combine_mutations\n self.multi_gene_array = multi_gene_array\n self.combine_array = combine_array\n self.genes = genes\n self.num_genes = len(self.genes)\n self.gene_indices = {g.name: i for i, g in enumerate(genes)}\n self.mutation_distributions = [g.mutation_distribution for g in genes]\n self.gene_weights = [g.weight for g in genes]\n self.synonymous_proportion = np.array([g.synonymous_proportion for g in genes])\n self.overall_synonymous_proportion = np.array(\n [g.synonymous_proportion * g.weight for g in genes]).sum() / np.array(self.gene_weights).sum()\n\n self.relative_weights = np.array(self.gene_weights) / sum(self.gene_weights)\n self.relative_weights_cumsum = self.relative_weights.cumsum()\n self.mutation_combination_class = mutation_combination_class # E.g. BoundedLogisticFitness above\n if epistatics is not None:\n if not multi_gene_array:\n print('Using multi_gene_array because there are epistatic relationships')\n self.multi_gene_array = True\n # List of epistatic relationships\n # Each item in the list is (name, gene_name1, gene_name2, ..., epistatic fitness distribution)\n # Add the names to the gene list\n self.gene_indices.update({e[0]: j+self.num_genes for j, e in enumerate(epistatics)})\n # Convert to the gene indices\n self.epistatics = [tuple([self.get_gene_number(ee) for ee in e[1:-1]] + [e[-1]]) for e in epistatics]\n self.epistatic_cols = range(len(genes) + 1, len(genes) + len(self.epistatics) + 1)\n else:\n self.epistatics = None\n self.params = {\n 'combine_mutations': combine_mutations,\n 'genes': [g.__str__() for g in self.genes],\n 'fitness_class': mutation_combination_class.__str__(),\n }\n\n def __str__(self):\n s = \"<MutGen: comb_muts={0}, genes={1}, fitness_class={2}>\".format(self.params['combine_mutations'],\n self.params['genes'],\n self.params['fitness_class'])\n return s\n\n def get_new_fitnesses(self, old_fitnesses, old_mutation_arrays):\n \"\"\"\n Gets the effects of the new mutations and combines them with the old mutations in those cells.\n Multiple mutated cells can be processed at once.\n However, each cell can only get one new mutation. If multiple mutations occur in the same step in the same cell\n then this function is called multiple times.\n :param old_fitnesses: 1D array of fitnesses. These are the actual fitness of the clones used for calculating\n clonal dynamics.\n :param old_mutation_arrays: 2D array of fitnesses. Has an array of fitness effects for each gene (plus WT and\n any epistatics used). 
This is updated with the new mutations and used to calculate the new overall fitness of\n each mutated cell.\n :return:\n \"\"\"\n num = len(old_fitnesses)\n genes_mutated = self._get_genes(num)\n syns = self._are_synonymous(genes_mutated)\n\n new_fitnesses, new_mutation_arrays = self._update_fitness_arrays(old_mutation_arrays, genes_mutated, syns)\n\n return new_fitnesses, new_mutation_arrays, syns, genes_mutated\n\n def _are_synonymous(self, mut_types):\n \"\"\"\n Determines whether the new mutations are synonymous\n :param mut_types:\n :return:\n \"\"\"\n return np.random.binomial(1, self.synonymous_proportion[mut_types])\n\n def _get_genes(self, num):\n \"\"\"\n Determines which genes are mutated\n :param num:\n :return:\n \"\"\"\n r = np.random.rand(num, 1)\n k = (self.relative_weights_cumsum < r).sum(axis=1)\n return k\n\n def _update_fitness_arrays(self, old_mutation_arrays, genes_mutated, syns):\n # Only have to update the cells in which\n non_syns = np.where(1 - syns)\n new_mutation_fitnesses_non_syn = [self.mutation_distributions[g]() for g in\n genes_mutated[non_syns]] # The fitness of the new mutation alone\n if self.multi_gene_array:\n array_idx = genes_mutated[non_syns] + 1 # +1 for the wild type column\n else:\n array_idx = np.zeros(len(non_syns), dtype=int)\n\n new_mutation_arrays = old_mutation_arrays.copy()\n\n # Get the effects of any existing mutations in the newly mutated genes\n old_fitnesses = old_mutation_arrays[non_syns, array_idx]\n # Combine the old effects with the new ones and assign to the new mutation array.\n new_fitnesses = self._combine_fitnesses(old_fitnesses, new_mutation_fitnesses_non_syn)\n new_mutation_arrays[(non_syns, array_idx)] = new_fitnesses\n\n # Combine the effects of mutations in different genes into a single 1D array of fitness per cell\n # Also applies any diminishing returns to the fitness\n new_fitnesses, new_mutation_arrays = self.combine_vectors(new_mutation_arrays)\n\n return new_fitnesses, new_mutation_arrays\n\n def _combine_fitnesses(self, old_fitnesses, new_mutation_fitnesses):\n \"\"\"\n Applies the selected rules to combine the new mutations with those already in the cell.\n If using multi_gene_array=True, this will just combine the fitness of mutations within the same gene.\n\n The arrays have nans where the gene is not mutated.\n Need to turn these into ones for the calculations.\n :param old_fitnesses:\n :param new_mutation_fitnesses:\n :return:\n \"\"\"\n\n old_fitnesses[np.where(np.isnan(old_fitnesses))] = 1\n if self.combine_mutations == 'multiply':\n combined_fitness = old_fitnesses * new_mutation_fitnesses\n elif self.combine_mutations == 'add':\n combined_fitness = old_fitnesses + new_mutation_fitnesses - 1\n combined_fitness[combined_fitness < 0] = 0\n elif self.combine_mutations == 'replace':\n combined_fitness = new_mutation_fitnesses\n elif self.combine_mutations == 'max':\n combined_fitness = np.maximum(new_mutation_fitnesses, old_fitnesses)\n elif self.combine_mutations == 'min':\n combined_fitness = np.minimum(new_mutation_fitnesses, old_fitnesses)\n else:\n raise NotImplementedError(\"Have tried to use {}\".format(self.combine_mutations))\n return combined_fitness\n\n def _epistatic_combinations(self, fitness_arrays):\n \"\"\"\n Take the mutated (non-nan) genes and check whether they complete an epistatic set.\n The effects of genes in an epistatic set are replaced by the epistatic effect.\n Epistatic effects are stored in extra columns in the fitness array.\n Then any multiple epistatic results can be combined 
as usual (along with any uninvolved genes).\n Assume no mutations back to wild type, so once an epistatic effect is in a clone, it is not lost.\n :param fitness_arrays:\n :return:\n \"\"\"\n\n raw_gene_arr = fitness_arrays[:, :self.epistatic_cols[0]]\n non_nan = ~np.isnan(raw_gene_arr)\n\n epi_rows = fitness_arrays[:, self.epistatic_cols]\n not_already_epi_rows = np.isnan(epi_rows)\n row_positions_to_blank = []\n col_positions_to_blank = []\n for i, epi in enumerate(self.epistatics):\n epi_genes, dfe = epi[:-1], epi[-1]\n matching_rows = np.all(non_nan[:, tuple([g+1 for g in epi_genes])], axis=1) # +1 because of the WT column\n new_matching_rows = matching_rows * not_already_epi_rows[:, i]\n new_draws = [dfe() for j in new_matching_rows if j]\n epi_rows[new_matching_rows, i] = new_draws\n for g in epi_genes:\n row_positions_to_blank.extend(np.where(matching_rows)[0])\n col_positions_to_blank.extend([g + 1] * matching_rows.sum())\n\n fitness_array = np.concatenate([raw_gene_arr, epi_rows], axis=1)\n epistatic_fitness_array = fitness_array.copy()\n epistatic_fitness_array[row_positions_to_blank, col_positions_to_blank] = np.nan\n return fitness_array, epistatic_fitness_array\n\n def combine_vectors(self, fitness_arrays):\n \"\"\"\n\n :param fitness_arrays:\n :return:\n \"\"\"\n # Combines the raw fitness values from each gene. Can apply any diminishing returns etc here.\n if self.epistatics is not None:\n # Replace the raw fitness array with one including the epistatic effects\n full_fitness_arrays, fitness_arrays = self._epistatic_combinations(fitness_arrays)\n # fitness_arrays now updated for calculation of epistatic fitness\n # full_fitness_arrays also contains the raw fitness of the genes\n else:\n full_fitness_arrays = fitness_arrays\n\n if not self.multi_gene_array: # Don't have to combine genes, just reduce to 1D array\n combined_fitness = fitness_arrays[:, 0]\n elif self.combine_array == 'multiply':\n combined_fitness = np.nanprod(fitness_arrays, axis=1)\n elif self.combine_array == 'add':\n combined_fitness = np.nansum(fitness_arrays, axis=1) - np.count_nonzero(~np.isnan(fitness_arrays),\n axis=1) + 1\n combined_fitness[combined_fitness < 0] = 0\n elif self.combine_array == 'max':\n combined_fitness = np.nanmax(fitness_arrays, axis=1)\n elif self.combine_array == 'min':\n combined_fitness = np.nanmin(fitness_arrays, axis=1)\n elif self.combine_array == 'priority':\n # Find the right-most non-nan value. 
Useful for epistatic interactions that are superseded by another\n # To find the last non-nan columns, reverse the column order and find the first non-zero entry.\n fitness_arrays = fitness_arrays[:, ::-1]\n c = np.isnan(fitness_arrays)\n d = np.argmin(c, axis=1)\n combined_fitness = fitness_arrays[range(len(fitness_arrays)), d]\n else:\n raise NotImplementedError(\"Have tried to use {}\".format(self.combine_array))\n return self.mutation_combination_class.fitness(combined_fitness), full_fitness_arrays\n\n def get_gene_number(self, gene_name):\n if gene_name is None:\n return None\n return self.gene_indices[gene_name]\n\n def get_synonymous_proportion(self, gene_num):\n if gene_num is None:\n return self.overall_synonymous_proportion\n else:\n return self.synonymous_proportion[gene_num]\n\n def plot_fitness_combinations(self):\n \"\"\"\n The combinations of multiple mutations can be complicated, especially if epistatic relationships are defined.\n This will plot the average fitness of all fitness combinations of all genes defined as a visual check that\n it is as intended.\n Assumes that the background fitness (first column of the fitness array) is 1.\n \"\"\"\n if not self.multi_gene_array and self.combine_mutations == 'replace':\n # No combinations here. Just need to plot individual genes\n print('No combinations defined. Only most recent non-silent mutation defines fitness')\n xticklabels = ['Background']\n fitness_values = [1]\n for i, gene in enumerate(self.genes):\n xticklabels.append(gene.name)\n fitness_values.append(gene.mutation_distribution.get_mean())\n plt.bar(range(len(fitness_values)), fitness_values)\n plt.ylabel('Fitness')\n plt.xticks(range(len(fitness_values)), xticklabels, rotation=90)\n return fitness_values\n else:\n # Make a fitness array with all possible combinations of genes\n num_genes = len(self.genes)\n\n if self.epistatics is None:\n num_epi = 0\n else:\n num_epi = len(self.epistatics)\n fitness_array = np.full((2 ** num_genes, num_genes + num_epi + 1), np.nan)\n\n fitness_array[:, 0] = 1 # Assume background fitness is 1\n xticklabels = ['Background']\n for i in range(fitness_array.shape[0]):\n binary_string = format(i, '#0{}b'.format(num_genes + 2))[2:][::-1]\n tick_label = []\n for j, b in enumerate(binary_string):\n if b == '1':\n # Mutate the gene\n gene_fitness = self.mutation_distributions[j].get_mean()\n fitness_array[i, j + 1] = gene_fitness\n tick_label.append(self.genes[j].name)\n if i > 0:\n xticklabels.append(\" + \".join(tick_label))\n\n if self.multi_gene_array:\n new_fitnesses, new_mutation_arrays = self.combine_vectors(fitness_array)\n else:\n # Temporarily change the combine_mutations attribute so same combination functions can be used\n if self.combine_mutations in ('add', 'multiply'):\n print('This allows multiple mutations per gene to have an effect. ' \\\n 'Just showing combinations of up to one (mean fitness) mutation from each gene.')\n ca = self.combine_array\n self.combine_array = self.combine_mutations\n self.multi_gene_array = True\n new_fitnesses, new_mutation_arrays = self.combine_vectors(fitness_array)\n self.combine_array = ca\n self.multi_gene_array = False\n\n plt.bar(range(fitness_array.shape[0]), new_fitnesses)\n plt.ylabel('Fitness')\n plt.xticks(range(fitness_array.shape[0]), xticklabels, rotation=90)\n\n return new_fitnesses\n\n\n\n" ]
[ [ "numpy.cumsum", "numpy.any", "numpy.searchsorted", "numpy.where", "matplotlib.pyplot.gca", "numpy.unique", "numpy.arange", "numpy.full", "numpy.random.set_state", "numpy.diff", "matplotlib.pyplot.close", "numpy.zeros", "numpy.isnan", "matplotlib.pyplot.ylim", "numpy.atleast_2d", "numpy.random.binomial", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "numpy.random.get_state", "matplotlib.pyplot.subplots", "numpy.random.shuffle", "numpy.ones", "matplotlib.pyplot.xlim", "numpy.bincount", "matplotlib.cm.YlOrBr", "scipy.sparse.lil_matrix" ], [ "numpy.nanmax", "numpy.maximum", "numpy.nanprod", "numpy.minimum", "numpy.isnan", "numpy.random.exponential", "numpy.nanmin", "numpy.full", "numpy.concatenate", "numpy.random.normal", "numpy.nansum", "numpy.argmin", "numpy.random.rand", "numpy.random.binomial", "numpy.random.uniform", "numpy.array", "numpy.where", "matplotlib.pyplot.ylabel" ] ]
samyoo78/NearPy
[ "1b534b864d320d875508e95cd2b76b6d8c07a90b" ]
[ "tests/distances_tests.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2013 Ole Krause-Sparmann\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport numpy\nimport scipy\nimport unittest\n\nfrom nearpy.distances import EuclideanDistance, CosineDistance, ManhattanDistance\n\n########################################################################\n\n# Helper functions\n\n\ndef check_distance_symmetry(test_obj, distance):\n for k in range(100):\n x = numpy.random.randn(10)\n y = numpy.random.randn(10)\n d_xy = distance.distance(x, y)\n d_yx = distance.distance(y, x)\n\n # I had precision issues with a local install. This test is more tolerant to that.\n test_obj.assertAlmostEqual(d_xy, d_yx, delta=0.00000000000001)\n\n for k in range(100):\n x = scipy.sparse.rand(30, 1, density=0.3)\n y = scipy.sparse.rand(30, 1, density=0.3)\n d_xy = distance.distance(x, y)\n d_yx = distance.distance(y, x)\n\n # I had precision issues with a local install. 
This test is more tolerant to that.\n test_obj.assertAlmostEqual(d_xy, d_yx, delta=0.00000000000001)\n\n\ndef check_distance_triangle_inequality(test_obj, distance):\n for k in range(100):\n x = numpy.random.randn(10)\n y = numpy.random.randn(10)\n z = numpy.random.randn(10)\n\n d_xy = distance.distance(x, y)\n d_xz = distance.distance(x, z)\n d_yz = distance.distance(y, z)\n\n test_obj.assertLessEqual(d_xy, d_xz + d_yz)\n\n for k in range(100):\n x = scipy.sparse.rand(30, 1, density=0.3)\n y = scipy.sparse.rand(30, 1, density=0.3)\n z = scipy.sparse.rand(30, 1, density=0.3)\n\n d_xy = distance.distance(x, y)\n d_xz = distance.distance(x, z)\n d_yz = distance.distance(y, z)\n\n test_obj.assertTrue(d_xy <= d_xz + d_yz)\n\n########################################################################\n\n\nclass TestEuclideanDistance(unittest.TestCase):\n\n def setUp(self):\n self.euclidean = EuclideanDistance()\n\n def test_triangle_inequality(self):\n check_distance_triangle_inequality(self, self.euclidean)\n\n def test_symmetry(self):\n check_distance_symmetry(self, self.euclidean)\n\nclass TestCosineDistance(unittest.TestCase):\n\n def setUp(self):\n self.cosine = CosineDistance()\n\n def test_symmetry(self):\n check_distance_symmetry(self, self.cosine)\n\nclass TestManhattanDistance(unittest.TestCase):\n\n def setUp(self):\n self.manhattan = ManhattanDistance()\n\n def test_triangle_inequality(self):\n check_distance_triangle_inequality(self, self.manhattan)\n\n def test_symmetry(self):\n check_distance_symmetry(self, self.manhattan)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "scipy.sparse.rand", "numpy.random.randn" ] ]
Csinclair0/fairseq
[ "6d9cf6a850c31d12a3ac63e89b005756b09cebeb" ]
[ "fairseq/models/fairseq_model.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nBase classes for various fairseq models.\n\"\"\"\n\nimport logging\nfrom argparse import Namespace\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq import utils\nfrom fairseq.data import Dictionary\nfrom fairseq.dataclass.utils import (\n convert_namespace_to_omegaconf,\n gen_parser_from_dataclass,\n)\nfrom fairseq.models import FairseqDecoder, FairseqEncoder\nfrom omegaconf import DictConfig\nfrom torch import Tensor\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_type(module, expected_type):\n if hasattr(module, \"unwrapped_module\"):\n assert isinstance(module.unwrapped_module, expected_type), \\\n f\"{type(module.unwrapped_module)} != {expected_type}\"\n else:\n assert isinstance(module, expected_type), f\"{type(module)} != {expected_type}\"\n\n\nclass BaseFairseqModel(nn.Module):\n \"\"\"Base class for fairseq models.\"\"\"\n\n def __init__(self):\n super().__init__()\n self._is_generation_fast = False\n\n @classmethod\n def add_args(cls, parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n dc = getattr(cls, \"__dataclass\", None)\n if dc is not None:\n # do not set defaults so that settings defaults from various architectures still works\n gen_parser_from_dataclass(parser, dc(), delete_default=True)\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n raise NotImplementedError(\"Model must implement the build_model method\")\n\n def get_targets(self, sample, net_output):\n \"\"\"Get targets from either the sample or the net's output.\"\"\"\n return sample[\"target\"]\n\n def get_normalized_probs(\n self,\n net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],\n log_probs: bool,\n sample: Optional[Dict[str, Tensor]] = None,\n ):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n return self.get_normalized_probs_scriptable(net_output, log_probs, sample)\n\n # TorchScript doesn't support super() method so that the scriptable Subclass\n # can't access the base class model in Torchscript.\n # Current workaround is to add a helper function with different name and\n # call the helper function from scriptable Subclass.\n def get_normalized_probs_scriptable(\n self,\n net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],\n log_probs: bool,\n sample: Optional[Dict[str, Tensor]] = None,\n ):\n \"\"\"Scriptable helper function for get_normalized_probs in ~BaseFairseqModel\"\"\"\n if hasattr(self, \"decoder\"):\n return self.decoder.get_normalized_probs(net_output, log_probs, sample)\n elif torch.is_tensor(net_output):\n # syntactic sugar for simple models which don't have a decoder\n # (e.g., the classification tutorial)\n logits = net_output.float()\n if log_probs:\n return F.log_softmax(logits, dim=-1)\n else:\n return F.softmax(logits, dim=-1)\n raise NotImplementedError\n\n def extract_features(self, *args, **kwargs):\n \"\"\"Similar to *forward* but only return features.\"\"\"\n return self(*args, **kwargs)\n\n def max_positions(self):\n \"\"\"Maximum length supported by the model.\"\"\"\n return None\n\n def load_state_dict(\n self,\n state_dict,\n strict=True,\n model_cfg: Optional[DictConfig] = None,\n args: Optional[Namespace] = None,\n ):\n \"\"\"Copies parameters and buffers from 
*state_dict* into this module and\n its descendants.\n\n Overrides the method in :class:`nn.Module`. Compared with that method\n this additionally \"upgrades\" *state_dicts* from old checkpoints.\n \"\"\"\n\n if model_cfg is None and args is not None:\n logger.warn(\"using 'args' is deprecated, please update your code to use dataclass config\")\n model_cfg = convert_namespace_to_omegaconf(args).model\n\n self.upgrade_state_dict(state_dict)\n\n from fairseq.checkpoint_utils import prune_state_dict\n\n new_state_dict = prune_state_dict(state_dict, model_cfg)\n return super().load_state_dict(new_state_dict, strict)\n\n def upgrade_state_dict(self, state_dict):\n \"\"\"Upgrade old state dicts to work with newer code.\"\"\"\n self.upgrade_state_dict_named(state_dict, \"\")\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade old state dicts to work with newer code.\n\n Args:\n state_dict (dict): state dictionary to upgrade, in place\n name (str): the state dict key corresponding to the current module\n \"\"\"\n assert state_dict is not None\n\n def do_upgrade(m, prefix):\n if len(prefix) > 0:\n prefix += \".\"\n\n for n, c in m.named_children():\n name = prefix + n\n if hasattr(c, \"upgrade_state_dict_named\"):\n c.upgrade_state_dict_named(state_dict, name)\n elif hasattr(c, \"upgrade_state_dict\"):\n c.upgrade_state_dict(state_dict)\n do_upgrade(c, name)\n\n do_upgrade(self, name)\n\n def set_num_updates(self, num_updates):\n \"\"\"State from trainer to pass along to model at every update.\"\"\"\n for m in self.modules():\n if hasattr(m, \"set_num_updates\") and m != self:\n m.set_num_updates(num_updates)\n\n def prepare_for_inference_(self, cfg: DictConfig):\n \"\"\"Prepare model for inference.\"\"\"\n kwargs = {}\n kwargs[\"beamable_mm_beam_size\"] = (\n None\n if getattr(cfg.generation, \"no_beamable_mm\", False)\n else getattr(cfg.generation, \"beam\", 5)\n )\n kwargs[\"need_attn\"] = False\n if getattr(cfg.generation, \"retain_dropout\", False):\n kwargs[\"retain_dropout\"] = cfg.generation.retain_dropout\n kwargs[\"retain_dropout_modules\"] = cfg.generation.retain_dropout_modules\n self.make_generation_fast_(**kwargs)\n\n def make_generation_fast_(self, **kwargs):\n \"\"\"\n Legacy entry point to optimize model for faster generation.\n Prefer prepare_for_inference_.\n \"\"\"\n if self._is_generation_fast:\n return # only apply once\n self._is_generation_fast = True\n\n # remove weight norm from all modules in the network\n def apply_remove_weight_norm(module):\n try:\n nn.utils.remove_weight_norm(module)\n except (AttributeError, ValueError): # this module didn't have weight norm\n return\n\n self.apply(apply_remove_weight_norm)\n\n def apply_make_generation_fast_(module, prefix):\n if len(prefix) > 0:\n prefix += \".\"\n\n base_func = BaseFairseqModel.make_generation_fast_\n for n, m in module.named_modules():\n if (\n m != self\n and hasattr(m, \"make_generation_fast_\")\n # don't call this implementation again, e.g., if\n # children modules also inherit from BaseFairseqModel\n and m.make_generation_fast_.__func__ is not base_func\n ):\n name = prefix + n\n m.make_generation_fast_(name=name, **kwargs)\n\n apply_make_generation_fast_(self, \"\")\n\n def train(mode=True):\n if mode:\n raise RuntimeError(\"cannot train after make_generation_fast\")\n\n # this model should no longer be used for training\n self.eval()\n self.train = train\n\n def prepare_for_onnx_export_(self, **kwargs):\n \"\"\"Make model exportable via ONNX trace.\"\"\"\n seen = set()\n\n def 
apply_prepare_for_onnx_export_(module):\n if (\n module != self\n and hasattr(module, \"prepare_for_onnx_export_\")\n and module not in seen\n ):\n seen.add(module)\n module.prepare_for_onnx_export_(**kwargs)\n\n self.apply(apply_prepare_for_onnx_export_)\n\n @classmethod\n def from_pretrained(\n cls,\n model_name_or_path,\n checkpoint_file=\"model.pt\",\n data_name_or_path=\".\",\n **kwargs,\n ):\n \"\"\"\n Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model\n file. Downloads and caches the pre-trained model file if needed.\n\n The base implementation returns a\n :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to\n generate translations or sample from language models. The underlying\n :class:`~fairseq.models.FairseqModel` can be accessed via the\n *generator.models* attribute.\n\n Other models may override this to implement custom hub interfaces.\n\n Args:\n model_name_or_path (str): either the name of a pre-trained model to\n load or a path/URL to a pre-trained model state dict\n checkpoint_file (str, optional): colon-separated list of checkpoint\n files in the model archive to ensemble (default: 'model.pt')\n data_name_or_path (str, optional): point args.data to the archive\n at the given path/URL. Can start with '.' or './' to reuse the\n model archive path.\n \"\"\"\n from fairseq import hub_utils\n\n x = hub_utils.from_pretrained(\n model_name_or_path,\n checkpoint_file,\n data_name_or_path,\n archive_map=cls.hub_models(),\n **kwargs,\n )\n logger.info(x[\"args\"])\n return hub_utils.GeneratorHubInterface(x[\"args\"], x[\"task\"], x[\"models\"])\n\n @classmethod\n def hub_models(cls):\n return {}\n\n\nclass FairseqEncoderDecoderModel(BaseFairseqModel):\n \"\"\"Base class for encoder-decoder models.\n\n Args:\n encoder (FairseqEncoder): the encoder\n decoder (FairseqDecoder): the decoder\n \"\"\"\n\n def __init__(self, encoder, decoder):\n super().__init__()\n\n self.encoder = encoder\n self.decoder = decoder\n\n check_type(self.encoder, FairseqEncoder)\n check_type(self.decoder, FairseqDecoder)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):\n \"\"\"\n Run the forward pass for an encoder-decoder model.\n\n First feed a batch of source tokens through the encoder. 
Then, feed the\n encoder output and previous decoder outputs (i.e., teacher forcing) to\n the decoder to produce the next outputs::\n\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)\n decoder_out = self.decoder(\n prev_output_tokens, encoder_out=encoder_out, **kwargs\n )\n return decoder_out\n\n def forward_decoder(self, prev_output_tokens, **kwargs):\n return self.decoder(prev_output_tokens, **kwargs)\n\n def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):\n \"\"\"\n Similar to *forward* but only return features.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)\n features = self.decoder.extract_features(\n prev_output_tokens, encoder_out=encoder_out, **kwargs\n )\n return features\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the default output size (typically vocabulary size).\"\"\"\n return self.decoder.output_layer(features, **kwargs)\n\n def max_positions(self):\n \"\"\"Maximum length supported by the model.\"\"\"\n return (self.encoder.max_positions(), self.decoder.max_positions())\n\n def max_decoder_positions(self):\n \"\"\"Maximum length supported by the decoder.\"\"\"\n return self.decoder.max_positions()\n\n\nclass FairseqModel(FairseqEncoderDecoderModel):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n utils.deprecation_warning(\n \"FairseqModel is deprecated, please use FairseqEncoderDecoderModel \"\n \"or BaseFairseqModel instead\",\n stacklevel=4,\n )\n\n\nclass FairseqMultiModel(BaseFairseqModel):\n \"\"\"Base class for combining multiple encoder-decoder models.\"\"\"\n\n def __init__(self, encoders, decoders):\n super().__init__()\n assert encoders.keys() == decoders.keys()\n self.keys = list(encoders.keys())\n for key in self.keys:\n check_type(encoders[key], FairseqEncoder)\n check_type(decoders[key], FairseqDecoder)\n\n self.models = nn.ModuleDict(\n {\n key: FairseqEncoderDecoderModel(encoders[key], decoders[key])\n for key in self.keys\n }\n )\n\n @staticmethod\n def build_shared_embeddings(\n dicts: Dict[str, Dictionary],\n langs: List[str],\n embed_dim: int,\n build_embedding: callable,\n pretrained_embed_path: Optional[str] = None,\n ):\n \"\"\"\n Helper function to build shared embeddings for a set of languages after\n checking that all dicts corresponding to those languages are equivalent.\n\n Args:\n dicts: Dict of lang_id to its corresponding Dictionary\n langs: languages that we want to share embeddings for\n embed_dim: embedding dimension\n build_embedding: callable function to actually build the embedding\n pretrained_embed_path: Optional path to load pretrained embeddings\n \"\"\"\n shared_dict = dicts[langs[0]]\n if any(dicts[lang] != shared_dict for lang in langs):\n raise ValueError(\n \"--share-*-embeddings requires a joined dictionary: \"\n 
\"--share-encoder-embeddings requires a joined source \"\n \"dictionary, --share-decoder-embeddings requires a joined \"\n \"target dictionary, and --share-all-embeddings requires a \"\n \"joint source + target dictionary.\"\n )\n return build_embedding(shared_dict, embed_dim, pretrained_embed_path)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):\n raise NotImplementedError\n\n def max_positions(self):\n \"\"\"Maximum length supported by the model.\"\"\"\n return {\n key: (\n self.models[key].encoder.max_positions(),\n self.models[key].decoder.max_positions(),\n )\n for key in self.keys\n }\n\n def max_decoder_positions(self):\n \"\"\"Maximum length supported by the decoder.\"\"\"\n return min(model.decoder.max_positions() for model in self.models.values())\n\n @property\n def encoder(self):\n return self.models[self.keys[0]].encoder\n\n @property\n def decoder(self):\n return self.models[self.keys[0]].decoder\n\n def forward_decoder(self, prev_output_tokens, **kwargs):\n return self.decoder(prev_output_tokens, **kwargs)\n\n def load_state_dict(\n self,\n state_dict,\n strict=True,\n model_cfg=None,\n args: Optional[Namespace] = None,\n ):\n \"\"\"Copies parameters and buffers from *state_dict* into this module and\n its descendants.\n\n Overrides the method in :class:`nn.Module`. Compared with that method\n this additionally \"upgrades\" *state_dicts* from old checkpoints.\n \"\"\"\n\n if model_cfg is None and args is not None:\n logger.warn(\"using 'args' is deprecated, please update your code to use dataclass config\")\n model_cfg = convert_namespace_to_omegaconf(args).model\n\n self.upgrade_state_dict(state_dict)\n\n from fairseq.checkpoint_utils import prune_state_dict\n\n new_state_dict = prune_state_dict(state_dict, model_cfg)\n return super().load_state_dict(new_state_dict, strict)\n\n\nclass FairseqLanguageModel(BaseFairseqModel):\n \"\"\"Base class for decoder-only models.\n\n Args:\n decoder (FairseqDecoder): the decoder\n \"\"\"\n\n def __init__(self, decoder):\n super().__init__()\n self.decoder = decoder\n check_type(self.decoder, FairseqDecoder)\n\n def forward(self, src_tokens, **kwargs):\n \"\"\"\n Run the forward pass for a decoder-only model.\n\n Feeds a batch of tokens through the decoder to predict the next tokens.\n\n Args:\n src_tokens (LongTensor): tokens on which to condition the decoder,\n of shape `(batch, tgt_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, seq_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n return self.decoder(src_tokens, **kwargs)\n\n def forward_decoder(self, prev_output_tokens, **kwargs):\n return self.decoder(prev_output_tokens, **kwargs)\n\n def extract_features(self, src_tokens, **kwargs):\n \"\"\"\n Similar to *forward* but only return features.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, seq_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n return self.decoder.extract_features(src_tokens, **kwargs)\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the default output size (typically vocabulary size).\"\"\"\n return self.decoder.output_layer(features, **kwargs)\n\n def max_positions(self):\n \"\"\"Maximum length supported by the model.\"\"\"\n return self.decoder.max_positions()\n\n def max_decoder_positions(self):\n \"\"\"Maximum length supported by the decoder.\"\"\"\n return self.decoder.max_positions()\n\n 
@property\n def supported_targets(self):\n return {\"future\"}\n\n\nclass FairseqEncoderModel(BaseFairseqModel):\n \"\"\"Base class for encoder-only models.\n\n Args:\n encoder (FairseqEncoder): the encoder\n \"\"\"\n\n def __init__(self, encoder):\n super().__init__()\n self.encoder = encoder\n check_type(self.encoder, FairseqEncoder)\n\n def forward(self, src_tokens, src_lengths, **kwargs):\n \"\"\"\n Run the forward pass for a encoder-only model.\n\n Feeds a batch of tokens through the encoder to generate features.\n\n Args:\n src_tokens (LongTensor): input tokens of shape `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n\n Returns:\n the encoder's output, typically of shape `(batch, src_len, features)`\n \"\"\"\n return self.encoder(src_tokens, src_lengths, **kwargs)\n\n def get_normalized_probs(self, net_output, log_probs, sample=None):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n encoder_out = net_output[\"encoder_out\"]\n if torch.is_tensor(encoder_out):\n logits = encoder_out.float()\n if log_probs:\n return F.log_softmax(logits, dim=-1)\n else:\n return F.softmax(logits, dim=-1)\n raise NotImplementedError\n\n def max_positions(self):\n \"\"\"Maximum length supported by the model.\"\"\"\n return self.encoder.max_positions()\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.is_tensor", "torch.nn.utils.remove_weight_norm", "torch.nn.functional.softmax" ] ]
willcanniford/python-notes
[ "1c2a33ab976e589fc6f801de2b6bd740d3aca2d7" ]
[ "machine_learning/sklearn-polynomial.py" ]
[ "# Import the libraries and functions that we are going to need\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# Load in some fake data for comparison\ndata = pd.read_csv('./data/polynomial.csv')\nX = data[['Var_X']].values.reshape(-1, 1)\ny = data[['Var_Y']].values\n\n# Assign the data to predictor and outcome variables for another theoretical set\n# X = np.array([1, 3, 4, 5, 2, 8, 7, 6, 5, 4, 2]).reshape(11, 1)\n# y = np.array(list(map(lambda x: 2*x**3 - 3*x**2 + 5*x + 9, X))).flatten()\n\n# Create polynomial features\n# Create a PolynomialFeatures object, then fit and transform the predictor\n# feature to use these polynomial features\npoly_feat = PolynomialFeatures(degree=4)\nX_poly = poly_feat.fit_transform(X)\n\n# Make and fit the polynomial regression model\n# Create a LinearRegression object and fit it to the polynomial predictor\n# features\npoly_model = LinearRegression(fit_intercept=False).fit(X_poly, y)\n\n# Make predictions using the linear model with poly features\ndata['Predictions'] = poly_model.predict(X_poly)\n# Sort by the values of the X variable to fix model line plotting\ndata.sort_values('Var_X', inplace=True)\n\n# Visualise the predictions against the real values\nplt.scatter(data[['Var_X']].values, data[['Var_Y']].values, c='Blue')\nplt.plot(data[['Var_X']].values, data[['Predictions']], c='Red')\nplt.title('4 Degree Polynomial predictions using sklearn')\nplt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "sklearn.preprocessing.PolynomialFeatures", "matplotlib.pyplot.plot", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.show" ] ]
BlinkCreator/Machinelearning_MINIST
[ "02ccefa92c9fd794d7fc1cbc0e2b7767c931c563" ]
[ "main.py" ]
[ "from __future__ import print_function\nimport torch\nimport torch.optim as optim\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\nfrom models.conv import Net\nfrom models.rnn_conv import ImageRNN\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n\n# functions to show an image\ndef imsave(img):\n npimg = img.numpy()\n npimg = (np.transpose(npimg, (1, 2, 0)) * 255).astype(np.uint8)\n im = Image.fromarray(npimg)\n im.save(\"./results/your_file.jpeg\")\n\ndef train_cnn(log_interval, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n # forward + backward + optimize\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward(); optimizer.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef train_rnn(log_interval, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n # reset hidden states\n model.hidden = model.init_hidden()\n data = data.view(-1, 28, 28)\n outputs = model(data)\n criterion = torch.nn.CrossEntropyLoss()\n loss = criterion(outputs, target)\n loss.backward(); optimizer.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n #Squeeze is needed for RNN\n # data = torch.squeeze(data)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\ndef main():\n epoches = 14\n gamma = 0.7\n log_interval = 10\n torch.manual_seed(1)\n save_model = True\n\n #RNN\n #Set to false so default CNN is selected\n RNN = False\n N_STEPS = 28\n N_INPUTS = 28\n N_NEURONS = 150\n N_OUTPUTS = 10\n\n # Check whether you can use Cuda\n use_cuda = torch.cuda.is_available()\n # Use Cuda if you can\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\n ######################3 Torchvision ###########################3\n # Use data predefined loader\n # Pre-processing by using the transform.Compose\n # divide into batches\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor()\n ])),\n batch_size=64, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor()\n ])),\n batch_size=1000, shuffle=True, **kwargs)\n\n # get some random training images\n dataiter = iter(train_loader)\n images, labels = dataiter.next()\n # img = torchvision.utils.make_grid(images)\n # imsave(img)\n\n # ##################### Build your network and run ############################\n if RNN:\n model = ImageRNN(64, N_STEPS, N_INPUTS, N_NEURONS, N_OUTPUTS, device).to(device)\n else:\n model = Net().to(device)\n\n if RNN:\n optimizer = optim.Adadelta(model.parameters(), lr=0.01)\n else:\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n scheduler = StepLR(optimizer, step_size=1, gamma=gamma)\n\n for epoch in range(1, epoches + 1):\n if RNN:\n train_rnn(log_interval, model, device, train_loader, optimizer, epoch)\n else:\n train_cnn(log_interval, model, device, train_loader, optimizer, epoch)\n\n test(model, device, test_loader)\n scheduler.step()\n\n if save_model:\n torch.save(model.state_dict(), \"./results/mnist_cnn.pt\")\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.nll_loss", "torch.manual_seed", "torch.no_grad", "torch.cuda.is_available", "numpy.transpose", "torch.device", "torch.optim.lr_scheduler.StepLR" ] ]
alumae/audiomentations
[ "275347fcfcf14ea395d228c192efa57496addf8f" ]
[ "audiomentations/augmentations/transforms.py" ]
[ "import functools\nimport os\nimport random\nimport sys\nimport tempfile\nimport uuid\nimport warnings\n\nimport librosa\nimport numpy as np\nfrom scipy.signal import butter, sosfilt, convolve\n\nfrom audiomentations.core.audio_loading_utils import load_sound_file\nfrom audiomentations.core.transforms_interface import BaseWaveformTransform\nfrom audiomentations.core.utils import (\n calculate_rms,\n calculate_desired_noise_rms,\n get_file_paths,\n convert_decibels_to_amplitude_ratio,\n convert_float_samples_to_int16,\n)\n\n\nclass AddImpulseResponse(BaseWaveformTransform):\n \"\"\"Convolve the audio with a random impulse response.\n Impulse responses can be created using e.g. http://tulrich.com/recording/ir_capture/\n Impulse responses are represented as wav files in the given ir_path.\n \"\"\"\n\n def __init__(self, ir_path=\"/tmp/ir\", p=0.5, lru_cache_size=128):\n \"\"\"\n :param ir_path: Path to a folder that contains one or more wav files of impulse\n responses. Must be str or a Path instance.\n :param p:\n :param lru_cache_size: Maximum size of the LRU cache for storing impuls files \n in memory.\n \"\"\"\n super().__init__(p)\n self.ir_files = get_file_paths(ir_path)\n self.ir_files = [str(p) for p in self.ir_files]\n assert len(self.ir_files) > 0\n self.__load_ir = functools.lru_cache(\n maxsize=lru_cache_size)(AddImpulseResponse.__load_ir)\n\n @staticmethod\n def __load_ir(file_path, sample_rate):\n return load_sound_file(file_path, sample_rate)\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"ir_file_path\"] = random.choice(self.ir_files)\n\n def apply(self, samples, sample_rate):\n ir, sample_rate2 = self.__load_ir(self.parameters[\"ir_file_path\"], sample_rate)\n if sample_rate != sample_rate2:\n # This will typically not happen, as librosa should automatically resample the\n # impulse response sound to the desired sample rate\n raise Exception(\n \"Recording sample rate {} did not match Impulse Response signal\"\n \" sample rate {}!\".format(sample_rate, sample_rate2)\n )\n signal_ir = convolve(samples, ir)\n max_value = max(np.amax(signal_ir), -np.amin(signal_ir))\n scale = 0.5 / max_value\n signal_ir *= scale\n return signal_ir\n\n\nclass FrequencyMask(BaseWaveformTransform):\n \"\"\"\n Mask some frequency band on the spectrogram.\n Inspired by https://arxiv.org/pdf/1904.08779.pdf\n \"\"\"\n\n def __init__(self, min_frequency_band=0.0, max_frequency_band=0.5, p=0.5):\n \"\"\"\n :param min_frequency_band: Minimum bandwidth, float\n :param max_frequency_band: Maximum bandwidth, float\n :param p:\n \"\"\"\n super().__init__(p)\n self.min_frequency_band = min_frequency_band\n self.max_frequency_band = max_frequency_band\n\n def __butter_bandstop(self, lowcut, highcut, fs, order=5):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n sos = butter(order, [low, high], btype=\"bandstop\", output=\"sos\")\n return sos\n\n def __butter_bandstop_filter(self, data, lowcut, highcut, fs, order=5):\n sos = self.__butter_bandstop(lowcut, highcut, fs, order=order)\n y = sosfilt(sos, data).astype(np.float32)\n return y\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"bandwidth\"] = random.randint(\n self.min_frequency_band * sample_rate // 2,\n self.max_frequency_band * sample_rate // 2,\n )\n self.parameters[\"freq_start\"] = 
random.randint(\n 16, sample_rate // 2 - self.parameters[\"bandwidth\"] - 1\n )\n\n def apply(self, samples, sample_rate):\n bandwidth = self.parameters[\"bandwidth\"]\n freq_start = self.parameters[\"freq_start\"]\n samples = self.__butter_bandstop_filter(\n samples, freq_start, freq_start + bandwidth, sample_rate, order=6\n )\n return samples\n\n\nclass TimeMask(BaseWaveformTransform):\n \"\"\"\n Make a randomly chosen part of the audio silent.\n Inspired by https://arxiv.org/pdf/1904.08779.pdf\n \"\"\"\n\n def __init__(self, min_band_part=0.0, max_band_part=0.5, fade=False, p=0.5):\n \"\"\"\n :param min_band_part: Minimum length of the silent part as a fraction of the\n total sound length. Float.\n :param max_band_part: Maximum length of the silent part as a fraction of the\n total sound length. Float.\n :param fade: Bool, Add linear fade in and fade out of the silent part.\n :param p:\n \"\"\"\n super().__init__(p)\n self.min_band_part = min_band_part\n self.max_band_part = max_band_part\n self.fade = fade\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n num_samples = samples.shape[0]\n self.parameters[\"t\"] = random.randint(\n int(num_samples * self.min_band_part),\n int(num_samples * self.max_band_part),\n )\n self.parameters[\"t0\"] = random.randint(\n 0, num_samples - self.parameters[\"t\"]\n )\n\n def apply(self, samples, sample_rate):\n new_samples = samples.copy()\n t = self.parameters[\"t\"]\n t0 = self.parameters[\"t0\"]\n mask = np.zeros(t)\n if self.fade:\n fade_length = min(int(sample_rate * 0.01), int(t * 0.1))\n mask[0:fade_length] = np.linspace(1, 0, num=fade_length)\n mask[-fade_length:] = np.linspace(0, 1, num=fade_length)\n new_samples[t0 : t0 + t] *= mask\n return new_samples\n\n\nclass AddGaussianSNR(BaseWaveformTransform):\n \"\"\"Add gaussian noise to the samples with random Signal to Noise Ratio (SNR)\"\"\"\n\n def __init__(self, min_SNR=0.001, max_SNR=1.0, p=0.5):\n \"\"\"\n :param min_SNR: Minimum signal-to-noise ratio\n :param max_SNR: Maximum signal-to-noise ratio\n :param p:\n \"\"\"\n super().__init__(p)\n self.min_SNR = min_SNR\n self.max_SNR = max_SNR\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n std = np.std(samples)\n self.parameters[\"noise_std\"] = random.uniform(\n self.min_SNR * std, self.max_SNR * std\n )\n\n def apply(self, samples, sample_rate):\n noise = np.random.normal(\n 0.0, self.parameters[\"noise_std\"], size=len(samples)\n ).astype(np.float32)\n return samples + noise\n\n\nclass AddGaussianNoise(BaseWaveformTransform):\n \"\"\"Add gaussian noise to the samples\"\"\"\n\n def __init__(self, min_amplitude=0.001, max_amplitude=0.015, p=0.5):\n super().__init__(p)\n self.min_amplitude = min_amplitude\n self.max_amplitude = max_amplitude\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"amplitude\"] = random.uniform(\n self.min_amplitude, self.max_amplitude\n )\n\n def apply(self, samples, sample_rate):\n noise = np.random.randn(len(samples)).astype(np.float32)\n samples = samples + self.parameters[\"amplitude\"] * noise\n return samples\n\n\nclass TimeStretch(BaseWaveformTransform):\n \"\"\"Time stretch the signal without changing the pitch\"\"\"\n\n def __init__(self, min_rate=0.8, max_rate=1.25, 
leave_length_unchanged=True, p=0.5):\n super().__init__(p)\n assert min_rate > 0.1\n assert max_rate < 10\n assert min_rate <= max_rate\n self.min_rate = min_rate\n self.max_rate = max_rate\n self.leave_length_unchanged = leave_length_unchanged\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n \"\"\"\n If rate > 1, then the signal is sped up.\n If rate < 1, then the signal is slowed down.\n \"\"\"\n self.parameters[\"rate\"] = random.uniform(self.min_rate, self.max_rate)\n\n def apply(self, samples, sample_rate):\n time_stretched_samples = librosa.effects.time_stretch(\n samples, self.parameters[\"rate\"]\n )\n if self.leave_length_unchanged:\n # Apply zero padding if the time stretched audio is not long enough to fill the\n # whole space, or crop the time stretched audio if it ended up too long.\n padded_samples = np.zeros(shape=samples.shape, dtype=samples.dtype)\n window = time_stretched_samples[: samples.shape[0]]\n actual_window_length = len(window) # may be smaller than samples.shape[0]\n padded_samples[:actual_window_length] = window\n time_stretched_samples = padded_samples\n return time_stretched_samples\n\n\nclass PitchShift(BaseWaveformTransform):\n \"\"\"Pitch shift the sound up or down without changing the tempo\"\"\"\n\n def __init__(self, min_semitones=-4, max_semitones=4, p=0.5):\n super().__init__(p)\n assert min_semitones >= -12\n assert max_semitones <= 12\n assert min_semitones <= max_semitones\n self.min_semitones = min_semitones\n self.max_semitones = max_semitones\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"num_semitones\"] = random.uniform(\n self.min_semitones, self.max_semitones\n )\n\n def apply(self, samples, sample_rate):\n pitch_shifted_samples = librosa.effects.pitch_shift(\n samples, sample_rate, n_steps=self.parameters[\"num_semitones\"]\n )\n return pitch_shifted_samples\n\n\nclass Shift(BaseWaveformTransform):\n \"\"\"\n Shift the samples forwards or backwards, with or without rollover\n \"\"\"\n\n def __init__(self, min_fraction=-0.5, max_fraction=0.5, rollover=True, p=0.5):\n \"\"\"\n :param min_fraction: float, fraction of total sound length\n :param max_fraction: float, fraction of total sound length\n :param rollover: When set to True, samples that roll beyond the first or last position\n are re-introduced at the last or first. When set to False, samples that roll beyond\n the first or last position are discarded. 
In other words, rollover=False results in\n an empty space (with zeroes).\n :param p:\n \"\"\"\n super().__init__(p)\n assert min_fraction >= -1\n assert max_fraction <= 1\n self.min_fraction = min_fraction\n self.max_fraction = max_fraction\n self.rollover = rollover\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"num_places_to_shift\"] = int(\n round(\n random.uniform(self.min_fraction, self.max_fraction) * len(samples)\n )\n )\n\n def apply(self, samples, sample_rate):\n num_places_to_shift = self.parameters[\"num_places_to_shift\"]\n shifted_samples = np.roll(samples, num_places_to_shift)\n if not self.rollover:\n if num_places_to_shift > 0:\n shifted_samples[:num_places_to_shift] = 0.0\n elif num_places_to_shift < 0:\n shifted_samples[num_places_to_shift:] = 0.0\n return shifted_samples\n\n\nclass Normalize(BaseWaveformTransform):\n \"\"\"\n Apply a constant amount of gain, so that highest signal level present in the sound becomes\n 0 dBFS, i.e. the loudest level allowed if all samples must be between -1 and 1. Also known\n as peak normalization.\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__(p)\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"max_amplitude\"] = np.amax(np.abs(samples))\n\n def apply(self, samples, sample_rate):\n if self.parameters[\"max_amplitude\"] > 0:\n normalized_samples = samples / self.parameters[\"max_amplitude\"]\n else:\n normalized_samples = samples\n return normalized_samples\n\n\nclass Trim(BaseWaveformTransform):\n \"\"\"\n Trim leading and trailing silence from an audio signal using librosa.effects.trim\n \"\"\"\n\n def __init__(self, top_db=20, p=1.0):\n super().__init__(p)\n self.top_db = top_db\n\n def apply(self, samples, sample_rate):\n samples, lens = librosa.effects.trim(samples, top_db=self.top_db)\n return samples\n\n\nclass Resample(BaseWaveformTransform):\n \"\"\"\n Resample signal using librosa.core.resample\n\n To do downsampling only set both minimum and maximum sampling rate lower than original\n sampling rate and vice versa to do upsampling only.\n \"\"\"\n\n def __init__(self, min_sample_rate=8000, max_sample_rate=44100, p=0.5):\n \"\"\"\n :param min_sample_rate: int, Minimum sample rate\n :param max_sample_rate: int, Maximum sample rate\n :param p:\n \"\"\"\n super().__init__(p)\n assert min_sample_rate <= max_sample_rate\n self.min_sample_rate = min_sample_rate\n self.max_sample_rate = max_sample_rate\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"target_sample_rate\"] = random.randint(\n self.min_sample_rate, self.max_sample_rate\n )\n\n def apply(self, samples, sample_rate):\n samples = librosa.core.resample(\n samples,\n orig_sr=sample_rate,\n target_sr=self.parameters[\"target_sample_rate\"],\n )\n return samples\n\n\nclass ClippingDistortion(BaseWaveformTransform):\n \"\"\"Distort signal by clipping a random percentage of points\n\n The percentage of points that will ble clipped is drawn from a uniform distribution between\n the two input parameters min_percentile_threshold and max_percentile_threshold. 
If for instance\n 30% is drawn, the samples are clipped if they're below the 15th or above the 85th percentile.\n \"\"\"\n\n def __init__(self, min_percentile_threshold=0, max_percentile_threshold=40, p=0.5):\n \"\"\"\n :param min_percentile_threshold: int, A lower bound on the total percent of samples that\n will be clipped\n :param max_percentile_threshold: int, A upper bound on the total percent of samples that\n will be clipped\n :param p:\n \"\"\"\n super().__init__(p)\n assert min_percentile_threshold <= max_percentile_threshold\n assert 0 <= min_percentile_threshold <= 100\n assert 0 <= max_percentile_threshold <= 100\n self.min_percentile_threshold = min_percentile_threshold\n self.max_percentile_threshold = max_percentile_threshold\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"percentile_threshold\"] = random.randint(\n self.min_percentile_threshold, self.max_percentile_threshold\n )\n\n def apply(self, samples, sample_rate):\n lower_percentile_threshold = int(self.parameters[\"percentile_threshold\"] / 2)\n lower_threshold, upper_threshold = np.percentile(\n samples, [lower_percentile_threshold, 100 - lower_percentile_threshold]\n )\n samples = np.clip(samples, lower_threshold, upper_threshold)\n return samples\n\n\nclass AddBackgroundNoise(BaseWaveformTransform):\n \"\"\"Mix in another sound, e.g. a background noise. Useful if your original sound is clean and\n you want to simulate an environment where background noise is present.\n\n Can also be used for mixup, as in https://arxiv.org/pdf/1710.09412.pdf\n\n A folder of (background noise) sounds to be mixed in must be specified. These sounds should\n ideally be at least as long as the input sounds to be transformed. Otherwise, the background\n sound will be repeated, which may sound unnatural.\n\n Note that the gain of the added noise is relative to the amount of signal in the input. This\n implies that if the input is completely silent, no noise will be added.\n \"\"\"\n\n def __init__(self, sounds_path=None, min_snr_in_db=3, max_snr_in_db=30, p=0.5, lru_cache_size=2):\n \"\"\"\n :param sounds_path: Path to a folder that contains sound files to randomly mix in. 
These\n files can be flac, mp3, ogg or wav.\n :param min_snr_in_db: Minimum signal-to-noise ratio in dB\n :param max_snr_in_db: Maximum signal-to-noise ratio in dB\n :param p:\n :param lru_cache_size: Maximum size of the LRU cache for storing noise files in memory\n \"\"\"\n super().__init__(p)\n self.sound_file_paths = get_file_paths(sounds_path)\n self.sound_file_paths = [str(p) for p in self.sound_file_paths]\n assert len(self.sound_file_paths) > 0\n self.min_snr_in_db = min_snr_in_db\n self.max_snr_in_db = max_snr_in_db\n self._load_sound = functools.lru_cache(\n maxsize=lru_cache_size)(AddBackgroundNoise._load_sound)\n\n @staticmethod\n def _load_sound(file_path, sample_rate):\n return load_sound_file(file_path, sample_rate)\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"snr_in_db\"] = random.uniform(\n self.min_snr_in_db, self.max_snr_in_db\n )\n self.parameters[\"noise_file_path\"] = random.choice(self.sound_file_paths)\n\n num_samples = len(samples)\n noise_sound, _ = self._load_sound(\n self.parameters[\"noise_file_path\"], sample_rate\n )\n\n num_noise_samples = len(noise_sound)\n min_noise_offset = 0\n max_noise_offset = max(0, num_noise_samples - num_samples - 1)\n self.parameters[\"noise_start_index\"] = random.randint(\n min_noise_offset, max_noise_offset\n )\n self.parameters[\"noise_end_index\"] = (\n self.parameters[\"noise_start_index\"] + num_samples\n )\n\n def apply(self, samples, sample_rate):\n noise_sound, _ = self._load_sound(\n self.parameters[\"noise_file_path\"], sample_rate\n )\n noise_sound = noise_sound[\n self.parameters[\"noise_start_index\"] : self.parameters[\"noise_end_index\"]\n ]\n\n noise_rms = calculate_rms(noise_sound)\n if noise_rms < 1e-9:\n warnings.warn(\n \"The file {} is too silent to be added as noise. Returning the input\"\n \" unchanged.\".format(self.parameters[\"noise_file_path\"])\n )\n return samples\n\n clean_rms = calculate_rms(samples)\n desired_noise_rms = calculate_desired_noise_rms(\n clean_rms, self.parameters[\"snr_in_db\"]\n )\n\n # Adjust the noise to match the desired noise RMS\n noise_sound = noise_sound * (desired_noise_rms / noise_rms)\n\n # Repeat the sound if it shorter than the input sound\n num_samples = len(samples)\n while len(noise_sound) < num_samples:\n noise_sound = np.concatenate((noise_sound, noise_sound))\n\n if len(noise_sound) > num_samples:\n noise_sound = noise_sound[0:num_samples]\n\n # Return a mix of the input sound and the background noise sound\n return samples + noise_sound\n\n\nclass AddShortNoises(BaseWaveformTransform):\n \"\"\"Mix in various (bursts of overlapping) sounds with random pauses between. Useful if your\n original sound is clean and you want to simulate an environment where short noises sometimes\n occur.\n\n A folder of (noise) sounds to be mixed in must be specified.\n \"\"\"\n\n def __init__(\n self,\n sounds_path=None,\n min_snr_in_db=0,\n max_snr_in_db=24,\n min_time_between_sounds=4.0,\n max_time_between_sounds=16.0,\n burst_probability=0.22,\n min_pause_factor_during_burst=0.1,\n max_pause_factor_during_burst=1.1,\n min_fade_in_time=0.005,\n max_fade_in_time=0.08,\n min_fade_out_time=0.01,\n max_fade_out_time=0.1,\n p=0.5,\n lru_cache_size=64\n ):\n \"\"\"\n :param sounds_path: Path to a folder that contains sound files to randomly mix in. These\n files can be flac, mp3, ogg or wav.\n :param min_snr_in_db: Minimum signal-to-noise ratio in dB. 
A lower value means the added\n sounds/noises will be louder.\n :param max_snr_in_db: Maximum signal-to-noise ratio in dB. A lower value means the added\n sounds/noises will be louder.\n :param min_time_between_sounds: Minimum pause time between the added sounds/noises\n :param max_time_between_sounds: Maximum pause time between the added sounds/noises\n :param burst_probability: The probability of adding an extra sound/noise that overlaps\n :param min_pause_factor_during_burst: Min value of how far into the current sound (as\n fraction) the burst sound should start playing. The value must be greater than 0.\n :param max_pause_factor_during_burst: Max value of how far into the current sound (as\n fraction) the burst sound should start playing. The value must be greater than 0.\n :param min_fade_in_time: Min sound/noise fade in time in seconds. Use a value larger\n than 0 to avoid a \"click\" at the start of the sound/noise.\n :param max_fade_in_time: Min sound/noise fade out time in seconds. Use a value larger\n than 0 to avoid a \"click\" at the start of the sound/noise.\n :param min_fade_out_time: Min sound/noise fade out time in seconds. Use a value larger\n than 0 to avoid a \"click\" at the end of the sound/noise.\n :param max_fade_out_time: Max sound/noise fade out time in seconds. Use a value larger\n than 0 to avoid a \"click\" at the end of the sound/noise.\n :param p: The probability of applying this transform\n :param lru_cache_size: Maximum size of the LRU cache for storing noise files in memory\n \"\"\"\n super().__init__(p)\n self.sound_file_paths = get_file_paths(sounds_path)\n self.sound_file_paths = [str(p) for p in self.sound_file_paths]\n assert len(self.sound_file_paths) > 0\n assert min_snr_in_db <= max_snr_in_db\n assert min_time_between_sounds <= max_time_between_sounds\n assert 0.0 < burst_probability <= 1.0\n if burst_probability == 1.0:\n assert (\n min_pause_factor_during_burst > 0.0\n ) # or else an infinite loop will occur\n assert 0.0 < min_pause_factor_during_burst <= 1.0\n assert max_pause_factor_during_burst > 0.0\n assert max_pause_factor_during_burst >= min_pause_factor_during_burst\n assert min_fade_in_time >= 0.0\n assert max_fade_in_time >= 0.0\n assert min_fade_in_time <= max_fade_in_time\n assert min_fade_out_time >= 0.0\n assert max_fade_out_time >= 0.0\n assert min_fade_out_time <= max_fade_out_time\n\n self.min_snr_in_db = min_snr_in_db\n self.max_snr_in_db = max_snr_in_db\n self.min_time_between_sounds = min_time_between_sounds\n self.max_time_between_sounds = max_time_between_sounds\n self.burst_probability = burst_probability\n self.min_pause_factor_during_burst = min_pause_factor_during_burst\n self.max_pause_factor_during_burst = max_pause_factor_during_burst\n self.min_fade_in_time = min_fade_in_time\n self.max_fade_in_time = max_fade_in_time\n self.min_fade_out_time = min_fade_out_time\n self.max_fade_out_time = max_fade_out_time\n self._load_sound = functools.lru_cache(\n maxsize=lru_cache_size)(AddShortNoises.__load_sound)\n\n @staticmethod\n def __load_sound(file_path, sample_rate):\n return load_sound_file(file_path, sample_rate)\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n input_sound_duration = len(samples) / sample_rate\n\n current_time = 0\n global_offset = random.uniform(\n -self.max_time_between_sounds, self.max_time_between_sounds\n )\n current_time += global_offset\n sounds = []\n while current_time < 
input_sound_duration:\n sound_file_path = random.choice(self.sound_file_paths)\n sound, _ = self.__load_sound(sound_file_path, sample_rate)\n sound_duration = len(sound) / sample_rate\n\n # Ensure that the fade time is not longer than the duration of the sound\n fade_in_time = min(\n sound_duration,\n random.uniform(self.min_fade_in_time, self.max_fade_in_time),\n )\n fade_out_time = min(\n sound_duration,\n random.uniform(self.min_fade_out_time, self.max_fade_out_time),\n )\n\n sounds.append(\n {\n \"fade_in_time\": fade_in_time,\n \"start\": current_time,\n \"end\": current_time + sound_duration,\n \"fade_out_time\": fade_out_time,\n \"file_path\": sound_file_path,\n \"snr_in_db\": random.uniform(\n self.min_snr_in_db, self.max_snr_in_db\n ),\n }\n )\n\n # burst mode - add overlapping sounds\n while (\n random.random() < self.burst_probability\n and current_time < input_sound_duration\n ):\n pause_factor = random.uniform(\n self.min_pause_factor_during_burst,\n self.max_pause_factor_during_burst,\n )\n pause_time = pause_factor * sound_duration\n current_time = sounds[-1][\"start\"] + pause_time\n\n if current_time >= input_sound_duration:\n break\n\n sound_file_path = random.choice(self.sound_file_paths)\n sound, _ = self.__load_sound(sound_file_path, sample_rate)\n sound_duration = len(sound) / sample_rate\n\n fade_in_time = min(\n sound_duration,\n random.uniform(self.min_fade_in_time, self.max_fade_in_time),\n )\n fade_out_time = min(\n sound_duration,\n random.uniform(self.min_fade_out_time, self.max_fade_out_time),\n )\n\n sounds.append(\n {\n \"fade_in_time\": fade_in_time,\n \"start\": current_time,\n \"end\": current_time + sound_duration,\n \"fade_out_time\": fade_out_time,\n \"file_path\": sound_file_path,\n \"snr_in_db\": random.uniform(\n self.min_snr_in_db, self.max_snr_in_db\n ),\n }\n )\n\n # wait until the last sound is done\n current_time += sound_duration\n\n # then add a pause\n pause_duration = random.uniform(\n self.min_time_between_sounds, self.max_time_between_sounds\n )\n current_time += pause_duration\n\n self.parameters[\"sounds\"] = sounds\n\n def apply(self, samples, sample_rate):\n num_samples = len(samples)\n noise_placeholder = np.zeros_like(samples)\n for sound_params in self.parameters[\"sounds\"]:\n if sound_params[\"end\"] < 0:\n # Skip a sound if it ended before the start of the input sound\n continue\n\n noise_samples, _ = self.__load_sound(sound_params[\"file_path\"], sample_rate)\n\n # Apply fade in and fade out\n noise_gain = np.ones_like(noise_samples)\n fade_in_time_in_samples = int(sound_params[\"fade_in_time\"] * sample_rate)\n fade_in_mask = np.linspace(0.0, 1.0, num=fade_in_time_in_samples)\n fade_out_time_in_samples = int(sound_params[\"fade_out_time\"] * sample_rate)\n fade_out_mask = np.linspace(1.0, 0.0, num=fade_out_time_in_samples)\n noise_gain[: fade_in_mask.shape[0]] = fade_in_mask\n noise_gain[-fade_out_mask.shape[0] :] = np.minimum(\n noise_gain[-fade_out_mask.shape[0] :], fade_out_mask\n )\n noise_samples = noise_samples * noise_gain\n\n start_sample_index = int(sound_params[\"start\"] * sample_rate)\n end_sample_index = start_sample_index + len(noise_samples)\n\n if start_sample_index < 0:\n # crop noise_samples: shave off a chunk in the beginning\n num_samples_to_shave_off = abs(start_sample_index)\n noise_samples = noise_samples[num_samples_to_shave_off:]\n start_sample_index = 0\n\n if end_sample_index > num_samples:\n # crop noise_samples: shave off a chunk in the end\n num_samples_to_shave_off = end_sample_index - 
num_samples\n noise_samples = noise_samples[\n : len(noise_samples) - num_samples_to_shave_off\n ]\n end_sample_index = num_samples\n\n clean_rms = calculate_rms(samples[start_sample_index:end_sample_index])\n noise_rms = calculate_rms(noise_samples)\n if noise_rms > 0:\n desired_noise_rms = calculate_desired_noise_rms(\n clean_rms, sound_params[\"snr_in_db\"]\n )\n\n # Adjust the noise to match the desired noise RMS\n noise_samples = noise_samples * (desired_noise_rms / noise_rms)\n\n noise_placeholder[start_sample_index:end_sample_index] += noise_samples\n\n # Return a mix of the input sound and the added sounds\n return samples + noise_placeholder\n\n\nclass PolarityInversion(BaseWaveformTransform):\n \"\"\"\n Flip the audio samples upside-down, reversing their polarity. In other words, multiply the\n waveform by -1, so negative values become positive, and vice versa. The result will sound\n the same compared to the original when played back in isolation. However, when mixed with\n other audio sources, the result may be different. This waveform inversion technique\n is sometimes used for audio cancellation or obtaining the difference between two waveforms.\n However, in the context of audio data augmentation, this transform can be useful when\n training phase-aware machine learning models.\n \"\"\"\n\n supports_multichannel = True\n\n def __init__(self, p=0.5):\n \"\"\"\n :param p:\n \"\"\"\n super().__init__(p)\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n\n def apply(self, samples, sample_rate):\n return -samples\n\n\nclass Gain(BaseWaveformTransform):\n \"\"\"\n Multiply the audio by a random amplitude factor to reduce or increase the volume. This\n technique can help a model become somewhat invariant to the overall gain of the input audio.\n\n Warning: This transform can return samples outside the [-1, 1] range, which may lead to\n clipping or wrap distortion, depending on what you do with the audio in a later stage.\n See also https://en.wikipedia.org/wiki/Clipping_(audio)#Digital_clipping\n \"\"\"\n\n supports_multichannel = True\n\n def __init__(self, min_gain_in_db=-12, max_gain_in_db=12, p=0.5):\n \"\"\"\n :param p:\n \"\"\"\n super().__init__(p)\n assert min_gain_in_db <= max_gain_in_db\n self.min_gain_in_db = min_gain_in_db\n self.max_gain_in_db = max_gain_in_db\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"amplitude_ratio\"] = convert_decibels_to_amplitude_ratio(\n random.uniform(self.min_gain_in_db, self.max_gain_in_db)\n )\n\n def apply(self, samples, sample_rate):\n return samples * self.parameters[\"amplitude_ratio\"]\n\n\nclass Mp3Compression(BaseWaveformTransform):\n \"\"\"Compress the audio using an MP3 encoder to lower the audio quality.\n This may help machine learning models deal with compressed, low-quality audio.\n\n This transform depends on either lameenc or pydub/ffmpeg.\n\n Note that bitrates below 32 kbps are only supported for low sample rates (up to 24000 hz).\n\n Note: When using the lameenc backend, the output may be slightly longer than the input due\n to the fact that the LAME encoder inserts some silence at the beginning of the audio.\n\n Warning: This transform writes to disk, so it may be slow. Ideally, the work should be done\n in memory. 
Contributions are welcome.\n \"\"\"\n\n SUPPORTED_BITRATES = [\n 8,\n 16,\n 24,\n 32,\n 40,\n 48,\n 56,\n 64,\n 80,\n 96,\n 112,\n 128,\n 144,\n 160,\n 192,\n 224,\n 256,\n 320,\n ]\n\n def __init__(\n self, min_bitrate: int = 8, max_bitrate: int = 64, backend: str = \"pydub\", p=0.5\n ):\n \"\"\"\n :param min_bitrate: Minimum bitrate in kbps\n :param max_bitrate: Maximum bitrate in kbps\n :param backend: \"pydub\" or \"lameenc\".\n Pydub may use ffmpeg under the hood.\n Pros: Seems to avoid introducing latency in the output.\n Cons: Slower than lameenc.\n lameenc:\n Pros: You can set the quality parameter in addition to bitrate.\n Cons: Seems to introduce some silence at the start of the audio.\n :param p: The probability of applying this transform\n \"\"\"\n super().__init__(p)\n assert self.SUPPORTED_BITRATES[0] <= min_bitrate <= self.SUPPORTED_BITRATES[-1]\n assert self.SUPPORTED_BITRATES[0] <= max_bitrate <= self.SUPPORTED_BITRATES[-1]\n assert min_bitrate <= max_bitrate\n self.min_bitrate = min_bitrate\n self.max_bitrate = max_bitrate\n assert backend in (\"pydub\", \"lameenc\")\n self.backend = backend\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n bitrate_choices = [\n bitrate\n for bitrate in self.SUPPORTED_BITRATES\n if self.min_bitrate <= bitrate <= self.max_bitrate\n ]\n self.parameters[\"bitrate\"] = random.choice(bitrate_choices)\n\n def apply(self, samples, sample_rate):\n if self.backend == \"lameenc\":\n return self.apply_lameenc(samples, sample_rate)\n elif self.backend == \"pydub\":\n return self.apply_pydub(samples, sample_rate)\n else:\n raise Exception(\"Backend {} not recognized\".format(self.backend))\n\n def apply_lameenc(self, samples, sample_rate):\n try:\n import lameenc\n except ImportError:\n print(\n \"Failed to import the lame encoder. Maybe it is not installed? \"\n \"To install the optional lameenc dependency of audiomentations,\"\n \" do `pip install audiomentations[extras]` instead of\"\n \" `pip install audiomentations`\",\n file=sys.stderr,\n )\n raise\n\n assert len(samples.shape) == 1\n assert samples.dtype == np.float32\n\n int_samples = convert_float_samples_to_int16(samples)\n\n encoder = lameenc.Encoder()\n encoder.set_bit_rate(self.parameters[\"bitrate\"])\n encoder.set_in_sample_rate(sample_rate)\n encoder.set_channels(1)\n encoder.set_quality(7) # 2 = highest, 7 = fastest\n encoder.silence()\n\n mp3_data = encoder.encode(int_samples.tobytes())\n mp3_data += encoder.flush()\n\n # Write a temporary MP3 file that will then be decoded\n tmp_dir = tempfile.gettempdir()\n tmp_file_path = os.path.join(\n tmp_dir, \"tmp_compressed_{}.mp3\".format(str(uuid.uuid4())[0:12])\n )\n with open(tmp_file_path, \"wb\") as f:\n f.write(mp3_data)\n\n degraded_samples, _ = librosa.load(tmp_file_path, sample_rate)\n\n os.unlink(tmp_file_path)\n\n return degraded_samples\n\n def apply_pydub(self, samples, sample_rate):\n try:\n import pydub\n except ImportError:\n print(\n \"Failed to import pydub. Maybe it is not installed? 
\"\n \"To install the optional pydub dependency of audiomentations,\"\n \" do `pip install audiomentations[extras]` instead of\"\n \" `pip install audiomentations`\",\n file=sys.stderr,\n )\n raise\n\n assert len(samples.shape) == 1\n assert samples.dtype == np.float32\n\n int_samples = convert_float_samples_to_int16(samples)\n\n audio_segment = pydub.AudioSegment(\n int_samples.tobytes(),\n frame_rate=sample_rate,\n sample_width=int_samples.dtype.itemsize,\n channels=1,\n )\n\n tmp_dir = tempfile.gettempdir()\n tmp_file_path = os.path.join(\n tmp_dir, \"tmp_compressed_{}.mp3\".format(str(uuid.uuid4())[0:12])\n )\n\n bitrate_string = \"{}k\".format(self.parameters[\"bitrate\"])\n file_handle = audio_segment.export(tmp_file_path, bitrate=bitrate_string)\n file_handle.close()\n\n degraded_samples, _ = librosa.load(tmp_file_path, sample_rate)\n\n os.unlink(tmp_file_path)\n\n return degraded_samples\n" ]
[ [ "numpy.amax", "numpy.ones_like", "numpy.minimum", "numpy.linspace", "numpy.clip", "numpy.amin", "scipy.signal.sosfilt", "numpy.abs", "numpy.percentile", "numpy.concatenate", "scipy.signal.butter", "numpy.std", "numpy.zeros_like", "scipy.signal.convolve", "numpy.roll", "numpy.zeros" ] ]
benjeffery/tsconvert
[ "a7d68389fedf269d45387ecc44842f6ffe24b2cc" ]
[ "tests/test_newick.py" ]
[ "#\n# MIT License\n#\n# Copyright (c) 2019 Tskit Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport unittest\n\nimport msprime\nimport numpy as np\nimport tskit\nimport itertools\nimport dendropy\n\nimport tsconvert\n\n\ndef kc_distance(tree1, tree2):\n \"\"\"\n Returns the Kendall-Colijn topological distance between the specified\n pair of trees. This is a very simple and direct implementation for testing.\n \"\"\"\n samples = tree1.tree_sequence.samples()\n if not np.array_equal(samples, tree2.tree_sequence.samples()):\n raise ValueError(\"Trees must have the same samples\")\n k = samples.shape[0]\n n = (k * (k - 1)) // 2\n M = [np.ones(n + k), np.ones(n + k)]\n for tree_index, tree in enumerate([tree1, tree2]):\n stack = [(tree.root, 0)]\n while len(stack) > 0:\n u, depth = stack.pop()\n children = tree.children(u)\n for v in children:\n stack.append((v, depth + 1))\n for c1, c2 in itertools.combinations(children, 2):\n for u in tree.samples(c1):\n for v in tree.samples(c2):\n if u < v:\n a, b = u, v\n else:\n a, b = v, u\n pair_index = a * (a - 2 * k + 1) // -2 + b - a - 1\n assert M[tree_index][pair_index] == 1\n M[tree_index][pair_index] = depth\n return np.linalg.norm(M[0] - M[1])\n\n\ndef get_nonbinary_example(sample_size=20, recombination_rate=0, random_seed=42):\n ts = msprime.simulate(\n sample_size=sample_size, recombination_rate=recombination_rate,\n random_seed=random_seed,\n demographic_events=[\n msprime.SimpleBottleneck(time=0.5, population=0, proportion=1)])\n # Make sure this really has some non-binary nodes\n found = False\n for e in ts.edgesets():\n if len(e.children) > 2:\n found = True\n break\n assert found\n return ts\n\n\nclass TestSingleTreeRoundTrip(unittest.TestCase):\n \"\"\"\n Tests that we can successfully roundtrip trees with various topologies\n from a tree sequence containing a single tree.\n \"\"\"\n def verify(self, ts):\n self.assertEqual(ts.num_trees, 1)\n source_tree = ts.first()\n newick = tsconvert.to_newick(source_tree)\n conv_ts = tsconvert.from_newick(newick)\n self.assertEqual(conv_ts.num_trees, 1)\n conv_tree = conv_ts.first()\n source_str = source_tree.draw(format=\"unicode\", node_labels={})\n conv_str = conv_tree.draw(format=\"unicode\", node_labels={})\n # The tree sequences are canonical, so the nodes are allocated in\n # time order. 
We should be identical other than the leaf labels.\n self.assertEqual(source_str, conv_str)\n self.assertTrue(\n np.allclose(conv_ts.tables.nodes.time, ts.tables.nodes.time))\n self.assertEqual(list(range(ts.num_samples)), list(conv_tree.leaves()))\n self.assertEqual(list(range(ts.num_samples)), list(conv_tree.samples()))\n\n def test_msprime_binary(self):\n self.verify(msprime.simulate(10, random_seed=1))\n\n def test_msprime_non_binary(self):\n self.verify(get_nonbinary_example(8))\n\n\nclass TestMsRoundTrip(unittest.TestCase):\n \"\"\"\n Tests if we can round trip tree sequences through the ms format.\n \"\"\"\n def verify(self, ts):\n msout = tsconvert.to_ms(ts.simplify())\n new_ts = tsconvert.from_ms(msout)\n self.assertEqual(ts.num_trees, new_ts.num_trees)\n for t1, t2 in zip(ts.trees(), new_ts.trees()):\n self.assertAlmostEqual(t1.interval[0], t2.interval[0])\n self.assertAlmostEqual(t1.interval[1], t2.interval[1])\n self.assertEqual(kc_distance(t1, t2), 0)\n\n def test_msprime_single_tree(self):\n self.verify(msprime.simulate(10, random_seed=12))\n\n def test_msprime_binary(self):\n self.verify(msprime.simulate(10, recombination_rate=1, random_seed=1))\n\n def test_msprime_non_binary(self):\n ts = get_nonbinary_example(8, recombination_rate=1)\n self.assertGreater(ts.num_trees, 1)\n self.verify(ts)\n\n # TODO more examples\n\n\nclass TestFromMs(unittest.TestCase):\n \"\"\"\n Tests for the from_ms function.\n \"\"\"\n def test_empty_input(self):\n self.assertRaises(ValueError, tsconvert.from_ms, \"\")\n\n def test_ms_without_trees_flag(self):\n msout = \"\"\"ms 3 1 -t 4 -r 5 6 -seeds 1 2 3\n 1 2 3\n\n //\n segsites: 3\n positions: 0.3054 0.3812 0.5338\n 111\n 000\n 100\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n def test_empty_trees(self):\n msout = \"\"\"\n [];\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n msout = \"\"\"\n [1];\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n def test_ms_without_recombination(self):\n msout = \"\"\"\n ms 4 1 -t 5 -T -seeds 1 2 3\n 1 2 3\n\n //\n ((2:0.0680,3:0.0680):0.1481,(1:0.2124,4:0.2124):0.0038);\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n def test_malformed_length(self):\n msout = \"\"\"\n 5(1:0.27413282187548,2:0.27413282187548);\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n msout = \"\"\"\n [XXX](1:0.27413282187548,2:0.27413282187548);\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n msout = \"\"\"\n [](1:0.27413282187548,2:0.27413282187548);\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n msout = \"\"\"\n [5(1:0.27413282187548,2:0.27413282187548);\n \"\"\"\n self.assertRaises(\n dendropy.utility.error.DataParseError, tsconvert.from_ms, msout)\n\n def test_nonmatching_tips(self):\n msout = \"\"\"\n [2](1:0.2144,3:0.2144);\n [4](3:0.2144,(1:0.0768,2:0.0768):0.1376);\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n msout = \"\"\"\n [2](2:0.2930,(1:0.2144,4:0.2144):0.0786);\n [4](3:0.2144,(1:0.0768,2:0.0768):0.1376);\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n def test_identical_node_times(self):\n msout = \"\"\"\n [2](((1:1.5,2:1.5):1.7,3:3.2):1.1,(4:1.5,5:1.5):2.8);\n \"\"\"\n self.assertRaises(ValueError, tsconvert.from_ms, msout)\n\n def test_zero_sequence_length(self):\n msout = \"\"\"\n [0](1:0.27413282187548,2:0.27413282187548);\n \"\"\"\n self.assertRaises(tskit.TskitException, tsconvert.from_ms, msout)\n\n def 
test_bad_edges(self):\n msout = \"\"\"\n [1](1:0.27413282187548,2:0.27413282187548);\n [0](1:0.27413282187548,2:0.27413282187548);\n \"\"\"\n self.assertRaises(tskit.TskitException, tsconvert.from_ms, msout)\n\n def test_single_tree(self):\n msout = \"\"\"\n [1](1:0.27413282187548,2:0.27413282187548);\n \"\"\"\n ts = tsconvert.from_ms(msout)\n self.assertEqual(ts.num_trees, 1)\n\n def test_full_ms_output(self):\n msout = \"\"\"\n ms 3 1 -t 4 -r 5 6 -seeds 1 2 3 -T\n 1 2 3\n\n //\n [2](2:0.2930,(1:0.2144,3:0.2144):0.0786);\n [4](3:0.2144,(1:0.0768,2:0.0768):0.1376);\n segsites: 3\n positions: 0.3054 0.3812 0.5338\n 111\n 000\n 100\n \"\"\"\n ts = tsconvert.from_ms(msout)\n self.assertEqual(ts.num_samples, 3)\n self.assertEqual(ts.sequence_length, 6)\n self.assertEqual(ts.num_trees, 2)\n self.assertEqual(ts.num_nodes, 6)\n\n trees = ts.trees()\n tree = next(trees)\n self.assertEqual(tree.interval, (0, 2))\n internal_nodes = set(tree.nodes()) - set(ts.samples())\n self.assertAlmostEqual(tree.branch_length(0), 0.2144)\n self.assertAlmostEqual(tree.branch_length(1), 0.2930)\n self.assertAlmostEqual(tree.branch_length(2), 0.2144)\n self.assertAlmostEqual(tree.branch_length(min(internal_nodes)), 0.0786)\n\n tree = next(trees)\n self.assertEqual(tree.interval, (2, 6))\n internal_nodes = set(tree.nodes()) - set(ts.samples())\n self.assertAlmostEqual(tree.branch_length(0), 0.0768)\n self.assertAlmostEqual(tree.branch_length(1), 0.0768)\n self.assertAlmostEqual(tree.branch_length(2), 0.2144)\n self.assertAlmostEqual(tree.branch_length(min(internal_nodes)), 0.1376)\n\n def test_equal_internal_node_time(self):\n # 6\n # ┏━┻━┓\n # 4 5\n # ┏┻┓ ┏┻┓\n # 0 1 2 3\n tables = tskit.TableCollection(1)\n for _ in range(4):\n tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)\n tables.nodes.add_row(0, time=1)\n tables.nodes.add_row(0, time=1)\n tables.nodes.add_row(0, time=2)\n\n tables.edges.add_row(0, 1, 4, 0)\n tables.edges.add_row(0, 1, 4, 1)\n tables.edges.add_row(0, 1, 5, 2)\n tables.edges.add_row(0, 1, 5, 3)\n tables.edges.add_row(0, 1, 6, 4)\n tables.edges.add_row(0, 1, 6, 5)\n tables.sort()\n ts = tables.tree_sequence()\n msout = tsconvert.to_ms(ts)\n # The current algorithm assumes node times are unique\n with self.assertRaises(ValueError):\n tsconvert.from_ms(msout)\n\n def test_n2_example(self):\n # j$ mspms 2 1 -T -r 4 10 -p 14\n msout = \"\"\"\n /home/jk/.local/bin/mspms 2 1 -T -r 4 10 -p 14\n 1774173160 1383299789 1436475231\n\n //\n [5](1:0.27413282187548,2:0.27413282187548);\n [3](1:0.43103605328988,2:0.43103605328988);\n [1](1:1.96842212024363,2:1.96842212024363);\n [1](1:2.06027985820196,2:2.06027985820196);\n \"\"\"\n ts = tsconvert.from_ms(msout)\n self.assertEqual(ts.num_samples, 2)\n self.assertEqual(ts.sequence_length, 10)\n self.assertEqual(ts.num_trees, 4)\n\n trees = ts.trees()\n tree = next(trees)\n self.assertEqual(tree.interval, (0, 5))\n self.assertAlmostEqual(tree.branch_length(0), 0.27413282187548)\n self.assertAlmostEqual(tree.branch_length(1), 0.27413282187548)\n\n tree = next(trees)\n self.assertEqual(tree.interval, (5, 8))\n self.assertAlmostEqual(tree.branch_length(0), 0.43103605328988)\n self.assertAlmostEqual(tree.branch_length(1), 0.43103605328988)\n\n tree = next(trees)\n self.assertEqual(tree.interval, (8, 9))\n self.assertAlmostEqual(tree.branch_length(0), 1.96842212024363)\n self.assertAlmostEqual(tree.branch_length(1), 1.96842212024363)\n\n tree = next(trees)\n self.assertEqual(tree.interval, (9, 10))\n self.assertAlmostEqual(tree.branch_length(0), 
2.06027985820196)\n self.assertAlmostEqual(tree.branch_length(1), 2.06027985820196)\n\n def test_n4_example(self):\n # $ mspms 4 1 -T -r 4 10 -p 8\n msout = \"\"\"\n /home/jk/.local/bin/mspms 4 1 -T -r 4 10 -p 8\n 961626313 1881970557 110898863\n\n //\n [5](1:0.70961771,(4:0.33536000,(2:0.12737966,3:0.12737966):0.20798034):0.37425772);\n [1]((2:0.12737966,3:0.12737966):0.20798034,(1:0.21249950,4:0.21249950):0.12286050);\n [2]((3:0.12737966,(2:0.02380236,4:0.02380236):0.10357730):0.20798034,1:0.33536000);\n [1](1:1.32624987,(3:0.12737966,(2:0.02380236,4:0.02380236):0.10357730):1.19887022);\n [2](1:1.80041212,(3:0.12737966,(2:0.02380236,4:0.02380236):0.10357730):1.67303246);\n \"\"\"\n ts = tsconvert.from_ms(msout)\n self.assertEqual(ts.num_samples, 4)\n self.assertEqual(ts.sequence_length, 11)\n self.assertEqual(ts.num_trees, 5)\n\n trees = ts.trees()\n tree = next(trees)\n self.assertEqual(tree.interval, (0, 5))\n self.assertAlmostEqual(tree.branch_length(0), 0.70961771)\n self.assertAlmostEqual(tree.branch_length(1), 0.12737966)\n self.assertAlmostEqual(tree.branch_length(2), 0.12737966)\n self.assertAlmostEqual(tree.branch_length(3), 0.33536000)\n\n tree = next(trees)\n self.assertEqual(tree.interval, (5, 6))\n self.assertAlmostEqual(tree.branch_length(0), 0.21249950)\n self.assertAlmostEqual(tree.branch_length(1), 0.12737966)\n self.assertAlmostEqual(tree.branch_length(2), 0.12737966)\n self.assertAlmostEqual(tree.branch_length(3), 0.21249950)\n\n tree = next(trees)\n self.assertEqual(tree.interval, (6, 8))\n\n tree = next(trees)\n self.assertEqual(tree.interval, (8, 9))\n\n tree = next(trees)\n self.assertEqual(tree.interval, (9, 11))\n self.assertAlmostEqual(tree.branch_length(0), 1.80041212)\n self.assertAlmostEqual(tree.branch_length(1), 0.02380236)\n self.assertAlmostEqual(tree.branch_length(2), 0.12737966)\n self.assertAlmostEqual(tree.branch_length(3), 0.02380236)\n\n # Should get the same output if we strip off the header stuff.\n msout = \"\"\"\n [5](1:0.70961771,(4:0.33536000,(2:0.12737966,3:0.12737966):0.20798034):0.37425772);\n [1]((2:0.12737966,3:0.12737966):0.20798034,(1:0.21249950,4:0.21249950):0.12286050);\n [2]((3:0.12737966,(2:0.02380236,4:0.02380236):0.10357730):0.20798034,1:0.33536000);\n [1](1:1.32624987,(3:0.12737966,(2:0.02380236,4:0.02380236):0.10357730):1.19887022);\n [2](1:1.80041212,(3:0.12737966,(2:0.02380236,4:0.02380236):0.10357730):1.67303246);\n \"\"\"\n tables = tsconvert.from_ms(msout).tables\n self.assertEqual(tables, ts.tables)\n" ]
[ [ "numpy.allclose", "numpy.linalg.norm", "numpy.ones" ] ]
rafmudaf/dash-slicer
[ "e959f1ea94f3bb1d061acd3f18727227a08144ed" ]
[ "tests/test_utils.py" ]
[ "from dash_slicer.utils import (\n img_as_ubyte,\n img_array_to_uri,\n get_thumbnail_size,\n shape3d_to_size2d,\n mask_to_coloured_slices,\n)\n\nimport numpy as np\nfrom pytest import raises\n\n\ndef test_img_as_ubyte():\n\n im = np.zeros((100, 100), np.float32)\n im[0, 0] = 100\n\n # Anything but uint8 is stretched to min-max\n im2 = img_as_ubyte(im)\n assert im2.dtype == np.uint8\n assert im2.min() == 0 and im2.max() == 255\n\n # Uint- stays where it is\n im2[0, 0] = 100\n im3 = img_as_ubyte(im2)\n assert im3.dtype == np.uint8\n assert im3.min() == 0 and im3.max() == 100\n\n\ndef test_img_array_to_uri():\n\n im = np.random.uniform(0, 255, (100, 100)).astype(np.uint8)\n\n r1 = img_array_to_uri(im)\n r2 = img_array_to_uri(im, 32)\n r3 = img_array_to_uri(im, 8)\n\n for r in (r1, r2, r3):\n assert isinstance(r, str)\n assert r.startswith(\"data:image/png;base64,\")\n\n assert len(r1) > len(r2) > len(r3)\n\n\ndef test_get_thumbnail_size():\n\n assert get_thumbnail_size((100, 100), 16) == (16, 16)\n assert get_thumbnail_size((50, 100), 16) == (16, 32)\n assert get_thumbnail_size((100, 100), 8) == (8, 8)\n assert get_thumbnail_size((100, 50), 8) == (16, 8)\n\n\ndef test_shape3d_to_size2d():\n # shape -> z, y, x\n # size -> x, y, out-of-plane\n assert shape3d_to_size2d((12, 13, 14), 0) == (14, 13, 12)\n assert shape3d_to_size2d((12, 13, 14), 1) == (14, 12, 13)\n assert shape3d_to_size2d((12, 13, 14), 2) == (13, 12, 14)\n\n with raises(IndexError):\n shape3d_to_size2d((12, 13, 14), 3)\n\n\ndef test_mask_to_coloured_slices():\n vol = np.random.uniform(0, 255, (10, 20, 30)).astype(np.uint8)\n mask = vol > 20\n\n # Check handling of axis\n assert len(mask_to_coloured_slices(mask, 0)) == 10\n assert len(mask_to_coloured_slices(mask, 1)) == 20\n assert len(mask_to_coloured_slices(mask, 2)) == 30\n\n # Bool overlay\n overlay = mask_to_coloured_slices(mask, 0)\n assert isinstance(overlay, list)\n assert all(isinstance(x, str) for x in overlay)\n\n # Bool overlay - with color\n overlay = mask_to_coloured_slices(mask, 0, \"#ff0000\")\n assert isinstance(overlay, list)\n assert all(isinstance(x, str) for x in overlay)\n\n # Bool overlay - with color rgb\n overlay = mask_to_coloured_slices(mask, 0, [0, 255, 0])\n assert all(isinstance(x, str) for x in overlay)\n\n # Bool overlay - with color rgba\n overlay = mask_to_coloured_slices(mask, 0, [0, 255, 0, 100])\n assert all(isinstance(x, str) for x in overlay)\n\n # Uint8 overlay - with colormap\n overlay = mask_to_coloured_slices(vol.astype(np.uint8), 0, [\"#ff0000\", \"#00ff00\"])\n assert all(isinstance(x, str) for x in overlay)\n\n # Reset by zero mask\n overlay = mask_to_coloured_slices(vol > 300, 0)\n assert all(x is None for x in overlay)\n\n # Wrong\n with raises(ValueError):\n mask_to_coloured_slices(mask, 0, \"red\") # named colors not supported yet\n with raises(ValueError):\n mask_to_coloured_slices(mask, 0, [0, 255, 0, 100, 100]) # not a color\n with raises(ValueError):\n mask_to_coloured_slices(mask, 0, [0, 255]) # not a color\n with raises(TypeError):\n mask_to_coloured_slices(\"not a valid mask\", 0)\n with raises(TypeError):\n mask_to_coloured_slices(\n None, 0\n ) # note that the mask in create_overlay_data can be None\n with raises(ValueError):\n mask_to_coloured_slices(vol.astype(np.float32), 0) # wrong dtype\n" ]
[ [ "numpy.random.uniform", "numpy.zeros" ] ]
pji/pjinoise
[ "3967d69fa57be1136cdeb8f4a5d187ee455fa783" ]
[ "tests/test_sources.py" ]
[ "\"\"\"\ntest_sources\n~~~~~~~~~~~~\n\nUnit tests for the pjinoise.generator module.\n\"\"\"\nfrom copy import deepcopy\nimport unittest as ut\nfrom unittest.mock import call, patch\n\nimport numpy as np\n\nfrom pjinoise import sources as s\nfrom pjinoise.common import grayscale_to_ints_list, print_array\nfrom pjinoise.constants import P\n\n\n# Common test functions.\ndef source_fill_test(test, exp, src_cls, src_kwargs, size, loc=(0, 0, 0)):\n src = src_cls(**src_kwargs)\n result = src.fill(size, loc)\n act = grayscale_to_ints_list(result)\n test.assertListEqual(exp, act)\n\n\n# Test cases.\nclass CachingTestCase(ut.TestCase):\n def test_cache_fill(self):\n \"\"\"The first time a fill is generated from a caching source,\n that fill should be cached and returned every time an instance\n of that class with the same key generates a fill of the same\n size.\n \"\"\"\n # Expected value.\n exp = [\n [\n [0x40, 0x40, 0x40,],\n [0x40, 0x40, 0x40,],\n [0x40, 0x40, 0x40,],\n ],\n [\n [0x40, 0x40, 0x40,],\n [0x40, 0x40, 0x40,],\n [0x40, 0x40, 0x40,],\n ],\n ]\n\n # Set up test data and state.\n class Source(s.Source):\n def __init__(self, value):\n self.value = value\n\n def fill(self, size, _):\n a = np.zeros(size, dtype=float)\n a.fill(self.value)\n return a\n\n class CachingSource(s.CachingMixin, Source):\n _cache = {}\n\n src1 = CachingSource('spam', 0.25)\n src2 = CachingSource('spam', 0.75)\n size = (2, 3, 3)\n _ = src1.fill(size)\n\n # Run test.\n result = src2.fill(size)\n\n # Extract actual result from test.\n act = grayscale_to_ints_list(result)\n\n # Determine if test passed.\n self.assertListEqual(exp, act)\n\n\nclass OctaveTestCases(ut.TestCase):\n def test_octavecosinecurtains_fill(self):\n \"\"\"Given the size of a volume to generate, fill the space\n with octave cosine curtain noise and return it.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n [0x11, 0xad, 0xc7, 0x87, 0xe5, 0x5d, 0x5e, 0x68,],\n ],\n [\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n [0x78, 0x9f, 0xbb, 0xc6, 0x6c, 0x77, 0xd7, 0x30,],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'octaves': 4,\n 'persistence': 8,\n 'amplitude': 8,\n 'frequency': 2,\n 'unit': (4, 4, 4),\n 'ease': 'l',\n 'table': P,\n }\n obj = s.OctaveCosineCurtains(**kwargs)\n\n # Run test.\n result = obj.fill((2, 8, 8))\n\n # Extract actual values.\n result = np.around(result * 0xff).astype(int)\n act = result.tolist()\n\n # Determine if test passed.\n self.assertListEqual(exp, act)\n\n def test_octaveperlinnoise_fill(self):\n \"\"\"Given the size of a space to fill, PerlinNoise.fill should\n return a np.array of that shape filled with noise.\n \"\"\"\n # Expected data.\n exp = [\n [\n [0x80, 0x70, 0x7c, 0x8b,],\n [0x78, 0x79, 0x7e, 0x82,],\n [0x7c, 0x89, 0x80, 0x7e,],\n [0x76, 0x80, 0x86, 0x7f,],\n ],\n ]\n\n # Set up test data and state.\n size = (1, 4, 
4)\n start = (4, 0, 0)\n kwargs = {\n 'octaves': 4,\n 'persistence': 8,\n 'amplitude': 8,\n 'frequency': 2,\n 'unit': (8, 8, 8),\n 'ease': '',\n 'table': P,\n }\n cls = s.OctavePerlin\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size, start)\n\n\nclass PatternTestCase(ut.TestCase):\n def test_box_fill(self):\n \"\"\"Given the size of a space to fill with noise, return an\n array of that size filled with noise.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'origin': (0, 1, 1),\n 'dimensions': (1, 2, 3),\n 'color': .5,\n }\n cls = s.Box\n size = (2, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_data_fill(self):\n \"\"\"Given a size of space to fill, return a slice of the seeded\n data that fills the space.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n data = np.array(exp, dtype=float) / 0xff\n kwargs = {\n 'data': data,\n }\n cls = s.Data\n size = (2, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_data_fill_is_larger_than_data(self):\n \"\"\"Given a size of space to fill that is larger than the\n seeded fill, return a fill that is magnified to fill the\n space.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0],\n [0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xf0],\n [0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff, 0xf0],\n [0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xf0, 0xf0, 0xe0],\n [0xc0, 0xd0, 0xe0, 0xf0, 0xff, 0xf0, 0xe0, 0xd0],\n [0xd0, 0xe0, 0xf0, 0xf0, 0xf0, 0xe0, 0xd0, 0xc0],\n [0xe0, 0xf0, 0xff, 0xf0, 0xe0, 0xd0, 0xc0, 0xb0],\n [0xf0, 0xf0, 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0],\n ],\n [\n [0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0],\n [0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xf0],\n [0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 
0xf0, 0xff, 0xf0],\n [0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xf0, 0xf0, 0xe0],\n [0xc0, 0xd0, 0xe0, 0xf0, 0xff, 0xf0, 0xe0, 0xd0],\n [0xd0, 0xe0, 0xf0, 0xf0, 0xf0, 0xe0, 0xd0, 0xc0],\n [0xe0, 0xf0, 0xff, 0xf0, 0xe0, 0xd0, 0xc0, 0xb0],\n [0xf0, 0xf0, 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0],\n ],\n ]\n\n # Set up test data and state.\n L = [\n [\n [0x80, 0xa0, 0xc0, 0xe0, 0xff,],\n [0xa0, 0xc0, 0xe0, 0xff, 0xe0,],\n [0xc0, 0xe0, 0xff, 0xe0, 0xc0,],\n [0xe0, 0xff, 0xe0, 0xc0, 0xa0,],\n [0xff, 0xe0, 0xc0, 0xa0, 0x80,],\n ],\n ]\n data = np.array(L[:], dtype=float) / 0xff\n kwargs = {\n 'data': data,\n }\n cls = s.Data\n size = (2, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_data_fill_is_smaller_than_data(self):\n \"\"\"Given a size of space to fill, return a slice of the seeded\n data that fills the space.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n L = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n data = np.array(L[:], dtype=float) / 0xff\n kwargs = {\n 'data': data,\n }\n cls = s.Data\n size = (1, 4, 5)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_data_fill_is_smaller_and_at_location(self):\n \"\"\"Given a size of space to fill, return a slice of the seeded\n data that fills the space.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x40],\n [0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n L = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n data = np.array(L[:], dtype=float) / 0xff\n kwargs = {\n 'data': data,\n }\n cls = s.Data\n size = (1, 4, 5)\n loc = (1, 3, 2)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size, loc)\n\n def 
test_gradient_fill(self):\n \"\"\"Given the size of a space to fill with noise, return an\n array of that size filled with noise.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00],\n [0x80, 0x80, 0x80, 0x80],\n [0xff, 0xff, 0xff, 0xff],\n [0x80, 0x80, 0x80, 0x80],\n [0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00],\n [0x80, 0x80, 0x80, 0x80],\n [0xff, 0xff, 0xff, 0xff],\n [0x80, 0x80, 0x80, 0x80],\n [0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'direction': 'v',\n 'stops': [0., 0., .5, 1., 1., 0.],\n 'ease': 'l',\n }\n cls = s.Gradient\n size = (2, 5, 4)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_lines_class(self):\n \"\"\"An instance of noise.LineNoise should be initiated with\n the given attributes.\n \"\"\"\n # Expected values.\n exp_cls = s.Lines\n exp_attrs = {\n 'type': 'lines',\n 'direction': 'h',\n 'length': 10,\n 'ease': 'i5',\n }\n\n # Set up test data and state.\n attrs = deepcopy(exp_attrs)\n del attrs['type']\n\n # Perform test.\n act_obj = exp_cls(**attrs)\n act_attrs = act_obj.asdict()\n\n # Determine if test passed.\n self.assertIsInstance(act_obj, exp_cls)\n self.assertDictEqual(exp_attrs, act_attrs)\n\n def test_lines_fill(self):\n \"\"\"Given the size of a space to fill with noise, return an\n array of that size filled with noise.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00],\n [0x80, 0x80, 0x80, 0x80],\n [0xff, 0xff, 0xff, 0xff],\n [0x80, 0x80, 0x80, 0x80],\n ],\n [\n [0x80, 0x80, 0x80, 0x80],\n [0xff, 0xff, 0xff, 0xff],\n [0x80, 0x80, 0x80, 0x80],\n [0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'direction': 'h',\n 'length': 5,\n 'ease': 'io3',\n }\n cls = s.Lines\n size = (2, 4, 4)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_rays_fill(self):\n \"\"\"Given a size and location, Ray.fill should return a\n volume filled with rays emanating from a central point.\n \"\"\"\n # Expected value.\n exp = [\n [\n [0x8f, 0x51, 0x13, 0x04, 0x45, 0xa7, 0xe8, 0xfe],\n [0xc9, 0x8f, 0x36, 0x00, 0x58, 0xd3, 0xfe, 0xf6],\n [0xf9, 0xe0, 0x8f, 0x06, 0x87, 0xfe, 0xe8, 0xc2],\n [0xf1, 0xf9, 0xff, 0x8f, 0xfe, 0xa5, 0x76, 0x61],\n [0x9e, 0x89, 0x5a, 0x01, 0x70, 0x00, 0x06, 0x0e],\n [0x3d, 0x17, 0x01, 0x78, 0xf9, 0x70, 0x1f, 0x06],\n [0x09, 0x01, 0x2c, 0xa7, 0xff, 0xc9, 0x70, 0x36],\n [0x01, 0x17, 0x58, 0xba, 0xfb, 0xec, 0xae, 0x70],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'count': 3,\n 'offset': np.pi / 2,\n 'ease': 'ios',\n }\n cls = s.Rays\n size = (1, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_ring_fill(self):\n \"\"\"Given a size and location, Ring.fill should return a\n volume filled with concentric rings.\n \"\"\"\n # Expected value.\n exp = [\n [\n [0x50, 0x00, 0x0e, 0xc0, 0xff, 0xc0, 0x0e, 0x00],\n [0x00, 0x83, 0x36, 0x00, 0x00, 0x00, 0x36, 0x83],\n [0x0e, 0x36, 0x00, 0x87, 0xff, 0x87, 0x00, 0x36],\n [0xc0, 0x00, 0x87, 0x00, 0x00, 0x00, 0x87, 0x00],\n [0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff, 0x00],\n [0xc0, 0x00, 0x87, 0x00, 0x00, 0x00, 0x87, 0x00],\n [0x0e, 0x36, 0x00, 0x87, 0xff, 0x87, 0x00, 0x36],\n [0x00, 0x83, 0x36, 0x00, 0x00, 0x00, 0x36, 0x83],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'radius': 2,\n 'width': 1,\n 'gap': 2,\n 'count': 3,\n 'ease': 'l'\n }\n cls = s.Ring\n size = (1, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_solid_fill(self):\n \"\"\"Given a size 
and location, Solid.fill should return a\n volume filled with a single color.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x40, 0x40, 0x40, 0x40],\n [0x40, 0x40, 0x40, 0x40],\n [0x40, 0x40, 0x40, 0x40],\n [0x40, 0x40, 0x40, 0x40],\n ],\n [\n [0x40, 0x40, 0x40, 0x40],\n [0x40, 0x40, 0x40, 0x40],\n [0x40, 0x40, 0x40, 0x40],\n [0x40, 0x40, 0x40, 0x40],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'color': .25,\n }\n cls = s.Solid\n size = (2, 4, 4)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_spheres_fill_x(self):\n \"\"\"Given a size and location, Spheres.fill should return a\n volume filled a radial gradient.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x2f, 0x42, 0x53, 0x60, 0x69, 0x6c, 0x69, 0x60,],\n [0x42, 0x58, 0x6c, 0x7b, 0x86, 0x89, 0x86, 0x7b,],\n [0x53, 0x6c, 0x82, 0x95, 0xa2, 0xa7, 0xa2, 0x95,],\n [0x60, 0x7b, 0x95, 0xac, 0xbd, 0xc4, 0xbd, 0xac,],\n [0x69, 0x86, 0xa2, 0xbd, 0xd5, 0xe2, 0xd5, 0xbd,],\n [0x6c, 0x89, 0xa7, 0xc4, 0xe2, 0xff, 0xe2, 0xc4,],\n [0x69, 0x86, 0xa2, 0xbd, 0xd5, 0xe2, 0xd5, 0xbd,],\n [0x60, 0x7b, 0x95, 0xac, 0xbd, 0xc4, 0xbd, 0xac,],\n ],\n [\n [0x2d, 0x40, 0x51, 0x5e, 0x66, 0x69, 0x66, 0x5e,],\n [0x40, 0x56, 0x69, 0x78, 0x82, 0x86, 0x82, 0x78,],\n [0x51, 0x69, 0x7f, 0x91, 0x9d, 0xa2, 0x9d, 0x91,],\n [0x5e, 0x78, 0x91, 0xa7, 0xb7, 0xbd, 0xb7, 0xa7,],\n [0x66, 0x82, 0x9d, 0xb7, 0xcc, 0xd5, 0xcc, 0xb7,],\n [0x69, 0x86, 0xa2, 0xbd, 0xd5, 0xe2, 0xd5, 0xbd,],\n [0x66, 0x82, 0x9d, 0xb7, 0xcc, 0xd5, 0xcc, 0xb7,],\n [0x5e, 0x78, 0x91, 0xa7, 0xb7, 0xbd, 0xb7, 0xa7,],\n ],\n ]\n\n # Set up test data and state.\n args = ['5', 'x', 'l']\n n = s.Spheres(*args)\n size = (2, 8, 8)\n\n # Run test.\n values = n.fill(size)\n\n # Extract actual values.\n values = np.around(values * 0xff).astype(int)\n act = values.tolist()\n\n # Determine if test passed.\n self.assertListEqual(exp, act)\n\n def test_spheres_fill_y(self):\n \"\"\"Given a size and location, Spheres.fill should return a\n volume filled a radial gradient.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x6c, 0x89, 0xa7, 0xc4, 0xe2, 0xff, 0xe2, 0xc4,],\n [0x69, 0x86, 0xa2, 0xbd, 0xd5, 0xe2, 0xd5, 0xbd,],\n [0x60, 0x7b, 0x95, 0xac, 0xbd, 0xc4, 0xbd, 0xac,],\n [0x53, 0x6c, 0x82, 0x95, 0xa2, 0xa7, 0xa2, 0x95,],\n [0x42, 0x58, 0x6c, 0x7b, 0x86, 0x89, 0x86, 0x7b,],\n [0x2f, 0x42, 0x53, 0x60, 0x69, 0x6c, 0x69, 0x60,],\n [0x42, 0x58, 0x6c, 0x7b, 0x86, 0x89, 0x86, 0x7b,],\n [0x53, 0x6c, 0x82, 0x95, 0xa2, 0xa7, 0xa2, 0x95,],\n ],\n [\n [0x69, 0x86, 0xa2, 0xbd, 0xd5, 0xe2, 0xd5, 0xbd,],\n [0x66, 0x82, 0x9d, 0xb7, 0xcc, 0xd5, 0xcc, 0xb7,],\n [0x5e, 0x78, 0x91, 0xa7, 0xb7, 0xbd, 0xb7, 0xa7,],\n [0x51, 0x69, 0x7f, 0x91, 0x9d, 0xa2, 0x9d, 0x91,],\n [0x40, 0x56, 0x69, 0x78, 0x82, 0x86, 0x82, 0x78,],\n [0x2d, 0x40, 0x51, 0x5e, 0x66, 0x69, 0x66, 0x5e,],\n [0x40, 0x56, 0x69, 0x78, 0x82, 0x86, 0x82, 0x78,],\n [0x51, 0x69, 0x7f, 0x91, 0x9d, 0xa2, 0x9d, 0x91,],\n ],\n ]\n\n # Set up test data and state.\n args = ['5', 'y', 'l']\n n = s.Spheres(*args)\n size = (2, 8, 8)\n\n # Run test.\n values = n.fill(size)\n\n # Extract actual values.\n values = np.around(values * 0xff).astype(int)\n act = values.tolist()\n\n # Determine if test passed.\n self.assertListEqual(exp, act)\n\n def test_spot_fill(self):\n \"\"\"Given a size and location, Spot.fill should return a\n volume filled with a spot of color.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x18, 0x32, 0x4c, 0x5f, 0x65, 0x5f, 0x4c, 0x32],\n [0x32, 0x58, 0x7c, 0x95, 0x9e, 0x95, 0x7c, 0x58],\n [0x4c, 0x7c, 0xa7, 0xc5, 0xd0, 0xc5, 0xa7, 
0x7c],\n [0x5f, 0x95, 0xc5, 0xe7, 0xf3, 0xe7, 0xc5, 0x95],\n [0x65, 0x9e, 0xd0, 0xf3, 0xff, 0xf3, 0xd0, 0x9e],\n [0x5f, 0x95, 0xc5, 0xe7, 0xf3, 0xe7, 0xc5, 0x95],\n [0x4c, 0x7c, 0xa7, 0xc5, 0xd0, 0xc5, 0xa7, 0x7c],\n [0x32, 0x58, 0x7c, 0x95, 0x9e, 0x95, 0x7c, 0x58],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'radius': 5,\n 'ease': 'ios'\n }\n cls = s.Spot\n size = (1, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_text_fill(self):\n \"\"\"Given a size and location, Text.fill should return a\n volume with the configured text.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x0b, 0x50, 0x2c, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x8e, 0x33, 0x3c, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x29, 0x8a, 0x74, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x61, 0x6f, 0x8a, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'text': 's',\n 'size': 6,\n 'origin': (3, 0),\n }\n cls = s.Text\n size = (1, 10, 10)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_waves_fill(self):\n \"\"\"Waves.fill should return a series of concentric rings.\"\"\"\n # Expected value.\n exp = [\n [\n [0x4c, 0x22, 0x76, 0xa4, 0xa4, 0x76, 0x22, 0x4c],\n [0x22, 0xa4, 0xf1, 0xb2, 0xb2, 0xf1, 0xa4, 0x22],\n [0x76, 0xf1, 0x6a, 0x0e, 0x0e, 0x6a, 0xf1, 0x76],\n [0xa4, 0xb2, 0x0e, 0x87, 0x87, 0x0e, 0xb2, 0xa4],\n [0xa4, 0xb2, 0x0e, 0x87, 0x87, 0x0e, 0xb2, 0xa4],\n [0x76, 0xf1, 0x6a, 0x0e, 0x0e, 0x6a, 0xf1, 0x76],\n [0x22, 0xa4, 0xf1, 0xb2, 0xb2, 0xf1, 0xa4, 0x22],\n [0x4c, 0x22, 0x76, 0xa4, 0xa4, 0x76, 0x22, 0x4c],\n ],\n [\n [0x4c, 0x22, 0x76, 0xa4, 0xa4, 0x76, 0x22, 0x4c],\n [0x22, 0xa4, 0xf1, 0xb2, 0xb2, 0xf1, 0xa4, 0x22],\n [0x76, 0xf1, 0x6a, 0x0e, 0x0e, 0x6a, 0xf1, 0x76],\n [0xa4, 0xb2, 0x0e, 0x87, 0x87, 0x0e, 0xb2, 0xa4],\n [0xa4, 0xb2, 0x0e, 0x87, 0x87, 0x0e, 0xb2, 0xa4],\n [0x76, 0xf1, 0x6a, 0x0e, 0x0e, 0x6a, 0xf1, 0x76],\n [0x22, 0xa4, 0xf1, 0xb2, 0xb2, 0xf1, 0xa4, 0x22],\n [0x4c, 0x22, 0x76, 0xa4, 0xa4, 0x76, 0x22, 0x4c],\n ],\n ]\n\n # Set up test data and state.\n cls = s.Waves\n kwargs = {\n 'length': 3,\n 'growth': 'l',\n 'ease': '',\n }\n size = (2, 8, 8)\n\n # Run test and determine if passed.\n source_fill_test(self, exp, cls, kwargs, size)\n\n\nclass RandomTestCase(ut.TestCase):\n def test_random_fill(self):\n \"\"\"Given a size and a location, Random.fill should return a\n space filled with random noise that is centered around a given\n midpoint.\n \"\"\"\n # Expected value.\n exp = [\n [\n [0x8b, 0x93, 0x8d, 0x84, 0x73, 0x68, 0x8e, 0x82],\n [0x8a, 0x6a, 0x6c, 0x6c, 0x72, 0x6f, 0x68, 0x83],\n [0x88, 0x8a, 0x85, 0x77, 0x97, 0x8b, 0x8b, 0x6c],\n [0x76, 0x82, 0x94, 0x92, 0x7f, 0x8e, 0x76, 0x6f],\n [0x6a, 0x8d, 0x8b, 0x74, 0x8e, 0x74, 0x66, 0x89],\n [0x75, 0x96, 0x7a, 0x82, 0x97, 0x8c, 0x87, 0x68],\n [0x82, 0x6d, 0x97, 0x7d, 0x85, 0x6f, 0x88, 0x82],\n [0x85, 0x88, 0x8b, 0x92, 0x68, 0x91, 0x81, 0x8e],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'mid': .5,\n 'scale': .1,\n 'seed': 'spam',\n 'ease': 'l',\n }\n cls = s.Random\n size = (1, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, 
cls, kwargs, size)\n\n def test_seededrandom_fill(self):\n \"\"\"When given the size of an array, return an array that\n contains randomly generated noise.\n \"\"\"\n exp = [\n [\n [0xb7, 0xe0, 0xc5, 0x95, 0x3f, 0x0b, 0xc7, 0x8d],\n [0xb2, 0x15, 0x20, 0x1f, 0x3e, 0x2f, 0x09, 0x92],\n [0xaa, 0xb6, 0x9d, 0x57, 0xf5, 0xb7, 0xbb, 0x1d],\n [0x52, 0x8a, 0xe5, 0xdb, 0x7d, 0xc7, 0x52, 0x2c],\n [0x16, 0xc5, 0xb9, 0x46, 0xca, 0x45, 0x02, 0xaf],\n [0x49, 0xef, 0x63, 0x8b, 0xf7, 0xbd, 0xa5, 0x0a],\n [0x8d, 0x21, 0xf7, 0x72, 0x9a, 0x2c, 0xaa, 0x8b],\n [0x9a, 0xa8, 0xba, 0xda, 0x0b, 0xd8, 0x86, 0xc9],\n ],\n [\n [0xe4, 0x10, 0xc0, 0xf4, 0xf6, 0x18, 0xf4, 0x94],\n [0xd7, 0x73, 0x80, 0xd2, 0x6b, 0xc8, 0x5d, 0xee],\n [0xb8, 0xcf, 0x10, 0x28, 0x7e, 0x7f, 0xe5, 0xfd],\n [0x5d, 0x91, 0xb5, 0x01, 0x78, 0x02, 0x5e, 0x1c],\n [0x05, 0x20, 0xb8, 0x23, 0x51, 0xc3, 0x67, 0x45],\n [0x94, 0x13, 0x72, 0x00, 0x68, 0x22, 0x63, 0xa5],\n [0x67, 0x7a, 0x77, 0xa6, 0xf9, 0xcf, 0x47, 0xc2],\n [0xe7, 0x73, 0xa0, 0xa6, 0xb5, 0x17, 0x05, 0x4c],\n ],\n ]\n src_class = s.SeededRandom\n src_kwargs = {'seed': 'spam'}\n size = (2, 8, 8)\n source_fill_test(self, exp, src_class, src_kwargs, size)\n\n def test_seededrandom_with_seed_repeats_noise(self):\n \"\"\"When given the same seed, two instances of SeededRandom\n should return the same noise.\n \"\"\"\n # Set up for expected values.\n seed = 'spam'\n size = (2, 8, 8)\n src_a = s.SeededRandom(seed)\n result = src_a.fill(size)\n\n # Expected value.\n exp = grayscale_to_ints_list(result)\n\n # Set up test data and state.\n src_b = s.SeededRandom(seed)\n\n # Run test.\n result = src_b.fill(size)\n\n # Extract actual test results.\n act = grayscale_to_ints_list(result)\n\n # Determine if test passed.\n self.assertListEqual(exp, act)\n\n def test_seededrandom_without_seed_not_repeat_noise(self):\n \"\"\"When given the same seed, two instances of SeededRandom\n should return the same noise.\n \"\"\"\n # Set up for expected values.\n seed_exp = 'spam'\n size = (2, 8, 8)\n src_a = s.SeededRandom(seed_exp)\n result = src_a.fill(size)\n\n # Expected value.\n exp = grayscale_to_ints_list(result)\n\n # Set up test data and state.\n seed_act = 'eggs'\n src_b = s.SeededRandom(seed_act)\n\n # Run test.\n result = src_b.fill(size)\n\n # Extract actual test results.\n act = grayscale_to_ints_list(result)\n\n # Determine if test passed.\n self.assertNotEqual(exp, act)\n\n def test_embers_fill(self):\n \"\"\"Given a size and a location, Embers.fill should fill the\n space with an amount of points or small dots of color that\n look like burning embers or stars.\n \"\"\"\n # Expected value.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x3c, 0xb4, 0xc0, 0xc0],\n [0x00, 0x25, 0x4f, 0x1a, 0x22, 0x65, 0x6c, 0x6c],\n [0x00, 0x4f, 0xa9, 0x38, 0xc0, 0x00, 0x00, 0x00],\n [0x00, 0x1a, 0x38, 0x13, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xc1, 0x00, 0x22, 0x66, 0x30, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x3d, 0xb6, 0x55, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'depth': 2,\n 'threshold': .95,\n 'blend': 'lighter',\n 'seed': 'spam',\n 'ease': 'l',\n }\n cls = s.Embers\n size = (1, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_worley_fill(self):\n \"\"\"Given a size and a location and whether the points should\n be scattered over the Z axis, Worley.fill should fill the\n space with Worley noise. 
Worley noise is a cellular noise\n algorithm, where the color value is determined by the pixel's\n distance from the closest of a number of randomly placed\n points.\n \"\"\"\n # Expected value.\n exp = [\n [\n [0x4d, 0x4d, 0x6d, 0xac, 0xf3, 0xe7, 0xbc, 0xac],\n [0x00, 0x00, 0x4d, 0x9a, 0xe7, 0xbc, 0x85, 0x6d],\n [0x4d, 0x4d, 0x6d, 0xac, 0xbc, 0xac, 0x6d, 0x4d],\n [0x6d, 0x85, 0xac, 0xbc, 0x85, 0x6d, 0x85, 0x6d],\n [0x85, 0x6d, 0x85, 0xac, 0x6d, 0x4d, 0x6d, 0xac],\n [0x6d, 0x4d, 0x6d, 0xac, 0x6d, 0x4d, 0x6d, 0xac],\n [0x85, 0x6d, 0x85, 0xac, 0x6d, 0x4d, 0x6d, 0xac],\n [0xbc, 0xac, 0xbc, 0xbc, 0x85, 0x6d, 0x85, 0xbc],\n ],\n [\n [0x6d, 0x6d, 0x85, 0xbc, 0xff, 0xd9, 0xac, 0x9a],\n [0x4d, 0x4d, 0x6d, 0xac, 0xf3, 0xac, 0x6d, 0x4d],\n [0x00, 0x4d, 0x85, 0xbc, 0xac, 0x9a, 0x4d, 0x00],\n [0x4d, 0x6d, 0xac, 0xac, 0x6d, 0x4d, 0x6d, 0x4d],\n [0x6d, 0x4d, 0x6d, 0x9a, 0x4d, 0x00, 0x4d, 0x9a],\n [0x4d, 0x00, 0x4d, 0x9a, 0x4d, 0x00, 0x4d, 0x9a],\n [0x6d, 0x4d, 0x6d, 0x9a, 0x4d, 0x00, 0x4d, 0x9a],\n [0xac, 0x9a, 0xac, 0xac, 0x6d, 0x4d, 0x6d, 0xac],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'points': 8,\n 'is_3d': True,\n 'seed': 'spam'\n }\n cls = s.Worley\n size = (2, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_worleycell_fill(self):\n \"\"\"Given a size and a location and whether the points should\n be scattered over the Z axis, Worley.fill should fill the\n space with Worley noise. Worley noise is a cellular noise\n algorithm, where the color value is determined by the pixel's\n distance from the closest of a number of randomly placed\n points.\n \"\"\"\n # Expected value.\n exp = [\n [\n [0xdc, 0xff, 0xff, 0xff, 0xfd, 0xcf, 0xab, 0xaa],\n [0xaa, 0xff, 0xff, 0xff, 0xdb, 0xaa, 0xaa, 0xaa],\n [0x55, 0xaa, 0xdc, 0xee, 0xad, 0xaa, 0xaa, 0xaa],\n [0x55, 0x78, 0xaa, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa],\n [0x55, 0x69, 0x78, 0x9b, 0xaa, 0xaa, 0xaa, 0x9b],\n [0x55, 0x63, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55],\n [0x55, 0x25, 0x22, 0x0f, 0x00, 0x00, 0x00, 0x0f],\n [0x2f, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'points': 4,\n 'seed': 'spam',\n }\n cls = s.WorleyCell\n size = (1, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n\nclass UnitNoiseTestCase(ut.TestCase):\n def test_animatedpath_draw_branches_at_same_time(self):\n \"\"\"Given a path that branches in two directions in places,\n draw those branches at the same time.\n \"\"\"\n # Expected value.\n exp = [\n [\n ((0, 0, 0), (0, 1, 0)),\n ((0, 1, 0), (0, 1, 1)),\n ((0, 1, 1), (0, 1, 2)),\n ((0, 1, 2), (0, 0, 2)),\n ((0, 0, 2), (0, 0, 1)),\n ],\n [\n None,\n None,\n ((0, 1, 2), (0, 2, 2)),\n ((0, 2, 2), (0, 2, 1)),\n ((0, 2, 1), (0, 2, 0)),\n ]\n ]\n\n # Set up test data and state.\n kwargs = {\n 'width': .4,\n 'unit': (1, 16, 16),\n 'seed': 'testa-',\n }\n cls = s.AnimatedPath\n size = (1, 64, 64)\n obj = cls(**kwargs)\n path = [\n ((0, 0, 0), (0, 1, 0)),\n ((0, 1, 0), (0, 1, 1)),\n ((0, 1, 1), (0, 1, 2)),\n ((0, 1, 2), (0, 0, 2)),\n ((0, 0, 2), (0, 0, 1)),\n ((0, 1, 2), (0, 2, 2)),\n ((0, 2, 2), (0, 2, 1)),\n ((0, 2, 1), (0, 2, 0)),\n ]\n\n # Run test.\n act = obj._find_branches(path)\n\n # Determine if test passed.\n self.assertListEqual(exp, act)\n\n def test_curtains_fill(self):\n \"\"\"Given a size and location, CosineCurtains.fill should\n return a space filled with randomized vertical stripes.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x11, 0x2a, 0x44, 0x5e, 0x77, 0x7e, 0x85, 0x8c,],\n [0x11, 0x2a, 0x44, 0x5e, 
0x77, 0x7e, 0x85, 0x8c,],\n [0x11, 0x2a, 0x44, 0x5e, 0x77, 0x7e, 0x85, 0x8c,],\n [0x11, 0x2a, 0x44, 0x5e, 0x77, 0x7e, 0x85, 0x8c,],\n [0x11, 0x2a, 0x44, 0x5e, 0x77, 0x7e, 0x85, 0x8c,],\n [0x11, 0x2a, 0x44, 0x5e, 0x77, 0x7e, 0x85, 0x8c,],\n [0x11, 0x2a, 0x44, 0x5e, 0x77, 0x7e, 0x85, 0x8c,],\n [0x11, 0x2a, 0x44, 0x5e, 0x77, 0x7e, 0x85, 0x8c,],\n ],\n [\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n [0x3a, 0x52, 0x69, 0x80, 0x97, 0x94, 0x92, 0x8f,],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'unit': '4,4,4',\n 'table': P,\n 'ease': 'l',\n }\n cls = s.Curtains\n size = (2, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_cosinecurtains_fill(self):\n \"\"\"Given a size and location, CosineCurtains.fill should\n return a space filled with randomized vertical stripes that\n has been eased with a cosine-based easing function.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n [0x29, 0x3a, 0x62, 0x8a, 0x9b, 0x85, 0x4e, 0x18],\n ],\n [\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n [0x29, 0x37, 0x59, 0x7b, 0x89, 0x7a, 0x57, 0x33],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'unit': (4, 4, 4),\n 'seed': 'spam',\n 'ease': 'l',\n }\n cls = s.CosineCurtains\n size = (2, 8, 8)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_path_fill(self):\n \"\"\"Given a size and location, Path.fill should return a space\n filled with with a path created by walking the unit grid by\n choosing the lowest neighboring value and never walking to the\n same location twice.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 
0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'width': .34,\n 'inset': (0, 1, 1),\n 'unit': (1, 3, 3),\n 'seed': 'spam',\n 'ease': 'l',\n }\n cls = s.Path\n size = (2, 10, 10)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_path_accepts_origin(self):\n \"\"\"Initialized with an origin, sources.Path should start\n walking the path from that set of coordinates in the unit\n grid.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'width': .34,\n 'inset': (0, 1, 1),\n 'origin': (0, 1, 1),\n 'unit': (1, 3, 3),\n 'seed': 'spam',\n 'ease': 'l',\n }\n cls = s.Path\n size = (2, 10, 10)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_path_accepts_descriptive_origin(self):\n \"\"\"Initialized with a descriptive origin, sources.Path should\n start walking the path from that set of coordinates in the unit\n grid.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 
0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'width': .34,\n 'inset': (0, 1, 1),\n 'origin': 'bottom-middle',\n 'unit': (1, 3, 3),\n 'seed': 'spam',\n 'ease': 'l',\n }\n cls = s.Path\n size = (2, 10, 10)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_perlin_fill(self):\n \"\"\"Given the size of a space to fill, Perlin.fill should\n return a np.array of that shape filled with noise.\n \"\"\"\n # Expected value.\n exp = [[\n [0x9f, 0x8e, 0x77, 0x60],\n [0xa5, 0x94, 0x7d, 0x65],\n [0x9f, 0x90, 0x7c, 0x68],\n [0x8b, 0x81, 0x74, 0x67],\n ],]\n\n # Set up test data and state.\n size = (1, 4, 4)\n start = (4, 0, 0)\n kwargs = {\n 'unit': (8, 8, 8),\n 'ease': '',\n 'table': P,\n }\n cls = s.Perlin\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size, start)\n\n def test_solvedpath_fill(self):\n \"\"\"Given a size and location, SolvedPath.fill should return a\n space filled with the solution to the maze created by path with\n the same seed value and size.\n \"\"\"\n # Expected values.\n exp = [\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n [\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],\n ],\n ]\n\n # Set up test data and state.\n kwargs = {\n 'start': 'tl',\n 'end': 'br',\n 'width': .34,\n 'inset': (0, 1, 1),\n 'unit': (1, 3, 3),\n 'seed': 'spam',\n 'ease': 'l',\n }\n cls = s.SolvedPath\n size = (2, 10, 10)\n\n # Run test.\n source_fill_test(self, exp, cls, kwargs, size)\n\n def test_unitnoise_seeds_table_creation(self):\n \"\"\"When initialized with a seed value, UnitNoise should use\n that value to seed the random generation of its table.\n \"\"\"\n # Set up expected values.\n class Spam(s.UnitNoise):\n def fill(*args, **kwargs):\n return None\n\n kwargs = {\n 'unit': (1024, 1024, 1024),\n 'seed': 'spam',\n }\n exp_obj = Spam(**kwargs)\n\n # Expected value.\n exp = exp_obj.table.tolist()\n\n # Run test.\n act_obj = Spam(**kwargs)\n\n # Extract actual data.\n act = act_obj.table.tolist()\n\n # Determine if test passed.\n 
self.assertListEqual(exp, act)\n\n def test_unitnoise_diff_seeds_diff_table(self):\n \"\"\"If you pass different seeds to two different UnitNoise\n objects, their tables will be different.\n \"\"\"\n # Set up expected values.\n class Spam(s.UnitNoise):\n def fill(*args, **kwargs):\n return None\n\n kwargs = {\n 'unit': (1024, 1024, 1024),\n 'seed': 'spam',\n }\n exp_obj = Spam(**kwargs)\n\n # Expected value.\n exp = exp_obj.table.tolist()\n\n # Set up test data and state.\n kwargs_act = {\n 'unit': kwargs['unit'],\n 'seed': 'eggs',\n }\n\n # Run test.\n act_obj = Spam(**kwargs_act)\n\n # Extract actual data.\n act = act_obj.table.tolist()\n\n # Determine if test passed.\n self.assertNotEqual(exp, act)\n\n def test_unitnoise_serializes_seed_not_table(self):\n \"\"\"If the UnitNoise object was given a seed,\n UnitNoise.asdict() should serialize the seed\n rather than the entire table.\n \"\"\"\n # Expected value.\n exp = {\n 'ease': 'l',\n 'unit': (1024, 1024, 1024),\n 'seed': 'spam',\n 'type': 'spam',\n }\n\n # Set up test data and state.\n class Spam(s.UnitNoise):\n def fill(*args, **kwargs):\n return None\n\n attrs = {\n 'unit': exp['unit'],\n 'seed': exp['seed']\n }\n obj = Spam(**attrs)\n\n # Run test.\n act = obj.asdict()\n\n # Determine if test passed.\n self.assertDictEqual(exp, act)\n\n def test_values_fill_with_noise(self):\n \"\"\"Given the size of each dimension of the noise,\n Values.fill should return an array that contains\n the expected noise.\n \"\"\"\n # Expected values.\n exp = [[\n [0x00, 0x40, 0x7f, 0xbf, 0xff,],\n [0x00, 0x40, 0x7f, 0xbf, 0xff,],\n [0x00, 0x40, 0x7f, 0xbf, 0xff,],\n ],]\n\n # Set up test data and state.\n table = [\n [\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n ],\n [\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n ],\n [\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n [0x00, 0x7f, 0xff, 0xff,],\n ],\n ]\n unit = (2, 2, 2)\n kwargs = {\n 'unit': unit,\n 'table': table,\n 'ease': 'l',\n }\n cls = s.Values\n size = (1, 3, 5)\n\n # Perform test.\n source_fill_test(self, exp, cls, kwargs, size)\n" ]
[ [ "numpy.around", "numpy.array", "numpy.zeros" ] ]
richford/qsiprep
[ "7499a1479691394775eeab571f36a86c1dac4b54" ]
[ "qsiprep/interfaces/reports.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nInterfaces to generate reportlets\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n\"\"\"\n\nimport os\nimport os.path as op\nimport time\nimport json\nimport re\nfrom collections import defaultdict\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401\nimport seaborn as sns\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nfrom scipy.io.matlab import loadmat\nimport pandas as pd\nimport numpy as np\nfrom nipype.interfaces.base import (\n traits, TraitedSpec, BaseInterfaceInputSpec,\n File, Directory, InputMultiPath, InputMultiObject, Str, isdefined,\n SimpleInterface)\nfrom nipype.interfaces import freesurfer as fs\nfrom nipype.interfaces.mixins import reporting\nimport nibabel as nb\nfrom dipy.core.sphere import HemiSphere\nfrom .gradients import concatenate_bvals, concatenate_bvecs\nfrom .qc import createB0_ColorFA_Mask_Sprites, createSprite4D\nfrom .bids import get_bids_params\nfrom ..niworkflows.viz.utils import peak_slice_series, odf_roi_plot\nfrom .converters import fib2amps, mif2amps\n\nSUBJECT_TEMPLATE = \"\"\"\\t<ul class=\"elem-desc\">\n\\t\\t<li>Subject ID: {subject_id}</li>\n\\t\\t<li>Structural images: {n_t1s:d} T1-weighted {t2w}</li>\n\\t\\t<li>Diffusion-weighted series: inputs {n_dwis:d}, outputs {n_outputs:d}</li>\n{groupings}\n\\t\\t<li>Resampling targets: {output_spaces}\n\\t\\t<li>FreeSurfer reconstruction: {freesurfer_status}</li>\n\\t</ul>\n\"\"\"\n\nDIFFUSION_TEMPLATE = \"\"\"\\t\\t<h3 class=\"elem-title\">Summary</h3>\n\\t\\t<ul class=\"elem-desc\">\n\\t\\t\\t<li>Phase-encoding (PE) direction: {pedir}</li>\n\\t\\t\\t<li>Susceptibility distortion correction: {sdc}</li>\n\\t\\t\\t<li>Coregistration Transform: {coregistration}</li>\n\\t\\t\\t<li>Denoising Window: {denoise_window}</li>\n\\t\\t\\t<li>HMC Transform: {hmc_transform}</li>\n\\t\\t\\t<li>HMC Model: {hmc_model}</li>\n\\t\\t\\t<li>DWI series resampled to spaces: {output_spaces}</li>\n\\t\\t\\t<li>Confounds collected: {confounds}</li>\n\\t\\t\\t<li>Impute slice threshold: {impute_slice_threshold}</li>\n\\t\\t</ul>\n{validation_reports}\n\"\"\"\n\nABOUT_TEMPLATE = \"\"\"\\t<ul>\n\\t\\t<li>qsiprep version: {version}</li>\n\\t\\t<li>qsiprep command: <code>{command}</code></li>\n\\t\\t<li>Date preprocessed: {date}</li>\n\\t</ul>\n</div>\n\"\"\"\n\nTOPUP_TEMPLATE = \"\"\"\\\n\\t\\t<p class=\"elem-desc\">\n\\t\\t{summary}</p>\n\"\"\"\n\nGROUPING_TEMPLATE = \"\"\"\\t<ul>\n\\t\\t<li>Output Name: {output_name}</li>\n{input_files}\n</ul>\n\"\"\"\n\nINTERACTIVE_TEMPLATE = \"\"\"\n<script src=\"https://unpkg.com/vue\"></script>\n<script src=\"https://nipreps.github.io/dmriprep-viewer/dmriprepReport.umd.min.js\"></script>\n<link rel=\"stylesheet\" href=\"https://nipreps.github.io/dmriprep-viewer/dmriprepReport.css\">\n\n<div id=\"app\">\n <demo :report=\"report\"></demo>\n</div>\n\n<script>\nvar report = REPORT\n new Vue({\n components: {\n demo: dmriprepReport\n },\n data () {\n return {\n report\n }\n }\n }).$mount('#app')\n\n</script>\n\"\"\"\n\n\nclass SummaryOutputSpec(TraitedSpec):\n out_report = File(exists=True, desc='HTML segment containing summary')\n\n\nclass SummaryInterface(SimpleInterface):\n output_spec = SummaryOutputSpec\n\n def _generate_segment(self):\n raise NotImplementedError()\n\n def _run_interface(self, runtime):\n segment = self._generate_segment()\n fname = os.path.join(runtime.cwd, 'report.html')\n 
with open(fname, 'w') as fobj:\n fobj.write(segment)\n self._results['out_report'] = fname\n return runtime\n\n\nclass SubjectSummaryInputSpec(BaseInterfaceInputSpec):\n t1w = InputMultiPath(File(exists=True), desc='T1w structural images')\n t2w = InputMultiPath(File(exists=True), desc='T2w structural images')\n subjects_dir = Directory(desc='FreeSurfer subjects directory')\n subject_id = Str(desc='Subject ID')\n dwi_groupings = traits.Dict(desc='groupings of DWI files and their output names')\n output_spaces = traits.List(desc='Target spaces')\n template = traits.Enum('MNI152NLin2009cAsym', desc='Template space')\n\n\nclass SubjectSummaryOutputSpec(SummaryOutputSpec):\n # This exists to ensure that the summary is run prior to the first ReconAll\n # call, allowing a determination whether there is a pre-existing directory\n subject_id = Str(desc='FreeSurfer subject ID')\n\n\nclass SubjectSummary(SummaryInterface):\n input_spec = SubjectSummaryInputSpec\n output_spec = SubjectSummaryOutputSpec\n\n def _run_interface(self, runtime):\n if isdefined(self.inputs.subject_id):\n self._results['subject_id'] = self.inputs.subject_id\n return super(SubjectSummary, self)._run_interface(runtime)\n\n def _generate_segment(self):\n if not isdefined(self.inputs.subjects_dir):\n freesurfer_status = 'Not run'\n else:\n recon = fs.ReconAll(subjects_dir=self.inputs.subjects_dir,\n subject_id=self.inputs.subject_id,\n T1_files=self.inputs.t1w,\n flags='-noskullstrip')\n if recon.cmdline.startswith('echo'):\n freesurfer_status = 'Pre-existing directory'\n else:\n freesurfer_status = 'Run by qsiprep'\n\n output_spaces = [self.inputs.template if space == 'template' else space\n for space in self.inputs.output_spaces]\n\n t2w_seg = ''\n if self.inputs.t2w:\n t2w_seg = '(+ {:d} T2-weighted)'.format(len(self.inputs.t2w))\n\n # Add text for how the dwis are grouped\n n_dwis = 0\n n_outputs = 0\n groupings = ''\n if isdefined(self.inputs.dwi_groupings):\n for output_fname, group_info in self.inputs.dwi_groupings.items():\n n_outputs += 1\n files_desc = []\n files_desc.append(\n '\\t\\t\\t<li>Scan group: %s (PE Dir %s)</li><ul>' % (\n output_fname, group_info['dwi_series_pedir']))\n files_desc.append('\\t\\t\\t\\t<li>DWI Files: </li>')\n for dwi_file in group_info['dwi_series']:\n files_desc.append(\"\\t\\t\\t\\t\\t<li> %s </li>\" % dwi_file)\n n_dwis += 1\n fieldmap_type = group_info['fieldmap_info']['suffix']\n if fieldmap_type is not None:\n files_desc.append('\\t\\t\\t\\t<li>Fieldmap type: %s </li>' % fieldmap_type)\n\n for key, value in group_info['fieldmap_info'].items():\n files_desc.append(\"\\t\\t\\t\\t\\t<li> %s: %s </li>\" % (key, str(value)))\n n_dwis += 1\n files_desc.append(\"</ul>\")\n groupings += GROUPING_TEMPLATE.format(output_name=output_fname,\n input_files='\\n'.join(files_desc))\n\n return SUBJECT_TEMPLATE.format(subject_id=self.inputs.subject_id,\n n_t1s=len(self.inputs.t1w),\n t2w=t2w_seg,\n n_dwis=n_dwis,\n n_outputs=n_outputs,\n groupings=groupings,\n output_spaces=', '.join(output_spaces),\n freesurfer_status=freesurfer_status)\n\n\nclass DiffusionSummaryInputSpec(BaseInterfaceInputSpec):\n distortion_correction = traits.Str(desc='Susceptibility distortion correction method',\n mandatory=True)\n pe_direction = traits.Enum(None, 'i', 'i-', 'j', 'j-', mandatory=True,\n desc='Phase-encoding direction detected')\n distortion_correction = traits.Str(mandatory=True, desc='Method used for SDC')\n impute_slice_threshold = traits.CFloat(desc='threshold for imputing a slice')\n hmc_transform = 
traits.Str(mandatory=True, desc='transform used during HMC')\n hmc_model = traits.Str(desc='model used for hmc')\n b0_to_t1w_transform = traits.Enum(\"Rigid\", \"Affine\", desc='Transform type for coregistration')\n dwi_denoise_window = traits.Int(desc='window size for dwidenoise')\n output_spaces = traits.List(desc='Target spaces')\n confounds_file = File(exists=True, desc='Confounds file')\n validation_reports = InputMultiObject(File(exists=True))\n\n\nclass DiffusionSummary(SummaryInterface):\n input_spec = DiffusionSummaryInputSpec\n\n def _generate_segment(self):\n if self.inputs.pe_direction is None:\n pedir = 'MISSING - Assuming Anterior-Posterior'\n else:\n pedir = {'i': 'Left-Right', 'j': 'Anterior-Posterior'}[self.inputs.pe_direction[0]]\n\n if isdefined(self.inputs.confounds_file):\n with open(self.inputs.confounds_file) as cfh:\n conflist = cfh.readline().strip('\\n').strip()\n else:\n conflist = ''\n\n validation_summaries = []\n for summary in self.inputs.validation_reports:\n with open(summary, 'r') as summary_f:\n validation_summaries.extend(summary_f.readlines())\n validation_summary = '\\n'.join(validation_summaries)\n\n return DIFFUSION_TEMPLATE.format(\n pedir=pedir,\n sdc=self.inputs.distortion_correction,\n coregistration=self.inputs.b0_to_t1w_transform,\n hmc_transform=self.inputs.hmc_transform,\n hmc_model=self.inputs.hmc_model,\n denoise_window=self.inputs.dwi_denoise_window,\n output_spaces=', '.join(self.inputs.output_spaces),\n confounds=re.sub(r'[\\t ]+', ', ', conflist),\n impute_slice_threshold=self.inputs.impute_slice_threshold,\n validation_reports=validation_summary\n )\n\n\nclass AboutSummaryInputSpec(BaseInterfaceInputSpec):\n version = Str(desc='qsiprep version')\n command = Str(desc='qsiprep command')\n # Date not included - update timestamp only if version or command changes\n\n\nclass AboutSummary(SummaryInterface):\n input_spec = AboutSummaryInputSpec\n\n def _generate_segment(self):\n return ABOUT_TEMPLATE.format(version=self.inputs.version,\n command=self.inputs.command,\n date=time.strftime(\"%Y-%m-%d %H:%M:%S %z\"))\n\n\nclass TopupSummaryInputSpec(BaseInterfaceInputSpec):\n summary = Str(desc='Summary of TOPUP inputs')\n\n\nclass TopupSummary(SummaryInterface):\n input_spec = TopupSummaryInputSpec\n\n def _generate_segment(self):\n return TOPUP_TEMPLATE.format(summary=self.inputs.summary)\n\n\nclass GradientPlotInputSpec(BaseInterfaceInputSpec):\n orig_bvec_files = InputMultiObject(File(exists=True), mandatory=True,\n desc='bvecs from DWISplit')\n orig_bval_files = InputMultiObject(File(exists=True), mandatory=True,\n desc='bvals from DWISplit')\n source_files = traits.List(desc='source file for each gradient')\n final_bvec_file = File(exists=True, desc='bval file')\n\n\nclass GradientPlotOutputSpec(SummaryOutputSpec):\n plot_file = File(exists=True)\n\n\nclass GradientPlot(SummaryInterface):\n input_spec = GradientPlotInputSpec\n output_spec = GradientPlotOutputSpec\n\n def _run_interface(self, runtime):\n outfile = os.path.join(runtime.cwd, \"bvec_plot.gif\")\n sns.set_style(\"whitegrid\")\n sns.set_context(\"paper\", font_scale=0.8)\n\n orig_bvecs = concatenate_bvecs(self.inputs.orig_bvec_files)\n bvals = concatenate_bvals(self.inputs.orig_bval_files, None)\n if isdefined(self.inputs.source_files):\n file_array = np.array(self.inputs.source_files)\n _, filenums = np.unique(file_array, return_inverse=True)\n else:\n filenums = np.ones_like(bvals)\n\n # Account for the possibility that this is a PE Pair average\n if len(filenums) == len(bvals) 
* 2:\n filenums = filenums[:len(bvals)]\n\n # Plot the final bvecs if provided\n final_bvecs = None\n if isdefined(self.inputs.final_bvec_file):\n final_bvecs = np.loadtxt(self.inputs.final_bvec_file).T\n\n plot_gradients(bvals, orig_bvecs, filenums, outfile, final_bvecs)\n self._results['plot_file'] = outfile\n return runtime\n\n\ndef plot_gradients(bvals, orig_bvecs, source_filenums, output_fname, final_bvecs=None,\n frames=60):\n qrads = np.sqrt(bvals)\n qvecs = (qrads[:, np.newaxis] * orig_bvecs)\n qx, qy, qz = qvecs.T\n maxvals = qvecs.max(0)\n minvals = qvecs.min(0)\n\n def add_lines(ax):\n labels = ['L', 'P', 'S']\n for axnum in range(3):\n minvec = np.zeros(3)\n maxvec = np.zeros(3)\n minvec[axnum] = minvals[axnum]\n maxvec[axnum] = maxvals[axnum]\n x, y, z = np.column_stack([minvec, maxvec])\n ax.plot(x, y, z, color=\"k\")\n txt_pos = maxvec + 5\n ax.text(txt_pos[0], txt_pos[1], txt_pos[2], labels[axnum], size=8,\n zorder=1, color='k')\n\n if final_bvecs is not None:\n if final_bvecs.shape[0] == 3:\n final_bvecs = final_bvecs.T\n fqx, fqy, fqz = (qrads[:, np.newaxis] * final_bvecs).T\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5),\n subplot_kw={\"aspect\": \"equal\", \"projection\": \"3d\"})\n orig_ax = axes[0]\n final_ax = axes[1]\n axes_list = [orig_ax, final_ax]\n final_ax.scatter(fqx, fqy, fqz, c=source_filenums, marker=\"+\")\n orig_ax.scatter(qx, qy, qz, c=source_filenums, marker=\"+\")\n final_ax.axis('off')\n add_lines(final_ax)\n final_ax.set_title('After Preprocessing')\n else:\n fig, orig_ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5),\n subplot_kw={\"aspect\": \"equal\", \"projection\": \"3d\"})\n axes_list = [orig_ax]\n orig_ax.scatter(qx, qy, qz, c=source_filenums, marker=\"+\")\n orig_ax.axis('off')\n orig_ax.set_title(\"Original Scheme\")\n add_lines(orig_ax)\n # Animate rotating the axes\n rotate_amount = np.ones(frames) * 180 / frames\n stay_put = np.zeros_like(rotate_amount)\n rotate_azim = np.concatenate([rotate_amount, stay_put, -rotate_amount, stay_put])\n rotate_elev = np.concatenate([stay_put, rotate_amount, stay_put, -rotate_amount])\n plt.tight_layout()\n\n def rotate(i):\n for ax in axes_list:\n ax.azim += rotate_azim[i]\n ax.elev += rotate_elev[i]\n return tuple(axes_list)\n\n anim = animation.FuncAnimation(fig, rotate, frames=frames*4,\n interval=20, blit=False)\n anim.save(output_fname, writer='imagemagick', fps=32)\n\n plt.close(fig)\n fig = None\n\n\ndef topup_selection_to_report(selected_indices, original_files, spec_lookup,\n image_source='combined DWI series'):\n \"\"\"Write a description of how the images were selected for TOPUP.\n\n >>> selected_indices = [0, 15, 30, 45]\n >>> original_files = [\"sub-1_dir-AP_dwi.nii.gz\"] * 30 + [\"sub-1_dir-PA_dwi.nii.gz\"] * 30\n >>> spec_lookup = {\"sub-1_dir-AP_dwi.nii.gz\": \"0 1 0 0.087\",\n ... \"sub-1_dir-PA_dwi.nii.gz\": \"0 -1 0 0.087\"}\n >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))\n A total of 2 distortion groups was included in the combined dwi data. Distortion \\\ngroup '0 1 0 0.087' was represented by images 0, 15 from sub-1_dir-AP_dwi.nii.gz. \\\nDistortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz. \"\n\n Or\n\n >>> selected_indices = [0, 15, 30, 45]\n >>> original_files = [\"sub-1_dir-AP_run-1_dwi.nii.gz\"] * 15 + [\n ... \"sub-1_dir-AP_run-2_dwi.nii.gz\"] * 15 + [\n ... \"sub-1_dir-PA_dwi.nii.gz\"] * 30\n >>> spec_lookup = {\"sub-1_dir-AP_run-1_dwi.nii.gz\": \"0 1 0 0.087\",\n ... 
\"sub-1_dir-AP_run-2_dwi.nii.gz\": \"0 1 0 0.087\",\n ... \"sub-1_dir-PA_dwi.nii.gz\": \"0 -1 0 0.087\"}\n >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))\n A total of 2 distortion groups was included in the combined dwi data. Distortion \\\ngroup '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz and \\\nimage 0 from sub-1_dir-AP_run-2_dwi.nii.gz. Distortion group '0 -1 0 0.087' was represented \\\nby images 0, 15 from sub-1_dir-PA_dwi.nii.gz.\n\n >>> selected_indices = [0, 15, 30, 45, 60]\n >>> original_files = [\"sub-1_dir-AP_run-1_dwi.nii.gz\"] * 15 + [\n ... \"sub-1_dir-AP_run-2_dwi.nii.gz\"] * 15 + [\n ... \"sub-1_dir-AP_run-3_dwi.nii.gz\"] * 15 + [\n ... \"sub-1_dir-PA_dwi.nii.gz\"] * 30\n >>> spec_lookup = {\"sub-1_dir-AP_run-1_dwi.nii.gz\": \"0 1 0 0.087\",\n ... \"sub-1_dir-AP_run-2_dwi.nii.gz\": \"0 1 0 0.087\",\n ... \"sub-1_dir-AP_run-3_dwi.nii.gz\": \"0 1 0 0.087\",\n ... \"sub-1_dir-PA_dwi.nii.gz\": \"0 -1 0 0.087\"}\n >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))\n A total of 2 distortion groups was included in the combined dwi data. Distortion \\\ngroup '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz, \\\nimage 0 from sub-1_dir-AP_run-2_dwi.nii.gz and image 0 from sub-1_dir-AP_run-3_dwi.nii.gz. \\\nDistortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz.\n\n >>> selected_indices = [0, 15, 30, 45]\n >>> original_files = [\"sub-1_dir-PA_dwi.nii.gz\"] * 60\n >>> spec_lookup = {\"sub-1_dir-PA_dwi.nii.gz\": \"0 -1 0 0.087\"}\n >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))\n A total of 1 distortion group was included in the combined dwi data. \\\nDistortion group '0 -1 0 0.087' was represented by images 0, 15, 30, 45 \\\nfrom sub-1_dir-PA_dwi.nii.gz.\n\n \"\"\"\n image_indices = defaultdict(list)\n for imgnum, image in enumerate(original_files):\n image_indices[image].append(imgnum)\n\n # Collect the original volume number within each source image\n selected_per_image = defaultdict(list)\n for b0_index in selected_indices:\n b0_image = original_files[b0_index]\n first_index = min(image_indices[b0_image])\n within_image_index = b0_index - first_index\n selected_per_image[b0_image].append(within_image_index)\n\n # Collect the images and indices within each warp group\n selected_per_warp_group = defaultdict(list)\n for original_image, selection in selected_per_image.items():\n warp_group = spec_lookup[original_image]\n selected_per_warp_group[warp_group].append((original_image, selection))\n\n # Make the description\n num_groups = len(selected_per_warp_group)\n plural = 's' if num_groups > 1 else ''\n plural2 = 'were' if plural == 's' else 'was'\n desc = [\"A total of {num_groups} distortion group{plural} {plural2} included in the \"\n \"{image_source} data. \".format(num_groups=num_groups, plural=plural,\n plural2=plural2, image_source=image_source)]\n for distortion_group, image_list in selected_per_warp_group.items():\n group_desc = [\n \"Distortion group '{spec}' was represented by \".format(spec=distortion_group)]\n for image_name, image_indices in image_list:\n formatted_indices = \", \".join(map(str, image_indices))\n plural = 's' if len(image_indices) > 1 else ''\n group_desc += [\n \"image{plural} {imgnums} from {img_name}\".format(plural=plural,\n imgnums=formatted_indices,\n img_name=image_name),\n \", \"]\n group_desc[-1] = \". 
\"\n if len(image_list) > 1:\n group_desc[-3] = \" and \"\n desc += group_desc\n\n return ''.join(desc)\n\n\nclass _SeriesQCInputSpec(BaseInterfaceInputSpec):\n pre_qc = File(exists=True, desc='qc file from the raw data')\n t1_qc = File(exists=True, desc='qc file from preprocessed image in t1 space')\n mni_qc = File(exists=True, desc='qc file from preprocessed image in template space')\n confounds_file = File(exists=True, desc='confounds file')\n t1_dice_score = traits.Float()\n mni_dice_score = traits.Float()\n output_file_name = traits.File()\n\n\nclass _SeriesQCOutputSpec(TraitedSpec):\n series_qc_file = File(exists=True)\n\n\nclass SeriesQC(SimpleInterface):\n input_spec = _SeriesQCInputSpec\n output_spec = _SeriesQCOutputSpec\n\n def _run_interface(self, runtime):\n image_qc = _load_qc_file(self.inputs.pre_qc, prefix=\"raw_\")\n if isdefined(self.inputs.t1_qc):\n image_qc.update(_load_qc_file(self.inputs.t1_qc, prefix=\"t1_\"))\n if isdefined(self.inputs.mni_qc):\n image_qc.update(_load_qc_file(self.inputs.mni_qc, prefix=\"mni_\"))\n motion_summary = calculate_motion_summary(self.inputs.confounds_file)\n image_qc.update(motion_summary)\n\n # Add in Dice scores if available\n if isdefined(self.inputs.t1_dice_score):\n image_qc['t1_dice_distance'] = [self.inputs.t1_dice_score]\n if isdefined(self.inputs.mni_dice_score):\n image_qc['mni_dice_distance'] = [self.inputs.mni_dice_score]\n\n # Get the metadata\n output_file = self.inputs.output_file_name\n image_qc['file_name'] = output_file\n bids_info = get_bids_params(output_file)\n image_qc.update(bids_info)\n output = op.join(runtime.cwd, \"dwi_qc.csv\")\n pd.DataFrame(image_qc).to_csv(output, index=False)\n self._results['series_qc_file'] = output\n return runtime\n\n\ndef _load_qc_file(fname, prefix=\"\"):\n qc_data = pd.read_csv(fname).to_dict(orient='records')[0]\n renamed = dict([\n (prefix + key, value) for key, value in qc_data.items()])\n return renamed\n\n\ndef motion_derivatives(translations, rotations, framewise_disp,\n original_files):\n\n def padded_diff(data):\n out = np.zeros_like(data)\n out[1:] = np.diff(data, axis=0)\n return out\n\n drotations = padded_diff(rotations)\n dtranslations = padded_diff(translations)\n\n # We don't want the relative values across the boundaries of runs.\n # Determine which values should be ignored\n file_labels, _ = pd.factorize(original_files)\n new_files = padded_diff(file_labels)\n\n def file_masked(data):\n masked_data = data.copy()\n masked_data[new_files > 0] = 0\n return masked_data\n\n framewise_disp = file_masked(framewise_disp)\n return {\n \"mean_fd\": [framewise_disp.mean()],\n \"max_fd\": [framewise_disp.max()],\n \"max_rotation\": [file_masked(np.abs(rotations)).max()],\n \"max_translation\": [file_masked(np.abs(translations)).max()],\n \"max_rel_rotation\": [file_masked(np.abs(drotations)).max()],\n \"max_rel_translation\": [file_masked(np.abs(dtranslations)).max()]\n }\n\n\ndef calculate_motion_summary(confounds_tsv):\n if not isdefined(confounds_tsv) or confounds_tsv is None:\n return {\n \"mean_fd\": [np.nan],\n \"max_fd\": [np.nan],\n \"max_rotation\": [np.nan],\n \"max_translation\": [np.nan],\n \"max_rel_rotation\": [np.nan],\n \"max_rel_translation\": [np.nan]\n }\n df = pd.read_csv(confounds_tsv, delimiter=\"\\t\")\n\n # the default case where each output image comes from one input image\n if 'trans_x' in df.columns:\n translations = df[['trans_x', 'trans_y', 'trans_z']].values\n rotations = df[['rot_x', 'rot_y', 'rot_z']].values\n return 
motion_derivatives(translations, rotations, df['framewise_displacement'],\n df['original_file'])\n\n # If there was a PE Pair averaging, get motion from both\n motion1 = motion_derivatives(df[['trans_x_1', 'trans_y_1', 'trans_z_1']].values,\n df[['rot_x_1', 'rot_y_1', 'rot_z_1']].values,\n df['framewise_displacement_1'],\n df['original_file_1'])\n\n motion2 = motion_derivatives(df[['trans_x_2', 'trans_y_2', 'trans_z_2']].values,\n df[['rot_x_2', 'rot_y_2', 'rot_z_2']].values,\n df['framewise_displacement_2'],\n df['original_file_2'])\n\n # Combine the FDs from both PE directions\n # both_fd = np.column_stack([m1, m2])\n # framewise_disp = both_fd[np.nanargmax(np.abs(both_fd), axis=1)]\n def compare_series(key_name, comparator):\n m1 = motion1[key_name][0]\n m2 = motion2[key_name][0]\n return [comparator(m1, m2)]\n\n return {\n \"mean_fd\": compare_series(\"mean_fd\", lambda a, b: (a + b) / 2),\n \"max_fd\": compare_series(\"max_fd\", max),\n \"max_rotation\": compare_series(\"max_rotation\", max),\n \"max_translation\": compare_series(\"max_translation\", max),\n \"max_rel_rotation\": compare_series(\"max_rel_rotation\", max),\n \"max_rel_translation\": compare_series(\"max_rel_translation\", max)\n }\n\n\nclass _InteractiveReportInputSpec(TraitedSpec):\n raw_dwi_file = File(exists=True, mandatory=True)\n processed_dwi_file = File(exists=True, mandatory=True)\n confounds_file = File(exists=True, mandatory=True)\n mask_file = File(exists=True, mandatory=True)\n color_fa = File(exists=True, mandatory=True)\n carpetplot_data = File(exists=True, mandatory=True)\n series_qc_file = File(exists=True, mandatory=True)\n\n\nclass InteractiveReport(SimpleInterface):\n input_spec = _InteractiveReportInputSpec\n output_spec = SummaryOutputSpec\n\n def _run_interface(self, runtime):\n report = {}\n report['dwi_corrected'] = createSprite4D(self.inputs.processed_dwi_file)\n\n b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(self.inputs.processed_dwi_file,\n self.inputs.color_fa,\n self.inputs.mask_file)\n report['carpetplot'] = []\n if isdefined(self.inputs.carpetplot_data):\n with open(self.inputs.carpetplot_data, 'r') as carpet_f:\n carpet_data = json.load(carpet_f)\n report.update(carpet_data)\n\n # Load the QC file\n report['qc_scores'] = json.loads(\n pd.read_csv(self.inputs.series_qc_file).to_json(orient=\"records\"))[0]\n\n report['b0'] = b0\n report['colorFA'] = colorFA\n report['anat_mask'] = mask\n report['outlier_volumes'] = []\n report['eddy_params'] = [[i, i] for i in range(30)]\n eddy_qc = {}\n report['eddy_quad'] = eddy_qc\n report['subject_id'] = \"sub-test\"\n report['analysis_level'] = \"participant\"\n report['pipeline'] = \"qsiprep\"\n report['boilerplate'] = \"boilerplate\"\n\n df = pd.read_csv(self.inputs.confounds_file, delimiter=\"\\t\")\n translations = df[['trans_x', 'trans_y', 'trans_z']].values\n rms = np.sqrt((translations ** 2).sum(1))\n fdisp = df['framewise_displacement'].tolist()\n fdisp[0] = None\n report['eddy_params'] = [[fd_, rms_] for fd_, rms_ in zip(fdisp, rms)]\n\n # Get the sampling scheme\n xyz = df[[\"grad_x\", \"grad_y\", \"grad_z\"]].values\n bval = df['bval'].values\n qxyz = np.sqrt(bval)[:, None] * xyz\n report['q_coords'] = qxyz.tolist()\n report['color'] = _filename_to_colors(df['original_file'])\n\n safe_json = json.dumps(report)\n out_file = op.join(runtime.cwd, \"interactive_report.json\")\n with open(out_file, \"w\") as out_html:\n out_html.write(safe_json)\n self._results['out_report'] = out_file\n return runtime\n\n\ndef 
_filename_to_colors(labels_column, colormap=\"rainbow\"):\n cmap = matplotlib.cm.get_cmap(colormap)\n labels, _ = pd.factorize(labels_column)\n n_samples = labels.shape[0]\n max_label = labels.max()\n if max_label == 0:\n return [(1.0, 0.0, 0.0)] * n_samples\n labels = labels / max_label\n colors = np.array([cmap(label) for label in labels])\n return colors.tolist()\n\n\nclass _ReconPeaksReportInputSpec(BaseInterfaceInputSpec):\n mif_file = File(exists=True)\n fib_file = File(exists=True)\n odf_file = File(exists=True)\n directions_file = File(exists=True)\n mask_file = File(exists=True)\n background_image = File(exists=True)\n odf_rois = File(exists=True)\n subtract_iso = traits.Bool(False, usedefault=True,\n desc='subtract isotropic component from ODFs')\n\n\nclass _ReconPeaksReportOutputSpec(reporting.ReportCapableOutputSpec):\n odf_report = File(exists=True)\n\n\nclass ReconPeaksReport(SimpleInterface):\n input_spec = _ReconPeaksReportInputSpec\n output_spec = _ReconPeaksReportOutputSpec\n _ncuts = 4\n _padding = 4\n _redirect_x = True\n\n def _run_interface(self, runtime):\n \"\"\"Generate a reportlet.\"\"\"\n if isdefined(self.inputs.mif_file):\n odf_img, directions = mif2amps(self.inputs.mif_file, runtime.cwd)\n elif isdefined(self.inputs.fib_file):\n odf_img, directions = fib2amps(self.inputs.fib_file,\n self.inputs.background_image,\n runtime.cwd)\n elif isdefined(self.inputs.odf_file) and isdefined(self.inputs.directions_file):\n odf_img = nb.load(self.inputs.odf_file)\n directions = np.load(self.inputs.directions_file)\n else:\n raise Exception('Requires either a mif file or fib file')\n odf_4d = odf_img.get_fdata()\n sphere = HemiSphere(xyz=directions.astype(np.float))\n if not isdefined(self.inputs.background_image) or self.inputs.background_image is None:\n background_data = odf_4d.mean(3)\n else:\n background_data = nb.load(self.inputs.background_image).get_fdata()\n\n peak_report = op.join(runtime.cwd, 'peak_report.png')\n peak_slice_series(odf_4d, sphere, background_data, peak_report,\n n_cuts=self._ncuts, mask_image=self.inputs.mask_file,\n padding=self._padding)\n self._results['out_report'] = peak_report\n\n # Plot ODFs in interesting regions\n if isdefined(self.inputs.odf_rois):\n odf_report = op.join(runtime.cwd, 'odf_report.png')\n odf_roi_plot(odf_4d, sphere, background_data, odf_report, self.inputs.odf_rois,\n subtract_iso=self.inputs.subtract_iso,\n mask=self.inputs.mask_file)\n self._results['odf_report'] = odf_report\n return runtime\n\n\nclass _ConnectivityReportInputSpec(BaseInterfaceInputSpec):\n connectivity_matfile = File(exists=True)\n\n\nclass _ConnectivityReportOutputSpec(reporting.ReportCapableOutputSpec):\n odf_report = File(exists=True)\n\n\nclass ConnectivityReport(SimpleInterface):\n input_spec = _ConnectivityReportInputSpec\n output_spec = _ConnectivityReportOutputSpec\n\n def _run_interface(self, runtime):\n \"\"\"Generate a reportlet.\"\"\"\n mat = loadmat(self.inputs.connectivity_matfile)\n connectivity_keys = [key for key in mat.keys() if key.endswith('connectivity')]\n atlases = sorted(set([key.split(\"_\")[0] for key in connectivity_keys]))\n measures = sorted(set([\"_\".join(key.split(\"_\")[1:-1]) for key in connectivity_keys]))\n nrows = len(atlases)\n ncols = len(measures)\n fig, ax = plt.subplots(nrows=nrows, ncols=ncols, squeeze=False)\n for connectivity_key in connectivity_keys:\n atlas = connectivity_key.split(\"_\")[0]\n measure = \"_\".join(connectivity_key.split(\"_\")[1:-1])\n row = atlases.index(atlas)\n col = 
measures.index(measure)\n ax[row, col].imshow(mat[connectivity_key], interpolation='nearest',\n cmap=\"Greys\", aspect='equal')\n ax[row, col].set_xticks([])\n ax[row, col].set_yticks([])\n fig.set_size_inches((ncols, nrows))\n fig.subplots_adjust(left=0.05, top=0.95, wspace=0, hspace=0, bottom=0, right=1)\n\n for measure_num, measure_name in enumerate(measures):\n ax[0, measure_num].set_title(measure_name.replace('_', '/'),\n fontdict={'fontsize': 6})\n for atlas_num, atlas_name in enumerate(atlases):\n ax[atlas_num, 0].set_ylabel(atlas_name, fontdict={'fontsize': 8})\n\n conn_report = op.join(runtime.cwd, 'conn_report.svg')\n fig.savefig(conn_report)\n self._results['out_report'] = conn_report\n return runtime\n" ]
[ [ "numpy.sqrt", "pandas.DataFrame", "numpy.concatenate", "numpy.zeros_like", "matplotlib.pyplot.tight_layout", "pandas.read_csv", "numpy.ones_like", "numpy.unique", "numpy.diff", "matplotlib.pyplot.close", "numpy.column_stack", "numpy.load", "numpy.zeros", "pandas.factorize", "matplotlib.animation.FuncAnimation", "numpy.array", "numpy.abs", "matplotlib.pyplot.subplots", "numpy.ones", "scipy.io.matlab.loadmat", "matplotlib.cm.get_cmap", "numpy.loadtxt" ] ]
olzhaskabdolov/bot
[ "ea4bd182affe9e607ddb06cf1d7001d6474f10aa" ]
[ "drqa/pipeline/drqa.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Full DrQA pipeline.\"\"\"\n\nimport torch\nimport regex\nimport heapq\nimport math\nimport time\nimport logging\n\nfrom multiprocessing import Pool as ProcessPool\nfrom multiprocessing.util import Finalize\n\nfrom ..reader.vector import batchify\nfrom ..reader.data import ReaderDataset, SortedBatchSampler\nfrom .. import reader\nfrom .. import tokenizers\nfrom . import DEFAULTS\n\nlogger = logging.getLogger(__name__)\n\n\n# ------------------------------------------------------------------------------\n# Multiprocessing functions to fetch and tokenize text\n# ------------------------------------------------------------------------------\n\nPROCESS_TOK = None\nPROCESS_DB = None\nPROCESS_CANDS = None\n\n\ndef init(tokenizer_class, tokenizer_opts, db_class, db_opts, candidates=None):\n global PROCESS_TOK, PROCESS_DB, PROCESS_CANDS\n PROCESS_TOK = tokenizer_class(**tokenizer_opts)\n Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)\n PROCESS_DB = db_class(**db_opts)\n Finalize(PROCESS_DB, PROCESS_DB.close, exitpriority=100)\n PROCESS_CANDS = candidates\n\n\ndef fetch_text(doc_id):\n global PROCESS_DB\n return PROCESS_DB.get_doc_text(doc_id)\n\n\ndef tokenize_text(text):\n global PROCESS_TOK\n return PROCESS_TOK.tokenize(text)\n\n\n# ------------------------------------------------------------------------------\n# Main DrQA pipeline\n# ------------------------------------------------------------------------------\n\n\nclass DrQA(object):\n # Target size for squashing short paragraphs together.\n # 0 = read every paragraph independently\n # infty = read all paragraphs together\n GROUP_LENGTH = 0\n\n def __init__(\n self,\n reader_model=None,\n embedding_file=None,\n tokenizer=None,\n fixed_candidates=None,\n batch_size=128,\n cuda=True,\n data_parallel=False,\n max_loaders=5,\n num_workers=None,\n db_config=None,\n ranker_config=None\n ):\n \"\"\"Initialize the pipeline.\n\n Args:\n reader_model: model file from which to load the DocReader.\n embedding_file: if given, will expand DocReader dictionary to use\n all available pretrained embeddings.\n tokenizer: string option to specify tokenizer used on docs.\n fixed_candidates: if given, all predictions will be constrated to\n the set of candidates contained in the file. 
One entry per line.\n batch_size: batch size when processing paragraphs.\n cuda: whether to use the gpu.\n data_parallel: whether to use multile gpus.\n max_loaders: max number of async data loading workers when reading.\n (default is fine).\n num_workers: number of parallel CPU processes to use for tokenizing\n and post processing resuls.\n db_config: config for doc db.\n ranker_config: config for ranker.\n \"\"\"\n self.batch_size = batch_size\n self.max_loaders = max_loaders\n self.fixed_candidates = fixed_candidates is not None\n self.cuda = cuda\n\n logger.info('Initializing document ranker...')\n ranker_config = ranker_config or {}\n ranker_class = ranker_config.get('class', DEFAULTS['ranker'])\n ranker_opts = ranker_config.get('options', {})\n self.ranker = ranker_class(**ranker_opts)\n\n logger.info('Initializing document reader...')\n reader_model = reader_model or DEFAULTS['reader_model']\n self.reader = reader.DocReader.load(reader_model, normalize=False)\n if embedding_file:\n logger.info('Expanding dictionary...')\n words = reader.utils.index_embedding_words(embedding_file)\n added = self.reader.expand_dictionary(words)\n self.reader.load_embeddings(added, embedding_file)\n if cuda:\n self.reader.cuda()\n if data_parallel:\n self.reader.parallelize()\n\n if not tokenizer:\n tok_class = DEFAULTS['tokenizer']\n else:\n tok_class = tokenizers.get_class(tokenizer)\n annotators = tokenizers.get_annotators_for_model(self.reader)\n tok_opts = {'annotators': annotators}\n\n db_config = db_config or {}\n db_class = db_config.get('class', DEFAULTS['db'])\n db_opts = db_config.get('options', {})\n\n logger.info('Initializing tokenizers and document retrievers...')\n self.num_workers = num_workers\n self.processes = ProcessPool(\n num_workers,\n initializer=init,\n initargs=(tok_class, tok_opts, db_class, db_opts, fixed_candidates)\n )\n\n def _split_doc(self, doc):\n \"\"\"Given a doc, split it into chunks (by paragraph).\"\"\"\n curr = []\n curr_len = 0\n for split in regex.split(r'\\n+', doc):\n split = split.strip()\n if len(split) == 0:\n continue\n # Maybe group paragraphs together until we hit a length limit\n if len(curr) > 0 and curr_len + len(split) > self.GROUP_LENGTH:\n yield ' '.join(curr)\n curr = []\n curr_len = 0\n curr.append(split)\n curr_len += len(split)\n if len(curr) > 0:\n yield ' '.join(curr)\n\n def _get_loader(self, data, num_loaders):\n \"\"\"Return a pytorch data iterator for provided examples.\"\"\"\n dataset = ReaderDataset(data, self.reader)\n sampler = SortedBatchSampler(\n dataset.lengths(),\n self.batch_size,\n shuffle=False\n )\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=self.batch_size,\n sampler=sampler,\n num_workers=num_loaders,\n collate_fn=batchify,\n pin_memory=self.cuda,\n )\n return loader\n\n def process(self, query, candidates=None, top_n=1, n_docs=5,\n return_context=False):\n \"\"\"Run a single query.\"\"\"\n predictions = self.process_batch(\n [query], [candidates] if candidates else None,\n top_n, n_docs, return_context\n )\n return predictions[0]\n\n def process_batch(self, queries, candidates=None, top_n=1, n_docs=5,\n return_context=False):\n \"\"\"Run a batch of queries (more efficient).\"\"\"\n t0 = time.time()\n logger.info('Processing %d queries...' % len(queries))\n logger.info('Retrieving top %d docs...' 
% n_docs)\n\n # Rank documents for queries.\n if len(queries) == 1:\n ranked = [self.ranker.closest_docs(queries[0], k=n_docs)]\n else:\n ranked = self.ranker.batch_closest_docs(\n queries, k=n_docs, num_workers=self.num_workers\n )\n all_docids, all_doc_scores = zip(*ranked)\n\n # Flatten document ids and retrieve text from database.\n # We remove duplicates for processing efficiency.\n flat_docids = list({d for docids in all_docids for d in docids})\n did2didx = {did: didx for didx, did in enumerate(flat_docids)}\n doc_texts = self.processes.map(fetch_text, flat_docids)\n\n # Split and flatten documents. Maintain a mapping from doc (index in\n # flat list) to split (index in flat list).\n flat_splits = []\n didx2sidx = []\n for text in doc_texts:\n splits = self._split_doc(text)\n didx2sidx.append([len(flat_splits), -1])\n for split in splits:\n flat_splits.append(split)\n didx2sidx[-1][1] = len(flat_splits)\n\n # Push through the tokenizers as fast as possible.\n q_tokens = self.processes.map_async(tokenize_text, queries)\n s_tokens = self.processes.map_async(tokenize_text, flat_splits)\n q_tokens = q_tokens.get()\n s_tokens = s_tokens.get()\n\n # Group into structured example inputs. Examples' ids represent\n # mappings to their question, document, and split ids.\n examples = []\n for qidx in range(len(queries)):\n for rel_didx, did in enumerate(all_docids[qidx]):\n start, end = didx2sidx[did2didx[did]]\n for sidx in range(start, end):\n if (len(q_tokens[qidx].words()) > 0 and\n len(s_tokens[sidx].words()) > 0):\n examples.append({\n 'id': (qidx, rel_didx, sidx),\n 'question': q_tokens[qidx].words(),\n 'qlemma': q_tokens[qidx].lemmas(),\n 'document': s_tokens[sidx].words(),\n 'lemma': s_tokens[sidx].lemmas(),\n 'pos': s_tokens[sidx].pos(),\n 'ner': s_tokens[sidx].entities(),\n })\n\n logger.info('Reading %d paragraphs...' 
% len(examples))\n\n # Push all examples through the document reader.\n # We decode argmax start/end indices asychronously on CPU.\n result_handles = []\n num_loaders = min(self.max_loaders, math.floor(len(examples) / 1e3))\n for batch in self._get_loader(examples, num_loaders):\n if candidates or self.fixed_candidates:\n batch_cands = []\n for ex_id in batch[-1]:\n batch_cands.append({\n 'input': s_tokens[ex_id[2]],\n 'cands': candidates[ex_id[0]] if candidates else None\n })\n handle = self.reader.predict(\n batch, batch_cands, async_pool=self.processes\n )\n else:\n handle = self.reader.predict(batch, async_pool=self.processes)\n result_handles.append((handle, batch[-1], batch[0].size(0)))\n\n # Iterate through the predictions, and maintain priority queues for\n # top scored answers for each question in the batch.\n queues = [[] for _ in range(len(queries))]\n for result, ex_ids, batch_size in result_handles:\n s, e, score = result.get()\n for i in range(batch_size):\n # We take the top prediction per split.\n if len(score[i]) > 0:\n item = (score[i][0], ex_ids[i], s[i][0], e[i][0])\n queue = queues[ex_ids[i][0]]\n if len(queue) < top_n:\n heapq.heappush(queue, item)\n else:\n heapq.heappushpop(queue, item)\n\n # Arrange final top prediction data.\n all_predictions = []\n for queue in queues:\n predictions = []\n while len(queue) > 0:\n score, (qidx, rel_didx, sidx), s, e = heapq.heappop(queue)\n prediction = {\n 'doc_id': all_docids[qidx][rel_didx],\n 'span': s_tokens[sidx].slice(s, e + 1).untokenize(),\n 'doc_score': float(all_doc_scores[qidx][rel_didx]),\n 'span_score': float(score),\n }\n if return_context:\n prediction['context'] = {\n 'text': s_tokens[sidx].untokenize(),\n 'start': s_tokens[sidx].offsets()[s][0],\n 'end': s_tokens[sidx].offsets()[e][1],\n }\n predictions.append(prediction)\n all_predictions.append(predictions[-1::-1])\n\n logger.info('Processed %d queries in %.4f (s)' %\n (len(queries), time.time() - t0))\n\n return all_predictions\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
zampie/GAN_framework
[ "8e2ff764b08b9199916fef66e49332ef7d21ae32" ]
[ "refer/train_cartoon_wgan.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport glob\nimport utils\nimport traceback\nimport numpy as np\nimport tensorflow as tf\nimport models_64x64 as models\n\n\n\"\"\" param \"\"\"\nepoch = 100\nbatch_size = 64\nlr = 0.0002\nz_dim = 100\nclip = 0.01\nn_critic = 5\ngpu_id = 0\n\n''' data '''\n# you should prepare your own data in ./data/faces\n# cartoon faces original size is [96, 96, 3]\n\n\ndef preprocess_fn(img):\n re_size = 64\n img = tf.to_float(tf.image.resize_images(img, [re_size, re_size], method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1\n return img\n\nimg_paths = glob.glob('./data/Omni_64/*.jpg')\ndata_pool = utils.DiskImageData(img_paths, batch_size, shape=[96, 96, 3], preprocess_fn=preprocess_fn)\n\n\n\"\"\" graphs \"\"\"\nwith tf.device('/gpu:%d' % gpu_id):\n generator = models.generator\n discriminator = models.discriminator\n\n ''' graph '''\n # inputs\n real = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])\n z = tf.placeholder(tf.float32, shape=[None, z_dim])\n\n # generate\n fake = generator(z, reuse=False)\n\n # dicriminate\n r_logit = discriminator(real, reuse=False)\n f_logit = discriminator(fake)\n\n # losses\n wd = tf.reduce_mean(r_logit) - tf.reduce_mean(f_logit)\n d_loss = -wd\n g_loss = -tf.reduce_mean(f_logit)\n\n # otpims\n d_var = utils.trainable_variables('discriminator')\n g_var = utils.trainable_variables('generator')\n d_step_ = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(d_loss, var_list=d_var)\n with tf.control_dependencies([d_step_]):\n d_step = tf.group(*(tf.assign(var, tf.clip_by_value(var, -clip, clip)) for var in d_var))\n g_step = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(g_loss, var_list=g_var)\n\n # summaries\n d_summary = utils.summary({wd: 'wd'})\n g_summary = utils.summary({g_loss: 'g_loss'})\n\n # sample\n f_sample = generator(z, training=False)\n\n\n\"\"\" train \"\"\"\n''' init '''\n# session\nsess = utils.session()\n# iteration counter\nit_cnt, update_cnt = utils.counter()\n# saver\nsaver = tf.train.Saver(max_to_keep=5)\n# summary writer\nsummary_writer = tf.summary.FileWriter('./summaries/cartoon_wgan', sess.graph)\n\n''' initialization '''\nckpt_dir = './checkpoints/cartoon_wgan'\nutils.mkdir(ckpt_dir + '/')\nif not utils.load_checkpoint(ckpt_dir, sess):\n sess.run(tf.global_variables_initializer())\n\n''' train '''\ntry:\n z_ipt_sample = np.random.normal(size=[100, z_dim])\n\n batch_epoch = len(data_pool) // (batch_size * n_critic)\n max_it = epoch * batch_epoch\n for it in range(sess.run(it_cnt), max_it):\n sess.run(update_cnt)\n\n # which epoch\n epoch = it // batch_epoch\n it_epoch = it % batch_epoch + 1\n\n # train D\n if it < 25:\n c_iter = 100\n else:\n c_iter = n_critic\n for i in range(n_critic):\n # batch data\n real_ipt = data_pool.batch()\n z_ipt = np.random.normal(size=[batch_size, z_dim])\n d_summary_opt, _ = sess.run([d_summary, d_step], feed_dict={real: real_ipt, z: z_ipt})\n summary_writer.add_summary(d_summary_opt, it)\n\n # train G\n z_ipt = np.random.normal(size=[batch_size, z_dim])\n g_summary_opt, _ = sess.run([g_summary, g_step], feed_dict={z: z_ipt})\n summary_writer.add_summary(g_summary_opt, it)\n\n # display\n if it % 1 == 0:\n print(\"Epoch: (%3d) (%5d/%5d)\" % (epoch, it_epoch, batch_epoch))\n\n # save\n if (it + 1) % 1000 == 0:\n save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch))\n print('Model saved in file: % s' % save_path)\n\n # sample\n if (it + 1) 
% 100 == 0:\n f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample})\n\n save_dir = './sample_images_while_training/cartoon_wgan'\n utils.mkdir(save_dir + '/')\n utils.imwrite(utils.immerge(f_sample_opt, 10, 10), '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, it_epoch, batch_epoch))\n\nexcept Exception as e:\n traceback.print_exc()\nfinally:\n print(\" [*] Close main session!\")\n sess.close()\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.device", "tensorflow.summary.FileWriter", "tensorflow.control_dependencies", "tensorflow.reduce_mean", "tensorflow.train.RMSPropOptimizer", "tensorflow.image.resize_images", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.random.normal", "tensorflow.train.Saver" ] ]
DiddiZ/donk.ai
[ "ccf9a00fb22203a8ab351a5d559d927e6ebfc318" ]
[ "tests/samples_test.py" ]
[ "import unittest\r\n\r\nimport numpy as np\r\nfrom numpy.testing import assert_array_equal\r\n\r\n\r\nclass Test_TransitionPool(unittest.TestCase):\r\n\r\n def test_add(self):\r\n \"\"\"Test TransitionPool.add().\"\"\"\r\n from donk.samples import TransitionPool\r\n\r\n N, T, dX, dU = 3, 10, 5, 3\r\n rng = np.random.default_rng(0)\r\n\r\n X = rng.standard_normal((N, T + 1, dX))\r\n U = rng.standard_normal((N, T, dU))\r\n pool = TransitionPool()\r\n pool.add(X, U)\r\n\r\n # Retrieve all\r\n assert_array_equal(pool.get_transitions(), np.c_[X[:, :-1].reshape(-1, dX), U.reshape(-1, dU), X[:, 1:].reshape(-1, dX)])\r\n\r\n # Retrieve N\r\n assert_array_equal(\r\n pool.get_transitions(N=5), np.c_[X[2, -6:-1].reshape(-1, dX), U[2, -5:].reshape(-1, dU), X[2, -5:].reshape(-1, dX)]\r\n )\r\n" ]
[ [ "numpy.random.default_rng" ] ]
shanbs/home-assistant
[ "818776d2b4f11e4f51992dc88bc0a6f9055833b2" ]
[ "homeassistant/components/sensor/pollen.py" ]
[ "\"\"\"Support for Pollen.com allergen and cold/flu sensors.\"\"\"\nfrom datetime import timedelta\nimport logging\nfrom statistics import mean\n\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nfrom homeassistant.const import (\n ATTR_ATTRIBUTION, ATTR_STATE, CONF_MONITORED_CONDITIONS)\nfrom homeassistant.helpers import aiohttp_client\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle\n\nREQUIREMENTS = ['numpy==1.16.2', 'pypollencom==2.2.3']\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_ALLERGEN_AMOUNT = 'allergen_amount'\nATTR_ALLERGEN_GENUS = 'allergen_genus'\nATTR_ALLERGEN_NAME = 'allergen_name'\nATTR_ALLERGEN_TYPE = 'allergen_type'\nATTR_CITY = 'city'\nATTR_OUTLOOK = 'outlook'\nATTR_RATING = 'rating'\nATTR_SEASON = 'season'\nATTR_TREND = 'trend'\nATTR_ZIP_CODE = 'zip_code'\n\nCONF_ZIP_CODE = 'zip_code'\n\nDEFAULT_ATTRIBUTION = 'Data provided by IQVIA™'\nDEFAULT_SCAN_INTERVAL = timedelta(minutes=30)\n\nTYPE_ALLERGY_FORECAST = 'allergy_average_forecasted'\nTYPE_ALLERGY_HISTORIC = 'allergy_average_historical'\nTYPE_ALLERGY_INDEX = 'allergy_index'\nTYPE_ALLERGY_OUTLOOK = 'allergy_outlook'\nTYPE_ALLERGY_TODAY = 'allergy_index_today'\nTYPE_ALLERGY_TOMORROW = 'allergy_index_tomorrow'\nTYPE_ALLERGY_YESTERDAY = 'allergy_index_yesterday'\nTYPE_ASTHMA_FORECAST = 'asthma_average_forecasted'\nTYPE_ASTHMA_HISTORIC = 'asthma_average_historical'\nTYPE_ASTHMA_INDEX = 'asthma_index'\nTYPE_ASTHMA_TODAY = 'asthma_index_today'\nTYPE_ASTHMA_TOMORROW = 'asthma_index_tomorrow'\nTYPE_ASTHMA_YESTERDAY = 'asthma_index_yesterday'\nTYPE_DISEASE_FORECAST = 'disease_average_forecasted'\n\nSENSORS = {\n TYPE_ALLERGY_FORECAST: (\n 'ForecastSensor', 'Allergy Index: Forecasted Average', 'mdi:flower'),\n TYPE_ALLERGY_HISTORIC: (\n 'HistoricalSensor', 'Allergy Index: Historical Average', 'mdi:flower'),\n TYPE_ALLERGY_TODAY: ('IndexSensor', 'Allergy Index: Today', 'mdi:flower'),\n TYPE_ALLERGY_TOMORROW: (\n 'IndexSensor', 'Allergy Index: Tomorrow', 'mdi:flower'),\n TYPE_ALLERGY_YESTERDAY: (\n 'IndexSensor', 'Allergy Index: Yesterday', 'mdi:flower'),\n TYPE_ASTHMA_TODAY: ('IndexSensor', 'Asthma Index: Today', 'mdi:flower'),\n TYPE_ASTHMA_TOMORROW: (\n 'IndexSensor', 'Asthma Index: Tomorrow', 'mdi:flower'),\n TYPE_ASTHMA_YESTERDAY: (\n 'IndexSensor', 'Asthma Index: Yesterday', 'mdi:flower'),\n TYPE_ASTHMA_FORECAST: (\n 'ForecastSensor', 'Asthma Index: Forecasted Average', 'mdi:flower'),\n TYPE_ASTHMA_HISTORIC: (\n 'HistoricalSensor', 'Asthma Index: Historical Average', 'mdi:flower'),\n TYPE_DISEASE_FORECAST: (\n 'ForecastSensor', 'Cold & Flu: Forecasted Average', 'mdi:snowflake')\n}\n\nRATING_MAPPING = [{\n 'label': 'Low',\n 'minimum': 0.0,\n 'maximum': 2.4\n}, {\n 'label': 'Low/Medium',\n 'minimum': 2.5,\n 'maximum': 4.8\n}, {\n 'label': 'Medium',\n 'minimum': 4.9,\n 'maximum': 7.2\n}, {\n 'label': 'Medium/High',\n 'minimum': 7.3,\n 'maximum': 9.6\n}, {\n 'label': 'High',\n 'minimum': 9.7,\n 'maximum': 12\n}]\n\nTREND_INCREASING = 'Increasing'\nTREND_SUBSIDING = 'Subsiding'\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_ZIP_CODE):\n str,\n vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):\n vol.All(cv.ensure_list, [vol.In(SENSORS)])\n})\n\n\nasync def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n \"\"\"Configure the platform and add the sensors.\"\"\"\n from pypollencom import Client\n\n websession = 
aiohttp_client.async_get_clientsession(hass)\n\n pollen = PollenComData(\n Client(config[CONF_ZIP_CODE], websession),\n config[CONF_MONITORED_CONDITIONS])\n\n await pollen.async_update()\n\n sensors = []\n for kind in config[CONF_MONITORED_CONDITIONS]:\n sensor_class, name, icon = SENSORS[kind]\n sensors.append(\n globals()[sensor_class](\n pollen, kind, name, icon, config[CONF_ZIP_CODE]))\n\n async_add_entities(sensors, True)\n\n\ndef calculate_average_rating(indices):\n \"\"\"Calculate the human-friendly historical allergy average.\"\"\"\n ratings = list(\n r['label'] for n in indices for r in RATING_MAPPING\n if r['minimum'] <= n <= r['maximum'])\n return max(set(ratings), key=ratings.count)\n\n\ndef calculate_trend(indices):\n \"\"\"Calculate the \"moving average\" of a set of indices.\"\"\"\n import numpy as np\n\n def moving_average(data, samples):\n \"\"\"Determine the \"moving average\" (http://tinyurl.com/yaereb3c).\"\"\"\n ret = np.cumsum(data, dtype=float)\n ret[samples:] = ret[samples:] - ret[:-samples]\n return ret[samples - 1:] / samples\n\n increasing = np.all(np.diff(moving_average(np.array(indices), 4)) > 0)\n\n if increasing:\n return TREND_INCREASING\n return TREND_SUBSIDING\n\n\nclass BaseSensor(Entity):\n \"\"\"Define a base Pollen.com sensor.\"\"\"\n\n def __init__(self, pollen, kind, name, icon, zip_code):\n \"\"\"Initialize the sensor.\"\"\"\n self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}\n self._icon = icon\n self._kind = kind\n self._name = name\n self._state = None\n self._zip_code = zip_code\n self.pollen = pollen\n\n @property\n def available(self):\n \"\"\"Return True if entity is available.\"\"\"\n if self._kind in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,\n TYPE_ALLERGY_YESTERDAY):\n return bool(self.pollen.data[TYPE_ALLERGY_INDEX])\n\n if self._kind in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW,\n TYPE_ASTHMA_YESTERDAY):\n return bool(self.pollen.data[TYPE_ASTHMA_INDEX])\n\n return bool(self.pollen.data[self._kind])\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the device state attributes.\"\"\"\n return self._attrs\n\n @property\n def icon(self):\n \"\"\"Return the icon.\"\"\"\n return self._icon\n\n @property\n def name(self):\n \"\"\"Return the name.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the state.\"\"\"\n return self._state\n\n @property\n def unique_id(self):\n \"\"\"Return a unique, HASS-friendly identifier for this entity.\"\"\"\n return '{0}_{1}'.format(self._zip_code, self._kind)\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit the value is expressed in.\"\"\"\n return 'index'\n\n\nclass ForecastSensor(BaseSensor):\n \"\"\"Define sensor related to forecast data.\"\"\"\n\n async def async_update(self):\n \"\"\"Update the sensor.\"\"\"\n await self.pollen.async_update()\n if not self.pollen.data:\n return\n\n data = self.pollen.data[self._kind].get('Location')\n if not data:\n return\n\n indices = [p['Index'] for p in data['periods']]\n average = round(mean(indices), 1)\n [rating] = [\n i['label'] for i in RATING_MAPPING\n if i['minimum'] <= average <= i['maximum']\n ]\n\n self._attrs.update({\n ATTR_CITY: data['City'].title(),\n ATTR_RATING: rating,\n ATTR_STATE: data['State'],\n ATTR_TREND: calculate_trend(indices),\n ATTR_ZIP_CODE: data['ZIP']\n })\n\n if self._kind == TYPE_ALLERGY_FORECAST:\n outlook = self.pollen.data[TYPE_ALLERGY_OUTLOOK]\n self._attrs[ATTR_OUTLOOK] = outlook['Outlook']\n self._attrs[ATTR_SEASON] = outlook['Season']\n\n self._state = 
average\n\n\nclass HistoricalSensor(BaseSensor):\n \"\"\"Define sensor related to historical data.\"\"\"\n\n async def async_update(self):\n \"\"\"Update the sensor.\"\"\"\n await self.pollen.async_update()\n if not self.pollen.data:\n return\n\n data = self.pollen.data[self._kind].get('Location')\n if not data:\n return\n\n indices = [p['Index'] for p in data['periods']]\n average = round(mean(indices), 1)\n\n self._attrs.update({\n ATTR_CITY: data['City'].title(),\n ATTR_RATING: calculate_average_rating(indices),\n ATTR_STATE: data['State'],\n ATTR_TREND: calculate_trend(indices),\n ATTR_ZIP_CODE: data['ZIP']\n })\n\n self._state = average\n\n\nclass IndexSensor(BaseSensor):\n \"\"\"Define sensor related to indices.\"\"\"\n\n async def async_update(self):\n \"\"\"Update the sensor.\"\"\"\n await self.pollen.async_update()\n if not self.pollen.data:\n return\n\n data = {}\n if self._kind in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,\n TYPE_ALLERGY_YESTERDAY):\n data = self.pollen.data[TYPE_ALLERGY_INDEX].get('Location')\n elif self._kind in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW,\n TYPE_ASTHMA_YESTERDAY):\n data = self.pollen.data[TYPE_ASTHMA_INDEX].get('Location')\n\n if not data:\n return\n\n key = self._kind.split('_')[-1].title()\n [period] = [p for p in data['periods'] if p['Type'] == key]\n [rating] = [\n i['label'] for i in RATING_MAPPING\n if i['minimum'] <= period['Index'] <= i['maximum']\n ]\n\n self._attrs.update({\n ATTR_CITY: data['City'].title(),\n ATTR_RATING: rating,\n ATTR_STATE: data['State'],\n ATTR_ZIP_CODE: data['ZIP']\n })\n\n if self._kind in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,\n TYPE_ALLERGY_YESTERDAY):\n for idx, attrs in enumerate(period['Triggers']):\n index = idx + 1\n self._attrs.update({\n '{0}_{1}'.format(ATTR_ALLERGEN_GENUS, index):\n attrs['Genus'],\n '{0}_{1}'.format(ATTR_ALLERGEN_NAME, index):\n attrs['Name'],\n '{0}_{1}'.format(ATTR_ALLERGEN_TYPE, index):\n attrs['PlantType'],\n })\n elif self._kind in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW,\n TYPE_ASTHMA_YESTERDAY):\n for idx, attrs in enumerate(period['Triggers']):\n index = idx + 1\n self._attrs.update({\n '{0}_{1}'.format(ATTR_ALLERGEN_NAME, index):\n attrs['Name'],\n '{0}_{1}'.format(ATTR_ALLERGEN_AMOUNT, index):\n attrs['PPM'],\n })\n\n self._state = period['Index']\n\n\nclass PollenComData:\n \"\"\"Define a data object to retrieve info from Pollen.com.\"\"\"\n\n def __init__(self, client, sensor_types):\n \"\"\"Initialize.\"\"\"\n self._client = client\n self._sensor_types = sensor_types\n self.data = {}\n\n async def _get_data(self, method, key):\n \"\"\"Return API data from a specific call.\"\"\"\n from pypollencom.errors import PollenComError\n\n try:\n data = await method()\n self.data[key] = data\n except PollenComError as err:\n _LOGGER.error('Unable to get \"%s\" data: %s', key, err)\n self.data[key] = {}\n\n @Throttle(DEFAULT_SCAN_INTERVAL)\n async def async_update(self):\n \"\"\"Update Pollen.com data.\"\"\"\n from pypollencom.errors import InvalidZipError\n\n # Pollen.com requires a bit more complicated error handling, given that\n # it sometimes has parts (but not the whole thing) go down:\n #\n # 1. If `InvalidZipError` is thrown, quit everything immediately.\n # 2. 
If an individual request throws any other error, try the others.\n\n try:\n if TYPE_ALLERGY_FORECAST in self._sensor_types:\n await self._get_data(\n self._client.allergens.extended, TYPE_ALLERGY_FORECAST)\n await self._get_data(\n self._client.allergens.outlook, TYPE_ALLERGY_OUTLOOK)\n\n if TYPE_ALLERGY_HISTORIC in self._sensor_types:\n await self._get_data(\n self._client.allergens.historic, TYPE_ALLERGY_HISTORIC)\n\n if any(s in self._sensor_types\n for s in [TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,\n TYPE_ALLERGY_YESTERDAY]):\n await self._get_data(\n self._client.allergens.current, TYPE_ALLERGY_INDEX)\n\n if TYPE_ASTHMA_FORECAST in self._sensor_types:\n await self._get_data(\n self._client.asthma.extended, TYPE_ASTHMA_FORECAST)\n\n if TYPE_ASTHMA_HISTORIC in self._sensor_types:\n await self._get_data(\n self._client.asthma.historic, TYPE_ASTHMA_HISTORIC)\n\n if any(s in self._sensor_types\n for s in [TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW,\n TYPE_ASTHMA_YESTERDAY]):\n await self._get_data(\n self._client.asthma.current, TYPE_ASTHMA_INDEX)\n\n if TYPE_DISEASE_FORECAST in self._sensor_types:\n await self._get_data(\n self._client.disease.extended, TYPE_DISEASE_FORECAST)\n\n _LOGGER.debug(\"New data retrieved: %s\", self.data)\n except InvalidZipError:\n _LOGGER.error(\n \"Cannot retrieve data for ZIP code: %s\", self._client.zip_code)\n self.data = {}\n" ]
[ [ "numpy.array", "numpy.cumsum" ] ]
fpirovan/NoiseInjection
[ "d1a8c90aaf45d435d40c476a2d2e74258920ff22" ]
[ "dart/experiments/tools/noise.py" ]
[ "import numpy as np\nimport statistics\n\ndef sample_covariance_lnr(env, lnr, sup, samples, T):\n\n cov = np.zeros(env.action_space.shape[0])\n for s in range(samples):\n states, tmp_actions, _, _ = statistics.collect_traj(env, lnr, T)\n sup_actions = np.array([sup.intended_action(s) for s in states])\n lnr_actions = np.array(tmp_actions)\n\n length = len(tmp_actions)\n diff = sup_actions - lnr_actions\n\n cov = cov + np.dot(diff.T, diff) / float(length)\n \n return cov / float(samples)\n\n\ndef sample_covariance_sup(env, lnr, sup, samples, T):\n cov = np.zeros(env.action_space.shape[0])\n for s in range(samples):\n states, tmp_actions, _, _ = statistics.collect_traj(env, sup, T)\n sup_actions = np.array(tmp_actions)\n lnr_actions = np.array([lnr.intended_action(s) for s in states])\n length = len(tmp_actions)\n\n diff = sup_actions - lnr_actions\n cov = cov + np.dot(diff.T, diff) / float(length)\n\n return cov / float(samples)\n\n\ndef sample_covariance_trajs(env, lnr, trajs, samples, T):\n d = env.action_space.shape[0]\n cov = np.zeros((d, d))\n trajs = np.array(trajs[len(trajs) - samples:])\n # trajs = np.array(trajs[len(trajs) - samples * 2:])\n # indices = np.random.choice(len(trajs), min(len(trajs), samples), replace=False)\n # trajs = trajs[indices]\n for states, i_actions in trajs:\n sup_actions = np.array([a for a in i_actions])\n lnr_actions = np.array([lnr.intended_action(s) for s in states])\n length = len(i_actions)\n\n diff = sup_actions - lnr_actions\n cov = cov + np.dot(diff.T, diff) / float(length)\n\n print (\"Trajs: \" + str(len(trajs)))\n return cov / float(len(trajs))\n\ndef sample_iso_cov_lnr(env, lnr, sup, samples, T):\n d = env.action_space.shape[0]\n cov = sample_covariance_lnr(env, lnr, sup, samples, T)\n return np.trace(cov) / float(d) * np.identity(d)\n\ndef sample_iso_cov_sup(env, lnr, sup, samples, T):\n d = env.action_space.shape[0]\n cov = sample_covariance_sup(env, lnr, sup, samples, T)\n return np.trace(cov) / float(d) * np.identity(d)\n\ndef sample_epsilon_lnr(env, lnr, sup, samples, T):\n surr_loss = statistics.evaluate_agent_disc(env, lnr, sup, T, samples)\n return surr_loss\n\ndef sample_epsilon_sup(env, lnr, sup, samples, T):\n loss = statistics.evaluate_sup_disc(env, lnr, sup, T, samples)\n return loss" ]
[ [ "numpy.dot", "numpy.identity", "numpy.array", "numpy.zeros", "numpy.trace" ] ]
dftbplus/phonopy
[ "32d3d52902c314c7f00192d10f7a156d0a8341c9" ]
[ "phonopy/cui/collect_cell_info.py" ]
[ "# Copyright (C) 2018 Atsushi Togo\n# All rights reserved.\n#\n# This file is part of phonopy.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# * Neither the name of the phonopy project nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\nfrom phonopy.interface.calculator import (\n read_crystal_structure, get_default_cell_filename)\nfrom phonopy.interface.vasp import read_vasp\nfrom phonopy.interface.phonopy_yaml import PhonopyYaml\n\n\ndef collect_cell_info(supercell_matrix=None,\n primitive_matrix=None,\n interface_mode=None,\n cell_filename=None,\n chemical_symbols=None,\n enforce_primitive_matrix_auto=False,\n phonopy_yaml_cls=None,\n symprec=1e-5):\n # In some cases, interface mode falls back to phonopy_yaml mode.\n fallback_reason = _fallback_to_phonopy_yaml(\n supercell_matrix,\n interface_mode,\n cell_filename)\n\n if fallback_reason:\n _interface_mode = 'phonopy_yaml'\n elif interface_mode is None:\n _interface_mode = None\n else:\n _interface_mode = interface_mode.lower()\n\n unitcell, optional_structure_info = read_crystal_structure(\n filename=cell_filename,\n interface_mode=_interface_mode,\n chemical_symbols=chemical_symbols,\n phonopy_yaml_cls=phonopy_yaml_cls)\n\n # Error check\n if unitcell is None:\n err_msg = _get_error_message(optional_structure_info,\n interface_mode,\n fallback_reason,\n cell_filename,\n phonopy_yaml_cls)\n return err_msg\n\n # Retrieve more information on cells\n (interface_mode_out,\n supercell_matrix_out,\n primitive_matrix_out) = _collect_cells_info(\n _interface_mode,\n optional_structure_info,\n interface_mode,\n supercell_matrix,\n primitive_matrix,\n enforce_primitive_matrix_auto)\n\n # Another error check\n msg_list = [\"Crystal structure was read from \\\"%s\\\".\"\n % optional_structure_info[0], ]\n if supercell_matrix_out is None:\n msg_list.append(\n \"Supercell matrix (DIM or --dim) information was not found.\")\n return \"\\n\".join(msg_list)\n\n if np.linalg.det(unitcell.get_cell()) < 0.0:\n msg_list.append(\"Lattice vectors have to follow the right-hand rule.\")\n return \"\\n\".join(msg_list)\n\n # Succeeded!\n if _interface_mode == 'phonopy_yaml':\n phpy_yaml = 
optional_structure_info[1]\n else:\n phpy_yaml = None\n return (unitcell, supercell_matrix_out, primitive_matrix_out,\n optional_structure_info, interface_mode_out, phpy_yaml)\n\n\ndef _fallback_to_phonopy_yaml(supercell_matrix,\n interface_mode,\n cell_filename):\n \"\"\"Find possibility to fallback to phonopy.yaml mode\n\n Fallback happens in any of the following cases.\n\n 1. Parsing crystal structure file in the VASP POSCAR-style failed\n 2. Default VASP POSCAR-style file is not found.\n 3. supercell_matrix is not given along with (1) or (2).\n\n Parameters\n ----------\n supercell_matrix : array_like or None\n None is given when phonopy.yaml mode is expected.\n interface_mode : str or None\n None is the default mode, i.e., VASP like.\n cell_filename : str or None\n File name of VASP style crystal structure. None means the default\n file name, \"POSCAR\".\n\n Returns\n -------\n fallback_reason : str or None\n This provides information how to handle after the fallback.\n None means fallback to phonopy.yaml mode will not happen.\n\n \"\"\"\n\n fallback_reason = None\n\n if interface_mode is None:\n fallback_reason = _poscar_failed(cell_filename)\n\n if fallback_reason is not None:\n if supercell_matrix is None:\n fallback_reason = \"no supercell matrix given\"\n\n return fallback_reason\n\n\ndef _poscar_failed(cell_filename):\n \"\"\"Determine if fall back happens\n\n 1) read_vasp (parsing POSCAR-style file) is failed. --> fallback\n\n ValueError is raised by read_vasp when the POSCAR-style format\n is broken. By this way, we assume the input crystal structure\n is not in the POSCAR-style format and is in the phonopy.yaml\n type.\n\n 2) The file given by get_default_cell_filename('vasp') is not\n found at the current directory. --> fallback\n\n This is the trigger to look for the phonopy.yaml type file.\n\n 3) The given file with cell_filename is not found. 
--> not fallback\n\n This case will not invoke phonopy.yaml mode and here nothing\n is done, i.e., fallback_reason = None.\n This error will be caught in the following part again be\n handled properly (read_crystal_structure).\n\n \"\"\"\n\n fallback_reason = None\n try:\n if cell_filename is None:\n read_vasp(get_default_cell_filename('vasp'))\n else:\n read_vasp(cell_filename)\n except ValueError:\n # (1) see above\n fallback_reason = \"read_vasp parsing failed\"\n except FileNotFoundError:\n if cell_filename is None:\n # (2) see above\n fallback_reason = \"default file not found\"\n else:\n # (3) see above\n pass\n return fallback_reason\n\n\ndef _collect_cells_info(_interface_mode,\n optional_structure_info,\n interface_mode,\n supercell_matrix,\n primitive_matrix,\n enforce_primitive_matrix_auto):\n \"\"\"This is a method just to wrap up and exclude dirty stuffs.\"\"\"\n\n if (_interface_mode == 'phonopy_yaml' and\n optional_structure_info[1] is not None):\n phpy = optional_structure_info[1]\n if phpy.calculator is None:\n interface_mode_out = interface_mode\n else:\n interface_mode_out = phpy.calculator\n if phpy.supercell_matrix is None:\n _supercell_matrix = supercell_matrix\n else:\n _supercell_matrix = phpy.supercell_matrix\n if primitive_matrix is not None:\n _primitive_matrix = primitive_matrix\n elif phpy.primitive_matrix is not None:\n _primitive_matrix = phpy.primitive_matrix\n else:\n _primitive_matrix = 'auto'\n else:\n interface_mode_out = _interface_mode\n _supercell_matrix = supercell_matrix\n _primitive_matrix = primitive_matrix\n\n if enforce_primitive_matrix_auto:\n _primitive_matrix = 'auto'\n\n if _supercell_matrix is None and _primitive_matrix == 'auto':\n supercell_matrix_out = np.eye(3, dtype='intc')\n else:\n supercell_matrix_out = _supercell_matrix\n\n primitive_matrix_out = _primitive_matrix\n\n return interface_mode_out, supercell_matrix_out, primitive_matrix_out\n\n\ndef _get_error_message(optional_structure_info,\n interface_mode,\n fallback_reason,\n cell_filename,\n phonopy_yaml_cls):\n final_cell_filename = optional_structure_info[0]\n if phonopy_yaml_cls is None:\n _phonopy_yaml_cls = PhonopyYaml\n else:\n _phonopy_yaml_cls = phonopy_yaml_cls\n\n if fallback_reason is None:\n msg_list = []\n if cell_filename != final_cell_filename:\n msg_list.append(\"Crystal structure file \\\"%s\\\" was not found.\"\n % cell_filename)\n msg_list.append(\"Crystal structure file \\\"%s\\\" was not found.\"\n % final_cell_filename)\n return \"\\n\".join(msg_list)\n\n ####################################\n # Must be phonopy_yaml mode below. 
#\n ####################################\n\n msg_list = []\n if fallback_reason in [\"default file not found\",\n \"read_vasp parsing failed\"]:\n if cell_filename:\n vasp_filename = cell_filename\n else:\n vasp_filename = get_default_cell_filename('vasp')\n\n if fallback_reason == \"read_vasp parsing failed\":\n msg_list.append(\n \"Parsing crystal structure file of \\\"%s\\\" failed.\"\n % vasp_filename)\n else:\n msg_list.append(\n \"Crystal structure file of \\\"%s\\\" was not found.\"\n % vasp_filename)\n\n elif fallback_reason == \"no supercell matrix given\":\n msg_list.append(\"Supercell matrix (DIM or --dim) was not explicitly \"\n \"specified.\")\n\n msg_list.append(\"By this reason, %s_yaml mode was invoked.\"\n % _phonopy_yaml_cls.command_name)\n\n if final_cell_filename is None: # No phonopy*.yaml file was found.\n filenames = [\"\\\"%s\\\"\" % name\n for name in _phonopy_yaml_cls.default_filenames]\n if len(filenames) == 1:\n text = filenames[0]\n elif len(filenames) == 2:\n text = \" and \".join(filenames)\n else:\n tail = \" and \".join(filenames[-2:])\n head = \", \".join(filenames[:-2])\n text = head + \", \" + tail\n msg_list.append(\"But %s could not be found.\" % text)\n return \"\\n\".join(msg_list)\n\n phpy = optional_structure_info[1]\n if phpy is None: # Failed to parse phonopy*.yaml.\n msg_list.append(\"But parsing \\\"%s\\\" failed.\" % final_cell_filename)\n\n return \"\\n\".join(msg_list)\n" ]
[ [ "numpy.eye" ] ]
nielsuit227/AutoML
[ "51e2076d52d76dc84a190293b5bb59da2833df89" ]
[ "Amplo/Pipeline.py" ]
[ "import re\nimport os\nimport time\nimport copy\nimport json\nimport Amplo\nimport joblib\nimport shutil\nimport warnings\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom datetime import datetime\nfrom shap import TreeExplainer\n\nfrom sklearn import metrics\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom Amplo import Utils\n\nfrom Amplo.AutoML.Sequencer import Sequencer\nfrom Amplo.AutoML.Modeller import Modeller\nfrom Amplo.AutoML.DataSampler import DataSampler\nfrom Amplo.AutoML.DataExplorer import DataExplorer\nfrom Amplo.AutoML.DataProcesser import DataProcesser\nfrom Amplo.AutoML.DriftDetector import DriftDetector\nfrom Amplo.AutoML.FeatureProcesser import FeatureProcesser\nfrom Amplo.Regressors.StackingRegressor import StackingRegressor\nfrom Amplo.Classifiers.StackingClassifier import StackingClassifier\n\nfrom .GridSearch.BaseGridSearch import BaseGridSearch\nfrom .GridSearch.HalvingGridSearch import HalvingGridSearch\nfrom .GridSearch.OptunaGridSearch import OptunaGridSearch\n\nfrom .Documenting.MultiDocumenting import MultiDocumenting\nfrom .Documenting.BinaryDocumenting import BinaryDocumenting\nfrom .Documenting.RegressionDocumenting import RegressionDocumenting\n\n\nclass Pipeline:\n\n def __init__(self, **kwargs):\n \"\"\"\n Automated Machine Learning Pipeline for tabular data.\n Designed for predictive maintenance applications, failure identification, failure prediction, condition\n monitoring, etc.\n\n Parameters\n ----------\n Main Parameters:\n target [str]: Column name of the output/dependent/regressand variable.\n name [str]: Name of the project (for documentation)\n version [int]: Pipeline version (set automatically)\n mode [str]: 'classification' or 'regression'\n objective [str]: from sklearn metrics and scoring\n\n Data Processor:\n int_cols [list[str]]: Column names of integer columns\n float_cols [list[str]]: Column names of float columns\n date_cols [list[str]]: Column names of datetime columns\n cat_cols [list[str]]: Column names of categorical columns\n missing_values [str]: [DataProcessing] - 'remove', 'interpolate', 'mean' or 'zero'\n outlier_removal [str]: [DataProcessing] - 'clip', 'boxplot', 'z-score' or 'none'\n z_score_threshold [int]: [DataProcessing] If outlier_removal = 'z-score', the threshold is adaptable\n include_output [bool]: Whether to include output in the training data (sensible only with sequencing)\n\n Feature Processor:\n extract_features [bool]: Whether or not to use FeatureProcessing module\n information_threshold : [FeatureProcessing] Threshold for removing co-linear features\n feature_timeout [int]: [FeatureProcessing] Time budget for feature processing\n max_lags [int]: [FeatureProcessing] Maximum lags for lagged features to analyse\n max_diff [int]: [FeatureProcessing] Maximum differencing order for differencing features\n\n Sequencing:\n sequence [bool]: [Sequencing] Whether or not to use Sequence module\n seq_back [int or list[int]]: Input time indices\n If list -> includes all integers within the list\n If int -> includes that many samples back\n seq_forward [int or list[int]: Output time indices\n If list -> includes all integers within the list.\n If int -> includes that many samples forward.\n seq_shift [int]: Shift input / output samples in time\n seq_diff [int]: Difference the input & output, 'none', 'diff' or 'log_diff'\n seq_flat [bool]: Whether to return a matrix (True) or Tensor (Flat)\n\n Modelling:\n standardize [bool]: Whether to 
standardize input/output data\n shuffle [bool]: Whether to shuffle the samples during cross-validation\n cv_splits [int]: How many cross-validation splits to make\n store_models [bool]: Whether to store all trained model files\n\n Grid Search:\n grid_search_type [str]: Which method to use 'optuna', 'halving', 'base'\n grid_search_time_budget : Time budget for grid search\n grid_search_candidates : Parameter evaluation budget for grid search\n grid_search_iterations : Model evaluation budget for grid search\n\n Stacking:\n stacking [bool]: Whether to create a stacking model at the end\n\n Production:\n preprocess_function [str]: Add custom code for the prediction function, useful for production. Will be executed\n with exec, can be multiline. Uses data as input.\n\n Flags:\n plot_eda [bool]: Whether or not to run Exploratory Data Analysis\n process_data [bool]: Whether or not to force data processing\n document_results [bool]: Whether or not to force documenting\n no_dirs [bool]: Whether to create files or not\n verbose [int]: Level of verbosity\n \"\"\"\n # Production initiation\n self.bestModel = None\n self.settings = None\n self.mainDir = 'AutoML/'\n\n # Copy arguments\n ##################\n # Main Settings\n self.target = re.sub('[^a-z0-9]', '_', kwargs.get('target', '').lower())\n self.name = kwargs.get('name', 'AutoML')\n self.version = kwargs.get('version', None)\n self.mode = kwargs.get('mode', None)\n self.objective = kwargs.get('objective', None)\n\n # Data Processor\n self.intCols = kwargs.get('int_cols', [])\n self.floatCols = kwargs.get('float_cols', [])\n self.dateCols = kwargs.get('date_cols', [])\n self.catCols = kwargs.get('cat_cols', [])\n self.missingValues = kwargs.get('missing_values', 'zero')\n self.outlierRemoval = kwargs.get('outlier_removal', 'clip')\n self.zScoreThreshold = kwargs.get('z_score_threshold', 4)\n self.includeOutput = kwargs.get('include_output', False)\n\n # Feature Processor\n self.extractFeatures = kwargs.get('extract_features', True)\n self.informationThreshold = kwargs.get('information_threshold', 0.999)\n self.featureTimeout = kwargs.get('feature_timeout', 3600)\n self.maxLags = kwargs.get('max_lags', 0)\n self.maxDiff = kwargs.get('max_diff', 0)\n\n # Sequencer\n self.sequence = kwargs.get('sequence', False)\n self.sequenceBack = kwargs.get('seq_back', 1)\n self.sequenceForward = kwargs.get('seq_forward', 1)\n self.sequenceShift = kwargs.get('seq_shift', 0)\n self.sequenceDiff = kwargs.get('seq_diff', 'none')\n self.sequenceFlat = kwargs.get('seq_flat', True)\n\n # Modelling\n self.standardize = kwargs.get('standardize', False)\n self.shuffle = kwargs.get('shuffle', True)\n self.cvSplits = kwargs.get('cv_shuffle', 10)\n self.storeModels = kwargs.get('store_models', False)\n\n # Grid Search Parameters\n self.gridSearchType = kwargs.get('grid_search_type', 'optuna')\n self.gridSearchTimeout = kwargs.get('grid_search_time_budget', 3600)\n self.gridSearchCandidates = kwargs.get('grid_search_candidates', 250)\n self.gridSearchIterations = kwargs.get('grid_search_iterations', 3)\n\n # Stacking\n self.stacking = kwargs.get('stacking', False)\n\n # Production\n self.preprocessFunction = kwargs.get('preprocess_function', None)\n\n # Flags\n self.plotEDA = kwargs.get('plot_eda', False)\n self.processData = kwargs.get('process_data', True)\n self.documentResults = kwargs.get('document_results', True)\n self.verbose = kwargs.get('verbose', 0)\n self.noDirs = kwargs.get('no_dirs', False)\n\n # Monitoring\n self._prediction_time = None\n 
self._main_predictors = None\n\n # Checks\n assert self.mode in [None, 'regression', 'classification'], 'Supported modes: regression, classification.'\n assert 0 < self.informationThreshold < 1, 'Information threshold needs to be within [0, 1]'\n assert self.maxLags < 50, 'Max_lags too big. Max 50.'\n assert self.maxDiff < 5, 'Max diff too big. Max 5.'\n assert self.gridSearchType.lower() in ['base', 'halving', 'optuna'], 'Grid Search Type must be Base, Halving ' \\\n 'or Optuna'\n\n # Advices\n if self.includeOutput and not self.sequence:\n warnings.warn('[AutoML] IMPORTANT: strongly advices to not include output without sequencing.')\n\n # Objective & Scorer\n self.scorer = None\n if self.objective is not None:\n assert isinstance(self.objective, str), 'Objective needs to be a string'\n assert self.objective in metrics.SCORERS.keys(), 'Metric not supported, look at sklearn.metrics'\n self.scorer = metrics.SCORERS[self.objective]\n\n # Instance initiating\n self.data = None\n self.x = None\n self.y = None\n self.featureSets = None\n self.results = None\n self.n_classes = None\n self.is_fitted = False\n\n # Required sub-classes\n self.dataSampler = DataSampler()\n self.dataProcesser = DataProcesser()\n self.dataSequencer = Sequencer()\n self.featureProcesser = FeatureProcesser()\n self.driftDetector = DriftDetector()\n\n # Create dirs\n if not self.noDirs:\n self._create_dirs()\n self._load_version()\n\n # Store Pipeline Settings\n self.settings = {'pipeline': kwargs, 'validation': {}, 'feature_set': ''}\n\n # User Pointing Functions\n def get_settings(self) -> dict:\n \"\"\"\n Get settings to recreate fitted object.\n \"\"\"\n assert self.is_fitted, \"Pipeline not yet fitted.\"\n return self.settings\n\n def load_settings(self, settings: dict):\n \"\"\"\n Restores a pipeline from settings.\n\n Parameters\n ----------\n settings [dict]: Pipeline settings\n \"\"\"\n # Set parameters\n settings['pipeline']['no_dirs'] = True\n self.__init__(**settings['pipeline'])\n self.settings = settings\n self.dataProcesser.load_settings(settings['data_processing'])\n self.featureProcesser.load_settings(settings['feature_processing'])\n if 'drift_detector' in settings:\n self.driftDetector = DriftDetector(\n num_cols=self.dataProcesser.float_cols + self.dataProcesser.int_cols,\n cat_cols=self.dataProcesser.cat_cols,\n date_cols=self.dataProcesser.date_cols\n ).load_weights(settings['drift_detector'])\n\n def load_model(self, model: object):\n \"\"\"\n Restores a trained model\n \"\"\"\n assert type(model).__name__ == self.settings['model']\n self.bestModel = model\n self.is_fitted = True\n\n def fit(self, *args, **kwargs):\n \"\"\"\n Fit the full autoML pipeline.\n\n 1. Data Processing\n Cleans all the data. See @DataProcessing\n 2. (optional) Exploratory Data Analysis\n Creates a ton of plots which are helpful to improve predictions manually\n 3. Feature Processing\n Extracts & Selects. See @FeatureProcessing\n 4. Initial Modelling\n Runs various off the shelf models with default parameters for all feature sets\n If Sequencing is enabled, this is where it happens, as here, the feature set is generated.\n 5. Grid Search\n Optimizes the hyper parameters of the best performing models\n 6. (optional) Create Stacking model\n 7. (optional) Create documentation\n 8. 
Prepare Production Files\n Nicely organises all required scripts / files to make a prediction\n\n Parameters\n ----------\n data [pd.DataFrame] - Single dataframe with input and output data.\n \"\"\"\n # Starting\n print('\\n\\n*** Starting Amplo AutoML - {} ***\\n\\n'.format(self.name))\n\n # Reading data\n data = self._read_data(*args, **kwargs)\n\n # Detect mode (classification / regression)\n self._mode_detector(data)\n\n # Preprocess Data\n self._data_processing(data)\n\n # Run Exploratory Data Analysis\n self._eda()\n\n # Balance data\n self._data_sampling()\n\n # Sequence\n self._sequencing()\n\n # Extract and select features\n self._feature_processing()\n\n # Standardize\n # Standardizing assures equal scales, equal gradients and no clipping.\n # Therefore it needs to be after sequencing & feature processing, as this alters scales\n self._standardizing()\n\n # Run initial models\n self._initial_modelling()\n\n # Optimize Hyper parameters\n self.grid_search()\n\n # Create stacking model\n self._create_stacking()\n\n # Prepare production files\n self._prepare_production_files()\n\n self.is_fitted = True\n print('[AutoML] All done :)')\n\n def convert_data(self, x: pd.DataFrame, preprocess: bool = True) -> [pd.DataFrame, pd.Series]:\n \"\"\"\n Function that uses the same process as the pipeline to clean data.\n Useful if pipeline is pickled for production\n\n Parameters\n ----------\n data [pd.DataFrame]: Input features\n \"\"\"\n # Convert to Pandas\n if isinstance(x, np.ndarray):\n x = pd.DataFrame(x, columns=[f\"Feature_{i}\" for i in range(x.shape[1])])\n\n # Custom code\n if self.preprocessFunction is not None and preprocess:\n ex_globals = {'data': x}\n exec(self.preprocessFunction, ex_globals)\n x = ex_globals['data']\n\n # Process data\n x = self.dataProcesser.transform(x)\n if x.astype('float32').replace([np.inf, -np.inf], np.nan).isna().sum().sum() != 0:\n raise ValueError(f\"Column(s) with NaN: {list(x.keys()[x.isna().sum() > 0])}\")\n\n # Drift Check\n self.driftDetector.check(x)\n\n # Split output\n y = None\n if self.target in x.keys():\n y = x[self.target]\n if not self.includeOutput:\n x = x.drop(self.target, axis=1)\n\n # Sequence\n if self.sequence:\n x, y = self.dataSequencer.convert(x, y)\n\n # Convert Features\n x = self.featureProcesser.transform(x, self.settings['feature_set'])\n\n # Standardize\n if self.standardize:\n x, y = self._transform_standardize(x, y)\n\n # Return\n return x, y\n\n def predict(self, data: pd.DataFrame) -> np.ndarray:\n \"\"\"\n Full script to make predictions. 
Uses 'Production' folder with defined or latest version.\n\n Parameters\n ----------\n data [pd.DataFrame]: data to do prediction on\n \"\"\"\n start_time = time.time()\n assert self.is_fitted, \"Pipeline not yet fitted.\"\n\n # Print\n if self.verbose > 0:\n print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))\n\n # Convert\n x, y = self.convert_data(data)\n\n # Predict\n if self.mode == 'regression' and self.standardize:\n predictions = self._inverse_standardize(self.bestModel.predict(x))\n else:\n predictions = self.bestModel.predict(x)\n\n # Stop timer\n self._prediction_time = (time.time() - start_time) / len(x) * 1000\n\n # Calculate main predictors\n self._get_main_predictors(x)\n\n return predictions\n\n def predict_proba(self, data: pd.DataFrame) -> np.ndarray:\n \"\"\"\n Returns probabilistic prediction, only for classification.\n\n Parameters\n ----------\n data [pd.DataFrame]: data to do prediction on\n \"\"\"\n start_time = time.time()\n assert self.is_fitted, \"Pipeline not yet fitted.\"\n assert self.mode == 'classification', 'Predict_proba only available for classification'\n assert hasattr(self.bestModel, 'predict_proba'), '{} has no attribute predict_proba'.format(\n type(self.bestModel).__name__)\n\n # Print\n if self.verbose > 0:\n print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))\n\n # Convert data\n x, y = self.convert_data(data)\n\n # Predict\n prediction = self.bestModel.predict_proba(x)\n\n # Stop timer\n self._prediction_time = (time.time() - start_time) / len(x) * 1000\n\n # Calculate main predictors\n self._get_main_predictors(x)\n\n return prediction\n\n # Fit functions\n def _read_data(self, *args, **kwargs) -> pd.DataFrame:\n \"\"\"\n To support Pandas & Numpy, with just data and x, y, this function reads the data and loads into desired format.\n\n Parameters\n ----------\n args / kwargs:\n x (pd.DataFrame): input\n y (pd.Series): output\n data (pd.DataFrame): data\n \"\"\"\n assert len(args) + len(kwargs) != 0, \"No data provided.\"\n\n # Handle args\n if len(args) + len(kwargs) == 1:\n if len(args) == 1:\n data = args[0]\n elif len(kwargs) == 1:\n assert 'data' in kwargs, \"'data' argument missing\"\n data = kwargs['data']\n else:\n raise ValueError('No data provided')\n\n # Test data\n assert isinstance(data, pd.DataFrame), \"With only 1 argument, data must be a Pandas Dataframe.\"\n assert self.target != '', 'No target string provided'\n assert self.target in Utils.clean_keys(data).keys(), 'Target column missing'\n\n elif len(args) + len(kwargs) == 2:\n if len(args) == 2:\n x, y = args\n elif len(kwargs) == 2:\n assert 'x' in kwargs and 'y' in kwargs, \"'x' or 'y' argument missing\"\n x, y = kwargs['x'], kwargs['y']\n else:\n raise ValueError('Cannot understand partially named arguments...')\n\n # Parse data\n assert isinstance(x, (np.ndarray, pd.Series, pd.DataFrame)), \"Unsupported data type for 'x'\"\n if isinstance(x, pd.Series):\n data = pd.DataFrame(x)\n elif isinstance(x, np.ndarray):\n data = pd.DataFrame(x, columns=[f\"Feature_{i}\" for i in range(x.shape[1])])\n else:\n data = x\n\n # Check (and update) target\n if self.target == '':\n self.target = 'target'\n\n # Add target\n data[self.target] = y\n\n else:\n raise ValueError('Incorrect number of arguments.')\n\n return data\n\n def _mode_detector(self, data: pd.DataFrame):\n \"\"\"\n Detects the mode (Regression / Classification)\n :param data: Data to detect mode on\n \"\"\"\n # Only run if mode is not 
provided\n if self.mode is None:\n\n # Classification if string\n if data[self.target].dtype == str or \\\n data[self.target].nunique() < 0.1 * len(data):\n self.mode = 'classification'\n self.objective = 'neg_log_loss'\n\n # Else regression\n else:\n self.mode = 'regression'\n self.objective = 'neg_mean_absolute_error'\n self.scorer = metrics.SCORERS[self.objective]\n\n # Copy to settings\n self.settings['pipeline']['mode'] = self.mode\n self.settings['pipeline']['objective'] = self.objective\n\n # Print\n if self.verbose > 0:\n print(f\"[AutoML] Setting mode to {self.mode} & objective to {self.objective}.\")\n return\n\n def _data_processing(self, data: pd.DataFrame):\n \"\"\"\n Organises the data cleaning. Heavy lifting is done in self.dataProcesser, but settings etc. needs\n to be organised.\n \"\"\"\n self.dataProcesser = DataProcesser(target=self.target, int_cols=self.intCols, float_cols=self.floatCols,\n date_cols=self.dateCols, cat_cols=self.catCols,\n missing_values=self.missingValues,\n outlier_removal=self.outlierRemoval, z_score_threshold=self.zScoreThreshold)\n\n try:\n # Load data\n data = pd.read_csv(self.mainDir + 'Data/Cleaned_v{}.csv'.format(self.version), index_col='index')\n\n # Load settings\n self.settings['data_processing'] = json.load(open(self.mainDir + 'Settings/Cleaning_v{}.json'\n .format(self.version), 'r'))\n self.dataProcesser.load_settings(self.settings['data_processing'])\n\n if self.verbose > 0:\n print('[AutoML] Loaded Cleaned Data')\n\n except FileNotFoundError:\n # Cleaning\n data = self.dataProcesser.fit_transform(data)\n\n # Store data\n data.to_csv(self.mainDir + 'Data/Cleaned_v{}.csv'.format(self.version), index_label='index')\n\n # Save settings\n self.settings['data_processing'] = self.dataProcesser.get_settings()\n json.dump(self.settings['data_processing'], open(self.mainDir + 'Settings/Cleaning_v{}.json'\n .format(self.version), 'w'))\n\n # If no columns were provided, load them from data processor\n if self.dateCols is None:\n self.dateCols = self.settings['data_processing']['date_cols']\n if self.intCols is None:\n self.dateCols = self.settings['data_processing']['int_cols']\n if self.floatCols is None:\n self.floatCols = self.settings['data_processing']['float_cols']\n if self.catCols is None:\n self.catCols = self.settings['data_processing']['cat_cols']\n\n # Split and store in memory\n self.data = data\n self.y = data[self.target]\n self.x = data\n if self.includeOutput is False:\n self.x = self.x.drop(self.target, axis=1)\n\n # Assert classes in case of classification\n self.n_classes = self.y.nunique()\n if self.mode == 'classification':\n if self.n_classes >= 50:\n warnings.warn('More than 20 classes, you may want to reconsider classification mode')\n if set(self.y) != set([i for i in range(len(set(self.y)))]):\n raise ValueError('Classes should be [0, 1, ...]')\n\n def _eda(self):\n if self.plotEDA:\n print('[AutoML] Starting Exploratory Data Analysis')\n eda = DataExplorer(self.x, y=self.y,\n mode=self.mode,\n folder=self.mainDir,\n version=self.version)\n eda.run()\n\n def _data_sampling(self):\n \"\"\"\n Only run for classification problems. Balances the data using imblearn.\n Does not guarantee to return balanced classes. 
(Methods are data dependent)\n \"\"\"\n # Only necessary for classification\n self.dataSampler = DataSampler(method='both', margin=0.1, cv_splits=self.cvSplits, shuffle=self.shuffle,\n fast_run=False, objective=self.objective)\n if self.mode == 'classification':\n # Check if exists\n try:\n # Load\n data = pd.read_csv(self.mainDir + 'Data/Balanced_v{}.csv'.format(self.version), index_col='index')\n\n # Split\n self.y = data[self.target]\n self.x = data\n if self.includeOutput is False:\n self.x = self.x.drop(self.target, axis=1)\n\n if self.verbose > 0:\n print('[AutoML] Loaded Balanced data')\n\n except FileNotFoundError:\n # Fit & Resample\n self.x, self.y = self.dataSampler.fit_resample(self.x, self.y)\n\n # Store\n data = copy.copy(self.x)\n data[self.target] = self.y\n data.to_csv(self.mainDir + 'Data/Balanced_v{}.csv'.format(self.version), index_label='index')\n\n def _sequencing(self):\n \"\"\"\n Sequences the data. Useful mostly for problems where older samples play a role in future values.\n The settings of this module are NOT AUTOMATIC\n \"\"\"\n self.dataSequencer = Sequencer(back=self.sequenceBack, forward=self.sequenceForward,\n shift=self.sequenceShift, diff=self.sequenceDiff)\n if self.sequence:\n try:\n # Load data\n data = pd.read_csv(self.mainDir + 'Data/Sequence_v{}.csv'.format(self.version), index_col='index')\n\n # Split and set to memory\n self.y = data[self.target]\n self.x = data\n if not self.includeOutput:\n self.x = self.x.drop(self.target, axis=1)\n\n if self.verbose > 0:\n print('[AutoML] Loaded Extracted Features')\n\n except FileNotFoundError:\n print('[AutoML] Sequencing data')\n self.x, self.y = self.dataSequencer.convert(self.x, self.y)\n\n # Save\n data = copy.deepcopy(self.x)\n data[self.target] = copy.deepcopy(self.y)\n data.to_csv(self.mainDir + 'Data/Sequence_v{}.csv'.format(self.version), index_label='index')\n\n def _feature_processing(self):\n \"\"\"\n Organises feature processing. 
Heavy lifting is done in self.featureProcesser, but settings, etc.\n needs to be organised.\n \"\"\"\n self.featureProcesser = FeatureProcesser(mode=self.mode, max_lags=self.maxLags, max_diff=self.maxDiff,\n extract_features=self.extractFeatures, timeout=self.featureTimeout,\n information_threshold=self.informationThreshold)\n # Check if exists\n try:\n # Loading data\n self.x = pd.read_csv(self.mainDir + 'Data/Extracted_v{}.csv'.format(self.version), index_col='index')\n\n # Loading settings\n self.settings['feature_processing'] = json.load(open(self.mainDir + 'Settings/Features_v{}.json'\n .format(self.version), 'r'))\n self.featureProcesser.load_settings(self.settings['feature_processing'])\n self.featureSets = self.settings['feature_processing']['featureSets']\n\n if self.verbose > 0:\n print('[AutoML] Loaded Extracted Features')\n except FileNotFoundError:\n print('[AutoML] Starting Feature Processor')\n\n # Transform data\n self.x, self.featureSets = self.featureProcesser.fit_transform(self.x, self.y)\n\n # Store data\n self.x.to_csv(self.mainDir + 'Data/Extracted_v{}.csv'.format(self.version), index_label='index')\n\n # Save settings\n self.settings['feature_processing'] = self.featureProcesser.get_settings()\n json.dump(self.settings['feature_processing'], open(self.mainDir + 'Settings/Features_v{}.json'\n .format(self.version), 'w'))\n\n def _standardizing(self):\n \"\"\"\n Wrapper function to determine whether to fit or load\n \"\"\"\n # Return if standardize is off\n if not self.standardize:\n return\n\n # Load if exists\n try:\n self.settings['standardize'] = json.load(open(self.mainDir + 'Settings/Standardize_v{}.json'\n .format(self.version), 'r'))\n\n # Otherwise fits\n except FileNotFoundError:\n self._fit_standardize(self.x, self.y)\n\n # Store Settings\n json.dump(self.settings['standardize'], open(self.mainDir + 'Settings/Standardize_v{}.json'\n .format(self.version), 'w'))\n\n # And transform\n self.x, self.y = self._transform_standardize(self.x, self.y)\n\n def _initial_modelling(self):\n \"\"\"\n Runs various models to see which work well.\n \"\"\"\n # Load existing results\n if 'Results.csv' in os.listdir(self.mainDir):\n\n # Load results\n self.results = pd.read_csv(self.mainDir + 'Results.csv')\n\n # Printing here as we load it\n results = self.results[np.logical_and(\n self.results['version'] == self.version,\n self.results['type'] == 'Initial modelling'\n )]\n for fs in set(results['dataset']):\n print('[AutoML] Initial Modelling for {} ({})'.format(fs, len(self.featureSets[fs])))\n fsr = results[results['dataset'] == fs]\n for i in range(len(fsr)):\n row = fsr.iloc[i]\n print('[AutoML] {} {}: {:.4f} \\u00B1 {:.4f}'.format(row['model'].ljust(40), self.objective,\n row['mean_objective'], row['std_objective']))\n\n # Check if this version has been modelled\n if self.results is None or self.version not in self.results['version'].values:\n\n # Iterate through feature sets\n for feature_set, cols in self.featureSets.items():\n\n # Skip empty sets\n if len(cols) == 0:\n print('[AutoML] Skipping {} features, empty set'.format(feature_set))\n continue\n print('[AutoML] Initial Modelling for {} features ({})'.format(feature_set, len(cols)))\n\n # Do the modelling\n modeller = Modeller(mode=self.mode, shuffle=self.shuffle, store_models=self.storeModels,\n objective=self.objective, dataset=feature_set,\n store_results=False, folder=self.mainDir + 'Models/')\n results = modeller.fit(self.x[cols], self.y)\n\n # Add results to memory\n results['type'] = 'Initial modelling'\n 
results['version'] = self.version\n if self.results is None:\n self.results = results\n else:\n self.results = self.results.append(results)\n\n # Save results\n self.results.to_csv(self.mainDir + 'Results.csv', index=False)\n\n def grid_search(self, model=None, feature_set: str = None, parameter_set: str = None):\n \"\"\"\n Runs a grid search. By default, takes the self.results, and runs for the top 3 optimizations.\n There is the option to provide a model & feature_set, but both have to be provided. In this case,\n the model & data set combination will be optimized.\n Implemented types, Base, Halving, Optuna\n\n Parameters\n ----------\n model [Object or str]- (optional) Which model to run grid search for.\n feature_set [str]- (optional) Which feature set to run grid search for 'rft', 'rfi' or 'pps'\n parameter_set [dict]- (optional) Parameter grid to optimize over\n \"\"\"\n assert model is not None and feature_set is not None or model == feature_set, \\\n 'Model & feature_set need to be either both None or both provided.'\n # If arguments are provided\n if model is not None:\n\n # Get model string\n if isinstance(model, str):\n model = Utils.getModel(model, mode=self.mode, samples=len(self.x))\n\n # Organise existing results\n results = self.results[np.logical_and(\n self.results['model'] == type(model).__name__,\n self.results['version'] == self.version,\n )]\n results = self._sort_results(results[results['dataset'] == feature_set])\n\n # Check if exists and load\n if ('Hyper Parameter' == results['type']).any():\n print('[AutoML] Loading optimization results.')\n hyper_opt_results = results[results['type'] == 'Hyper Parameter']\n params = Utils.parse_json(hyper_opt_results.iloc[0]['params'])\n\n # Or run\n else:\n # Run grid search\n grid_search_results = self._sort_results(self._grid_search_iteration(model, parameter_set, feature_set))\n\n # Store results\n grid_search_results['model'] = type(model).__name__\n grid_search_results['version'] = self.version\n grid_search_results['dataset'] = feature_set\n grid_search_results['type'] = 'Hyper Parameter'\n self.results = self.results.append(grid_search_results)\n self.results.to_csv(self.mainDir + 'Results.csv', index=False)\n\n # Get params for validation\n params = Utils.parse_json(grid_search_results.iloc[0]['params'])\n\n # Validate\n if self.documentResults:\n self.document(model.set_params(**params), feature_set)\n return\n\n # If arguments aren't provided, run through promising models\n results = self._sort_results(self.results[np.logical_and(\n self.results['type'] == 'Initial modelling',\n self.results['version'] == self.version,\n )])\n for iteration in range(self.gridSearchIterations):\n # Grab settings\n settings = results.iloc[iteration] # IndexError\n model = Utils.getModel(settings['model'], mode=self.mode, samples=len(self.x))\n feature_set = settings['dataset']\n\n # Check whether exists\n model_results = self.results[np.logical_and(\n self.results['model'] == type(model).__name__,\n self.results['version'] == self.version,\n )]\n model_results = self._sort_results(model_results[model_results['dataset'] == feature_set])\n\n # If exists\n if ('Hyper Parameter' == model_results['type']).any():\n hyper_opt_res = model_results[model_results['type'] == 'Hyper Parameter']\n params = Utils.parse_json(hyper_opt_res.iloc[0]['params'])\n\n # Else run\n else:\n # For one model\n grid_search_results = self._sort_results(self._grid_search_iteration(\n copy.deepcopy(model), parameter_set, feature_set))\n\n # Store\n 
grid_search_results['version'] = self.version\n grid_search_results['dataset'] = feature_set\n grid_search_results['type'] = 'Hyper Parameter'\n self.results = self.results.append(grid_search_results)\n self.results.to_csv(self.mainDir + 'Results.csv', index=False)\n params = Utils.parse_json(grid_search_results.iloc[0]['params'])\n\n # Validate\n if self.documentResults:\n self.document(model.set_params(**params), feature_set)\n\n def _create_stacking(self):\n \"\"\"\n Based on the best performing models, in addition to cheap models based on very different assumptions,\n A stacking model is optimized to enhance/combine the performance of the models.\n --> should contain a large variety of models\n --> classifiers need predict_proba\n --> level 1 needs to be ordinary least squares\n \"\"\"\n if self.stacking:\n print('[AutoML] Creating Stacking Ensemble')\n\n # Select feature set that has been picked most often for hyper parameter optimization\n results = self._sort_results(self.results[np.logical_and(\n self.results['type'] == 'Hyper Parameter',\n self.results['version'] == self.version,\n )])\n feature_set = results['dataset'].value_counts().index[0]\n results = results[results['dataset'] == feature_set]\n print('[AutoML] Selected Stacking feature set: {}'.format(feature_set))\n\n # Create Stacking Model Params\n n_stacking_models = 3\n stacking_models_str = results['model'].unique()[:n_stacking_models]\n stacking_models_params = [Utils.parse_json(results.iloc[np.where(results['model'] == sms)[0][0]]['params'])\n for sms in stacking_models_str]\n stacking_models = dict([(sms, stacking_models_params[i]) for i, sms in enumerate(stacking_models_str)])\n print('[AutoML] Stacked models: {}'.format(list(stacking_models.keys())))\n\n # Add samples & Features\n stacking_models['n_samples'], stacking_models['n_features'] = self.x.shape\n\n # Prepare Stack\n if self.mode == 'regression':\n stack = StackingRegressor(**stacking_models)\n cv = KFold(n_splits=self.cvSplits, shuffle=self.shuffle)\n\n elif self.mode == 'classification':\n stack = StackingClassifier(**stacking_models)\n cv = StratifiedKFold(n_splits=self.cvSplits, shuffle=self.shuffle)\n else:\n raise NotImplementedError('Unknown mode')\n\n # Cross Validate\n x, y = self.x[self.featureSets[feature_set]].to_numpy(), self.y.to_numpy()\n score = []\n times = []\n for (t, v) in tqdm(cv.split(x, y)):\n start_time = time.time()\n xt, xv, yt, yv = x[t], x[v], y[t].reshape((-1)), y[v].reshape((-1))\n model = copy.deepcopy(stack)\n model.fit(xt, yt)\n score.append(self.scorer(model, xv, yv))\n times.append(time.time() - start_time)\n\n # Output Results\n print('[AutoML] Stacking result:')\n print('[AutoML] {}: {:.2f} \\u00B1 {:.2f}'.format(self.objective, np.mean(score), np.std(score)))\n self.results = self.results.append({\n 'date': datetime.today().strftime('%d %b %y'),\n 'model': type(stack).__name__,\n 'dataset': feature_set,\n 'params': json.dumps(stack.get_params()),\n 'mean_objective': np.mean(score),\n 'std_objective': np.std(score),\n 'mean_time': np.mean(times),\n 'std_time': np.std(times),\n 'version': self.version,\n 'type': 'Stacking',\n }, ignore_index=True)\n self.results.to_csv(self.mainDir + 'Results.csv', index=False)\n\n # Document\n if self.documentResults:\n self.document(stack, feature_set)\n\n def document(self, model, feature_set: str):\n \"\"\"\n Loads the model and features and initiates the outside Documenting class.\n\n Parameters\n ----------\n model [Object or str]- (optional) Which model to run grid search for.\n 
feature_set [str]- (optional) Which feature set to run grid search for 'rft', 'rfi' or 'pps'\n \"\"\"\n # Get model\n if isinstance(model, str):\n model = Utils.getModel(model, mode=self.mode, samples=len(self.x))\n\n # Checks\n assert feature_set in self.featureSets.keys(), 'Feature Set not available.'\n if os.path.exists(self.mainDir + 'Documentation/v{}/{}_{}.pdf'.format(\n self.version, type(model).__name__, feature_set)):\n print('[AutoML] Documentation existing for {} v{} - {} '.format(\n type(model).__name__, self.version, feature_set))\n return\n if len(model.get_params()) == 0:\n warnings.warn('[Documenting] Supplied model has no parameters!')\n\n # Run validation\n print('[AutoML] Creating Documentation for {} - {}'.format(type(model).__name__, feature_set))\n if self.mode == 'classification' and self.n_classes == 2:\n documenting = BinaryDocumenting(self)\n elif self.mode == 'classification':\n documenting = MultiDocumenting(self)\n elif self.mode == 'regression':\n documenting = RegressionDocumenting(self)\n else:\n raise ValueError('Unknown mode.')\n documenting.create(model, feature_set)\n\n # Append to settings\n self.settings['validation']['{}_{}'.format(type(model).__name__, feature_set)] = documenting.outputMetrics\n\n def _prepare_production_files(self, model=None, feature_set: str = None, params: dict = None):\n \"\"\"\n Prepares files necessary to deploy a specific model / feature set combination.\n - Model.joblib\n - Settings.json\n - Report.pdf\n\n Parameters\n ----------\n model [string] : (optional) Model file for which to prep production files\n feature_set [string] : (optional) Feature set for which to prep production files\n params [optional, dict]: (optional) Model parameters for which to prep production files, if None, takes best.\n \"\"\"\n # Path variable\n prod_path = self.mainDir + 'Production/v{}/'.format(self.version)\n\n # Create production folder\n if not os.path.exists(prod_path):\n os.mkdir(prod_path)\n\n # Filter results for this version\n results = self._sort_results(self.results[self.results['version'] == self.version])\n\n # Filter results if model is provided\n if model is not None:\n # Take name if model instance is given\n if not isinstance(model, str):\n model = type(model).__name__\n\n # Filter results\n results = self._sort_results(results[results['model'] == model])\n\n # Filter results if feature set is provided\n if feature_set is not None:\n results = self._sort_results(results[results['dataset'] == feature_set])\n\n # Get best parameters\n if params is None:\n params = results.iloc[0]['params']\n\n # Otherwise Find best\n model = results.iloc[0]['model']\n feature_set = results.iloc[0]['dataset']\n params = Utils.parse_json(params)\n\n # Printing action\n if self.verbose > 0:\n print('[AutoML] Preparing Production files for {}, {}, {}'.format(model, feature_set, params))\n\n # Try to load model\n if os.path.exists(prod_path + 'Model.joblib'):\n self.bestModel = joblib.load(prod_path + 'Model.joblib')\n\n # Rerun if not existent, or different than desired\n if not os.path.exists(prod_path + 'Model.joblib') or \\\n type(self.bestModel).__name__ != model or \\\n self.bestModel.get_params() != params:\n\n # Stacking needs to be created by a separate script :/\n if 'Stacking' in model:\n if self.mode == 'regression':\n self.bestModel = StackingRegressor(n_samples=len(self.x), n_features=len(self.x.keys()))\n elif self.mode == 'classification':\n self.bestModel = StackingClassifier(n_samples=len(self.x), n_features=len(self.x.keys()))\n 
else:\n raise NotImplementedError(\"Mode not set\")\n\n else:\n # Model\n self.bestModel = Utils.getModel(model, mode=self.mode, samples=len(self.x))\n\n # Set params, train, & save\n self.bestModel.set_params(**params)\n self.bestModel.fit(self.x[self.featureSets[feature_set]], self.y)\n joblib.dump(self.bestModel, self.mainDir + 'Production/v{}/Model.joblib'.format(self.version))\n\n if self.verbose > 0:\n print('[AutoML] Model fully fitted, in-sample {}: {:4f}'\n .format(self.objective, self.scorer(self.bestModel, self.x[self.featureSets[feature_set]],\n self.y)))\n\n else:\n if self.verbose > 0:\n print('[AutoML] Loading existing model file.')\n\n # Update pipeline settings\n self.settings['pipeline']['verbose'] = 0\n self.settings['model'] = model # The string\n self.settings['params'] = params\n self.settings['feature_set'] = feature_set\n self.settings['features'] = self.featureSets[feature_set]\n self.settings['amplo_version'] = Amplo.__version__ if hasattr(Amplo, '__version__') else 'dev'\n\n # Prune Data Processor\n required_features = self.featureProcesser.get_required_features(self.featureSets[feature_set])\n self.dataProcesser.prune_features(required_features)\n self.settings['data_processing'] = self.dataProcesser.get_settings()\n\n # Fit Drift Detector\n self.driftDetector = DriftDetector(\n num_cols=self.dataProcesser.float_cols + self.dataProcesser.int_cols,\n cat_cols=self.dataProcesser.cat_cols,\n date_cols=self.dataProcesser.date_cols\n )\n self.driftDetector.fit(self.data)\n self.driftDetector.fit_output(self.bestModel, self.x[self.featureSets[feature_set]])\n self.settings['drift_detector'] = self.driftDetector.get_weights()\n\n # Report\n if not os.path.exists('{}Documentation/v{}/{}_{}.pdf'.format(self.mainDir, self.version, model, feature_set)):\n self.document(self.bestModel, feature_set)\n shutil.copy('{}Documentation/v{}/{}_{}.pdf'.format(self.mainDir, self.version, model, feature_set),\n '{}Production/v{}/Report.pdf'.format(self.mainDir, self.version))\n\n # Save settings\n json.dump(self.settings, open(self.mainDir + 'Production/v{}/Settings.json'\n .format(self.version), 'w'), indent=4)\n\n return self\n\n # Support Functions\n def _load_version(self):\n \"\"\"\n Upon start, loads version\n \"\"\"\n # No need if version is set\n if self.version is not None:\n return\n\n # Read changelog (if existent)\n if os.path.exists(self.mainDir + 'changelog.txt'):\n with open(self.mainDir + 'changelog.txt', 'r') as f:\n changelog = f.read()\n else:\n changelog = ''\n\n # Find production folders\n completed_versions = len(os.listdir(self.mainDir + 'Production'))\n started_versions = len(changelog.split('\\n')) - 1\n\n # For new runs\n if started_versions == 0:\n with open(self.mainDir + 'changelog.txt', 'w') as f:\n f.write('v1: Initial Run\\n')\n self.version = 1\n\n # If last run was completed and we start a new\n elif started_versions == completed_versions and self.processData:\n self.version = started_versions + 1\n with open(self.mainDir + 'changelog.txt', 'a') as f:\n f.write('v{}: {}\\n'.format(self.version, input('Changelog v{}:\\n'.format(self.version))))\n\n # If no new run is started (either continue or rerun)\n else:\n self.version = started_versions\n\n if self.verbose > 0:\n print(f'[AutoML] Setting Version {self.version}')\n\n def _create_dirs(self):\n folders = ['', 'EDA', 'Data', 'Features', 'Documentation', 'Production', 'Settings']\n for folder in folders:\n if not os.path.exists(self.mainDir + folder):\n os.makedirs(self.mainDir + folder)\n\n def 
sort_results(self, results: pd.DataFrame) -> pd.DataFrame:\n return self._sort_results(results)\n\n def _fit_standardize(self, x: pd.DataFrame, y: pd.Series):\n \"\"\"\n Fits a standardization parameters and returns the transformed data\n \"\"\"\n # Check if existing\n if os.path.exists(self.mainDir + 'Settings/Standardize_{}.json'.format(self.version)):\n self.settings['standardize'] = json.load(open(self.mainDir + 'Settings/Standardize_{}.json'\n .format(self.version), 'r'))\n return\n\n # Fit Input\n cat_cols = [k for lst in self.settings['data_processing']['dummies'].values() for k in lst]\n features = [k for k in x.keys() if k not in self.dateCols and k not in cat_cols]\n means_ = x[features].mean(axis=0)\n stds_ = x[features].std(axis=0)\n stds_[stds_ == 0] = 1\n settings = {\n 'input': {\n 'features': features,\n 'means': means_.to_list(),\n 'stds': stds_.to_list(),\n }\n }\n\n # Fit Output\n if self.mode == 'regression':\n std = y.std()\n settings['output'] = {\n 'mean': y.mean(),\n 'std': std if std != 0 else 1,\n }\n\n self.settings['standardize'] = settings\n\n def _transform_standardize(self, x: pd.DataFrame, y: pd.Series) -> [pd.DataFrame, pd.Series]:\n \"\"\"\n Standardizes the input and output with values from settings.\n\n Parameters\n ----------\n x [pd.DataFrame]: Input data\n y [pd.Series]: Output data\n \"\"\"\n # Input\n assert self.settings['standardize'], \"Standardize settings not found.\"\n\n # Pull from settings\n features = self.settings['standardize']['input']['features']\n means = self.settings['standardize']['input']['means']\n stds = self.settings['standardize']['input']['stds']\n\n # Filter if not all features are present\n if len(x.keys()) < len(features):\n indices = [[i for i, j in enumerate(features) if j == k][0] for k in x.keys()]\n features = [features[i] for i in indices]\n means = [means[i] for i in indices]\n stds = [stds[i] for i in indices]\n\n # Transform Input\n x[features] = (x[features] - means) / stds\n\n # Transform output (only with regression)\n if self.mode == 'regression':\n y = (y - self.settings['standardize']['output']['mean']) / self.settings['standardize']['output']['std']\n\n return x, y\n\n def _inverse_standardize(self, y: pd.Series) -> pd.Series:\n \"\"\"\n For predictions, transform them back to application scales.\n Parameters\n ----------\n y [pd.Series]: Standardized output\n\n Returns\n -------\n y [pd.Series]: Actual output\n \"\"\"\n assert self.settings['standardize'], \"Standardize settings not found\"\n return y * self.settings['standardize']['output']['std'] + self.settings['standardize']['output']['mean']\n\n @staticmethod\n def _sort_results(results: pd.DataFrame) -> pd.DataFrame:\n return results.sort_values('worst_case', ascending=False)\n\n def _get_best_params(self, model, feature_set: str) -> dict:\n # Filter results for model and version\n results = self.results[np.logical_and(\n self.results['model'] == type(model).__name__,\n self.results['version'] == self.version,\n )]\n\n # Filter results for feature set & sort them\n results = self._sort_results(results[results['dataset'] == feature_set])\n\n # Warning for unoptimized results\n if 'Hyper Parameter' not in results['type'].values:\n warnings.warn('Hyper parameters not optimized for this combination')\n\n # Parse & return best parameters (regardless of if it's optimized)\n return Utils.parse_json(results.iloc[0]['params'])\n\n def _grid_search_iteration(self, model, parameter_set: str, feature_set: str):\n \"\"\"\n INTERNAL | Grid search for defined model, 
parameter set and feature set.\n \"\"\"\n print('\\n[AutoML] Starting Hyper Parameter Optimization for {} on {} features ({} samples, {} features)'.format(\n type(model).__name__, feature_set, len(self.x), len(self.featureSets[feature_set])))\n\n # Cross-Validator\n cv = StratifiedKFold(n_splits=self.cvSplits, shuffle=self.shuffle)\n if self.mode == 'regression':\n cv = KFold(n_splits=self.cvSplits, shuffle=self.shuffle)\n\n # Select right hyper parameter optimizer\n if self.gridSearchType == 'base':\n grid_search = BaseGridSearch(model, params=parameter_set, cv=cv, scoring=self.objective,\n candidates=self.gridSearchCandidates, timeout=self.gridSearchTimeout,\n verbose=self.verbose)\n elif self.gridSearchType == 'halving':\n grid_search = HalvingGridSearch(model, params=parameter_set, cv=cv, scoring=self.objective,\n candidates=self.gridSearchCandidates, verbose=self.verbose)\n elif self.gridSearchType == 'optuna':\n grid_search = OptunaGridSearch(model, timeout=self.gridSearchTimeout, cv=cv,\n candidates=self.gridSearchCandidates, scoring=self.objective,\n verbose=self.verbose)\n else:\n raise NotImplementedError('Only Base, Halving and Optuna are implemented.')\n # Get results\n results = grid_search.fit(self.x[self.featureSets[feature_set]], self.y)\n return results.sort_values('worst_case', ascending=False)\n\n def _get_main_predictors(self, data):\n \"\"\"\n Using Shapely Additive Explanations, this function calculates the main predictors for a given\n prediction and sets them into the class' memory.\n \"\"\"\n if type(self.bestModel).__name__ in ['SVC', 'RidgeClassifier', 'LinearRegression']:\n return\n elif type(self.bestModel).__module__[:5] == 'Amplo':\n explainer = TreeExplainer(self.bestModel.model)\n else:\n explainer = TreeExplainer(self.bestModel)\n\n # Get values\n shap_values = np.array(explainer.shap_values(data))\n\n # Shape them (for multiclass it outputs ndim=3, for binary/regression ndim=2)\n if shap_values.ndim == 3:\n shap_values = shap_values[1]\n\n # Take mean over samples\n shap_values = np.mean(shap_values, axis=0)\n\n # Sort them\n inds = sorted(range(len(shap_values)), key=lambda x: -abs(shap_values[x]))\n\n # Set class attribute\n self._main_predictors = dict([(data.keys()[i], float(abs(shap_values[i]))) for i in inds])\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.StratifiedKFold", "sklearn.model_selection.KFold", "pandas.DataFrame", "numpy.std", "numpy.mean", "sklearn.metrics.SCORERS.keys", "numpy.logical_and", "numpy.where" ] ]
I-love-lamp/ml-apps
[ "71f65fc284bc68794acd4a39df3a5791fcba7c46" ]
[ "classifier_tuning.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\nimport streamlit as st\r\nfrom sklearn import datasets\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score, plot_confusion_matrix, confusion_matrix\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sn\r\n\r\nst.title(\"Classifier performance app\")\r\nst.write(\"\"\"\r\n ## Explore different classifiers\r\n Which one performs the best?\r\n \"\"\")\r\ndataset_name = st.sidebar.selectbox(\"Select Dataset\",\r\n (\"Iris\", \"Breast Cancer\", \"Wine\"))\r\nst.write(f\"\"\"*You have chosen the {dataset_name} dataset*\"\"\")\r\nclassifier_name = st.sidebar.selectbox(\r\n \"Select Classifier\", (\"KNN\", \"SVM\", \"Random Forest\"))\r\n\r\n\r\n# function to load selected dataset\r\ndef get_dataset(dataset_name):\r\n if dataset_name == \"Iris\":\r\n data = datasets.load_iris()\r\n elif dataset_name == \"Breast Cancer\":\r\n data = datasets.load_breast_cancer()\r\n else:\r\n data = datasets.load_wine()\r\n # assign target and inputs for selected set\r\n X = data.data\r\n y = data.target\r\n return X, y\r\n\r\n\r\nX, y = get_dataset(dataset_name)\r\nst.write(f\"\"\"- shape of dataset: \"\"\", X.shape)\r\nst.write(\"\"\"- number of classes:\"\"\", len(np.unique(y)))\r\n\r\n\r\n# function to add tunable parameters to the UI for selected classifier\r\ndef add_params_ui(clf_name):\r\n params = dict()\r\n if classifier_name == \"KNN\":\r\n K = st.sidebar.slider(\"K\", 2, 15)\r\n params[\"K\"] = K\r\n elif classifier_name == \"SVM\":\r\n C = st.sidebar.slider(\"C\", 0.01, 10.0)\r\n kernel = st.sidebar.selectbox(\"kernel\",\r\n (\"linear\", \"poly\", \"rbf\", \"sigmoid\"))\r\n params[\"C\"] = C\r\n params[\"kernel\"] = kernel\r\n else:\r\n max_depth = st.sidebar.slider(\"max_depth\", 2, 15)\r\n n_estimators = st.sidebar.slider(\"n_estimators\", 1, 100)\r\n params[\"max_depth\"] = max_depth\r\n params[\"n_estimators\"] = n_estimators\r\n return params\r\n\r\n\r\nparams = add_params_ui(classifier_name)\r\n\r\n\r\n# function to define the selected classifier and its hyperparameters\r\ndef get_classifier(clf_name, params):\r\n if clf_name == \"KNN\":\r\n clf = KNeighborsClassifier(n_neighbors=params[\"K\"])\r\n elif clf_name == \"SVM\":\r\n clf = SVC(C=params[\"C\"], kernel=params[\"kernel\"])\r\n else:\r\n clf = RandomForestClassifier(n_estimators=params[\"n_estimators\"],\r\n max_depth=params[\"max_depth\"],\r\n random_state=42)\r\n return clf\r\n\r\n\r\nclf = get_classifier(classifier_name, params)\r\n\r\n# label the chosen set with the chosen classifier\r\n# split the dataset\r\nX_train, X_test, y_train, y_test = train_test_split(X, y,\r\n test_size=0.2,\r\n random_state=42)\r\n# train the model and predict\r\nclf.fit(X_train, y_train)\r\ny_pred = clf.predict(X_test)\r\n\r\n# evaluate classifier\r\naccuracy = accuracy_score(y_test, y_pred)\r\nst.write(f\"Your chosen classifier = {classifier_name}\")\r\nst.write(f\"{classifier_name} accuracy = {accuracy}\")\r\n# plot classifier labels - need to reduce dimensionality to 2D\r\nnum_dims = 2\r\npca = PCA(num_dims)\r\nX_2D = pca.fit_transform(X)\r\npca1 = X_2D[:, 0]\r\npca2 = X_2D[:, 1]\r\n\r\n# create scatterplot\r\nplot = plt.figure()\r\nax = plot.add_subplot(1,1,1)\r\nax.scatter(pca1, pca2, alpha=0.8, cmap='jet', c=y)\r\nax.set_xlabel(\"Principal 
component 1\")\r\nax.set_ylabel(\"Principal component 2\")\r\n# cannot use plt.show()\r\nst.write(plot)\r\n\r\n# create confusion matrix\r\nconf_matrix = plot_confusion_matrix(clf, X_test, y_test)\r\nst.write(conf_matrix)\r\n\r\ncm = confusion_matrix(y_test, y_pred)\r\ndf_cm = pd.DataFrame(cm, columns=np.unique(y_test), index=np.unique(y_test))\r\ndf_cm.index.name = 'Actual'\r\ndf_cm.columns.name = 'Predicted'\r\nplt.figure(figsize = (10,7))\r\nsn.set(font_scale=1.4) # for label size\r\ncm_plot = sn.heatmap(df_cm, cmap=\"Blues\", annot=True,annot_kws={\"size\": 16})\r\nst.write(cm_plot)\r\n" ]
[ [ "sklearn.metrics.plot_confusion_matrix", "sklearn.datasets.load_breast_cancer", "numpy.unique", "sklearn.ensemble.RandomForestClassifier", "sklearn.metrics.accuracy_score", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "sklearn.metrics.confusion_matrix", "sklearn.neighbors.KNeighborsClassifier", "sklearn.datasets.load_wine", "sklearn.svm.SVC", "sklearn.decomposition.PCA", "matplotlib.pyplot.figure" ] ]
FrancisCrickInstitute/hatchet
[ "a92992f3464f4df566ac4e9ff69069e736821b4b" ]
[ "src/hatchet/utils/BBeval.py" ]
[ "#!/usr/bin/python3\n\nimport os\nimport sys\nimport argparse\nimport shutil\nimport subprocess\nimport shlex\nimport sys, os\nimport math\nimport warnings\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.colors as col\nfrom matplotlib.pyplot import cm\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib.colors import ListedColormap\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom collections import Counter\nfrom collections import deque\nimport itertools\nfrom itertools import cycle\n\nfrom hatchet import config\n\nplt.style.use('ggplot')\nsns.set_style(\"whitegrid\")\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\n\n#mpl.rcParams.update({'figure.autolayout': True})\n#mpl.rcParams['text.usetex'] = True\n#mpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}', r'\\usepackage{amssymb}']\n#mpl.rcParams['font.family'] = 'serif'\n#mpl.rcParams['font.serif'] = 'Computer Modern'\n\n\ndef parsing_arguments(args=None):\n \"\"\"\n Parse command line arguments\n Returns:\n \"\"\"\n description = \"\"\n parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"INPUT\", help=\"A single file or multiple files between apices in CN_BBC format\")\n parser.add_argument(\"-n\",\"--patientnames\", required=False, default=config.bbeval.patientnames, type=str, help='Names of patients between apices (default: inferred from filenames)')\n parser.add_argument(\"-u\",\"--minu\", required=False, default=config.bbeval.minu, type=float, help='Minimum proportion of a CNA to be considered subclonal (default: 0.2)\"')\n parser.add_argument(\"-x\",\"--rundir\", required=False, default=config.bbeval.rundir, type=str, help='Running directory (default: current directory)')\n parser.add_argument(\"-b\",\"--baseCN\", required=False, default=config.bbeval.basecn, type=str, help='Base copy number (default: inferred from tumor ploidy)')\n parser.add_argument(\"-sC\",\"--figsizeclones\", required=False, default=config.bbeval.figsizeclones, type=str, help='Size of clone plots in the form \"(X-SIZE, Y-SIZE)\"')\n parser.add_argument(\"-sP\",\"--figsizecn\", required=False, default=config.bbeval.figsizecn, type=str, help='Size of CN plots in the form \"(X-SIZE, Y-SIZE)\"')\n parser.add_argument(\"-sG\",\"--figsizegrid\", required=False, default=config.bbeval.figsizegrid, type=str, help='Size of grid plots in the form \"(X-SIZE, Y-SIZE)\"')\n parser.add_argument(\"-rC\",\"--resolutionclones\", required=False, default=config.bbeval.resolutionclones, type=int, help='Number of bins to merge together for plotting clone profiles (default: 100)\"')\n parser.add_argument(\"-rP\",\"--resolutioncn\", required=False, default=config.bbeval.resolutioncn, type=int, help='Number of bins to merge together for plotting proportions (default: 500)\"')\n parser.add_argument(\"-rG\",\"--resolutiongrid\", required=False, default=config.bbeval.resolutiongrid, type=int, help='Number of bins to merge together in grids (default: 100)\"')\n parser.add_argument(\"-e\",\"--threshold\", required=False, default=config.bbeval.threshold, type=float, help='Threshold used to classify a tumor into either diploid or tetraploid (default: 3.0)\"')\n parser.add_argument(\"--ymax\", required=False, default=config.bbeval.ymax, type=int, help='Maximum values in y-axis (default: automatically inferred)\"')\n parser.add_argument(\"--ymin\", 
required=False, default=config.bbeval.ymin, type=int, help='Minimum values in y-axis (default: automatically inferred)\"')\n parser.add_argument(\"--clonepalette\", required=False, default=config.bbeval.clonepalette, type=str, help='Palette for coloring the clones among Set1, Set2, Set3, Paired (default: Set1)\"')\n parser.add_argument(\"--linkage\", required=False, default=config.bbeval.linkage, type=str, help='Linkage method used for clustering (default: single, available \\{single, complete, average, weighted, centroid, median, ward\\} from SciPy)\"')\n args = parser.parse_args(args)\n\n if len(args.INPUT.split()) == 0:\n raise ValueError(error(\"Please specify at least one sample as input!\"))\n if args.patientnames is None:\n patientnames = {fil : os.path.basename(fil) for fil in args.INPUT.split()}\n else:\n patientnames = {f : n for f, n in zip(args.INPUT.split(), args.patientnames.split())}\n if len(args.INPUT.split()) != len(set(patientnames.values())):\n raise ValueError(error(\"Multiple patients have the same name but they should unique!\"))\n if args.figsizeclones is not None:\n try:\n parsed = args.figsizeclones.strip().replace(' ', '').replace('(', '').replace(')', '').split(',')\n figsizeclones = (int(parsed[0]), int(parsed[1]))\n except:\n raise ValueError(error(\"Wrong format of clonefigsize!\"))\n if args.figsizecn is not None:\n try:\n parsed = args.figsizecn.strip().replace(' ', '').replace('(', '').replace(')', '').split(',')\n figsizecn = (int(parsed[0]), int(parsed[1]))\n except:\n raise ValueError(error(\"Wrong format of figsizecn!\"))\n if args.figsizegrid is not None:\n try:\n parsed = args.figsizegrid.strip().replace(' ', '').replace('(', '').replace(')', '').split(',')\n figsizegrid = (int(parsed[0]), int(parsed[1]))\n except:\n raise ValueError(error(\"Wrong format of figsizegrid!\"))\n if not os.path.isdir(args.rundir):\n raise ValueError(error(\"Running directory does not exist!\"))\n if not 0.0 <= args.minu <= 1.0:\n raise ValueError(error(\"The minimum proportion for subclonal CNAs must be in [0, 1]!\"))\n if args.baseCN is not None and args.baseCN < 2:\n raise ValueError(error(\"Base CN must be greater or equal than 2!\"))\n if args.resolutionclones is not None and args.resolutionclones < 1:\n raise ValueError(error(\"Resolution must be greater than 1!\"))\n if args.resolutioncn is not None and args.resolutioncn < 1:\n raise ValueError(error(\"Resolution must be greater than 1!\"))\n if args.resolutiongrid is not None and args.resolutiongrid < 1:\n raise ValueError(error(\"Resolution must be greater than 1!\"))\n if args.threshold < 0:\n raise ValueError(error(\"Threshold must be positive!\"))\n if args.linkage not in {'single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'}:\n raise ValueError(error(\"Unknown linkage method!\"))\n\n if args.clonepalette == 'Set1':\n pal = plt.cm.Set1\n elif args.clonepalette == 'Set2':\n pal = plt.cm.Set2\n elif args.clonepalette == 'Set3':\n pal = plt.cm.Set3\n elif args.clonepalette == 'Paired':\n pal = plt.cm.Paired\n else:\n raise ValueError(error('Unknown clone palette!'))\n\n return {'input' : args.INPUT.split(),\n 'names' : patientnames,\n 'rundir' : args.rundir,\n 'minu' : args.minu,\n 'base' : args.baseCN,\n 'clonefigsize' : figsizeclones,\n 'propsfigsize' : figsizecn,\n 'clusterfigsize' : figsizegrid,\n 'profileres' : args.resolutionclones,\n 'cnres' : args.resolutioncn,\n 'clusterres' : args.resolutiongrid,\n 'threshold' : args.threshold,\n 'linkage' : args.linkage,\n 'ymax' : args.ymax,\n 
'ymin' : args.ymin,\n 'clonepalette' : pal}\n\n\ndef main(args=None):\n sys.stderr.write(log(\"# Checking and parsing input arguments\\n\"))\n args = parsing_arguments(args)\n sys.stdout.write(info(\"\\n\".join([\"## {}:\\t{}\".format(key, args[key]) for key in args]) + '\\n'))\n\n sys.stderr.write(log(\"# Read BBC.UCN files\\n\"))\n tumors, clones, props = readUCN(args['input'], args['names'])\n\n sys.stderr.write(log(\"# Compute purity and tumor ploidy of each sample from each patient\\n\"))\n infbase = pp(tumors, clones, props, args)\n\n if args['base'] is None:\n sys.stderr.write(log(\"# The esimated basic copy number for each patient is\\n\"))\n sys.stderr.write(info('\\n'.join([\"## {}: {}\".format(b, infbase[b]) for b in infbase]) + '\\n'))\n base = infbase\n else:\n base = {pat : args['base'] for pat in tumors}\n\n if len(tumors) == 1:\n sys.stderr.write(log(\"# Intra-tumor analysis\\n\"))\n single(tumors[list(tumors)[0]], clones[list(clones)[0]], props[list(props)[0]], base[list(base)[0]], args)\n else:\n sys.stderr.write(log(\"# Inter-tumors analysis\\n\"))\n multiple(tumors, clones, props, base, args)\n\n\ndef pp(tumor, clones, props, args):\n bases = {}\n for patient in tumor:\n sys.stderr.write(info(\"## PATIENT: {}\\n\".format(patient)))\n counter = []\n for sample in props[patient]:\n purity = sum(float(props[patient][sample][i]) for i in props[patient][sample] if i != 'normal')\n scaled = {i : (props[patient][sample][i] / purity) if purity > 0.0 else 0.0 for i in props[patient][sample] if i != 'normal'}\n length = sum(float(s[1] - s[0]) for c in tumor[patient] for s in tumor[patient][c])\n ploidy = sum(float(sum(tumor[patient][c][s][i])) * float(s[1] - s[0]) * scaled[i] for c in tumor[patient] for s in tumor[patient][c] for i in scaled) / length\n wgd = 2 if ploidy < args['threshold'] else 4\n sys.stderr.write(info(\"### SAMPLE: {} -- PURITY: {} -- PLOIDY: {} -- CLASSIFICATION: {}\\n\".format(sample, purity, ploidy, 'DIPLOID' if wgd == 2 else 'TETRAPLOID')))\n counter.append(wgd)\n counter = Counter(counter)\n bases[patient] = argmax(counter)\n return bases\n\n\ndef single(tumor, clones, props, base, args):\n out = 'intratumor-clones-totalcn.pdf'\n sys.stderr.write(log(\"# Plotting total copy-number clone profiles in {}\\n\".format(os.path.join(args['rundir'], out))))\n profiles(tumor, clones, props, args, out)\n\n out = 'intratumor-clones-allelecn.pdf'\n sys.stderr.write(log(\"# Plotting allele-specific copy-number clone profiles in {}\\n\".format(os.path.join(args['rundir'], out))))\n allelicprofiles(tumor, clones, props, args, out)\n\n out = 'intratumor-copynumber-totalcn.pdf'\n sys.stderr.write(log(\"# Plotting total copy-number proportions in {}\\n\".format(os.path.join(args['rundir'], out))))\n cnproportions(tumor, base, clones, props, args, out)\n\n out = 'intratumor-copynumber-allelecn.pdf'\n sys.stderr.write(log(\"# Plotting allele-specific copy-number proportions in {}\\n\".format(os.path.join(args['rundir'], out))))\n allelicproportions(tumor, int(float(base)/2), clones, props, args, out)\n\n out = 'intratumor-profiles.pdf'\n sys.stderr.write(log(\"# Plotting clone profiles in {}\\n\".format(os.path.join(args['rundir'], out))))\n gridprofiles(tumor, base, clones, props, args, out)\n\n out = 'intratumor-profilesreduced.pdf'\n sys.stderr.write(log(\"# Plotting reduced-clone profiles in {}\\n\".format(os.path.join(args['rundir'], out))))\n gridprofilesreduced(tumor, base, clones, props, args, out)\n\n # Run tumor mixture analyses if we have a multiple tumor samples 
(keys in props)\n if len(props) > 1:\n out = 'intratumor-mixtures.pdf'\n sys.stderr.write(log(\"# Plotting reduced mixtures in {}\\n\".format(os.path.join(args['rundir'], out))))\n gridmixtures(tumor, base, clones, props, args, out)\n\n out = 'intratumor-subclonality.pdf'\n sys.stderr.write(log(\"# Plotting reduced mixtures in {}\\n\".format(os.path.join(args['rundir'], out))))\n subclonal(tumor, base, clones, props, args, out)\n\n\ndef profiles(tumor, clones, props, args, out):\n proj = join(tumor, clones, args['profileres'])\n\n tclones = [i for i in clones if i != 'normal']\n shift = 0.1\n if len(tclones) % 2 == 0:\n available = [-shift/2] + [- shift/2 - shift * (i + 1) for i in range(int(len(tclones) / 2) - 1)]\n available += [shift/2] + [shift/2 + shift * (i + 1) for i in range(int(len(tclones) / 2) - 1)]\n available = iter(sorted(available))\n level = {clone : next(available) for clone in tclones}\n else:\n available = [0] + [-shift * (i + 1) for i in range(int(len(tclones) / 2))] + [shift * (i+1) for i in range(int(len(tclones) / 2))]\n available = iter(sorted(available))\n level = {clone : next(available) for clone in tclones}\n pal = iter(map(mpl.colors.rgb2hex, args['clonepalette'](np.linspace(0, 1, len(tclones)))))\n style = {name : next(pal) for i, name in enumerate(tclones)}\n\n fig = plt.figure(figsize=args['clonefigsize'])\n pos = []\n x = 0\n for c in sorted(proj, key=sortchr):\n for s in sorted(proj[c], key=(lambda x : x[0])):\n pos.append((c, x))\n for i in proj[c][s]:\n if i in tclones:\n y = sum(proj[c][s][i]) + level[i]\n plt.scatter(x, y, c=style[i], marker=\"|\", s=12)\n x += 1\n plt.xlim(xmin=0, xmax=x)\n ymin, ymax = plt.ylim()\n x = 0\n for c in sorted(proj, key=sortchr):\n for s in sorted(proj[c], key=(lambda x : x[0])):\n x += 1\n plt.plot((x, x), (0, ymax+0.4), '--b', linewidth=0.2)\n addchrplt(pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n\n\ndef allelicprofiles(tumor, clones, props, args, out):\n proj = join(tumor, clones, args['profileres'])\n\n tclones = [i for i in clones if i != 'normal']\n shift = 0.1\n if len(tclones) % 2 == 0:\n available = [-shift/2] + [- shift/2 - shift * (i + 1) for i in range(int(len(tclones) / 2) - 1)]\n available += [shift/2] + [shift/2 + shift * (i + 1) for i in range(int(len(tclones) / 2) - 1)]\n available = iter(sorted(available))\n level = {clone : next(available) for clone in tclones}\n else:\n available = [0] + [-shift * (i + 1) for i in range(int(len(tclones) / 2))] + [shift * (i+1) for i in range(int(len(tclones) / 2))]\n available = iter(sorted(available))\n level = {clone : next(available) for clone in tclones}\n pal = iter(map(mpl.colors.rgb2hex, args['clonepalette'](np.linspace(0, 1, len(tclones)))))\n style = {name : next(pal) for i, name in enumerate(tclones)}\n\n fig = plt.figure(figsize=args['clonefigsize'])\n\n x = 0\n pos = []\n for c in sorted(proj, key=sortchr):\n for s in sorted(proj[c], key=(lambda x : x[0])):\n pos.append((c, x))\n for i in proj[c][s]:\n if i != 'normal':\n yA = proj[c][s][i][0] + level[i]\n yB = -(proj[c][s][i][1] + level[i])\n plt.scatter(x, yA, c=style[i], marker=\"|\", s=12)\n plt.scatter(x, yB, c=style[i], marker=\"|\", s=12)\n x += 1\n\n plt.xlim(xmin=0, xmax=x)\n ymin, ymax = plt.ylim()\n ymin -= 0.4\n ymax += 0.4\n x = 0\n for c in sorted(proj, key=sortchr):\n for s in sorted(proj[c], key=(lambda x : x[0])):\n x += 1\n plt.plot((x, x), (ymin, ymax), '--b', linewidth=0.2)\n plt.fill_between(range(0, x), [0 for i in range(x)], [ymax for i in range(x)], 
facecolor='red', interpolate=True, alpha=0.05)\n plt.fill_between(range(0, x), [ymin for i in range(x)], [0 for i in range(x)], facecolor='blue', interpolate=True, alpha=0.05)\n addchrplt(pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n\n\ndef cnproportions(tumor, base, clones, props, args, out):\n with PdfPages(os.path.join(args['rundir'], out)) as pdf:\n for sample in props:\n sys.stderr.write(info(\"## Plotting for sample {}...\\n\".format(sample)))\n proj = join(tumor, clones, args['cnres'])\n sumu = (lambda cns, cn : sum(float(props[sample][i]) for i in cns if cns[i] == cn))\n merge = {c : {s : {sum(cn) : sumu(proj[c][s], cn) for cn in set(proj[c][s].values())} for s in proj[c]} for c in proj}\n\n pos = [(c, s) for c in sorted(merge, key=sortchr) for s in sorted(merge[c], key=(lambda x : x[0]))]\n cns = sorted(set(cn for c in merge for s in merge[c] for cn in merge[c][s]), reverse=True)\n pal2 = iter(sns.color_palette(\"coolwarm\", 1))\n style = {base : next(pal2)}\n #palA = iter(sns.color_palette(\"Reds\", len([cn for cn in cns if cn > base])))\n palA = iter(sns.color_palette(\"YlOrRd_r\", len([cn for cn in cns if cn > base])))\n for x in [cn for cn in cns if cn > base]:\n style[x] = next(palA)\n #palD = iter(sns.color_palette(\"Blues\", len([cn for cn in cns if cn < base])))\n palD = iter(sns.color_palette(\"YlGnBu\", len([cn for cn in cns if cn < base])))\n for x in [cn for cn in cns if cn < base]:\n style[x] = next(palD)\n level = {s : 1.0 for s in pos}\n\n fig = plt.figure(figsize=args['propsfigsize'])\n\n for cn in cns:\n g = (lambda s : merge[s[0]][s[1]][cn] if cn in merge[s[0]][s[1]] else 0.0)\n minv = (lambda v : v if v > 0.02 else 0.0)\n df = pd.DataFrame([{'Genome positions' : x, 'Copy-number Proportions' : minv(level[s])} for x, s in enumerate(pos)])\n sns.barplot(data=df, x=\"Genome positions\", y=\"Copy-number Proportions\", color=style[cn], label=str(cn))\n level = {s : level[s] - g(s) for s in pos}\n\n ticks = [(x, s[0]) for x, s in enumerate(pos) if x == 0 or pos[x-1][0] != pos[x][0]]\n plt.xticks([x[0] for x in ticks], [x[1] for x in ticks])\n plt.legend(loc='center left', fancybox=True, shadow=True, bbox_to_anchor=(1, 0.5))\n plt.ylim(ymin=0, ymax=1.0)\n fig.autofmt_xdate()\n pdf.savefig()\n plt.close()\n\n\ndef allelicproportions(tumor, base, clones, props, args, out):\n with PdfPages(os.path.join(args['rundir'], out)) as pdf:\n for sample in props:\n sys.stderr.write(info(\"## Plotting for sample {}...\\n\".format(sample)))\n proj = join(tumor, [i for i in clones if props[sample][i] > 0.0], args['cnres'])\n sumu = (lambda cns, cn : sum(float(props[sample][i]) for i in cns if cns[i][0] == cn[0]))\n mergeA = {c : {s : {cn[0] : sumu(proj[c][s], cn) for cn in set(proj[c][s].values())} for s in proj[c]} for c in proj}\n sumu = (lambda cns, cn : sum(float(props[sample][i]) for i in cns if cns[i][1] == cn[1]))\n mergeB = {c : {s : {cn[1] : sumu(proj[c][s], cn) for cn in set(proj[c][s].values())} for s in proj[c]} for c in proj}\n\n pos = [(c, s) for c in sorted(mergeA, key=sortchr) for s in sorted(mergeA[c], key=(lambda x : x[0]))]\n cnsA = sorted(set(cn for c in mergeA for s in mergeA[c] for cn in mergeA[c][s]), reverse=True)\n cnsB = sorted(set(cn for c in mergeB for s in mergeB[c] for cn in mergeB[c][s]), reverse=True)\n cns = sorted(set(cnsA) | set(cnsB))\n\n pal2 = iter(sns.color_palette(\"coolwarm\", 1))\n style = {base : next(pal2)}\n #palA = iter(sns.color_palette(\"Reds\", len([cn for cn in cns if cn > base])))\n palA = 
iter(sns.color_palette(\"YlOrRd\", len([cn for cn in cns if cn > base])))\n for x in [cn for cn in cns if cn > base]:\n style[x] = next(palA)\n #palD = iter(sns.color_palette(\"Blues\", len([cn for cn in cns if cn < base])))\n palD = iter(sns.color_palette(\"YlGnBu\", len([cn for cn in cns if cn < base])))\n for x in [cn for cn in cns if cn < base]:\n style[x] = next(palD)\n\n fig = plt.figure(figsize=args['propsfigsize'])\n\n # Plot allele A\n\n level = {s : 1.0 for s in pos}\n for cn in cns:\n g = (lambda s : mergeA[s[0]][s[1]][cn] if cn in mergeA[s[0]][s[1]] else 0.0)\n minv = (lambda v : v if v > 0.02 else 0.0)\n df = pd.DataFrame([{'Genome positions' : x, 'Copy-number Proportions' : minv(level[s])} for x, s in enumerate(pos)])\n sns.barplot(data=df, x=\"Genome positions\", y=\"Copy-number Proportions\", color=style[cn], label=str(cn))\n level = {s : level[s] - g(s) for s in pos}\n\n # Plot allele B\n\n level = {s : -1.0 for s in pos}\n for cn in cns:\n g = (lambda s : mergeB[s[0]][s[1]][cn] if cn in mergeB[s[0]][s[1]] else 0.0)\n minv = (lambda v : v if v < 0.02 else 0.0)\n df = pd.DataFrame([{'Genome positions' : x, 'Copy-number Proportions' : level[s]} for x, s in enumerate(pos)])\n sns.barplot(data=df, x=\"Genome positions\", y=\"Copy-number Proportions\", color=style[cn], label=str(cn))\n level = {s : level[s] + g(s) for s in pos}\n\n ticks = [(x, s[0]) for x, s in enumerate(pos) if x == 0 or pos[x-1][0] != pos[x][0]]\n plt.xticks([x[0] for x in ticks], [x[1] for x in ticks])\n plt.legend(loc='center left', fancybox=True, shadow=True, bbox_to_anchor=(1, 0.5))\n plt.ylim(ymin=-1.0, ymax=1.0)\n fig.autofmt_xdate()\n pdf.savefig()\n plt.close()\n\n\ndef gridprofiles(tumor, base, clones, props, args, out):\n proj = join(tumor, clones, args['clusterres'])\n pos = [(c, s) for c in sorted(proj, key=sortchr) for s in sorted(proj[c], key=(lambda x : x[0]))]\n pal_sample = sns.color_palette(\"YlGn\", 10)\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor, key=sortchr)}\n col_colors = {}\n row_colors = {}\n am = set()\n de = set()\n\n data = []\n for c in sorted([i for i in clones]):\n for x, s in enumerate(pos):\n cn = sum(proj[s[0]][s[1]][c])\n data.append({'Clone' : c, 'Genome' : x, 'Amp-Del' : cn})\n if cn > base:\n am.add(cn)\n elif cn < base:\n de.add(cn)\n col_colors[x] = chr_colors[s[0]]\n row_colors[c] = {sam : pal_sample[min(9, int(round(props[sam][c] * 10)))] for sam in props}\n if len(am) == 0:\n am.add(base)\n if len(de) == 0:\n de.add(base)\n \n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='Amp-Del', columns=['Genome'], index=['Clone'], aggfunc='first')\n\n para = {}\n para['data'] = table\n para['cmap'] = 'coolwarm'\n para['center'] = base\n para['cbar_kws'] = {\"ticks\":range(min(de), max(am)+1)}\n para['yticklabels'] = True\n para['row_cluster'] = True\n para['xticklabels'] = False\n para['col_cluster'] = False\n para['method'] = args['linkage']\n para['metric'] = cndistance\n para['figsize'] = args['clusterfigsize']\n para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([dict(list({'index' : row}.items()) + list(row_colors[row].items())) for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef gridprofilesreduced(tumor, base, clones, props, args, out):\n proj = 
join(tumor, clones, args['clusterres'])\n red = reduction(proj, base)\n pos = [(c, s) for c in sorted(red, key=sortchr) for s in sorted(red[c], key=(lambda x : x[0]))]\n pal_sample = sns.color_palette(\"YlGn\", 10)\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor, key=sortchr)}\n col_colors = {}\n row_colors = {}\n\n data = []\n for c in sorted([i for i in clones]):\n for x, s in enumerate(pos):\n data.append({'Clone' : c, 'Genome' : x, 'Amp-Del' : red[s[0]][s[1]][c]})\n col_colors[x] = chr_colors[s[0]]\n row_colors[c] = {sam : pal_sample[min(9, int(round(props[sam][c] * 10)))] for sam in props}\n\n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='Amp-Del', columns=['Genome'], index=['Clone'], aggfunc='first')\n myColors = ('#67a9cf', '#f7f7f7', '#ef8a62')\n cmap = LinearSegmentedColormap.from_list('Custom', myColors, len(myColors))\n\n para = {}\n para['data'] = table\n para['cmap'] = cmap\n para['cbar_kws'] = {\"ticks\":[-1, 0, 1], \"boundaries\": np.linspace(-1, 1, 4)}\n para['yticklabels'] = True\n para['row_cluster'] = True\n para['xticklabels'] = False\n para['col_cluster'] = False\n para['method'] = args['linkage']\n para['metric'] = cndistance\n para['figsize'] = args['clusterfigsize']\n para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([dict(list({'index' : row}.items()) + list(row_colors[row].items())) for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef gridmixtures(tumor, base, clones, props, args, out):\n projp = join(tumor, clones, args['clusterres'])\n redp = reduction(projp, base)\n pos = [(c, s) for c in sorted(redp, key=sortchr) for s in sorted(redp[c], key=(lambda x : x[0]))]\n pal_clone = sns.color_palette(\"YlGn\", 10)\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor, key=sortchr)}\n col_colors = {}\n row_colors = {}\n\n data = []\n for p in props:\n sumu = (lambda cns, cn : sum(float(props[p][i]) for i in cns if cns[i] == cn))\n mergep = {c : {s : {cn : sumu(redp[c][s], cn) for cn in set(redp[c][s].values())} for s in redp[c]} for c in redp}\n assert False not in set(0.99 <= sum(mergep[c][s][cn] for cn in mergep[c][s]) <= 1.01 for c in mergep for s in mergep[c])\n for x, s in enumerate(pos):\n value = sum(mergep[s[0]][s[1]][cn] * cn for cn in mergep[s[0]][s[1]])\n data.append({'Sample' : p, 'Genome' : x, 'value' : value})\n col_colors[x] = chr_colors[s[0]]\n row_colors[p] = {i : pal_clone[min(9, int(round(props[p][i] * 10)))] for i in clones}\n\n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='value', columns=['Genome'], index=['Sample'], aggfunc='first')\n mapchr = cycle(['#525252', '#969696', '#cccccc'])\n palchr = {c : next(mapchr) for c in sorted(tumor, key=sortchr)}\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n para = {}\n para['data'] = table\n para['cmap'] = \"coolwarm\"#\"RdBu_r\"\n #para['cbar_kws'] = {\"ticks\":[-2, -1, 0, 1, 2], \"boundaries\": np.linspace(-2, 2, 6)}\n para['yticklabels'] = True\n para['row_cluster'] = True\n para['xticklabels'] = False\n para['col_cluster'] = False\n para['method'] = args['linkage']\n para['metric'] = similaritysample\n para['figsize'] = args['clusterfigsize']\n para['col_colors'] = pd.DataFrame([{'index' : s, 
'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([dict(list({'index' : row}.items()) + list(row_colors[row].items())) for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef subclonal(tumor, base, clones, props, args, out):\n assert base in {2, 4}\n abase = 1 if base == 2 else 2\n proj = join(tumor, clones, args['clusterres'])\n pos = [(c, s) for c in sorted(proj, key=sortchr) for s in sorted(proj[c], key=(lambda x : x[0]))]\n pal_clone = sns.color_palette(\"YlGn\", 10)\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor, key=sortchr)}\n col_colors = {}\n row_colors = {}\n\n data = []\n for p in props:\n for x, s in enumerate(pos):\n cns = set(proj[s[0]][s[1]][i] for i in proj[s[0]][s[1]] if i != 'normal')\n merge = {cn : sum(props[p][i] for i in proj[s[0]][s[1]] if proj[s[0]][s[1]][i] == cn and i != 'normal') for cn in cns}\n cns = set(cn for cn in cns if merge[cn] >= args['minu'])\n if cns == {(abase, abase)}:\n value = 0\n elif False not in set(n[0] >= abase and n[1] >= abase for n in cns):\n if len(cns) == 1:\n value = 2\n else:\n value = 1\n elif False not in set(n[0] <= abase and n[1] <= abase for n in cns):\n if len(cns) == 1:\n value = -2\n else:\n value = -1\n else:\n if len(cns) == 1:\n value = 4\n else:\n value = 3\n data.append({'Sample' : p, 'Genome' : x, 'value' : value})\n col_colors[x] = chr_colors[s[0]]\n row_colors[p] = {i : pal_clone[min(9, int(round(props[p][i] * 10)))] for i in clones}\n\n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='value', columns=['Genome'], index=['Sample'], aggfunc='first')\n myColors = ('#92c5de', '#0571b0', '#f7f7f7', '#ca0020', '#f4a582', '#7b3294', '#c2a5cf')\n cmap = LinearSegmentedColormap.from_list('Custom', myColors, len(myColors))\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n para = {}\n para['data'] = table\n para['cmap'] = cmap\n labels = ['Clonal deletion', 'Subclonal deletion', 'Neutral', 'Subclonal amplification', 'Clonal amplification', 'Sublonal mix', 'Clonal mix']\n para['cbar_kws'] = {\"ticks\":[-2, -1, 0, 1, 2, 3, 4], \"boundaries\": np.linspace(-2, 4, 8)}\n para['yticklabels'] = True\n para['row_cluster'] = True\n para['xticklabels'] = False\n para['col_cluster'] = False\n para['method'] = args['linkage']\n para['metric'] = similarity\n para['figsize'] = args['clusterfigsize']\n para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([dict(list({'index' : row}.items()) + list(row_colors[row].items())) for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n cax = plt.gcf().axes[-1]\n cax.set_yticklabels(labels)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef reduction(proj, base):\n classify = (lambda c, i : 0 if c == base or i == 'normal' else 1 if c > base else -1)\n reduction = {c : {s : {i : classify(sum(proj[c][s][i]), i) for i in proj[c][s]} for s in proj[c]} for c in proj}\n assert False not in set(reduction[c][s]['normal'] == 0 for c in proj for s in proj[c])\n return reduction\n\n\ndef join(tumor, clones, resolution):\n proj = {}\n for c in tumor:\n bins = sorted(list(tumor[c]), key=(lambda x : x[0]))\n proj[c] = {}\n while bins:\n tmp = 
bins[:resolution]\n counts = {i : dict(Counter([tumor[c][s][i] for s in tmp])) for i in clones}\n proj[c][tmp[0][0], tmp[-1][1]] = {i : max(counts[i], key=(lambda x : counts[i][x])) for i in clones}\n bins = bins[resolution:]\n return proj\n\n\ndef multiple(tumor, clones, props, base, args):\n sys.stderr.write(log(\"# Uniforming bin segmention across all patients\\n\"))\n tumor = segmenting(tumor, clones, props)\n\n out = 'intertumors-profilesfull.pdf'\n sys.stderr.write(log(\"# Plotting inter-tumors clone profiles in {}\\n\".format(os.path.join(args['rundir'], out))))\n intergridfullprofiles(tumor, base, clones, props, args, out)\n\n out = 'intertumors-profilesreduced.pdf'\n sys.stderr.write(log(\"# Plotting inter-tumors reduced-clone profiles in {}\\n\".format(os.path.join(args['rundir'], out))))\n intergridreducedprofiles(tumor, base, clones, props, args, out)\n\n out = 'intertumors-mixtures.pdf'\n sys.stderr.write(log(\"# Plotting inter-tumors mixtures in {}\\n\".format(os.path.join(args['rundir'], out))))\n intergridsamplesclusters(tumor, base, clones, props, args, out)\n\n out = 'intertumors-subclonality.pdf'\n sys.stderr.write(log(\"# Plotting inter-tumors subclonality in {}\\n\".format(os.path.join(args['rundir'], out))))\n intergridsubclonality(tumor, base, clones, props, args, out)\n\n\ndef intergridfullprofiles(tumor, base, clones, props, args, out):\n proj = interjoin(tumor, clones, args['clusterres'])\n t = list(proj)[0]\n pos = [(c, s) for c in sorted(proj[t], key=sortchr) for s in sorted(proj[t][c], key=(lambda x : x[0]))]\n palette = cycle(sns.color_palette(\"Pastel1\", min(10, len(list(tumor)))))\n pat_colors = {pat : next(palette) for pat in tumor}\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor[list(tumor)[0]], key=sortchr)}\n col_colors = {}\n row_colors = {}\n\n data = []\n for pat1 in proj:\n for c1 in [t for t in clones[pat1] if t != 'normal']:\n for x, s in enumerate(pos):\n data.append({'Patient clone' : '{}:{}'.format(pat1, c1), 'Genome' : x, 'value' : sum(proj[pat1][s[0]][s[1]][c1])})\n col_colors[x] = chr_colors[s[0]]\n row_colors['{}:{}'.format(pat1, c1)] = pat_colors[pat1]\n\n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='value', columns=['Genome'], index=['Patient clone'], aggfunc='first')\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n para = {}\n para['data'] = table\n para['cmap'] = \"coolwarm\"\n para['center'] = 2\n para['xticklabels'] = True\n para['yticklabels'] = True\n para['xticklabels'] = False\n para['row_cluster'] = False\n para['col_cluster'] = False\n para['cbar_kws'] = {'label' : \"Total copy number\"}\n para['figsize'] = args['clusterfigsize']\n para['method'] = args['linkage']\n para['metric'] = cndistance\n para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([{'index' : row, 'patient' : row_colors[row]} for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef intergridreducedprofiles(tumor, base, clones, props, args, out):\n proj = interjoin(tumor, clones, args['clusterres'])\n red = interreduction(proj, base)\n t = list(red)[0]\n pos = [(c, s) for c in sorted(red[t], key=sortchr) for s in sorted(red[t][c], key=(lambda x : x[0]))]\n palette = cycle(sns.color_palette(\"Pastel1\", min(9, len(list(tumor)))))\n 
pat_colors = {pat : next(palette) for pat in tumor}\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor[list(tumor)[0]], key=sortchr)}\n col_colors = {}\n row_colors = {}\n\n data = []\n for pat1 in red:\n for c1 in [t for t in clones[pat1] if t != 'normal']:\n for x, s in enumerate(pos):\n data.append({'Patient clone' : '{}:{}'.format(pat1, c1), 'Genome' : x, 'value' : red[pat1][s[0]][s[1]][c1]})\n col_colors[x] = chr_colors[s[0]]\n row_colors['{}:{}'.format(pat1, c1)] = pat_colors[pat1]\n\n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='value', columns=['Genome'], index=['Patient clone'], aggfunc='first')\n mapchr = cycle(['#525252', '#969696', '#cccccc'])\n palchr = {c : next(mapchr) for c in sorted(tumor[list(tumor)[0]], key=sortchr)}\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n para = {}\n para['data'] = table\n para['cmap'] = \"coolwarm\"\n para['xticklabels'] = True\n para['yticklabels'] = True\n para['xticklabels'] = False\n para['col_cluster'] = False\n para['figsize'] = args['clusterfigsize']\n para['cbar_kws'] = {\"ticks\":[-1, 0, 1], \"boundaries\": np.linspace(-1, 1, 4)}\n para['method'] = args['linkage']\n para['metric'] = similarity\n para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([{'index' : row, 'patient' : row_colors[row]} for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef intergridsamplesclusters(tumor, base, clones, props, args, out):\n data = []\n proj = interjoin(tumor, clones, args['clusterres'])\n red = interreduction(proj, base)\n t = list(red)[0]\n pos = [(c, s) for c in sorted(red[t], key=sortchr) for s in sorted(red[t][c], key=(lambda x : x[0]))]\n palette = cycle(sns.color_palette(\"Pastel1\", min(9, len(list(tumor)))))\n pat_colors = {pat : next(palette) for pat in tumor}\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor[list(tumor)[0]], key=sortchr)}\n col_colors = {}\n row_colors = {}\n\n for pat in tumor:\n for p in props[pat]:\n sumu = (lambda cns, cn : sum(float(props[pat][p][i]) if i in props[pat][p] else 0.0 for i in cns if cns[i] == cn))\n mergep = {c : {s : {cn : sumu(red[pat][c][s], cn) for cn in set(red[pat][c][s].values())} for s in red[pat][c]} for c in red[pat]}\n assert False not in set(0.99 <= sum(mergep[c][s][cn] for cn in mergep[c][s]) <= 1.01 for c in mergep for s in mergep[c])\n\n for x, s in enumerate(pos):\n value = sum(mergep[s[0]][s[1]][cn] * cn for cn in mergep[s[0]][s[1]])\n data.append({'Patient sample' : '{}-{}'.format(pat, p), 'Genome' : x, 'value' : value})\n col_colors[x] = chr_colors[s[0]]\n row_colors['{}-{}'.format(pat, p)] = pat_colors[pat]\n\n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='value', columns=['Genome'], index=['Patient sample'], aggfunc='first')\n mapchr = cycle(['#525252', '#969696', '#cccccc'])\n palchr = {c : next(mapchr) for c in sorted(tumor[list(tumor)[0]], key=sortchr)}\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n para = {}\n para['data'] = table\n para['cmap'] = \"coolwarm\"\n para['xticklabels'] = True\n para['yticklabels'] = True\n para['xticklabels'] = False\n para['col_cluster'] = False\n para['figsize'] = args['clusterfigsize']\n para['method'] = 
args['linkage']\n para['metric'] = similaritysample\n para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([{'index' : row, 'patient' : row_colors[row]} for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef intergridsubclonality(tumor, base, clones, props, args, out):\n data = []\n proj = interjoin(tumor, clones, args['clusterres'])\n t = list(proj)[0]\n pos = [(c, s) for c in sorted(proj[t], key=sortchr) for s in sorted(proj[t][c], key=(lambda x : x[0]))]\n palette = cycle(sns.color_palette(\"Pastel1\", min(9, len(list(tumor)))))\n pat_colors = {pat : next(palette) for pat in tumor}\n palette = cycle(['#525252', '#969696', '#cccccc'])\n chr_colors = {c : next(palette) for c in sorted(tumor[list(tumor)[0]], key=sortchr)}\n col_colors = {}\n row_colors = {}\n\n for pat in tumor:\n assert base[pat] == 2 or base[pat] == 4\n abase = 1 if base[pat] == 2 else 2\n for p in props[pat]:\n for x, s in enumerate(pos):\n cns = set(proj[pat][s[0]][s[1]][i] for i in proj[pat][s[0]][s[1]] if i != 'normal')\n merge = {cn : sum(props[pat][p][i] for i in proj[pat][s[0]][s[1]] if proj[pat][s[0]][s[1]][i] == cn and i != 'normal') for cn in cns}\n cns = set(cn for cn in cns if merge[cn] >= args['minu'])\n if cns == {(abase, abase)}:\n value = 0\n elif False not in set(n[0] >= abase and n[1] >= abase for n in cns):\n if len(cns) == 1:\n value = 2\n else:\n value = 1\n elif False not in set(n[0] <= abase and n[1] <= abase for n in cns):\n if len(cns) == 1:\n value = -2\n else:\n value = -1\n else:\n if len(cns) == 1:\n value = 4\n else:\n value = 3\n data.append({'Patient sample' : '{}-{}'.format(pat, p), 'Genome' : x, 'value' : value})\n col_colors[x] = chr_colors[s[0]]\n row_colors['{}-{}'.format(pat, p)] = pat_colors[pat]\n\n df = pd.DataFrame(data)\n table = pd.pivot_table(df, values='value', columns=['Genome'], index=['Patient sample'], aggfunc='first')\n myColors = ('#92c5de', '#0571b0', '#f7f7f7', '#ca0020', '#f4a582', '#7b3294', '#c2a5cf')\n cmap = LinearSegmentedColormap.from_list('Custom', myColors, len(myColors))\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n para = {}\n para['data'] = table\n para['cmap'] = cmap\n labels = ['Clonal deletion', 'Subclonal deletion', 'Neutral', 'Subclonal amplification', 'Clonal amplification', 'Sublonal mix', 'Clonal mix']\n para['cbar_kws'] = {\"ticks\":[-2, -1, 0, 1, 2, 3, 4], \"boundaries\": np.linspace(-2, 4, 8)}\n para['xticklabels'] = True\n para['yticklabels'] = True\n para['xticklabels'] = False\n para['col_cluster'] = False\n para['figsize'] = args['clusterfigsize']\n para['method'] = args['linkage']\n para['metric'] = similaritysample\n para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : col_colors[s]} for s in table.columns]).set_index('index')\n para['row_colors'] = pd.DataFrame([{'index' : row, 'patient' : row_colors[row]} for row in table.index]).set_index('index')\n g = sns.clustermap(**para)\n cax = plt.gcf().axes[-1]\n cax.set_yticklabels(labels)\n\n addchr(g, pos)\n plt.savefig(os.path.join(args['rundir'], out), bbox_inches='tight')\n plt.close()\n\n\ndef segmenting(tumor, clones, props):\n numpat = len(tumor)\n joint = {}\n cbk = (lambda c : set(b for pat in tumor for s in tumor[pat][c] for b in s))\n bk = {c : cbk(c) for c in set(c for pat in tumor for c in tumor[pat])}\n bk = 
{c : sorted(bk[c]) for c in bk}\n counts = {c : {b : 0 for b in zip(bk[c][:-1], bk[c][1:])} for c in bk}\n maps = {pat : {c : {b : None for b in counts[c]} for c in counts} for pat in tumor}\n\n def select(d, m, counts, breakpoints, numpat):\n for c in d:\n bk = deque(breakpoints[c])\n left = -1\n right = bk.popleft()\n for (l, r) in sorted(d[c], key=(lambda x : x[0])):\n while right != r:\n left = right\n right = bk.popleft()\n if l <= left and right <= r:\n counts[c][left, right] += 1\n assert counts[c][left, right] <= numpat\n m[c][left, right] = (l, r)\n\n for pat in tumor:\n select(tumor[pat], maps[pat], counts, bk, numpat)\n\n taken = {c : set(b for b in counts[c] if counts[c][b] == numpat) for c in counts}\n discarded = {c : set(b for b in counts[c] if counts[c][b] < numpat) for c in counts}\n tottaken = sum(1.0 for c in counts for b in counts[c] if counts[c][b] == numpat)\n tot = sum(1.0 for c in counts for b in counts[c])\n sys.stderr.write(info(\"## Proportion of common bins kept: {}%\\n\".format(tottaken / tot * 100)))\n\n return {pat : {c : {b : tumor[pat][c][maps[pat][c][b]] for b in taken[c]} for c in taken} for pat in tumor}\n\n\ndef interreduction(proj, base):\n classify = (lambda c, pat, i : 0 if c == base[pat] or i == 'normal' else 1 if c > base[pat] else -1)\n reduction = {pat : {c : {s : {i : classify(sum(proj[pat][c][s][i]), pat, i) for i in proj[pat][c][s]} for s in proj[pat][c]} for c in proj[pat]} for pat in proj}\n assert False not in set(reduction[pat][c][s]['normal'] == 0 for pat in proj for c in proj[pat] for s in proj[pat][c])\n return reduction\n\n\ndef interjoin(tumor, clones, resolution):\n proj = {}\n for pat in tumor:\n proj[pat] = {}\n for c in tumor[pat]:\n bins = sorted(list(tumor[pat][c]), key=(lambda x : x[0]))\n proj[pat][c] = {}\n while bins:\n tmp = bins[:resolution]\n counts = {i : dict(Counter([tumor[pat][c][s][i] for s in tmp])) for i in clones[pat]}\n proj[pat][c][tmp[0][0], tmp[-1][1]] = {i : max(counts[i], key=(lambda x : counts[i][x])) for i in clones[pat]}\n bins = bins[resolution:]\n return proj\n\n\ndef cndistance(u, v):\n diff = list(u - v)\n amps = [abs(x) if x > 0 else 0 for x in diff]\n dels = [abs(x) if x < 0 else 0 for x in diff]\n dist = sum(max(amps[i] - amps[i - 1], 0) for i, x in enumerate(amps))\n dist += sum(max(dels[i] - dels[i - 1], 0) for i, x in enumerate(dels))\n return dist\n\n\ndef similarity(u, v):\n return float(sum(u[i] == v[i] and u[i] != 0.0 and v[i] != 0 for i in range(len(u)))) / float(sum(u[i] != 0 or v[i] != 0 for i in range(len(u))))\n\ndef similaritysample(u, v):\n bothamp = (lambda x, y : x > 0.0 and y > 0.0)\n bothdel = (lambda x, y : x < 0.0 and y < 0.0)\n return float(sum((bothamp(u[i], v[i]) or bothdel(u[i], v[i])) and u[i] != 0 and v[i] != 0 for i in range(len(u)))) / float(sum(u[i] != 0 or v[i] != 0 for i in range(len(u))))\n\ndef readUCN(inputs, patnames):\n tumors = {}\n samples = {}\n clones = {}\n props = {}\n for fil in inputs:\n sys.stderr.write(info(\"## Reading {} as {}...\\n\".format(fil, patnames[fil])))\n with open(fil, 'r') as f:\n patient = patnames[fil]\n tumors[patient] = {}\n samples[patient] = set()\n clones[patient] = []\n props[patient] = {}\n for line in f:\n if len(clones[patient]) == 0:\n assert line[0] == '#'\n clones[patient] = [f.split('_')[1] for i,f in enumerate(line.strip().split()[11:]) if i%2==0]\n if 'normal' not in clones[patient]:\n raise ValueError(error('normal is not present as a clone in {}'.format(patient)))\n else:\n if len(line) > 1 and line[0] != '#':\n parsed = 
line.strip().split()\n sample = parsed[3]\n samples[patient].add(sample)\n chro = parsed[0]\n if chro not in tumors[patient]:\n tumors[patient][chro] = {}\n start = int(parsed[1])\n end = int(parsed[2])\n pair = (lambda l : [(tuple(map(int, e.split('|'))), float(l[i+1])) for i, e in enumerate(l) if i%2==0])\n read = {clones[patient][i] : e[0] for i, e in enumerate(pair(parsed[11:]))}\n if (start, end) not in tumors[patient][chro]:\n tumors[patient][chro][start, end] = read\n else:\n for i in read:\n read[i] == tumors[patient][chro][start, end][i]\n check = {clones[patient][i] : e[1] for i, e in enumerate(pair(parsed[11:]))}\n if sample in props[patient]:\n for i in clones[patient]:\n assert check[i] == props[patient][sample][i]\n else:\n props[patient][sample] = {i : check[i] for i in clones[patient]}\n assert 0.999 <= sum(check[i] for i in clones[patient]) <= 1.001\n\n return tumors, clones, props\n\n\ndef addchrplt(pos):\n corners = []\n prev = 0\n val = pos[0][0]\n for x, s in enumerate(pos):\n if x != 0 and pos[x-1][0] != pos[x][0]:\n corners.append((prev, x, val))\n prev = x\n val = s[0]\n corners.append((prev, x, val))\n ticks = [(int(float(o[1] + o[0] + 1) / 2.0), o[2]) for o in corners]\n plt.xticks([x[0] for x in ticks], [x[1] for x in ticks], rotation=45, ha='center')\n plt.yticks(rotation=0)\n\n\ndef addchr(g, pos, color=None):\n corners = []\n prev = 0\n for x, s in enumerate(pos):\n if x != 0 and pos[x-1][0] != pos[x][0]:\n corners.append((prev, x))\n prev = x\n corners.append((prev, x))\n ax = g.ax_heatmap\n ticks = []\n for o in corners:\n ax.set_xticks(np.append(ax.get_xticks(), int(float(o[1] + o[0] + 1) / 2.0)))\n ticks.append(pos[o[0]][0])\n ax.set_xticklabels(ticks, rotation=45, ha='center')\n ax.set_yticklabels(ax.get_yticklabels(), rotation=0)\n\n\ndef sortchr(x):\n return int(''.join([d for d in x if d.isdigit()]))\n\ndef argmax(d):\n return max(d, key=(lambda x : d[x]))\n\ndef argmin(d):\n return min(d, key=(lambda x : d[x]))\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\ndef error(msg):\n return \"{}{}{}\".format(\"\\033[91m\\033[1m\", msg, \"\\033[0m\")\n\ndef warning(msg):\n return \"{}{}{}\".format(\"\\033[93m\\033[1m\", msg, \"\\033[0m\")\n\ndef log(msg):\n return \"{}{}{}\".format(\"\\033[95m\\033[1m\", msg, \"\\033[0m\")\n\ndef info(msg):\n return \"{}{}{}\".format(\"\\033[96m\", msg, \"\\033[0m\")\n\ndef debug(msg):\n return \"{}{}{}\".format(\"\\033[92m\", msg, \"\\033[0m\")\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.yticks", "numpy.linspace", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "matplotlib.use", "pandas.DataFrame", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.close", "pandas.pivot_table", "matplotlib.pyplot.style.use", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
asbe/PoseCNN
[ "0dc7f4f1d63908a43d5afc1ac4cf327ae88c658c" ]
[ "lib/datasets/ycb.py" ]
[ "__author__ = 'yuxiang'\n\nimport os\nimport datasets\nimport datasets.ycb\nimport datasets.imdb\nimport pickle\nimport numpy as np\nimport cv2\nfrom fcn.config import cfg\nfrom utils.pose_error import *\nfrom transforms3d.quaternions import quat2mat, mat2quat\n\nclass ycb(datasets.imdb):\n def __init__(self, image_set, ycb_path = None):\n datasets.imdb.__init__(self, 'ycb_' + image_set)\n self._image_set = image_set\n self._ycb_path = self._get_default_path() if ycb_path is None \\\n else ycb_path\n self._data_path = os.path.join(self._ycb_path, 'data')\n\n self._classes = ('__background__', '002_master_chef_can', '003_cracker_box', '004_sugar_box', '005_tomato_soup_can', '006_mustard_bottle', \\\n '007_tuna_fish_can', '008_pudding_box', '009_gelatin_box', '010_potted_meat_can', '011_banana', '019_pitcher_base', \\\n '021_bleach_cleanser', '024_bowl', '025_mug', '035_power_drill', '036_wood_block', '037_scissors', '040_large_marker', \\\n '051_large_clamp', '052_extra_large_clamp', '061_foam_brick')\n\n self._class_colors = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), \\\n (128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0), (128, 0, 128), (0, 128, 128), \\\n (64, 0, 0), (0, 64, 0), (0, 0, 64), (64, 64, 0), (64, 0, 64), (0, 64, 64), \n (192, 0, 0), (0, 192, 0), (0, 0, 192)]\n\n self._class_weights = [1, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]\n self._symmetry = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1])\n self._points, self._points_all = self._load_object_points()\n self._extents = self._load_object_extents()\n\n self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))\n self._image_ext = '.png'\n self._image_index = self._load_image_set_index()\n self._roidb_handler = self.gt_roidb\n\n assert os.path.exists(self._ycb_path), \\\n 'ycb path does not exist: {}'.format(self._ycb_path)\n assert os.path.exists(self._data_path), \\\n 'Data path does not exist: {}'.format(self._data_path)\n\n # image\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self.image_index[i])\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n\n image_path = os.path.join(self._data_path, index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n # depth\n def depth_path_at(self, i):\n \"\"\"\n Return the absolute path to depth i in the image sequence.\n \"\"\"\n return self.depth_path_from_index(self.image_index[i])\n\n def depth_path_from_index(self, index):\n \"\"\"\n Construct an depth path from the image's \"index\" identifier.\n \"\"\"\n depth_path = os.path.join(self._data_path, index + '.depth' + self._image_ext)\n assert os.path.exists(depth_path), \\\n 'Path does not exist: {}'.format(depth_path)\n return depth_path\n\n # label\n def label_path_at(self, i):\n \"\"\"\n Return the absolute path to metadata i in the image sequence.\n \"\"\"\n return self.label_path_from_index(self.image_index[i])\n\n def label_path_from_index(self, index):\n \"\"\"\n Construct an metadata path from the image's \"index\" identifier.\n \"\"\"\n label_path = os.path.join(self._data_path, index + '-label' + self._image_ext)\n assert os.path.exists(label_path), \\\n 'Path does not exist: 
{}'.format(label_path)\n return label_path\n\n # camera pose\n def metadata_path_at(self, i):\n \"\"\"\n Return the absolute path to metadata i in the image sequence.\n \"\"\"\n return self.metadata_path_from_index(self.image_index[i])\n\n def metadata_path_from_index(self, index):\n \"\"\"\n Construct an metadata path from the image's \"index\" identifier.\n \"\"\"\n metadata_path = os.path.join(self._data_path, index + '-meta.mat')\n assert os.path.exists(metadata_path), \\\n 'Path does not exist: {}'.format(metadata_path)\n return metadata_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n image_set_file = os.path.join(self._ycb_path, self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n\n with open(image_set_file) as f:\n image_index = [x.rstrip('\\n') for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where KITTI is expected to be installed.\n \"\"\"\n return os.path.join(datasets.ROOT_DIR, 'data', 'YCB')\n\n\n def _load_object_points(self):\n\n points = [[] for _ in range(len(self._classes))]\n num = np.inf\n\n for i in range(1, len(self._classes)):\n point_file = os.path.join(self._ycb_path, 'models', self._classes[i], 'points.xyz')\n print(point_file)\n assert os.path.exists(point_file), 'Path does not exist: {}'.format(point_file)\n points[i] = np.loadtxt(point_file)\n if points[i].shape[0] < num:\n num = points[i].shape[0]\n\n points_all = np.zeros((self.num_classes, num, 3), dtype=np.float32)\n for i in range(1, len(self._classes)):\n points_all[i, :, :] = points[i][:num, :]\n\n return points, points_all\n\n\n def _load_object_extents(self):\n\n extent_file = os.path.join(self._ycb_path, 'extents.txt')\n assert os.path.exists(extent_file), \\\n 'Path does not exist: {}'.format(extent_file)\n\n extents = np.zeros((self.num_classes, 3), dtype=np.float32)\n extents[1:, :] = np.loadtxt(extent_file)\n\n return extents\n\n\n def compute_class_weights(self):\n\n print('computing class weights')\n num_classes = self.num_classes\n count = np.zeros((num_classes,), dtype=np.int64)\n k = 0\n while k < len(self.image_index):\n index = self.image_index[k]\n # label path\n label_path = self.label_path_from_index(index)\n im = cv2.imread(label_path, cv2.IMREAD_UNCHANGED)\n for i in range(num_classes):\n I = np.where(im == i)\n count[i] += len(I[0])\n k += 100\n\n count[0] = 0\n max_count = np.amax(count)\n\n for i in range(num_classes):\n if i == 0:\n self._class_weights[i] = 1\n else:\n self._class_weights[i] = min(2 * float(max_count) / float(count[i]), 10.0)\n print(self._classes[i], self._class_weights[i])\n\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = pickle.load(fid)\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n print('class weights: ', roidb[0]['class_weights'])\n return roidb\n\n # self.compute_class_weights()\n\n gt_roidb = [self._load_ycb_annotation(index)\n for index in self.image_index]\n\n with open(cache_file, 'wb') as fid:\n pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n\n def 
_load_ycb_annotation(self, index):\n \"\"\"\n Load class name and meta data\n \"\"\"\n # image path\n image_path = self.image_path_from_index(index)\n\n # depth path\n depth_path = self.depth_path_from_index(index)\n\n # label path\n label_path = self.label_path_from_index(index)\n\n # metadata path\n metadata_path = self.metadata_path_from_index(index)\n \n return {'image': image_path,\n 'depth': depth_path,\n 'label': label_path,\n 'meta_data': metadata_path,\n 'class_colors': self._class_colors,\n 'class_weights': self._class_weights,\n 'cls_index': -1,\n 'flipped': False}\n\n def _process_label_image(self, label_image):\n \"\"\"\n change label image to label index\n \"\"\"\n class_colors = self._class_colors\n width = label_image.shape[1]\n height = label_image.shape[0]\n label_index = np.zeros((height, width), dtype=np.float32)\n\n # label image is in BGR order\n index = label_image[:,:,2] + 256*label_image[:,:,1] + 256*256*label_image[:,:,0]\n for i in range(len(class_colors)):\n color = class_colors[i]\n ind = color[0] + 256*color[1] + 256*256*color[2]\n I = np.where(index == ind)\n label_index[I] = i\n\n return label_index\n\n\n def labels_to_image(self, im, labels):\n class_colors = self._class_colors\n height = labels.shape[0]\n width = labels.shape[1]\n image_r = np.zeros((height, width), dtype=np.float32)\n image_g = np.zeros((height, width), dtype=np.float32)\n image_b = np.zeros((height, width), dtype=np.float32)\n\n for i in range(len(class_colors)):\n color = class_colors[i]\n I = np.where(labels == i)\n image_r[I] = color[0]\n image_g[I] = color[1]\n image_b[I] = color[2]\n\n image = np.stack((image_r, image_g, image_b), axis=-1)\n\n return image.astype(np.uint8)\n\n\n def evaluate_result(self, im_ind, segmentation, gt_labels, meta_data, output_dir):\n\n # make matlab result dir\n import scipy.io\n mat_dir = os.path.join(output_dir, 'mat')\n if not os.path.exists(mat_dir):\n os.makedirs(mat_dir)\n\n # evaluate segmentation\n n_cl = self.num_classes\n hist = np.zeros((n_cl, n_cl))\n\n gt_labels = gt_labels.astype(np.float32)\n sg_labels = segmentation['labels']\n hist += self.fast_hist(gt_labels.flatten(), sg_labels.flatten(), n_cl)\n\n # per-class IU\n print('per-class segmentation IoU')\n intersection = np.diag(hist)\n union = hist.sum(1) + hist.sum(0) - np.diag(hist)\n index = np.where(union > 0)[0]\n for i in range(len(index)):\n ind = index[i]\n print('{} {}'.format(self._classes[ind], intersection[ind] / union[ind]))\n\n # evaluate pose\n if cfg.TEST.POSE_REG:\n rois = segmentation['rois']\n poses = segmentation['poses']\n poses_new = segmentation['poses_refined']\n poses_icp = segmentation['poses_icp']\n if cfg.TEST.VERTEX_REG_3D:\n rois_rgb = segmentation['rois_rgb']\n poses_rgb = segmentation['poses_rgb']\n\n # save matlab result\n if cfg.TEST.VERTEX_REG_2D:\n results = {'labels': sg_labels, 'rois': rois, 'poses': poses, 'poses_refined': poses_new, 'poses_icp': poses_icp}\n else:\n results = {'labels': sg_labels, 'rois_rgb': rois_rgb, 'poses_rgb': poses_rgb, 'rois': rois, 'poses': poses, 'poses_refined': poses_new, 'poses_icp': poses_icp}\n filename = os.path.join(mat_dir, '%04d.mat' % im_ind)\n print(filename)\n scipy.io.savemat(filename, results, do_compression=True)\n\n poses_gt = meta_data['poses']\n if len(poses_gt.shape) == 2:\n poses_gt = np.reshape(poses_gt, (3, 4, 1))\n num = poses_gt.shape[2]\n\n for j in range(num):\n if meta_data['cls_indexes'][j] <= 0:\n continue\n cls = self.classes[int(meta_data['cls_indexes'][j])]\n print(cls)\n print('gt pose')\n 
print(poses_gt[:, :, j])\n\n for k in range(rois.shape[0]):\n cls_index = int(rois[k, 1])\n if cls_index == meta_data['cls_indexes'][j]:\n\n print('estimated pose')\n RT = np.zeros((3, 4), dtype=np.float32)\n RT[:3, :3] = quat2mat(poses[k, :4])\n RT[:, 3] = poses[k, 4:7]\n print(RT)\n\n if cfg.TEST.POSE_REFINE:\n print('translation refined pose')\n RT_new = np.zeros((3, 4), dtype=np.float32)\n RT_new[:3, :3] = quat2mat(poses_new[k, :4])\n RT_new[:, 3] = poses_new[k, 4:7]\n print(RT_new)\n\n print('ICP refined pose')\n RT_icp = np.zeros((3, 4), dtype=np.float32)\n RT_icp[:3, :3] = quat2mat(poses_icp[k, :4])\n RT_icp[:, 3] = poses_icp[k, 4:7]\n print(RT_icp)\n\n error_rotation = re(RT[:3, :3], poses_gt[:3, :3, j])\n print('rotation error: {}'.format(error_rotation))\n\n error_translation = te(RT[:, 3], poses_gt[:, 3, j])\n print('translation error: {}'.format(error_translation))\n\n # compute pose error\n if cls == '024_bowl' or cls == '036_wood_block' or cls == '061_foam_brick':\n error = adi(RT[:3, :3], RT[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n else:\n error = add(RT[:3, :3], RT[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n print('error: {}'.format(error))\n\n if cfg.TEST.POSE_REFINE:\n error_rotation_new = re(RT_new[:3, :3], poses_gt[:3, :3, j])\n print('rotation error new: {}'.format(error_rotation_new))\n\n error_translation_new = te(RT_new[:, 3], poses_gt[:, 3, j])\n print('translation error new: {}'.format(error_translation_new))\n\n if cls == '024_bowl' or cls == '036_wood_block' or cls == '061_foam_brick':\n error_new = adi(RT_new[:3, :3], RT_new[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n else:\n error_new = add(RT_new[:3, :3], RT_new[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n print('error new: {}'.format(error_new))\n\n error_rotation_icp = re(RT_icp[:3, :3], poses_gt[:3, :3, j])\n print('rotation error icp: {}'.format(error_rotation_icp))\n\n error_translation_icp = te(RT_icp[:, 3], poses_gt[:, 3, j])\n print('translation error icp: {}'.format(error_translation_icp))\n\n if cls == '024_bowl' or cls == '036_wood_block' or cls == '061_foam_brick':\n error_icp = adi(RT_icp[:3, :3], RT_icp[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n else:\n error_icp = add(RT_icp[:3, :3], RT_icp[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n print('error icp: {}'.format(error_icp))\n\n print('threshold: {}'.format(0.1 * np.linalg.norm(self._extents[cls_index, :])))\n \n\n def evaluate_segmentations(self, segmentations, output_dir):\n print('evaluating segmentations')\n # compute histogram\n n_cl = self.num_classes\n hist = np.zeros((n_cl, n_cl))\n\n # make image dir\n image_dir = os.path.join(output_dir, 'images')\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n\n # make matlab result dir\n import scipy.io\n mat_dir = os.path.join(output_dir, 'mat')\n if not os.path.exists(mat_dir):\n os.makedirs(mat_dir)\n\n count_all = np.zeros((self.num_classes,), dtype=np.float32)\n count_correct = np.zeros((self.num_classes,), dtype=np.float32)\n count_correct_refined = np.zeros((self.num_classes,), dtype=np.float32)\n count_correct_icp = np.zeros((self.num_classes,), dtype=np.float32)\n threshold = np.zeros((self.num_classes,), dtype=np.float32)\n for i in range(self.num_classes):\n threshold[i] = 0.1 * np.linalg.norm(self._extents[i, :])\n\n # for each image\n for im_ind, index in enumerate(self.image_index):\n # read 
ground truth labels\n im = cv2.imread(self.label_path_from_index(index), cv2.IMREAD_UNCHANGED)\n gt_labels = im.astype(np.float32)\n\n # predicated labels\n sg_labels = segmentations[im_ind]['labels']\n hist += self.fast_hist(gt_labels.flatten(), sg_labels.flatten(), n_cl)\n\n # evaluate pose\n if cfg.TEST.POSE_REG:\n # load meta data\n meta_data = scipy.io.loadmat(self.metadata_path_from_index(index))\n \n rois = segmentations[im_ind]['rois']\n poses = segmentations[im_ind]['poses']\n poses_new = segmentations[im_ind]['poses_refined']\n poses_icp = segmentations[im_ind]['poses_icp']\n\n '''\n # save matlab result\n results = {'labels': sg_labels, 'rois': rois, 'poses': poses, 'poses_refined': poses_new, 'poses_icp': poses_icp}\n filename = os.path.join(mat_dir, '%04d.mat' % im_ind)\n print filename\n scipy.io.savemat(filename, results, do_compression=True)\n '''\n\n poses_gt = meta_data['poses']\n if len(poses_gt.shape) == 2:\n poses_gt = np.reshape(poses_gt, (3, 4, 1))\n num = poses_gt.shape[2]\n\n for j in range(num):\n if meta_data['cls_indexes'][j] <= 0:\n continue\n cls = self.classes[int(meta_data['cls_indexes'][j])]\n count_all[int(meta_data['cls_indexes'][j])] += 1\n \n for k in range(rois.shape[0]):\n cls_index = int(rois[k, 1])\n if cls_index == meta_data['cls_indexes'][j]:\n\n RT = np.zeros((3, 4), dtype=np.float32)\n RT[:3, :3] = quat2mat(poses[k, :4])\n RT[:, 3] = poses[k, 4:7]\n\n if cfg.TEST.POSE_REFINE:\n RT_new = np.zeros((3, 4), dtype=np.float32)\n RT_new[:3, :3] = quat2mat(poses_new[k, :4])\n RT_new[:, 3] = poses_new[k, 4:7]\n\n RT_icp = np.zeros((3, 4), dtype=np.float32)\n RT_icp[:3, :3] = quat2mat(poses_icp[k, :4])\n RT_icp[:, 3] = poses_icp[k, 4:7]\n\n error_rotation = re(RT[:3, :3], poses_gt[:3, :3, j])\n error_translation = te(RT[:, 3], poses_gt[:, 3, j])\n if cls == '024_bowl' or cls == '036_wood_block' or cls == '061_foam_brick':\n error = adi(RT[:3, :3], RT[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n else:\n error = add(RT[:3, :3], RT[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n\n if error < threshold[cls_index]:\n count_correct[cls_index] += 1\n\n if cfg.TEST.POSE_REFINE:\n error_rotation_new = re(RT_new[:3, :3], poses_gt[:3, :3, j])\n error_translation_new = te(RT_new[:, 3], poses_gt[:, 3, j])\n if cls == '024_bowl' or cls == '036_wood_block' or cls == '061_foam_brick':\n error_new = adi(RT_new[:3, :3], RT_new[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n else:\n error_new = add(RT_new[:3, :3], RT_new[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n\n if error_new < threshold[cls_index]:\n count_correct_refined[cls_index] += 1\n\n error_rotation_icp = re(RT_icp[:3, :3], poses_gt[:3, :3, j])\n error_translation_icp = te(RT_icp[:, 3], poses_gt[:, 3, j])\n if cls == '024_bowl' or cls == '036_wood_block' or cls == '061_foam_brick':\n error_icp = adi(RT_icp[:3, :3], RT_icp[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n else:\n error_icp = add(RT_icp[:3, :3], RT_icp[:, 3], poses_gt[:3, :3, j], poses_gt[:, 3, j], self._points[cls_index])\n\n if error_icp < threshold[cls_index]:\n count_correct_icp[cls_index] += 1\n\n '''\n # label image\n rgba = cv2.imread(self.image_path_from_index(index), cv2.IMREAD_UNCHANGED)\n image = rgba[:,:,:3]\n alpha = rgba[:,:,3]\n I = np.where(alpha == 0)\n image[I[0], I[1], :] = 255\n label_image = self.labels_to_image(image, sg_labels)\n\n # save image\n filename = os.path.join(image_dir, '%04d.png' % 
im_ind)\n print filename\n cv2.imwrite(filename, label_image)\n '''\n\n # overall accuracy\n acc = np.diag(hist).sum() / hist.sum()\n print('overall accuracy', acc)\n # per-class accuracy\n acc = np.diag(hist) / hist.sum(1)\n print('mean accuracy', np.nanmean(acc))\n # per-class IU\n print('per-class IU')\n iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n for i in range(n_cl):\n print('{} {}'.format(self._classes[i], iu[i]))\n print('mean IU', np.nanmean(iu))\n freq = hist.sum(1) / hist.sum()\n print('fwavacc', (freq[freq > 0] * iu[freq > 0]).sum())\n\n filename = os.path.join(output_dir, 'segmentation.txt')\n with open(filename, 'wt') as f:\n for i in range(n_cl):\n f.write('{:f}\\n'.format(iu[i]))\n\n filename = os.path.join(output_dir, 'confusion_matrix.txt')\n with open(filename, 'wt') as f:\n for i in range(n_cl):\n for j in range(n_cl):\n f.write('{:f} '.format(hist[i, j]))\n f.write('\\n')\n\n # pose accuracy\n if cfg.TEST.POSE_REG:\n for i in range(1, self.num_classes):\n print('{} correct poses: {}, all poses: {}, accuracy: {}'.format(self.classes[i], count_correct[i], count_all[i], float(count_correct[i]) / float(count_all[i])))\n if cfg.TEST.POSE_REFINE:\n print('{} correct poses after refinement: {}, all poses: {}, accuracy: {}'.format( \\\n self.classes[i], count_correct_refined[i], count_all[i], float(count_correct_refined[i]) / float(count_all[i])))\n print('{} correct poses after ICP: {}, all poses: {}, accuracy: {}'.format( \\\n self.classes[i], count_correct_icp[i], count_all[i], float(count_correct_icp[i]) / float(count_all[i])))\n\n\nif __name__ == '__main__':\n d = datasets.ycb('trainval')\n res = d.roidb\n from IPython import embed; embed()\n" ]
[ [ "numpy.diag", "numpy.amax", "numpy.reshape", "numpy.linalg.norm", "numpy.stack", "numpy.nanmean", "numpy.array", "numpy.zeros", "numpy.where", "numpy.loadtxt" ] ]
RafalStaszak/TensorflowCourse
[ "af0d7f6d367d078dd8d36ec1e48d0a20f65a90ab" ]
[ "tensorflow/3_regression.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nx_data = np.random.rand(20).astype(np.float32)\ny_data = x_data*1+3+np.random.uniform(0, 0.2, size=[20])\n\nplt.plot(x_data, y_data, 'ro', label='Produced data')\nplt.legend()\nplt.show()\n\na=tf.Variable([0], dtype=tf.float32)\nb=tf.Variable([0], dtype=tf.float32)\n\nprediction = a*x_data+b\n\n\nloss = tf.reduce_mean(tf.square((prediction-y_data)))\noptimizer = tf.train.GradientDescentOptimizer(0.5)\n\n\ntrain = optimizer.minimize(loss)\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\nfor step in range(201):\n sess.run(train)\n\nfinal_a = sess.run(a)\nfinal_b = sess.run(b)\npredicted_values = sess.run(prediction)\n\nplt.plot(x_data, predicted_values, label='Predicted linear function')\nplt.plot(x_data, y_data, 'ro', label='Original Data')\nplt.legend()\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.Variable", "matplotlib.pyplot.plot", "tensorflow.initialize_all_variables", "tensorflow.train.GradientDescentOptimizer", "numpy.random.rand", "tensorflow.Session", "tensorflow.square", "numpy.random.uniform", "matplotlib.pyplot.show" ] ]
lpkirwin/pandas
[ "bb929a637ca9d4f24ea78ee4cca9ee17b65a5c1e" ]
[ "pandas/core/internals/blocks.py" ]
[ "from datetime import datetime, timedelta\nimport inspect\nimport re\nfrom typing import TYPE_CHECKING, Any, List, Optional, Type, Union, cast\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import NaT, algos as libalgos, internals as libinternals, lib, writers\nfrom pandas._libs.internals import BlockPlacement\nfrom pandas._libs.tslibs import conversion\nfrom pandas._libs.tslibs.timezones import tz_compare\nfrom pandas._typing import ArrayLike, Scalar, Shape\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import (\n astype_nansafe,\n convert_scalar_for_putitemlike,\n find_common_type,\n infer_dtype_from,\n infer_dtype_from_scalar,\n maybe_box_datetimelike,\n maybe_downcast_numeric,\n maybe_downcast_to_dtype,\n maybe_infer_dtype_type,\n maybe_promote,\n maybe_upcast,\n soft_convert_objects,\n)\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n TD64NS_DTYPE,\n is_bool_dtype,\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_object_dtype,\n is_period_dtype,\n is_re,\n is_re_compilable,\n is_sparse,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndexClass,\n ABCPandasArray,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.array_algos.replace import compare_or_regex_search, replace_regex\nfrom pandas.core.array_algos.transforms import shift\nfrom pandas.core.arrays import (\n Categorical,\n DatetimeArray,\n ExtensionArray,\n PandasArray,\n PandasDtype,\n TimedeltaArray,\n)\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexers import (\n check_setitem_lengths,\n is_empty_indexer,\n is_scalar_indexer,\n)\nimport pandas.core.missing as missing\nfrom pandas.core.nanops import nanpercentile\n\nif TYPE_CHECKING:\n from pandas import Index\n\n\nclass Block(PandasObject):\n \"\"\"\n Canonical n-dimensional unit of homogeneous dtype contained in a pandas\n data structure\n\n Index-ignorant; let the container take care of that\n \"\"\"\n\n values: Union[np.ndarray, ExtensionArray]\n\n __slots__ = [\"_mgr_locs\", \"values\", \"ndim\"]\n is_numeric = False\n is_float = False\n is_integer = False\n is_complex = False\n is_datetime = False\n is_datetimetz = False\n is_timedelta = False\n is_bool = False\n is_object = False\n is_extension = False\n _can_hold_na = False\n _can_consolidate = True\n _validate_ndim = True\n\n @classmethod\n def _simple_new(\n cls, values: ArrayLike, placement: BlockPlacement, ndim: int\n ) -> \"Block\":\n \"\"\"\n Fastpath constructor, does *no* validation\n \"\"\"\n obj = object.__new__(cls)\n obj.ndim = ndim\n obj.values = values\n obj._mgr_locs = placement\n return obj\n\n def __init__(self, values, placement, ndim=None):\n self.ndim = self._check_ndim(values, ndim)\n self.mgr_locs = placement\n self.values = self._maybe_coerce_values(values)\n\n if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):\n raise ValueError(\n f\"Wrong number of items passed {len(self.values)}, \"\n f\"placement implies {len(self.mgr_locs)}\"\n )\n\n def _maybe_coerce_values(self, values):\n 
\"\"\"\n Ensure we have correctly-typed values.\n\n Parameters\n ----------\n values : np.ndarray, ExtensionArray, Index\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n return values\n\n def _check_ndim(self, values, ndim):\n \"\"\"\n ndim inference and validation.\n\n Infers ndim from 'values' if not provided to __init__.\n Validates that values.ndim and ndim are consistent if and only if\n the class variable '_validate_ndim' is True.\n\n Parameters\n ----------\n values : array-like\n ndim : int or None\n\n Returns\n -------\n ndim : int\n\n Raises\n ------\n ValueError : the number of dimensions do not match\n \"\"\"\n if ndim is None:\n ndim = values.ndim\n\n if self._validate_ndim and values.ndim != ndim:\n raise ValueError(\n \"Wrong number of dimensions. \"\n f\"values.ndim != ndim [{values.ndim} != {ndim}]\"\n )\n return ndim\n\n @property\n def _holder(self):\n \"\"\"\n The array-like that can hold the underlying values.\n\n None for 'Block', overridden by subclasses that don't\n use an ndarray.\n \"\"\"\n return None\n\n @property\n def _consolidate_key(self):\n return self._can_consolidate, self.dtype.name\n\n @property\n def is_view(self) -> bool:\n \"\"\" return a boolean if I am possibly a view \"\"\"\n values = self.values\n values = cast(np.ndarray, values)\n return values.base is not None\n\n @property\n def is_categorical(self) -> bool:\n return self._holder is Categorical\n\n @property\n def is_datelike(self) -> bool:\n \"\"\" return True if I am a non-datelike \"\"\"\n return self.is_datetime or self.is_timedelta\n\n def external_values(self):\n \"\"\"\n The array that Series.values returns (public attribute).\n\n This has some historical constraints, and is overridden in block\n subclasses to return the correct array (e.g. period returns\n object ndarray and datetimetz a datetime64[ns] ndarray instead of\n proper extension array).\n \"\"\"\n return self.values\n\n def internal_values(self):\n \"\"\"\n The array that Series._values returns (internal values).\n \"\"\"\n return self.values\n\n def array_values(self) -> ExtensionArray:\n \"\"\"\n The array that Series.array returns. Always an ExtensionArray.\n \"\"\"\n return PandasArray(self.values)\n\n def get_values(self, dtype=None):\n \"\"\"\n return an internal format, currently just the ndarray\n this is often overridden to handle to_dense like operations\n \"\"\"\n if is_object_dtype(dtype):\n return self.values.astype(object)\n return self.values\n\n def get_block_values_for_json(self) -> np.ndarray:\n \"\"\"\n This is used in the JSON C code.\n \"\"\"\n # TODO(EA2D): reshape will be unnecessary with 2D EAs\n return np.asarray(self.values).reshape(self.shape)\n\n @property\n def fill_value(self):\n return np.nan\n\n @property\n def mgr_locs(self):\n return self._mgr_locs\n\n @mgr_locs.setter\n def mgr_locs(self, new_mgr_locs):\n if not isinstance(new_mgr_locs, libinternals.BlockPlacement):\n new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)\n\n self._mgr_locs = new_mgr_locs\n\n def make_block(self, values, placement=None) -> \"Block\":\n \"\"\"\n Create a new block, with type inference propagate any values that are\n not specified\n \"\"\"\n if placement is None:\n placement = self.mgr_locs\n if self.is_extension:\n values = _block_shape(values, ndim=self.ndim)\n\n return make_block(values, placement=placement, ndim=self.ndim)\n\n def make_block_same_class(self, values, placement=None, ndim=None):\n \"\"\" Wrap given values in a block of same type as self. 
\"\"\"\n if placement is None:\n placement = self.mgr_locs\n if ndim is None:\n ndim = self.ndim\n return type(self)(values, placement=placement, ndim=ndim)\n\n def __repr__(self) -> str:\n # don't want to print out all of the items here\n name = type(self).__name__\n if self.ndim == 1:\n result = f\"{name}: {len(self)} dtype: {self.dtype}\"\n else:\n\n shape = \" x \".join(str(s) for s in self.shape)\n result = f\"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}\"\n\n return result\n\n def __len__(self) -> int:\n return len(self.values)\n\n def __getstate__(self):\n return self.mgr_locs.indexer, self.values\n\n def __setstate__(self, state):\n self.mgr_locs = libinternals.BlockPlacement(state[0])\n self.values = state[1]\n self.ndim = self.values.ndim\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n\n return self.values[slicer]\n\n def getitem_block(self, slicer, new_mgr_locs=None):\n \"\"\"\n Perform __getitem__-like, return result as block.\n\n As of now, only supports slices that preserve dimensionality.\n \"\"\"\n if new_mgr_locs is None:\n axis0_slicer = slicer[0] if isinstance(slicer, tuple) else slicer\n new_mgr_locs = self.mgr_locs[axis0_slicer]\n elif not isinstance(new_mgr_locs, BlockPlacement):\n new_mgr_locs = BlockPlacement(new_mgr_locs)\n\n new_values = self._slice(slicer)\n\n if self._validate_ndim and new_values.ndim != self.ndim:\n raise ValueError(\"Only same dim slicing is allowed\")\n\n return type(self)._simple_new(new_values, new_mgr_locs, self.ndim)\n\n @property\n def shape(self):\n return self.values.shape\n\n @property\n def dtype(self):\n return self.values.dtype\n\n def iget(self, i):\n return self.values[i]\n\n def set_inplace(self, locs, values):\n \"\"\"\n Modify block values in-place with new item value.\n\n Notes\n -----\n `set` never creates a new array or new Block, whereas `setitem` _may_\n create a new array and always creates a new Block.\n \"\"\"\n self.values[locs] = values\n\n def delete(self, loc) -> None:\n \"\"\"\n Delete given loc(-s) from block in-place.\n \"\"\"\n self.values = np.delete(self.values, loc, 0)\n self.mgr_locs = self.mgr_locs.delete(loc)\n\n def apply(self, func, **kwargs) -> List[\"Block\"]:\n \"\"\"\n apply the function to my values; return a block if we are not\n one\n \"\"\"\n with np.errstate(all=\"ignore\"):\n result = func(self.values, **kwargs)\n\n return self._split_op_result(result)\n\n def reduce(self, func, ignore_failures: bool = False) -> List[\"Block\"]:\n # We will apply the function and reshape the result into a single-row\n # Block with the same mgr_locs; squeezing will be done at a higher level\n assert self.ndim == 2\n\n try:\n result = func(self.values)\n except (TypeError, NotImplementedError):\n if ignore_failures:\n return []\n raise\n\n if np.ndim(result) == 0:\n # TODO(EA2D): special case not needed with 2D EAs\n res_values = np.array([[result]])\n else:\n res_values = result.reshape(-1, 1)\n\n nb = self.make_block(res_values)\n return [nb]\n\n def _split_op_result(self, result) -> List[\"Block\"]:\n # See also: split_and_operate\n if is_extension_array_dtype(result) and result.ndim > 1:\n # TODO(EA2D): unnecessary with 2D EAs\n # if we get a 2D ExtensionArray, we need to split it into 1D pieces\n nbs = []\n for i, loc in enumerate(self.mgr_locs):\n vals = result[i]\n block = self.make_block(values=vals, placement=[loc])\n nbs.append(block)\n return nbs\n\n if not isinstance(result, Block):\n result = self.make_block(result)\n\n return [result]\n\n def fillna(\n 
self, value, limit=None, inplace: bool = False, downcast=None\n ) -> List[\"Block\"]:\n \"\"\"\n fillna on the block with the value. If we fail, then convert to\n ObjectBlock and try again\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n mask = isna(self.values)\n mask = _extract_bool_array(mask)\n if limit is not None:\n limit = libalgos.validate_limit(None, limit=limit)\n mask[mask.cumsum(self.ndim - 1) > limit] = False\n\n if not self._can_hold_na:\n if inplace:\n return [self]\n else:\n return [self.copy()]\n\n if self._can_hold_element(value):\n nb = self if inplace else self.copy()\n nb._putmask_simple(mask, value)\n # TODO: should be nb._maybe_downcast?\n return self._maybe_downcast([nb], downcast)\n\n # we can't process the value, but nothing to do\n if not mask.any():\n return [self] if inplace else [self.copy()]\n\n # operate column-by-column\n def f(mask, val, idx):\n block = self.coerce_to_target_dtype(value)\n\n # slice out our block\n if idx is not None:\n # i.e. self.ndim == 2\n block = block.getitem_block(slice(idx, idx + 1))\n return block.fillna(value, limit=limit, inplace=inplace, downcast=None)\n\n return self.split_and_operate(None, f, inplace)\n\n def _split(self) -> List[\"Block\"]:\n \"\"\"\n Split a block into a list of single-column blocks.\n \"\"\"\n assert self.ndim == 2\n\n new_blocks = []\n for i, ref_loc in enumerate(self.mgr_locs):\n vals = self.values[slice(i, i + 1)]\n\n nb = self.make_block(vals, [ref_loc])\n new_blocks.append(nb)\n return new_blocks\n\n def split_and_operate(\n self, mask, f, inplace: bool, ignore_failures: bool = False\n ) -> List[\"Block\"]:\n \"\"\"\n split the block per-column, and apply the callable f\n per-column, return a new block for each. Handle\n masking which will not change a block unless needed.\n\n Parameters\n ----------\n mask : 2-d boolean mask\n f : callable accepting (1d-mask, 1d values, indexer)\n inplace : bool\n ignore_failures : bool, default False\n\n Returns\n -------\n list of blocks\n \"\"\"\n if mask is None:\n mask = np.broadcast_to(True, shape=self.shape)\n\n new_values = self.values\n\n def make_a_block(nv, ref_loc):\n if isinstance(nv, list):\n assert len(nv) == 1, nv\n assert isinstance(nv[0], Block)\n block = nv[0]\n else:\n # Put back the dimension that was taken from it and make\n # a block out of the result.\n nv = _block_shape(nv, ndim=self.ndim)\n block = self.make_block(values=nv, placement=ref_loc)\n return block\n\n # ndim == 1\n if self.ndim == 1:\n if mask.any():\n nv = f(mask, new_values, None)\n else:\n nv = new_values if inplace else new_values.copy()\n block = make_a_block(nv, self.mgr_locs)\n return [block]\n\n # ndim > 1\n new_blocks = []\n for i, ref_loc in enumerate(self.mgr_locs):\n m = mask[i]\n v = new_values[i]\n\n # need a new block\n if m.any() or m.size == 0:\n # Apply our function; we may ignore_failures if this is a\n # reduction that is dropping nuisance columns GH#37827\n try:\n nv = f(m, v, i)\n except TypeError:\n if ignore_failures:\n continue\n else:\n raise\n else:\n nv = v if inplace else v.copy()\n\n block = make_a_block(nv, [ref_loc])\n new_blocks.append(block)\n\n return new_blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n # no need to downcast our float\n # unless indicated\n if downcast is None and (self.is_float or self.is_datelike):\n return blocks\n\n return extend_blocks([b.downcast(downcast) for b in blocks])\n\n def downcast(self, dtypes=None) -> List[\"Block\"]:\n \"\"\" try to downcast 
each item to the dict of dtypes if present \"\"\"\n # turn it off completely\n if dtypes is False:\n return [self]\n\n values = self.values\n\n if self.ndim == 1:\n\n # try to cast all non-floats here\n if dtypes is None:\n dtypes = \"infer\"\n\n nv = maybe_downcast_to_dtype(values, dtypes)\n return [self.make_block(nv)]\n\n # ndim > 1\n if dtypes is None:\n return [self]\n\n if not (dtypes == \"infer\" or isinstance(dtypes, dict)):\n raise ValueError(\n \"downcast must have a dictionary or 'infer' as its argument\"\n )\n elif dtypes != \"infer\":\n raise AssertionError(\"dtypes as dict is not supported yet\")\n\n # operate column-by-column\n # this is expensive as it splits the blocks items-by-item\n def f(mask, val, idx):\n val = maybe_downcast_to_dtype(val, dtype=\"infer\")\n return val\n\n return self.split_and_operate(None, f, False)\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n Coerce to the new dtype.\n\n Parameters\n ----------\n dtype : str, dtype convertible\n copy : bool, default False\n copy if indicated\n errors : str, {'raise', 'ignore'}, default 'ignore'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n Returns\n -------\n Block\n \"\"\"\n errors_legal_values = (\"raise\", \"ignore\")\n\n if errors not in errors_legal_values:\n invalid_arg = (\n \"Expected value of kwarg 'errors' to be one of \"\n f\"{list(errors_legal_values)}. Supplied value is '{errors}'\"\n )\n raise ValueError(invalid_arg)\n\n if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):\n msg = (\n f\"Expected an instance of {dtype.__name__}, \"\n \"but got the class instead. Try instantiating 'dtype'.\"\n )\n raise TypeError(msg)\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # may need to convert to categorical\n if is_categorical_dtype(dtype):\n\n if is_categorical_dtype(self.values.dtype):\n # GH 10696/18593: update an existing categorical efficiently\n return self.make_block(self.values.astype(dtype, copy=copy))\n\n return self.make_block(Categorical(self.values, dtype=dtype))\n\n dtype = pandas_dtype(dtype)\n\n # astype processing\n if is_dtype_equal(self.dtype, dtype):\n if copy:\n return self.copy()\n return self\n\n # force the copy here\n if self.is_extension:\n try:\n values = self.values.astype(dtype)\n except (ValueError, TypeError):\n if errors == \"ignore\":\n values = self.values\n else:\n raise\n else:\n if issubclass(dtype.type, str):\n\n # use native type formatting for datetime/tz/timedelta\n if self.is_datelike:\n values = self.to_native_types().values\n\n # astype formatting\n else:\n # Because we have neither is_extension nor is_datelike,\n # self.values already has the correct shape\n values = self.values\n\n else:\n values = self.get_values(dtype=dtype)\n\n # _astype_nansafe works fine with 1-d only\n vals1d = values.ravel()\n try:\n values = astype_nansafe(vals1d, dtype, copy=True)\n except (ValueError, TypeError):\n # e.g. 
astype_nansafe can fail on object-dtype of strings\n # trying to convert to float\n if errors == \"raise\":\n raise\n newb = self.copy() if copy else self\n return newb\n\n # TODO(EA2D): special case not needed with 2D EAs\n if isinstance(values, np.ndarray):\n values = values.reshape(self.shape)\n\n newb = self.make_block(values)\n\n if newb.is_numeric and self.is_numeric:\n if newb.shape != self.shape:\n raise TypeError(\n f\"cannot set astype for copy = [{copy}] for dtype \"\n f\"({self.dtype.name} [{self.shape}]) to different shape \"\n f\"({newb.dtype.name} [{newb.shape}])\"\n )\n return newb\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n coerce: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n attempt to coerce any object types to better types return a copy\n of the block (if copy = True) by definition we are not an ObjectBlock\n here!\n \"\"\"\n return [self.copy()] if copy else [self]\n\n def _can_hold_element(self, element: Any) -> bool:\n \"\"\" require the same dtype as ourselves \"\"\"\n dtype = self.values.dtype.type\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, dtype)\n return isinstance(element, dtype)\n\n def should_store(self, value: ArrayLike) -> bool:\n \"\"\"\n Should we set self.values[indexer] = value inplace or do we need to cast?\n\n Parameters\n ----------\n value : np.ndarray or ExtensionArray\n\n Returns\n -------\n bool\n \"\"\"\n return is_dtype_equal(value.dtype, self.dtype)\n\n def to_native_types(self, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\" convert to our native types format \"\"\"\n values = self.values\n\n mask = isna(values)\n itemsize = writers.word_len(na_rep)\n\n if not self.is_object and not quoting and itemsize:\n values = values.astype(str)\n if values.dtype.itemsize / np.dtype(\"U1\").itemsize < itemsize:\n # enlarge for the na_rep\n values = values.astype(f\"<U{itemsize}\")\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return self.make_block(values)\n\n # block actions #\n def copy(self, deep: bool = True):\n \"\"\" copy constructor \"\"\"\n values = self.values\n if deep:\n values = values.copy()\n return self.make_block_same_class(values, ndim=self.ndim)\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n replace the to_replace value with value, possible to create new\n blocks here this is just a call to putmask. regex is not used here.\n It is used in ObjectBlocks. 
It is here for API compatibility.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n original_to_replace = to_replace\n\n if not self._can_hold_element(to_replace):\n # We cannot hold `to_replace`, so we know immediately that\n # replacing it is a no-op.\n # Note: If to_replace were a list, NDFrame.replace would call\n # replace_list instead of replace.\n return [self] if inplace else [self.copy()]\n\n values = self.values\n if lib.is_scalar(to_replace) and isinstance(values, np.ndarray):\n # The only non-DatetimeLike class that also has a non-trivial\n # try_coerce_args is ObjectBlock, but that overrides replace,\n # so does not get here.\n to_replace = convert_scalar_for_putitemlike(to_replace, values.dtype)\n\n mask = missing.mask_missing(values, to_replace)\n if not mask.any():\n # Note: we get here with test_replace_extension_other incorrectly\n # bc _can_hold_element is incorrect.\n return [self] if inplace else [self.copy()]\n\n if not self._can_hold_element(value):\n blk = self.astype(object)\n return blk.replace(\n to_replace=original_to_replace,\n value=value,\n inplace=True,\n regex=regex,\n )\n\n blk = self if inplace else self.copy()\n blk._putmask_simple(mask, value)\n blocks = blk.convert(numeric=False, copy=not inplace)\n return blocks\n\n def _replace_regex(\n self,\n to_replace,\n value,\n inplace: bool = False,\n convert: bool = True,\n mask=None,\n ) -> List[\"Block\"]:\n \"\"\"\n Replace elements by the given value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n List[Block]\n \"\"\"\n if not self._can_hold_element(to_replace):\n # i.e. 
only ObjectBlock, but could in principle include a\n # String ExtensionBlock\n return [self] if inplace else [self.copy()]\n\n rx = re.compile(to_replace)\n\n new_values = self.values if inplace else self.values.copy()\n replace_regex(new_values, rx, value, mask)\n\n block = self.make_block(new_values)\n if convert:\n nbs = block.convert(numeric=False)\n else:\n nbs = [block]\n return nbs\n\n def _replace_list(\n self,\n src_list: List[Any],\n dest_list: List[Any],\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n See BlockManager._replace_list docstring.\n \"\"\"\n src_len = len(src_list) - 1\n\n def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray:\n \"\"\"\n Generate a bool array by perform an equality check, or perform\n an element-wise regular expression matching\n \"\"\"\n if isna(s):\n return ~mask\n\n s = maybe_box_datetimelike(s)\n return compare_or_regex_search(self.values, s, regex, mask)\n\n # Calculate the mask once, prior to the call of comp\n # in order to avoid repeating the same computations\n mask = ~isna(self.values)\n\n masks = [comp(s, mask, regex) for s in src_list]\n masks = [_extract_bool_array(x) for x in masks]\n\n rb = [self if inplace else self.copy()]\n for i, (src, dest) in enumerate(zip(src_list, dest_list)):\n new_rb: List[\"Block\"] = []\n for blk in rb:\n m = masks[i]\n convert = i == src_len # only convert once at the end\n result = blk._replace_coerce(\n to_replace=src,\n value=dest,\n mask=m,\n inplace=inplace,\n regex=regex,\n )\n if convert and blk.is_object:\n result = extend_blocks(\n [b.convert(numeric=False, copy=True) for b in result]\n )\n new_rb.extend(result)\n rb = new_rb\n return rb\n\n def setitem(self, indexer, value):\n \"\"\"\n Attempt self.values[indexer] = value, possibly creating a new array.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. `value` must\n be a compatible shape.\n \"\"\"\n transpose = self.ndim == 2\n\n if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:\n raise ValueError(f\"Cannot set values with ndim > {self.ndim}\")\n\n # coerce None values, if appropriate\n if value is None:\n if self.is_numeric:\n value = np.nan\n\n # coerce if block dtype can store value\n values = self.values\n if self._can_hold_element(value):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(value):\n value = convert_scalar_for_putitemlike(value, values.dtype)\n\n else:\n # current dtype cannot store value, coerce to common dtype\n\n if hasattr(value, \"dtype\"):\n dtype = value.dtype\n\n elif lib.is_scalar(value) and not isna(value):\n dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)\n\n else:\n # e.g. 
we are bool dtype and value is nan\n # TODO: watch out for case with listlike value and scalar/empty indexer\n dtype, _ = maybe_promote(np.array(value).dtype)\n return self.astype(dtype).setitem(indexer, value)\n\n dtype = find_common_type([values.dtype, dtype])\n assert not is_dtype_equal(self.dtype, dtype)\n # otherwise should have _can_hold_element\n\n return self.astype(dtype).setitem(indexer, value)\n\n # value must be storable at this moment\n if is_extension_array_dtype(getattr(value, \"dtype\", None)):\n # We need to be careful not to allow through strings that\n # can be parsed to EADtypes\n is_ea_value = True\n arr_value = value\n else:\n is_ea_value = False\n arr_value = np.array(value)\n\n if transpose:\n values = values.T\n\n # length checking\n check_setitem_lengths(indexer, value, values)\n exact_match = (\n len(arr_value.shape)\n and arr_value.shape[0] == values.shape[0]\n and arr_value.size == values.size\n )\n if is_empty_indexer(indexer, arr_value):\n # GH#8669 empty indexers\n pass\n\n elif is_scalar_indexer(indexer, self.ndim):\n # setting a single element for each dim and with a rhs that could\n # be e.g. a list; see GH#6043\n values[indexer] = value\n\n elif exact_match and is_categorical_dtype(arr_value.dtype):\n # GH25495 - If the current dtype is not categorical,\n # we need to create a new categorical block\n values[indexer] = value\n return self.make_block(Categorical(self.values, dtype=arr_value.dtype))\n\n elif exact_match and is_ea_value:\n # GH#32395 if we're going to replace the values entirely, just\n # substitute in the new array\n return self.make_block(arr_value)\n\n # if we are an exact match (ex-broadcasting),\n # then use the resultant dtype\n elif exact_match:\n # We are setting _all_ of the array's values, so can cast to new dtype\n values[indexer] = value\n\n values = values.astype(arr_value.dtype, copy=False)\n\n # set\n else:\n values[indexer] = value\n\n if transpose:\n values = values.T\n block = self.make_block(values)\n return block\n\n def _putmask_simple(self, mask: np.ndarray, value: Any):\n \"\"\"\n Like putmask but\n\n a) we do not cast on failure\n b) we do not handle repeating or truncating like numpy.\n\n Parameters\n ----------\n mask : np.ndarray[bool]\n We assume _extract_bool_array has already been called.\n value : Any\n We assume self._can_hold_element(value)\n \"\"\"\n values = self.values\n\n if lib.is_scalar(value) and isinstance(values, np.ndarray):\n value = convert_scalar_for_putitemlike(value, values.dtype)\n\n if is_list_like(value) and len(value) == len(values):\n values[mask] = value[mask]\n else:\n values[mask] = value\n\n def putmask(\n self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False\n ) -> List[\"Block\"]:\n \"\"\"\n putmask the data to the block; it is possible that we may create a\n new dtype of block\n\n Return the resulting block(s).\n\n Parameters\n ----------\n mask : np.ndarray[bool], SparseArray[bool], or BooleanArray\n new : a ndarray/object\n inplace : bool, default False\n Perform inplace modification.\n axis : int\n transpose : bool, default False\n Set to True if self is stored with axes reversed.\n\n Returns\n -------\n List[Block]\n \"\"\"\n mask = _extract_bool_array(mask)\n assert not isinstance(new, (ABCIndexClass, ABCSeries, ABCDataFrame))\n\n new_values = self.values # delay copy if possible.\n # if we are passed a scalar None, convert it here\n if not is_list_like(new) and isna(new) and not self.is_object:\n # FIXME: make sure we have compatible NA\n new = 
self.fill_value\n\n if self._can_hold_element(new):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(new):\n new = convert_scalar_for_putitemlike(new, self.values.dtype)\n\n if transpose:\n new_values = new_values.T\n\n # If the default repeat behavior in np.putmask would go in the\n # wrong direction, then explicitly repeat and reshape new instead\n if getattr(new, \"ndim\", 0) >= 1:\n if self.ndim - 1 == new.ndim and axis == 1:\n new = np.repeat(new, new_values.shape[-1]).reshape(self.shape)\n new = new.astype(new_values.dtype)\n\n if new_values is self.values and not inplace:\n new_values = new_values.copy()\n # we require exact matches between the len of the\n # values we are setting (or is compat). np.putmask\n # doesn't check this and will simply truncate / pad\n # the output, but we want sane error messages\n #\n # TODO: this prob needs some better checking\n # for 2D cases\n if (\n is_list_like(new)\n and np.any(mask[mask])\n and getattr(new, \"ndim\", 1) == 1\n ):\n if mask[mask].shape[-1] == len(new):\n # GH 30567\n # If length of ``new`` is less than the length of ``new_values``,\n # `np.putmask` would first repeat the ``new`` array and then\n # assign the masked values hence produces incorrect result.\n # `np.place` on the other hand uses the ``new`` values at it is\n # to place in the masked locations of ``new_values``\n np.place(new_values, mask, new)\n elif mask.shape[-1] == len(new) or len(new) == 1:\n np.putmask(new_values, mask, new)\n else:\n raise ValueError(\"cannot assign mismatch length to masked array\")\n else:\n np.putmask(new_values, mask, new)\n\n # maybe upcast me\n elif mask.any():\n if transpose:\n mask = mask.T\n if isinstance(new, np.ndarray):\n new = new.T\n axis = new_values.ndim - axis - 1\n\n # Pseudo-broadcast\n if getattr(new, \"ndim\", 0) >= 1:\n if self.ndim - 1 == new.ndim:\n new_shape = list(new.shape)\n new_shape.insert(axis, 1)\n new = new.reshape(tuple(new_shape))\n\n # operate column-by-column\n def f(mask, val, idx):\n\n if idx is None:\n # ndim==1 case.\n n = new\n else:\n\n if isinstance(new, np.ndarray):\n n = np.squeeze(new[idx % new.shape[0]])\n else:\n n = np.array(new)\n\n # type of the new block\n dtype, _ = maybe_promote(n.dtype)\n\n # we need to explicitly astype here to make a copy\n n = n.astype(dtype)\n\n nv = _putmask_smart(val, mask, n)\n return nv\n\n new_blocks = self.split_and_operate(mask, f, inplace)\n return new_blocks\n\n if inplace:\n return [self]\n\n if transpose:\n if new_values is None:\n new_values = self.values if inplace else self.values.copy()\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def coerce_to_target_dtype(self, other):\n \"\"\"\n coerce the current block to a dtype compat for other\n we will return a block, possibly object, and not raise\n\n we can also safely try to coerce to the same dtype\n and will receive the same block\n \"\"\"\n # if we cannot then coerce to object\n dtype, _ = infer_dtype_from(other, pandas_dtype=True)\n\n if is_dtype_equal(self.dtype, dtype):\n return self\n\n if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):\n # we don't upcast to bool\n return self.astype(object)\n\n elif (self.is_float or self.is_complex) and (\n is_integer_dtype(dtype) or is_float_dtype(dtype)\n ):\n # don't coerce float/complex to int\n return self\n\n elif self.is_datetime or is_datetime64_any_dtype(dtype):\n # The is_dtype_equal check above ensures that at most one 
of\n # these two conditions hold, so we must cast to object.\n return self.astype(object)\n\n elif self.is_timedelta or is_timedelta64_dtype(dtype):\n # The is_dtype_equal check above ensures that at most one of\n # these two conditions hold, so we must cast to object.\n return self.astype(object)\n\n try:\n return self.astype(dtype)\n except (ValueError, TypeError, OverflowError):\n return self.astype(object)\n\n def interpolate(\n self,\n method: str = \"pad\",\n axis: int = 0,\n index: Optional[\"Index\"] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n limit_direction: str = \"forward\",\n limit_area: Optional[str] = None,\n fill_value: Optional[Any] = None,\n coerce: bool = False,\n downcast: Optional[str] = None,\n **kwargs,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if not self._can_hold_na:\n # If there are no NAs, then interpolate is a no-op\n return self if inplace else self.copy()\n\n # a fill na type method\n try:\n m = missing.clean_fill_method(method)\n except ValueError:\n m = None\n\n if m is not None:\n if fill_value is not None:\n # similar to validate_fillna_kwargs\n raise ValueError(\"Cannot pass both fill_value and method\")\n\n return self._interpolate_with_fill(\n method=m,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n # validate the interp method\n m = missing.clean_interp_method(method, **kwargs)\n\n assert index is not None # for mypy\n\n return self._interpolate(\n method=m,\n index=index,\n axis=axis,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n def _interpolate_with_fill(\n self,\n method: str = \"pad\",\n axis: int = 0,\n inplace: bool = False,\n limit: Optional[int] = None,\n downcast: Optional[str] = None,\n ) -> List[\"Block\"]:\n \"\"\" fillna but using the interpolate machinery \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n assert self._can_hold_na # checked by caller\n\n values = self.values if inplace else self.values.copy()\n\n values = missing.interpolate_2d(\n values,\n method=method,\n axis=axis,\n limit=limit,\n )\n\n blocks = [self.make_block_same_class(values, ndim=self.ndim)]\n return self._maybe_downcast(blocks, downcast)\n\n def _interpolate(\n self,\n method: str,\n index: \"Index\",\n fill_value: Optional[Any] = None,\n axis: int = 0,\n limit: Optional[int] = None,\n limit_direction: str = \"forward\",\n limit_area: Optional[str] = None,\n inplace: bool = False,\n downcast: Optional[str] = None,\n **kwargs,\n ) -> List[\"Block\"]:\n \"\"\" interpolate using scipy wrappers \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n data = self.values if inplace else self.values.copy()\n\n # only deal with floats\n if not self.is_float:\n if not self.is_integer:\n return [self]\n data = data.astype(np.float64)\n\n if fill_value is None:\n fill_value = self.fill_value\n\n if method in (\"krogh\", \"piecewise_polynomial\", \"pchip\"):\n if not index.is_monotonic:\n raise ValueError(\n f\"{method} interpolation requires that the index be monotonic.\"\n )\n # process 1-d slices in the axis direction\n\n def func(yvalues: np.ndarray) -> np.ndarray:\n\n # process a 1-d slice, returning it\n # should the axis argument be handled below in apply_along_axis?\n # i.e. 
not an arg to missing.interpolate_1d\n return missing.interpolate_1d(\n xvalues=index,\n yvalues=yvalues,\n method=method,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n bounds_error=False,\n **kwargs,\n )\n\n # interp each column independently\n interp_values = np.apply_along_axis(func, axis, data)\n\n blocks = [self.make_block_same_class(interp_values)]\n return self._maybe_downcast(blocks, downcast)\n\n def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_value=lib.no_default):\n \"\"\"\n Take values according to indexer and return them as a block.bb\n\n \"\"\"\n # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock\n # so need to preserve types\n # sparse is treated like an ndarray, but needs .get_values() shaping\n\n values = self.values\n\n if fill_value is lib.no_default:\n fill_value = self.fill_value\n allow_fill = False\n else:\n allow_fill = True\n\n new_values = algos.take_nd(\n values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value\n )\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (axis == 0 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n if not is_dtype_equal(new_values.dtype, self.dtype):\n return self.make_block(new_values, new_mgr_locs)\n else:\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def diff(self, n: int, axis: int = 1) -> List[\"Block\"]:\n \"\"\" return block for the diff of the values \"\"\"\n new_values = algos.diff(self.values, n, axis=axis, stacklevel=7)\n return [self.make_block(values=new_values)]\n\n def shift(self, periods: int, axis: int = 0, fill_value=None):\n \"\"\" shift the block by periods, possibly upcast \"\"\"\n # convert integer to float if necessary. need to do a lot more than\n # that, handle boolean etc also\n new_values, fill_value = maybe_upcast(self.values, fill_value)\n\n new_values = shift(new_values, periods, axis, fill_value)\n\n return [self.make_block(new_values)]\n\n def where(\n self, other, cond, errors=\"raise\", try_cast: bool = False, axis: int = 0\n ) -> List[\"Block\"]:\n \"\"\"\n evaluate the block; return result block(s) from the result\n\n Parameters\n ----------\n other : a ndarray/object\n cond : np.ndarray[bool], SparseArray[bool], or BooleanArray\n errors : str, {'raise', 'ignore'}, default 'raise'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. 
On error return original object\n try_cast: bool, default False\n axis : int, default 0\n\n Returns\n -------\n List[Block]\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n cond = _extract_bool_array(cond)\n assert not isinstance(other, (ABCIndexClass, ABCSeries, ABCDataFrame))\n\n assert errors in [\"raise\", \"ignore\"]\n transpose = self.ndim == 2\n\n values = self.values\n orig_other = other\n if transpose:\n values = values.T\n\n # If the default broadcasting would go in the wrong direction, then\n # explicitly reshape other instead\n if getattr(other, \"ndim\", 0) >= 1:\n if values.ndim - 1 == other.ndim and axis == 1:\n other = other.reshape(tuple(other.shape + (1,)))\n elif transpose and values.ndim == self.ndim - 1:\n cond = cond.T\n\n if not hasattr(cond, \"shape\"):\n raise ValueError(\"where must have a condition that is ndarray like\")\n\n if cond.ravel(\"K\").all():\n result = values\n else:\n # see if we can operate on the entire block, or need item-by-item\n # or if we are a single block (ndim == 1)\n if (\n (self.is_integer or self.is_bool)\n and lib.is_float(other)\n and np.isnan(other)\n ):\n # GH#3733 special case to avoid object-dtype casting\n # and go through numexpr path instead.\n # In integer case, np.where will cast to floats\n pass\n elif not self._can_hold_element(other):\n # we cannot coerce, return a compat dtype\n # we are explicitly ignoring errors\n block = self.coerce_to_target_dtype(other)\n blocks = block.where(\n orig_other, cond, errors=errors, try_cast=try_cast, axis=axis\n )\n return self._maybe_downcast(blocks, \"infer\")\n\n if not (\n (self.is_integer or self.is_bool)\n and lib.is_float(other)\n and np.isnan(other)\n ):\n # convert datetime to datetime64, timedelta to timedelta64\n other = convert_scalar_for_putitemlike(other, values.dtype)\n\n # By the time we get here, we should have all Series/Index\n # args extracted to ndarray\n result = expressions.where(cond, values, other)\n\n if self._can_hold_na or self.ndim == 1:\n\n if transpose:\n result = result.T\n\n return [self.make_block(result)]\n\n # might need to separate out blocks\n axis = cond.ndim - 1\n cond = cond.swapaxes(axis, 0)\n mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)\n\n result_blocks: List[\"Block\"] = []\n for m in [mask, ~mask]:\n if m.any():\n result = cast(np.ndarray, result) # EABlock overrides where\n taken = result.take(m.nonzero()[0], axis=axis)\n r = maybe_downcast_numeric(taken, self.dtype)\n nb = self.make_block(r.T, placement=self.mgr_locs[m])\n result_blocks.append(nb)\n\n return result_blocks\n\n def _unstack(self, unstacker, fill_value, new_placement):\n \"\"\"\n Return a list of unstacked blocks of self\n\n Parameters\n ----------\n unstacker : reshape._Unstacker\n fill_value : int\n Only used in ExtensionBlock._unstack\n\n Returns\n -------\n blocks : list of Block\n New blocks of unstacked values.\n mask : array_like of bool\n The mask of columns of `blocks` we should keep.\n \"\"\"\n new_values, mask = unstacker.get_new_values(\n self.values.T, fill_value=fill_value\n )\n\n mask = mask.any(0)\n # TODO: in all tests we have mask.all(); can we rely on that?\n\n new_values = new_values.T[mask]\n new_placement = new_placement[mask]\n\n blocks = [self.make_block_same_class(new_values, placement=new_placement)]\n return blocks, mask\n\n def quantile(self, qs, interpolation=\"linear\", axis: int = 0):\n \"\"\"\n compute the quantiles of the\n\n Parameters\n ----------\n qs: a scalar or list of the quantiles to 
be computed\n interpolation: type of interpolation, default 'linear'\n axis: axis to compute, default 0\n\n Returns\n -------\n Block\n \"\"\"\n # We should always have ndim == 2 because Series dispatches to DataFrame\n assert self.ndim == 2\n\n values = self.get_values()\n\n is_empty = values.shape[axis] == 0\n orig_scalar = not is_list_like(qs)\n if orig_scalar:\n # make list-like, unpack later\n qs = [qs]\n\n if is_empty:\n # create the array of na_values\n # 2d len(values) * len(qs)\n result = np.repeat(\n np.array([self.fill_value] * len(qs)), len(values)\n ).reshape(len(values), len(qs))\n else:\n # asarray needed for Sparse, see GH#24600\n mask = np.asarray(isna(values))\n result = nanpercentile(\n values,\n np.array(qs) * 100,\n axis=axis,\n na_value=self.fill_value,\n mask=mask,\n ndim=values.ndim,\n interpolation=interpolation,\n )\n\n result = np.array(result, copy=False)\n result = result.T\n\n if orig_scalar and not lib.is_scalar(result):\n # result could be scalar in case with is_empty and self.ndim == 1\n assert result.shape[-1] == 1, result.shape\n result = result[..., 0]\n result = lib.item_from_zerodim(result)\n\n ndim = np.ndim(result)\n return make_block(result, placement=np.arange(len(result)), ndim=ndim)\n\n def _replace_coerce(\n self,\n to_replace,\n value,\n mask: np.ndarray,\n inplace: bool = True,\n regex: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n mask : np.ndarray[bool]\n True indicate corresponding element is ignored.\n inplace : bool, default True\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n\n Returns\n -------\n List[Block]\n \"\"\"\n if mask.any():\n if not regex:\n nb = self.coerce_to_target_dtype(value)\n if nb is self and not inplace:\n nb = nb.copy()\n nb._putmask_simple(mask, value)\n return [nb]\n else:\n regex = _should_use_regex(regex, to_replace)\n if regex:\n return self._replace_regex(\n to_replace,\n value,\n inplace=inplace,\n convert=False,\n mask=mask,\n )\n return self.replace(to_replace, value, inplace=inplace, regex=False)\n return [self]\n\n\nclass ExtensionBlock(Block):\n \"\"\"\n Block for holding extension types.\n\n Notes\n -----\n This holds all 3rd-party extension array types. 
It's also the immediate\n parent class for our internal extension types' blocks, CategoricalBlock.\n\n ExtensionArrays are limited to 1-D.\n \"\"\"\n\n _can_consolidate = False\n _validate_ndim = False\n is_extension = True\n\n values: ExtensionArray\n\n def __init__(self, values, placement, ndim=None):\n \"\"\"\n Initialize a non-consolidatable block.\n\n 'ndim' may be inferred from 'placement'.\n\n This will call continue to call __init__ for the other base\n classes mixed in with this Mixin.\n \"\"\"\n\n # Placement must be converted to BlockPlacement so that we can check\n # its length\n if not isinstance(placement, libinternals.BlockPlacement):\n placement = libinternals.BlockPlacement(placement)\n\n # Maybe infer ndim from placement\n if ndim is None:\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n super().__init__(values, placement, ndim=ndim)\n\n if self.ndim == 2 and len(self.mgr_locs) != 1:\n # TODO(EA2D): check unnecessary with 2D EAs\n raise AssertionError(\"block.size != values.size\")\n\n @property\n def shape(self):\n # TODO(EA2D): override unnecessary with 2D EAs\n if self.ndim == 1:\n return (len(self.values),)\n return len(self.mgr_locs), len(self.values)\n\n def iget(self, col):\n\n if self.ndim == 2 and isinstance(col, tuple):\n # TODO(EA2D): unnecessary with 2D EAs\n col, loc = col\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(f\"{self} only contains one item\")\n elif isinstance(col, slice):\n if col != slice(None):\n raise NotImplementedError(col)\n return self.values[[loc]]\n return self.values[loc]\n else:\n if col != 0:\n raise IndexError(f\"{self} only contains one item\")\n return self.values\n\n def set_inplace(self, locs, values):\n # NB: This is a misnomer, is supposed to be inplace but is not,\n # see GH#33457\n assert locs.tolist() == [0]\n self.values = values\n\n def putmask(\n self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False\n ) -> List[\"Block\"]:\n \"\"\"\n See Block.putmask.__doc__\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n mask = _extract_bool_array(mask)\n\n new_values = self.values if inplace else self.values.copy()\n\n if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask):\n new = new[mask]\n\n mask = safe_reshape(mask, new_values.shape)\n\n new_values[mask] = new\n return [self.make_block(values=new_values)]\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Unbox to an extension array.\n\n This will unbox an ExtensionArray stored in an Index or Series.\n ExtensionArrays pass through. 
No dtype coercion is done.\n\n Parameters\n ----------\n values : Index, Series, ExtensionArray\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return extract_array(values)\n\n @property\n def _holder(self):\n # For extension blocks, the holder is values-dependent.\n return type(self.values)\n\n @property\n def fill_value(self):\n # Used in reindex_indexer\n return self.values.dtype.na_value\n\n @property\n def _can_hold_na(self):\n # The default ExtensionArray._can_hold_na is True\n return self._holder._can_hold_na\n\n @property\n def is_view(self) -> bool:\n \"\"\"Extension arrays are never treated as views.\"\"\"\n return False\n\n @property\n def is_numeric(self):\n return self.values.dtype._is_numeric\n\n def setitem(self, indexer, value):\n \"\"\"\n Attempt self.values[indexer] = value, possibly creating a new array.\n\n This differs from Block.setitem by not allowing setitem to change\n the dtype of the Block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. `value` must\n be a compatible shape.\n \"\"\"\n if not self._can_hold_element(value):\n # This is only relevant for DatetimeTZBlock, which has a\n # non-trivial `_can_hold_element`.\n # https://github.com/pandas-dev/pandas/issues/24020\n # Need a dedicated setitem until GH#24020 (type promotion in setitem\n # for extension arrays) is designed and implemented.\n return self.astype(object).setitem(indexer, value)\n\n if isinstance(indexer, tuple):\n # TODO(EA2D): not needed with 2D EAs\n # we are always 1-D\n indexer = indexer[0]\n\n check_setitem_lengths(indexer, value, self.values)\n self.values[indexer] = value\n return self\n\n def get_values(self, dtype=None):\n # ExtensionArrays must be iterable, so this works.\n # TODO(EA2D): reshape not needed with 2D EAs\n return np.asarray(self.values).reshape(self.shape)\n\n def array_values(self) -> ExtensionArray:\n return self.values\n\n def to_native_types(self, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\"override to use ExtensionArray astype for the conversion\"\"\"\n values = self.values\n mask = isna(values)\n\n values = np.asarray(values.astype(object))\n values[mask] = na_rep\n\n # TODO(EA2D): reshape not needed with 2D EAs\n # we are expected to return a 2-d ndarray\n return self.make_block(values)\n\n def take_nd(\n self, indexer, axis: int = 0, new_mgr_locs=None, fill_value=lib.no_default\n ):\n \"\"\"\n Take values according to indexer and return them as a block.\n \"\"\"\n if fill_value is lib.no_default:\n fill_value = None\n\n # TODO(EA2D): special case not needed with 2D EAs\n # axis doesn't matter; we are really a single-dim object\n # but are passed the axis depending on the calling routing\n # if its REALLY axis 0, then this will be a reindex and not a take\n new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True)\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (self.ndim == 1 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def _can_hold_element(self, element: Any) -> bool:\n # TODO: We may need to think about pushing this onto the array.\n # We're doing the same as CategoricalBlock here.\n return True\n\n def _slice(self, slicer):\n \"\"\"\n Return a slice of my values.\n\n Parameters\n 
----------\n slicer : slice, ndarray[int], or a tuple of these\n Valid (non-reducing) indexer for self.values.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n \"\"\"\n # return same dims as we currently have\n if not isinstance(slicer, tuple) and self.ndim == 2:\n # reached via getitem_block via _slice_take_blocks_ax0\n # TODO(EA2D): wont be necessary with 2D EAs\n slicer = (slicer, slice(None))\n\n if isinstance(slicer, tuple) and len(slicer) == 2:\n first = slicer[0]\n if not isinstance(first, slice):\n raise AssertionError(\n \"invalid slicing for a 1-ndim ExtensionArray\", first\n )\n # GH#32959 only full-slicers along fake-dim0 are valid\n # TODO(EA2D): wont be necessary with 2D EAs\n new_locs = self.mgr_locs[first]\n if len(new_locs):\n # effectively slice(None)\n slicer = slicer[1]\n else:\n raise AssertionError(\n \"invalid slicing for a 1-ndim ExtensionArray\", slicer\n )\n\n return self.values[slicer]\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n values = self.values if inplace else self.values.copy()\n values = values.fillna(value=value, limit=limit)\n return [\n self.make_block_same_class(\n values=values, placement=self.mgr_locs, ndim=self.ndim\n )\n ]\n\n def interpolate(\n self, method=\"pad\", axis=0, inplace=False, limit=None, fill_value=None, **kwargs\n ):\n\n values = self.values if inplace else self.values.copy()\n return self.make_block_same_class(\n values=values.fillna(value=fill_value, method=method, limit=limit),\n placement=self.mgr_locs,\n )\n\n def diff(self, n: int, axis: int = 1) -> List[\"Block\"]:\n if axis == 0 and n != 0:\n # n==0 case will be a no-op so let is fall through\n # Since we only have one column, the result will be all-NA.\n # Create this result by shifting along axis=0 past the length of\n # our values.\n return super().diff(len(self.values), axis=0)\n if axis == 1:\n # TODO(EA2D): unnecessary with 2D EAs\n # we are by definition 1D.\n axis = 0\n return super().diff(n, axis)\n\n def shift(\n self, periods: int, axis: int = 0, fill_value: Any = None\n ) -> List[\"ExtensionBlock\"]:\n \"\"\"\n Shift the block by `periods`.\n\n Dispatches to underlying ExtensionArray and re-boxes in an\n ExtensionBlock.\n \"\"\"\n return [\n self.make_block_same_class(\n self.values.shift(periods=periods, fill_value=fill_value),\n placement=self.mgr_locs,\n ndim=self.ndim,\n )\n ]\n\n def where(\n self, other, cond, errors=\"raise\", try_cast: bool = False, axis: int = 0\n ) -> List[\"Block\"]:\n\n cond = _extract_bool_array(cond)\n assert not isinstance(other, (ABCIndexClass, ABCSeries, ABCDataFrame))\n\n if isinstance(other, np.ndarray) and other.ndim == 2:\n # TODO(EA2D): unnecessary with 2D EAs\n assert other.shape[1] == 1\n other = other[:, 0]\n\n if isinstance(cond, np.ndarray) and cond.ndim == 2:\n # TODO(EA2D): unnecessary with 2D EAs\n assert cond.shape[1] == 1\n cond = cond[:, 0]\n\n if lib.is_scalar(other) and isna(other):\n # The default `other` for Series / Frame is np.nan\n # we want to replace that with the correct NA value\n # for the type\n other = self.dtype.na_value\n\n if is_sparse(self.values):\n # TODO(SparseArray.__setitem__): remove this if condition\n # We need to re-infer the type of the data after doing the\n # where, for cases where the subtypes don't match\n dtype = None\n else:\n dtype = self.dtype\n\n result = self.values.copy()\n icond = ~cond\n if lib.is_scalar(other):\n set_other = other\n else:\n set_other = other[icond]\n try:\n result[icond] = set_other\n except (NotImplementedError, 
TypeError):\n # NotImplementedError for class not implementing `__setitem__`\n # TypeError for SparseArray, which implements just to raise\n # a TypeError\n result = self._holder._from_sequence(\n np.where(cond, self.values, other), dtype=dtype\n )\n\n return [self.make_block_same_class(result, placement=self.mgr_locs)]\n\n def _unstack(self, unstacker, fill_value, new_placement):\n # ExtensionArray-safe unstack.\n # We override ObjectBlock._unstack, which unstacks directly on the\n # values of the array. For EA-backed blocks, this would require\n # converting to a 2-D ndarray of objects.\n # Instead, we unstack an ndarray of integer positions, followed by\n # a `take` on the actual values.\n n_rows = self.shape[-1]\n dummy_arr = np.arange(n_rows)\n\n new_values, mask = unstacker.get_new_values(dummy_arr, fill_value=-1)\n mask = mask.any(0)\n # TODO: in all tests we have mask.all(); can we rely on that?\n\n blocks = [\n self.make_block_same_class(\n self.values.take(indices, allow_fill=True, fill_value=fill_value),\n [place],\n )\n for indices, place in zip(new_values.T, new_placement)\n ]\n return blocks, mask\n\n\nclass ObjectValuesExtensionBlock(ExtensionBlock):\n \"\"\"\n Block providing backwards-compatibility for `.values`.\n\n Used by PeriodArray and IntervalArray to ensure that\n Series[T].values is an ndarray of objects.\n \"\"\"\n\n def external_values(self):\n return self.values.astype(object)\n\n def _can_hold_element(self, element: Any) -> bool:\n if is_valid_nat_for_dtype(element, self.dtype):\n return True\n if isinstance(element, list) and len(element) == 0:\n return True\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, self.dtype.type)\n return isinstance(element, self.dtype.type)\n\n\nclass NumericBlock(Block):\n __slots__ = ()\n is_numeric = True\n _can_hold_na = True\n\n\nclass FloatBlock(NumericBlock):\n __slots__ = ()\n is_float = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(\n tipo.type, np.timedelta64\n )\n return isinstance(\n element, (float, int, np.floating, np.int_)\n ) and not isinstance(\n element,\n (bool, np.bool_, np.timedelta64),\n )\n\n def to_native_types(\n self, na_rep=\"\", float_format=None, decimal=\".\", quoting=None, **kwargs\n ):\n \"\"\" convert to our native types format \"\"\"\n values = self.values\n\n # see gh-13418: no special formatting is desired at the\n # output (important for appropriate 'quoting' behaviour),\n # so do not pass it through the FloatArrayFormatter\n if float_format is None and decimal == \".\":\n mask = isna(values)\n\n if not quoting:\n values = values.astype(str)\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return self.make_block(values)\n\n from pandas.io.formats.format import FloatArrayFormatter\n\n formatter = FloatArrayFormatter(\n values,\n na_rep=na_rep,\n float_format=float_format,\n decimal=decimal,\n quoting=quoting,\n fixed_width=False,\n )\n res = formatter.get_result_as_array()\n return self.make_block(res)\n\n\nclass ComplexBlock(NumericBlock):\n __slots__ = ()\n is_complex = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))\n return isinstance(\n element, (float, int, complex, np.float_, np.int_)\n ) and not 
isinstance(element, (bool, np.bool_))\n\n\nclass IntBlock(NumericBlock):\n __slots__ = ()\n is_integer = True\n _can_hold_na = False\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return (\n issubclass(tipo.type, np.integer)\n and not issubclass(tipo.type, np.timedelta64)\n and self.dtype.itemsize >= tipo.itemsize\n )\n # We have not inferred an integer from the dtype\n # check if we have a builtin int or a float equal to an int\n return is_integer(element) or (is_float(element) and element.is_integer())\n\n\nclass DatetimeLikeBlockMixin(Block):\n \"\"\"Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.\"\"\"\n\n _can_hold_na = True\n\n def get_values(self, dtype=None):\n \"\"\"\n return object dtype as boxed values, such as Timestamps/Timedelta\n \"\"\"\n if is_object_dtype(dtype):\n # DTA/TDA constructor and astype can handle 2D\n return self._holder(self.values).astype(object)\n return self.values\n\n def internal_values(self):\n # Override to return DatetimeArray and TimedeltaArray\n return self.array_values()\n\n def array_values(self):\n return self._holder._simple_new(self.values)\n\n def iget(self, key):\n # GH#31649 we need to wrap scalars in Timestamp/Timedelta\n # TODO(EA2D): this can be removed if we ever have 2D EA\n return self.array_values().reshape(self.shape)[key]\n\n def diff(self, n: int, axis: int = 0) -> List[\"Block\"]:\n \"\"\"\n 1st discrete difference.\n\n Parameters\n ----------\n n : int\n Number of periods to diff.\n axis : int, default 0\n Axis to diff upon.\n\n Returns\n -------\n A list with a new TimeDeltaBlock.\n\n Notes\n -----\n The arguments here are mimicking shift so they are called correctly\n by apply.\n \"\"\"\n # TODO(EA2D): reshape not necessary with 2D EAs\n values = self.array_values().reshape(self.shape)\n\n new_values = values - values.shift(n, axis=axis)\n return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]\n\n def shift(self, periods, axis=0, fill_value=None):\n # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs\n values = self.array_values()\n new_values = values.shift(periods, fill_value=fill_value, axis=axis)\n return self.make_block_same_class(new_values)\n\n def to_native_types(self, na_rep=\"NaT\", **kwargs):\n \"\"\" convert to our native types format \"\"\"\n arr = self.array_values()\n\n result = arr._format_native_types(na_rep=na_rep, **kwargs)\n return self.make_block(result)\n\n\nclass DatetimeBlock(DatetimeLikeBlockMixin):\n __slots__ = ()\n is_datetime = True\n _holder = DatetimeArray\n fill_value = np.datetime64(\"NaT\", \"ns\")\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Input validation for values passed to __init__. 
Ensure that\n we have datetime64ns, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : ndarray[datetime64ns]\n\n Overridden by DatetimeTZBlock.\n \"\"\"\n if values.dtype != DT64NS_DTYPE:\n values = conversion.ensure_datetime64ns(values)\n\n if isinstance(values, DatetimeArray):\n values = values._data\n\n assert isinstance(values, np.ndarray), type(values)\n return values\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n these automatically copy, so copy=True has no effect\n raise on an except if raise == True\n \"\"\"\n dtype = pandas_dtype(dtype)\n\n # if we are passed a datetime64[ns, tz]\n if is_datetime64tz_dtype(dtype):\n values = self.values\n if copy:\n # this should be the only copy\n values = values.copy()\n values = DatetimeArray._simple_new(values.view(\"i8\"), dtype=dtype)\n return self.make_block(values)\n\n # delegate\n return super().astype(dtype=dtype, copy=copy, errors=errors)\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n if isinstance(element, list) and len(element) == 0:\n # Following DatetimeArray._validate_setitem_value\n # convention, we treat this as object-dtype\n # (even though tipo is float64)\n return True\n\n elif self.is_datetimetz:\n # require exact match, since non-nano does not exist\n return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(\n element, self.dtype\n )\n\n # GH#27419 if we get a non-nano datetime64 object\n return is_datetime64_dtype(tipo)\n elif element is NaT:\n return True\n elif isinstance(element, datetime):\n if self.is_datetimetz:\n return tz_compare(element.tzinfo, self.dtype.tz)\n return element.tzinfo is None\n\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def set_inplace(self, locs, values):\n \"\"\"\n See Block.set.__doc__\n \"\"\"\n values = conversion.ensure_datetime64ns(values, copy=False)\n\n self.values[locs] = values\n\n\nclass DatetimeTZBlock(ExtensionBlock, DatetimeBlock):\n \"\"\" implement a datetime64 block with a tz attribute \"\"\"\n\n values: DatetimeArray\n\n __slots__ = ()\n is_datetimetz = True\n is_extension = True\n\n internal_values = Block.internal_values\n\n _holder = DatetimeBlock._holder\n _can_hold_element = DatetimeBlock._can_hold_element\n to_native_types = DatetimeBlock.to_native_types\n diff = DatetimeBlock.diff\n fillna = DatetimeBlock.fillna # i.e. Block.fillna\n fill_value = DatetimeBlock.fill_value\n _can_hold_na = DatetimeBlock._can_hold_na\n\n array_values = ExtensionBlock.array_values\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Input validation for values passed to __init__. 
Ensure that\n we have datetime64TZ, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : DatetimeArray\n \"\"\"\n if not isinstance(values, self._holder):\n values = self._holder(values)\n\n if values.tz is None:\n raise ValueError(\"cannot create a DatetimeTZBlock without a tz\")\n\n return values\n\n @property\n def is_view(self) -> bool:\n \"\"\" return a boolean if I am possibly a view \"\"\"\n # check the ndarray values of the DatetimeIndex values\n return self.values._data.base is not None\n\n def get_values(self, dtype=None):\n \"\"\"\n Returns an ndarray of values.\n\n Parameters\n ----------\n dtype : np.dtype\n Only `object`-like dtypes are respected here (not sure\n why).\n\n Returns\n -------\n values : ndarray\n When ``dtype=object``, then and object-dtype ndarray of\n boxed values is returned. Otherwise, an M8[ns] ndarray\n is returned.\n\n DatetimeArray is always 1-d. ``get_values`` will reshape\n the return value to be the same dimensionality as the\n block.\n \"\"\"\n values = self.values\n if is_object_dtype(dtype):\n values = values.astype(object)\n\n # TODO(EA2D): reshape unnecessary with 2D EAs\n # Ensure that our shape is correct for DataFrame.\n # ExtensionArrays are always 1-D, even in a DataFrame when\n # the analogous NumPy-backed column would be a 2-D ndarray.\n return np.asarray(values).reshape(self.shape)\n\n def external_values(self):\n # NB: this is different from np.asarray(self.values), since that\n # return an object-dtype ndarray of Timestamps.\n return np.asarray(self.values.astype(\"datetime64[ns]\", copy=False))\n\n def quantile(self, qs, interpolation=\"linear\", axis=0):\n naive = self.values.view(\"M8[ns]\")\n\n # TODO(EA2D): kludge for 2D block with 1D values\n naive = naive.reshape(self.shape)\n\n blk = self.make_block(naive)\n res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)\n\n # TODO(EA2D): ravel is kludge for 2D block with 1D values, assumes column-like\n aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)\n return self.make_block_same_class(aware, ndim=res_blk.ndim)\n\n\nclass TimeDeltaBlock(DatetimeLikeBlockMixin):\n __slots__ = ()\n is_timedelta = True\n fill_value = np.timedelta64(\"NaT\", \"ns\")\n\n def _maybe_coerce_values(self, values):\n if values.dtype != TD64NS_DTYPE:\n # non-nano we will convert to nano\n if values.dtype.kind != \"m\":\n # caller is responsible for ensuring timedelta64 dtype\n raise TypeError(values.dtype) # pragma: no cover\n\n values = TimedeltaArray._from_sequence(values)._data\n if isinstance(values, TimedeltaArray):\n values = values._data\n assert isinstance(values, np.ndarray), type(values)\n return values\n\n @property\n def _holder(self):\n return TimedeltaArray\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.timedelta64)\n elif element is NaT:\n return True\n elif isinstance(element, (timedelta, np.timedelta64)):\n return True\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def fillna(self, value, **kwargs):\n\n # allow filling with integers to be\n # interpreted as nanoseconds\n if is_integer(value):\n # Deprecation GH#24694, GH#19233\n raise TypeError(\n \"Passing integers to fillna for timedelta64[ns] dtype is no \"\n \"longer supported. 
To obtain the old behavior, pass \"\n \"`pd.Timedelta(seconds=n)` instead.\"\n )\n return super().fillna(value, **kwargs)\n\n\nclass BoolBlock(NumericBlock):\n __slots__ = ()\n is_bool = True\n _can_hold_na = False\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.bool_)\n return isinstance(element, (bool, np.bool_))\n\n\nclass ObjectBlock(Block):\n __slots__ = ()\n is_object = True\n _can_hold_na = True\n\n def _maybe_coerce_values(self, values):\n if issubclass(values.dtype.type, str):\n values = np.array(values, dtype=object)\n return values\n\n @property\n def is_bool(self):\n \"\"\"\n we can be a bool if we have only bool values but are of type\n object\n \"\"\"\n return lib.is_bool_array(self.values.ravel(\"K\"))\n\n def reduce(self, func, ignore_failures: bool = False) -> List[Block]:\n \"\"\"\n For object-dtype, we operate column-wise.\n \"\"\"\n assert self.ndim == 2\n\n values = self.values\n if len(values) > 1:\n # split_and_operate expects func with signature (mask, values, inplace)\n def mask_func(mask, values, inplace):\n if values.ndim == 1:\n values = values.reshape(1, -1)\n return func(values)\n\n return self.split_and_operate(\n None, mask_func, False, ignore_failures=ignore_failures\n )\n\n try:\n res = func(values)\n except TypeError:\n if not ignore_failures:\n raise\n return []\n\n assert isinstance(res, np.ndarray)\n assert res.ndim == 1\n res = res.reshape(1, -1)\n return [self.make_block_same_class(res)]\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n coerce: bool = False,\n ) -> List[\"Block\"]:\n \"\"\"\n attempt to coerce any object types to better types return a copy of\n the block (if copy = True) by definition we ARE an ObjectBlock!!!!!\n \"\"\"\n # operate column-by-column\n def f(mask, val, idx):\n shape = val.shape\n values = soft_convert_objects(\n val.ravel(),\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=copy,\n )\n if isinstance(values, np.ndarray):\n # TODO(EA2D): allow EA once reshape is supported\n values = values.reshape(shape)\n\n return values\n\n if self.ndim == 2:\n blocks = self.split_and_operate(None, f, False)\n else:\n values = f(None, self.values.ravel(), None)\n blocks = [self.make_block(values)]\n\n return blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n if downcast is not None:\n return blocks\n\n # split and convert the blocks\n return extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])\n\n def _can_hold_element(self, element: Any) -> bool:\n return True\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n # Note: the checks we do in NDFrame.replace ensure we never get\n # here with listlike to_replace or value, as those cases\n # go through _replace_list\n\n regex = _should_use_regex(regex, to_replace)\n\n if regex:\n return self._replace_regex(to_replace, value, inplace=inplace)\n else:\n return super().replace(to_replace, value, inplace=inplace, regex=False)\n\n\ndef _should_use_regex(regex: bool, to_replace: Any) -> bool:\n \"\"\"\n Decide whether to treat `to_replace` as a regular expression.\n \"\"\"\n if is_re(to_replace):\n regex = True\n\n regex = regex and is_re_compilable(to_replace)\n\n # Don't use regex if the pattern is empty.\n regex = regex and 
re.compile(to_replace).pattern != \"\"\n return regex\n\n\nclass CategoricalBlock(ExtensionBlock):\n __slots__ = ()\n\n def _replace_list(\n self,\n src_list: List[Any],\n dest_list: List[Any],\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n if len(algos.unique(dest_list)) == 1:\n # We likely got here by tiling value inside NDFrame.replace,\n # so un-tile here\n return self.replace(src_list, dest_list[0], inplace, regex)\n return super()._replace_list(src_list, dest_list, inplace, regex)\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n regex: bool = False,\n ) -> List[\"Block\"]:\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n result = self if inplace else self.copy()\n\n result.values.replace(to_replace, value, inplace=True)\n return [result]\n\n\n# -----------------------------------------------------------------\n# Constructor Helpers\n\n\ndef get_block_type(values, dtype=None):\n \"\"\"\n Find the appropriate Block subclass to use for the given values and dtype.\n\n Parameters\n ----------\n values : ndarray-like\n dtype : numpy or pandas dtype\n\n Returns\n -------\n cls : class, subclass of Block\n \"\"\"\n dtype = dtype or values.dtype\n vtype = dtype.type\n\n cls: Type[Block]\n\n if is_sparse(dtype):\n # Need this first(ish) so that Sparse[datetime] is sparse\n cls = ExtensionBlock\n elif is_categorical_dtype(values.dtype):\n cls = CategoricalBlock\n elif issubclass(vtype, np.datetime64):\n assert not is_datetime64tz_dtype(values.dtype)\n cls = DatetimeBlock\n elif is_datetime64tz_dtype(values.dtype):\n cls = DatetimeTZBlock\n elif is_interval_dtype(dtype) or is_period_dtype(dtype):\n cls = ObjectValuesExtensionBlock\n elif is_extension_array_dtype(values.dtype):\n cls = ExtensionBlock\n elif issubclass(vtype, np.floating):\n cls = FloatBlock\n elif issubclass(vtype, np.timedelta64):\n assert issubclass(vtype, np.integer)\n cls = TimeDeltaBlock\n elif issubclass(vtype, np.complexfloating):\n cls = ComplexBlock\n elif issubclass(vtype, np.integer):\n cls = IntBlock\n elif dtype == np.bool_:\n cls = BoolBlock\n else:\n cls = ObjectBlock\n return cls\n\n\ndef make_block(values, placement, klass=None, ndim=None, dtype=None):\n # Ensure that we don't allow PandasArray / PandasDtype in internals.\n # For now, blocks should be backed by ndarrays when possible.\n if isinstance(values, ABCPandasArray):\n values = values.to_numpy()\n if ndim and ndim > 1:\n # TODO(EA2D): special case not needed with 2D EAs\n values = np.atleast_2d(values)\n\n if isinstance(dtype, PandasDtype):\n dtype = dtype.numpy_dtype\n\n if klass is None:\n dtype = dtype or values.dtype\n klass = get_block_type(values, dtype)\n\n elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype):\n # TODO: This is no longer hit internally; does it need to be retained\n # for e.g. 
pyarrow?\n values = DatetimeArray._simple_new(values, dtype=dtype)\n\n return klass(values, ndim=ndim, placement=placement)\n\n\n# -----------------------------------------------------------------\n\n\ndef extend_blocks(result, blocks=None):\n \"\"\" return a new extended blocks, given the result \"\"\"\n if blocks is None:\n blocks = []\n if isinstance(result, list):\n for r in result:\n if isinstance(r, list):\n blocks.extend(r)\n else:\n blocks.append(r)\n else:\n assert isinstance(result, Block), type(result)\n blocks.append(result)\n return blocks\n\n\ndef _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike:\n \"\"\" guarantee the shape of the values to be at least 1 d \"\"\"\n if values.ndim < ndim:\n shape = values.shape\n if not is_extension_array_dtype(values.dtype):\n # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023\n # block.shape is incorrect for \"2D\" ExtensionArrays\n # We can't, and don't need to, reshape.\n # error: \"ExtensionArray\" has no attribute \"reshape\"\n values = values.reshape(tuple((1,) + shape)) # type: ignore[attr-defined]\n return values\n\n\ndef safe_reshape(arr, new_shape: Shape):\n \"\"\"\n If possible, reshape `arr` to have shape `new_shape`,\n with a couple of exceptions (see gh-13012):\n\n 1) If `arr` is a ExtensionArray or Index, `arr` will be\n returned as is.\n 2) If `arr` is a Series, the `_values` attribute will\n be reshaped and returned.\n\n Parameters\n ----------\n arr : array-like, object to be reshaped\n new_shape : int or tuple of ints, the new shape\n \"\"\"\n if isinstance(arr, ABCSeries):\n arr = arr._values\n if not is_extension_array_dtype(arr.dtype):\n # Note: this will include TimedeltaArray and tz-naive DatetimeArray\n # TODO(EA2D): special case will be unnecessary with 2D EAs\n arr = np.asarray(arr).reshape(new_shape)\n return arr\n\n\ndef _putmask_smart(v: np.ndarray, mask: np.ndarray, n) -> np.ndarray:\n \"\"\"\n Return a new ndarray, try to preserve dtype if possible.\n\n Parameters\n ----------\n v : np.ndarray\n `values`, updated in-place.\n mask : np.ndarray[bool]\n Applies to both sides (array like).\n n : `new values` either scalar or an array like aligned with `values`\n\n Returns\n -------\n values : ndarray with updated values\n this *may* be a copy of the original\n\n See Also\n --------\n ndarray.putmask\n \"\"\"\n # we cannot use np.asarray() here as we cannot have conversions\n # that numpy does when numeric are mixed with strings\n\n # n should be the length of the mask or a scalar here\n if not is_list_like(n):\n n = np.repeat(n, len(mask))\n\n # see if we are only masking values that if putted\n # will work in the current dtype\n try:\n nn = n[mask]\n except TypeError:\n # TypeError: only integer scalar arrays can be converted to a scalar index\n pass\n else:\n # make sure that we have a nullable type\n # if we have nulls\n if not isna_compat(v, nn[0]):\n pass\n elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):\n # only compare integers/floats\n pass\n elif not (is_float_dtype(v.dtype) or is_integer_dtype(v.dtype)):\n # only compare integers/floats\n pass\n else:\n\n # we ignore ComplexWarning here\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n nn_at = nn.astype(v.dtype)\n\n comp = nn == nn_at\n if is_list_like(comp) and comp.all():\n nv = v.copy()\n nv[mask] = nn_at\n return nv\n\n n = np.asarray(n)\n\n def _putmask_preserve(nv, n):\n try:\n nv[mask] = n[mask]\n except (IndexError, ValueError):\n nv[mask] = n\n return 
nv\n\n # preserves dtype if possible\n if v.dtype.kind == n.dtype.kind:\n return _putmask_preserve(v, n)\n\n # change the dtype if needed\n dtype, _ = maybe_promote(n.dtype)\n\n v = v.astype(dtype)\n\n return _putmask_preserve(v, n)\n\n\ndef _extract_bool_array(mask: ArrayLike) -> np.ndarray:\n \"\"\"\n If we have a SparseArray or BooleanArray, convert it to ndarray[bool].\n \"\"\"\n if isinstance(mask, ExtensionArray):\n # We could have BooleanArray, Sparse[bool], ...\n # Except for BooleanArray, this is equivalent to just\n # np.asarray(mask, dtype=bool)\n mask = mask.to_numpy(dtype=bool, na_value=False)\n\n assert isinstance(mask, np.ndarray), type(mask)\n assert mask.dtype == bool, mask.dtype\n return mask\n" ]
[ [ "pandas.util._validators.validate_bool_kwarg", "pandas.core.dtypes.cast.maybe_box_datetimelike", "pandas.core.arrays.DatetimeArray._simple_new", "pandas.core.missing.clean_interp_method", "pandas.core.dtypes.common.is_datetime64_dtype", "numpy.place", "numpy.where", "pandas.core.dtypes.common.is_interval_dtype", "pandas.core.indexers.is_empty_indexer", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.core.dtypes.common.is_re_compilable", "pandas.core.dtypes.common.is_list_like", "pandas._libs.algos.validate_limit", "numpy.delete", "pandas._libs.internals.BlockPlacement", "pandas.core.dtypes.cast.convert_scalar_for_putitemlike", "numpy.array", "pandas.core.dtypes.cast.maybe_downcast_to_dtype", "pandas.core.dtypes.common.is_bool_dtype", "pandas.core.common.is_null_slice", "pandas.core.dtypes.cast.find_common_type", "numpy.datetime64", "pandas.core.dtypes.missing.isna", "pandas.core.missing.interpolate_2d", "pandas.core.array_algos.replace.replace_regex", "numpy.asarray", "pandas.core.dtypes.cast.maybe_downcast_numeric", "pandas.core.dtypes.common.is_datetime64tz_dtype", "pandas._libs.tslibs.conversion.ensure_datetime64ns", "pandas.core.algorithms.take_nd", "numpy.putmask", "pandas.core.dtypes.cast.infer_dtype_from_scalar", "numpy.timedelta64", "numpy.atleast_2d", "pandas.core.arrays.PandasArray", "numpy.ndim", "pandas.core.array_algos.transforms.shift", "numpy.errstate", "pandas.core.dtypes.cast.infer_dtype_from", "pandas.core.arrays.TimedeltaArray._from_sequence", "pandas.core.dtypes.common.is_integer", "pandas.core.dtypes.cast.maybe_upcast", "pandas._libs.lib.item_from_zerodim", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.dtypes.common.is_dtype_equal", "pandas._libs.lib.is_scalar", "pandas.core.indexers.check_setitem_lengths", "numpy.repeat", "pandas._libs.tslibs.timezones.tz_compare", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.pandas_dtype", "numpy.isnan", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.core.dtypes.common.is_period_dtype", "pandas._libs.writers.word_len", "pandas.core.missing.mask_missing", "pandas.core.missing.clean_fill_method", "pandas._libs.lib.is_float", "pandas.core.algorithms.diff", "numpy.broadcast_to", "numpy.squeeze", "numpy.dtype", "pandas.core.computation.expressions.where", "pandas.core.missing.interpolate_1d", "numpy.any", "numpy.arange", "pandas.core.algorithms.unique", "numpy.apply_along_axis", "pandas.io.formats.format.FloatArrayFormatter", "pandas.core.arrays.Categorical", "pandas.core.dtypes.common.is_float", "pandas.core.dtypes.common.is_datetime64_any_dtype", "pandas.core.dtypes.cast.maybe_promote", "pandas.core.indexers.is_scalar_indexer", "pandas.core.dtypes.common.is_sparse", "pandas.core.dtypes.missing.is_valid_nat_for_dtype", "pandas.core.dtypes.missing.isna_compat", "pandas.core.dtypes.cast.astype_nansafe", "pandas.core.dtypes.cast.maybe_infer_dtype_type", "pandas.core.dtypes.common.is_re", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.construction.extract_array", "pandas.core.array_algos.replace.compare_or_regex_search" ] ]
sw32-seo/GTA
[ "86b102a14b78f6c8b50d742a56445c748e59b51e" ]
[ "onmt/utils/stats_manager.py" ]
[ "import numpy as np\n\n\nclass StatsManager(object):\n def __init__(self, stat_names=['step', 'acc', 'ppl']):\n self.stat_names = stat_names\n self.train_stats = {}\n self.val_stats = {}\n\n for name in stat_names:\n self.train_stats[name] = []\n self.val_stats[name] = []\n\n def add_stats(self, train_stats=None, valid_stats=None):\n assert train_stats is not None or valid_stats is not None\n\n if train_stats is not None:\n for name, val in train_stats.items():\n self.train_stats[name].append(val)\n return\n\n if valid_stats is not None:\n for name, val in valid_stats.items():\n self.val_stats[name].append(val)\n\n def get_best_model(self, stat_name='acc', highest_best=True):\n stat_list = np.array(self.val_stats[stat_name])[10:]\n\n if highest_best:\n best_idx = np.argmax(stat_list)\n else:\n best_idx = np.argmin(stat_list)\n\n best_stats = {}\n for name in self.stat_names:\n best_stats[name] = self.val_stats[name][10:][best_idx]\n\n return self.val_stats['step'][10:][best_idx], best_stats\n\n def write_stats(self, output_dir):\n with open('%s/train_stats.csv' % output_dir, 'w+') as train_file:\n steps = self.train_stats['step']\n for idx, step in enumerate(steps):\n acc = self.train_stats['acc'][idx]\n ppl = self.train_stats['ppl'][idx]\n\n train_file.write('%s,%.4f,%.4f\\n' % (step, acc, ppl))\n\n with open('%s/valid_stats.csv' % output_dir, 'w+') as valid_file:\n steps = self.val_stats['step']\n for idx, step in enumerate(steps):\n acc = self.val_stats['acc'][idx]\n ppl = self.val_stats['ppl'][idx]\n\n valid_file.write('%s,%.4f,%.4f\\n' % (step, acc, ppl))\n" ]
[ [ "numpy.array", "numpy.argmin", "numpy.argmax" ] ]
pik-copan/pycopanpbcc
[ "3fcf0a895cd444f445e1a36f0373fefa4eefe786" ]
[ "scripts/plot_fig8.py" ]
[ "# -*- coding: utf-8 -*-\n# Author: Vera Heck <[email protected]>\n# Script generates Fig. 8 of Heck et al. 2016 (ESD)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom pylab import *\n\narray = np.array\n\nnstep = 128 # steps of parameter variation \npar1='alpha_max'\npar2= 'thresh_geo'\na2 = np.load('/save/a_alpha_max_thresh_geo_c_max=0.2.npy')\na4 = np.load('/save/a_alpha_max_thresh_geo_c_max=0.31.npy')\na6 = np.load('/save/a_alpha_max_thresh_geo_c_max=0.36.npy')\na8 = np.load('/save/a_alpha_max_thresh_geo_c_max=0.51.npy')\n \nsaveToFile = 'FIG8.pdf'\nnrows, ncols = nstep, nstep\nx = a2[:, 0]\ny = a2[:, 1]\n\nmy_cmap = cm.get_cmap('Greens')\nmy_cmap.set_under('w')\n\nfig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, frameon=False, sharey=True, figsize=(18/2.54, 5/2.54))\n\nplt.subplots_adjust(left=0.07, bottom=0.15, right=0.89, top=0.9, wspace=0.05, hspace=None)\narea2 = array(a2[:, 2])\narea4 = array(a4[:, 2])\narea6 = array(a6[:, 2])\narea8 = array(a8[:, 2])\n\ntitles = ['a) RCP2.6', 'b) RCP4.5', 'c) RCP6.0', 'd) RCP8.5']\n \ngrid1 = area2.reshape(nrows, ncols).T[::-1]\nax1.set_ylabel('tCDR threshold', fontsize=8)\nax1.set_title(titles[0], fontsize=8)\n\nax1.spines[\"top\"].set_linewidth(0.5) \nax1.spines[\"bottom\"].set_linewidth(0.5) \nax1.spines[\"right\"].set_linewidth(0.5) \nax1.spines[\"left\"].set_linewidth(0.5) \n\nax1.set_xticklabels([\"\",0.005,\"\",0.015,\"\",0.025,\"\"], fontsize=7)\nax1.set_yticklabels([0,\"\",0.1,\"\",0.2,\"\",0.3], fontsize=7)\nax1.get_xaxis().tick_bottom() \ncax = ax1.imshow(grid1, extent=(x.min(), x.max(), y.min(), y.max()),interpolation='none', cmap=my_cmap, alpha=1,aspect =\"auto\")\ncax.set_clim(vmin=(-0.0001), vmax=0.5)\nplt.show()\n\n\ngrid2 = area4.reshape(nrows, ncols).T[::-1]\nax2.tick_params(labelsize=7) # axis='x',\nax2.set_title(titles[1], fontsize=8)\nax2.set_xticklabels([\"\",0.005,\"\",0.015,\"\",0.025,\"\"], fontsize=7)\n\nax2.spines[\"top\"].set_linewidth(0.5) \nax2.spines[\"bottom\"].set_linewidth(0.5) \nax2.spines[\"right\"].set_linewidth(0.5) \nax2.spines[\"left\"].set_linewidth(0.5) \nax2.text(0.025, -0.05, 'tCDR rate',color= \"black\",fontsize=8)\nax2.get_xaxis().tick_bottom() \n\ncax = ax2.imshow(grid2, extent=(x.min(), x.max(), y.min(), y.max()),interpolation='none', cmap=my_cmap, alpha=1, aspect =\"auto\")\ncax.set_clim(vmin=(-0.0001), vmax=0.5)\n\ngrid3 = area6.reshape(nrows, ncols).T[::-1]\nax3.tick_params(labelsize=7) \nax3.set_title(titles[2], fontsize=8)\nax3.set_xticklabels([\"\",0.005,\"\",0.015,\"\",0.025,\"\"], fontsize=7)\n\nax3.spines[\"top\"].set_linewidth(0.5) \nax3.spines[\"bottom\"].set_linewidth(0.5) \nax3.spines[\"right\"].set_linewidth(0.5) \nax3.spines[\"left\"].set_linewidth(0.5) \n\nax3.get_xaxis().tick_bottom() \ncax = ax3.imshow(grid3, extent=(x.min(), x.max(), y.min(), y.max()), interpolation='none', cmap=my_cmap, alpha=1, aspect =\"auto\")\ncax.set_clim(vmin=(-0.0001), vmax=0.5)\n\ngrid4 = area8.reshape(nrows, ncols).T[::-1]\nax4.tick_params(labelsize=7) \nax4.set_title(titles[3], fontsize=8)\nax4.set_xticklabels([\"\",0.005,\"\",0.015,\"\",0.025,\"\"], fontsize=7)\nax4.set_yticklabels([0,\"\",0.1,\"\",0.2,\"\",0.3], fontsize=7)\n\nax4.spines[\"top\"].set_linewidth(0.5) \nax4.spines[\"bottom\"].set_linewidth(0.5) \nax4.spines[\"right\"].set_linewidth(0.5) \nax4.spines[\"left\"].set_linewidth(0.5) \n\nax4.get_xaxis().tick_bottom() \nax4.get_yaxis().tick_right() \nax4.set_xlim([0, 0.03]) \ncax = ax4.imshow(grid4, extent=(x.min(), x.max(), y.min(), 
y.max()),vmax=0.32,interpolation='nearest', cmap=my_cmap, alpha=1, aspect =\"auto\")\n\ncbar_ax = fig.add_axes([0.95, 0.155, 0.015, 0.75]) #location and width of cbar\ncbar = fig.colorbar(cax, cax=cbar_ax, orientation='vertical',ticks=[0, 0.1, 0.2, 0.3] )\ncbar.set_clim(vmin=(-0.0001), vmax=0.5)\ncbar.ax.tick_params(labelsize=8)\n\ncbar.ax.set_title('MCSOS size', fontsize=7)\ncbar.ax.set_yticklabels(['0', '0.1', '0.2', '0.3'],fontsize=7)\n\nif saveToFile:\n plt.savefig(saveToFile)\n\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots_adjust", "numpy.load", "matplotlib.pyplot.show", "matplotlib.cm.get_cmap" ] ]
robgon-art/music-generator
[ "88a681bef5ee53fcd764e8c156ee97c892f0caf3" ]
[ "program/GANs/data_generator.py" ]
[ "import random\nfrom random import randint\nfrom numpy import array\nimport numpy\nNOTE_SPACE = 24\t\t#two octaves of notes are valid here.\nTRUE_CHORD_FALSE_MAX = 0.0\n\ndef get_three_notes_and_is_chord(all_major=False):\n\tanswer = [0] * NOTE_SPACE\n\tif(randint(0, 1) == 0 or all_major):\t\t#make half major, half garbage\n\t\n\t\t#return numpy.ones( NOTE_SPACE), array(0)\n\t\t\n\t\t#then make numbers in a major chord\n\t\tbase = randint(0, 16)\n\t\t#base = 0\n\t\t#first we make random values less than the threshhold\n\t\tanswer = numpy.random.uniform(low=0.0, high=TRUE_CHORD_FALSE_MAX, size=(NOTE_SPACE,))\n\n\t\t\n\t\tanswer[base] = numpy.random.uniform(low=(1-TRUE_CHORD_FALSE_MAX))\n\t\tanswer[base + 4] = numpy.random.uniform(low=(1-TRUE_CHORD_FALSE_MAX))\n\t\tanswer[base + 7] = numpy.random.uniform(low=(1-TRUE_CHORD_FALSE_MAX))\n\t\t\n\t\t\n\t\tis_chord = 0\n\telse:\n\t\traise ValueError(\"not supposed to be using this anymore\")\n\t\t#then just get three random numbers\n\t\t\n\t\t# if it's not a chord, just do random noise\n\t\tanswer = numpy.random.uniform( high=TRUE_CHORD_FALSE_MAX, size=(NOTE_SPACE,))\n\t\t\n\t\tnums = random.sample(range(0, 24), 3)\n\t\tfor n in nums:\n\t\t\tanswer[n] = numpy.random.uniform(low=(1-TRUE_CHORD_FALSE_MAX))\n\t\t\n\t\tis_chord = 1\n\t\t\n\treturn array(answer), array(is_chord)\n\t\n\t\ndef chord_data_set(size, all_major=False):\n\tdata = []\n\tlabels = []\n\tfor i in range(size):\n\t\tchord, output = get_three_notes_and_is_chord(all_major)\n\t\tdata.append(chord)\n\t\tlabels.append(output)\n\n\treturn array(data), array(labels)" ]
[ [ "numpy.random.uniform", "numpy.array" ] ]
ruizca/xmmpzcat
[ "03938e96ff7cbb44adca6362f1b4492822d7e857" ]
[ "bin/binning.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nFunctions for the creation of bins based on\ndensity of optical and X-ray sources.\n\"\"\"\nimport os\n\nfrom tqdm import tqdm\nfrom astropy.table import Table, vstack\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nimport numpy as np\n\n#import matplotlib\n#matplotlib.use(\"qt5agg\")\n#import matplotlib.pyplot as plt\n\n\ndef calc_optstats(bin_sources, reg, density, gal_lat, nsrckey='NSRC_PS'):\n \"\"\"\n Statistics of the optical bin.\n \"\"\"\n region_stats = np.full((8), np.nan)\n region_stats[0] = np.median(density)\n region_stats[1] = 1.4826*np.median(np.abs(density - region_stats[0]))\n region_stats[2] = np.median(np.abs(gal_lat.value))\n region_stats[3] = region_stats[2] - reg[0].value\n region_stats[4] = reg[1].value - region_stats[2]\n region_stats[5] = np.sum(bin_sources['SKY_AREA'])\n region_stats[6] = np.sum(bin_sources[nsrckey])\n region_stats[7] = len(bin_sources)\n\n return region_stats\n\n\ndef calc_xstats(bin_sources, binid):\n \"\"\"\n Statistics of the X-ray bin.\n \"\"\"\n region_stats = np.full((8), np.nan)\n region_stats[0] = binid\n region_stats[1] = np.median(bin_sources['EP_TEXP'])\n region_stats[2] = np.min(bin_sources['EP_TEXP'])\n region_stats[3] = np.max(bin_sources['EP_TEXP'])\n region_stats[4] = np.sum(bin_sources['NSRC_XMM'])\n region_stats[5] = np.sum(bin_sources['SKY_AREA'])\n region_stats[6] = region_stats[4]/region_stats[5]\n region_stats[7] = len(bin_sources)\n\n return region_stats\n\n\ndef optical(obsids_table, data_folder, nir_survey='2MASS',\n opt_survey='pstarrs'):\n \"\"\"\n Group observations in obsids_table in bins of roughly equal density\n of optical sources, according to the Galactic latitude of the observations.\n \"\"\"\n if opt_survey == 'pstarrs':\n nsrckey = 'NSRC_PS'\n elif opt_survey == 'sdss':\n nsrckey = 'NSRC_SDSS'\n else:\n raise ValueError('Unknown optical survey!')\n\n ## Define Galactic regions\n regions_limits = [0, 10, 20, 25, 40, 60, 90]\n regions = np.array([regions_limits[:-1], regions_limits[1:]]).T * u.deg\n\n eq_coords = SkyCoord(ra=obsids_table['RA']*u.deg,\n dec=obsids_table['DEC']*u.deg)\n gal_lat = eq_coords.galactic.b\n density = obsids_table[nsrckey]/obsids_table['SKY_AREA']\n\n region_stats = np.full((len(regions), 8), np.nan)\n msk_outliers = np.array(len(obsids_table)*[False])\n optbins = np.array(len(obsids_table)*['kkkkkkkk'])\n\n for i, reg in enumerate(regions):\n msk_reg = np.logical_and(np.abs(gal_lat) >= reg[0],\n np.abs(gal_lat) < reg[1])\n\n region_stats[i, :] = calc_optstats(obsids_table[msk_reg], reg,\n density[msk_reg], gal_lat[msk_reg],\n nsrckey=nsrckey)\n\n # Find density outliers outside the Galactic plane\n if reg[1] > 20*u.deg:\n # density > median + 10*smad\n bin_outliers = density > region_stats[i, 0] + 10*region_stats[i, 1]\n bin_outliers = np.logical_and(msk_reg, bin_outliers)\n msk_outliers = np.logical_or(msk_outliers, bin_outliers)\n\n optbins[msk_reg] = 'bin{:02d}_{:02d}'.format(int(reg[0].value),\n int(reg[1].value))\n obsids_table['SKY_OUTLIER'] = msk_outliers\n obsids_table['OPTBIN'] = optbins\n\n# plt.figure()\n# plt.plot(np.abs(gal_lat), density, lw=0, marker='.', ms=2)\n# plt.scatter(np.abs(gal_lat[msk_outliers]), density[msk_outliers], color='gray')\n# plt.errorbar(region_stats[:,2], region_stats[:,0],\n# xerr=(region_stats[:, 3], region_stats[:, 4]),\n# yerr=10*region_stats[:,1], fmt='ro', capsize=5, zorder=1000)\n# plt.ylim(ymin=0)\n# plt.show()\n\n region_stats[:, 3] = region_stats[:, 2] - region_stats[:, 3]\n 
region_stats[:, 4] = region_stats[:, 4] + region_stats[:, 2]\n\n optbins_table = Table(region_stats,\n names=['SKYDEN_MEDIAN', 'SKYDEN_SNMAD',\n 'B_MEDIAN', 'B_MIN', 'B_MAX',\n 'SKY_AREA', 'NSRC', 'NFIELDS'])\n\n bins_filename = '{}_optbins.fits'.format(nir_survey.lower())\n optbins_table.write(os.path.join(data_folder, bins_filename),\n format='fits', overwrite=True)\n\n return obsids_table\n\n\ndef xrays(obsids_table, min_fields, binid_start):\n \"\"\"\n Group observations in obsids_table in bins of roughly equal density\n of X-ray sources, according to the exposure time of the observations.\n \"\"\"\n obsids_table.sort('EP_TEXP')\n nbins = int(len(obsids_table)/int(min_fields))\n stats = np.full((nbins, 8), np.nan)\n binid = np.full((len(obsids_table),), np.nan, dtype=int)\n\n j = 0\n for i in range(nbins):\n if i == nbins - 1:\n bin_table = obsids_table[j:]\n binid[j:] = binid_start + i\n else:\n bin_table = obsids_table[j:j+min_fields]\n binid[j:j + min_fields] = binid_start + i\n\n stats[i, :] = calc_xstats(bin_table, binid_start + i)\n j += min_fields\n\n stats_table = Table(stats, names=['BIN_ID', 'MEDIAN_TEXP', 'MIN_TEXP',\n 'MAX_TEXP', 'NSRC_XMM', 'SKY_AREA',\n 'SKY_DENSITY_XMM', 'NFIELDS'])\n\n obsids_table['BIN_ID'] = binid.astype(int)\n# binid_col = Table.Column(binid, name='BIN_ID')\n# obsids_table.add_column(binid_col)\n\n# plt.loglog(obsids_table['EP_TEXP'],\n# obsids_table['NSRC_XMM']/obsids_table['SKY_AREA'],\n# marker='.', ms=5, lw=0)\n# xerrmin = stats[:,0]-stats[:,1]\n# xerrmax = stats[:,2]-stats[:,0]\n# plt.errorbar(stats[:,0], stats[:,5], xerr=(xerrmin, xerrmax),\n# fmt='ro', capsize=5, zorder=1000)\n# plt.show()\n\n return obsids_table, stats_table\n\n\ndef final(obsids_table, data_folder, nir_survey='2MASS'):\n \"\"\"\n Group observations in obsids_table combining the bins defined by\n sky density of optical and X-ray sources.\n \"\"\"\n ### Get optical bins\n optbins = np.unique(obsids_table['OPTBIN'])\n obsids_table_bins = Table()\n stats = Table()\n\n ### Define Texp bins for each optical bin\n binid_first = 1\n for obin in tqdm(optbins, desc='Binning OBSIDs'):\n msk_bin = np.logical_and(obsids_table['OPTBIN'] == obin,\n ~obsids_table['SKY_OUTLIER'])\n\n bin_table = obsids_table[msk_bin]\n bin_table, bin_stats = xrays(bin_table, 45, binid_first)\n\n obsids_table_bins = vstack([obsids_table_bins, bin_table])\n stats = vstack([stats, bin_stats])\n\n binid_first += len(bin_stats)\n\n stats_filename = '{}_bins.fits'.format(nir_survey.lower())\n stats.write(os.path.join(data_folder, stats_filename),\n format='fits', overwrite=True)\n\n msk_outliers = obsids_table['SKY_OUTLIER']\n outliers_table = obsids_table[msk_outliers]\n outliers_col = Table.Column(len(outliers_table)*[np.nan], name='BIN_ID')\n outliers_table.add_column(outliers_col)\n\n obsids_table_bins = vstack([obsids_table_bins, outliers_table])\n\n return obsids_table_bins\n\n\ndef makebins(obsids_table, data_folder, desctag, nir_survey='2MASS',\n bincol='BIN_ID', errtype='circle'):\n \"\"\"\n Create fits tables containing the sources in the\n optical/X-ray combined bins.\n \"\"\"\n bins = np.unique(obsids_table[bincol])\n bins = bins[~np.isnan(bins)]\n\n groups_folder = os.path.join(data_folder, 'groups')\n bins_folder = os.path.join(data_folder, 'bins')\n if nir_survey != '2MASS':\n bins_folder += '_{}'.format(nir_survey.lower())\n groups_folder += '_{}'.format(nir_survey.lower())\n\n if not os.path.exists(bins_folder):\n os.makedirs(bins_folder)\n\n for binid in tqdm(bins, desc='Making {} 
bins'.format(desctag)):\n bin_filename = 'bin{}.fits'.format(str(int(binid)).zfill(3))\n bin_filename = os.path.join(bins_folder, bin_filename)\n\n msk_bin = obsids_table[bincol] == binid\n bin_table = obsids_table[msk_bin]\n tables_array = [None]*len(bin_table)\n\n for i, obs in enumerate(bin_table):\n group_file = '{}.fits'.format(obs['OBS_ID'])\n group_file = os.path.join(groups_folder, group_file)\n tables_array[i] = Table.read(group_file)\n\n bin_srcs = vstack(tables_array)\n bin_srcs.meta['AREA'] = np.sum(bin_table['SKY_AREA'])\n bin_srcs.meta['ERRTYPE'] = errtype\n\n bin_srcs.write(bin_filename, overwrite=True)\n" ]
[ [ "numpy.abs", "numpy.min", "numpy.unique", "numpy.isnan", "numpy.median", "numpy.full", "numpy.logical_or", "numpy.max", "numpy.array", "numpy.logical_and", "numpy.sum" ] ]
gnouveau/birdsonganalysis
[ "58032538c63e9506d386e5fff5c2e8321c1d2983" ]
[ "birdsonganalysis/plot.py" ]
[ "\"\"\"Plotting function for birdsonganalysis.\"\"\"\n\nimport numpy as np\n\n\nimport seaborn as sns\n\nimport matplotlib.patches as p\nimport matplotlib.pyplot as plt\n\nfrom .songfeatures import spectral_derivs\nfrom .constants import FREQ_RANGE\n\n\ndef spectral_derivs_plot(spec_der, contrast=0.1, ax=None, freq_range=None,\n fft_step=None, fft_size=None):\n \"\"\"\n Plot the spectral derivatives of a song in a grey scale.\n\n spec_der - The spectral derivatives of the song (computed with\n `spectral_derivs`) or the song itself\n contrast - The contrast of the plot\n ax - The matplotlib axis where the plot must be drawn, if None, a new axis\n is created\n freq_range - The amount of frequency to plot, usefull only if `spec_der` is\n a song. Given to `spectral_derivs`\n ov_params - The Parameters to override, passed to `spectral_derivs`\n \"\"\"\n if spec_der.ndim == 1:\n spec_der = spectral_derivs(spec_der, freq_range, fft_step, fft_size)\n ax = sns.heatmap(spec_der.T, yticklabels=50, xticklabels=50,\n vmin=-contrast, vmax=contrast, ax=ax, cmap='Greys',\n cbar=False)\n ax.invert_yaxis()\n return ax\n\n\ndef plot_over_spec(data, ax, freq_range=FREQ_RANGE, zoom=1, **plot_params):\n \"\"\"\n Plot the feature over a spectral derivatives plot.\n\n The data are first normalized then rescale to fit the ylim of the axis.\n \"\"\"\n # Normalize the data so that they fit in the graph\n ndata = (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data))\n # We take for abscisse axis the line corresponding to 5% of freq_range\n # We rescale the data so that they take 75% of the graph\n ax.plot(zoom * (5/100 * freq_range + 75/100 * freq_range * ndata),\n **plot_params)\n return ax\n\n\ndef similarity_plot(sim, song, refsong):\n \"\"\"Do a similarity plot with the result of `bsa.similarity`.\"\"\"\n fig, ax = plt.subplots(2, 2, figsize=(13, 13),\n gridspec_kw={'width_ratios': [1, 4],\n 'height_ratios': [1, 4]})\n ax[0, 0].axis('off')\n sds = spectral_derivs(song)\n sdr = spectral_derivs(refsong)\n ax[0, 1] = spectral_derivs_plot(sds, 0.05, ax[0, 1])\n ax[0, 1].set_title('Song')\n ax[1, 0] = spectral_derivs_plot(np.flip(sdr.T, 1), 0.05,\n ax[1, 0])\n ax[1, 0].set_title('Reference Song')\n ax[1, 1] = sns.heatmap(sim['glob_matrix'], ax=ax[1, 1], cbar=False,\n vmin=0, vmax=1)\n for section in sim['sections']:\n xy = (section['beg'][0],\n sim['glob_matrix'].shape[1] - section['end'][1])\n width = section['end'][0] - section['beg'][0]\n height = section['end'][1] - section['beg'][1]\n ax[1, 1].add_patch(p.Rectangle(xy, width, height, fill=False,\n edgecolor='y', linewidth=3))\n return fig\n" ]
[ [ "numpy.nanmax", "matplotlib.patches.Rectangle", "numpy.nanmin", "matplotlib.pyplot.subplots", "numpy.flip" ] ]
llecaroz/multihead_joint_entity_relation_extraction
[ "6cef17bb88700eda336d106b761352e65d8e4bea" ]
[ "tf_utils.py" ]
[ "import utils\nimport time\nimport eval\n\nclass model:\n \"\"\"Set of classes and methods for training the model and computing the ner and head selection loss\"\"\"\n\n\n def __init__(self,config,emb_mtx,sess):\n \"\"\"\"Initialize data\"\"\"\n self.config=config\n self.emb_mtx=emb_mtx\n self.sess=sess\n\n def getEvaluator(self):\n if self.config.evaluation_method == \"strict\" and self.config.ner_classes == \"BIO\": # the most common metric\n return eval.chunkEvaluator(self.config, ner_chunk_eval=\"boundaries_type\",\n rel_chunk_eval=\"boundaries_type\")\n elif self.config.evaluation_method == \"boundaries\" and self.config.ner_classes == \"BIO\": # s\n return eval.chunkEvaluator(self.config, ner_chunk_eval=\"boundaries\", rel_chunk_eval=\"boundaries\")\n elif self.config.evaluation_method == \"relaxed\" and self.config.ner_classes == \"EC\": # todo\n return eval.relaxedChunkEvaluator(self.config, rel_chunk_eval=\"boundaries_type\")\n else:\n raise ValueError(\n 'Valid evaluation methods : \"strict\" and \"boundaries\" in \"BIO\" mode and \"relaxed\" in \"EC\" mode .')\n\n\n def train(self,train_data,operations,iter):\n\n loss = 0\n\n evaluator = self.getEvaluator()\n start_time = time.time()\n for x_train in utils.generator(train_data, operations.m_op, self.config, train=True):\n _, val, predicted_ner, actual_ner, predicted_rel, actual_rel, _, m_train = self.sess.run(\n [operations.train_step, operations.obj, operations.predicted_op_ner, operations.actual_op_ner, operations.predicted_op_rel, operations.actual_op_rel, operations.score_op_rel,\n operations.m_op], feed_dict=x_train) # sess.run(embedding_init, feed_dict={embedding_placeholder: wordvectors})\n \n if self.config.evaluation_method == \"relaxed\":\n evaluator.add(predicted_ner, actual_ner, predicted_rel, actual_rel,m_train['BIO'])\n else:\n evaluator.add(predicted_ner, actual_ner, predicted_rel, actual_rel)\n\n loss += val\n\n print('****iter %d****' % (iter))\n print('-------Train-------')\n print('loss: %f ' % (loss))\n\n if self.config.evaluation_method == \"relaxed\":\n evaluator.computeInfoMacro()\n else:\n evaluator.printInfo()\n\n elapsed_time = time.time() - start_time\n print(\"Elapsed train time in sec:\" + str(elapsed_time))\n print()\n\n\n\n def evaluate(self,eval_data,operations,set):\n\n print('-------Evaluate on '+set+'-------')\n\n evaluator = self.getEvaluator()\n for x_dev in utils.generator(eval_data, operations.m_op, self.config, train=False):\n predicted_ner, actual_ner, predicted_rel, actual_rel, _, m_eval = self.sess.run(\n [operations.predicted_op_ner, operations.actual_op_ner, operations.predicted_op_rel, operations.actual_op_rel, operations.score_op_rel, operations.m_op], feed_dict=x_dev)\n\n if self.config.evaluation_method == \"relaxed\":\n evaluator.add(predicted_ner, actual_ner, predicted_rel, actual_rel, m_eval['BIO'])\n else:\n evaluator.add(predicted_ner, actual_ner, predicted_rel, actual_rel)\n\n if self.config.evaluation_method == \"relaxed\":\n evaluator.computeInfoMacro(printScores=True)\n if \"other\" in [x.lower() for x in self.config.dataset_set_ec_tags]: # if other class exists report score without \"Other\" class, see previous work on the CoNLL04\n return evaluator.getMacroF1scoresNoOtherClass()[2]\n else:\n return evaluator.getMacroF1scores()[2]\n\n else:\n evaluator.printInfo()\n return evaluator.getChunkedOverallAvgF1()\n\n\n\n def get_train_op(self,obj):\n import tensorflow as tf\n\n if self.config.optimizer == 'Adam':\n\n optim = 
tf.compat.v1.train.AdamOptimizer(self.config.learning_rate)\n\n elif self.config.optimizer == 'Adagrad':\n optim = tf.compat.v1.train.AdagradOptimizer(self.config.learning_rate)\n elif self.config.optimizer == 'AdadeltaOptimizer':\n optim = tf.compat.v1.train.AdadeltaOptimizer(self.config.learning_rate)\n elif self.config.optimizer == 'GradientDescentOptimizer':\n optim = tf.compat.v1.train.GradientDescentOptimizer(self.config.learning_rate)\n\n if self.config.gradientClipping == True:\n\n gvs = optim.compute_gradients(obj)\n\n new_gvs = self.correctGradients(gvs)\n\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in new_gvs]\n train_step = optim.apply_gradients(capped_gvs)\n\n\n else:\n train_step = optim.minimize(obj)\n\n return train_step\n\n def correctGradients(self,gvs):\n import tensorflow as tf\n\n new_gvs = []\n for grad, var in gvs:\n # print (grad)\n if grad == None:\n\n grad = tf.zeros_like(var)\n\n new_gvs.append((grad, var))\n if len(gvs) != len(new_gvs):\n print(\"gradient Error\")\n return new_gvs\n\n def broadcasting(self, left, right):\n import tensorflow as tf\n\n\n\n left = tf.transpose(left, perm=[1, 0, 2])\n left = tf.expand_dims(left, 3)\n\n right = tf.transpose(right, perm=[0, 2, 1])\n right = tf.expand_dims(right, 0)\n\n B = left + right\n B = tf.transpose(B, perm=[1, 0, 3, 2])\n\n return B\n\n def getNerScores(self, lstm_out, n_types=1, dropout_keep_in_prob=1):\n import tensorflow as tf\n\n\n u_a = tf.compat.v1.get_variable(\"u_typ\", [self.config.hidden_size_lstm * 2, self.config.hidden_size_n1]) # [128 32]\n v = tf.compat.v1.get_variable(\"v_typ\", [self.config.hidden_size_n1, n_types]) # [32,1] or [32,10]\n b_s = tf.compat.v1.get_variable(\"b_typ\", [self.config.hidden_size_n1])\n b_c = tf.compat.v1.get_variable(\"b_ctyp\", [n_types])\n\n mul = tf.einsum('aij,jk->aik', lstm_out, u_a) # [16 348 64] * #[64 32] = [16 348 32]\n\n sum = mul + b_s\n if self.config.activation==\"tanh\":\n output = tf.compat.v1.nn.tanh(sum)\n elif self.config.activation==\"relu\":\n output = tf.compat.v1.nn.relu(sum)\n\n if self.config.use_dropout==True:\n output = tf.compat.v1.nn.dropout(output, keep_prob=dropout_keep_in_prob)\n\n g = tf.einsum('aik,kp->aip', output, v) + b_c\n\n\n return g\n\n def getHeadSelectionScores(self, lstm_out,dropout_keep_in_prob=1):\n import tensorflow as tf\n\n u_a = tf.compat.v1.get_variable(\"u_a\", [(self.config.hidden_size_lstm * 2) + self.config.label_embeddings_size, self.config.hidden_size_n1]) # [128 32]\n w_a = tf.compat.v1.get_variable(\"w_a\", [(self.config.hidden_size_lstm * 2) + self.config.label_embeddings_size, self.config.hidden_size_n1]) # [128 32]\n v = tf.compat.v1.get_variable(\"v\", [self.config.hidden_size_n1, len(self.config.dataset_set_relations)]) # [32,1] or [32,4]\n b_s = tf.compat.v1.get_variable(\"b_s\", [self.config.hidden_size_n1])\n\n\n\n left = tf.einsum('aij,jk->aik', lstm_out, u_a) # [16 348 64] * #[64 32] = [16 348 32]\n right = tf.einsum('aij,jk->aik', lstm_out, w_a) # [16 348 64] * #[64 32] = [16 348 32]\n\n\n\n outer_sum = self.broadcasting(left, right) # [16 348 348 32]\n\n outer_sum_bias = outer_sum + b_s\n\n\n if self.config.activation==\"tanh\":\n output = tf.tanh(outer_sum_bias)\n elif self.config.activation==\"relu\":\n output = tf.compat.v1.nn.relu(outer_sum_bias)\n\n\n if self.config.use_dropout==True:\n output = tf.compat.v1.nn.dropout(output, keep_prob=dropout_keep_in_prob)\n\n\n output = tf.compat.v1.nn.dropout(output, keep_prob=dropout_keep_in_prob)\n\n\n\n g = 
tf.einsum('aijk,kp->aijp', output, v)\n\n\n\n g = tf.reshape(g, [tf.shape(g)[0], tf.shape(g)[1], tf.shape(g)[2] * len(self.config.dataset_set_relations)])\n\n\n\n return g\n\n\n\n def computeLoss(self,input_rnn, dropout_embedding_keep,dropout_lstm_keep,dropout_lstm_output_keep,\n seqlen,dropout_fcl_ner_keep,ners_ids, dropout_fcl_rel_keep,is_train,scoring_matrix_gold, reuse = False):\n\n import tensorflow as tf\n import tensorflow_addons\n\n with tf.compat.v1.variable_scope(\"loss_computation\", reuse=reuse):\n\n if self.config.use_dropout:\n input_rnn = tf.compat.v1.nn.dropout(input_rnn, keep_prob=dropout_embedding_keep)\n #input_rnn = tf.Print(input_rnn, [dropout_embedding_keep], 'embedding: ', summarize=1000)\n for i in range(self.config.num_lstm_layers):\n if self.config.use_dropout and i>0:\n input_rnn = tf.compat.v1.nn.dropout(input_rnn, keep_prob=dropout_lstm_keep)\n #input_rnn = tf.Print(input_rnn, [dropout_lstm_keep], 'lstm: ', summarize=1000)\n\n lstm_fw_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size_lstm)\n # Backward direction cell\n lstm_bw_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size_lstm)\n\n lstm_out, _ = tf.compat.v1.nn.bidirectional_dynamic_rnn(\n cell_fw=lstm_fw_cell,\n cell_bw=lstm_bw_cell,\n inputs=input_rnn,\n sequence_length=seqlen,\n dtype=tf.float32, scope='BiLSTM' + str(i))\n\n input_rnn = tf.concat(lstm_out, 2)\n\n lstm_output = input_rnn\n\n if self.config.use_dropout:\n lstm_output = tf.compat.v1.nn.dropout(lstm_output, keep_prob=dropout_lstm_output_keep)\n\n\n mask = tf.sequence_mask(seqlen, dtype=tf.float32)\n\n ner_input = lstm_output\n # loss= tf.Print(loss, [tf.shape(loss)], 'shape of loss is:') # same as scoring matrix ie, [1 59 590]\n if self.config.ner_classes == \"EC\":\n\n nerScores = self.getNerScores(ner_input, len(self.config.dataset_set_ec_tags),\n dropout_keep_in_prob=dropout_fcl_ner_keep)\n label_matrix = tf.compat.v1.get_variable(name=\"label_embeddings\", dtype=tf.float32,\n shape=[len(self.config.dataset_set_ec_tags),\n self.config.label_embeddings_size])\n elif self.config.ner_classes == \"BIO\":\n\n nerScores = self.getNerScores(ner_input, len(self.config.dataset_set_bio_tags),\n dropout_keep_in_prob=dropout_fcl_ner_keep)\n label_matrix = tf.compat.v1.get_variable(name=\"label_embeddings\", dtype=tf.float32,\n shape=[len(self.config.dataset_set_bio_tags),\n self.config.label_embeddings_size])\n\n # nerScores = tf.Print(nerScores, [tf.shape(ners_ids), ners_ids, tf.shape(nerScores)], 'ners_ids: ', summarize=1000)\n\n log_likelihood, transition_params = tensorflow_addons.text.crf.crf_log_likelihood(\n nerScores, ners_ids, seqlen)\n if self.config.ner_loss == \"crf\":\n\n lossNER = -log_likelihood\n predNers, viterbi_score = tensorflow_addons.text.crf.crf_decode(\n nerScores, transition_params, seqlen)\n\n elif self.config.ner_loss == \"softmax\":\n lossNER = tf.compat.v1.nn.sparse_softmax_cross_entropy_with_logits(logits=nerScores, labels=ners_ids)\n\n predNers = tf.cast(tf.arg_max(nerScores, 2), tf.int32)\n\n\n if self.config.label_embeddings_size > 0:\n\n labels = tf.cond(is_train > 0, lambda: ners_ids, lambda: predNers)\n\n\n label_embeddings = tf.compat.v1.nn.embedding_lookup(label_matrix, labels)\n rel_input = tf.concat([lstm_output, label_embeddings], axis=2)\n\n else:\n\n rel_input = lstm_output\n\n\n rel_scores = self.getHeadSelectionScores(rel_input,\n dropout_keep_in_prob=dropout_fcl_rel_keep)\n\n\n lossREL = tf.compat.v1.nn.sigmoid_cross_entropy_with_logits(logits=rel_scores, 
labels=scoring_matrix_gold)\n probas=tf.compat.v1.nn.sigmoid(rel_scores)\n predictedRel = tf.round(probas)\n\n return lossNER,lossREL,predNers,predictedRel,rel_scores\n\n\n\n\n def run(self):\n\n import tensorflow as tf\n\n # shape = (batch size, max length of sentence, max length of word)\n char_ids = tf.compat.v1.placeholder(tf.int32, shape=[None, None, None])\n is_train = tf.compat.v1.placeholder(tf.int32)\n\n # shape = (batch_size, max_length of sentence)\n word_lengths = tf.compat.v1.placeholder(tf.int32, shape=[None, None])\n\n embedding_ids = tf.compat.v1.placeholder(tf.int32, [None, None]) # [ batch_size * max_sequence ]\n\n token_ids = tf.compat.v1.placeholder(tf.int32, [None, None]) # [ batch_size * max_sequence ]\n\n entity_tags_ids = tf.compat.v1.placeholder(tf.int32, [None, None])\n\n scoring_matrix_gold = tf.compat.v1.placeholder(tf.float32, [None, None, None]) # [ batch_size * max_sequence]\n\n\n tokens = tf.compat.v1.placeholder(tf.string, [None, None]) # [ batch_size * max_sequence]\n BIO = tf.compat.v1.placeholder(tf.string, [None, None]) # [ batch_size * max_sequence]\n entity_tags = tf.compat.v1.placeholder(tf.string, [None, None]) # [ batch_size * max_sequence]\n\n # classes = ...\n seqlen = tf.compat.v1.placeholder(tf.int32, [None]) # [ batch_size ]\n\n doc_ids = tf.compat.v1.placeholder(tf.string, [None]) # [ batch_size ]\n\n\n dropout_embedding_keep = tf.compat.v1.placeholder(tf.float32, name=\"dropout_embedding_keep\")\n dropout_lstm_keep = tf.compat.v1.placeholder(tf.float32, name=\"dropout_lstm_keep\")\n dropout_lstm_output_keep = tf.compat.v1.placeholder(tf.float32, name=\"dropout_lstm_output_keep\")\n dropout_fcl_ner_keep = tf.compat.v1.placeholder(tf.float32, name=\"dropout_fcl_ner_keep\")\n dropout_fcl_rel_keep = tf.compat.v1.placeholder(tf.float32, name=\"dropout_fcl_rel_keep\")\n\n embedding_matrix = tf.compat.v1.get_variable(name=\"embeddings\", shape=self.emb_mtx.shape,\n initializer=tf.constant_initializer(self.emb_mtx), trainable=False)\n\n\n #####char embeddings\n\n # 1. get character embeddings\n\n K = tf.compat.v1.get_variable(name=\"char_embeddings\", dtype=tf.float32,\n shape=[len(self.config.dataset_set_characters), self.config.char_embeddings_size])\n # shape = (batch, sentence, word, dim of char embeddings)\n char_embeddings = tf.compat.v1.nn.embedding_lookup(K, char_ids)\n\n # 2. put the time dimension on axis=1 for dynamic_rnn\n s = tf.shape(char_embeddings) # store old shape\n\n\n char_embeddings_reshaped = tf.reshape(char_embeddings, shape=[-1, s[-2], self.config.char_embeddings_size])\n word_lengths_reshaped = tf.reshape(word_lengths, shape=[-1])\n\n\n\n char_hidden_size = self.config.hidden_size_char\n\n # 3. 
bi lstm on chars\n cell_fw = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(char_hidden_size, state_is_tuple=True)\n cell_bw = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(char_hidden_size, state_is_tuple=True)\n\n _, ((_, output_fw), (_, output_bw)) = tf.compat.v1.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw, cell_bw=cell_bw,\n inputs=char_embeddings_reshaped,\n sequence_length=word_lengths_reshaped,\n dtype=tf.float32)\n # shape = (batch x sentence, 2 x char_hidden_size)\n output = tf.concat([output_fw, output_bw], axis=-1)\n\n # shape = (batch, sentence, 2 x char_hidden_size)\n char_rep = tf.reshape(output, shape=[-1, s[1], 2 * char_hidden_size])\n\n # concat char embeddings\n\n word_embeddings = tf.compat.v1.nn.embedding_lookup(embedding_matrix, embedding_ids)\n\n if self.config.use_chars == True:\n input_rnn = tf.concat([word_embeddings, char_rep], axis=-1)\n\n else:\n input_rnn = word_embeddings\n\n embeddings_input=input_rnn\n\n\n lossNER, lossREL, predicted_entity_tags_ids, predictedRel, rel_scores = self.computeLoss(input_rnn,\n dropout_embedding_keep,\n dropout_lstm_keep,\n dropout_lstm_output_keep, seqlen,\n dropout_fcl_ner_keep,\n entity_tags_ids, dropout_fcl_rel_keep,\n is_train,\n scoring_matrix_gold,reuse=False)\n\n obj = tf.reduce_sum(lossNER) + tf.reduce_sum(lossREL)\n #perturb the inputs\n raw_perturb = tf.gradients(obj, embeddings_input)[0] # [batch, L, dim]\n normalized_per=tf.compat.v1.nn.l2_normalize(raw_perturb, axis=[1, 2])\n perturb =self.config.alpha*tf.sqrt(tf.cast(tf.shape(input_rnn)[2], tf.float32)) * tf.stop_gradient(normalized_per)\n perturb_inputs = embeddings_input + perturb\n\n lossNER_per, lossREL_per, _, _, _ = self.computeLoss(perturb_inputs,\n dropout_embedding_keep,\n dropout_lstm_keep,\n dropout_lstm_output_keep, seqlen,\n dropout_fcl_ner_keep,\n entity_tags_ids, dropout_fcl_rel_keep,\n is_train,\n scoring_matrix_gold, reuse=True)\n\n actualRel = tf.round(scoring_matrix_gold)\n\n\n if self.config.use_adversarial==True:\n\n obj+=tf.reduce_sum(lossNER_per)+tf.reduce_sum(lossREL_per)\n\n\n\n m = {}\n m['isTrain'] = is_train\n m['embeddingIds'] = embedding_ids\n m['charIds'] = char_ids\n m['tokensLens'] = word_lengths\n m['entity_tags_ids'] = entity_tags_ids\n m['scoringMatrixGold'] = scoring_matrix_gold\n m['seqlen'] = seqlen\n m['doc_ids'] = doc_ids\n m['tokenIds'] = token_ids\n m['dropout_embedding']=dropout_embedding_keep\n m['dropout_lstm']=dropout_lstm_keep\n m['dropout_lstm_output']=dropout_lstm_output_keep\n m['dropout_fcl_ner']=dropout_fcl_ner_keep\n m['dropout_fcl_rel'] = dropout_fcl_rel_keep\n m['tokens'] = tokens\n m['BIO'] = BIO\n m['entity_tags'] = entity_tags\n\n return obj, m, predicted_entity_tags_ids, entity_tags_ids, predictedRel, actualRel, rel_scores\n\n\nclass operations():\n def __init__(self,train_step,obj, m_op, predicted_op_ner, actual_op_ner, predicted_op_rel, actual_op_rel, score_op_rel):\n\n self.train_step=train_step\n self.obj=obj\n self.m_op = m_op\n self.predicted_op_ner = predicted_op_ner\n self.actual_op_ner = actual_op_ner\n self.predicted_op_rel = predicted_op_rel\n self.actual_op_rel = actual_op_rel\n self.score_op_rel = score_op_rel" ]
[ [ "tensorflow.cond", "tensorflow.compat.v1.nn.dropout", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.tanh", "tensorflow.compat.v1.nn.bidirectional_dynamic_rnn", "tensorflow.compat.v1.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.nn.sigmoid", "tensorflow.gradients", "tensorflow.stop_gradient", "tensorflow.compat.v1.nn.rnn_cell.BasicLSTMCell", "tensorflow.compat.v1.train.AdadeltaOptimizer", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.train.GradientDescentOptimizer", "tensorflow.shape", "tensorflow.compat.v1.get_variable", "tensorflow.zeros_like", "tensorflow.compat.v1.nn.relu", "tensorflow.compat.v1.nn.l2_normalize", "tensorflow.round", "tensorflow.sequence_mask", "tensorflow.clip_by_value", "tensorflow.compat.v1.train.AdagradOptimizer", "tensorflow.transpose", "tensorflow.arg_max", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.compat.v1.nn.tanh", "tensorflow.einsum", "tensorflow.constant_initializer", "tensorflow.compat.v1.nn.embedding_lookup", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits" ] ]
gitter-badger/galaxy2galaxy
[ "1374a32a6be252c1eb426ce21bf1e26ffb253bb9" ]
[ "galaxy2galaxy/models/gan_utils.py" ]
[ "\"\"\" Spectral Norm GAN \"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow_gan as tfgan\nimport tensorflow_hub as hub\n\nfrom tensorflow_gan.python.estimator.gan_estimator import Optimizers, get_gan_model, get_train_estimator_spec, get_eval_estimator_spec, get_predict_estimator_spec\nfrom tensorflow_gan.python import train as tfgan_train\nfrom tensorflow_gan.python import namedtuples\nfrom tensorflow_gan.python.estimator.gan_estimator import SummaryType\nfrom tensorflow.python.estimator import model_fn as model_fn_lib\nfrom tensor2tensor.utils import hparams_lib\nfrom tensor2tensor.utils import t2t_model\nfrom tensor2tensor.layers import common_layers\n\nfrom galaxy2galaxy.layers import spectral_ops as ops\n\n\nclass AbstractGAN(t2t_model.T2TModel):\n \"\"\" Base class for tf-gan based models\n \"\"\"\n\n def generator(self, code, mode):\n raise NotImplementedError\n\n def discriminator(self, x, conditioning, mode):\n raise NotImplementedError\n\n def discriminator_loss_fn(self):\n raise NotImplementedError\n\n def generator_loss_fn(self):\n raise NotImplementedError\n\n @property\n def summaries(self):\n return [SummaryType.IMAGES]\n\n def sample_noise(self):\n p = self.hparams\n shape = [p.batch_size, p.bottleneck_bits]\n z = tf.random.normal(shape, name='z0', dtype=tf.float32)\n return z\n\n @classmethod\n def estimator_model_fn(cls,\n hparams,\n features,\n labels,\n mode,\n config=None,\n params=None,\n decode_hparams=None,\n use_tpu=False):\n\n if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL,\n model_fn_lib.ModeKeys.PREDICT]:\n raise ValueError('Mode not recognized: %s' % mode)\n\n if mode is model_fn_lib.ModeKeys.TRAIN:\n is_training = True\n else:\n is_training = False\n\n hparams = hparams_lib.copy_hparams(hparams)\n\n # Instantiate model\n data_parallelism = None\n if not use_tpu and config:\n data_parallelism = config.data_parallelism\n reuse = tf.get_variable_scope().reuse\n\n # Instantiate model\n self = cls(\n hparams,\n mode,\n data_parallelism=data_parallelism,\n decode_hparams=decode_hparams,\n _reuse=reuse)\n\n generator_inputs = self.sample_noise()\n # rename inputs for clarity\n real_data = features['inputs']\n img_shape = common_layers.shape_list(real_data)[1:4]\n real_data.set_shape([hparams.batch_size]+img_shape)\n\n # To satify the TFGAN API setting real data to none on predict\n if mode == tf.estimator.ModeKeys.PREDICT:\n real_data =None\n\n optimizers = Optimizers(tf.compat.v1.train.AdamOptimizer(\n hparams.generator_lr, hparams.beta1),\n tf.compat.v1.train.AdamOptimizer(\n hparams.discriminator_lr, hparams.beta1)\n )\n\n # Creates tfhub modules for both generator and discriminator\n def make_discriminator_spec():\n input_layer = tf.placeholder(tf.float32, shape=[None] + img_shape)\n disc_output = self.discriminator(input_layer, None, mode)\n hub.add_signature(inputs=input_layer, outputs=disc_output)\n disc_spec = hub.create_module_spec(make_discriminator_spec)\n\n def make_generator_spec():\n input_layer = tf.placeholder(tf.float32, shape=[None] + common_layers.shape_list(generator_inputs)[1:])\n gen_output = self.generator(input_layer, mode)\n hub.add_signature(inputs=input_layer, outputs=gen_output)\n gen_spec = hub.create_module_spec(make_generator_spec)\n\n # Create the modules\n discriminator_module = hub.Module(disc_spec, name=\"Discriminator_Module\", trainable=True)\n generator_module = hub.Module(gen_spec, 
name=\"Generator_Module\", trainable=True)\n\n # Wraps the modules into functions expected by TF-GAN\n generator = lambda code, mode: generator_module(code)\n discriminator = lambda image, conditioning, mode: discriminator_module(image)\n\n # Make GANModel, which encapsulates the GAN model architectures.\n gan_model = get_gan_model(mode,\n generator,\n discriminator,\n real_data,\n generator_inputs,\n add_summaries=self.summaries)\n\n # Make GANLoss, which encapsulates the losses.\n if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:\n gan_loss = tfgan_train.gan_loss(\n gan_model,\n self.generator_loss,\n self.discriminator_loss,\n add_summaries=True)\n\n # Make the EstimatorSpec, which incorporates the GANModel, losses, eval\n # metrics, and optimizers (if required).\n if mode == tf.estimator.ModeKeys.TRAIN:\n get_hooks_fn = tfgan_train.get_sequential_train_hooks(namedtuples.GANTrainSteps(hparams.gen_steps, hparams.disc_steps))\n estimator_spec = get_train_estimator_spec(gan_model, gan_loss, optimizers, get_hooks_fn, is_chief=True)\n elif mode == tf.estimator.ModeKeys.EVAL:\n estimator_spec = get_eval_estimator_spec(gan_model, gan_loss)\n else: # tf.estimator.ModeKeys.PREDICT\n # Register hub modules for export\n hub.register_module_for_export(generator_module, \"generator\")\n hub.register_module_for_export(discriminator_module, \"discriminator\")\n estimator_spec = get_predict_estimator_spec(gan_model)\n return estimator_spec\n" ]
[ [ "tensorflow.get_variable_scope", "tensorflow.random.normal", "tensorflow.placeholder", "tensorflow.compat.v1.train.AdamOptimizer" ] ]
simeoncarstens/ensemble_hic
[ "abaec8972866b593e689e39419d1c2d7ab6788dc", "abaec8972866b593e689e39419d1c2d7ab6788dc" ]
[ "scripts/plots/nora2012/distance_distributions_SI.py", "ensemble_hic/sphere_prior.py" ]
[ "import os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\n\nprobes = (\n ('pEN1', 100423573, 100433412, 'Linx'),\n ('pEN2', 100622909, 100632521, 'Xite'),\n ('pLG1', 100456274, 100465704, 'Linx'),\t\n ('pLG10', 100641750, 100646253, 'Dxpas34'),\n ('pLG11', 100583328, 100588266, 'Chic1'),\n ('X3', 100512892, 100528952, 'Cdx4'),\n ('X4', 100557118, 100569724, 'Chic1')\n )\n\ndpath = os.path.expanduser('~/projects/ensemble_hic/data/nora2012/giorgetti2014/DNA_FISH_resume.xlsx')\nfrom xlrd import open_workbook\nwb = open_workbook(dpath)\nsheet = wb.sheets()[0]\ntable = np.array([np.array(sheet.row_values(j))[1:13]\n for j in [2,3]+range(7, sheet.nrows)])\ndata = {'{}:{}'.format(x[0], x[1]): np.array([float(y) for y in x[2:] if len(y) > 0])\n for x in table.T}\n\nregion_start = 100378306\n\nX_highres = np.load(\"plot_data/samples_full.pickle\", allow_pickle=True)\nX_highres = np.array([x.variables['structures'] for x in X_highres])\nX_highres = X_highres.reshape(-1,308,3) * 53\n\nX_lowres = np.load(\"plot_data/samples_lowres.pickle\", allow_pickle=True)\nX_lowres = np.array([x.variables['structures'] for x in X_lowres])\nX_lowres = X_lowres.reshape(-1, 62, 3) * (5 * 53 ** 3) ** 0.33333\n\nX_null = np.load(\"plot_data/samples_prior.pickle\", allow_pickle=True)\nX_null = np.array([x.variables['structures'].reshape(-1, 308, 3)\n for x in X_null])\nX_null = X_null.reshape(-1, 308, 3) * 53\n\nXs_alber = []\n\nfor i in (100, 1000, 10000):\n X_temp = np.load('plot_data/alber_ensemble_n{}.npy'.format(i))\n Xs_alber.append(X_temp)\n\nget_bead = lambda p, bead_size: int((np.mean(p[1:3]) - region_start) / bead_size)\n\ncombs = ((1,2), (1,6), (1,5), (5,6), (2,1), (0,3), (1,4)) \nmapping = (data['pEN2:pLG1'], data['pEN2:X4'], data['pEN2:X3'], data['X4:X3'],\n data['pLG1:pEN2'], data['Dxpas34:pEN1'], data['pEN2:pLG11'])\n\ndef plot_distance_hists(ax, X, i, l1, l2, bead_size, ls):\n ax.hist(np.linalg.norm(X[:,get_bead(probes[l1], bead_size)] -\n X[:,get_bead(probes[l2], bead_size)],\n axis=1),\n bins=int(np.sqrt(len(X)) / 3.0), histtype='step',# label='model',\n normed=True, color='black', lw=2, ls=ls)\n\ndef plot_FISH_hists(ax, i, l1, l2):\n ax.hist(mapping[i-1],\n bins=int(np.sqrt(len(mapping[i-1]))), histtype='step',\n #label='FISH',\n normed=True, color='gray', lw=2)\n\ndef plot_alber_distance_hists(ax, i, l1, l2):\n\n from ensemble_hic.analysis_functions import calculate_KL_KDE_log\n from scipy.linalg import norm\n\n bead_size = 3000\n h = lambda p, q: norm(np.sqrt(p) - np.sqrt(q)) / np.sqrt(2)\n for j in range(len(Xs_alber)):\n alber_ds = np.linalg.norm(Xs_alber[j][:,get_bead(probes[l1], bead_size)] -\n Xs_alber[j][:,get_bead(probes[l2], bead_size)],\n axis=1)\n ax.hist(alber_ds,\n bins=int(np.sqrt(len(alber_ds)) / 3.0), histtype='step',\n normed=True,\n #color=('blue', 'red', 'green')[j],\n lw=2)\n\ndef plot_all_hists(axes, X, bead_size, ls):\n\n for i, (l1, l2) in enumerate(combs):\n plot_distance_hists(axes[i], X, i, l1, l2, bead_size, ls)\n\ndef plot_all_FISH_hists(axes):\n\n for i, (l1, l2) in enumerate(combs):\n plot_FISH_hists(axes[i], i, l1, l2)\n\ndef plot_all_hists_alber(axes):\n\n for i, (l1, l2) in enumerate(combs):\n plot_alber_distance_hists(axes[i], i, l1, l2)\n\nfig, axes = plt.subplots(6, 3)\nfor i in range(3):\n pairs = [(axes[2*i,j], axes[2*i+1,j]) for j in range(3)]\n for ax1, ax2 in pairs:\n ax1.get_shared_x_axes().join(ax1, ax2)\n 
ax1.set_xticklabels([])\n\nplot_all_hists_alber(axes[1::2].ravel())\nplot_all_hists(axes[::2].ravel(), X_highres, 3000, ls='-')\nplot_all_hists(axes[::2].ravel(), X_lowres, 15000, ls='--')\nplot_all_hists(axes[::2].ravel(), X_null, 3000, ls=':')\nplot_all_FISH_hists(axes[1::2].ravel())\nplot_all_FISH_hists(axes[::2].ravel())\nfor i, (l1, l2) in enumerate(combs):\n ax = axes[::2].ravel()[i]\n ax.text(0.5, 0.8, '{} - {}'.format(probes[l1][0], probes[l2][0]),\n transform=ax.transAxes)\n\nfor ax in axes.ravel():\n ax.set_yticks(())\n ax.set_xticks((0, 400, 800))\n ax.set_xlim((0, 1200))\n for x in ('left', 'top', 'right'):\n ax.spines[x].set_visible(False)\n\nfor ax in axes[-2][1:]:\n ax.set_visible(False)\nfor ax in axes[-1][1:]:\n ax.set_visible(False)\n\nl1 = axes[0,0].legend(labels=('ISD (high-res, $n=30$)',\n 'ISD (low-res, $n=30$)',\n 'ISD (high-res, prior only)',\n 'FISH'))\nl2 = axes[1,0].legend(labels=(r'PGS ($n=2\\times100$)',\n r'PGS ($n=2\\times1000$)',\n r'PGS ($n=2\\times10000$)'))\n# handles1, labels1 = axes[0,0].get_legend_handles_labels()\n# handles2, labels2 = axes[0,1].get_legend_handles_labels()\nhandles1 = l1.legendHandles\nhandles2 = l2.legendHandles\nlabels1 = l1.texts\nlabels2 = l2.texts\nl1.set_visible(False)\nl2.set_visible(False)\nnew_handles = [Line2D([], [], linewidth=3, ls='--' if i == 1 else '-',\n c=h.get_edgecolor())\n for i, h in enumerate(handles1 + handles2)]\nnew_handles[2].set_linestyle(':')\nl3 = axes[-2,1].legend(frameon=False, handles=new_handles,\n labels=[x.get_text() for x in labels1 + labels2])\naxes[-2,1].set_visible(True)\naxes[-2,1].spines['bottom'].set_visible(False)\naxes[-2,1].set_xticks(())\n", "\"\"\"\nStructural priors restraining structures to be located within a sphere\nto simulate, e.g., a nuclear wall\n\"\"\"\nimport numpy\n\nfrom csb.statistics.pdf.parameterized import Parameter\n\nfrom binf import ArrayParameter\nfrom binf.pdf.priors import AbstractPrior\n\nfrom .sphere_prior_c import sphere_prior_gradient\n\nclass SpherePrior(AbstractPrior):\n\n def __init__(self, name, sphere_radius, sphere_k, n_structures,\n bead_radii, sphere_center=None):\n \"\"\"\n Structural Boltzmann-like prior distribution harmonically restraining\n all beads to be located within a sphere of a given radius\n\n :param name: a unique name for this object, usually 'sphere_prior'\n :type name: string\n\n :param sphere_radius: the radius of the sphere within which to\n restrain the beads\n :type sphere_radius: float\n\n :param sphere_k: force constant\n :type sphere_k: float\n \n :param n_structures: number of ensemble members\n :type n_structures: int\n\n :param bead_radii: bead radii for each bead\n :type bead_radii: :class:`numpy.ndarray`\n\n :param sphere_center: coordinates of the sphere center,\n if none, (0, 0, 0) is assumed\n :type sphere_center: :class:`numpy.ndarray`\n\n :returns: set-up spherical prior distribution object\n :rtype: :class:`.SpherePrior` \n \"\"\"\n super(SpherePrior, self).__init__(name)\n\n self.n_structures = n_structures\n self.bead_radii = bead_radii\n self.bead_radii2 = bead_radii ** 2\n\n self._register_variable('structures', differentiable=True)\n self._register('sphere_radius')\n self['sphere_radius'] = Parameter(sphere_radius, 'sphere_radius')\n self._register('sphere_k')\n self['sphere_k'] = Parameter(sphere_k, 'sphere_k')\n self._register('sphere_center')\n sphere_center = numpy.zeros(3) if sphere_center is None else sphere_center\n self['sphere_center'] = ArrayParameter(sphere_center, 'sphere_center')\n 
self.update_var_param_types(structures=ArrayParameter)\n self._set_original_variables()\n\n def _single_structure_log_prob(self, structure):\n\n r = self['sphere_radius'].value\n k = self['sphere_k'].value\n br = self.bead_radii\n X = structure.reshape(-1, 3)\n norms = numpy.sqrt(numpy.sum((X - self['sphere_center'].value[None,:])\n **2, 1))\n violating = norms + br > r\n \n return -0.5 * k * numpy.sum((norms[violating] + br[violating] - r) ** 2)\n\n def _single_structure_gradient(self, structure):\n\n X = structure.reshape(-1, 3)\n return sphere_prior_gradient(X,\n self['sphere_center'].value,\n self['sphere_radius'].value,\n self['sphere_k'].value,\n numpy.arange(len(X)),\n self.bead_radii,\n self.bead_radii2)\n\n def _evaluate_log_prob(self, structures):\n\n log_prob = self._single_structure_log_prob\n X = structures.reshape(self.n_structures, -1, 3)\n\t\t\n return numpy.sum(map(lambda x: log_prob(structure=x), X))\n\n def _evaluate_gradient(self, structures):\n\n grad = self._single_structure_gradient\n X = structures.reshape(self.n_structures, -1, 3)\n\n return numpy.concatenate(map(lambda x: grad(structure=x), X))\n \n def clone(self):\n\n copy = self.__class__(name=self.name,\n sphere_radius=self['sphere_radius'].value,\n sphere_k=self['sphere_k'].value,\n n_structures=self.n_structures,\n bead_radii=self.bead_radii,\n sphere_center=self['sphere_center'].value)\n\n copy.set_fixed_variables_from_pdf(self)\n\n return copy\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.subplots", "numpy.mean", "numpy.load", "numpy.array" ], [ "numpy.zeros", "numpy.sum" ] ]
outlk/read-cryosat-2
[ "3ca032969f4cf5e9edde1e651d2c900bd84fba09" ]
[ "cryosat_toolkit/read_cryosat_L2I.py" ]
[ "#!/usr/bin/env python\nu\"\"\"\nread_cryosat_L2I.py\nWritten by Tyler Sutterley (05/2021)\n\nReads CryoSat Level-2 Intermediate data products from baselines A, B, BC and C\nReads CryoSat Level-2 netCDF4 data products from baseline D\nSupported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR\n\nINPUTS:\n full_filename: full path of CryoSat .DBL or .nc file\n\nOUTPUTS:\n Location: Time and Orbit Parameters\n Geometry: Elevation Corrections and Flags\n Data: Geolocation and Elevation Measurements with Quality Parameters\n Auxiliary: Auxiliary Data for Elevation Processing\n Instrumental: Intrument Corrections\n METADATA: MPH, SPH and DSD Header data\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n scipy: Scientific Tools for Python\n https://docs.scipy.org/doc/\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n\nUPDATE HISTORY:\n Updated 05/2021: use raw binary string prefixes (rb) for regular expressions\n Updated 08/2020: flake8 updates for python3\n Updated 02/2020: tilde-expansion of cryosat-2 files before opening\n convert from hard to soft tabulation\n Updated 11/2019: empty placeholder dictionary for baseline D DSD headers\n Updated 09/2019: added netCDF4 read function for baseline D\n will output with same variable names as the binary read functions\n Updated 08/2019: generalize regular expression patterns in read_DSD function\n Updated 10/2018: updated header read functions for python3\n Updated 05/2016: using __future__ print and division functions\n Written 03/2016\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport re\nimport netCDF4\nimport numpy as np\nimport scipy.interpolate\n\n#-- PURPOSE: Initiate L2I MDS variables for CryoSat Baselines A and B\ndef cryosat_baseline_AB(fid,record_size,n_records):\n #-- CryoSat-2 Location Group\n #-- Time and Orbit Parameters plus Measurement Mode\n Location = {}\n #-- Time: day part\n Location['Day'] = np.zeros((n_records),dtype=np.int32)\n #-- Time: second part\n Location['Sec'] = np.zeros((n_records),dtype=np.uint32)\n #-- Time: microsecond part\n Location['Micsec'] = np.zeros((n_records),dtype=np.uint32)\n #-- USO correction factor\n Location['USO_Corr'] = np.zeros((n_records),dtype=np.int32)\n #-- Mode ID\n Location['Mode_ID'] = np.zeros((n_records),dtype=np.uint16)\n #-- Source sequence counter\n Location['SSC'] = np.zeros((n_records),dtype=np.uint16)\n #-- Instrument configuration\n Location['Inst_config'] = np.zeros((n_records),dtype=np.uint32)\n #-- Record Counter\n Location['Rec_Count'] = np.zeros((n_records),dtype=np.uint32)\n #-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lat'] = np.zeros((n_records),dtype=np.int32)\n #-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lon'] = np.zeros((n_records),dtype=np.int32)\n #-- Alt: packed units (mm, 1e-3 m)\n #-- Altitude of COG above reference ellipsoid (interpolated value)\n Location['Alt'] = np.zeros((n_records),dtype=np.int32)\n #-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)\n Location['Alt_rate'] = np.zeros((n_records),dtype=np.int32)\n #-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)\n Location['Sat_velocity'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Real beam direction vector. 
In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Real_beam'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Interferometer baseline vector. In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Baseline'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Measurement Confidence Data\n Location['MCD'] = np.zeros((n_records),dtype=np.uint32)\n\n #-- CryoSat-2 Measurement Group\n #-- Derived from instrument measurement parameters\n Data = {}\n #-- Measured elevation above ellipsoid from retracker: packed units (mm, 1e-3 m)\n Data['Elev'] = np.zeros((n_records),dtype=np.int32)\n #-- Sigma Zero Backscatter for retracker: packed units (1e-2 dB)\n Data['Sig0'] = np.zeros((n_records),dtype=np.int32)\n #-- SWH packed units (mm, 1e-3)\n Data['SWH'] = np.zeros((n_records),dtype=np.int32)\n #-- Peakiness: packed units (1e-2)\n Data['Peakiness'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked range correction: packed units (mm, 1e-3 m)\n Data['Retrack_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked sigma 0 correction: packed units (1e-2 dB)\n Data['Retrack_sig0'] = np.zeros((n_records),dtype=np.int32)\n #-- Retrackers 3-13 output\n Data['Retrack_3'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_4'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_5'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_6'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_7'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_8'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_9'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_10'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_11'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_12'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_13'] = np.zeros((n_records),dtype=np.int32)\n #-- Power echo shape parameter: packed units (dB/100)\n Data['echo_shape'] = np.zeros((n_records),dtype=np.int32)\n #-- Beam behaviour parameter: unitless code number related to\n #-- surface characteristics\n Data['BB_parameter'] = np.zeros((n_records,50),dtype=np.int16)\n #-- Cross track angle: packed units (micro radians)\n Data['X_Track_Angle'] = np.zeros((n_records),dtype=np.int32)\n #-- Leading edge coherence at retrack point 1/1000\n Data['Coherence'] = np.zeros((n_records),dtype=np.int32)\n #-- Interpolated Ocean Height: packed units (mm above ellipsoid)\n Data['Ocean_ht'] = np.zeros((n_records),dtype=np.int32)\n #-- Freeboard: packed units (mm, 1e-3 m)\n #-- -9999 default value indicates computation has not been performed\n Data['Freeboard'] = np.zeros((n_records),dtype=np.int32)\n #-- Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SHA'] = np.zeros((n_records),dtype=np.int32)\n #-- Interpolated Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SSHA_interp'] = np.zeros((n_records),dtype=np.int32)\n #-- Error in ocean height interpolation: packed units (mm, 1e-3 m)\n Data['SSHA_interp_RMS'] = np.zeros((n_records),dtype=np.uint16)\n #-- Number of forward records interpolated\n Data['SSHA_interp_count_fwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Number of backward records interpolated\n Data['SSHA_interp_count_bkwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Distance in time of most forward record interpolated (milli-seconds)\n Data['SSHA_interp_time_fwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Distance in time of most backward record interpolated (milli-seconds)\n Data['SSHA_interp_time_bkwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Interpolation error flag\n 
Data['SSHA_interp_flag'] = np.zeros((n_records),dtype=np.uint16)\n #-- Measurement mode\n Data['Measurement_Mode'] = np.zeros((n_records),dtype=np.uint32)\n #-- Quality flags\n Data['Quality_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Retracker flags\n Data['Retracker_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Height calculation details\n #-- Specifies what was applied during the height calculation\n Data['Height_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- SAR freeboard status flag\n Data['Freeboard_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- Number of averaged echoes or beams\n Data['N_avg'] = np.zeros((n_records),dtype=np.uint16)\n #-- Wind Speed packed units (mm/s, 1e-3 m/s)\n Data['Wind_speed'] = np.zeros((n_records),dtype=np.uint16)\n Data['Spares1'] = np.zeros((n_records,3),dtype=np.int32)\n\n #-- CryoSat-2 Auxiliary Data Group\n Auxiliary = {}\n #-- Ice Concentration packed units (%/1000)\n Auxiliary['Ice_conc'] = np.zeros((n_records),dtype=np.int32)\n #-- Snow Depth packed units (mm, 1e-3 m)\n Auxiliary['Snow_depth'] = np.zeros((n_records),dtype=np.int32)\n #-- Snow Density packed units (kg/m^3)\n Auxiliary['Snow_density'] = np.zeros((n_records),dtype=np.int32)\n #-- Discriminator result\n Auxiliary['Discriminator'] = np.zeros((n_records),dtype=np.int32)\n #-- SARin discriminator parameters 1-10\n Auxiliary['SARIN_disc_1'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_2'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_3'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_4'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_5'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_6'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_7'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_8'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_9'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_10'] = np.zeros((n_records),dtype=np.int32)\n #-- Discriminator flags\n Auxiliary['Discrim_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Slope model correction (Attitude of echo in micro-degrees)\n Auxiliary['Attitude'] = np.zeros((n_records),dtype=np.int32)\n #-- Slope model correction (Azimuth of echo in micro-degrees)\n Auxiliary['Azimuth'] = np.zeros((n_records),dtype=np.int32)\n #-- The original latitude of the satellite (micro-degrees)\n Auxiliary['Lat_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- The original longitude of the satellite (micro-degrees)\n Auxiliary['Lon_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- Ambiguity indicator\n Auxiliary['Ambiguity'] = np.zeros((n_records),dtype=np.uint32)\n #-- Mean Sea Surface standard Model: packed units (mm, 1e-3 m)\n Auxiliary['MSS_model'] = np.zeros((n_records),dtype=np.int32)\n #-- Geoid standard Model: packed units (mm, 1e-3 m)\n Auxiliary['Geoid_model'] = np.zeros((n_records),dtype=np.int32)\n #-- ODLE standard Model: packed units (mm, 1e-3 m)\n Auxiliary['ODLE'] = np.zeros((n_records),dtype=np.int32)\n #-- The interpolated elevation value obtained from the DEM (mm)\n Auxiliary['DEM_elev'] = np.zeros((n_records),dtype=np.int32)\n #-- Identification of DEM used in SARin ambiguity test\n Auxiliary['DEM_ID'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['Spares2'] = np.zeros((n_records,4),dtype=np.int32)\n\n #-- CryoSat-2 External Corrections Group\n Geometry = {}\n #-- Dry Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['dryTrop'] = 
np.zeros((n_records),dtype=np.int32)\n #-- Wet Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)\n #-- Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)\n #-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)\n #-- GIM Ionospheric Correction packed units (mm, 1e-3 m)\n Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)\n #-- Model Ionospheric Correction packed units (mm, 1e-3 m)\n Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)\n #-- Ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Ocean loading tide Correction packed units (mm, 1e-3 m)\n Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Solid Earth tide Correction packed units (mm, 1e-3 m)\n Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)\n Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Surface Type: Packed in groups of three bits for each of the 20 records\n Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)\n #-- Corrections Status Flag\n Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- Correction Error Flag\n Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)\n #-- Sea State Bias Correction packed units (mm, 1e-3 m)\n Geometry['SSB'] = np.zeros((n_records),dtype=np.int32)\n Geometry['Spares3'] = np.zeros((n_records,2),dtype=np.int32)\n\n #-- CryoSat-2 Internal Corrections Group\n Instrumental = {}\n #-- Doppler range correction: Radial + slope (mm)\n Instrumental['Doppler_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Range Correction: t-r antenna (mm)\n Instrumental['TR_inst_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Range Correction: r-only antenna (mm)\n Instrumental['R_inst_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Sigma 0 Correction: t-r antenna (dB/100)\n Instrumental['TR_inst_gain'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Sigma 0 Correction: r-only (dB/100)\n Instrumental['R_inst_gain'] = np.zeros((n_records),dtype=np.int32)\n #-- Internal Phase Correction (milli-radians)\n Instrumental['Internal_phase'] = np.zeros((n_records),dtype=np.int32)\n #-- External Phase Correction (milli-radians)\n Instrumental['External_phase'] = np.zeros((n_records),dtype=np.int32)\n #-- Noise Power measurement\n Instrumental['Noise_power'] = np.zeros((n_records),dtype=np.int32)\n #-- Phase slope correction (microradians)\n Instrumental['Phase_slope'] = np.zeros((n_records),dtype=np.int32)\n Instrumental['Spares4'] = np.zeros((n_records,2),dtype=np.int32)\n\n #-- for each record in the CryoSat file\n for r in range(n_records):\n #-- get satellite time and orbit parameters for record r\n Location['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Sec'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['USO_Corr'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Mode_ID'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Location['SSC'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Location['Inst_config'][r] = 
np.fromfile(fid,dtype='>u4',count=1)\n Location['Rec_Count'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Alt_rate'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Sat_velocity'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['Real_beam'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['Baseline'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['MCD'][r] = np.fromfile(fid,dtype='>u4',count=1)\n\n #-- elevation measurements\n Data['Elev'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Sig0'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SWH'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Peakiness'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_sig0'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_4'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_5'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_6'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_7'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_8'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_9'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_10'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_11'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_12'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_13'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['echo_shape'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['BB_parameter'][r,:] = np.fromfile(fid,dtype='>i2',count=50)\n Data['X_Track_Angle'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Coherence'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Ocean_ht'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Freeboard'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SHA'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SSHA_interp'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SSHA_interp_RMS'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_count_fwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_count_bkwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_time_fwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_time_bkwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_flag'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Measurement_Mode'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Quality_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Retracker_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Height_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Freeboard_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['N_avg'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Wind_speed'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Spares1'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n\n #-- Auxiliary Data\n Auxiliary['Ice_conc'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Snow_depth'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Snow_density'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Discriminator'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_2'][r] = 
np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_4'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_5'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_6'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_7'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_8'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_9'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_10'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Discrim_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Auxiliary['Attitude'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Azimuth'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Lat_sat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Lon_sat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Ambiguity'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Auxiliary['MSS_model'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Geoid_model'][r] =np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['ODLE'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['DEM_elev'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['DEM_ID'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Spares2'][r,:] = np.fromfile(fid,dtype='>i4',count=4)\n\n #-- CryoSat-2 External Corrections Group for record r\n Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['SSB'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Spares3'][r,:] = np.fromfile(fid,dtype='>i4',count=2)\n\n #-- CryoSat-2 Internal Corrections Group for record r\n Instrumental['Doppler_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['TR_inst_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['R_inst_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['TR_inst_gain'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['R_inst_gain'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Internal_phase'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['External_phase'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Noise_power'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Phase_slope'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Spares4'][r,:] = np.fromfile(fid,dtype='>i4',count=2)\n\n #-- Bind all the bits of the l2i_mds together into a single dictionary\n CS_L2I_mds = {}\n CS_L2I_mds['Location'] = Location\n CS_L2I_mds['Data'] = Data\n CS_L2I_mds['Auxiliary'] = Auxiliary\n CS_L2I_mds['Geometry'] = Geometry\n CS_L2I_mds['Instrumental'] = Instrumental\n 
#-- return the output dictionary\n return CS_L2I_mds\n\n#-- PURPOSE: Initiate L2I MDS variables for CryoSat Baseline BC\ndef cryosat_baseline_BC(fid,record_size,n_records):\n #-- CryoSat-2 Location Group\n #-- Time and Orbit Parameters plus Measurement Mode\n Location = {}\n #-- Time: day part\n Location['Day'] = np.zeros((n_records),dtype=np.int32)\n #-- Time: second part\n Location['Sec'] = np.zeros((n_records),dtype=np.uint32)\n #-- Time: microsecond part\n Location['Micsec'] = np.zeros((n_records),dtype=np.uint32)\n #-- USO correction factor\n Location['USO_Corr'] = np.zeros((n_records),dtype=np.int32)\n #-- Mode ID\n Location['Mode_ID'] = np.zeros((n_records),dtype=np.uint16)\n #-- Source sequence counter\n Location['SSC'] = np.zeros((n_records),dtype=np.uint16)\n #-- Instrument configuration\n Location['Inst_config'] = np.zeros((n_records),dtype=np.uint32)\n #-- Record Counter\n Location['Rec_Count'] = np.zeros((n_records),dtype=np.uint32)\n #-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lat'] = np.zeros((n_records),dtype=np.int32)\n #-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lon'] = np.zeros((n_records),dtype=np.int32)\n #-- Alt: packed units (mm, 1e-3 m)\n #-- Altitude of COG above reference ellipsoid (interpolated value)\n Location['Alt'] = np.zeros((n_records),dtype=np.int32)\n #-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)\n Location['Alt_rate'] = np.zeros((n_records),dtype=np.int32)\n #-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)\n Location['Sat_velocity'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Real_beam'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Interferometer baseline vector. 
In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Baseline'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Star Tracker ID\n Location['ST_ID'] = np.zeros((n_records),dtype=np.int16)\n Location['Spare'] = np.zeros((n_records),dtype=np.int16)\n #-- Roll (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Roll'] = np.zeros((n_records),dtype=np.int32)\n #-- Pitch (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Pitch'] = np.zeros((n_records),dtype=np.int32)\n #-- Yaw (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Yaw'] = np.zeros((n_records),dtype=np.int32)\n #-- Measurement Confidence Data\n Location['MCD'] = np.zeros((n_records),dtype=np.uint32)\n\n #-- CryoSat-2 Measurement Group\n #-- Derived from instrument measurement parameters\n Data = {}\n #-- Measured elevation above ellipsoid from retracker 1: packed units (mm, 1e-3 m)\n Data['Elev_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Measured elevation above ellipsoid from retracker 2: packed units (mm, 1e-3 m)\n Data['Elev_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Measured elevation above ellipsoid from retracker 3: packed units (mm, 1e-3 m)\n Data['Elev_3'] = np.zeros((n_records),dtype=np.int32)\n #-- Sigma Zero Backscatter for retracker 1: packed units (1e-2 dB)\n Data['Sig0_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Sigma Zero Backscatter for retracker 2: packed units (1e-2 dB)\n Data['Sig0_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Sigma Zero Backscatter for retracker 3: packed units (1e-2 dB)\n Data['Sig0_3'] = np.zeros((n_records),dtype=np.int32)\n #-- SWH packed units (mm, 1e-3)\n Data['SWH'] = np.zeros((n_records),dtype=np.int32)\n #-- Peakiness: packed units (1e-2)\n Data['Peakiness'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked range correction for retracker 1: packed units (mm, 1e-3 m)\n Data['Range_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked range correction for retracker 2: packed units (mm, 1e-3 m)\n Data['Range_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked range correction for retracker 3: packed units (mm, 1e-3 m)\n Data['Range_3'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked sigma 0 correction for Retracker 1: packed units (1e-2 dB)\n Data['Retrack_1_sig0'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked sigma 0 correction for Retracker 2: packed units (1e-2 dB)\n Data['Retrack_2_sig0'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked sigma 0 correction for Retracker 3: packed units (1e-2 dB)\n Data['Retrack_3_sig0'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracker 1 quality metric\n Data['Quality_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracker 2 quality metric\n Data['Quality_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracker 3 quality metric\n Data['Quality_3'] = np.zeros((n_records),dtype=np.int32)\n #-- Retrackers 3-23 output\n Data['Retrack_3'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_4'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_5'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_6'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_7'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_8'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_9'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_10'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_11'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_12'] = 
np.zeros((n_records),dtype=np.int32)\n Data['Retrack_13'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_14'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_15'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_16'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_17'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_18'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_19'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_20'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_21'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_22'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_23'] = np.zeros((n_records),dtype=np.int32)\n #-- Power echo shape parameter: packed units (dB/100)\n Data['echo_shape'] = np.zeros((n_records),dtype=np.int32)\n #-- Beam behaviour parameter: unitless code number related to\n #-- surface characteristics\n Data['BB_parameter'] = np.zeros((n_records,50),dtype=np.int16)\n #-- Cross track angle: packed units (micro radians)\n Data['X_Track_Angle'] = np.zeros((n_records),dtype=np.int32)\n #-- Cross track angle correction: packed units (micro radians)\n Data['X_Track_Angle_c'] = np.zeros((n_records),dtype=np.int32)\n #-- Leading edge coherence at retrack point 1/1000\n Data['Coherence'] = np.zeros((n_records),dtype=np.int32)\n #-- Interpolated Ocean Height: packed units (mm above ellipsoid)\n Data['Ocean_ht'] = np.zeros((n_records),dtype=np.int32)\n #-- Freeboard: packed units (mm, 1e-3 m)\n #-- -9999 default value indicates computation has not been performed\n Data['Freeboard'] = np.zeros((n_records),dtype=np.int32)\n #-- Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SHA'] = np.zeros((n_records),dtype=np.int32)\n #-- Interpolated Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SSHA_interp'] = np.zeros((n_records),dtype=np.int32)\n #-- Error in ocean height interpolation: packed units (mm, 1e-3 m)\n Data['SSHA_interp_RMS'] = np.zeros((n_records),dtype=np.uint16)\n #-- Number of forward records interpolated\n Data['SSHA_interp_count_fwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Number of backward records interpolated\n Data['SSHA_interp_count_bkwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Distance in time of most forward record interpolated (milli-seconds)\n Data['SSHA_interp_time_fwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Distance in time of most backward record interpolated (milli-seconds)\n Data['SSHA_interp_time_bkwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Interpolation error flag\n Data['SSHA_interp_flag'] = np.zeros((n_records),dtype=np.uint16)\n #-- Measurement mode\n Data['Measurement_Mode'] = np.zeros((n_records),dtype=np.uint32)\n #-- Quality flags\n Data['Quality_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Retracker flags\n Data['Retracker_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Height calculation details\n #-- Specifies what was applied during the height calculation\n Data['Height_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- SAR freeboard status flag\n Data['Freeboard_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- Number of averaged echoes or beams\n Data['N_avg'] = np.zeros((n_records),dtype=np.uint16)\n #-- Wind Speed packed units (mm/s, 1e-3 m/s)\n Data['Wind_speed'] = np.zeros((n_records),dtype=np.uint16)\n Data['Spares1'] = np.zeros((n_records,3),dtype=np.int32)\n\n #-- CryoSat-2 Auxiliary Data Group\n Auxiliary = {}\n #-- Ice Concentration packed units (%/1000)\n Auxiliary['Ice_conc'] = 
np.zeros((n_records),dtype=np.int32)\n #-- Snow Depth packed units (mm, 1e-3 m)\n Auxiliary['Snow_depth'] = np.zeros((n_records),dtype=np.int32)\n #-- Snow Density packed units (kg/m^3)\n Auxiliary['Snow_density'] = np.zeros((n_records),dtype=np.int32)\n #-- Discriminator result\n Auxiliary['Discriminator'] = np.zeros((n_records),dtype=np.int32)\n #-- SARin discriminator parameters 1-10\n Auxiliary['SARIN_disc_1'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_2'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_3'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_4'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_5'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_6'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_7'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_8'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_9'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_10'] = np.zeros((n_records),dtype=np.int32)\n #-- Discriminator flags\n Auxiliary['Discrim_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Slope model correction (Attitude of echo in micro-degrees)\n Auxiliary['Attitude'] = np.zeros((n_records),dtype=np.int32)\n #-- Slope model correction (Azimuth of echo in micro-degrees)\n Auxiliary['Azimuth'] = np.zeros((n_records),dtype=np.int32)\n #-- Slope doppler correction (mm)\n Auxiliary['Slope_doppler'] = np.zeros((n_records),dtype=np.int32)\n #-- The original latitude of the satellite (micro-degrees)\n Auxiliary['Lat_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- The original longitude of the satellite (micro-degrees)\n Auxiliary['Lon_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- Ambiguity indicator\n Auxiliary['Ambiguity'] = np.zeros((n_records),dtype=np.uint32)\n #-- Mean Sea Surface standard Model: packed units (mm, 1e-3 m)\n Auxiliary['MSS_model'] = np.zeros((n_records),dtype=np.int32)\n #-- Geoid standard Model: packed units (mm, 1e-3 m)\n Auxiliary['Geoid_model'] = np.zeros((n_records),dtype=np.int32)\n #-- ODLE standard Model: packed units (mm, 1e-3 m)\n Auxiliary['ODLE'] = np.zeros((n_records),dtype=np.int32)\n #-- The interpolated elevation value obtained from the DEM (mm)\n Auxiliary['DEM_elev'] = np.zeros((n_records),dtype=np.int32)\n #-- Identification of DEM used in SARin ambiguity test\n Auxiliary['DEM_ID'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['Spares2'] = np.zeros((n_records,4),dtype=np.int32)\n\n #-- CryoSat-2 External Corrections Group\n Geometry = {}\n #-- Dry Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)\n #-- Wet Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)\n #-- Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)\n #-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)\n #-- GIM Ionospheric Correction packed units (mm, 1e-3 m)\n Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)\n #-- Model Ionospheric Correction packed units (mm, 1e-3 m)\n Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)\n #-- Ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['lpeTideElv'] = 
np.zeros((n_records),dtype=np.int32)\n #-- Ocean loading tide Correction packed units (mm, 1e-3 m)\n Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Solid Earth tide Correction packed units (mm, 1e-3 m)\n Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)\n Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Surface Type: Packed in groups of three bits for each of the 20 records\n Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)\n #-- Corrections Status Flag\n Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- Correction Error Flag\n Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)\n #-- Sea State Bias Correction packed units (mm, 1e-3 m)\n Geometry['SSB'] = np.zeros((n_records),dtype=np.int32)\n Geometry['Spares3'] = np.zeros((n_records,2),dtype=np.int32)\n\n #-- CryoSat-2 Internal Corrections Group\n Instrumental = {}\n #-- Doppler range correction: Radial + slope (mm)\n Instrumental['Doppler_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Range Correction: t-r antenna (mm)\n Instrumental['TR_inst_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Range Correction: r-only antenna (mm)\n Instrumental['R_inst_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Sigma 0 Correction: t-r antenna (dB/100)\n Instrumental['TR_inst_gain'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Sigma 0 Correction: r-only (dB/100)\n Instrumental['R_inst_gain'] = np.zeros((n_records),dtype=np.int32)\n #-- Internal Phase Correction (milli-radians)\n Instrumental['Internal_phase'] = np.zeros((n_records),dtype=np.int32)\n #-- External Phase Correction (milli-radians)\n Instrumental['External_phase'] = np.zeros((n_records),dtype=np.int32)\n #-- Noise Power measurement\n Instrumental['Noise_power'] = np.zeros((n_records),dtype=np.int32)\n #-- Phase slope correction (microradians)\n Instrumental['Phase_slope'] = np.zeros((n_records),dtype=np.int32)\n Instrumental['Spares4'] = np.zeros((n_records,2),dtype=np.int32)\n\n #-- for each record in the CryoSat file\n for r in range(n_records):\n #-- CryoSat-2 Location Group for record r\n Location['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Sec'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['USO_Corr'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Mode_ID'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Location['SSC'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Location['Inst_config'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Rec_Count'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Alt_rate'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Sat_velocity'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['Real_beam'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['Baseline'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['ST_ID'][r] = np.fromfile(fid,dtype='>i2',count=1)\n Location['Spare'][r] = np.fromfile(fid,dtype='>i2',count=1)\n Location['Roll'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Pitch'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Yaw'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['MCD'][r] = 
np.fromfile(fid,dtype='>u4',count=1)\n\n #-- CryoSat-2 Measurement Group for record r\n Data['Elev_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Elev_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Elev_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Sig0_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Sig0_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Sig0_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SWH'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Peakiness'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Range_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Range_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Range_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_1_sig0'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_2_sig0'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_3_sig0'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Quality_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Quality_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Quality_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_4'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_5'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_6'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_7'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_8'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_9'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_10'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_11'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_12'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_13'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_14'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_15'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_16'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_17'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_18'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_19'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_20'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_21'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_22'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_23'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['echo_shape'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['BB_parameter'][r,:] = np.fromfile(fid,dtype='>i2',count=50)\n Data['X_Track_Angle'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['X_Track_Angle_c'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Coherence'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Ocean_ht'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Freeboard'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SHA'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SSHA_interp'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SSHA_interp_RMS'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_count_fwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_count_bkwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_time_fwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_time_bkwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_flag'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Measurement_Mode'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Quality_flag'][r] = 
np.fromfile(fid,dtype='>u4',count=1)\n Data['Retracker_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Height_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Freeboard_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['N_avg'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Wind_speed'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Spares1'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n\n #-- CryoSat-2 Auxiliary Data Group for record r\n Auxiliary['Ice_conc'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Snow_depth'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Snow_density'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Discriminator'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_4'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_5'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_6'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_7'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_8'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_9'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_10'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Discrim_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Auxiliary['Attitude'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Azimuth'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Slope_doppler'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Lat_sat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Lon_sat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Ambiguity'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Auxiliary['MSS_model'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Geoid_model'][r] =np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['ODLE'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['DEM_elev'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['DEM_ID'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Spares2'][r,:] = np.fromfile(fid,dtype='>i4',count=4)\n\n #-- CryoSat-2 External Corrections Group for record r\n Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['SSB'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Spares3'][r,:] = np.fromfile(fid,dtype='>i4',count=2)\n\n #-- CryoSat-2 Internal Corrections Group for record r\n Instrumental['Doppler_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n 
Instrumental['TR_inst_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['R_inst_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['TR_inst_gain'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['R_inst_gain'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Internal_phase'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['External_phase'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Noise_power'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Phase_slope'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Spares4'][r,:] = np.fromfile(fid,dtype='>i4',count=2)\n\n #-- Bind all the bits of the l2i_mds together into a single dictionary\n CS_L2I_mds = {}\n CS_L2I_mds['Location'] = Location\n CS_L2I_mds['Data'] = Data\n CS_L2I_mds['Auxiliary'] = Auxiliary\n CS_L2I_mds['Geometry'] = Geometry\n CS_L2I_mds['Instrumental'] = Instrumental\n #-- return the output dictionary\n return CS_L2I_mds\n\n#-- PURPOSE: Initiate L2I MDS variables for CryoSat Baseline C\ndef cryosat_baseline_C(fid,record_size,n_records):\n #-- CryoSat-2 Location Group\n #-- Time and Orbit Parameters plus Measurement Mode\n Location = {}\n #-- Time: day part\n Location['Day'] = np.zeros((n_records),dtype=np.int32)\n #-- Time: second part\n Location['Sec'] = np.zeros((n_records),dtype=np.uint32)\n #-- Time: microsecond part\n Location['Micsec'] = np.zeros((n_records),dtype=np.uint32)\n #-- USO correction factor\n Location['USO_Corr'] = np.zeros((n_records),dtype=np.int32)\n #-- Mode ID\n Location['Mode_ID'] = np.zeros((n_records),dtype=np.uint16)\n #-- Source sequence counter\n Location['SSC'] = np.zeros((n_records),dtype=np.uint16)\n #-- Instrument configuration\n Location['Inst_config'] = np.zeros((n_records),dtype=np.uint32)\n #-- Record Counter\n Location['Rec_Count'] = np.zeros((n_records),dtype=np.uint32)\n #-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lat'] = np.zeros((n_records),dtype=np.int32)\n #-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lon'] = np.zeros((n_records),dtype=np.int32)\n #-- Alt: packed units (mm, 1e-3 m)\n #-- Altitude of COG above reference ellipsoid (interpolated value)\n Location['Alt'] = np.zeros((n_records),dtype=np.int32)\n #-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)\n Location['Alt_rate'] = np.zeros((n_records),dtype=np.int32)\n #-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)\n Location['Sat_velocity'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Real_beam'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Interferometer baseline vector. 
In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Baseline'] = np.zeros((n_records,3),dtype=np.int32)\n #-- Star Tracker ID\n Location['ST_ID'] = np.zeros((n_records),dtype=np.int16)\n Location['Spare'] = np.zeros((n_records),dtype=np.int16)\n #-- Roll (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Roll'] = np.zeros((n_records),dtype=np.int32)\n #-- Pitch (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Pitch'] = np.zeros((n_records),dtype=np.int32)\n #-- Yaw (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Yaw'] = np.zeros((n_records),dtype=np.int32)\n #-- Measurement Confidence Data\n Location['MCD'] = np.zeros((n_records),dtype=np.uint32)\n\n #-- CryoSat-2 Measurement Group\n #-- Derived from instrument measurement parameters\n Data = {}\n #-- Measured elevation above ellipsoid from retracker 1: packed units (mm, 1e-3 m)\n Data['Elev_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Measured elevation above ellipsoid from retracker 2: packed units (mm, 1e-3 m)\n Data['Elev_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Measured elevation above ellipsoid from retracker 3: packed units (mm, 1e-3 m)\n Data['Elev_3'] = np.zeros((n_records),dtype=np.int32)\n #-- Sigma Zero Backscatter for retracker 1: packed units (1e-2 dB)\n Data['Sig0_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Sigma Zero Backscatter for retracker 2: packed units (1e-2 dB)\n Data['Sig0_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Sigma Zero Backscatter for retracker 3: packed units (1e-2 dB)\n Data['Sig0_3'] = np.zeros((n_records),dtype=np.int32)\n #-- SWH packed units (mm, 1e-3)\n Data['SWH'] = np.zeros((n_records),dtype=np.int32)\n #-- Peakiness: packed units (1e-2)\n Data['Peakiness'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked range correction for retracker 1: packed units (mm, 1e-3 m)\n Data['Range_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked range correction for retracker 1: packed units (mm, 1e-3 m)\n Data['Range_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracked range correction for retracker 3: packed units (mm, 1e-3 m)\n Data['Range_3'] = np.zeros((n_records),dtype=np.int32)\n Data['Spare2'] = np.zeros((n_records),dtype=np.int32)\n Data['Spare3'] = np.zeros((n_records),dtype=np.int32)\n Data['Spare4'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracker 1 quality metric\n Data['Quality_1'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracker 2 quality metric\n Data['Quality_2'] = np.zeros((n_records),dtype=np.int32)\n #-- Retracker 3 quality metric\n Data['Quality_3'] = np.zeros((n_records),dtype=np.int32)\n #-- Retrackers 3-23 output\n Data['Retrack_3'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_4'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_5'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_6'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_7'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_8'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_9'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_10'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_11'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_12'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_13'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_14'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_15'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_16'] = 
np.zeros((n_records),dtype=np.int32)\n Data['Retrack_17'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_18'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_19'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_20'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_21'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_22'] = np.zeros((n_records),dtype=np.int32)\n Data['Retrack_23'] = np.zeros((n_records),dtype=np.int32)\n #-- Power echo shape parameter: packed units (dB/100)\n Data['echo_shape'] = np.zeros((n_records),dtype=np.int32)\n #-- Beam behaviour parameter: unitless code number related to\n #-- surface characteristics\n Data['BB_parameter'] = np.zeros((n_records,50),dtype=np.int16)\n #-- Cross track angle: packed units (micro radians)\n Data['X_Track_Angle'] = np.zeros((n_records),dtype=np.int32)\n #-- Cross track angle correction: packed units (micro radians)\n Data['X_Track_Angle_c'] = np.zeros((n_records),dtype=np.int32)\n #-- Leading edge coherence at retrack point 1/1000\n Data['Coherence'] = np.zeros((n_records),dtype=np.int32)\n #-- Interpolated Ocean Height: packed units (mm above ellipsoid)\n Data['Ocean_ht'] = np.zeros((n_records),dtype=np.int32)\n #-- Freeboard: packed units (mm, 1e-3 m)\n #-- -9999 default value indicates computation has not been performed\n Data['Freeboard'] = np.zeros((n_records),dtype=np.int32)\n #-- Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SHA'] = np.zeros((n_records),dtype=np.int32)\n #-- Interpolated Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SSHA_interp'] = np.zeros((n_records),dtype=np.int32)\n #-- Error in ocean height interpolation: packed units (mm, 1e-3 m)\n Data['SSHA_interp_RMS'] = np.zeros((n_records),dtype=np.uint16)\n #-- Number of forward records interpolated\n Data['SSHA_interp_count_fwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Number of backward records interpolated\n Data['SSHA_interp_count_bkwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Distance in time of most forward record interpolated (milli-seconds)\n Data['SSHA_interp_time_fwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Distance in time of most backward record interpolated (milli-seconds)\n Data['SSHA_interp_time_bkwd'] = np.zeros((n_records),dtype=np.uint16)\n #-- Interpolation error flag\n Data['SSHA_interp_flag'] = np.zeros((n_records),dtype=np.uint16)\n #-- Measurement mode\n Data['Measurement_Mode'] = np.zeros((n_records),dtype=np.uint32)\n #-- Quality flags\n Data['Quality_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Retracker flags\n Data['Retracker_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Height calculation details\n #-- Specifies what was applied during the height calculation\n Data['Height_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- SAR freeboard status flag\n Data['Freeboard_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- Number of averaged echoes or beams\n Data['N_avg'] = np.zeros((n_records),dtype=np.uint16)\n #-- Wind Speed packed units (mm/s, 1e-3 m/s)\n Data['Wind_speed'] = np.zeros((n_records),dtype=np.uint16)\n Data['Spares1'] = np.zeros((n_records,3),dtype=np.int32)\n\n #-- CryoSat-2 Auxiliary Data Group\n Auxiliary = {}\n #-- Ice Concentration packed units (%/1000)\n Auxiliary['Ice_conc'] = np.zeros((n_records),dtype=np.int32)\n #-- Snow Depth packed units (mm, 1e-3 m)\n Auxiliary['Snow_depth'] = np.zeros((n_records),dtype=np.int32)\n #-- Snow Density packed units (kg/m^3)\n Auxiliary['Snow_density'] = 
np.zeros((n_records),dtype=np.int32)\n #-- Discriminator result\n Auxiliary['Discriminator'] = np.zeros((n_records),dtype=np.int32)\n #-- SARin discriminator parameters 1-10\n Auxiliary['SARIN_disc_1'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_2'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_3'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_4'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_5'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_6'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_7'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_8'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_9'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['SARIN_disc_10'] = np.zeros((n_records),dtype=np.int32)\n #-- Discriminator flags\n Auxiliary['Discrim_flag'] = np.zeros((n_records),dtype=np.uint32)\n #-- Slope model correction (Attitude of echo in micro-degrees)\n Auxiliary['Attitude'] = np.zeros((n_records),dtype=np.int32)\n #-- Slope model correction (Azimuth of echo in micro-degrees)\n Auxiliary['Azimuth'] = np.zeros((n_records),dtype=np.int32)\n #-- Slope doppler correction (mm)\n Auxiliary['Slope_doppler'] = np.zeros((n_records),dtype=np.int32)\n #-- The original latitude of the satellite (micro-degrees)\n Auxiliary['Lat_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- The original longitude of the satellite (micro-degrees)\n Auxiliary['Lon_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- Ambiguity indicator\n Auxiliary['Ambiguity'] = np.zeros((n_records),dtype=np.uint32)\n #-- Mean Sea Surface Model packed units (mm, 1e-3 m)\n Auxiliary['MSS_model'] = np.zeros((n_records),dtype=np.int32)\n #-- Geoid Model packed units (mm, 1e-3 m)\n Auxiliary['Geoid_model'] = np.zeros((n_records),dtype=np.int32)\n #-- ODLE Model packed units (mm, 1e-3 m)\n Auxiliary['ODLE'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['DEM_elev'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['DEM_ID'] = np.zeros((n_records),dtype=np.int32)\n Auxiliary['Spares2'] = np.zeros((n_records,4),dtype=np.int32)\n\n #-- CryoSat-2 External Corrections Group\n Geometry = {}\n #-- Dry Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)\n #-- Wet Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)\n #-- Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)\n #-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)\n #-- GIM Ionospheric Correction packed units (mm, 1e-3 m)\n Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)\n #-- Model Ionospheric Correction packed units (mm, 1e-3 m)\n Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)\n #-- Ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Ocean loading tide Correction packed units (mm, 1e-3 m)\n Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Solid Earth tide Correction packed units (mm, 1e-3 m)\n Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)\n #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)\n Geometry['gpTideElv'] = 
np.zeros((n_records),dtype=np.int32)\n #-- Surface Type: Packed in groups of three bits for each of the 20 records\n Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)\n #-- Corrections Status Flag\n Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)\n #-- Correction Error Flag\n Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)\n #-- Sea State Bias Correction packed units (mm, 1e-3 m)\n Geometry['SSB'] = np.zeros((n_records),dtype=np.int32)\n Geometry['Spares3'] = np.zeros((n_records,2),dtype=np.int32)\n\n #-- CryoSat-2 Internal Corrections Group\n Instrumental = {}\n #-- Doppler range correction: Radial + slope (mm)\n Instrumental['Doppler_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Range Correction: t-r antenna (mm)\n Instrumental['TR_inst_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Range Correction: r-only antenna (mm)\n Instrumental['R_inst_range'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Sigma 0 Correction: t-r antenna (dB/100)\n Instrumental['TR_inst_gain'] = np.zeros((n_records),dtype=np.int32)\n #-- Instrument Sigma 0 Correction: r-only (dB/100)\n Instrumental['R_inst_gain'] = np.zeros((n_records),dtype=np.int32)\n #-- Internal Phase Correction (milli-radians)\n Instrumental['Internal_phase'] = np.zeros((n_records),dtype=np.int32)\n #-- External Phase Correction (milli-radians)\n Instrumental['External_phase'] = np.zeros((n_records),dtype=np.int32)\n #-- Noise Power measurement\n Instrumental['Noise_power'] = np.zeros((n_records),dtype=np.int32)\n #-- Phase slope correction (microradians)\n Instrumental['Phase_slope'] = np.zeros((n_records),dtype=np.int32)\n Instrumental['Spares4'] = np.zeros((n_records,2),dtype=np.int32)\n\n #-- for each record in the CryoSat file\n for r in range(n_records):\n #-- CryoSat-2 Location Group for record r\n Location['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Sec'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['USO_Corr'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Mode_ID'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Location['SSC'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Location['Inst_config'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Rec_Count'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Location['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Alt_rate'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Sat_velocity'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['Real_beam'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['Baseline'][r,:] = np.fromfile(fid,dtype='>i4',count=3)\n Location['ST_ID'][r] = np.fromfile(fid,dtype='>i2',count=1)\n Location['Spare'][r] = np.fromfile(fid,dtype='>i2',count=1)\n Location['Roll'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Pitch'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['Yaw'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Location['MCD'][r] = np.fromfile(fid,dtype='>u4',count=1)\n\n #-- CryoSat-2 Measurement Group for record r\n Data['Elev_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Elev_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Elev_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Sig0_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Sig0_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n 
Data['Sig0_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SWH'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Peakiness'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Range_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Range_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Range_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Spare2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Spare3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Spare4'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Quality_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Quality_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Quality_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_4'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_5'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_6'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_7'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_8'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_9'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_10'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_11'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_12'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_13'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_14'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_15'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_16'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_17'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_18'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_19'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_20'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_21'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_22'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Retrack_23'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['echo_shape'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['BB_parameter'][r,:] = np.fromfile(fid,dtype='>i2',count=50)\n Data['X_Track_Angle'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['X_Track_Angle_c'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Coherence'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Ocean_ht'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['Freeboard'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SHA'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SSHA_interp'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Data['SSHA_interp_RMS'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_count_fwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_count_bkwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_time_fwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_time_bkwd'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['SSHA_interp_flag'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Measurement_Mode'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Quality_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Retracker_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Height_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['Freeboard_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Data['N_avg'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Wind_speed'][r] = np.fromfile(fid,dtype='>u2',count=1)\n Data['Spares1'][r,:] = 
np.fromfile(fid,dtype='>i4',count=3)\n\n #-- CryoSat-2 Auxiliary Data Group for record r\n Auxiliary['Ice_conc'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Snow_depth'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Snow_density'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Discriminator'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_1'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_2'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_3'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_4'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_5'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_6'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_7'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_8'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_9'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['SARIN_disc_10'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Discrim_flag'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Auxiliary['Attitude'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Azimuth'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Slope_doppler'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Lat_sat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Lon_sat'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Ambiguity'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Auxiliary['MSS_model'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Geoid_model'][r] =np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['ODLE'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['DEM_elev'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['DEM_ID'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Auxiliary['Spares2'][r,:] = np.fromfile(fid,dtype='>i4',count=4)\n\n #-- CryoSat-2 External Corrections Group for record r\n Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)\n Geometry['SSB'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Geometry['Spares3'][r,:] = np.fromfile(fid,dtype='>i4',count=2)\n\n #-- CryoSat-2 Internal Corrections Group for record r\n Instrumental['Doppler_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['TR_inst_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['R_inst_range'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['TR_inst_gain'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['R_inst_gain'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Internal_phase'][r] = np.fromfile(fid,dtype='>i4',count=1)\n 
Instrumental['External_phase'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Noise_power'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Phase_slope'][r] = np.fromfile(fid,dtype='>i4',count=1)\n Instrumental['Spares4'][r,:] = np.fromfile(fid,dtype='>i4',count=2)\n\n #-- Bind all the bits of the l2i_mds together into a single dictionary\n CS_L2I_mds = {}\n CS_L2I_mds['Location'] = Location\n CS_L2I_mds['Data'] = Data\n CS_L2I_mds['Auxiliary'] = Auxiliary\n CS_L2I_mds['Geometry'] = Geometry\n CS_L2I_mds['Instrumental'] = Instrumental\n #-- return the output dictionary\n return CS_L2I_mds\n\n#-- PURPOSE: Initiate L2I MDS variables for CryoSat Baseline D (netCDF4)\ndef cryosat_baseline_D(full_filename, UNPACK=False):\n #-- open netCDF4 file for reading\n fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')\n #-- use original unscaled units unless UNPACK=True\n fid.set_auto_scale(UNPACK)\n #-- get dimensions\n time_20_ku = fid.variables['time_20_ku'][:].copy()\n time_cor_01 = fid.variables['time_cor_01'][:].copy()\n n_records = len(time_20_ku)\n\n #-- CryoSat-2 Location Group\n #-- Time and Orbit Parameters plus Measurement Mode\n Location = {}\n\n #-- Data Record Time (MDSR Time Stamp)\n #-- Time (seconds since 2000-01-01)\n Location['Time'] = time_20_ku.copy()\n #-- Time: day part\n Location['Day'] = np.array(time_20_ku/86400.0, dtype=np.int32)\n #-- Time: second part\n Location['Second'] = np.array(time_20_ku -\n Location['Day'][:]*86400.0, dtype=np.uint32)\n #-- Time: microsecond part\n Location['Micsec'] = np.array((time_20_ku -\n Location['Day'][:]*86400.0 -\n Location['Second'][:])*1e6, dtype=np.uint32)\n #-- USO correction factor (2-way range)\n Location['USO_Corr'] = fid.variables['uso_cor_20_ku'][:].copy()\n #-- USO correction factor (1-way range)\n Location['USO_Corr_1way'] = fid.variables['uso_cor_applied_20_ku'][:].copy()\n #-- Mode ID\n Location['Mode_ID'] = fid.variables['flag_instr_mode_op_20_ku'][:].copy()\n #-- Mode Flags\n Location['Mode_flags'] = fid.variables['flag_instr_mode_flags_20_ku'][:].copy()\n #-- Platform attitude control mode\n Location['Att_control'] = fid.variables['flag_instr_mode_att_ctrl_20_ku'][:].copy()\n #-- Instrument configuration\n Location['Inst_config'] = fid.variables['flag_instr_conf_rx_flags_20_ku'][:].copy()\n #-- acquisition band\n Location['Inst_band'] = fid.variables['flag_instr_conf_rx_bwdt_20_ku'][:].copy()\n #-- instrument channel\n Location['Inst_channel'] = fid.variables['flag_instr_conf_rx_in_use_20_ku'][:].copy()\n #-- tracking mode\n Location['Tracking_mode'] = fid.variables['flag_instr_conf_rx_trk_mode_20_ku'][:].copy()\n #-- Source sequence counter\n Location['SSC'] = fid.variables['seq_count_20_ku'][:].copy()\n #-- Record Counter\n Location['Rec_Count'] = fid.variables['rec_count_20_ku'][:].copy()\n #-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lat'] = fid.variables['lat_20_ku'][:].copy()\n #-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Lon'] = fid.variables['lon_20_ku'][:].copy()\n #-- Alt: packed units (mm, 1e-3 m)\n #-- Altitude of COG above reference ellipsoid (interpolated value)\n Location['Alt'] = fid.variables['alt_20_ku'][:].copy()\n #-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)\n Location['Alt_rate'] = fid.variables['orb_alt_rate_20_ku'][:].copy()\n #-- Satellite velocity vector. 
In ITRF: packed units (mm/s, 1e-3 m/s)\n Location['Sat_velocity'] = fid.variables['sat_vel_vec_20_ku'][:].copy()\n #-- Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Real_beam'] = fid.variables['beam_dir_vec_20_ku'][:].copy()\n #-- Interferometer baseline vector. In CRF: packed units (micro-m/s, 1e-6 m/s)\n Location['Baseline'] = fid.variables['inter_base_vec_20_ku'][:].copy()\n #-- Star Tracker ID\n Location['ST_ID'] = fid.variables['flag_instr_conf_rx_str_in_use_20_ku'][:].copy()\n #-- Roll (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Roll'] = fid.variables['off_nadir_roll_angle_str_20_ku'][:].copy()\n #-- Pitch (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Pitch'] = fid.variables['off_nadir_pitch_angle_str_20_ku'][:].copy()\n #-- Yaw (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n Location['Yaw'] = fid.variables['off_nadir_yaw_angle_str_20_ku'][:].copy()\n #-- Measurement Confidence Data Flags\n #-- Generally the MCD flags indicate problems when set\n #-- If MCD is 0 then no problems or non-nominal conditions were detected\n #-- Serious errors are indicated by setting bit 31\n Location['MCD'] = fid.variables['flag_mcd_20_ku'][:].copy()\n\n\n #-- CryoSat-2 Measurement Group\n #-- Derived from instrument measurement parameters\n Data = {}\n #-- Measured elevation above ellipsoid from retracker 1: packed units (mm, 1e-3 m)\n Data['Elev_1'] = fid.variables['height_1_20_ku'][:].copy()\n #-- Measured elevation above ellipsoid from retracker 2: packed units (mm, 1e-3 m)\n Data['Elev_2'] = fid.variables['height_2_20_ku'][:].copy()\n #-- Measured elevation above ellipsoid from retracker 3: packed units (mm, 1e-3 m)\n Data['Elev_3'] = fid.variables['height_3_20_ku'][:].copy()\n #-- Sigma Zero Backscatter for retracker 1: packed units (1e-2 dB)\n Data['Sig0_1'] = fid.variables['sig0_1_20_ku'][:].copy()\n #-- Sigma Zero Backscatter for retracker 2: packed units (1e-2 dB)\n Data['Sig0_2'] = fid.variables['sig0_2_20_ku'][:].copy()\n #-- Sigma Zero Backscatter for retracker 3: packed units (1e-2 dB)\n Data['Sig0_3'] = fid.variables['sig0_3_20_ku'][:].copy()\n #-- SWH packed units (mm, 1e-3)\n Data['SWH'] = fid.variables['swh_ocean_20_ku'][:].copy()\n #-- Peakiness: packed units (1e-2)\n Data['Peakiness'] = fid.variables['peakiness_20_ku'][:].copy()\n #-- Retracked range correction for retracker 1: packed units (mm, 1e-3 m)\n Data['Range_1'] = fid.variables['range_1_20_ku'][:].copy()\n #-- Retracked range correction for retracker 1: packed units (mm, 1e-3 m)\n Data['Range_2'] = fid.variables['range_2_20_ku'][:].copy()\n #-- Retracked range correction for retracker 3: packed units (mm, 1e-3 m)\n Data['Range_3'] = fid.variables['range_3_20_ku'][:].copy()\n #-- Retracker 1 quality metric\n Data['Quality_1'] = fid.variables['retracker_1_quality_20_ku'][:].copy()\n #-- Retracker 2 quality metric\n Data['Quality_2'] = fid.variables['retracker_2_quality_20_ku'][:].copy()\n #-- Retracker 3 quality metric\n Data['Quality_3'] = fid.variables['retracker_3_quality_20_ku'][:].copy()\n #-- Retrackers 3-23 output\n Data['Retrack_3'] = fid.variables['retracker_output_3_20_ku'][:].copy()\n Data['Retrack_4'] = fid.variables['retracker_output_4_20_ku'][:].copy()\n Data['Retrack_5'] = fid.variables['retracker_output_5_20_ku'][:].copy()\n Data['Retrack_6'] = fid.variables['retracker_output_6_20_ku'][:].copy()\n Data['Retrack_7'] = 
fid.variables['retracker_output_7_20_ku'][:].copy()\n Data['Retrack_8'] = fid.variables['retracker_output_8_20_ku'][:].copy()\n Data['Retrack_9'] = fid.variables['retracker_output_9_20_ku'][:].copy()\n Data['Retrack_10'] = fid.variables['retracker_output_10_20_ku'][:].copy()\n Data['Retrack_11'] = fid.variables['retracker_output_11_20_ku'][:].copy()\n Data['Retrack_12'] = fid.variables['retracker_output_12_20_ku'][:].copy()\n Data['Retrack_13'] = fid.variables['retracker_output_13_20_ku'][:].copy()\n Data['Retrack_14'] = fid.variables['retracker_output_14_20_ku'][:].copy()\n Data['Retrack_15'] = fid.variables['retracker_output_15_20_ku'][:].copy()\n Data['Retrack_16'] = fid.variables['retracker_output_16_20_ku'][:].copy()\n Data['Retrack_17'] = fid.variables['retracker_output_17_20_ku'][:].copy()\n Data['Retrack_18'] = fid.variables['retracker_output_18_20_ku'][:].copy()\n Data['Retrack_19'] = fid.variables['retracker_output_19_20_ku'][:].copy()\n Data['Retrack_20'] = fid.variables['retracker_output_20_20_ku'][:].copy()\n Data['Retrack_21'] = fid.variables['retracker_output_21_20_ku'][:].copy()\n Data['Retrack_22'] = fid.variables['retracker_output_22_20_ku'][:].copy()\n Data['Retrack_23'] = fid.variables['retracker_output_23_20_ku'][:].copy()\n #-- Power echo shape parameter: packed units (dB/100)\n Data['echo_shape'] = np.zeros((n_records),dtype=np.int32)\n #-- Beam behaviour parameter: unitless code number related to\n #-- surface characteristics\n Data['Beam'] = {}\n #-- Standard Deviation of Gaussian fit to range integrated stack power.\n Data['Beam']['SD'] = fid.variables['stack_std_20_ku'][:].copy()\n #-- Stack Center: Mean of Gaussian fit to range integrated stack power.\n Data['Beam']['Center'] = fid.variables['stack_centre_20_ku'][:].copy()\n #-- Stack amplitude parameter scaled in dB/100.\n Data['Beam']['Amplitude'] = fid.variables['stack_scaled_amplitude_20_ku'][:].copy()\n #-- 3rd moment: providing the degree of asymmetry of the range integrated\n #-- stack power distribution.\n Data['Beam']['Skewness'] = fid.variables['stack_skewness_20_ku'][:].copy()\n #-- 4th moment: Measure of peakiness of range integrated stack power distribution.\n Data['Beam']['Kurtosis'] = fid.variables['stack_kurtosis_20_ku'][:].copy()\n #-- Stack peakiness computed from the range integrated power of the single look echoes\n Data['Beam']['Peakiness'] = fid.variables['stack_peakiness_20_ku'][:].copy()\n #-- Stack residuals of Gaussian that fits the range integrated power of the single look echoes\n Data['Beam']['RMS'] = fid.variables['stack_gaussian_fitting_residuals_20_ku'][:].copy()\n #-- Standard deviation as a function of boresight angle (microradians)\n Data['Beam']['SD_boresight_angle'] = fid.variables['stack_std_angle_20_ku'][:].copy()\n #-- Stack Center angle as a function of boresight angle (microradians)\n Data['Beam']['Center_boresight_angle'] = fid.variables['stack_centre_angle_20_ku'][:].copy()\n #-- Stack Center angle as a function of look angle (microradians)\n Data['Beam']['Center_look_angle'] = fid.variables['stack_centre_look_angle_20_ku'][:].copy()\n #-- Number of contributing beams in the stack before weighting\n Data['Beam']['Number'] = fid.variables['stack_number_before_weighting_20_ku'][:].copy()\n #-- Number of contributing beams in the stack after weighting\n Data['Beam']['Weighted_Number'] = fid.variables['stack_number_after_weighting_20_ku'][:].copy()\n #-- Cross track angle: packed units (micro radians)\n Data['X_Track_Angle'] = 
fid.variables['across_track_angle_20_ku'][:].copy()\n #-- Cross track angle correction: packed units (micro radians)\n Data['X_Track_Angle_c'] = fid.variables['across_track_angle_cor_20_ku'][:].copy()\n #-- Leading edge coherence at retrack point 1/1000\n Data['Coherence'] = fid.variables['coherence_20_ku'][:].copy()\n #-- Interpolated Ocean Height: packed units (mm above ellipsoid)\n Data['Ocean_ht'] = np.zeros((n_records),dtype=np.int32)\n #-- Freeboard: packed units (mm, 1e-3 m)\n #-- -9999 default value indicates computation has not been performed\n Data['Freeboard'] = fid.variables['freeboard_20_ku'][:].copy()\n #-- Sea ice Floe height\n Data['Sea_Ice_Lead'] = fid.variables['height_sea_ice_floe_20_ku'][:].copy()\n #-- Sea ice lead height\n Data['Sea_Ice_Floe'] = fid.variables['height_sea_ice_lead_20_ku'][:].copy()\n #-- Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SHA'] = fid.variables['ssha_20_ku'][:].copy()\n #-- Interpolated Surface Height Anomaly: packed units (mm, 1e-3 m)\n Data['SSHA_interp'] = fid.variables['ssha_interp_20_ku'][:].copy()\n #-- Error in ocean height interpolation: packed units (mm, 1e-3 m)\n Data['SSHA_interp_RMS'] = fid.variables['ssha_interp_rms_20_ku'][:].copy()\n #-- Number of forward records interpolated\n Data['SSHA_interp_count_fwd'] = fid.variables['ssha_interp_numval_fwd_20_ku'][:].copy()\n #-- Number of backward records interpolated\n Data['SSHA_interp_count_bkwd'] = fid.variables['ssha_interp_numval_back_20_ku'][:].copy()\n #-- Distance in time of most forward record interpolated (milli-seconds)\n Data['SSHA_interp_time_fwd'] = fid.variables['ssha_interp_time_fwd_20_ku'][:].copy()\n #-- Distance in time of most backward record interpolated (milli-seconds)\n Data['SSHA_interp_time_bkwd'] = fid.variables['ssha_interp_time_back_20_ku'][:].copy()\n #-- Interpolation error flag\n Data['SSHA_interp_flag'] = fid.variables['flag_ssha_interp_20_ku'][:].copy()\n #-- Measurement mode\n Data['Measurement_Mode'] = fid.variables['flag_instr_mode_op_20_ku'][:].copy()\n #-- Quality flags\n Data['Quality_flag'] = fid.variables['flag_quality_20_ku'][:].copy()\n #-- Retracker flags\n Data['Retracker_flag'] = fid.variables['flag_retracker_20_ku'][:].copy()\n #-- Height calculation details\n #-- Specifies what was applied during the height calculation\n Data['Height_status'] = fid.variables['flag_height_20_ku'][:].copy()\n #-- SAR freeboard status flag\n Data['Freeboard_status'] = fid.variables['flag_freeboard_20_ku'][:].copy()\n #-- Number of averaged echoes or beams\n Data['N_avg'] = fid.variables['echo_numval_20_ku'][:].copy()\n #-- Wind Speed packed units (mm/s, 1e-3 m/s)\n Data['Wind_speed'] = fid.variables['wind_speed_alt_20_ku'][:].copy()\n\n #-- CryoSat-2 Auxiliary Data Group\n Auxiliary = {}\n #-- Ice Concentration packed units (%/1000)\n Auxiliary['Ice_conc'] = fid.variables['sea_ice_concentration_20_ku'][:].copy()\n #-- Snow Depth packed units (mm, 1e-3 m)\n Auxiliary['Snow_depth'] = fid.variables['snow_depth_20_ku'][:].copy()\n #-- Snow Density packed units (kg/m^3)\n Auxiliary['Snow_density'] = fid.variables['snow_density_20_ku'][:].copy()\n #-- Discriminator result\n Auxiliary['Discriminator'] = fid.variables['flag_surf_type_class_20_ku'][:].copy()\n #-- SARin discriminator parameters 1-10\n Auxiliary['SARIN_disc_1'] = fid.variables['sarin_output_1_20_ku'][:].copy()\n Auxiliary['SARIN_disc_2'] = fid.variables['sarin_output_2_20_ku'][:].copy()\n Auxiliary['SARIN_disc_3'] = fid.variables['sarin_output_3_20_ku'][:].copy()\n 
Auxiliary['SARIN_disc_4'] = fid.variables['sarin_output_4_20_ku'][:].copy()\n Auxiliary['SARIN_disc_5'] = fid.variables['sarin_output_5_20_ku'][:].copy()\n Auxiliary['SARIN_disc_6'] = fid.variables['sarin_output_6_20_ku'][:].copy()\n Auxiliary['SARIN_disc_7'] = fid.variables['sarin_output_7_20_ku'][:].copy()\n Auxiliary['SARIN_disc_8'] = fid.variables['sarin_output_8_20_ku'][:].copy()\n Auxiliary['SARIN_disc_9'] = fid.variables['sarin_output_9_20_ku'][:].copy()\n Auxiliary['SARIN_disc_10'] = fid.variables['sarin_output_10_20_ku'][:].copy()\n #-- Discriminator flags\n Auxiliary['Discrim_flag'] = fid.variables['flag_disc_stat_20_ku'][:].copy()\n #-- Slope model correction (Attitude of echo in micro-degrees)\n Auxiliary['Attitude'] = fid.variables['offset_attitude_20_ku'][:].copy()\n #-- Slope model correction (Azimuth of echo in micro-degrees)\n Auxiliary['Azimuth'] = fid.variables['offset_azimuth_20_ku'][:].copy()\n #-- Slope doppler correction (mm)\n Auxiliary['Slope_doppler'] = fid.variables['slope_dop_cor_20_ku'][:].copy()\n #-- The original latitude of the satellite (micro-degrees)\n Auxiliary['Lat_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- The original longitude of the satellite (micro-degrees)\n Auxiliary['Lon_sat'] = np.zeros((n_records),dtype=np.int32)\n #-- Ambiguity indicator\n Auxiliary['Ambiguity'] = fid.variables['flag_sarin_ambiguity_warning_20_ku'][:].copy()\n #-- Mean Sea Surface Model packed units (mm, 1e-3 m)\n Auxiliary['MSS_model'] = fid.variables['mean_sea_surf_sea_ice_20_ku'][:].copy()\n #-- Geoid Model packed units (mm, 1e-3 m)\n Auxiliary['Geoid_model'] = fid.variables['geoid_20_ku'][:].copy()\n #-- ODLE Model packed units (mm, 1e-3 m)\n Auxiliary['ODLE'] = fid.variables['odle_20_ku'][:].copy()\n Auxiliary['DEM_elev'] = fid.variables['dem_height_20_ku'][:].copy()\n Auxiliary['DEM_ID'] = fid.variables['dem_identifier_20_ku'][:].copy()\n\n #-- CryoSat-2 External Corrections Group (interpolate 1Hz to 20Hz)\n Geometry = {}\n #-- Dry Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['dryTrop'] = np.zeros((n_records))\n mod_dry_tropo_cor_01 = fid.variables['mod_dry_tropo_cor_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,mod_dry_tropo_cor_01,k=3,s=0)\n Geometry['dryTrop'][:] = fint(time_20_ku)\n #-- Wet Tropospheric Correction packed units (mm, 1e-3 m)\n Geometry['wetTrop'] = np.zeros((n_records))\n mod_wet_tropo_cor_01 = fid.variables['mod_wet_tropo_cor_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,mod_wet_tropo_cor_01,k=3,s=0)\n Geometry['wetTrop'][:] = fint(time_20_ku)\n #-- Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['InvBar'] = np.zeros((n_records))\n inv_bar_cor_01 = fid.variables['inv_bar_cor_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,inv_bar_cor_01,k=3,s=0)\n Geometry['InvBar'][:] = fint(time_20_ku)\n #-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)\n Geometry['DAC'] = np.zeros((n_records))\n hf_fluct_total_cor_01 = fid.variables['hf_fluct_total_cor_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,hf_fluct_total_cor_01,k=3,s=0)\n Geometry['DAC'][:] = fint(time_20_ku)\n #-- GIM Ionospheric Correction packed units (mm, 1e-3 m)\n Geometry['Iono_GIM'] = np.zeros((n_records))\n iono_cor_gim_01 = fid.variables['iono_cor_gim_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,iono_cor_gim_01,k=3,s=0)\n Geometry['Iono_GIM'][:] = fint(time_20_ku)\n #-- Model Ionospheric Correction packed units (mm, 1e-3 m)\n 
Geometry['Iono_model'] = np.zeros((n_records))\n iono_cor_01 = fid.variables['iono_cor_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,iono_cor_01,k=3,s=0)\n Geometry['Iono_model'][:] = fint(time_20_ku)\n #-- Ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['ocTideElv'] = np.zeros((n_records))\n ocean_tide_01 = fid.variables['ocean_tide_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,ocean_tide_01,k=3,s=0)\n Geometry['ocTideElv'][:] = fint(time_20_ku)\n #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)\n Geometry['lpeTideElv'] = np.zeros((n_records))\n ocean_tide_eq_01 = fid.variables['ocean_tide_eq_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,ocean_tide_eq_01,k=3,s=0)\n Geometry['lpeTideElv'][:] = fint(time_20_ku)\n #-- Ocean loading tide Correction packed units (mm, 1e-3 m)\n Geometry['olTideElv'] = np.zeros((n_records))\n load_tide_01 = fid.variables['load_tide_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,load_tide_01,k=3,s=0)\n Geometry['olTideElv'][:] = fint(time_20_ku)\n #-- Solid Earth tide Correction packed units (mm, 1e-3 m)\n Geometry['seTideElv'] = np.zeros((n_records))\n solid_earth_tide_01 = fid.variables['solid_earth_tide_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,solid_earth_tide_01,k=3,s=0)\n Geometry['seTideElv'][:] = fint(time_20_ku)\n #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)\n Geometry['gpTideElv'] = np.zeros((n_records))\n pole_tide_01 = fid.variables['pole_tide_01'][:].copy()\n fint=scipy.interpolate.UnivariateSpline(time_cor_01,pole_tide_01,k=3,s=0)\n Geometry['gpTideElv'][:] = fint(time_20_ku)\n #-- Surface Type: Packed in groups of three bits for each of the 20 records\n Geometry['Surf_type'] = fid.variables['surf_type_20_ku'][:].copy()\n #-- Corrections Status Flag\n Geometry['Corr_status'] = fid.variables['flag_cor_status_20_ku'][:].copy()\n #-- Correction Error Flag\n Geometry['Corr_error'] = fid.variables['flag_cor_err_20_ku'][:].copy()\n #-- Sea State Bias Correction packed units (mm, 1e-3 m)\n Geometry['SSB'] = fid.variables['sea_state_bias_20_ku'][:].copy()\n\n #-- CryoSat-2 Internal Corrections Group\n Instrumental = {}\n #-- Doppler range correction: Radial + slope (mm)\n #-- computed for the component of satellite velocity in the nadir direction\n Instrumental['Doppler_range'] = fid.variables['dop_cor_20_ku'][:].copy()\n #-- Value of Doppler Angle for the first single look echo (1e-7 radians)\n Instrumental['Doppler_angle_start'] = fid.variables['dop_angle_start_20_ku'][:].copy()\n #-- Value of Doppler Angle for the last single look echo (1e-7 radians)\n Instrumental['Doppler_angle_stop'] = fid.variables['dop_angle_stop_20_ku'][:].copy()\n #-- Instrument Range Correction: transmit-receive antenna (mm)\n #-- Calibration correction to range on channel 1 computed from CAL1.\n Instrumental['TR_inst_range'] = fid.variables['instr_cor_range_tx_rx_20_ku'][:].copy()\n #-- Instrument Range Correction: receive-only antenna (mm)\n #-- Calibration correction to range on channel 2 computed from CAL1.\n Instrumental['R_inst_range'] = fid.variables['instr_cor_range_rx_20_ku'][:].copy()\n #-- Instrument Gain Correction: transmit-receive antenna (dB/100)\n #-- Calibration correction to gain on channel 1 computed from CAL1\n Instrumental['TR_inst_gain'] = fid.variables['instr_cor_gain_tx_rx_20_ku'][:].copy()\n #-- Instrument Gain Correction: receive-only (dB/100)\n #-- Calibration correction to gain on 
channel 2 computed from CAL1\n Instrumental['R_inst_gain'] = fid.variables['instr_cor_gain_rx_20_ku'][:].copy()\n #-- Internal Phase Correction (microradians)\n Instrumental['Internal_phase'] = fid.variables['instr_int_ph_cor_20_ku'][:].copy()\n #-- External Phase Correction (microradians)\n Instrumental['External_phase'] = fid.variables['instr_ext_ph_cor_20_ku'][:].copy()\n #-- Noise Power measurement (dB/100)\n Instrumental['Noise_power'] = fid.variables['noise_power_20_ku'][:].copy()\n #-- Phase slope correction (microradians)\n #-- Computed from the CAL-4 packets during the azimuth impulse response\n #-- amplitude (SARIN only). Set from the latest available CAL-4 packet.\n Instrumental['Phase_slope'] = fid.variables['ph_slope_cor_20_ku'][:].copy()\n\n #-- Bind all the bits of the l2i_mds together into a single dictionary\n CS_L2I_mds = {}\n CS_L2I_mds['Location'] = Location\n CS_L2I_mds['Data'] = Data\n CS_L2I_mds['Auxiliary'] = Auxiliary\n CS_L2I_mds['Geometry'] = Geometry\n CS_L2I_mds['Instrumental'] = Instrumental\n\n #-- extract global attributes and assign as MPH and SPH metadata\n CS_L2I_mds['METADATA'] = dict(MPH={},SPH={},DSD={})\n #-- MPH attributes\n CS_L2I_mds['METADATA']['MPH']['PRODUCT'] = fid.product_name\n CS_L2I_mds['METADATA']['MPH']['DOI'] = fid.doi\n CS_L2I_mds['METADATA']['MPH']['PROC_STAGE'] = fid.processing_stage\n CS_L2I_mds['METADATA']['MPH']['REF_DOC'] = fid.reference_document\n CS_L2I_mds['METADATA']['MPH']['ACQUISITION_STATION'] = fid.acquisition_station\n CS_L2I_mds['METADATA']['MPH']['PROC_CENTER'] = fid.processing_centre\n CS_L2I_mds['METADATA']['MPH']['PROC_TIME'] = fid.creation_time\n CS_L2I_mds['METADATA']['MPH']['SOFTWARE_VER'] = fid.software_version\n CS_L2I_mds['METADATA']['MPH']['SENSING_START'] = fid.sensing_start\n CS_L2I_mds['METADATA']['MPH']['SENSING_STOP'] = fid.sensing_stop\n CS_L2I_mds['METADATA']['MPH']['PHASE'] = fid.phase\n CS_L2I_mds['METADATA']['MPH']['CYCLE'] = fid.cycle_number\n CS_L2I_mds['METADATA']['MPH']['REL_ORBIT'] = fid.rel_orbit_number\n CS_L2I_mds['METADATA']['MPH']['ABS_ORBIT'] = fid.abs_orbit_number\n CS_L2I_mds['METADATA']['MPH']['STATE_VECTOR_TIME'] = fid.state_vector_time\n CS_L2I_mds['METADATA']['MPH']['DELTA_UT1'] = fid.delta_ut1\n CS_L2I_mds['METADATA']['MPH']['X_POSITION'] = fid.x_position\n CS_L2I_mds['METADATA']['MPH']['Y_POSITION'] = fid.y_position\n CS_L2I_mds['METADATA']['MPH']['Z_POSITION'] = fid.z_position\n CS_L2I_mds['METADATA']['MPH']['X_VELOCITY'] = fid.x_velocity\n CS_L2I_mds['METADATA']['MPH']['Y_VELOCITY'] = fid.y_velocity\n CS_L2I_mds['METADATA']['MPH']['Z_VELOCITY'] = fid.z_velocity\n CS_L2I_mds['METADATA']['MPH']['VECTOR_SOURCE'] = fid.vector_source\n CS_L2I_mds['METADATA']['MPH']['LEAP_UTC'] = fid.leap_utc\n CS_L2I_mds['METADATA']['MPH']['LEAP_SIGN'] = fid.leap_sign\n CS_L2I_mds['METADATA']['MPH']['LEAP_ERR'] = fid.leap_err\n CS_L2I_mds['METADATA']['MPH']['PRODUCT_ERR'] = fid.product_err\n #-- SPH attributes\n CS_L2I_mds['METADATA']['SPH']['START_RECORD_TAI_TIME'] = fid.first_record_time\n CS_L2I_mds['METADATA']['SPH']['STOP_RECORD_TAI_TIME'] = fid.last_record_time\n CS_L2I_mds['METADATA']['SPH']['ABS_ORBIT_START'] = fid.abs_orbit_start\n CS_L2I_mds['METADATA']['SPH']['REL_TIME_ASC_NODE_START'] = fid.rel_time_acs_node_start\n CS_L2I_mds['METADATA']['SPH']['ABS_ORBIT_STOP'] = fid.abs_orbit_stop\n CS_L2I_mds['METADATA']['SPH']['REL_TIME_ASC_NODE_STOP'] = fid.rel_time_acs_node_stop\n CS_L2I_mds['METADATA']['SPH']['EQUATOR_CROSS_TIME_UTC'] = fid.equator_cross_time\n 
CS_L2I_mds['METADATA']['SPH']['EQUATOR_CROSS_LONG'] = fid.equator_cross_long\n CS_L2I_mds['METADATA']['SPH']['ASCENDING_FLAG'] = fid.ascending_flag\n CS_L2I_mds['METADATA']['SPH']['START_LAT'] = fid.first_record_lat\n CS_L2I_mds['METADATA']['SPH']['START_LONG'] = fid.first_record_lon\n CS_L2I_mds['METADATA']['SPH']['STOP_LAT'] = fid.last_record_lat\n CS_L2I_mds['METADATA']['SPH']['STOP_LONG'] = fid.last_record_lon\n CS_L2I_mds['METADATA']['SPH']['L1_PROC_FLAG'] = fid.l1b_proc_flag\n CS_L2I_mds['METADATA']['SPH']['L1_PROCESSING_QUALITY'] = fid.l1b_processing_quality\n CS_L2I_mds['METADATA']['SPH']['L1_PROC_THRESH'] = fid.l1b_proc_thresh\n CS_L2I_mds['METADATA']['SPH']['INSTR_ID'] = fid.instr_id\n CS_L2I_mds['METADATA']['SPH']['LRM_MODE_PERCENT'] = fid.lrm_mode_percent\n CS_L2I_mds['METADATA']['SPH']['SAR_MODE_PERCENT'] = fid.sar_mode_percent\n CS_L2I_mds['METADATA']['SPH']['SARIN_MODE_PERCENT'] = fid.sarin_mode_percent\n CS_L2I_mds['METADATA']['SPH']['OPEN_OCEAN_PERCENT'] = fid.open_ocean_percent\n CS_L2I_mds['METADATA']['SPH']['CLOSE_SEA_PERCENT'] = fid.close_sea_percent\n CS_L2I_mds['METADATA']['SPH']['CONTINENT_ICE_PERCENT'] = fid.continent_ice_percent\n CS_L2I_mds['METADATA']['SPH']['LAND_PERCENT'] = fid.land_percent\n CS_L2I_mds['METADATA']['SPH']['L2_PROD_STATUS'] = fid.l2_prod_status\n CS_L2I_mds['METADATA']['SPH']['L2_PROC_FLAG'] = fid.l2_proc_flag\n CS_L2I_mds['METADATA']['SPH']['L2_PROCESSING_QUALITY'] = fid.l2_processing_quality\n CS_L2I_mds['METADATA']['SPH']['L2_PROC_THRESH'] = fid.l2_proc_thresh\n CS_L2I_mds['METADATA']['SPH']['SIR_CONFIGURATION'] = fid.sir_configuration\n CS_L2I_mds['METADATA']['SPH']['SIR_OP_MODE'] = fid.sir_op_mode\n CS_L2I_mds['METADATA']['SPH']['ORBIT_FILE'] = fid.xref_orbit\n CS_L2I_mds['METADATA']['SPH']['PROC_CONFIG_PARAMS_FILE'] = fid.xref_pconf\n CS_L2I_mds['METADATA']['SPH']['CONSTANTS_FILE'] = fid.xref_constants\n CS_L2I_mds['METADATA']['SPH']['IPF_RA_DATABASE_FILE'] = fid.xref_siral_characterisation\n CS_L2I_mds['METADATA']['SPH']['DORIS_USO_DRIFT_FILE'] = fid.xref_uso\n CS_L2I_mds['METADATA']['SPH']['STAR_TRACKER_ATTREF_FILE'] = fid.xref_star_tracker_attref\n CS_L2I_mds['METADATA']['SPH']['SIRAL_LEVEL_0_FILE'] = fid.xref_siral_l0\n CS_L2I_mds['METADATA']['SPH']['CALIBRATION_TYPE_1_FILE'] = fid.xref_cal1\n CS_L2I_mds['METADATA']['SPH']['SIR_COMPLEX_CAL1_SARIN'] = fid.xref_cal1_sarin\n CS_L2I_mds['METADATA']['SPH']['SCENARIO_FILE'] = fid.xref_orbit_scenario\n CS_L2I_mds['METADATA']['SPH']['CALIBRATION_TYPE_2_FILE'] = fid.xref_cal2\n CS_L2I_mds['METADATA']['SPH']['SURFACE_PRESSURE_FILE'] = fid.xref_surf_pressure\n CS_L2I_mds['METADATA']['SPH']['MEAN_PRESSURE_FILE'] = fid.xref_mean_pressure\n CS_L2I_mds['METADATA']['SPH']['WET_TROPOSPHERE_FILE'] = fid.xref_wet_trop\n CS_L2I_mds['METADATA']['SPH']['U_WIND_FILE'] = fid.xref_u_wind\n CS_L2I_mds['METADATA']['SPH']['V_WIND_FILE'] = fid.xref_v_wind\n CS_L2I_mds['METADATA']['SPH']['METEO_GRID_DEF_FILE'] = fid.xref_meteo\n CS_L2I_mds['METADATA']['SPH']['S1S2_PRESSURE_00H_MAP'] = fid.xref_s1s2_pressure_00h\n CS_L2I_mds['METADATA']['SPH']['S1S2_PRESSURE_06H_MAP'] = fid.xref_s1s2_pressure_06h\n CS_L2I_mds['METADATA']['SPH']['S1S2_PRESSURE_12H_MAP'] = fid.xref_s1s2_pressure_12h\n CS_L2I_mds['METADATA']['SPH']['S1S2_PRESSURE_18H_MAP'] = fid.xref_s1s2_pressure_18h\n CS_L2I_mds['METADATA']['SPH']['S1_TIDE_AMPLITUDE_MAP'] = fid.xref_s1_tide_amplitude\n CS_L2I_mds['METADATA']['SPH']['S1_TIDE_PHASE_MAP'] = fid.xref_s1_tide_phase\n CS_L2I_mds['METADATA']['SPH']['S2_TIDE_AMPLITUDE_MAP'] = fid.xref_s2_tide_amplitude\n 
CS_L2I_mds['METADATA']['SPH']['S2_TIDE_PHASE_MAP'] = fid.xref_s2_tide_phase\n CS_L2I_mds['METADATA']['SPH']['GPS_IONO_MAP'] = fid.xref_gim\n CS_L2I_mds['METADATA']['SPH']['MODIFIED_DIP_MAP_FILE'] = fid.xref_dip_map\n CS_L2I_mds['METADATA']['SPH']['IONO_COEFFICENTS_FILE'] = fid.xref_iono_cor\n CS_L2I_mds['METADATA']['SPH']['SAI_FILE'] = fid.xref_sai\n CS_L2I_mds['METADATA']['SPH']['OCEAN_TIDE_FILE'] = fid.xref_ocean_tide\n CS_L2I_mds['METADATA']['SPH']['TIDAL_LOADING_FILE'] = fid.xref_tidal_load\n CS_L2I_mds['METADATA']['SPH']['EARTH_TIDE_FILE'] = fid.xref_earth_tide\n CS_L2I_mds['METADATA']['SPH']['POLE_TIDE_FILE'] = fid.xref_pole_location\n CS_L2I_mds['METADATA']['SPH']['SURFACE_TYPE_FILE'] = fid.xref_surf_type\n CS_L2I_mds['METADATA']['SPH']['AUX_MOG2D'] = fid.xref_mog2d\n CS_L2I_mds['METADATA']['SPH']['SIRAL_LEVEL_1B_FILE'] = fid.xref_siral_l1b\n CS_L2I_mds['METADATA']['SPH']['MEAN_SEA_SURFACE_FILE'] = fid.xref_mss\n CS_L2I_mds['METADATA']['SPH']['GEOID_FILE'] = fid.xref_geoid\n CS_L2I_mds['METADATA']['SPH']['ODLE_FILE'] = fid.xref_odle\n #-- mode dependent attributes\n if ('xref_dem' in fid.ncattrs()):\n CS_L2I_mds['METADATA']['SPH']['DEM_MODEL_FILE'] = fid.xref_dem\n if ('xref_sea_ice' in fid.ncattrs()):\n CS_L2I_mds['METADATA']['SPH']['SEA_ICE_FILE'] = fid.xref_sea_ice\n if ('xref_snow_depth' in fid.ncattrs()):\n CS_L2I_mds['METADATA']['SPH']['SNOW_DEPTH_FILE'] = fid.xref_snow_depth\n\n #-- return the output dictionary\n return CS_L2I_mds\n\n#-- PURPOSE: Get scaling factors for converting unpacked units in binary files\ndef cryosat_scaling_factors():\n #-- dictionary of scale factors for CryoSat-2 variables\n CS_l2i_scale = {}\n\n #-- CryoSat-2 Location Group\n #-- Time and Orbit Parameters plus Measurement Mode\n CS_l2i_scale['Location'] = {}\n #-- Time: day part\n CS_l2i_scale['Location']['Day'] = 1.0\n #-- Time: second part\n CS_l2i_scale['Location']['Sec'] = 1.0\n #-- Time: microsecond part\n CS_l2i_scale['Location']['Micsec'] = 1.0\n #-- USO correction factor\n CS_l2i_scale['Location']['USO_Corr'] = 1.0\n #-- Mode ID\n CS_l2i_scale['Location']['Mode_ID'] = 1\n #-- Source sequence counter\n CS_l2i_scale['Location']['SSC'] = 1\n #-- Instrument configuration\n CS_l2i_scale['Location']['Inst_config'] = 1\n #-- Record Counter\n CS_l2i_scale['Location']['Rec_Count'] = 1\n #-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)\n CS_l2i_scale['Location']['Lat'] = 1e-7\n #-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)\n CS_l2i_scale['Location']['Lon'] = 1e-7\n #-- Alt: packed units (mm, 1e-3 m)\n #-- Altitude of COG above reference ellipsoid (interpolated value)\n CS_l2i_scale['Location']['Alt'] = 1e-3\n #-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)\n CS_l2i_scale['Location']['Alt_rate'] = 1e-3\n #-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)\n CS_l2i_scale['Location']['Sat_velocity'] = 1e-3\n #-- Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)\n CS_l2i_scale['Location']['Real_beam'] = 1e-6\n #-- Interferometer baseline vector. 
In CRF: packed units (micro-m/s, 1e-6 m/s)\n CS_l2i_scale['Location']['Baseline'] = 1e-6\n #-- Star Tracker ID\n CS_l2i_scale['Location']['ST_ID'] = 1\n CS_l2i_scale['Location']['Spare'] = 1\n #-- Roll (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n CS_l2i_scale['Location']['Roll'] = 1e-7\n #-- Pitch (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n CS_l2i_scale['Location']['Pitch'] = 1e-7\n #-- Yaw (Derived from star trackers): packed units (0.1 micro-degree, 1e-7 degrees)\n CS_l2i_scale['Location']['Yaw'] = 1e-7\n #-- Measurement Confidence Data\n CS_l2i_scale['Location']['MCD'] = 1\n\n #-- CryoSat-2 Measurement Group\n #-- Derived from instrument measurement parameters\n CS_l2i_scale['Data'] = {}\n #-- Measured elevation above ellipsoid from retracker 1: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['Elev_1'] = 1e-3\n #-- Measured elevation above ellipsoid from retracker 2: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['Elev_2'] = 1e-3\n #-- Measured elevation above ellipsoid from retracker 3: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['Elev_3'] = 1e-3\n #-- Sigma Zero Backscatter for retracker 1: packed units (1e-2 dB)\n CS_l2i_scale['Data']['Sig0_1'] = 1e-2\n #-- Sigma Zero Backscatter for retracker 2: packed units (1e-2 dB)\n CS_l2i_scale['Data']['Sig0_2'] = 1e-2\n #-- Sigma Zero Backscatter for retracker 3: packed units (1e-2 dB)\n CS_l2i_scale['Data']['Sig0_3'] = 1e-2\n #-- SWH packed units (mm, 1e-3)\n CS_l2i_scale['Data']['SWH'] = 1e-3\n #-- Peakiness: packed units (1e-2)\n CS_l2i_scale['Data']['Peakiness'] = 1e-2\n #-- Retracked range correction for retracker 1: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['Range_1'] = 1e-3\n #-- Retracked range correction for retracker 2: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['Range_2'] = 1e-3\n #-- Retracked range correction for retracker 3: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['Range_3'] = 1e-3\n #-- Retracked sigma 0 correction for Retracker 1: packed units (1e-2 dB)\n CS_l2i_scale['Data']['Retrack_1_sig0'] = 1e-2\n #-- Retracked sigma 0 correction for Retracker 2: packed units (1e-2 dB)\n CS_l2i_scale['Data']['Retrack_2_sig0'] = 1e-2\n #-- Retracked sigma 0 correction for Retracker 3: packed units (1e-2 dB)\n CS_l2i_scale['Data']['Retrack_3_sig0'] = 1e-2\n #-- Retracker 1 quality metric\n CS_l2i_scale['Data']['Quality_1'] = 1\n #-- Retracker 2 quality metric\n CS_l2i_scale['Data']['Quality_2'] = 1\n #-- Retracker 3 quality metric\n CS_l2i_scale['Data']['Quality_3'] = 1\n #-- Retrackers 3-23 output\n CS_l2i_scale['Data']['Retrack_3'] = 1\n CS_l2i_scale['Data']['Retrack_4'] = 1\n CS_l2i_scale['Data']['Retrack_5'] = 1\n CS_l2i_scale['Data']['Retrack_6'] = 1\n CS_l2i_scale['Data']['Retrack_7'] = 1\n CS_l2i_scale['Data']['Retrack_8'] = 1\n CS_l2i_scale['Data']['Retrack_9'] = 1\n CS_l2i_scale['Data']['Retrack_10'] = 1\n CS_l2i_scale['Data']['Retrack_11'] = 1\n CS_l2i_scale['Data']['Retrack_12'] = 1\n CS_l2i_scale['Data']['Retrack_13'] = 1\n CS_l2i_scale['Data']['Retrack_14'] = 1\n CS_l2i_scale['Data']['Retrack_15'] = 1\n CS_l2i_scale['Data']['Retrack_16'] = 1\n CS_l2i_scale['Data']['Retrack_17'] = 1\n CS_l2i_scale['Data']['Retrack_18'] = 1\n CS_l2i_scale['Data']['Retrack_19'] = 1\n CS_l2i_scale['Data']['Retrack_20'] = 1\n CS_l2i_scale['Data']['Retrack_21'] = 1\n CS_l2i_scale['Data']['Retrack_22'] = 1\n CS_l2i_scale['Data']['Retrack_23'] = 1\n #-- Power echo shape parameter: packed units (dB/100)\n CS_l2i_scale['Data']['echo_shape'] = 1e-2\n #-- Beam 
behaviour parameter: unitless code number related to\n #-- surface characteristics\n CS_l2i_scale['Data']['BB_parameter'] = 1\n #-- Cross track angle: packed units (micro radians)\n CS_l2i_scale['Data']['X_Track_Angle'] = 1e-6\n #-- Cross track angle correction: packed units (micro radians)\n CS_l2i_scale['Data']['X_Track_Angle_c'] = 1e-6\n #-- Leading edge coherence at retrack point 1/1000\n CS_l2i_scale['Data']['Coherence'] = 1e-3\n #-- Interpolated Ocean Height: packed units (mm above ellipsoid)\n CS_l2i_scale['Data']['Ocean_ht'] = 1e-3\n #-- Freeboard: packed units (mm, 1e-3 m)\n #-- -9999 default value indicates computation has not been performed\n CS_l2i_scale['Data']['Freeboard'] = 1e-3\n #-- Surface Height Anomaly: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['SHA'] = 1e-3\n #-- Interpolated Surface Height Anomaly: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['SSHA_interp'] = 1e-3\n #-- Error in ocean height interpolation: packed units (mm, 1e-3 m)\n CS_l2i_scale['Data']['SSHA_interp_RMS'] = 1e-3\n #-- Number of forward records interpolated\n CS_l2i_scale['Data']['SSHA_interp_count_fwd'] = 1\n #-- Number of backward records interpolated\n CS_l2i_scale['Data']['SSHA_interp_count_bkwd'] = 1\n #-- Distance in time of most forward record interpolated (milli-seconds)\n CS_l2i_scale['Data']['SSHA_interp_time_fwd'] = 1e-3\n #-- Distance in time of most backward record interpolated (milli-seconds)\n CS_l2i_scale['Data']['SSHA_interp_time_bkwd'] = 1e-3\n #-- Interpolation error flag\n CS_l2i_scale['Data']['SSHA_interp_flag'] = 1\n #-- Measurement mode\n CS_l2i_scale['Data']['Measurement_Mode'] = 1\n #-- Quality flags\n CS_l2i_scale['Data']['Quality_flag'] = 1\n #-- Retracker flags\n CS_l2i_scale['Data']['Retracker_flag'] = 1\n #-- Height calculation details\n #-- Specifies what was applied during the height calculation\n CS_l2i_scale['Data']['Height_status'] = 1\n #-- SAR freeboard status flag\n CS_l2i_scale['Data']['Freeboard_status'] = 1\n #-- Number of averaged echoes or beams\n CS_l2i_scale['Data']['N_avg'] = 1\n #-- Wind Speed packed units (mm/s, 1e-3 m/s)\n CS_l2i_scale['Data']['Wind_speed'] = 1e-3\n CS_l2i_scale['Data']['Spares1'] = 1\n\n #-- CryoSat-2 Auxiliary Data Group\n CS_l2i_scale['Auxiliary'] = {}\n #-- Ice Concentration packed units (%/1000)\n CS_l2i_scale['Auxiliary']['Ice_conc'] = 1e-3\n #-- Snow Depth packed units (mm, 1e-3 m)\n CS_l2i_scale['Auxiliary']['Snow_depth'] = 1e-3\n #-- Snow Density packed units (kg/m^3)\n CS_l2i_scale['Auxiliary']['Snow_density'] = 1.0\n #-- Discriminator result\n CS_l2i_scale['Auxiliary']['Discriminator'] = 1\n #-- SARin discriminator parameters 1-10\n CS_l2i_scale['Auxiliary']['SARIN_disc_1'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_2'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_3'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_4'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_5'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_6'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_7'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_8'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_9'] = 1\n CS_l2i_scale['Auxiliary']['SARIN_disc_10'] = 1\n #-- Discriminator flags\n CS_l2i_scale['Auxiliary']['Discrim_flag'] = 1\n #-- Slope model correction (Attitude of echo in micro-degrees)\n CS_l2i_scale['Auxiliary']['Attitude'] = 1e-6\n #-- Slope model correction (Azimuth of echo in micro-degrees)\n CS_l2i_scale['Auxiliary']['Azimuth'] = 1e-6\n #-- Slope doppler correction (mm)\n CS_l2i_scale['Auxiliary']['Slope_doppler'] = 1e-3\n #-- The original latitude of the 
satellite (micro-degrees)\n CS_l2i_scale['Auxiliary']['Lat_sat'] = 1e-6\n #-- The original longitude of the satellite (micro-degrees)\n CS_l2i_scale['Auxiliary']['Lon_sat'] = 1e-6\n #-- Ambiguity indicator\n CS_l2i_scale['Auxiliary']['Ambiguity'] = 1\n #-- Mean Sea Surface standard Model: packed units (mm, 1e-3 m)\n CS_l2i_scale['Auxiliary']['MSS_model'] = 1e-3\n #-- Geoid standard Model: packed units (mm, 1e-3 m)\n CS_l2i_scale['Auxiliary']['Geoid_model'] = 1e-3\n #-- ODLE standard Model: packed units (mm, 1e-3 m)\n CS_l2i_scale['Auxiliary']['ODLE'] = 1e-3\n #-- The interpolated elevation value obtained from the DEM (mm)\n CS_l2i_scale['Auxiliary']['DEM_elev'] = 1e-3\n #-- Identification of DEM used in SARin ambiguity test\n CS_l2i_scale['Auxiliary']['DEM_ID'] = 1\n CS_l2i_scale['Auxiliary']['Spares2'] = 1\n\n #-- CryoSat-2 External Corrections Group\n CS_l2i_scale['Geometry'] = {}\n #-- Dry Tropospheric Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['dryTrop'] = 1e-3\n #-- Wet Tropospheric Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['wetTrop'] = 1e-3\n #-- Inverse Barometric Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['InvBar'] = 1e-3\n #-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['DAC'] = 1e-3\n #-- GIM Ionospheric Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['Iono_GIM'] = 1e-3\n #-- Model Ionospheric Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['Iono_model'] = 1e-3\n #-- Ocean tide Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['ocTideElv'] = 1e-3\n #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['lpeTideElv'] = 1e-3\n #-- Ocean loading tide Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['olTideElv'] = 1e-3\n #-- Solid Earth tide Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['seTideElv'] = 1e-3\n #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['gpTideElv'] = 1e-3\n #-- Surface Type: Packed in groups of three bits for each of the 20 records\n CS_l2i_scale['Geometry']['Surf_type'] = 1\n #-- Corrections Status Flag\n CS_l2i_scale['Geometry']['Corr_status'] = 1\n #-- Correction Error Flag\n CS_l2i_scale['Geometry']['Corr_error'] = 1\n #-- Sea State Bias Correction packed units (mm, 1e-3 m)\n CS_l2i_scale['Geometry']['SSB'] = 1e-3\n CS_l2i_scale['Geometry']['Spares3'] = 1\n\n #-- CryoSat-2 Internal Corrections Group\n CS_l2i_scale['Instrumental'] = {}\n #-- Doppler range correction: Radial + slope (mm)\n CS_l2i_scale['Instrumental']['Doppler_range'] = 1e-3\n #-- Instrument Range Correction: t-r antenna (mm)\n CS_l2i_scale['Instrumental']['TR_inst_range'] = 1e-3\n #-- Instrument Range Correction: r-only antenna (mm)\n CS_l2i_scale['Instrumental']['R_inst_range'] = 1e-3\n #-- Instrument Sigma 0 Correction: t-r antenna (dB/100)\n CS_l2i_scale['Instrumental']['TR_inst_gain'] = 1e-2\n #-- Instrument Sigma 0 Correction: r-only (dB/100)\n CS_l2i_scale['Instrumental']['R_inst_gain'] = 1e-2\n #-- Internal Phase Correction (milli-radians)\n CS_l2i_scale['Instrumental']['Internal_phase'] = 1e-3\n #-- External Phase Correction (milli-radians)\n CS_l2i_scale['Instrumental']['External_phase'] = 1e-3\n #-- Noise Power measurement\n CS_l2i_scale['Instrumental']['Noise_power'] = 1\n #-- Phase slope correction (microradians)\n CS_l2i_scale['Instrumental']['Phase_slope'] = 1e-6\n CS_l2i_scale['Instrumental']['Spares4'] = 
1\n\n #-- return the scaling factors\n return CS_l2i_scale\n\n#-- PURPOSE: Read ASCII Main Product Header (MPH) block from an ESA PDS file\ndef read_MPH(full_filename):\n #-- read input data file\n with open(os.path.expanduser(full_filename), 'rb') as fid:\n file_contents = fid.read().splitlines()\n\n #-- Define constant values associated with PDS file formats\n #-- number of text lines in standard MPH\n n_MPH_lines = 41\n #-- check that first line of header matches PRODUCT\n if not bool(re.match(br'PRODUCT\\=\\\"(.*)(?=\\\")',file_contents[0])):\n raise IOError('File does not start with a valid PDS MPH')\n #-- read MPH header text\n s_MPH_fields = {}\n for i in range(n_MPH_lines):\n #-- use regular expression operators to read headers\n if bool(re.match(br'(.*?)\\=\\\"(.*)(?=\\\")',file_contents[i])):\n #-- data fields within quotes\n field,value=re.findall(br'(.*?)\\=\\\"(.*)(?=\\\")',file_contents[i]).pop()\n s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()\n elif bool(re.match(br'(.*?)\\=(.*)',file_contents[i])):\n #-- data fields without quotes\n field,value=re.findall(br'(.*?)\\=(.*)',file_contents[i]).pop()\n s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()\n\n #-- Return block name array to calling function\n return s_MPH_fields\n\n#-- PURPOSE: Read ASCII Specific Product Header (SPH) block from a PDS file\ndef read_SPH(full_filename,j_sph_size):\n #-- read input data file\n with open(os.path.expanduser(full_filename), 'rb') as fid:\n file_contents = fid.read().splitlines()\n\n #-- Define constant values associated with PDS file formats\n #-- number of text lines in standard MPH\n n_MPH_lines = 41\n #-- compile regular expression operator for reading headers\n rx = re.compile(br'(.*?)\\=\\\"?(.*)',re.VERBOSE)\n #-- check first line of header matches SPH_DESCRIPTOR\n if not bool(re.match(br'SPH\\_DESCRIPTOR\\=',file_contents[n_MPH_lines+1])):\n raise IOError('File does not have a valid PDS DSD')\n #-- read SPH header text (no binary control characters)\n s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)\n and not re.search(br'[^\\x20-\\x7e]+',li)]\n\n #-- extract SPH header text\n s_SPH_fields = {}\n c = 0\n while (c < len(s_SPH_lines)):\n #-- check if line is within DS_NAME portion of SPH header\n if bool(re.match(br'DS_NAME',s_SPH_lines[c])):\n #-- add dictionary for DS_NAME\n field,value=re.findall(br'(.*?)\\=\\\"(.*)(?=\\\")',s_SPH_lines[c]).pop()\n key = value.decode('utf-8').rstrip()\n s_SPH_fields[key] = {}\n for line in s_SPH_lines[c+1:c+7]:\n if bool(re.match(br'(.*?)\\=\\\"(.*)(?=\\\")',line)):\n #-- data fields within quotes\n dsfield,dsvalue=re.findall(br'(.*?)\\=\\\"(.*)(?=\\\")',line).pop()\n s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()\n elif bool(re.match(br'(.*?)\\=(.*)',line)):\n #-- data fields without quotes\n dsfield,dsvalue=re.findall(br'(.*?)\\=(.*)',line).pop()\n s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()\n #-- add 6 to counter to go to next entry\n c += 6\n #-- use regular expression operators to read headers\n elif bool(re.match(br'(.*?)\\=\\\"(.*)(?=\\\")',s_SPH_lines[c])):\n #-- data fields within quotes\n field,value=re.findall(br'(.*?)\\=\\\"(.*)(?=\\\")',s_SPH_lines[c]).pop()\n s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()\n elif bool(re.match(br'(.*?)\\=(.*)',s_SPH_lines[c])):\n #-- data fields without quotes\n field,value=re.findall(br'(.*?)\\=(.*)',s_SPH_lines[c]).pop()\n 
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()\n #-- add 1 to counter to go to next line\n c += 1\n\n #-- Return block name array to calling function\n return s_SPH_fields\n\n#-- PURPOSE: Read ASCII Data Set Descriptors (DSD) block from a PDS file\ndef read_DSD(full_filename):\n #-- read input data file\n with open(os.path.expanduser(full_filename), 'rb') as fid:\n file_contents = fid.read().splitlines()\n\n #-- Define constant values associated with PDS file formats\n #-- number of text lines in standard MPH\n n_MPH_lines = 41\n #-- number of text lines in a DSD header\n n_DSD_lines = 8\n\n #-- Level-2 CryoSat DS_NAMES within files\n regex_patterns = []\n regex_patterns.append(br'DS_NAME\\=\"SIR_LRM_L2(_I)?[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_LRMIL2[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_SAR_L2(A|B)?(_I)?[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_SARIL2(A|B)?[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_FDM_L2[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_SIN_L2(_I)?[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_SINIL2[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_SID_L2(_I)?[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_SIDIL2[\\s+]*\"')\n regex_patterns.append(br'DS_NAME\\=\"SIR_GDR_2(A|B|_)?[\\s+]*\"')\n #-- find the DSD starting line within the SPH header\n c = 0\n Flag = False\n while ((Flag is False) and (c < len(regex_patterns))):\n #-- find indice within\n indice = [i for i,line in enumerate(file_contents[n_MPH_lines+1:]) if\n re.search(regex_patterns[c],line)]\n if indice:\n Flag = True\n else:\n c+=1\n #-- check that valid indice was found within header\n if not indice:\n raise IOError('Can not find correct DSD field')\n\n #-- extract s_DSD_fields info\n DSD_START = n_MPH_lines + indice[0] + 1\n s_DSD_fields = {}\n for i in range(DSD_START,DSD_START+n_DSD_lines):\n #-- use regular expression operators to read headers\n if bool(re.match(br'(.*?)\\=\\\"(.*)(?=\\\")',file_contents[i])):\n #-- data fields within quotes\n field,value=re.findall(br'(.*?)\\=\\\"(.*)(?=\\\")',file_contents[i]).pop()\n s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()\n elif bool(re.match(br'(.*?)\\=(.*)',file_contents[i])):\n #-- data fields without quotes\n field,value=re.findall(br'(.*?)\\=(.*)',file_contents[i]).pop()\n s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()\n\n #-- Return block name array to calling function\n return s_DSD_fields\n\n#-- PURPOSE: read CryoSat Level-2 Intermediate data\ndef read_cryosat_L2I(full_filename, VERBOSE=False):\n #-- file basename and file extension of input file\n fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))\n\n #-- CryoSat file class\n #-- OFFL (Off Line Processing/Systematic)\n #-- NRT_ (Near Real Time)\n #-- RPRO (ReProcessing)\n #-- TEST (Testing)\n #-- LTA_ (Long Term Archive)\n regex_class = r'OFFL|NRT_|RPRO|TEST|LTA_'\n #-- CryoSat mission products\n #-- SIR_LRM_2 L2 Product from Low Resolution Mode Processing\n #-- SIR_FDM_2 L2 Product from Fast Delivery Marine Mode Processing\n #-- SIR_SIN_2 L2 Product from SAR Interferometric Processing\n #-- SIR_SID_2 L2 Product from SIN Degraded Processing\n #-- SIR_SAR_2 L2 Product from SAR Processing\n #-- SIR_GDR_2 L2 Consolidated Product\n #-- SIR_LRMI2 In-depth L2 Product from LRM Processing\n #-- SIR_SINI2 In-depth L2 Product from SIN Processing\n #-- SIR_SIDI2 In-depth L2 Product from SIN Degraded Process.\n #-- SIR_SARI2 In-depth L2 
Product from SAR Processing\n regex_products = (r'SIR_LRM_2|SIR_FDM_2|SIR_SIN_2|SIR_SID_2|'\n r'SIR_SAR_2|SIR_GDR_2|SIR_LRMI2|SIR_SINI2|SIR_SIDI2|SIR_SARI2')\n #-- CRYOSAT LEVEL-2 PRODUCTS NAMING RULES\n #-- Mission Identifier\n #-- File Class\n #-- File Product\n #-- Validity Start Date and Time\n #-- Validity Stop Date and Time\n #-- Baseline Identifier\n #-- Version Number\n regex_pattern = r'(.*?)_({0})_({1})__(\\d+T?\\d+)_(\\d+T?\\d+)_(.*?)(\\d+)'\n rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)\n #-- extract file information from filename\n MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()\n\n #-- check if input file is original binary *.DBL or new netCDF4 *.nc format\n if (fileExtension == '.nc'):\n print(fileBasename) if VERBOSE else None\n CS_L2I_mds = cryosat_baseline_D(full_filename, UNPACK=False)\n elif (fileExtension == '.DBL'):\n #-- Record sizes\n CS_L2I_MDS_REC_SIZE = 556\n CS_L2I_BC_MDS_REC_SIZE = 664\n CS_L2I_C_MDS_REC_SIZE = 664\n #-- check baseline from file to set i_record_size and allocation function\n if (BASELINE == 'C'):\n i_record_size = CS_L2I_C_MDS_REC_SIZE\n read_cryosat_variables = cryosat_baseline_C\n elif (BASELINE == 'BC'):\n i_record_size = CS_L2I_BC_MDS_REC_SIZE\n read_cryosat_variables = cryosat_baseline_BC\n else:\n i_record_size = CS_L2I_MDS_REC_SIZE\n read_cryosat_variables = cryosat_baseline_AB\n\n #-- read the input file to get file information\n fid = os.open(os.path.expanduser(full_filename),os.O_RDONLY)\n file_info = os.fstat(fid)\n os.close(fid)\n\n #-- num DSRs from SPH\n j_num_DSR = np.int32(file_info.st_size//i_record_size)\n #-- print file information\n if VERBOSE:\n print(fileBasename)\n print('{0:d} {1:d} {2:d}'.format(j_num_DSR,file_info.st_size,i_record_size))\n #-- Check if MPH/SPH/DSD headers\n if (j_num_DSR*i_record_size == file_info.st_size):\n print('No Header on file')\n print('The number of DSRs is: {0:d}'.format(j_num_DSR))\n else:\n print('Header on file')\n\n #-- Check if MPH/SPH/DSD headers\n if (j_num_DSR*i_record_size != file_info.st_size):\n #-- If there are MPH/SPH/DSD headers\n s_MPH_fields = read_MPH(full_filename)\n j_sph_size = np.int32(re.findall('[-+]?\\d+',s_MPH_fields['SPH_SIZE']).pop())\n s_SPH_fields = read_SPH(full_filename,j_sph_size)\n #-- extract information from DSD fields\n s_DSD_fields = read_DSD(full_filename)\n #-- extract DS_OFFSET\n j_DS_start = np.int32(re.findall('[-+]?\\d+',s_DSD_fields['DS_OFFSET']).pop())\n #-- extract number of DSR in the file\n j_num_DSR = np.int32(re.findall('[-+]?\\d+',s_DSD_fields['NUM_DSR']).pop())\n #-- check the record size\n j_DSR_size = np.int32(re.findall('[-+]?\\d+',s_DSD_fields['DSR_SIZE']).pop())\n #-- minimum size is start of the read plus number of records to read\n j_check_size = j_DS_start +(j_DSR_size*j_num_DSR)\n if VERBOSE:\n print('The offset of the DSD is: {0:d} bytes'.format(j_DS_start))\n print('The number of DSRs is {0:d}'.format(j_num_DSR))\n print('The size of the DSR is {0:d}'.format(j_DSR_size))\n #-- check if invalid file size\n if (j_check_size > file_info.st_size):\n raise IOError('File size error')\n #-- extract binary data from input CryoSat data file (skip headers)\n fid = open(os.path.expanduser(full_filename), 'rb')\n cryosat_header = fid.read(j_DS_start)\n #-- iterate through CryoSat file and fill output variables\n CS_L2I_mds = read_cryosat_variables(fid,i_record_size,j_num_DSR)\n #-- add headers to output dictionary as METADATA\n CS_L2I_mds['METADATA'] = {}\n 
CS_L2I_mds['METADATA']['MPH'] = s_MPH_fields\n CS_L2I_mds['METADATA']['SPH'] = s_SPH_fields\n CS_L2I_mds['METADATA']['DSD'] = s_DSD_fields\n #-- close the input CryoSat binary file\n fid.close()\n else:\n #-- If there are not MPH/SPH/DSD headers\n #-- extract binary data from input CryoSat data file\n fid = open(os.path.expanduser(full_filename), 'rb')\n #-- iterate through CryoSat file and fill output variables\n CS_L2I_mds = read_cryosat_variables(fid,i_record_size,j_num_DSR)\n #-- close the input CryoSat binary file\n fid.close()\n\n #-- return the data and headers\n return CS_L2I_mds\n" ]
[ [ "numpy.fromfile", "numpy.array", "numpy.zeros", "numpy.int32" ] ]
PhMueller/TrajectoryParser
[ "9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f" ]
[ "HPOBenchExperimentUtils/optimizer/fabolas_optimizer.py" ]
[ "import logging\nfrom pathlib import Path\nfrom typing import Union, Dict, Tuple, Sequence\nimport sys\nimport numpy as np\nfrom math import log2\nimport enum\n\nfrom HPOBenchExperimentUtils.optimizer.base_optimizer import SingleFidelityOptimizer\nfrom HPOBenchExperimentUtils.core.bookkeeper import Bookkeeper\nfrom HPOBenchExperimentUtils.utils.utils import get_mandatory_optimizer_setting, standard_rng_init\nimport HPOBenchExperimentUtils.utils.emukit_utils as emukit_utils\n\nimport ConfigSpace as cs\n\nfrom emukit.examples.fabolas import fmin_fabolas, FabolasModel\nfrom emukit.examples.fabolas.continuous_fidelity_entropy_search import ContinuousFidelityEntropySearch\nfrom emukit.core import ParameterSpace, ContinuousParameter, InformationSourceParameter\nfrom emukit.core.loop import UserFunctionWrapper\nfrom emukit.core.initial_designs.latin_design import LatinDesign\nfrom emukit.core.initial_designs import RandomDesign\nfrom emukit.core.optimization import MultiSourceAcquisitionOptimizer, GradientAcquisitionOptimizer, \\\n RandomSearchAcquisitionOptimizer\nfrom emukit.core.acquisition import IntegratedHyperParameterAcquisition, acquisition_per_expected_cost\nfrom emukit.bayesian_optimization.acquisitions.max_value_entropy_search import MUMBO\nfrom emukit.bayesian_optimization.loops.cost_sensitive_bayesian_optimization_loop import \\\n CostSensitiveBayesianOptimizationLoop\n\n_log = logging.getLogger(__name__)\n\n\nclass AcquisitionTypes(enum.Enum):\n MTBO = \"mtbo\"\n MUMBO = \"mumbo\"\n\ninitial_designs = {\n \"random\": RandomDesign,\n \"latin\": LatinDesign\n}\n\n_fidelity_parameter_names = [\"subsample\", \"dataset_fraction\"]\n\n\n# noinspection PyPep8Naming\nclass FabolasOptimizer(SingleFidelityOptimizer):\n def __init__(self, benchmark: Bookkeeper,\n settings: Dict, output_dir: Path, rng: Union[int, None] = 0):\n\n super().__init__(benchmark, settings, output_dir, rng)\n\n # The benchmark defined its configuration space as a ConfigSpace.ConfigurationSpace object. This must be parsed\n # into emukit's version, an emukit.ParameterSpace object. Additionally, we need mappings between configurations\n # defined in either of the two conventions.\n self.original_space = self.benchmark.get_configuration_space()\n self.optimizer_settings = {\n \"update_interval\": get_mandatory_optimizer_setting(settings, \"update_interval\"),\n \"marginalize_hypers\": get_mandatory_optimizer_setting(settings, \"marginalize_hypers\"),\n \"initial_design\": str(get_mandatory_optimizer_setting(settings, \"initial_design\")).lower()\n }\n\n acquisition_type = str(get_mandatory_optimizer_setting(settings, \"acquisition\")).lower()\n if acquisition_type == AcquisitionTypes.MUMBO.value:\n # Fabolas optimizer with the default acquisition function replaced with MTBO MUMBO acquisition.\n # cf. Section 4.3 of the MUMBO paper: https://arxiv.org/pdf/2006.12093.pdf\n self.acquisition_type = AcquisitionTypes.MUMBO\n self.mumbo_settings = {\n \"num_mc_samples\": get_mandatory_optimizer_setting(settings, \"num_mc_samples\"),\n \"grid_size\": get_mandatory_optimizer_setting(settings, \"grid_size\")\n }\n elif acquisition_type == AcquisitionTypes.MTBO.value:\n # Fabolas optimizer with the default MTBO acquisition function.\n self.acquisition_type = AcquisitionTypes.MTBO\n self.mtbo_settings = {\n \"num_eval_points\": get_mandatory_optimizer_setting(settings, \"num_eval_points\")\n }\n else:\n raise ValueError(\"Fabolas optimizer does not recognize acquisition function %s. 
Expected either 'mumbo' or \"\n \"'mtbo'.\" % str(self.acquisition_type))\n\n self.emukit_space, self.to_emu, self.to_cs = emukit_utils.generate_space_mappings(self.original_space)\n\n self.num_fidelity_values = get_mandatory_optimizer_setting(\n settings, \"num_fidelity_values\", err_msg=\"Number of discrete fidelity levels must be specified in the \"\n \"parameter 'num_fidelity_values'. This defines the number of \"\n \"individual dataset sizes that will be used by FABOLAS.\")\n self.dataset_size = benchmark.benchmark.y_train.shape[0]\n\n self._setup_fabolas_fidelity()\n\n def wrapper(inp):\n \"\"\" Emukit requires this function to accept 2D inputs, with individual configurations aligned along axis 0\n and the various components of each configuration along axis 1. FABOLAS itself will only query one\n configuration at a time, but the interface must support multiple. \"\"\"\n\n nonlocal self\n _log.debug(\"Benchmark wrapper received input %s.\" % str(inp))\n if inp.ndim == 1:\n inp = np.expand_dims(inp, axis=0)\n\n yvals, costs = [], []\n for i in range(inp.shape[0]):\n run_id = SingleFidelityOptimizer._id_generator()\n x, s = inp[i, :-1], inp[i, -1]\n _log.debug(\"Calling objective function with configuration %s and fidelity index %s.\" % (x, s))\n config = cs.Configuration(self.original_space, values=self.to_cs(x))\n fidelity = self.fidelity_emukit_to_cs(s)\n _log.debug(\"Generated configuration %s, fidelity %s\" % (config, fidelity))\n res = benchmark.objective_function(configuration=config, fidelity=fidelity,\n configuration_id=run_id,\n **self.settings_for_sending)\n y, c = res[\"function_value\"], res[\"cost\"]\n yvals.append(y)\n costs.append(c)\n\n return np.asarray(yvals).reshape(-1, 1), np.asarray(costs).reshape(-1, 1)\n\n self.benchmark_caller = wrapper\n self.n_init = int(get_mandatory_optimizer_setting(settings, \"init_samples_per_dim\") *\n self.emukit_space.dimensionality)\n\n _log.info(\"Finished reading all settings for FABOLAS optimizer with MUMBO acquisition.\")\n\n def _setup_fabolas_fidelity(self):\n # FABOLAS will only work on a fidelity parameter named in '_fidelity_parameter_names', with values in\n # (0.0, 1.0].\n if self.main_fidelity.name not in _fidelity_parameter_names:\n raise RuntimeError(\"Cannot process unrecognized fidelity parameter %s. Must be one of %s.\" %\n (self.main_fidelity.name, str(_fidelity_parameter_names)))\n\n # As per the original sample code, the fidelity values were first discretized by FABOLAS, effectively running\n # continuous fidelity BO on top of Multi-Task BO, cf. emukit.examples.fabolas.fmin_fabolas.fmin().\n _log.debug(\"Discretizing the dataset sizes for use with FABOLAS into %d fidelity levels on a log2 scale.\" %\n self.num_fidelity_values)\n\n if type(self.main_fidelity) == cs.OrdinalHyperparameter:\n # Assume that the OrdinalHyperparameter sequence contains exact budget sizes.\n self.main_fidelity: cs.OrdinalHyperparameter\n self.budgets = np.asarray(self.main_fidelity.sequence)\n s_min = self.budgets[0]\n s_max = self.budgets[-1]\n ordinal = True\n else:\n # Assume that the sample budgets need to be extracted from a range of values.\n # FABOLAS expects to sample the fidelity values on a log scale. 
The following code effectively ignores\n # the log attribute of the parameter.\n # As per the interface of FabolasModel, s_min and s_max should not be fractions but dataset sizes.\n s_min = max(self.min_budget * self.dataset_size, 1)\n s_max = max(self.max_budget * self.dataset_size, 1)\n self.budgets = np.rint(np.clip(np.power(0.5, np.arange(self.num_fidelity_values - 1, -1, -1)) * s_max,\n s_min, s_max)).astype(int)\n # Needed to avoid introducing more ifs.\n # def fid_op(s): return np.clip(s, s_min, s_max) / s_max\n ordinal = False\n\n if self.acquisition_type is AcquisitionTypes.MUMBO:\n # To summarize, MUMBO will be given the fidelity values as a list of indices, each corresponding to an\n # actual dataset_fraction value sampled on a log2 scale and stored in 'budgets'. Thus, the value retrieved\n # from budgets needs to be clipped to account for any numerical inconsistencies arising out of the chained\n # log and exp operations.\n if ordinal:\n # s is an index, budget contains integers, the fidelity is expected to be an exact integer\n def map_fn(s: int):\n return {self.main_fidelity.name: self.budgets[int(s)]}\n else:\n # s is an index, budget contains integers, the fidelity is expected to be a fraction in [0.0, 1.0]\n def map_fn(s: int): return {self.main_fidelity.name: np.clip(self.budgets[int(s)],\n s_min, s_max) / s_max}\n\n self.fidelity_emukit_to_cs = map_fn\n\n # It was necessary to define a custom InformationSourceParameter here because of some minor issues that\n # FABOLAS had with a DiscreteParameter (the parent class of InformationSourceParameter) beginning at index\n # 0. Using a sub-class of InformationSourceParameter was, in turn, necessary, because there are internal\n # checks in place in MUMBO for that.\n # self.fabolas_fidelity = emukit_utils.SmarterInformationSourceParameter(self.budgets.shape[0], start_ind=1)\n self.fabolas_fidelity = InformationSourceParameter(self.budgets.shape[0])\n elif self.acquisition_type is AcquisitionTypes.MTBO:\n if ordinal:\n # s is a size in integers, budget contains integers, fidelity is expected to be an integer - find\n # closest fit for s in budgets\n def map_fn(s: int): return {self.main_fidelity.name: self.budgets[np.abs(self.budgets - s).argmin()]}\n else:\n # s is a size in integers, budget contains sizes in integers, fidelity is expected to be a fraction in\n # the range [0.0, 1.0]\n def map_fn(s: int): return {self.main_fidelity.name: np.clip(s, s_min, s_max) / s_max}\n\n self.fidelity_emukit_to_cs = map_fn\n self.fabolas_fidelity = ContinuousParameter(\"s\", s_min, s_max)\n else:\n raise RuntimeError(\"Unexpected acquisition type %s\" % self.acquisition_type)\n\n def _setup_model(self):\n \"\"\"\n This is almost entirely boilerplate code required to setup a model to work under the emukit framework. This\n code has been adapted from the convenience wrappers fmin.fmin_fabolas() and fabolas_loop.FabolasLoop from the\n FABOLAS example in emukit.examples.fabolas, here:\n https://github.com/EmuKit/emukit/tree/96299e99c5c406b46baf6f0f0bbea70950566918/emukit/examples/fabolas\n \"\"\"\n\n # ############################################################################################################ #\n # Ref: emukit.examples.fabolas.fmin.fmin_fabolas()\n # https://github.com/EmuKit/emukit/blob/96299e99c5c406b46baf6f0f0bbea70950566918/emukit/examples/fabolas/fmin.py\n\n # Generate warm-start samples. 
Using the implementation provided in the FABOLAS example, but B.2 in the\n # appendix of the MUMBO paper indicates RandomDesign should be used here instead. Therefore, exposed as a\n # hyperparameter.\n initial_design = initial_designs[self.optimizer_settings[\"initial_design\"]](self.emukit_space)\n grid = initial_design.get_samples(self.n_init)\n n_reps = self.n_init // self.budgets.shape[0] + 1\n\n # Samples for the fidelity values. Same as the FABOLAS example code.\n s_min, s_max = self.fabolas_fidelity.bounds[0]\n if self.acquisition_type is AcquisitionTypes.MUMBO:\n # For MUMBO, we sample fidelities as indices instead of actual dataset sizes\n sample_fidelities = np.expand_dims(np.tile(np.arange(s_min, s_max+1), n_reps)[:self.n_init], 1)\n else:\n # For MTBO, we directly sample the dataset sizes\n sample_fidelities = np.expand_dims(np.tile(self.budgets, n_reps)[:self.n_init], 1)\n\n # Append sampled fidelity values to sampled configurations and perform evaluations. Same as the FABOLAS example\n # code.\n X_init = np.concatenate((grid, sample_fidelities), axis=1)\n res = np.array(list(map(self.benchmark_caller, X_init))).reshape((-1, 2))\n Y_init = res[:, 0][:, None]\n cost_init = res[:, 1][:, None]\n _log.debug(\"Generated %d warm-start samples.\" % X_init.shape[0])\n # ############################################################################################################ #\n\n # ############################################################################################################ #\n # Ref: emukit.examples.fabolas.fabolas_loop.FabolasLoop\n # https://github.com/EmuKit/emukit/blob/96299e99c5c406b46baf6f0f0bbea70950566918/emukit/examples/fabolas/fabolas_loop.py\n\n extended_space = ParameterSpace([*self.emukit_space.parameters, self.fabolas_fidelity])\n\n if self.acquisition_type is AcquisitionTypes.MUMBO:\n # Insert MUMBO acquisition instead of FABOLAS' MTBO acquisition\n\n # The actual FABOLAS model comes into play here. Same as the FABOLAS example code. Note that here, we pass\n # the actual minimum and maximum budget/dataset size values instead of indices to FabolasModel. This is the\n # reason why the wrapper was needed.\n model_objective = FabolasModelMumboWrapper(budgets=self.budgets, X_init=X_init, Y_init=Y_init,\n s_min=self.budgets[0], s_max=self.budgets[-1])\n model_cost = FabolasModelMumboWrapper(budgets=self.budgets, X_init=X_init, Y_init=cost_init,\n s_min=self.budgets[0], s_max=self.budgets[-1])\n _log.debug(\"Initialized objective and cost estimation models\")\n\n # ---------------------- ---------------------- ---------------------- ------------------ ---------------- #\n # Ref. 
Section 4.3 of the MUMBO paper: https://arxiv.org/pdf/2006.12093.pdf\n if self.optimizer_settings[\"marginalize_hypers\"]:\n acquisition_generator = lambda model: MUMBO(\n model=model_objective, space=extended_space, target_information_source_index=s_max,\n num_samples=self.mumbo_settings[\"num_mc_samples\"], grid_size=self.mumbo_settings[\"grid_size\"])\n\n entropy_search = IntegratedHyperParameterAcquisition(model_objective, acquisition_generator)\n else:\n entropy_search = MUMBO(\n model=model_objective, space=extended_space, target_information_source_index=s_max,\n num_samples=self.mumbo_settings[\"num_mc_samples\"], grid_size=self.mumbo_settings[\"grid_size\"])\n\n acquisition = acquisition_per_expected_cost(entropy_search, model_cost)\n # This was used in the MUMBO example code\n acquisition_optimizer = MultiSourceAcquisitionOptimizer(GradientAcquisitionOptimizer(extended_space),\n space=extended_space)\n _log.debug(\"MUMBO acquisition function ready.\")\n # --------------------- --------------------- --------------------- --------------------- ---------------- #\n\n # Define the properties of the BO loop within which the chosen surrogate model (FABOLAS) and acquisition\n # function (MUMBO) are used for performing BO. Same as the FABOLAS example code.\n self.optimizer = CostSensitiveBayesianOptimizationLoop(\n space=extended_space, model_objective=model_objective, model_cost=model_cost, acquisition=acquisition,\n update_interval=self.optimizer_settings[\"update_interval\"], acquisition_optimizer=acquisition_optimizer)\n\n # These are hooks that help us record the trajectory for an information theoretic acquisition function,\n # which cannot be handled otherwise by the Bookkeeper.\n self.optimizer.loop_start_event.append(emukit_utils.get_init_trajectory_hook(self.output_dir))\n self.optimizer.iteration_end_event.append(emukit_utils.get_trajectory_hook(self.output_dir, self.to_cs))\n _log.info(\"FABOLAS optimizer with MUMBO acquisition initialized and ready to run.\")\n else:\n # AcquisitionType.MTBO\n # The default MTBO acquisition for FABOLAS.\n\n model_objective = FabolasModel(X_init=X_init, Y_init=Y_init, s_min=self.budgets[0], s_max=self.budgets[-1])\n model_cost = FabolasModel(X_init=X_init, Y_init=cost_init, s_min=self.budgets[0], s_max=self.budgets[-1])\n\n if self.optimizer_settings[\"marginalize_hypers\"]:\n acquisition_generator = lambda model: ContinuousFidelityEntropySearch(\n model=model_objective, space=extended_space,\n target_fidelity_index=len(extended_space.parameters) - 1)\n entropy_search = IntegratedHyperParameterAcquisition(model_objective, acquisition_generator)\n else:\n entropy_search = ContinuousFidelityEntropySearch(\n model=model_objective, space=extended_space,\n target_fidelity_index=len(extended_space.parameters) - 1)\n\n acquisition = acquisition_per_expected_cost(entropy_search, model_cost)\n acquisition_optimizer = RandomSearchAcquisitionOptimizer(\n extended_space, num_eval_points=self.mtbo_settings[\"num_eval_points\"])\n _log.debug(\"MTBO acquisition function ready.\")\n # --------------------- --------------------- --------------------- --------------------- ---------------- #\n\n # Define the properties of the BO loop within which the chosen surrogate model (FABOLAS) and acquisition\n # function (MUMBO) are used for performing BO. 
Same as the FABOLAS example code.\n self.optimizer = CostSensitiveBayesianOptimizationLoop(\n space=extended_space, model_objective=model_objective, model_cost=model_cost, acquisition=acquisition,\n update_interval=self.optimizer_settings[\"update_interval\"], acquisition_optimizer=acquisition_optimizer)\n\n _log.info(\"FABOLAS optimizer with MTBO acquisition initialized and ready to run.\")\n\n def setup(self):\n pass\n\n def run(self) -> Path:\n _log.info(\"Starting FABOLAS optimizer.\")\n\n # emukit does not expose any interface for setting a random seed any other way, so we reset the global seed here\n # Generating a new random number from the seed ensures that, for compatible versions of the numpy.random module,\n # the seeds remain predictable while still handling seed=None in a consistent manner.\n np.random.seed(standard_rng_init(self.rng).randint(0, 1_000_000))\n self._setup_model()\n self.optimizer.run_loop(UserFunctionWrapper(self.benchmark_caller, extra_output_names=[\"cost\"]),\n emukit_utils.InfiniteStoppingCondition())\n _log.info(\"FABOLAS optimizer finished.\")\n return self.output_dir\n\n\n# noinspection PyPep8Naming\nclass FabolasModelMumboWrapper(FabolasModel):\n \"\"\" A wrapper that allows MUMBO to properly interface with FabolasModel instances on account of their different\n treatments of the fidelity parameter. Essentially, a MUMBO acquisition will always call the predict() method\n while passing the fidelity value as an index in [0, N-1] for N fidelity sources and expects the underlying model to\n appropriately handle the indices. FabolasModel, on the other hand, expects the predict() call to receive a dataset\n size integer as the last column of the input matrix. This gap is bridged using this wrapper. \"\"\"\n\n def __init__(self, budgets: Sequence[int], X_init: np.ndarray, **kwargs):\n # Initialize an actual FabolasModel instance\n self.budgets = np.asarray(budgets)\n super(FabolasModelMumboWrapper, self).__init__(self._idx_to_budgets(X_init), **kwargs)\n\n @property\n def X(self):\n return self._budgets_to_idx(super(FabolasModelMumboWrapper, self).X)\n\n def _budgets_to_idx(self, X):\n \"\"\" Given inputs that contain values from self.budgets as the fidelity values (last column), returns the\n same inputs with the values replaced by corresponding indices from self.budgets. \"\"\"\n X_ = np.array(X, copy=True)\n X_[:, -1] = np.clip(np.searchsorted(self.budgets, X_[:, -1]), a_min=0, a_max=self.budgets.shape[0] - 1)\n return X_\n\n def _idx_to_budgets(self, X):\n \"\"\" Given inputs that contain array indices of self.budgets as the fidelity values (last column), returns the\n same inputs with the indices replaced by corresponding values from self.budgets. 
\"\"\"\n X_ = np.array(X, copy=True)\n X_[:, -1] = self.budgets[np.asarray(X[:, -1], int)]\n return X_\n\n def set_data(self, X, Y):\n return super(FabolasModelMumboWrapper, self).set_data(self._idx_to_budgets(X), Y)\n\n def predict(self, X):\n return super(FabolasModelMumboWrapper, self).predict(self._idx_to_budgets(X))\n\n def predict_covariance(self, X: np.ndarray, with_noise: bool = True) -> np.ndarray:\n return super(FabolasModelMumboWrapper, self).predict_covariance(self._idx_to_budgets(X), with_noise)\n\n def predict_with_full_covariance(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return super(FabolasModelMumboWrapper, self).predict_with_full_covariance(self._idx_to_budgets(X))\n\n def get_covariance_between_points(self, X1: np.ndarray, X2: np.ndarray) -> np.ndarray:\n return super(FabolasModelMumboWrapper, self).get_covariance_between_points(\n self._idx_to_budgets(X1),\n self._idx_to_budgets(X2)\n )\n\n def get_prediction_gradients(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return super(FabolasModelMumboWrapper, self).get_prediction_gradients(self._idx_to_budgets(X))\n\n def get_joint_prediction_gradients(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return super(FabolasModelMumboWrapper, self).get_joint_prediction_gradients(self._idx_to_budgets(X))\n\n def calculate_variance_reduction(self, x_train_new: np.ndarray, x_test: np.ndarray) -> np.ndarray:\n return super(FabolasModelMumboWrapper, self).calculate_variance_reduction(\n self._idx_to_budgets(x_train_new),\n self._idx_to_budgets(x_test)\n )\n" ]
[ [ "numpy.expand_dims", "numpy.abs", "numpy.clip", "numpy.asarray", "numpy.arange", "numpy.tile", "numpy.concatenate", "numpy.searchsorted", "numpy.array" ] ]
mananeau/ALBERT
[ "4409420b7aa3cd355078689e4963d8ad11000ee3" ]
[ "tokenization.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python2, python3\n# coding=utf-8\n\"\"\"Tokenization classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport unicodedata\nimport six\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\nimport tensorflow_hub as hub\nimport sentencepiece as spm\n\nSPIECE_UNDERLINE = u\"▁\".encode(\"utf-8\")\n\n\ndef preprocess_text(inputs, remove_space=True, lower=False):\n \"\"\"preprocess data by removing extra space and normalize data.\"\"\"\n outputs = inputs\n if remove_space:\n outputs = \" \".join(inputs.strip().split())\n\n if six.PY2 and isinstance(outputs, str):\n try:\n outputs = six.ensure_text(outputs, \"utf-8\")\n except UnicodeDecodeError:\n outputs = six.ensure_text(outputs, \"latin-1\")\n\n outputs = unicodedata.normalize(\"NFKD\", outputs)\n outputs = \"\".join([c for c in outputs if not unicodedata.combining(c)])\n if lower:\n outputs = outputs.lower()\n\n return outputs\n\n\ndef encode_pieces(sp_model, text, return_unicode=True, sample=False):\n \"\"\"turn sentences into word pieces.\"\"\"\n\n if six.PY2 and isinstance(text, six.text_type):\n text = six.ensure_binary(text, \"utf-8\")\n\n if not sample:\n pieces = sp_model.EncodeAsPieces(text)\n else:\n pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)\n new_pieces = []\n for piece in pieces:\n piece = printable_text(piece)\n if len(piece) > 1 and piece[-1] == \",\" and piece[-2].isdigit():\n cur_pieces = sp_model.EncodeAsPieces(\n six.ensure_binary(piece[:-1]).replace(SPIECE_UNDERLINE, b\"\"))\n if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:\n if len(cur_pieces[0]) == 1:\n cur_pieces = cur_pieces[1:]\n else:\n cur_pieces[0] = cur_pieces[0][1:]\n cur_pieces.append(piece[-1])\n new_pieces.extend(cur_pieces)\n else:\n new_pieces.append(piece)\n\n # note(zhiliny): convert back to unicode for py2\n if six.PY2 and return_unicode:\n ret_pieces = []\n for piece in new_pieces:\n if isinstance(piece, str):\n piece = six.ensure_text(piece, \"utf-8\")\n ret_pieces.append(piece)\n new_pieces = ret_pieces\n\n return new_pieces\n\n\ndef encode_ids(sp_model, text, sample=False):\n pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample)\n ids = [sp_model.PieceToId(piece) for piece in pieces]\n return ids\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return six.ensure_text(text, \"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return six.ensure_text(text, \"utf-8\", \"ignore\")\n elif isinstance(text, six.text_type):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n 
raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return six.ensure_text(text, \"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, six.text_type):\n return six.ensure_binary(text, \"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip().split()[0] if token.strip() else \" \"\n if token not in vocab:\n vocab[token] = len(vocab)\n return vocab\n\n\ndef convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return output\n\n\ndef convert_tokens_to_ids(vocab, tokens):\n return convert_by_vocab(vocab, tokens)\n\n\ndef convert_ids_to_tokens(inv_vocab, ids):\n return convert_by_vocab(inv_vocab, ids)\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n\n\nclass FullTokenizer(object):\n \"\"\"Runs end-to-end tokenziation.\"\"\"\n\n def __init__(self, vocab_file, do_lower_case=True, spm_model_file=None):\n self.vocab = None\n self.sp_model = None\n if spm_model_file:\n self.sp_model = spm.SentencePieceProcessor()\n tf.logging.info(\"loading sentence piece model\")\n self.sp_model.Load(spm_model_file)\n # Note(mingdachen): For the purpose of consisent API, we are\n # generating a vocabulary for the sentence piece tokenizer.\n self.vocab = {self.sp_model.IdToPiece(i): i for i\n in range(self.sp_model.GetPieceSize())}\n else:\n self.vocab = load_vocab(vocab_file)\n self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n self.inv_vocab = {v: k for k, v in self.vocab.items()}\n\n @classmethod\n def from_scratch(cls, vocab_file, do_lower_case, spm_model_file):\n return FullTokenizer(vocab_file, do_lower_case, spm_model_file)\n\n @classmethod\n def from_hub_module(cls, hub_module, use_spm=True):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n albert_module = hub.Module(hub_module)\n tokenization_info = albert_module(signature=\"tokenization_info\",\n as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run(\n [tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n if use_spm:\n spm_model_file = vocab_file\n vocab_file = None\n return FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case,\n spm_model_file=spm_model_file)\n\n def tokenize(self, text):\n if self.sp_model:\n split_tokens = encode_pieces(self.sp_model, text, return_unicode=False)\n else:\n split_tokens = []\n for token in 
self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n\n def convert_tokens_to_ids(self, tokens):\n if self.sp_model:\n tf.logging.info(\"using sentence piece tokenzier.\")\n return [self.sp_model.PieceToId(\n printable_text(token)) for token in tokens]\n else:\n return convert_by_vocab(self.vocab, tokens)\n\n def convert_ids_to_tokens(self, ids):\n if self.sp_model:\n tf.logging.info(\"using sentence piece tokenzier.\")\n return [self.sp_model.IdToPiece(id_) for id_ in ids]\n else:\n return convert_by_vocab(self.inv_vocab, ids)\n\n\nclass BasicTokenizer(object):\n \"\"\"Runs basic tokenization (punctuation splitting, lower casing, etc.).\"\"\"\n\n def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n\nclass WordpieceTokenizer(object):\n \"\"\"Runs WordPiece tokenziation.\"\"\"\n\n def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=200):\n self.vocab = vocab\n self.unk_token = unk_token\n self.max_input_chars_per_word = max_input_chars_per_word\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + six.ensure_str(substr)\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens\n\n\ndef _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False\n\n\ndef _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = 
unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False\n" ]
[ [ "tensorflow.compat.v1.gfile.GFile", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.Session" ] ]
cmccully/astro-scrappy
[ "3ed58dd537e40efed983ca049602af6e3e9f5ce7" ]
[ "astroscrappy/tests/test_utils.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom ..utils import (median, optmed3, optmed5, optmed7, optmed9, optmed25,\n medfilt3, medfilt5, medfilt7, sepmedfilt3, sepmedfilt5,\n sepmedfilt7, sepmedfilt9, dilate3, dilate5, subsample,\n rebin, laplaceconvolve, convolve)\n\nfrom scipy.ndimage.morphology import binary_dilation\nfrom scipy import ndimage\n\n\ndef test_median():\n a = np.ascontiguousarray(np.random.random(1001)).astype('f4')\n assert np.float32(np.median(a)) == np.float32(median(a, 1001))\n\n\ndef test_optmed3():\n a = np.ascontiguousarray(np.random.random(3)).astype('f4')\n assert np.float32(np.median(a)) == np.float32(optmed3(a))\n\n\ndef test_optmed5():\n a = np.ascontiguousarray(np.random.random(5)).astype('f4')\n assert np.float32(np.median(a)) == np.float32(optmed5(a))\n\n\ndef test_optmed7():\n a = np.ascontiguousarray(np.random.random(7)).astype('f4')\n assert np.float32(np.median(a)) == np.float32(optmed7(a))\n\n\ndef test_optmed9():\n a = np.ascontiguousarray(np.random.random(9)).astype('f4')\n assert np.float32(np.median(a)) == np.float32(optmed9(a))\n\n\ndef test_optmed25():\n a = np.ascontiguousarray(np.random.random(25)).astype('f4')\n assert np.float32(np.median(a)) == np.float32(optmed25(a))\n\n\ndef test_medfilt3():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npmed3 = ndimage.filters.median_filter(a, size=(3, 3), mode='nearest')\n npmed3[:1, :] = a[:1, :]\n npmed3[-1:, :] = a[-1:, :]\n npmed3[:, :1] = a[:, :1]\n npmed3[:, -1:] = a[:, -1:]\n\n med3 = medfilt3(a)\n assert np.all(med3 == npmed3)\n\n\ndef test_medfilt5():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npmed5 = ndimage.filters.median_filter(a, size=(5, 5), mode='nearest')\n npmed5[:2, :] = a[:2, :]\n npmed5[-2:, :] = a[-2:, :]\n npmed5[:, :2] = a[:, :2]\n npmed5[:, -2:] = a[:, -2:]\n\n med5 = medfilt5(a)\n assert np.all(med5 == npmed5)\n\n\ndef test_medfilt7():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npmed7 = ndimage.filters.median_filter(a, size=(7, 7), mode='nearest')\n npmed7[:3, :] = a[:3, :]\n npmed7[-3:, :] = a[-3:, :]\n npmed7[:, :3] = a[:, :3]\n npmed7[:, -3:] = a[:, -3:]\n\n med7 = medfilt7(a)\n assert np.all(med7 == npmed7)\n\n\ndef test_sepmedfilt3():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npmed3 = ndimage.filters.median_filter(a, size=(1, 3), mode='nearest')\n npmed3[:, :1] = a[:, :1]\n npmed3[:, -1:] = a[:, -1:]\n npmed3 = ndimage.filters.median_filter(npmed3, size=(3, 1), mode='nearest')\n npmed3[:1, :] = a[:1, :]\n npmed3[-1:, :] = a[-1:, :]\n npmed3[:, :1] = a[:, :1]\n npmed3[:, -1:] = a[:, -1:]\n\n med3 = sepmedfilt3(a)\n assert np.all(med3 == npmed3)\n\n\ndef test_sepmedfilt5():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npmed5 = ndimage.filters.median_filter(a, size=(1, 5), mode='nearest')\n npmed5[:, :2] = a[:, :2]\n npmed5[:, -2:] = a[:, -2:]\n npmed5 = ndimage.filters.median_filter(npmed5, size=(5, 1), mode='nearest')\n npmed5[:2, :] = a[:2, :]\n npmed5[-2:, :] = a[-2:, :]\n npmed5[:, :2] = a[:, :2]\n npmed5[:, -2:] = a[:, -2:]\n\n med5 = sepmedfilt5(a)\n assert np.all(med5 == npmed5)\n\n\ndef test_sepmedfilt7():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npmed7 = ndimage.filters.median_filter(a, size=(1, 7), 
mode='nearest')\n npmed7[:, :3] = a[:, :3]\n npmed7[:, -3:] = a[:, -3:]\n npmed7 = ndimage.filters.median_filter(npmed7, size=(7, 1), mode='nearest')\n npmed7[:3, :] = a[:3, :]\n npmed7[-3:, :] = a[-3:, :]\n npmed7[:, :3] = a[:, :3]\n npmed7[:, -3:] = a[:, -3:]\n\n med7 = sepmedfilt7(a)\n assert np.all(med7 == npmed7)\n\n\ndef test_sepmedfilt9():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npmed9 = ndimage.filters.median_filter(a, size=(1, 9), mode='nearest')\n npmed9[:, :4] = a[:, :4]\n npmed9[:, -4:] = a[:, -4:]\n npmed9 = ndimage.filters.median_filter(npmed9, size=(9, 1), mode='nearest')\n npmed9[:4, :] = a[:4, :]\n npmed9[-4:, :] = a[-4:, :]\n npmed9[:, :4] = a[:, :4]\n npmed9[:, -4:] = a[:, -4:]\n\n med9 = sepmedfilt9(a)\n assert np.all(med9 == npmed9)\n\n\ndef test_dilate5():\n # Put 5% of the pixels into a mask\n a = np.zeros((1001, 1001), dtype=np.bool)\n a[np.random.random((1001, 1001)) < 0.05] = True\n kernel = np.ones((5, 5))\n kernel[0, 0] = 0\n kernel[0, 4] = 0\n kernel[4, 0] = 0\n kernel[4, 4] = 0\n # Make a zero padded array for the numpy version to operate\n paddeda = np.zeros((1005, 1005), dtype=np.bool)\n paddeda[2:-2, 2:-2] = a[:, :]\n npdilate = binary_dilation(np.ascontiguousarray(paddeda),\n structure=kernel, iterations=2)\n cdilate = dilate5(a, 2)\n\n assert np.all(npdilate[2:-2, 2:-2] == cdilate)\n\n\ndef test_dilate3():\n # Put 5% of the pixels into a mask\n a = np.zeros((1001, 1001), dtype=np.bool)\n a[np.random.random((1001, 1001)) < 0.05] = True\n kernel = np.ones((3, 3))\n npgrow = binary_dilation(np.ascontiguousarray(a),\n structure=kernel, iterations=1)\n cgrow = dilate3(a)\n npgrow[:, 0] = a[:, 0]\n npgrow[:, -1] = a[:, -1]\n npgrow[0, :] = a[0, :]\n npgrow[-1, :] = a[-1, :]\n assert np.all(npgrow == cgrow)\n\n\ndef test_subsample():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n npsubsamp = np.zeros((a.shape[0] * 2, a.shape[1] * 2), dtype=np.float32)\n for i in range(a.shape[0]):\n for j in range(a.shape[1]):\n npsubsamp[2 * i, 2 * j] = a[i, j]\n npsubsamp[2 * i + 1, 2 * j] = a[i, j]\n npsubsamp[2 * i, 2 * j + 1] = a[i, j]\n npsubsamp[2 * i + 1, 2 * j + 1] = a[i, j]\n\n csubsamp = subsample(a)\n assert np.all(npsubsamp == csubsamp)\n\n\ndef test_rebin():\n a = np.ascontiguousarray(np.random.random((2002, 2002)), dtype=np.float32)\n a = a.astype('f4')\n nprebin = np.zeros((1001, 1001), dtype=np.float32).astype('f4')\n for i in range(1001):\n for j in range(1001):\n nprebin[i, j] = (a[2 * i, 2 * j] + a[2 * i + 1, 2 * j] +\n a[2 * i, 2 * j + 1] + a[2 * i + 1, 2 * j + 1])\n nprebin[i, j] /= np.float32(4.0)\n crebin = rebin(a)\n\n assert_allclose(crebin, nprebin, rtol=0, atol=1.e-6)\n\n\ndef test_laplaceconvolve():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n k = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])\n k = k.astype('<f4')\n npconv = ndimage.filters.convolve(a, k, mode='constant', cval=0.0)\n cconv = laplaceconvolve(a)\n assert_allclose(npconv, cconv, rtol=0.0, atol=1e-6)\n\n\ndef test_convolve():\n a = np.ascontiguousarray(np.random.random((1001, 1001))).astype('f4')\n k = np.ascontiguousarray(np.random.random((5, 5))).astype('f4')\n npconv = ndimage.filters.convolve(a, k, mode='constant', cval=0.0)\n cconv = convolve(a, k)\n assert_allclose(cconv, npconv, rtol=0, atol=1e-5)\n" ]
[ [ "scipy.ndimage.filters.convolve", "numpy.random.random", "numpy.ascontiguousarray", "numpy.median", "numpy.ones", "numpy.all", "numpy.float32", "numpy.testing.assert_allclose", "numpy.array", "scipy.ndimage.filters.median_filter", "numpy.zeros" ] ]
ccuetom/devito
[ "3bd907bed50eff8608e36d83b92c706685a7d275" ]
[ "tests/test_derivatives.py" ]
[ "import numpy as np\nimport pytest\nfrom sympy import simplify, diff, Float\n\nfrom devito import (Grid, Function, TimeFunction, Eq, Operator, NODE, cos, sin,\n ConditionalDimension, left, right, centered, div, grad)\nfrom devito.finite_differences import Derivative, Differentiable\nfrom devito.finite_differences.differentiable import EvalDerivative\nfrom devito.symbolics import indexify, retrieve_indexed\n\n_PRECISION = 9\n\n\ndef x(grid):\n return grid.dimensions[0]\n\n\ndef y(grid):\n return grid.dimensions[1]\n\n\ndef z(grid):\n return grid.dimensions[2]\n\n\ndef t(grid):\n return grid.stepping_dim\n\n\nclass TestFD(object):\n \"\"\"\n Class for finite difference testing.\n Tests the accuracy w.r.t polynomials.\n Test that the shortcut produce the same answer as the FD functions.\n \"\"\"\n\n def setup_method(self):\n self.shape = (20, 20, 20)\n self.grid = Grid(self.shape)\n\n def test_diff(self):\n \"\"\"Test that expr.diff returns an object of type devito.Derivative.\"\"\"\n u = Function(name='u', grid=self.grid)\n du = u.diff(x(self.grid))\n assert isinstance(du, Derivative)\n\n @pytest.mark.parametrize('so', [2, 3, 4, 5])\n def test_fd_indices(self, so):\n \"\"\"\n Test that shifted derivative have Integer offset after indexification.\n \"\"\"\n grid = Grid((10,))\n x = grid.dimensions[0]\n x0 = x + .5 * x.spacing\n u = Function(name=\"u\", grid=grid, space_order=so)\n dx = indexify(u.dx(x0=x0).evaluate)\n for f in retrieve_indexed(dx):\n assert len(f.indices[0].atoms(Float)) == 0\n\n @pytest.mark.parametrize('SymbolType, dim', [\n (Function, x), (Function, y),\n (TimeFunction, x), (TimeFunction, y), (TimeFunction, t),\n ])\n def test_stencil_derivative(self, SymbolType, dim):\n \"\"\"Test symbolic behaviour when expanding stencil derivatives\"\"\"\n i = dim(self.grid)\n u = SymbolType(name='u', grid=self.grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == self.shape and u_dii.grid.shape == self.shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))\n\n @pytest.mark.parametrize('SymbolType, derivative, dim, expected', [\n (Function, ['dx2'], 3, 'Derivative(u(x, y, z), (x, 2))'),\n (Function, ['dx2dy'], 3, 'Derivative(u(x, y, z), (x, 2), y)'),\n (Function, ['dx2dydz'], 3, 'Derivative(u(x, y, z), (x, 2), y, z)'),\n (Function, ['dx2', 'dy'], 3, 'Derivative(Derivative(u(x, y, z), (x, 2)), y)'),\n (Function, ['dx2dy', 'dz2'], 3,\n 'Derivative(Derivative(u(x, y, z), (x, 2), y), (z, 2))'),\n (TimeFunction, ['dx2'], 3, 'Derivative(u(t, x, y, z), (x, 2))'),\n (TimeFunction, ['dx2dy'], 3, 'Derivative(u(t, x, y, z), (x, 2), y)'),\n (TimeFunction, ['dx2', 'dy'], 3,\n 'Derivative(Derivative(u(t, x, y, z), (x, 2)), y)'),\n (TimeFunction, ['dx', 'dy', 'dx2', 'dz', 'dydz'], 3,\n 'Derivative(Derivative(Derivative(Derivative(Derivative(u(t, x, y, z), x), y),' +\n ' (x, 2)), z), y, z)')\n ])\n def test_unevaluation(self, SymbolType, derivative, dim, expected):\n u = SymbolType(name='u', grid=self.grid, 
time_order=2, space_order=2)\n expr = getattr(u, derivative[0])\n for d in derivative[1:]:\n expr = getattr(expr, d)\n assert(expr.__str__() == expected)\n # Make sure the FD evaluation executes\n expr.evaluate\n\n @pytest.mark.parametrize('expr,expected', [\n ('u.dx + u.dy', 'Derivative(u, x) + Derivative(u, y)'),\n ('u.dxdy', 'Derivative(u, x, y)'),\n ('u.laplace',\n 'Derivative(u, (x, 2)) + Derivative(u, (y, 2)) + Derivative(u, (z, 2))'),\n ('(u.dx + u.dy).dx', 'Derivative(Derivative(u, x) + Derivative(u, y), x)'),\n ('((u.dx + u.dy).dx + u.dxdy).dx',\n 'Derivative(Derivative(Derivative(u, x) + Derivative(u, y), x) +' +\n ' Derivative(u, x, y), x)'),\n ('(u**4).dx', 'Derivative(u**4, x)'),\n ('(u/4).dx', 'Derivative(u/4, x)'),\n ('((u.dx + v.dy).dx * v.dx).dy.dz',\n 'Derivative(Derivative(Derivative(Derivative(u, x) + Derivative(v, y), x) *' +\n ' Derivative(v, x), y), z)')\n ])\n def test_arithmetic(self, expr, expected):\n x, y, z = self.grid.dimensions\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n v = Function(name='v', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n expected = eval(expected)\n assert expr == expected\n\n @pytest.mark.parametrize('expr, rules', [\n ('u.dx + u.dy', '{u.indices[0]: 1, u.indices[1]: 0}'),\n ('u.dxdy - u.dxdz', '{u.indices[0]: u.indices[0] + u.indices[0].spacing,' +\n 'u.indices[1]: 0, u.indices[2]: u.indices[1]}'),\n ('u.dx2dy + u.dz ', '{u.indices[0]: u.indices[0] + u.indices[0].spacing,' +\n 'u.indices[2]: u.indices[2] - 10}'),\n ])\n def test_derivative_eval_at(self, expr, rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n rules = eval(rules)\n assert expr.evaluate.xreplace(rules) == expr.xreplace(rules).evaluate\n\n @pytest.mark.parametrize('expr, rules', [\n ('u.dx', '{u.indices[0]: 1}'),\n ('u.dy', '{u.indices[1]: u.indices[2] - 7}'),\n ('u.dz', '{u.indices[2]: u.indices[0] + u.indices[1].spacing}'),\n ])\n def test_derivative_eval_at_expr(self, expr, rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n rules = eval(rules)\n assert expr.evaluate.xreplace(rules) == expr.xreplace(rules).evaluate\n assert expr.expr == expr.xreplace(rules).expr\n\n @pytest.mark.parametrize('expr, composite_rules', [\n ('u.dx', '[{u.indices[0]: 1}, {1: 4}]'),\n ])\n def test_derivative_eval_at_composite(self, expr, composite_rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n evaluated_expr = expr.evaluate\n composite_rules = eval(composite_rules)\n for mapper in composite_rules:\n evaluated_expr = evaluated_expr.xreplace(mapper)\n expr = expr.xreplace(mapper)\n assert evaluated_expr == expr.evaluate\n\n @pytest.mark.parametrize('SymbolType, derivative, dim', [\n (Function, 'dx2', 3), (Function, 'dy2', 3),\n (TimeFunction, 'dx2', 3), (TimeFunction, 'dy2', 3), (TimeFunction, 'dt', 2)\n ])\n def test_preformed_derivatives(self, SymbolType, derivative, dim):\n \"\"\"Test the stencil expressions provided by devito objects\"\"\"\n u = SymbolType(name='u', grid=self.grid, time_order=2, space_order=2)\n expr = getattr(u, derivative)\n assert(len(expr.evaluate.args) == dim)\n\n @pytest.mark.parametrize('derivative, dim', [\n ('dx', x), ('dy', y), ('dz', z)\n ])\n @pytest.mark.parametrize('order', [1, 2, 4, 6, 8, 10, 12, 14, 16])\n def test_derivatives_space(self, derivative, dim, order):\n \"\"\"Test first derivative expressions against native sympy\"\"\"\n 
dim = dim(self.grid)\n u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)\n expr = getattr(u, derivative).evaluate\n # Establish native sympy derivative expression\n width = int(order / 2)\n if order <= 2:\n indices = [dim, dim + dim.spacing]\n else:\n indices = [(dim + i * dim.spacing) for i in range(-width, width + 1)]\n\n s_expr = u.diff(dim).as_finite_difference(indices).evalf(_PRECISION)\n assert(simplify(expr - s_expr) == 0) # Symbolic equality\n assert type(expr) == EvalDerivative\n expr1 = s_expr.func(*expr.args)\n assert(expr1 == s_expr) # Exact equality\n\n @pytest.mark.parametrize('derivative, dim', [\n ('dx2', x), ('dy2', y), ('dz2', z)\n ])\n @pytest.mark.parametrize('order', [2, 4, 6, 8, 10, 12, 14, 16])\n def test_second_derivatives_space(self, derivative, dim, order):\n \"\"\"\n Test second derivative expressions against native sympy.\n \"\"\"\n dim = dim(self.grid)\n u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)\n expr = getattr(u, derivative).evaluate\n # Establish native sympy derivative expression\n width = int(order / 2)\n indices = [(dim + i * dim.spacing) for i in range(-width, width + 1)]\n s_expr = u.diff(dim, dim).as_finite_difference(indices).evalf(_PRECISION)\n assert(simplify(expr - s_expr) == 0) # Symbolic equality\n assert type(expr) == EvalDerivative\n expr1 = s_expr.func(*expr.args)\n assert(expr1 == s_expr) # Exact equality\n\n @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])\n # Only test x and t as y and z are the same as x\n @pytest.mark.parametrize('derivative', ['dx', 'dxl', 'dxr', 'dx2'])\n def test_fd_space(self, derivative, space_order):\n \"\"\"\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p.\n \"\"\"\n # dummy axis dimension\n nx = 100\n xx = np.linspace(-1, 1, nx)\n dx = xx[1] - xx[0]\n # Symbolic data\n grid = Grid(shape=(nx,), dtype=np.float32)\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=space_order)\n du = Function(name=\"du\", grid=grid, space_order=space_order)\n # Define polynomial with exact fd\n coeffs = np.ones((space_order,), dtype=np.float32)\n polynome = sum([coeffs[i]*x**i for i in range(0, space_order)])\n polyvalues = np.array([polynome.subs(x, xi) for xi in xx], np.float32)\n # Fill original data with the polynomial values\n u.data[:] = polyvalues\n # True derivative of the polynome\n Dpolynome = diff(diff(polynome)) if derivative == 'dx2' else diff(polynome)\n Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)\n # FD derivative, symbolic\n u_deriv = getattr(u, derivative)\n # Compute numerical FD\n stencil = Eq(du, u_deriv)\n op = Operator(stencil, subs={x.spacing: dx})\n op.apply()\n\n # Check exactness of the numerical derivative except inside space_brd\n space_border = space_order\n error = abs(du.data[space_border:-space_border] -\n Dpolyvalues[space_border:-space_border])\n assert np.isclose(np.mean(error), 0., atol=1e-3)\n\n @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])\n @pytest.mark.parametrize('stagger', [centered, left, right])\n # Only test x and t as y and z are the same as x\n def test_fd_space_staggered(self, space_order, stagger):\n \"\"\"\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p\n 
\"\"\"\n # dummy axis dimension\n nx = 101\n xx = np.linspace(-1, 1, nx)\n dx = xx[1] - xx[0]\n # Symbolic data\n grid = Grid(shape=(nx,), dtype=np.float32)\n x = grid.dimensions[0]\n\n # Location of the staggered function\n if stagger == left:\n off = -.5\n side = -x\n xx2 = xx + off * dx\n elif stagger == right:\n off = .5\n side = x\n xx2 = xx + off * dx\n else:\n side = NODE\n xx2 = xx\n\n u = Function(name=\"u\", grid=grid, space_order=space_order, staggered=side)\n du = Function(name=\"du\", grid=grid, space_order=space_order, staggered=side)\n # Define polynomial with exact fd\n coeffs = np.ones((space_order-1,), dtype=np.float32)\n polynome = sum([coeffs[i]*x**i for i in range(0, space_order-1)])\n polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)\n # Fill original data with the polynomial values\n u.data[:] = polyvalues\n # True derivative of the polynome\n Dpolynome = diff(polynome)\n Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx2], np.float32)\n # Compute numerical FD\n stencil = Eq(du, u.dx)\n op = Operator(stencil, subs={x.spacing: dx})\n op.apply()\n\n # Check exactness of the numerical derivative except inside space_brd\n space_border = space_order\n error = abs(du.data[space_border:-space_border] -\n Dpolyvalues[space_border:-space_border])\n\n assert np.isclose(np.mean(error), 0., atol=1e-3)\n\n @pytest.mark.parametrize('so', [2, 4, 6, 8])\n def test_fd_new_order(self, so):\n grid = Grid((10,))\n u = Function(name=\"u\", grid=grid, space_order=so)\n u1 = Function(name=\"u\", grid=grid, space_order=so//2)\n u2 = Function(name=\"u\", grid=grid, space_order=2*so)\n assert str(u.dx(fd_order=so//2).evaluate) == str(u1.dx.evaluate)\n assert str(u.dx(fd_order=2*so).evaluate) == str(u2.dx.evaluate)\n\n def test_fd_new_side(self):\n grid = Grid((10,))\n u = Function(name=\"u\", grid=grid, space_order=4)\n assert u.dx(side=left).evaluate == u.dxl.evaluate\n assert u.dx(side=right).evaluate == u.dxr.evaluate\n assert u.dxl(side=centered).evaluate == u.dx.evaluate\n\n @pytest.mark.parametrize('so, expected', [\n (2, '1.0*u(x)/h_x - 1.0*u(x - 1.0*h_x)/h_x'),\n (4, '1.125*u(x)/h_x + 0.0416666667*u(x - 2.0*h_x)/h_x - '\n '1.125*u(x - 1.0*h_x)/h_x - 0.0416666667*u(x + 1.0*h_x)/h_x'),\n (6, '1.171875*u(x)/h_x - 0.0046875*u(x - 3.0*h_x)/h_x + '\n '0.0651041667*u(x - 2.0*h_x)/h_x - 1.171875*u(x - 1.0*h_x)/h_x - '\n '0.0651041667*u(x + 1.0*h_x)/h_x + 0.0046875*u(x + 2.0*h_x)/h_x'),\n (8, '1.19628906*u(x)/h_x + 0.000697544643*u(x - 4.0*h_x)/h_x - '\n '0.0095703125*u(x - 3.0*h_x)/h_x + 0.0797526042*u(x - 2.0*h_x)/h_x - '\n '1.19628906*u(x - 1.0*h_x)/h_x - 0.0797526042*u(x + 1.0*h_x)/h_x + '\n '0.0095703125*u(x + 2.0*h_x)/h_x - 0.000697544643*u(x + 3.0*h_x)/h_x')])\n def test_fd_new_x0(self, so, expected):\n grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=so)\n assert u.dx(x0=x + x.spacing).evaluate == u.dx.evaluate.subs({x: x + x.spacing})\n assert u.dx(x0=x - x.spacing).evaluate == u.dx.evaluate.subs({x: x - x.spacing})\n # half shifted compare to explicit coeffs (Forneberg)\n assert str(u.dx(x0=x - .5 * x.spacing).evaluate) == expected\n\n def test_new_x0_eval_at(self):\n \"\"\"\n Make sure that explicitly set x0 does not get overwritten by eval_at.\n \"\"\"\n grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=2)\n v = Function(name=\"v\", grid=grid, space_order=2)\n assert u.dx(x0=x - x.spacing/2)._eval_at(v).x0 == {x: x - x.spacing/2}\n\n def test_fd_new_lo(self):\n 
grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=2)\n\n dplus = \"-1.0*u(x)/h_x + 1.0*u(x + 1.0*h_x)/h_x\"\n dminus = \"1.0*u(x)/h_x - 1.0*u(x - 1.0*h_x)/h_x\"\n assert str(u.dx(x0=x + .5 * x.spacing).evaluate) == dplus\n assert str(u.dx(x0=x - .5 * x.spacing).evaluate) == dminus\n assert str(u.dx(x0=x + .5 * x.spacing, fd_order=1).evaluate) == dplus\n assert str(u.dx(x0=x - .5 * x.spacing, fd_order=1).evaluate) == dminus\n\n def test_subsampled_fd(self):\n \"\"\"\n Test that the symbolic interface is working for space subsampled\n functions.\n \"\"\"\n nt = 19\n grid = Grid(shape=(12, 12), extent=(11, 11))\n\n u = TimeFunction(name='u', grid=grid, save=nt, space_order=2)\n assert(grid.time_dim in u.indices)\n\n # Creates subsampled spatial dimensions and according grid\n dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)\n for d in u.grid.dimensions])\n grid2 = Grid((6, 6), dimensions=dims, extent=(10, 10))\n u2 = TimeFunction(name='u2', grid=grid2, save=nt, space_order=1)\n for i in range(nt):\n for j in range(u2.data_with_halo.shape[2]):\n u2.data_with_halo[i, :, j] = np.arange(u2.data_with_halo.shape[2])\n\n eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2.dx)]\n op = Operator(eqns)\n op.apply(time_M=nt-2)\n # Verify that u2[1, x,y]= du2/dx[0, x, y]\n\n assert np.allclose(u.data[-1], nt-1)\n assert np.allclose(u2.data[1], 0.5)\n\n @pytest.mark.parametrize('expr,expected', [\n ('f.dx', '-f(x)/h_x + f(x + h_x)/h_x'),\n ('f.dx + g.dx', '-f(x)/h_x + f(x + h_x)/h_x - g(x)/h_x + g(x + h_x)/h_x'),\n ('-f', '-f(x)'),\n ('-(f + g)', '-f(x) - g(x)')\n ])\n def test_shortcuts(self, expr, expected):\n grid = Grid(shape=(10,))\n f = Function(name='f', grid=grid) # noqa\n g = Function(name='g', grid=grid) # noqa\n\n expr = eval(expr)\n\n assert isinstance(expr, Differentiable)\n assert expected == str(expr.evaluate)\n\n @pytest.mark.parametrize('so', [2, 5, 8])\n def test_all_shortcuts(self, so):\n \"\"\"\n Test that verify that all fd shortcuts are functional.\n \"\"\"\n grid = Grid(shape=(10, 10, 10))\n f = Function(name='f', grid=grid, space_order=so)\n g = TimeFunction(name='g', grid=grid, space_order=so)\n\n for fd in f._fd:\n assert getattr(f, fd)\n\n for fd in g._fd:\n assert getattr(g, fd)\n\n @pytest.mark.parametrize('so', [2, 4, 8, 12])\n @pytest.mark.parametrize('ndim', [1, 2])\n @pytest.mark.parametrize('derivative, adjoint_name', [\n ('dx', 'dx'),\n ('dx2', 'dx2'),\n ('dxl', 'dxr'),\n ('dxr', 'dxl')])\n def test_fd_adjoint(self, so, ndim, derivative, adjoint_name):\n grid = Grid(shape=tuple([51]*ndim), extent=tuple([25]*ndim))\n x = grid.dimensions[0]\n f = Function(name='f', grid=grid, space_order=so)\n f_deriv = Function(name='f_deriv', grid=grid, space_order=so)\n g = Function(name='g', grid=grid, space_order=so)\n g_deriv = Function(name='g_deriv', grid=grid, space_order=so)\n\n # Fill f and g with smooth cos/sin\n Operator([Eq(g, x*cos(2*np.pi*x/5)), Eq(f, sin(2*np.pi*x/8))]).apply()\n # Check symbolic expression are expected ones for the adjoint .T\n deriv = getattr(f, derivative)\n coeff = 1 if derivative == 'dx2' else -1\n expected = coeff * getattr(f, derivative).evaluate.subs({x.spacing: -x.spacing})\n assert simplify(deriv.T.evaluate) == simplify(expected)\n\n # Compute numerical derivatives and verify dot test\n # i.e <f.dx, g> = <f, g.dx.T>\n\n eq_f = Eq(f_deriv, deriv)\n eq_g = Eq(g_deriv, getattr(g, derivative).T)\n\n op = Operator([eq_f, eq_g])\n op()\n\n a = np.dot(f_deriv.data.reshape(-1), 
g.data.reshape(-1))\n b = np.dot(g_deriv.data.reshape(-1), f.data.reshape(-1))\n assert np.isclose(1 - a/b, 0, atol=1e-5)\n\n @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3),\n ((.5, .5, .5), 3)])\n def test_shifted_div(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n df = div(f, shift=shift).evaluate\n ref = 0\n for i, d in enumerate(grid.dimensions):\n x0 = (None if shift is None else d + shift[i] * d.spacing if\n type(shift) is tuple else d + shift * d.spacing)\n ref += getattr(f, 'd%s' % d.name)(x0=x0)\n assert df == ref.evaluate\n\n @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3),\n ((.5, .5, .5), 3)])\n def test_shifted_grad(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n g = grad(f, shift=shift).evaluate\n for i, (d, gi) in enumerate(zip(grid.dimensions, g)):\n x0 = (None if shift is None else d + shift[i] * d.spacing if\n type(shift) is tuple else d + shift * d.spacing)\n assert gi == getattr(f, 'd%s' % d.name)(x0=x0).evaluate\n" ]
[ [ "numpy.allclose", "numpy.linspace", "numpy.arange", "numpy.ones", "numpy.mean", "numpy.isclose" ] ]
Fra98/fed-learning-AML
[ "13a4cb3f240ea6ef4340aaf07cf352c7fe075d89" ]
[ "src/fedAVG/server.py" ]
[ "from copy import deepcopy\nimport random\nimport numpy\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom .client import Client\nfrom ..models import *\nfrom ..utils import get_class_priors, load_cifar, run_accuracy, generate_clients_sizes\nfrom ..splits import indexes_split_IID, indexes_split_NON_IID\n\n\nclass Server:\n def __init__(self, device, data_config, model_config, optim_config, fed_config, logger=None):\n self.device = device\n self.clients = []\n\n # DATASET CONFIGURATION\n self.trainset = load_cifar(name=data_config[\"dataset_name\"], train=True)\n self.testset = load_cifar(name=data_config[\"dataset_name\"], train=False)\n self.trainset_size = len(self.trainset)\n self.testset_size = len(self.testset)\n self.num_classes = len(self.trainset.classes)\n self.class_priors = get_class_priors(self.num_classes, self.trainset.targets)\n self.global_batch_size = data_config[\"global_batch_size\"]\n self.std_client_samples = data_config[\"std_client_samples\"]\n self.IID = data_config[\"IID\"]\n self.logger = logger\n if not self.IID:\n self.alpha = data_config[\"alpha\"]\n\n # MODEL CONFIGURATION\n self.model_config = model_config\n self.optim_config = optim_config\n self.global_net = eval(model_config[\"net\"])(self.num_classes)\n\n # FEDERATED CONFIGURATION\n self.num_clients = fed_config[\"num_clients\"]\n self.avg_clients_rounds = fed_config[\"avg_clients_rounds\"]\n self.std_clients_rounds = fed_config[\"std_clients_rounds\"]\n self.num_rounds = fed_config[\"num_rounds\"]\n self.client_batch_size = fed_config[\"client_batch_size\"]\n self.local_epochs = fed_config[\"local_epochs\"]\n self.fed_IR = fed_config[\"fed_IR\"]\n self.fed_VC = fed_config[\"fed_VC\"]\n if self.fed_VC:\n self.virtual_client_size = self.trainset_size // self.num_clients\n else:\n self.virtual_client_size = None\n self.clients_weights = None\n\n def init_clients(self):\n # Define each client training size using gaussian distribution\n clients_sizes = generate_clients_sizes(self.trainset_size, self.num_clients, self.std_client_samples)\n self.logger.log(f\"Client samples sizes: {clients_sizes}, total: {numpy.sum(clients_sizes)}\")\n\n if self.IID:\n indexes = indexes_split_IID(self.num_clients, self.num_classes, self.trainset, clients_sizes)\n else:\n indexes = indexes_split_NON_IID(self.num_clients, self.num_classes, self.alpha, self.trainset, clients_sizes)\n\n for i in range(self.num_clients):\n trainset_i = torch.utils.data.Subset(self.trainset, indexes[i])\n client = Client(i, self.device, self.local_epochs, self.client_batch_size, trainset_i,\n model_config=self.model_config, optim_config=self.optim_config,\n server_class_priors=self.class_priors, virtual_client_size=self.virtual_client_size,\n logger=self.logger)\n self.clients.append(client)\n\n # Only for FedVC -> calculate clients weights based on how many samples they have \n if self.fed_VC:\n self.clients_weights = numpy.zeros((len(self.clients)))\n for i in range(len(self.clients)):\n self.clients_weights[i] = self.clients[i].trainset_size\n self.clients_weights = self.clients_weights / numpy.sum(self.clients_weights)\n\n def run_training(self, state_dict=None, round_num=0, print_acc=True):\n if len(self.clients) == 0:\n self.init_clients()\n\n self.global_net.to(self.device)\n if state_dict is not None:\n self.global_net.load_state_dict(state_dict)\n self.global_net.train()\n\n for _ in range(self.num_rounds):\n round_num += 1\n self.logger.log(f\"ROUND {round_num}\")\n\n # Save state at current round\n state_t = 
deepcopy(self.global_net.state_dict())\n\n # Get the selected clients for this round\n num_selected_clients = int(max(min(self.num_clients,\n random.gauss(self.avg_clients_rounds * self.num_clients,\n self.std_clients_rounds * self.num_clients)), 1))\n selected_clients = numpy.random.choice(self.clients, num_selected_clients, replace=False, p=self.clients_weights).tolist()\n selected_clients.sort(key=lambda x: x.id)\n num_samples = sum(c.trainset_size for c in selected_clients) # effective number of samples at current round\n\n if self.std_clients_rounds != 0:\n self.logger.log(f\"{num_selected_clients} clients selected\")\n\n # Run update on each client\n for client in selected_clients:\n client.client_update(state_t, fed_IR=self.fed_IR, fed_VC=self.fed_VC, print_acc=print_acc)\n\n # Calculate weighted accuracy of all clients (after clients updating, BEFORE averaging)\n if print_acc:\n self.logger.log(\"[BEFORE AVG]\")\n self.run_weighted_clients_accuracy()\n\n # AVERAGING\n old_state = deepcopy(self.global_net.state_dict())\n for client in selected_clients:\n if self.fed_VC:\n # for Fed_VC we use every time the same total amount of sample per client\n weight = 1 / len(selected_clients)\n else:\n weight = client.trainset_size / num_samples\n\n for key in self.global_net.state_dict().keys():\n old_tensor = old_state[key]\n new_tensor = client.net.state_dict()[key]\n delta = new_tensor - old_tensor\n self.global_net.state_dict()[key] += (weight * delta).type(old_tensor.type())\n\n # Calculate weighted accuracy of all clients (after clients updating, AFTER averaging)\n if print_acc:\n self.logger.log(\"[AFTER AVG]\")\n self.run_testing(train=True)\n\n def run_weighted_clients_accuracy(self, state_dict=None):\n accuracy = 0\n loss = 0\n for client in self.clients:\n client_accuracy, client_loss = client.train_accuracy(state_dict=state_dict)\n weight = client.trainset_size / self.trainset_size\n accuracy += weight * client_accuracy\n loss += weight * client_loss\n\n self.logger.log(f'Weighted Clients -> Train: Loss {loss:.3f} | Accuracy = {accuracy:.3f}')\n\n def run_testing(self, train=False):\n if train:\n dataset = self.trainset\n else:\n dataset = self.testset\n criterion = eval(self.model_config[\"criterion\"])()\n\n accuracy, loss = run_accuracy(device=self.device, dataset=dataset,\n batch_size=self.global_batch_size, net=self.global_net,\n criterion=criterion)\n\n if train:\n self.logger.log(f'Server -> Train: Loss {loss:.3f} | Accuracy = {accuracy:.3f}')\n else:\n self.logger.log(f'Server -> Test: Loss {loss:.3f} | Accuracy = {accuracy:.3f}')\n \n return accuracy, loss\n" ]
[ [ "torch.utils.data.Subset", "numpy.sum", "numpy.random.choice" ] ]
cihuang123/Next-simulation
[ "e8552a5804184b30022d103d47c8728fb242b5bc" ]
[ "utilities/plot_landing_point.py" ]
[ "import csv\nimport argparse\nimport matplotlib.pyplot as plt\n\ndef toFloat(str):\n try:\n return float(str)\n except ValueError:\n return str\n\nparser = argparse.ArgumentParser(\n description='Read some csv and output landing point.'\n)\nparser.add_argument('golden', help='Input the file position of golden file.')\nparser.add_argument('target',\n help='Input the file position of target folder.\\n\\\n Default: ../MONTE_RUN_monte/' )\nparser.add_argument('number',\n help='Input the number of input file.',\n default='20',\n type=int)\n\nargs = parser.parse_args()\n\ntry:\n with open(args.golden, 'rb') as goldenFile:\n\n goldenData = csv.reader(goldenFile, delimiter=',', quotechar='\\n')\n\n goldenArray = [];\n\n #Eat all shitz in\n for row in goldenData:\n goldenArray.append([]);\n for num in row:\n goldenArray[-1].append(toFloat(num))\n goldenFinal = [];\n for num in goldenArray[-1]:\n goldenFinal.append(num)\n\n plotArray = [[goldenFinal[7]],[goldenFinal[8]]]\n for i in range(0,args.number):\n text = args.target + 'RUN_' + ('00000' + str(i))[-5:] + '/log_rocket_csv.csv'\n try:\n with open(text, 'rb') as csvFile:\n buf = []\n csvData = csv.reader(csvFile, delimiter=',',quotechar='\\n')\n for row in csvData:\n buf = []\n for num in row:\n buf.append(num)\n x = toFloat(buf[7])\n y = toFloat(buf[8])\n if isinstance(x,float) and isinstance(y,float):\n plotArray[0].append(x)\n plotArray[1].append(y)\n except Exception as e:\n print (e.__doc__)\n print (e.message)\n #print(plotArray[0])\n #print(plotArray[1])\n plt.plot(plotArray[0],plotArray[1],\"o\", color='red');\n plt.plot(plotArray[0][0],plotArray[1][0],\"o\",color = 'yellow');\n #plt.axis([plotArray[0][0]-1000000,plotArray[0][0]+1000000,plotArray[1][0]-1000000,plotArray[1][0]+1000000])\n plt.xlabel('x(m)')\n plt.ylabel('y(m)')\n plt.show()\n\nexcept IOError as e:\n print(e)\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
snehasish069/tensorflow-mnist-dataset
[ "9f22235d52c66f37a07359056d5c9ef75a1ccf1a" ]
[ "mnist_tensorflow.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 7 11:19:20 2017\r\n\r\n@author: Snehasish\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\n\r\ndata = input_data.read_data_sets(\"/tmp/data/\", one_hot = True)\r\n\r\n#learning rate\r\nLR = 0.001\r\n\r\n#respective size of hidden layers\r\nhl1 = 500 \r\nhl2 = 500\r\nhl3 = 500\r\n\r\noutput_size = 10 #{one-hot encoded array indicating any digit}\r\nbatch_size = 100\r\nno_of_epochs = 10\r\ntotal_no_of_data = data.train.num_examples\r\n\r\n#for testing data\r\ntest_x = data.test.images\r\ntest_y = data.test.labels\r\n\r\nx = tf.placeholder('float', [None, 784])\r\ny = tf.placeholder('float')\r\n\r\ndef NNmodel(data):\r\n #initialize weights and biases of each layer with random tensorflow variables.\r\n h_layer1 = {'weights':tf.Variable(tf.random_normal([784, hl1])), 'biases':tf.Variable(tf.random_normal([hl1]))}\r\n\r\n h_layer2 = {'weights':tf.Variable(tf.random_normal([hl1, hl2])), 'biases':tf.Variable(tf.random_normal([hl2]))}\r\n\r\n h_layer3 = {'weights':tf.Variable(tf.random_normal([hl2, hl3])), 'biases':tf.Variable(tf.random_normal([hl3]))}\r\n \r\n output_layer = {'weights':tf.Variable(tf.random_normal([hl3, output_size])), 'biases':tf.Variable(tf.random_normal([output_size]))}\r\n \r\n # (weights1 * weights2) + biases (using rectified liniear activation) \r\n \r\n l1 = tf.add(tf.matmul(data,h_layer1['weights']), h_layer1['biases'])\r\n l1 = tf.nn.relu(l1)\r\n\r\n l2 = tf.add(tf.matmul(l1,h_layer2['weights']), h_layer2['biases'])\r\n l2 = tf.nn.relu(l2)\r\n\r\n l3 = tf.add(tf.matmul(l2,h_layer3['weights']), h_layer3['biases'])\r\n l3 = tf.nn.relu(l3)\r\n\r\n output = tf.matmul(l3,output_layer['weights']) + output_layer['biases']\r\n \r\n return output\r\n\r\ndef train_model(x):\r\n #predicting output for the data\r\n pred = NNmodel(x)\r\n \r\n #error after predicting the digit\r\n cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y) )\r\n \r\n #optimizer to reduce the error term (or cost)\r\n optimizer = tf.train.AdamOptimizer(learning_rate= LR).minimize(cost)\r\n \r\n # upto this point the model is only defined, but to run the model \r\n # we have to run it within a session.\r\n\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n for ep in range(no_of_epochs):\r\n epoch_loss = 0\r\n for _ in range(int(total_no_of_data/batch_size)):\r\n #batch-wise data is trained\r\n ep_x, ep_y = data.train.next_batch(batch_size)\r\n \r\n #cost(c) for this batch is calaculated\r\n _, c = sess.run([optimizer, cost], feed_dict={x: ep_x, y: ep_y})\r\n epoch_loss += c\r\n\r\n print('Epoch: ', ep+1, '/',no_of_epochs,'loss:',epoch_loss)\r\n \r\n #no of correct predictions\r\n correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\r\n \r\n #calculating the final accuracy\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n print('Accuracy:', accuracy.eval({x: test_x, y: test_y })*100, '%' )\r\n\r\n \r\ntrain_model(x)" ]
[ [ "tensorflow.nn.relu", "tensorflow.matmul", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.random_normal" ] ]
cortex-lab/phylib
[ "563afac3a7df9ec585fab63b6fe4fc0700f48b7c" ]
[ "phylib/stats/ccg.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Cross-correlograms.\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom phylib.utils._types import _as_array\nfrom phylib.io.array import _index_of, _unique\n\n\n#------------------------------------------------------------------------------\n# Cross-correlograms\n#------------------------------------------------------------------------------\n\ndef _increment(arr, indices):\n \"\"\"Increment some indices in a 1D vector of non-negative integers.\n Repeated indices are taken into account.\"\"\"\n arr = _as_array(arr)\n indices = _as_array(indices)\n bbins = np.bincount(indices)\n arr[:len(bbins)] += bbins\n return arr\n\n\ndef _diff_shifted(arr, steps=1):\n arr = _as_array(arr)\n return arr[steps:] - arr[:len(arr) - steps]\n\n\ndef _create_correlograms_array(n_clusters, winsize_bins):\n return np.zeros((n_clusters, n_clusters, winsize_bins // 2 + 1),\n dtype=np.int32)\n\n\ndef _symmetrize_correlograms(correlograms):\n \"\"\"Return the symmetrized version of the CCG arrays.\"\"\"\n\n n_clusters, _, n_bins = correlograms.shape\n assert n_clusters == _\n\n # We symmetrize c[i, j, 0].\n # This is necessary because the algorithm in correlograms()\n # is sensitive to the order of identical spikes.\n correlograms[..., 0] = np.maximum(correlograms[..., 0],\n correlograms[..., 0].T)\n\n sym = correlograms[..., 1:][..., ::-1]\n sym = np.transpose(sym, (1, 0, 2))\n\n return np.dstack((sym, correlograms))\n\n\ndef firing_rate(spike_clusters, cluster_ids=None, bin_size=None, duration=None):\n \"\"\"Compute the average number of spikes per cluster per bin.\"\"\"\n\n # Take the cluster order into account.\n if cluster_ids is None:\n cluster_ids = _unique(spike_clusters)\n else:\n cluster_ids = _as_array(cluster_ids)\n\n # Like spike_clusters, but with 0..n_clusters-1 indices.\n spike_clusters_i = _index_of(spike_clusters, cluster_ids)\n\n assert bin_size > 0\n bc = np.bincount(spike_clusters_i)\n # Handle the case where the last cluster(s) are empty.\n if len(bc) < len(cluster_ids):\n n = len(cluster_ids) - len(bc)\n bc = np.concatenate((bc, np.zeros(n, dtype=bc.dtype)))\n assert bc.shape == (len(cluster_ids),)\n return bc * np.c_[bc] * (bin_size / (duration or 1.))\n\n\ndef correlograms(\n spike_times, spike_clusters, cluster_ids=None, sample_rate=1.,\n bin_size=None, window_size=None, symmetrize=True):\n \"\"\"Compute all pairwise cross-correlograms among the clusters appearing\n in `spike_clusters`.\n\n Parameters\n ----------\n\n spike_times : array-like\n Spike times in seconds.\n spike_clusters : array-like\n Spike-cluster mapping.\n cluster_ids : array-like\n The list of *all* unique clusters, in any order. 
That order will be used\n in the output array.\n bin_size : float\n Size of the bin, in seconds.\n window_size : float\n Size of the window, in seconds.\n sample_rate : float\n Sampling rate.\n symmetrize : boolean (True)\n Whether the output matrix should be symmetrized or not.\n\n Returns\n -------\n\n correlograms : array\n A `(n_clusters, n_clusters, winsize_samples)` array with all pairwise CCGs.\n\n \"\"\"\n assert sample_rate > 0.\n assert np.all(np.diff(spike_times) >= 0), (\"The spike times must be \"\n \"increasing.\")\n\n # Get the spike samples.\n spike_times = np.asarray(spike_times, dtype=np.float64)\n spike_samples = (spike_times * sample_rate).astype(np.int64)\n\n spike_clusters = _as_array(spike_clusters)\n\n assert spike_samples.ndim == 1\n assert spike_samples.shape == spike_clusters.shape\n\n # Find `binsize`.\n bin_size = np.clip(bin_size, 1e-5, 1e5) # in seconds\n binsize = int(sample_rate * bin_size) # in samples\n assert binsize >= 1\n\n # Find `winsize_bins`.\n window_size = np.clip(window_size, 1e-5, 1e5) # in seconds\n winsize_bins = 2 * int(.5 * window_size / bin_size) + 1\n\n assert winsize_bins >= 1\n assert winsize_bins % 2 == 1\n\n # Take the cluster order into account.\n if cluster_ids is None:\n clusters = _unique(spike_clusters)\n else:\n clusters = _as_array(cluster_ids)\n n_clusters = len(clusters)\n\n # Like spike_clusters, but with 0..n_clusters-1 indices.\n spike_clusters_i = _index_of(spike_clusters, clusters)\n\n # Shift between the two copies of the spike trains.\n shift = 1\n\n # At a given shift, the mask precises which spikes have matching spikes\n # within the correlogram time window.\n mask = np.ones_like(spike_samples, dtype=bool)\n\n correlograms = _create_correlograms_array(n_clusters, winsize_bins)\n\n # The loop continues as long as there is at least one spike with\n # a matching spike.\n while mask[:-shift].any():\n # Number of time samples between spike i and spike i+shift.\n spike_diff = _diff_shifted(spike_samples, shift)\n\n # Binarize the delays between spike i and spike i+shift.\n spike_diff_b = spike_diff // binsize\n\n # Spikes with no matching spikes are masked.\n mask[:-shift][spike_diff_b > (winsize_bins // 2)] = False\n\n # Cache the masked spike delays.\n m = mask[:-shift].copy()\n d = spike_diff_b[m]\n\n # # Update the masks given the clusters to update.\n # m0 = np.in1d(spike_clusters[:-shift], clusters)\n # m = m & m0\n # d = spike_diff_b[m]\n d = spike_diff_b[m]\n\n # Find the indices in the raveled correlograms array that need\n # to be incremented, taking into account the spike clusters.\n indices = np.ravel_multi_index(\n (spike_clusters_i[:-shift][m], spike_clusters_i[+shift:][m], d), correlograms.shape)\n\n # Increment the matching spikes in the correlograms array.\n _increment(correlograms.ravel(), indices)\n\n shift += 1\n\n if symmetrize:\n return _symmetrize_correlograms(correlograms)\n else:\n return correlograms\n" ]
[ [ "numpy.maximum", "numpy.ones_like", "numpy.clip", "numpy.asarray", "numpy.dstack", "numpy.ravel_multi_index", "numpy.bincount", "numpy.diff", "numpy.transpose", "numpy.zeros" ] ]
SamarthMM/cs769-assignments
[ "bac2ad57c50043608276df8e0f21181ef62696c7" ]
[ "assignment2/classifier.py" ]
[ "import time, random, numpy as np, argparse, sys, re, os\nfrom types import SimpleNamespace\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.metrics import classification_report, f1_score, recall_score, accuracy_score\n\n# change it with respect to the original model\nfrom tokenizer import BertTokenizer\nfrom bert import BertModel\nfrom optimizer import AdamW\nfrom tqdm import tqdm\n\n\nTQDM_DISABLE=True\n# fix the random seed\ndef seed_everything(seed=11711):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\nclass BertSentClassifier(torch.nn.Module):\n def __init__(self, config):\n super(BertSentClassifier, self).__init__()\n self.num_labels = config.num_labels\n self.bert = BertModel.from_pretrained('bert-base-uncased')\n\n # pretrain mode does not require updating bert paramters.\n for param in self.bert.parameters():\n if config.option == 'pretrain':\n param.requires_grad = False\n elif config.option == 'finetune':\n param.requires_grad = True\n\n # todo\n self.dropout=torch.nn.Dropout(config.hidden_dropout_prob)\n self.dense = torch.nn.Linear(config.hidden_size, config.num_labels)\n #self.activation = torch.nn.Tanh()\n\n #raise NotImplementedError\n\n def forward(self, input_ids, attention_mask):\n # todo\n #call the bert model. This should output CLS token (pooler output) and the contextualized representations\n #(last hidden state)\n outputs=self.bert(input_ids,attention_mask)\n contextualized_sentence_representation=outputs['last_hidden_state']*attention_mask.unsqueeze(-1)\n first_token_tensor=outputs['pooler_output'] \n # the final bert contextualize embedding is the hidden state of [CLS] token (the first token)\n first_token_tensor=self.dropout(first_token_tensor)\n pooled_output = self.dense(first_token_tensor)\n #pooled_output = self.activation(pooled_output)\n return F.log_softmax(pooled_output, dim=1)\n\n# create a custom Dataset Class to be used for the dataloader\nclass BertDataset(Dataset):\n def __init__(self, dataset, args):\n self.dataset = dataset\n self.p = args\n self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n ele = self.dataset[idx]\n return ele\n\n def pad_data(self, data):\n sents = [x[0] for x in data]\n labels = [x[1] for x in data]\n encoding = self.tokenizer(sents, return_tensors='pt', padding=True, truncation=True)\n token_ids = torch.LongTensor(encoding['input_ids'])\n attention_mask = torch.LongTensor(encoding['attention_mask'])\n token_type_ids = torch.LongTensor(encoding['token_type_ids'])\n labels = torch.LongTensor(labels)\n\n return token_ids, token_type_ids, attention_mask, labels, sents\n\n def collate_fn(self, all_data):\n all_data.sort(key=lambda x: -len(x[2])) # sort by number of tokens\n\n batches = []\n num_batches = int(np.ceil(len(all_data) / self.p.batch_size))\n\n for i in range(num_batches):\n start_idx = i * self.p.batch_size\n data = all_data[start_idx: start_idx + self.p.batch_size]\n\n token_ids, token_type_ids, attention_mask, labels, sents = self.pad_data(data)\n batches.append({\n 'token_ids': token_ids,\n 'token_type_ids': token_type_ids,\n 'attention_mask': attention_mask,\n 'labels': labels,\n 'sents': sents,\n })\n\n return batches\n\n\n# create the data which is a list of (sentence, label, 
token for the labels)\ndef create_data(filename, flag='train'):\n # specify the tokenizer\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n num_labels = {}\n data = []\n\n with open(filename, 'r') as fp:\n for line in fp:\n label, org_sent = line.split(' ||| ')\n sent = org_sent.lower().strip()\n tokens = tokenizer.tokenize(\"[CLS] \" + sent + \" [SEP]\")\n label = int(label.strip())\n if label not in num_labels:\n num_labels[label] = len(num_labels)\n data.append((sent, label, tokens))\n print(f\"load {len(data)} data from {filename}\")\n if flag == 'train':\n return data, len(num_labels)\n else:\n return data\n\n# perform model evaluation in terms of the accuracy and f1 score.\ndef model_eval(dataloader, model, device):\n model.eval() # switch to eval model, will turn off randomness like dropout\n y_true = []\n y_pred = []\n sents = []\n for step, batch in enumerate(tqdm(dataloader, desc=f'eval', disable=TQDM_DISABLE)):\n b_ids, b_type_ids, b_mask, b_labels, b_sents = batch[0]['token_ids'], batch[0]['token_type_ids'], \\\n batch[0]['attention_mask'], batch[0]['labels'], batch[0]['sents']\n\n b_ids = b_ids.to(device)\n b_mask = b_mask.to(device)\n\n logits = model(b_ids, b_mask)\n logits = logits.detach().cpu().numpy()\n preds = np.argmax(logits, axis=1).flatten()\n\n b_labels = b_labels.flatten()\n y_true.extend(b_labels)\n y_pred.extend(preds)\n sents.extend(b_sents)\n\n f1 = f1_score(y_true, y_pred, average='macro')\n acc = accuracy_score(y_true, y_pred)\n\n return acc, f1, y_pred, y_true, sents\n\ndef save_model(model, optimizer, args, config, filepath):\n save_info = {\n 'model': model.state_dict(),\n 'optim': optimizer.state_dict(),\n 'args': args,\n 'model_config': config,\n 'system_rng': random.getstate(),\n 'numpy_rng': np.random.get_state(),\n 'torch_rng': torch.random.get_rng_state(),\n }\n\n torch.save(save_info, filepath)\n print(f\"save the model to {filepath}\")\n\ndef train(args):\n device = torch.device('cuda') if args.use_gpu else torch.device('cpu')\n #### Load data\n # create the data and its corresponding datasets and dataloader\n train_data, num_labels = create_data(args.train, 'train')\n dev_data = create_data(args.dev, 'valid')\n\n train_dataset = BertDataset(train_data, args)\n dev_dataset = BertDataset(dev_data, args)\n\n train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size,\n collate_fn=train_dataset.collate_fn)\n dev_dataloader = DataLoader(dev_dataset, shuffle=False, batch_size=args.batch_size,\n collate_fn=dev_dataset.collate_fn)\n\n #### Init model\n config = {'hidden_dropout_prob': args.hidden_dropout_prob,\n 'num_labels': num_labels,\n 'hidden_size': 768,\n 'data_dir': '.',\n 'option': args.option}\n\n config = SimpleNamespace(**config)\n\n # initialize the Senetence Classification Model\n model = BertSentClassifier(config)\n model = model.to(device)\n\n lr = args.lr\n ## specify the optimizer\n optimizer = AdamW(model.parameters(), lr=lr)\n best_dev_acc = 0\n\n ## run for the specified number of epochs\n for epoch in range(args.epochs):\n model.train()\n train_loss = 0\n num_batches = 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=f'train-{epoch}', disable=TQDM_DISABLE)):\n b_ids, b_type_ids, b_mask, b_labels, b_sents = batch[0]['token_ids'], batch[0]['token_type_ids'], batch[0][\n 'attention_mask'], batch[0]['labels'], batch[0]['sents']\n\n b_ids = b_ids.to(device)\n b_mask = b_mask.to(device)\n b_labels = b_labels.to(device)\n\n optimizer.zero_grad()\n logits = model(b_ids, b_mask)\n loss = 
F.nll_loss(logits, b_labels.view(-1), reduction='sum') / args.batch_size\n\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n num_batches += 1\n\n train_loss = train_loss / (num_batches)\n\n train_acc, train_f1, *_ = model_eval(train_dataloader, model, device)\n dev_acc, dev_f1, *_ = model_eval(dev_dataloader, model, device)\n\n if dev_acc > best_dev_acc:\n best_dev_acc = dev_acc\n save_model(model, optimizer, args, config, args.filepath)\n\n print(f\"epoch {epoch}: train loss :: {train_loss :.3f}, train acc :: {train_acc :.3f}, dev acc :: {dev_acc :.3f}\")\n\n\ndef test(args):\n with torch.no_grad():\n device = torch.device('cuda') if args.use_gpu else torch.device('cpu')\n saved = torch.load(args.filepath)\n config = saved['model_config']\n model = BertSentClassifier(config)\n model.load_state_dict(saved['model'])\n model = model.to(device)\n print(f\"load model from {args.filepath}\")\n dev_data = create_data(args.dev, 'valid')\n dev_dataset = BertDataset(dev_data, args)\n dev_dataloader = DataLoader(dev_dataset, shuffle=False, batch_size=args.batch_size, collate_fn=dev_dataset.collate_fn)\n\n test_data = create_data(args.test, 'test')\n test_dataset = BertDataset(test_data, args)\n test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=args.batch_size, collate_fn=test_dataset.collate_fn)\n\n dev_acc, dev_f1, dev_pred, dev_true, dev_sents = model_eval(dev_dataloader, model, device)\n test_acc, test_f1, test_pred, test_true, test_sents = model_eval(test_dataloader, model, device)\n\n with open(args.dev_out, \"w+\") as f:\n print(f\"dev acc :: {dev_acc :.3f}\")\n for s, t, p in zip(dev_sents, dev_true, dev_pred):\n f.write(f\"{s} ||| {t} ||| {p}\\n\")\n\n with open(args.test_out, \"w+\") as f:\n print(f\"test acc :: {test_acc :.3f}\")\n for s, t, p in zip(test_sents, test_true, test_pred):\n f.write(f\"{s} ||| {t} ||| {p}\\n\")\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--train\", type=str, default=\"data/cfimdb-train.txt\")\n parser.add_argument(\"--dev\", type=str, default=\"data/cfimdb-dev.txt\")\n parser.add_argument(\"--test\", type=str, default=\"data/cfimdb-test.txt\")\n parser.add_argument(\"--seed\", type=int, default=11711)\n parser.add_argument(\"--epochs\", type=int, default=10)\n parser.add_argument(\"--option\", type=str,\n help='pretrain: the BERT parameters are frozen; finetune: BERT parameters are updated',\n choices=('pretrain', 'finetune'), default=\"pretrain\")\n parser.add_argument(\"--use_gpu\", action='store_true')\n parser.add_argument(\"--dev_out\", type=str, default=\"cfimdb-dev-output.txt\")\n parser.add_argument(\"--test_out\", type=str, default=\"cfimdb-test-output.txt\")\n\n # hyper parameters\n parser.add_argument(\"--batch_size\", help='sst: 64, cfimdb: 8 can fit a 12GB GPU', type=int, default=8)\n parser.add_argument(\"--hidden_dropout_prob\", type=float, default=0.3)\n parser.add_argument(\"--lr\", type=float, help=\"learning rate, default lr for 'pretrain': 1e-3, 'finetune': 1e-5\",\n default=1e-5)\n\n args = parser.parse_args()\n print(f\"args: {vars(args)}\")\n return args\n\nif __name__ == \"__main__\":\n args = get_args()\n args.filepath = f'{args.option}-{args.epochs}-{args.lr}.pt' # save path\n seed_everything(args.seed) # fix the seed for reproducibility\n train(args)\n test(args)\n" ]
[ [ "numpy.random.get_state", "torch.nn.Dropout", "torch.LongTensor", "torch.cuda.manual_seed", "numpy.random.seed", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.random.get_rng_state", "torch.load", "torch.utils.data.DataLoader", "torch.nn.Linear", "numpy.argmax", "torch.save", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.device", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ] ]
AgusQuintanar/DesignOfAnElectricCircuit
[ "35dd0b96fc4722d6ec5b3c5173ae5cf2147c83d9" ]
[ "graph.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot(fun, start, end):\n x = np.arange(float(start), float(end), 0.1)\n \n n_fun = np.vectorize(fun)\n plt.plot(x, n_fun(x))\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n fun = lambda x: x**2 - 1\n plot(fun, -5, 5)" ]
[ [ "numpy.vectorize", "matplotlib.pyplot.show" ] ]
jiangwenj02/Meta-weight-net_class-imbalance
[ "5f7cdb3e0b66336a44695a9b8d240de0e3a3a2c8" ]
[ "meta-weight-net-class-imbalance.py" ]
[ "import os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport time\nimport argparse\nimport random\nimport copy\nimport torch\nimport torchvision\nimport numpy as np\nimport pandas as pd\nimport sklearn.metrics as sm\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport matplotlib.pyplot as plt\nfrom data_utils import build_dataset\nfrom resnet import *\n\n\n# parse arguments\nparser = argparse.ArgumentParser(description='Imbalanced Example')\nparser.add_argument('--dataset', default='cifar10', type=str,\n help='dataset (cifar10 [default] or cifar100)')\nparser.add_argument('--batch-size', type=int, default=100, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--num_classes', type=int, default=10)\nparser.add_argument('--num_meta', type=int, default=10,\n help='The number of meta data for each class.')\nparser.add_argument('--imb_factor', type=float, default=0.1)\nparser.add_argument('--corruption_prob', type=float, default=0.4,\n help='label noise')\nparser.add_argument('--corruption_type', '-ctype', type=str, default='flip2',\n help='Type of corruption (\"unif\" or \"flip\" or \"flip2\").')\nparser.add_argument('--test-batch-size', type=int, default=100, metavar='N',\n help='input batch size for testing (default: 100)')\nparser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train')\nparser.add_argument('--lr', '--learning-rate', default=1e-1, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')\nparser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,\n help='weight decay (default: 5e-4)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--no-augment', dest='augment', action='store_false',\n help='whether to use standard augmentation (default: True)')\nparser.add_argument('--split', type=int, default=1000)\nparser.add_argument('--seed', type=int, default=42, metavar='S',\n help='random seed (default: 42)')\nparser.add_argument('--prefetch', type=int, default=0, help='Pre-fetching threads.')\nparser.add_argument('--print-freq', '-p', default=100, type=int,\n help='print frequency (default: 10)')\nparser.add_argument('--imblance', type=bool, default=True, help='Adding imblance to dataset.')\nargs = parser.parse_args()\nprint(args)\n\nkwargs = {'num_workers': 1, 'pin_memory': True}\nuse_cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\nimbalanced_train_loader, validation_loader, test_loader = build_dataset(args)\n\n# train_loader = torch.utils.data.DataLoader(\n# imbalanced_train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)\n\n# make imbalanced data\ntorch.manual_seed(args.seed)\nclasse_labels = range(args.num_classes)\n\n# data_list = {}\n\n# for j in range(args.num_classes):\n# data_list[j] = [i for i, label in enumerate(train_loader.dataset.train_labels) if label == j]\n\n# img_num_list = get_img_num_per_cls(args.dataset,args.imb_factor,args.num_meta*args.num_classes)\n# print(img_num_list)\n# print(sum(img_num_list))\n# im_data = {}\n# idx_to_del = []\n# for cls_idx, 
img_id_list in data_list.items():\n# random.shuffle(img_id_list)\n# img_num = img_num_list[int(cls_idx)]\n# im_data[cls_idx] = img_id_list[img_num:]\n# idx_to_del.extend(img_id_list[img_num:])\n\n# print(len(idx_to_del))\n\n# imbalanced_train_dataset = copy.deepcopy(train_data)\n# imbalanced_train_dataset.train_labels = np.delete(train_loader.dataset.train_labels, idx_to_del, axis=0)\n# imbalanced_train_dataset.train_data = np.delete(train_loader.dataset.train_data, idx_to_del, axis=0)\n# imbalanced_train_loader = torch.utils.data.DataLoader(\n# imbalanced_train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)\n\n\n# validation_loader = torch.utils.data.DataLoader(\n# train_data_meta, batch_size=args.batch_size, shuffle=True, **kwargs)\n# test_loader = torch.utils.data.DataLoader(\n# test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)\n\nbest_prec1 = 0\n\n\ndef main():\n global args, best_prec1\n args = parser.parse_args()\n\n # create model\n model = build_model()\n optimizer_a = torch.optim.SGD(model.params(), args.lr,\n momentum=args.momentum, nesterov=args.nesterov,\n weight_decay=args.weight_decay)\n\n vnet = VNet(1, 100, 1).cuda()\n\n optimizer_c = torch.optim.SGD(vnet.params(), 1e-5,\n momentum=args.momentum, nesterov=args.nesterov,\n weight_decay=args.weight_decay)\n\n # cudnn.benchmark = True\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda()\n\n for epoch in range(args.epochs):\n adjust_learning_rate(optimizer_a, epoch + 1)\n\n train(imbalanced_train_loader, validation_loader,model,vnet, optimizer_a, optimizer_c,epoch)\n\n # evaluate on validation set\n prec1 = validate(test_loader, model, criterion, epoch)\n\n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n\n print('Best accuracy: ', best_prec1)\n\n\ndef train(train_loader, validation_loader,model, vnet,optimizer_a,optimizer_c,epoch):\n \"\"\"Train for one epoch on the training set\"\"\"\n batch_time = AverageMeter()\n losses = AverageMeter()\n meta_losses = AverageMeter()\n top1 = AverageMeter()\n meta_top1 = AverageMeter()\n model.train()\n\n\n for i, (input, target) in enumerate(train_loader):\n input_var = to_var(input, requires_grad=False)\n target_var = to_var(target, requires_grad=False)\n\n\n meta_model = build_model()\n\n meta_model.load_state_dict(model.state_dict())\n\n y_f_hat = meta_model(input_var)\n cost = F.cross_entropy(y_f_hat, target_var, reduce=False)\n cost_v = torch.reshape(cost, (len(cost), 1))\n\n v_lambda = vnet(cost_v)\n\n norm_c = torch.sum(v_lambda)\n\n if norm_c != 0:\n v_lambda_norm = v_lambda / norm_c\n else:\n v_lambda_norm = v_lambda\n\n l_f_meta = torch.sum(cost_v * v_lambda_norm)\n meta_model.zero_grad()\n grads = torch.autograd.grad(l_f_meta, (meta_model.params()), create_graph=True)\n meta_lr = args.lr * ((0.1 ** int(epoch >= 80)) * (0.1 ** int(epoch >= 90)))\n meta_model.update_params(lr_inner=meta_lr, source_params=grads)\n del grads\n\n input_validation, target_validation = next(iter(validation_loader))\n input_validation_var = to_var(input_validation, requires_grad=False)\n target_validation_var = to_var(target_validation, requires_grad=False)\n y_g_hat = meta_model(input_validation_var)\n l_g_meta = F.cross_entropy(y_g_hat, target_validation_var)\n # l_g_meta.backward(retain_graph=True)\n prec_meta = accuracy(y_g_hat.data, target_validation_var.data, topk=(1,))[0]\n\n optimizer_c.zero_grad()\n l_g_meta.backward()\n # print(vnet.linear1.weight.grad)\n 
optimizer_c.step()\n\n y_f = model(input_var)\n cost_w = F.cross_entropy(y_f, target_var, reduce=False)\n cost_v = torch.reshape(cost_w, (len(cost_w), 1))\n prec_train = accuracy(y_f.data, target_var.data, topk=(1,))[0]\n\n with torch.no_grad():\n w_new = vnet(cost_v)\n norm_v = torch.sum(w_new)\n\n if norm_v != 0:\n w_v = w_new / norm_v\n else:\n w_v = w_new\n\n l_f = torch.sum(cost_v * w_v)\n\n losses.update(l_f.item(), input.size(0))\n meta_losses.update(l_g_meta.item(), input.size(0))\n top1.update(prec_train.item(), input.size(0))\n meta_top1.update(prec_meta.item(), input.size(0))\n\n optimizer_a.zero_grad()\n l_f.backward()\n optimizer_a.step()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Meta_Loss {meta_loss.val:.4f} ({meta_loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'meta_Prec@1 {meta_top1.val:.3f} ({meta_top1.avg:.3f})'.format(\n epoch, i, len(train_loader),\n loss=losses,meta_loss=meta_losses, top1=top1,meta_top1=meta_top1))\n\n\ndef validate(val_loader, model, criterion, epoch):\n \"\"\"Perform validation on the validation set\"\"\"\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n # target = target.cuda(async=True)\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # compute output\n with torch.no_grad():\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1 = accuracy(output.data, target, topk=(1,))[0]\n losses.update(loss.data.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1))\n\n print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))\n # log to TensorBoard\n\n return top1.avg\n\n\ndef build_model():\n model = ResNet32(args.dataset == 'cifar10' and 10 or 100)\n # print('Number of model parameters: {}'.format(\n # sum([p.data.nelement() for p in model.parameters()])))\n\n if torch.cuda.is_available():\n model.cuda()\n torch.backends.cudnn.benchmark = True\n\n\n return model\n\ndef to_var(x, requires_grad=True):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, requires_grad=requires_grad)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR divided by 5 at 60th, 120th and 160th epochs\"\"\"\n # lr = args.lr * ((0.2 ** int(epoch >= 60)) * (0.2 ** int(epoch >= 120))* (0.2 ** int(epoch >= 160)))\n lr = args.lr * ((0.1 ** int(epoch >= 80)) * (0.1 ** int(epoch >= 90)))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k 
for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n" ]
[ [ "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.sum", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.autograd.Variable" ] ]
jchen0506/molecool
[ "dc931c165c34edeae4a38e67976138c017c5857c" ]
[ "molecool/visualize.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D # noqat: F401\n\nfrom .atom_data import (\n atom_colors,\n) # .atome_data relative import, . check the same folder\n\n\ndef bond_histogram(bond_list, save_location=None, dpi=300, graph_min=0, graph_max=2):\n # Draw a histogram of bond lengths based on a bond_list (output from build_bond_list function)\n\n lengths = []\n for atoms, bond_length in bond_list.items():\n lengths.append(bond_length)\n\n bins = np.linspace(graph_min, graph_max)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n plt.xlabel(\"Bond Length (angstrom)\")\n plt.ylabel(\"Number of Bonds\")\n\n ax.hist(lengths, bins=bins)\n\n # Save figure\n if save_location:\n plt.savefig(save_location, dpi=dpi)\n\n return ax\n\n\ndef draw_molecule(coordinates, symbols, draw_bonds=None, save_location=None, dpi=300):\n\n # Draw a picture of a molecule using matplotlib.\n\n # Create figure\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n\n # Get colors - based on atom name\n colors = []\n for atom in symbols:\n colors.append(atom_colors[atom])\n\n size = np.array(plt.rcParams[\"lines.markersize\"] ** 2) * 200 / (len(coordinates))\n\n ax.scatter(\n coordinates[:, 0],\n coordinates[:, 1],\n coordinates[:, 2],\n marker=\"o\",\n edgecolors=\"k\",\n facecolors=colors,\n alpha=1,\n s=size,\n )\n\n # Draw bonds\n if draw_bonds:\n for atoms, bond_length in draw_bonds.items():\n atom1 = atoms[0]\n atom2 = atoms[1]\n\n ax.plot(\n coordinates[[atom1, atom2], 0],\n coordinates[[atom1, atom2], 1],\n coordinates[[atom1, atom2], 2],\n color=\"k\",\n )\n\n # Save figure\n if save_location:\n plt.savefig(save_location, dpi=dpi, graph_min=0, graph_max=2)\n\n return ax\n" ]
[ [ "numpy.linspace", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
iwonasob/DCASE_rare
[ "3f9f55a1958602ac61e2e5ab02866d7215a5d131" ]
[ "src/dataset.py" ]
[ "'''\nDownload, extract and partition the datasets\n'''\nimport config as cfg\nimport os\nimport sys\nimport requests\nimport zipfile\nfrom clint.textui import progress\nimport numpy as np\nnp.random.seed(1515)\nimport pandas as pd\n\n\nclass DatasetCreator:\n def __init__(self,\n dataset_name):\n \n \"\"\" Initialize class\n Args:\n dataset_name (string): Name of the dataset to prepare\n \"\"\"\n self.dataset_name = dataset_name\n self.root_path = os.path.join(cfg.home_path, self.dataset_name)\n self.wav_url = cfg.wav_url[self.dataset_name]\n self.csv_url = cfg.csv_url[self.dataset_name]\n path , zip_name = os.path.split(self.wav_url)\n self.wav_zip = os.path.join(cfg.home_path, zip_name)\n\n self.wav_path = os.path.join(self.root_path, \"wav\")\n self.csv_path = os.path.join(self.root_path, cfg.csv_path[self.dataset_name])\n self.csv_10_path = os.path.join(self.root_path, \"cv10.csv\")\n \n if not os.path.isdir(self.root_path):\n os.makedirs(self.root_path)\n\n if not os.path.isdir(self.wav_path):\n os.makedirs(self.wav_path)\n\n \n def run(self):\n self.download()\n self.extract()\n self.partition()\n \n \n def download(self):\n \"\"\" Download the dataset and annotation file\n\n \"\"\"\n urls =[(self.csv_url,self.csv_path),(self.wav_url,self.wav_zip )]\n \n for u in urls:\n url=u[0]\n download_path=u[1]\n if not os.path.isfile(download_path): \n print(\"Downloading the file \"+ u[1])\n # open the link\n r = requests.get(url, stream=True) \n # save the content to a file\n with open(u[1], 'wb') as f:\n total_length = int(r.headers.get('content-length'))\n for chunk in progress.bar(r.iter_content(chunk_size=8192), expected_size=(total_length/8192) + 1): \n if chunk:\n f.write(chunk)\n f.flush()\n f.close()\n else:\n print(download_path + \" is already there!\")\n \n \n def extract(self):\n \"\"\" Extract the downloaded dataset\n\n \"\"\"\n if not os.listdir(os.path.join(self.root_path,\"wav\")):\n print(\"Extracting the dataset \"+ self.dataset_name)\n zip= zipfile.ZipFile(self.wav_zip)\n zip.extractall(self.root_path)\n else:\n print(self.dataset_name + \" has been already extracted!\")\n \n \n def partition(self, n=10):\n \"\"\" Create a csv file with partitioning into n subsets\n Args:\n n: number of subsets\n\n \"\"\"\n if not os.path.isfile(self.csv_10_path): \n data_list = pd.read_csv(self.csv_path)\n data_list['fold'] = np.random.randint( low=0, high=n, size=len(data_list))\n data_list.to_csv(self.csv_10_path)\n print(\"The partition into \"+ str(n) + \" is saved: \"+ self.csv_10_path)\n else:\n print(\"The partition CSV file is already there! \"+ self.csv_10_path)\n \n" ]
[ [ "pandas.read_csv", "numpy.random.seed" ] ]
Luoyadan/ddpg_power
[ "9f1bcd0c3874229933070b47f96e554738a72a77" ]
[ "ddpg.py" ]
[ "# -----------------------------------\n# Deep Deterministic Policy Gradient\n# Author: Flood Sung\n# Date: 2016.5.4\n# -----------------------------------\n\nimport tensorflow as tf\nimport numpy as np\nfrom ou_noise import OUNoise\nfrom critic_network import CriticNetwork \nfrom actor_network_bn import ActorNetwork\nfrom replay_buffer import ReplayBuffer\n\n# Hyper Parameters:\n\nREPLAY_BUFFER_SIZE = 1000000\nREPLAY_START_SIZE = 10000\nBATCH_SIZE = 64\nGAMMA = 0.99\n\n\nclass DDPG:\n \"\"\"docstring for DDPG\"\"\"\n def __init__(self, env):\n self.name = 'DDPG' # name for uploading results\n self.environment = env\n # Randomly initialize actor network and critic network\n # with both their target networks\n self.state_dim = 2\n self.action_dim = 24\n\n self.sess = tf.InteractiveSession()\n\n self.actor_network = ActorNetwork(self.sess,self.state_dim,self.action_dim)\n self.critic_network = CriticNetwork(self.sess,self.state_dim,self.action_dim)\n \n # initialize replay buffer\n self.replay_buffer = ReplayBuffer(REPLAY_BUFFER_SIZE)\n\n # Initialize a random process the Ornstein-Uhlenbeck process for action exploration\n self.exploration_noise = OUNoise(self.action_dim)\n\n def train(self):\n #print \"train step\",self.time_step\n # Sample a random minibatch of N transitions from replay buffer\n minibatch = self.replay_buffer.get_batch(BATCH_SIZE)\n state_batch = np.asarray([data[0] for data in minibatch])\n action_batch = np.asarray([data[1] for data in minibatch])\n reward_batch = np.asarray([data[2] for data in minibatch])\n next_state_batch = np.asarray([data[3] for data in minibatch])\n done_batch = np.asarray([data[4] for data in minibatch])\n\n # for action_dim = 1\n action_batch = np.resize(action_batch,[BATCH_SIZE,self.action_dim])\n\n # Calculate y_batch\n \n next_action_batch = self.actor_network.target_actions(next_state_batch)\n q_value_batch = self.critic_network.target_q(next_state_batch,next_action_batch)\n y_batch = [] \n for i in range(len(minibatch)): \n if done_batch[i]:\n y_batch.append(reward_batch[i])\n else :\n y_batch.append(reward_batch[i] + GAMMA * q_value_batch[i])\n y_batch = np.resize(y_batch,[BATCH_SIZE,1])\n # Update critic by minimizing the loss L\n self.critic_network.train(y_batch,state_batch,action_batch)\n\n # Update the actor policy using the sampled gradient:\n action_batch_for_gradients = self.actor_network.actions(state_batch)\n q_gradient_batch = self.critic_network.gradients(state_batch,action_batch_for_gradients)\n\n self.actor_network.train(q_gradient_batch,state_batch)\n\n # Update the target networks\n self.actor_network.update_target()\n self.critic_network.update_target()\n\n def noise_action(self,state):\n # Select action a_t according to the current policy and exploration noise\n action = self.actor_network.action(state)\n return action+self.exploration_noise.noise()\n\n def action(self,state):\n action = self.actor_network.action(state)\n return action\n\n def perceive(self,state,action,reward,next_state,done):\n # Store transition (s_t,a_t,r_t,s_{t+1}) in replay buffer\n self.replay_buffer.add(state,action,reward,next_state,done)\n\n # Store transitions to replay start size then start training\n if self.replay_buffer.count() > REPLAY_START_SIZE:\n self.train()\n\n #if self.time_step % 10000 == 0:\n #self.actor_network.save_network(self.time_step)\n #self.critic_network.save_network(self.time_step)\n\n # Re-iniitialize the random process when an episode ends\n if done:\n self.exploration_noise.reset()\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.asarray", "numpy.resize", "tensorflow.InteractiveSession" ] ]
Croydon-Brixton/gedi-biomass-mapping
[ "bd6021a8515597d5ce14221afa47758803b4864a" ]
[ "src/data/gedi_query_tools.py" ]
[ "\"\"\"Module to conveniently query GEDI v002 data (primarily L1B and L2A) locally\"\"\"\nimport pathlib\nfrom dataclasses import dataclass\n\nimport folium\nimport folium.features\nimport folium.plugins\nimport geopandas as gpd\nimport pandas as pd\nimport shapely\nimport shapely.geometry\n\nfrom src.constants import GEDI_L2A_PATH\n\n\n@dataclass\nclass QueryParameters:\n base_url: str\n provider: str\n product: str\n\n\nGEDI_L2A_QUERY_PARAMS = QueryParameters(\n base_url=\"https://search.earthdata.nasa.gov/search/granules\",\n provider=\"p=C1908348134-LPDAAC_ECS\",\n product=\"q=gedi%20\",\n)\nGEDI_L1B_QUERY_PARAMS = QueryParameters(\n base_url=\"https://search.earthdata.nasa.gov/search/granules\",\n provider=\"p=C1908344278-LPDAAC_ECS\",\n product=\"q=gedi%20\",\n)\n\nGEDI_L2A_METADATA_PATH = GEDI_L2A_PATH / \"GEDI_L2A_v002_metadata_2021-04-20.gpkg\"\n\n\ndef _load_gedi_metadata(metadata_path: pathlib.Path) -> gpd.GeoDataFrame:\n \"\"\"\n Loads meta data from file via geopandas\n\n Args:\n metadata_path (pathlib.Path): Path to the metadata\n\n Returns:\n gpd.GeoDataFrame: The metadata with dates and locations parsed\n \"\"\"\n # Load data from path\n l2a_meta = gpd.GeoDataFrame.from_file(metadata_path)\n\n # Parse dates\n date_cols = l2a_meta.columns[l2a_meta.columns.str.contains(r\"\\w*_time\")]\n for col in date_cols:\n l2a_meta[col] = pd.to_datetime(l2a_meta[col])\n\n l2a_meta[\"measurement_date\"] = l2a_meta[\"range_beginning_time\"].dt.date.astype(\n \"str\"\n )\n l2a_meta[\"measurement_start_time\"] = l2a_meta[\n \"range_beginning_time\"\n ].dt.time.astype(\"str\")\n l2a_meta[\"measurement_end_time\"] = l2a_meta[\"range_ending_time\"].dt.time.astype(\n \"str\"\n )\n\n return l2a_meta\n\n\ndef _find_gedi_granules(\n roi: shapely.geometry.Polygon, metadata: gpd.GeoDataFrame\n) -> gpd.GeoDataFrame:\n\n candidates: list[int] = metadata.sindex.query(roi)\n overlapping_granules = metadata.iloc[candidates].overlaps(roi)\n\n return metadata.iloc[candidates][overlapping_granules]\n\n\ndef _polygon_to_earthdata_query_url(roi: shapely.geometry.Polygon) -> str:\n return \"polygon[0]=\" + \"%2C\".join(\n [\"%2C\".join([str(lon), str(lat)]) for (lon, lat) in roi.boundary.coords[::-1]]\n )\n\n\ndef _construct_earthdata_query(\n roi: shapely.geometry.Polygon, query_params: QueryParameters = GEDI_L2A_QUERY_PARAMS\n) -> str:\n roi_request = _polygon_to_earthdata_query_url(roi)\n return (\n f\"{query_params.base_url}\"\n f\"?{query_params.provider}\"\n f\"&{query_params.product}\"\n f\"&{roi_request}\"\n )\n\n\ndef _visualize_query_result(\n roi: shapely.geometry.Polygon, granules: gpd.GeoDataFrame\n) -> folium.Map:\n \"\"\"\n Displays a map with the region of interest and the GEDI granule tracks overlaid\n\n Args:\n roi (shapely.geometry.Polygon): The region of interest as a Polygon\n granules (gpd.GeoDataFrame): The GEDI granules that overlap as GeoDataFrame\n\n Returns:\n folium.Map: A folium map with the GEDI granules and ROI overlaid.\n \"\"\"\n\n relevant_columns = [\n \"filename\",\n \"granule_uri\",\n \"measurement_date\",\n \"measurement_start_time\",\n \"measurement_end_time\",\n \"start_orbit_number\",\n \"reference_ground_track\",\n ]\n granules_data = granules[relevant_columns + [\"geometry\"]]\n\n # Create map and add layers\n world_map = folium.Map(\n location=roi.centroid.coords[0][::-1],\n control_scale=True,\n zoom_start=8,\n tiles=\"OpenStreetMap\",\n )\n\n # Add minimap\n folium.plugins.MiniMap(zoom_level_fixed=2).add_to(world_map)\n\n # Add ROI\n roi_style = {\"fillColor\": 
\"#2a74ac\", \"color\": \"#2a74ac\"}\n folium.GeoJson(\n data=roi.__geo_interface__,\n name=\"Region of interest\",\n style_function=lambda x: roi_style,\n ).add_to(world_map)\n\n # Add granules\n granules_style = {\"fillColor\": \"#1f4e13\", \"color\": \"#1b4611\"}\n tooltip = folium.features.GeoJsonTooltip(fields=relevant_columns)\n folium.GeoJson(\n data=granules_data.__geo_interface__,\n name=\"GEDI granule footprints\",\n tooltip=tooltip,\n style_function=lambda x: granules_style,\n ).add_to(world_map)\n\n folium.TileLayer(\n tiles=(\n \"https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/\"\n \"MapServer/tile/{z}/{y}/{x}\"\n ),\n attr=\"Esri\",\n name=\"Esri Satellite\",\n overlay=False,\n control=True,\n ).add_to(world_map)\n\n # Add map controls\n folium.LayerControl().add_to(world_map)\n\n # Retunr map map\n return world_map\n\n\ndef gedi_query(\n roi: shapely.geometry.Polygon,\n visualize: bool = True,\n metadata_path: pathlib.Path = GEDI_L2A_METADATA_PATH,\n query_params: QueryParameters = GEDI_L2A_QUERY_PARAMS,\n) -> dict:\n \"\"\"\n Returns URL to corresponding query on earth data as well as a table with metadata\n on all overlapping GEDI granules for the selected metadata.\n\n Args:\n roi (shapely.geometry.Polygon): The region of interest to query.\n visualize (bool, optional): For use in notebooks. If True, the query will\n be displayed visually in the notebook. Defaults to True.\n metadata_path (pathlib.Path, optional): The path to the metadata that will\n be used to perfomr the spatial query. Defaults to GEDI_L2A_METADATA_PATH.\n query_params (QueryParameters, optional): The query parameters for constructing\n the earth data query URL. Defaults to GEDI_L2A_QUERY_PARAMS.\n\n Returns:\n dict: A dictionary containing the earth data query URL and a geopandas Dataframe\n with the granules that overlap the ROI.\n \"\"\"\n\n metadata = _load_gedi_metadata(metadata_path)\n granules = _find_gedi_granules(roi, metadata)\n earthdata_query = _construct_earthdata_query(roi, query_params)\n\n query_result = {\"query_link\": earthdata_query, \"granules\": granules}\n\n if visualize:\n print(f\"Corresponding Earthdata query: {earthdata_query}\")\n\n from IPython import display # pylint: disable=import-outside-toplevel\n\n display.display(_visualize_query_result(roi, granules))\n\n return query_result\n" ]
[ [ "pandas.to_datetime" ] ]
awesome-archive/urh
[ "c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7" ]
[ "src/urh/dev/native/HackRF.py" ]
[ "import numpy as np\nimport time\n\nfrom urh.dev.native.Device import Device\nfrom urh.dev.native.lib import hackrf\nfrom urh.util.Logger import logger\n\n\nclass HackRF(Device):\n BYTES_PER_SAMPLE = 2 # HackRF device produces 8 bit unsigned IQ data\n\n def __init__(self, bw, freq, gain, srate, is_ringbuffer=False):\n super().__init__(bw, freq, gain, srate, is_ringbuffer)\n self.success = 0\n self.error_not_open = -4242\n\n\n self._max_bandwidth = 28e6\n self._max_frequency = 6e9\n self._max_sample_rate = 20e6\n self._max_gain = 40\n\n self.error_codes = {\n 0: \"HACKRF_SUCCESS\",\n 1: \"HACKRF_TRUE\",\n 1337: \"TIMEOUT ERROR\",\n -2: \"HACKRF_ERROR_INVALID_PARAM\",\n -5: \"HACKRF_ERROR_NOT_FOUND\",\n -6: \"HACKRF_ERROR_BUSY\",\n -11: \"HACKRF_ERROR_NO_MEM\",\n -1000: \"HACKRF_ERROR_LIBUSB\",\n -1001: \"HACKRF_ERROR_THREAD\",\n -1002: \"HACKRF_ERROR_STREAMING_THREAD_ERR\",\n -1003: \"HACKRF_ERROR_STREAMING_STOPPED\",\n -1004: \"HACKRF_ERROR_STREAMING_EXIT_CALLED\",\n -4242: \"HACKRF NOT OPEN\",\n -9999: \"HACKRF_ERROR_OTHER\"\n }\n\n # self.__lut = np.zeros(0xffff + 1, dtype=np.complex64)\n # self.little_endian = False\n # for i in range(0, 0xffff + 1):\n # if self.little_endian:\n # real = (float(np.int8(i & 0xff))) * (1.0 / 128.0)\n # imag = (float(np.int8(i >> 8))) * (1.0 / 128.0)\n # else:\n # real = (float(np.int8(i >> 8))) * (1.0 / 128.0)\n # imag = (float(np.int8(i & 0xff))) * (1.0 / 128.0)\n #\n # self.__lut[i] = complex(real, imag)\n\n def reopen(self):\n if self.is_open:\n hackrf.reopen()\n\n def open(self, init=True):\n if not self.is_open:\n if init:\n ret = hackrf.setup()\n else:\n ret = hackrf.open()\n\n self.is_open = ret == self.success\n self.log_retcode(ret, \"open\")\n\n\n def close(self, exit=True):\n if self.is_open:\n logger.info(\"HackRF: Attempting to close...\")\n time.sleep(0.01)\n ret = hackrf.close()\n self.is_open = ret != self.success\n\n if self.is_open:\n logger.error(\"Failed to close HackRF\")\n else:\n logger.info(\"Successfully closed HackRF\")\n\n self.log_retcode(ret, \"close\")\n if exit:\n self.exit()\n\n def exit(self):\n return hackrf.exit()\n\n def start_rx_mode(self):\n if self.is_open:\n self.init_recv_buffer()\n self.set_device_parameters()\n ret = hackrf.start_rx_mode(self.callback_recv)\n self.is_receiving = ret == self.success\n\n if self.is_receiving:\n logger.info(\"HackRF: Starting receiving thread\")\n self._start_readqueue_thread()\n\n\n self.log_retcode(ret, \"start_rx_mode\")\n else:\n self.log_retcode(self.error_not_open, \"start_rx_mode\")\n\n def stop_rx_mode(self, msg):\n self.is_receiving = False\n\n logger.info(\"HackRF: Stopping RX Mode: \"+msg)\n\n if hasattr(self, \"read_queue_thread\") and self.read_queue_thread.is_alive():\n try:\n self.read_queue_thread.join(0.001)\n logger.info(\"HackRF: Joined read_queue_thread\")\n except RuntimeError:\n logger.error(\"HackRF: Could not join read_queue_thread\")\n\n\n if self.is_open:\n logger.info(\"stopping HackRF rx mode ({0})\".format(msg))\n logger.warning(\"closing because stop_rx_mode of HackRF is bugged and will not allow re receive without close\")\n self.close(exit=False)\n\n\n\n def switch_from_rx2tx(self):\n # https://github.com/mossmann/hackrf/pull/246/commits/4f9665fb3b43462e39a1592fc34f3dfb50de4a07\n self.reopen()\n\n def start_tx_mode(self, samples_to_send: np.ndarray = None, repeats=None, resume=False):\n if self.is_open:\n self.init_send_parameters(samples_to_send, repeats, resume=resume)\n retcode = hackrf.start_tx_mode(self.callback_send)\n\n if retcode == 
self.success:\n self.is_transmitting = True\n self._start_sendbuffer_thread()\n else:\n self.is_transmitting = False\n else:\n retcode = self.error_not_open\n self.log_retcode(retcode, \"start_tx_mode\")\n\n def stop_tx_mode(self, msg):\n self.is_transmitting = False\n try:\n self.send_buffer_reader.close()\n self.send_buffer.close()\n except AttributeError:\n logger.warning(\"HackRF: Could not close send buffer, because it was not open yet\")\n\n if self.is_open:\n logger.info(\"stopping HackRF tx mode ({0})\".format(msg))\n logger.info(\"closing because stop_tx_mode of HackRF is bugged and never returns\")\n self.close(exit=False)\n\n def set_device_bandwidth(self, bw):\n if self.is_open:\n retcode = hackrf.set_baseband_filter_bandwidth(bw)\n else:\n retcode = self.error_not_open\n self.log_retcode(retcode, \"set_bandwidth\", bw)\n\n def set_device_frequency(self, value):\n if self.is_open:\n retcode = hackrf.set_freq(value)\n else:\n retcode = self.error_not_open\n self.log_retcode(retcode, \"set_frequency\", value)\n\n def set_device_gain(self, gain):\n if self.is_open:\n hackrf.set_lna_gain(gain)\n hackrf.set_vga_gain(gain)\n hackrf.set_txvga_gain(gain)\n\n def set_device_sample_rate(self, sample_rate):\n if self.is_open:\n retcode = hackrf.set_sample_rate(sample_rate)\n else:\n retcode = self.error_not_open\n\n self.log_retcode(retcode, \"set_sample_rate\", sample_rate)\n\n def unpack_complex(self, buffer, nvalues: int):\n result = np.empty(nvalues, dtype=np.complex64)\n unpacked = np.frombuffer(buffer, dtype=[('r', np.int8), ('i', np.int8)])\n result.real = unpacked['r'] / 128.0\n result.imag = unpacked['i'] / 128.0\n return result\n\n\n def pack_complex(self, complex_samples: np.ndarray):\n assert complex_samples.dtype == np.complex64\n # tostring() is a compatibility (numpy<1.9) alias for tobytes(). Despite its name it returns bytes not strings.\n return (128 * complex_samples.view(np.float32)).astype(np.int8).tostring()\n" ]
[ [ "numpy.frombuffer", "numpy.empty" ] ]
johnson1228/g2p-seq2seq
[ "2fac457e066df48038a7682be69f3bd8b9fff916" ]
[ "g2p_seq2seq/g2p.py" ]
[ "# Copyright 2016 AC Technologies LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Binary for training translation models and decoding from them.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport re\nimport numpy as np\nimport six\nimport sys\n\nfrom tensor2tensor.data_generators.problem import problem_hparams_to_features\nimport tensorflow as tf\nfrom tensorflow.python.estimator import estimator as estimator_lib\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.util import compat\n\n# Dependency imports\n\nfrom tensor2tensor import models # pylint: disable=unused-import\n\nfrom g2p_seq2seq import g2p_problem\nfrom g2p_seq2seq import g2p_trainer_utils\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import usr_dir\nfrom tensor2tensor.utils import decoding\nfrom tensor2tensor.utils import trainer_lib\n\nfrom tensor2tensor.data_generators import text_encoder\nfrom six.moves import input\nfrom six import text_type\n\nEOS = text_encoder.EOS\n\n\nclass G2PModel(object):\n \"\"\"Grapheme-to-Phoneme translation model class.\n \"\"\"\n def __init__(self, params, train_path=\"\", dev_path=\"\", test_path=\"\",\n cleanup=False, p2g_mode=False):\n # Point out the current directory with t2t problem specified for g2p task.\n usr_dir.import_usr_dir(os.path.dirname(os.path.abspath(__file__)))\n self.params = params\n self.test_path = test_path\n if not os.path.exists(self.params.model_dir):\n os.makedirs(self.params.model_dir)\n\n # Register g2p problem.\n self.problem = registry._PROBLEMS[self.params.problem_name](\n self.params.model_dir, train_path=train_path, dev_path=dev_path,\n test_path=test_path, cleanup=cleanup, p2g_mode=p2g_mode)\n\n self.frozen_graph_filename = os.path.join(self.params.model_dir,\n \"frozen_model.pb\")\n self.inputs, self.features, self.input_fn = None, None, None\n self.mon_sess, self.estimator_spec, self.g2p_gt_map = None, None, None\n self.first_ex = False\n if train_path:\n self.train_preprocess_file_path, self.dev_preprocess_file_path =\\\n None, None\n self.estimator, self.decode_hp, self.hparams =\\\n self.__prepare_model(train_mode=True)\n self.train_preprocess_file_path, self.dev_preprocess_file_path =\\\n self.problem.generate_preprocess_data()\n\n elif os.path.exists(self.frozen_graph_filename):\n self.estimator, self.decode_hp, self.hparams =\\\n self.__prepare_model()\n self.__load_graph()\n self.checkpoint_path = tf.train.latest_checkpoint(self.params.model_dir)\n\n else:\n self.estimator, self.decode_hp, self.hparams =\\\n self.__prepare_model()\n\n def __prepare_model(self, train_mode=False):\n \"\"\"Prepare utilities for decoding.\"\"\"\n hparams = registry.hparams(self.params.hparams_set)\n hparams.problem = self.problem\n hparams.problem_hparams = self.problem.get_hparams(hparams)\n if 
self.params.hparams:\n tf.logging.info(\"Overriding hparams in %s with %s\",\n self.params.hparams_set,\n self.params.hparams)\n hparams = hparams.parse(self.params.hparams)\n trainer_run_config = g2p_trainer_utils.create_run_config(hparams,\n self.params)\n if train_mode:\n exp_fn = g2p_trainer_utils.create_experiment_fn(self.params, self.problem)\n self.exp = exp_fn(trainer_run_config, hparams)\n\n decode_hp = decoding.decode_hparams(self.params.decode_hparams)\n estimator = trainer_lib.create_estimator(\n self.params.model_name,\n hparams,\n trainer_run_config,\n decode_hparams=decode_hp,\n use_tpu=False)\n\n return estimator, decode_hp, hparams\n\n def __prepare_interactive_model(self):\n \"\"\"Create monitored session and generator that reads from the terminal and\n yields \"interactive inputs\".\n\n Due to temporary limitations in tf.learn, if we don't want to reload the\n whole graph, then we are stuck encoding all of the input as one fixed-size\n numpy array.\n\n We yield int32 arrays with shape [const_array_size]. The format is:\n [num_samples, decode_length, len(input ids), <input ids>, <padding>]\n\n Raises:\n ValueError: Could not find a trained model in model_dir.\n ValueError: if batch length of predictions are not same.\n \"\"\"\n\n def input_fn():\n \"\"\"Input function returning features which is a dictionary of\n string feature name to `Tensor` or `SparseTensor`. If it returns a\n tuple, first item is extracted as features. Prediction continues until\n `input_fn` raises an end-of-input exception (`OutOfRangeError` or\n `StopIteration`).\"\"\"\n gen_fn = decoding.make_input_fn_from_generator(\n self.__interactive_input_fn())\n example = gen_fn()\n example = decoding._interactive_input_tensor_to_features_dict(\n example, self.hparams)\n return example\n\n self.res_iter = self.estimator.predict(input_fn)\n\n if os.path.exists(self.frozen_graph_filename):\n return\n\n # List of `SessionRunHook` subclass instances. Used for callbacks inside\n # the prediction call.\n hooks = estimator_lib._check_hooks_type(None)\n\n # Check that model has been trained.\n # Path of a specific checkpoint to predict. 
The latest checkpoint\n # in `model_dir` is used\n checkpoint_path = estimator_lib.saver.latest_checkpoint(\n self.params.model_dir)\n if not checkpoint_path:\n raise ValueError('Could not find trained model in model_dir: {}.'\n .format(self.params.model_dir))\n\n with estimator_lib.ops.Graph().as_default() as graph:\n\n estimator_lib.random_seed.set_random_seed(\n self.estimator._config.tf_random_seed)\n self.estimator._create_and_assert_global_step(graph)\n\n self.features, input_hooks = self.estimator._get_features_from_input_fn(\n input_fn, estimator_lib.model_fn_lib.ModeKeys.PREDICT)\n self.estimator_spec = self.estimator._call_model_fn(\n self.features, None, estimator_lib.model_fn_lib.ModeKeys.PREDICT,\n self.estimator.config)\n try:\n self.mon_sess = estimator_lib.training.MonitoredSession(\n session_creator=estimator_lib.training.ChiefSessionCreator(\n checkpoint_filename_with_path=checkpoint_path,\n scaffold=self.estimator_spec.scaffold,\n config=self.estimator._session_config),\n hooks=hooks)\n except:\n # raise StandardError(\"Invalid model in {}\".format(self.params.model_dir))\n raise ValueError(\"Invalid model in {}\".format(self.params.model_dir))\n\n def decode_word(self, word):\n \"\"\"Decode word.\n\n Args:\n word: word for decoding.\n\n Returns:\n pronunciation: a decoded phonemes sequence for input word.\n \"\"\"\n num_samples = 1\n decode_length = 100\n vocabulary = self.problem.source_vocab\n # This should be longer than the longest input.\n const_array_size = 10000\n\n input_ids = vocabulary.encode(word)\n input_ids.append(text_encoder.EOS_ID)\n self.inputs = [num_samples, decode_length, len(input_ids)] + input_ids\n assert len(self.inputs) < const_array_size\n self.inputs += [0] * (const_array_size - len(self.inputs))\n\n result = next(self.res_iter)\n pronunciations = []\n if self.decode_hp.return_beams:\n beams = np.split(result[\"outputs\"], self.decode_hp.beam_size, axis=0)\n for k, beam in enumerate(beams):\n tf.logging.info(\"BEAM %d:\" % k)\n beam_string = self.problem.target_vocab.decode(\n decoding._save_until_eos(beam, is_image=False))\n pronunciations.append(beam_string)\n tf.logging.info(beam_string)\n else:\n if self.decode_hp.identity_output:\n tf.logging.info(\" \".join(map(str, result[\"outputs\"].flatten())))\n else:\n res = result[\"outputs\"].flatten()\n if text_encoder.EOS_ID in res:\n index = list(res).index(text_encoder.EOS_ID)\n res = res[0:index]\n pronunciations.append(self.problem.target_vocab.decode(res))\n return pronunciations\n\n def __interactive_input_fn(self):\n num_samples = self.decode_hp.num_samples if self.decode_hp.num_samples > 0\\\n else 1\n decode_length = self.decode_hp.extra_length\n input_type = \"text\"\n p_hparams = self.hparams.problem_hparams\n has_input = \"inputs\" in p_hparams.input_modality\n vocabulary = p_hparams.vocabulary[\"inputs\" if has_input else \"targets\"]\n # Import readline if available for command line editing and recall.\n try:\n import readline # pylint: disable=g-import-not-at-top,unused-variable\n except ImportError:\n pass\n while True:\n features = {\n \"inputs\": np.array(self.inputs).astype(np.int32),\n }\n for k, v in six.iteritems(problem_hparams_to_features(p_hparams)):\n features[k] = np.array(v).astype(np.int32)\n yield features\n\n def __run_op(self, sess, decode_op, feed_input):\n \"\"\"Run tensorflow operation for decoding.\"\"\"\n results = sess.run(decode_op,\n feed_dict={\"inp_decode:0\" : [feed_input]})\n return results\n\n def train(self):\n \"\"\"Run training.\"\"\"\n 
print('Training started.')\n execute_schedule(self.exp, self.params)\n\n def interactive(self):\n \"\"\"Interactive decoding.\"\"\"\n self.inputs = []\n self.__prepare_interactive_model()\n\n if os.path.exists(self.frozen_graph_filename):\n with tf.Session(graph=self.graph) as sess:\n saver = tf.train.import_meta_graph(self.checkpoint_path + \".meta\",\n import_scope=None,\n clear_devices=True)\n saver.restore(sess, self.checkpoint_path)\n inp = tf.placeholder(tf.string, name=\"inp_decode\")[0]\n decode_op = tf.py_func(self.decode_word, [inp], tf.string)\n while True:\n word = get_word()\n pronunciations = self.__run_op(sess, decode_op, word)\n print (\" \".join(pronunciations))\n else:\n while not self.mon_sess.should_stop():\n word = get_word()\n pronunciations = self.decode_word(word)\n print(\" \".join(pronunciations))\n # To make sure the output buffer always flush at this level\n sys.stdout.flush()\n\n def decode(self, output_file_path):\n \"\"\"Run decoding mode.\"\"\"\n outfile = None\n # Output results to a file if given.\n if output_file_path:\n tf.logging.info(\"Writing decodes into %s\" % output_file_path)\n outfile = tf.gfile.Open(output_file_path, \"w\")\n\n if os.path.exists(self.frozen_graph_filename):\n with tf.Session(graph=self.graph) as sess:\n inp = tf.placeholder(tf.string, name=\"inp_decode\")[0]\n decode_op = tf.py_func(self.__decode_from_file, [inp],\n [tf.string, tf.string])\n [inputs, decodes] = self.__run_op(sess, decode_op, self.test_path)\n else:\n inputs, decodes = self.__decode_from_file(self.test_path)\n \n # Output decoding results\n for _input, _decode in zip(inputs, decodes):\n _input = compat.as_text(_input)\n _decode = compat.as_text(_decode)\n if output_file_path:\n outfile.write(\"{} {}\\n\".format(_input, _decode))\n else:\n print(\"Raw prediction: {} {}\".format(_input, _decode))\n\n def evaluate(self):\n \"\"\"Run evaluation mode.\"\"\"\n words, pronunciations = [], []\n for case in self.problem.generator(self.test_path,\n self.problem.source_vocab,\n self.problem.target_vocab):\n word = self.problem.source_vocab.decode(case[\"inputs\"]).replace(\n EOS, \"\").strip()\n pronunciation = self.problem.target_vocab.decode(case[\"targets\"]).replace(\n EOS, \"\").strip()\n words.append(word)\n pronunciations.append(pronunciation)\n\n self.g2p_gt_map = create_g2p_gt_map(words, pronunciations)\n\n if os.path.exists(self.frozen_graph_filename):\n with tf.Session(graph=self.graph) as sess:\n inp = tf.placeholder(tf.string, name=\"inp_decode\")[0]\n decode_op = tf.py_func(self.calc_errors, [inp], \n [tf.int64, tf.int64, tf.int64, tf.int64])\n results = self.__run_op(sess, decode_op, self.test_path)\n\n else:\n results = self.calc_errors(self.test_path)\n \n word_correct, word_errors, phone_errors, total_ref_phones = results\n wer = 100.0 * word_errors / (word_correct + word_errors)\n per = 100.0 * phone_errors / total_ref_phones\n\n print(\"=\"*80)\n print(\"Total: {} words, {} phones\".\\\n format(word_correct + word_errors, total_ref_phones))\n print(\"Word errors: {} ({:.2f}%)\".format(word_errors, wer))\n print(\"Phone errors: {} ({:.2f}%)\".format(phone_errors, per))\n print(\"Total word errors: {}\".format(word_errors))\n print(\"Total phone errors: {}\".format(phone_errors))\n print(\"=\"*80)\n\n def freeze(self):\n \"\"\"Freeze pre-trained model.\"\"\"\n # We retrieve our checkpoint fullpath\n checkpoint = tf.train.get_checkpoint_state(self.params.model_dir)\n input_checkpoint = checkpoint.model_checkpoint_path\n\n # We precise the file fullname of 
our freezed graph\n absolute_model_folder = \"/\".join(input_checkpoint.split('/')[:-1])\n output_graph = absolute_model_folder + \"/frozen_model.pb\"\n\n # Before exporting our graph, we need to precise what is our output node\n # This is how TF decides what part of the Graph he has to keep and what\n # part it can dump\n # NOTE: this variable is plural, because you can have multiple output nodes\n output_node_names = []\n hparams = self.params.hparams.split(\",\")\n num_layers = [int(hp.split(\"=\")[1]) for hp in hparams \n if hp.startswith(\"num_hidden_layers\")][0]\n root_dir = \"transformer/parallel_0_4/transformer/transformer/body\"\n for i in range(num_layers):\n output_node_names.append(\"{}/encoder/layer_{}/self_attention/\".format(root_dir, i) +\\\n \"multihead_attention/dot_product_attention/attention_weights\")\n\n for i in range(num_layers):\n output_node_names.append(\"{}/decoder/layer_{}/self_attention/\".format(root_dir, i) +\\\n \"multihead_attention/dot_product_attention/attention_weights\")\n output_node_names.append(\"{}/decoder/layer_{}/encdec_attention/\".format(root_dir, i) +\\\n \"multihead_attention/dot_product_attention/attention_weights\")\n\n # We clear devices to allow TensorFlow to control on which device it will\n # load operations\n clear_devices = True\n # We import the meta graph and retrieve a Saver\n saver = tf.train.import_meta_graph(input_checkpoint + '.meta',\n clear_devices=clear_devices)\n\n # We retrieve the protobuf graph definition\n graph = tf.get_default_graph()\n input_graph_def = graph.as_graph_def()\n\n # We start a session and restore the graph weights\n with tf.Session() as sess:\n saver.restore(sess, input_checkpoint)\n\n # We use a built-in TF helper to export variables to constants\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, # The session is used to retrieve the weights\n input_graph_def, # The graph_def is used to retrieve the nodes\n output_node_names, # The output node names are used to select the\n #usefull nodes\n variable_names_blacklist=['global_step'])\n\n # Finally we serialize and dump the output graph to the filesystem\n with tf.gfile.GFile(output_graph, \"wb\") as output_graph_file:\n output_graph_file.write(output_graph_def.SerializeToString())\n print(\"%d ops in the final graph.\" % len(output_graph_def.node))\n\n def __load_graph(self):\n \"\"\"Load freezed graph.\"\"\"\n # We load the protobuf file from the disk and parse it to retrieve the\n # unserialized graph_def\n with tf.gfile.GFile(self.frozen_graph_filename, \"rb\") as frozen_graph_file:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(frozen_graph_file.read())\n\n # Then, we import the graph_def into a new Graph and returns it\n with tf.Graph().as_default() as self.graph:\n # The name var will prefix every op/nodes in your graph\n # Since we load everything in a new graph, this is not needed\n tf.import_graph_def(graph_def, name=\"import\")\n\n def __decode_from_file(self, filename):\n \"\"\"Compute predictions on entries in filename and write them out.\"\"\"\n\n if not self.decode_hp.batch_size:\n self.decode_hp.batch_size = 32\n tf.logging.info(\"decode_hp.batch_size not specified; default=%d\" %\n self.decode_hp.batch_size)\n\n p_hparams = self.hparams.problem_hparams\n inputs_vocab = p_hparams.vocabulary[\"inputs\"]\n targets_vocab = p_hparams.vocabulary[\"targets\"]\n problem_name = \"grapheme_to_phoneme_problem\"\n tf.logging.info(\"Performing decoding from a file.\")\n inputs = _get_inputs(filename)\n num_decode_batches 
= (len(inputs) - 1) // self.decode_hp.batch_size + 1\n\n def input_fn():\n \"\"\"Function for inputs generator.\"\"\"\n input_gen = _decode_batch_input_fn(\n num_decode_batches, inputs, inputs_vocab,\n self.decode_hp.batch_size, self.decode_hp.max_input_size)\n gen_fn = decoding.make_input_fn_from_generator(input_gen)\n example = gen_fn()\n return decoding._decode_input_tensor_to_features_dict(example,\n self.hparams)\n\n decodes = []\n result_iter = self.estimator.predict(input_fn)\n try:\n for result in result_iter:\n if self.decode_hp.return_beams:\n decoded_inputs = inputs_vocab.decode(\n decoding._save_until_eos(result[\"inputs\"], False))\n beam_decodes = []\n output_beams = np.split(result[\"outputs\"], self.decode_hp.beam_size,\n axis=0)\n for k, beam in enumerate(output_beams):\n decoded_outputs = targets_vocab.decode(\n decoding._save_until_eos(beam, False))\n beam_decodes.append(decoded_outputs)\n decodes.append(beam_decodes)\n else:\n decoded_inputs = inputs_vocab.decode(\n decoding._save_until_eos(result[\"inputs\"], False))\n decoded_outputs = targets_vocab.decode(\n decoding._save_until_eos(result[\"outputs\"], False))\n decodes.append(decoded_outputs)\n except:\n # raise StandardError(\"Invalid model in {}\".format(self.params.model_dir))\n raise ValueError(\"Invalid model in {}\".format(self.params.model_dir))\n\n return [inputs, decodes]\n\n def calc_errors(self, decode_file_path):\n \"\"\"Calculate a number of word and phone prediction errors.\"\"\"\n inputs, decodes = self.__decode_from_file(decode_file_path)\n\n word_correct, word_errors, phone_errors = 0, 0, 0\n total_ref_phones = 0\n word_set = set()\n for index, word in enumerate(inputs):\n if word in word_set:\n continue\n\n word_set.add(word)\n # Estimate #phones of the word\n ref_phone_count = np.mean([len(ref_str.split(\" \")) \n for ref_str in self.g2p_gt_map[word]])\n total_ref_phones += int(ref_phone_count)\n\n if self.decode_hp.return_beams:\n beam_correct_found = False\n for beam_decode in decodes[index]:\n if beam_decode in self.g2p_gt_map[word]:\n beam_correct_found = True\n break\n if beam_correct_found:\n word_correct += 1\n else:\n word_errors += 1\n # Estimate phone-level errors\n phone_error = phone_errors_for_single_word(decodes[index], \n self.g2p_gt_map[word])\n phone_errors += phone_error\n \n else:\n if decodes[index] in self.g2p_gt_map[word]:\n word_correct += 1\n else:\n word_errors += 1\n # Estimate phone-level errors\n phone_error = phone_errors_for_single_word([decodes[index]], \n self.g2p_gt_map[word])\n phone_errors += phone_error\n\n return word_correct, word_errors, phone_errors, total_ref_phones\n\n\ndef phone_errors_for_single_word(predicted_strs, ref_strs):\n \"\"\"\n Given decoded results (depending on beam size) and a list of ref \n pronunciations, estimate the phone-level edit distance. 
Return the min\n distance.\n \"\"\"\n phone_error_list = []\n for ref_str in ref_strs:\n for predicted_str in predicted_strs:\n d = phone_edit_distance(predicted_str, ref_str)\n phone_error_list.append(d)\n return min(phone_error_list)\n\n\ndef phone_edit_distance(predicted_str, ref_str):\n \"\"\"\n Estimate the edit distance between predicted and ref phone sequences.\n \"\"\"\n predicted_list = predicted_str.split(\" \")\n ref_list = ref_str.split(\" \")\n m, n = len(predicted_list), len(ref_list)\n dp = [[0] * (m+1) for _ in range(n+1)]\n dp[0][0] = 0\n \n for i in range(1, m+1):\n dp[0][i] = i\n \n for i in range(1, n+1):\n dp[i][0] = i\n \n for i in range(1, m+1):\n for j in range(1, n+1):\n if predicted_list[i-1] == ref_list[j-1]:\n dp[j][i] = dp[j-1][i-1]\n else:\n dp[j][i] = min(dp[j-1][i] + 1, dp[j][i-1] + 1, dp[j-1][i-1] + 1)\n \n return dp[n][m]\n\n\ndef get_word():\n \"\"\"Get next word in the interactive mode.\"\"\"\n word = \"\"\n try:\n word = input(\"> \")\n #if not issubclass(type(word), text_type):\n # word = text_type(word, encoding=\"utf-8\", errors=\"replace\")\n except EOFError:\n pass\n if not word:\n pass\n return word\n\n\ndef create_g2p_gt_map(words, pronunciations):\n \"\"\"Create grapheme-to-phoneme ground true mapping.\"\"\"\n g2p_gt_map = {}\n for word, pronunciation in zip(words, pronunciations):\n if word in g2p_gt_map:\n g2p_gt_map[word].append(pronunciation)\n else:\n g2p_gt_map[word] = [pronunciation]\n return g2p_gt_map\n\n\ndef _get_inputs(filename, delimiters=\"\\t \"):\n \"\"\"Returning inputs.\n\n Args:\n filename: path to file with inputs, 1 per line.\n delimiters: str, delimits records in the file.\n\n Returns:\n a list of inputs\n\n \"\"\"\n tf.logging.info(\"Getting inputs\")\n delimiters_regex = re.compile(\"[\" + delimiters + \"]+\")\n\n inputs = []\n with tf.gfile.Open(filename) as input_file:\n lines = input_file.readlines()\n for line in lines:\n if set(\"[\" + delimiters + \"]+$\").intersection(line):\n items = re.split(delimiters_regex, line.strip(), maxsplit=1)\n inputs.append(items[0])\n else:\n inputs.append(line.strip())\n return inputs\n\n\ndef _decode_batch_input_fn(num_decode_batches, inputs,\n vocabulary, batch_size, max_input_size):\n \"\"\"Decode batch\"\"\"\n for batch_idx in range(num_decode_batches):\n tf.logging.info(\"Decoding batch %d out of %d\" % (batch_idx, num_decode_batches))\n batch_length = 0\n batch_inputs = []\n for _inputs in inputs[batch_idx * batch_size:(batch_idx + 1) * batch_size]:\n input_ids = vocabulary.encode(_inputs)\n if max_input_size > 0:\n # Subtract 1 for the EOS_ID.\n input_ids = input_ids[:max_input_size - 1]\n input_ids.append(text_encoder.EOS_ID)\n batch_inputs.append(input_ids)\n if len(input_ids) > batch_length:\n batch_length = len(input_ids)\n final_batch_inputs = []\n for input_ids in batch_inputs:\n assert len(input_ids) <= batch_length\n encoded_input = input_ids + [0] * (batch_length - len(input_ids))\n final_batch_inputs.append(encoded_input)\n\n yield {\n \"inputs\": np.array(final_batch_inputs).astype(np.int32),\n \"problem_choice\": np.array(0).astype(np.int32),\n }\n\n\ndef execute_schedule(exp, params):\n if not hasattr(exp, params.schedule):\n raise ValueError(\n \"Experiment has no method %s, from --schedule\" % params.schedule)\n with profile_context(params):\n getattr(exp, params.schedule)()\n\n\[email protected]\ndef profile_context(params):\n if params.profile:\n with tf.contrib.tfprof.ProfileContext(\"t2tprof\",\n trace_steps=range(100),\n dump_steps=range(100)) as pctx:\n 
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()\n pctx.add_auto_profiling(\"op\", opts, range(100))\n yield\n else:\n yield\n" ]
[ [ "numpy.split", "tensorflow.gfile.GFile", "tensorflow.python.util.compat.as_text", "tensorflow.python.estimator.estimator.ops.Graph", "tensorflow.get_default_graph", "tensorflow.py_func", "tensorflow.python.estimator.estimator._check_hooks_type", "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.profiler.ProfileOptionBuilder.time_and_memory", "tensorflow.train.import_meta_graph", "tensorflow.python.estimator.estimator.saver.latest_checkpoint", "tensorflow.Session", "tensorflow.gfile.Open", "tensorflow.placeholder", "tensorflow.python.estimator.estimator.random_seed.set_random_seed", "tensorflow.logging.info", "numpy.array", "tensorflow.train.get_checkpoint_state", "tensorflow.train.latest_checkpoint", "tensorflow.python.estimator.estimator.training.ChiefSessionCreator", "tensorflow.python.framework.graph_util.convert_variables_to_constants", "tensorflow.GraphDef" ] ]
sebastiengilbert73/synthetic_heatmap
[ "8a1f21f4eaf5a56374b77a4238be97a7005cdb58" ]
[ "src/synthetic_heatmap/generators/stop_sign.py" ]
[ "from synthetic_heatmap.generator import Generator, RegularPolygonVertices, WarpAffinePoints, DownloadRandomImage\nimport cv2\nimport numpy as np\nimport random\nimport math\nimport urllib.request\nimport os\n\nclass StopSign(Generator):\n def __init__(self, octogon_diameter_range=(1.3, 1.3),\n font_scale_range=(0.5, 1.0),\n graylevel_range=(80, 200),\n noise_amplitude=10):\n #number_of_lines_range=(15, 30),\n #number_of_arcs_range=(15, 30)):\n\n parameters_dict = {}\n parameters_dict['octogon_diameter_range'] = octogon_diameter_range\n parameters_dict['font_scale_range'] = font_scale_range\n parameters_dict['graylevel_range'] = graylevel_range\n parameters_dict['noise_amplitude'] = noise_amplitude\n #parameters_dict['number_of_lines_range'] = number_of_lines_range\n #parameters_dict['number_of_arcs_range'] = number_of_arcs_range\n\n super().__init__(parameters_dict)\n self.font_types = [cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN, cv2.FONT_HERSHEY_DUPLEX\n ]\n #self.background_source_url = \"https://picsum.photos\"\n\n\n def Generate(self, image_sizeHW, maximum_number_of_trials=1000, debug_directory=None,\n apply_laplacian=False, background_image=None, word='STOP'):\n heatmap = np.zeros(image_sizeHW, dtype=np.uint8)\n dilation_kernel = np.ones((3, 3), dtype=np.uint8)\n\n input_image = background_image\n result_msg = None\n if background_image is None:\n color_img, result_msg = DownloadRandomImage(image_sizeHW=image_sizeHW)\n input_image = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n if input_image.shape != image_sizeHW:\n input_image = cv2.resize(input_image, image_sizeHW)\n\n # Write a random 'STOP'\n random_stop_img = np.zeros(image_sizeHW, dtype=np.uint8)\n random_stop_center = (round(image_sizeHW[1] * random.random()), round(image_sizeHW[0] * random.random()))\n font_type = random.choice(self.font_types)\n font_scale = self.RandomValueInRange('font_scale_range')\n text_gray_level = self.RandomValueInRange('graylevel_range')\n text_origin = (round(random_stop_center[0]), round(random_stop_center[1]))\n cv2.putText(random_stop_img, word, text_origin, font_type, font_scale, text_gray_level)\n random_stop_img = cv2.dilate(random_stop_img, dilation_kernel)\n\n input_image = cv2.max(input_image, random_stop_img)\n\n # Draw a random octagon\n random_octagon_diameter = self.RandomValueInRange('octogon_diameter_range') * 0.2 * min(image_sizeHW[0], image_sizeHW[1])\n random_octagon_center = (round(image_sizeHW[1] * random.random()), round(image_sizeHW[0] * random.random()))\n random_octagon_vertices_arr = RegularPolygonVertices(random_octagon_center, random_octagon_diameter, 8)\n random_octagon_vertices_arr = random_octagon_vertices_arr.astype(int)\n random_octagon_octagon_graylevel = self.RandomValueInRange('graylevel_range')\n #cv2.polylines(input_image, random_octagon_vertices_arr, True, random_octagon_octagon_graylevel)\n cv2.fillPoly(input_image, random_octagon_vertices_arr, random_octagon_octagon_graylevel)\n # Reflections\n if random.random() > 0.5:\n input_image = cv2.flip(input_image, 0)\n if random.random() > 0.5:\n input_image = cv2.flip(input_image, 1)\n\n # Compute the histogram\n \"\"\"histogram = cv2.calcHist([input_image], [0], None, [32], [0, 256])\n histogram = [h[0] for h in histogram]\n #print (\"Generate(): histogram = {}; len(histogram) = {}\".format(histogram, len(histogram)))\n peak_gray_level = 4 + 8 * HistogramPeak(histogram)\n \"\"\"\n\n # Write 'STOP'\n text_is_within_limits = False\n trialNdx = 1\n text_bounding_boxYXHW = None\n text_gray_level = 
np.mean(input_image) + 25 + round(25 * random.random())\n if text_gray_level > 255:\n text_gray_level = 255\n stop_sign_img = np.zeros(image_sizeHW, dtype=np.uint8)\n if len(word) > 0:\n while not text_is_within_limits and trialNdx <= maximum_number_of_trials:\n center = (round(image_sizeHW[1] * random.random()), round(image_sizeHW[0] * random.random()))\n font_type = random.choice(self.font_types)\n font_scale = self.RandomValueInRange('font_scale_range')\n #text_gray_level = self.RandomValueInRange('graylevel_range')\n text_origin = (round(center[0]), round(center[1]))\n cv2.putText(stop_sign_img, word, text_origin, font_type, font_scale, text_gray_level)\n #input_image = cv2.dilate(input_image, dilation_kernel)\n\n # Stretch the text vertically by 1.5 x\n H = image_sizeHW[0]\n alpha = 1.5\n affine_transformation_mtx = np.zeros((2, 3), dtype=float)\n affine_transformation_mtx[0, 0] = 1.0\n affine_transformation_mtx[1, 1] = alpha\n affine_transformation_mtx[1, 2] = H / 2 * (1 - alpha)\n stop_sign_img = cv2.warpAffine(stop_sign_img, affine_transformation_mtx, image_sizeHW)\n # Dilate\n stop_sign_img = cv2.dilate(stop_sign_img, np.ones((3, 3), dtype=np.uint8))\n # Find the bounding box around the text\n non_zero_points = np.transpose(np.nonzero(stop_sign_img))\n text_bounding_boxYXHW = cv2.boundingRect(np.array(non_zero_points))\n if text_bounding_boxYXHW[0] + text_bounding_boxYXHW[2] < image_sizeHW[0] - 8 and \\\n text_bounding_boxYXHW[1] + text_bounding_boxYXHW[3] < image_sizeHW[1] - 8:\n text_is_within_limits = True\n else: # The text touches the limit\n text_is_within_limits = False\n stop_sign_img = np.zeros(image_sizeHW, dtype=np.uint8)\n\n trialNdx += 1\n else: # word is \"\"\n text_bounding_boxYXHW = [stop_sign_img.shape[0]//2, stop_sign_img.shape[1]//2, stop_sign_img.shape[0]//6, stop_sign_img.shape[1]//6]\n\n diameter = self.RandomValueInRange('octogon_diameter_range') * math.sqrt(\n text_bounding_boxYXHW[3] ** 2 + text_bounding_boxYXHW[2] ** 2)\n octagon_center = (text_bounding_boxYXHW[1] + text_bounding_boxYXHW[3] / 2,\n text_bounding_boxYXHW[0] + text_bounding_boxYXHW[2] / 2)\n vertices_arr = RegularPolygonVertices(octagon_center, diameter, 8)\n vertices_arr = vertices_arr.astype(int)\n inner_vertices_arr = RegularPolygonVertices(octagon_center, 0.85 * diameter, 8)\n #inner_vertices_arr = inner_vertices_arr.astype(int)\n octagon_graylevel = text_gray_level\n stop_text_img = stop_sign_img.copy()\n cv2.fillPoly(stop_sign_img, vertices_arr, octagon_graylevel//2)\n cv2.polylines(stop_sign_img, vertices_arr, True, octagon_graylevel, thickness=3)\n # Re-write the text\n stop_sign_img = cv2.max(stop_sign_img, stop_text_img)\n\n # Add noise\n stop_sign_img += (self.parameters_dict['noise_amplitude'] * np.random.random(stop_sign_img.shape)).astype(np.uint8)\n\n # Blur the stop sign, such that its edges are not too sharp\n if random.random() > 0.5:\n stop_sign_img = cv2.blur(stop_sign_img, (3, 3))\n\n # Erase the input image in the octagon, such that the stop sign is on the foreground\n cv2.fillPoly(input_image, vertices_arr, 0)\n\n\n input_image = cv2.max(input_image, stop_sign_img)\n\n\n # Affine transformation\n linear_transformation_mtx = np.random.uniform(0.6, 1.5, (2, 2))\n linear_transformation_mtx[0, 1] = -0.3 + 0.6 * random.random()\n linear_transformation_mtx[1, 0] = -0.3 + 0.6 * random.random()\n translation = [image_sizeHW[1]/2 - (linear_transformation_mtx @ np.array([image_sizeHW[1]/2, image_sizeHW[0]/2]))[0],\n image_sizeHW[0] / 2 - (linear_transformation_mtx @ np.array(\n 
[image_sizeHW[1] / 2, image_sizeHW[0] / 2] ))[1] ]\n affine_transformation_mtx = np.zeros((2, 3))\n affine_transformation_mtx[0: 2, 0: 2] = linear_transformation_mtx\n affine_transformation_mtx[0, 2] = translation[0]\n affine_transformation_mtx[1, 2] = translation[1]\n input_image = cv2.warpAffine(input_image, affine_transformation_mtx, input_image.shape)\n\n affine_transformed_vertices = WarpAffinePoints(vertices_arr, affine_transformation_mtx)\n cv2.fillPoly(heatmap, affine_transformed_vertices, 255)\n # Check if the heatmap is non-empty\n number_of_non_zero_pixels = cv2.countNonZero(heatmap)\n if diameter > 0:\n circle_filling_ratio = number_of_non_zero_pixels / (math.pi * (diameter/2)**2)\n else:\n circle_filling_ratio = 0\n #print (\"number_of_non_zero_pixels = {}; circle_filling_ratio = {}\".format(number_of_non_zero_pixels, circle_filling_ratio))\n if circle_filling_ratio < 0.5:\n # The stop sign is not visible enough. Start again\n return self.Generate(image_sizeHW, maximum_number_of_trials, debug_directory,\n apply_laplacian, background_image, word)\n\n if debug_directory is not None:\n input_image_before_laplacian_filepath = os.path.join(debug_directory, \"stopSign_Generate_beforeLaplacian.png\")\n cv2.imwrite(input_image_before_laplacian_filepath, input_image)\n if apply_laplacian:\n # Compute the edges of the input image\n input_image = cv2.Laplacian(input_image, ddepth=cv2.CV_8U)\n\n return (input_image, heatmap, result_msg)\n\ndef HistogramPeak(histogram):\n peak_value = 0\n peakNdx = None\n for index in range(len(histogram)):\n if histogram[index] > peak_value:\n peak_value = histogram[index]\n peakNdx = index\n return peakNdx\n" ]
[ [ "numpy.random.random", "numpy.nonzero", "numpy.ones", "numpy.mean", "numpy.random.uniform", "numpy.array", "numpy.zeros" ] ]
giaba90/python-thesis
[ "8a6d951fc3a1e58b510b7f3f9d1df6ef109711e3" ]
[ "algoritmo2.py" ]
[ "from itertools import combinations\n\nimport numpy as np\nimport utility\n\ndef sol2(vet1, indice, vet_in):\n out = []\n while indice >= 1:\n # converto in lista la combinations\n vet2 = list(combinations(vet1, indice))\n for riga in vet2:\n # trasformo il vettore in input in un array\n tmp = np.array(riga)\n # somma vet sulla stessa riga e salvo il risultato in temp\n tmp = tmp.sum(axis=0)\n if (all(x < 2 for x in tmp)):\n # converto da binario ad interno\n out.append(utility.bin_to_int2(vet_in, vet1, riga, len(vet2[0])))\n if (not out):\n indice = indice - 1\n else:\n break\n return out" ]
[ [ "numpy.array" ] ]
Jingqiao-Zhao/DCASE2020-Task1-SubtaskB
[ "b9474ad68751a7201323364de34bd9630f76f74c" ]
[ "utilities/sparse_image_warp_pytorch.py" ]
[ "# Copyright 2019 RnD at Spoon Radio\r\n\r\n#\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n\r\n# you may not use this file except in compliance with the License.\r\n\r\n# You may obtain a copy of the License at\r\n\r\n#\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n#\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n\r\n# See the License for the specific language governing permissions and\r\n\r\n# limitations under the License.\r\n\r\n# ==============================================================================\r\n\r\n# import torch\r\n\r\n# import numpy as np\r\n\r\n# from torch.autograd import Variable\r\n\r\n# import librosa\r\n\r\nimport random\r\n\r\nimport numpy as np\r\n\r\n# import scipy.signal\r\n\r\nimport torch\r\n\r\n# import torchaudio\r\n\r\n# from torchaudio import transforms\r\n\r\n# import math\r\n\r\n# from torch.utils.data import DataLoader\r\n\r\n# from torch.utils.data import Dataset\r\n\r\n\r\n\r\n\r\n\r\ndef time_warp(spec, W=5):\r\n\r\n spec = spec.view(1, spec.shape[0], spec.shape[1])\r\n\r\n num_rows = spec.shape[1]\r\n\r\n spec_len = spec.shape[2]\r\n\r\n\r\n\r\n y = num_rows // 2\r\n\r\n horizontal_line_at_ctr = spec[0][y]\r\n\r\n assert len(horizontal_line_at_ctr) == spec_len\r\n\r\n\r\n\r\n point_to_warp = horizontal_line_at_ctr[random.randrange(W, spec_len - W)]\r\n\r\n assert isinstance(point_to_warp, torch.Tensor)\r\n\r\n\r\n\r\n # Uniform distribution from (0,W) with chance to be up to W negative\r\n\r\n dist_to_warp = random.randrange(-W, W)\r\n\r\n src_pts, dest_pts = torch.tensor([[[y, point_to_warp]]]), torch.tensor([[[y, point_to_warp + dist_to_warp]]])\r\n\r\n warped_spectro, dense_flows = SparseImageWarp.sparse_image_warp(spec, src_pts, dest_pts)\r\n\r\n return warped_spectro.squeeze(3)\r\n\r\n\r\n\r\n\r\n\r\ndef freq_mask(spec, F=15, num_masks=1, replace_with_zero=False):\r\n\r\n cloned = spec.clone()\r\n\r\n num_mel_channels = cloned.shape[1]\r\n\r\n\r\n\r\n for i in range(0, num_masks):\r\n\r\n f = random.randrange(0, F)\r\n\r\n f_zero = random.randrange(0, num_mel_channels - f)\r\n\r\n\r\n\r\n # avoids randrange error if values are equal and range is empty\r\n\r\n if (f_zero == f_zero + f): return cloned\r\n\r\n\r\n\r\n mask_end = random.randrange(f_zero, f_zero + f)\r\n\r\n if (replace_with_zero):\r\n\r\n cloned[0][f_zero:mask_end] = 0\r\n\r\n else:\r\n\r\n cloned[0][f_zero:mask_end] = cloned.mean()\r\n\r\n\r\n\r\n return cloned\r\n\r\n\r\n\r\n\r\n\r\ndef time_mask(spec, T=15, num_masks=1, replace_with_zero=False):\r\n\r\n cloned = spec.clone()\r\n\r\n len_spectro = cloned.shape[2]\r\n\r\n\r\n\r\n for i in range(0, num_masks):\r\n\r\n t = random.randrange(0, T)\r\n\r\n t_zero = random.randrange(0, len_spectro - t)\r\n\r\n\r\n\r\n # avoids randrange error if values are equal and range is empty\r\n\r\n if (t_zero == t_zero + t): return cloned\r\n\r\n\r\n\r\n mask_end = random.randrange(t_zero, t_zero + t)\r\n\r\n if (replace_with_zero):\r\n\r\n cloned[0][:, t_zero:mask_end] = 0\r\n\r\n else:\r\n\r\n cloned[0][:, t_zero:mask_end] = cloned.mean()\r\n\r\n return cloned\r\n\r\n\r\n\r\n\r\n\r\ndef sparse_image_warp(img_tensor,\r\n\r\n source_control_point_locations,\r\n\r\n dest_control_point_locations,\r\n\r\n interpolation_order=2,\r\n\r\n regularization_weight=0.0,\r\n\r\n num_boundaries_points=0):\r\n\r\n 
control_point_flows = (dest_control_point_locations - source_control_point_locations)\r\n\r\n\r\n\r\n batch_size, image_height, image_width = img_tensor.shape\r\n\r\n grid_locations = get_grid_locations(image_height, image_width)\r\n\r\n flattened_grid_locations = torch.tensor(flatten_grid_locations(grid_locations, image_height, image_width))\r\n\r\n\r\n\r\n flattened_flows = interpolate_spline(\r\n\r\n dest_control_point_locations,\r\n\r\n control_point_flows,\r\n\r\n flattened_grid_locations,\r\n\r\n interpolation_order,\r\n\r\n regularization_weight)\r\n\r\n\r\n\r\n dense_flows = create_dense_flows(flattened_flows, batch_size, image_height, image_width)\r\n\r\n\r\n\r\n warped_image = dense_image_warp(img_tensor, dense_flows)\r\n\r\n\r\n\r\n return warped_image, dense_flows\r\n\r\n\r\n\r\n\r\n\r\ndef get_grid_locations(image_height, image_width):\r\n\r\n \"\"\"Wrapper for np.meshgrid.\"\"\"\r\n\r\n\r\n\r\n y_range = np.linspace(0, image_height - 1, image_height)\r\n\r\n x_range = np.linspace(0, image_width - 1, image_width)\r\n\r\n y_grid, x_grid = np.meshgrid(y_range, x_range, indexing='ij')\r\n\r\n return np.stack((y_grid, x_grid), -1)\r\n\r\n\r\n\r\n\r\n\r\ndef flatten_grid_locations(grid_locations, image_height, image_width):\r\n\r\n return np.reshape(grid_locations, [image_height * image_width, 2])\r\n\r\n\r\n\r\n\r\n\r\ndef create_dense_flows(flattened_flows, batch_size, image_height, image_width):\r\n\r\n # possibly .view\r\n\r\n return torch.reshape(flattened_flows, [batch_size, image_height, image_width, 2])\r\n\r\n\r\n\r\n\r\n\r\ndef interpolate_spline(train_points, train_values, query_points, order, regularization_weight=0.0, ):\r\n\r\n # First, fit the spline to the observed data.\r\n\r\n w, v = solve_interpolation(train_points, train_values, order, regularization_weight)\r\n\r\n # Then, evaluate the spline at the query locations.\r\n\r\n query_values = apply_interpolation(query_points, train_points, w, v, order)\r\n\r\n\r\n\r\n return query_values\r\n\r\n\r\n\r\n\r\n\r\ndef solve_interpolation(train_points, train_values, order, regularization_weight):\r\n\r\n b, n, d = train_points.shape\r\n\r\n k = train_values.shape[-1]\r\n\r\n\r\n\r\n # First, rename variables so that the notation (c, f, w, v, A, B, etc.)\r\n\r\n # follows https://en.wikipedia.org/wiki/Polyharmonic_spline.\r\n\r\n # To account for python style guidelines we use\r\n\r\n # matrix_a for A and matrix_b for B.\r\n\r\n\r\n\r\n c = train_points\r\n\r\n f = train_values.float()\r\n\r\n\r\n\r\n matrix_a = phi(cross_squared_distance_matrix(c, c), order).unsqueeze(0) # [b, n, n]\r\n\r\n # if regularization_weight > 0:\r\n\r\n # batch_identity_matrix = array_ops.expand_dims(\r\n\r\n # linalg_ops.eye(n, dtype=c.dtype), 0)\r\n\r\n # matrix_a += regularization_weight * batch_identity_matrix\r\n\r\n\r\n\r\n # Append ones to the feature values for the bias term in the linear model.\r\n\r\n ones = torch.ones(1, dtype=train_points.dtype).view([-1, 1, 1])\r\n\r\n matrix_b = torch.cat((c, ones), 2).float() # [b, n, d + 1]\r\n\r\n\r\n\r\n # [b, n + d + 1, n]\r\n\r\n left_block = torch.cat((matrix_a, torch.transpose(matrix_b, 2, 1)), 1)\r\n\r\n\r\n\r\n num_b_cols = matrix_b.shape[2] # d + 1\r\n\r\n\r\n\r\n # In Tensorflow, zeros are used here. 
Pytorch gesv fails with zeros for some reason we don't understand.\r\n\r\n # So instead we use very tiny randn values (variance of one, zero mean) on one side of our multiplication.\r\n\r\n lhs_zeros = torch.randn((b, num_b_cols, num_b_cols)) / 1e10\r\n\r\n right_block = torch.cat((matrix_b, lhs_zeros),\r\n\r\n 1) # [b, n + d + 1, d + 1]\r\n\r\n lhs = torch.cat((left_block, right_block),\r\n\r\n 2) # [b, n + d + 1, n + d + 1]\r\n\r\n\r\n\r\n rhs_zeros = torch.zeros((b, d + 1, k), dtype=train_points.dtype).float()\r\n\r\n rhs = torch.cat((f, rhs_zeros), 1) # [b, n + d + 1, k]\r\n\r\n\r\n\r\n # Then, solve the linear system and unpack the results.\r\n\r\n X, LU = torch.solve(rhs, lhs)\r\n\r\n w = X[:, :n, :]\r\n\r\n v = X[:, n:, :]\r\n\r\n\r\n\r\n return w, v\r\n\r\n\r\n\r\n\r\n\r\ndef cross_squared_distance_matrix(x, y):\r\n\r\n \"\"\"Pairwise squared distance between two (batch) matrices' rows (2nd dim).\r\n\r\n Computes the pairwise distances between rows of x and rows of y\r\n\r\n Args:\r\n\r\n x: [batch_size, n, d] float `Tensor`\r\n\r\n y: [batch_size, m, d] float `Tensor`\r\n\r\n Returns:\r\n\r\n squared_dists: [batch_size, n, m] float `Tensor`, where\r\n\r\n squared_dists[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2\r\n\r\n \"\"\"\r\n\r\n x_norm_squared = torch.sum(torch.mul(x, x))\r\n\r\n y_norm_squared = torch.sum(torch.mul(y, y))\r\n\r\n\r\n\r\n x_y_transpose = torch.matmul(x.squeeze(0), y.squeeze(0).transpose(0, 1))\r\n\r\n\r\n\r\n # squared_dists[b,i,j] = ||x_bi - y_bj||^2 = x_bi'x_bi- 2x_bi'x_bj + x_bj'x_bj\r\n\r\n squared_dists = x_norm_squared - 2 * x_y_transpose + y_norm_squared\r\n\r\n\r\n\r\n return squared_dists.float()\r\n\r\n\r\n\r\n\r\n\r\ndef phi(r, order):\r\n\r\n \"\"\"Coordinate-wise nonlinearity used to define the order of the interpolation.\r\n\r\n See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.\r\n\r\n Args:\r\n\r\n r: input op\r\n\r\n order: interpolation order\r\n\r\n Returns:\r\n\r\n phi_k evaluated coordinate-wise on r, for k = r\r\n\r\n \"\"\"\r\n\r\n EPSILON = torch.tensor(1e-10)\r\n\r\n # using EPSILON prevents log(0), sqrt0), etc.\r\n\r\n # sqrt(0) is well-defined, but its gradient is not\r\n\r\n if order == 1:\r\n\r\n r = torch.max(r, EPSILON)\r\n\r\n r = torch.sqrt(r)\r\n\r\n return r\r\n\r\n elif order == 2:\r\n\r\n return 0.5 * r * torch.log(torch.max(r, EPSILON))\r\n\r\n elif order == 4:\r\n\r\n return 0.5 * torch.square(r) * torch.log(torch.max(r, EPSILON))\r\n\r\n elif order % 2 == 0:\r\n\r\n r = torch.max(r, EPSILON)\r\n\r\n return 0.5 * torch.pow(r, 0.5 * order) * torch.log(r)\r\n\r\n else:\r\n\r\n r = torch.max(r, EPSILON)\r\n\r\n return torch.pow(r, 0.5 * order)\r\n\r\n\r\n\r\n\r\n\r\ndef apply_interpolation(query_points, train_points, w, v, order):\r\n\r\n \"\"\"Apply polyharmonic interpolation model to data.\r\n\r\n Given coefficients w and v for the interpolation model, we evaluate\r\n\r\n interpolated function values at query_points.\r\n\r\n Args:\r\n\r\n query_points: `[b, m, d]` x values to evaluate the interpolation at\r\n\r\n train_points: `[b, n, d]` x values that act as the interpolation centers\r\n\r\n ( the c variables in the wikipedia article)\r\n\r\n w: `[b, n, k]` weights on each interpolation center\r\n\r\n v: `[b, d, k]` weights on each input dimension\r\n\r\n order: order of the interpolation\r\n\r\n Returns:\r\n\r\n Polyharmonic interpolation evaluated at points defined in query_points.\r\n\r\n \"\"\"\r\n\r\n query_points = query_points.unsqueeze(0)\r\n\r\n # First, compute the contribution from the rbf 
term.\r\n\r\n pairwise_dists = cross_squared_distance_matrix(query_points.float(), train_points.float())\r\n\r\n phi_pairwise_dists = phi(pairwise_dists, order)\r\n\r\n\r\n\r\n rbf_term = torch.matmul(phi_pairwise_dists, w)\r\n\r\n\r\n\r\n # Then, compute the contribution from the linear term.\r\n\r\n # Pad query_points with ones, for the bias term in the linear model.\r\n\r\n ones = torch.ones_like(query_points[..., :1])\r\n\r\n query_points_pad = torch.cat((\r\n\r\n query_points,\r\n\r\n ones\r\n\r\n ), 2).float()\r\n\r\n linear_term = torch.matmul(query_points_pad, v)\r\n\r\n\r\n\r\n return rbf_term + linear_term\r\n\r\n\r\n\r\n\r\n\r\ndef dense_image_warp(image, flow):\r\n\r\n \"\"\"Image warping using per-pixel flow vectors.\r\n\r\n Apply a non-linear warp to the image, where the warp is specified by a dense\r\n\r\n flow field of offset vectors that define the correspondences of pixel values\r\n\r\n in the output image back to locations in the source image. Specifically, the\r\n\r\n pixel value at output[b, j, i, c] is\r\n\r\n images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].\r\n\r\n The locations specified by this formula do not necessarily map to an int\r\n\r\n index. Therefore, the pixel value is obtained by bilinear\r\n\r\n interpolation of the 4 nearest pixels around\r\n\r\n (b, j - flow[b, j, i, 0], i - flow[b, j, i, 1]). For locations outside\r\n\r\n of the image, we use the nearest pixel values at the image boundary.\r\n\r\n Args:\r\n\r\n image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.\r\n\r\n flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.\r\n\r\n name: A name for the operation (optional).\r\n\r\n Note that image and flow can be of type tf.half, tf.float32, or tf.float64,\r\n\r\n and do not necessarily have to be the same type.\r\n\r\n Returns:\r\n\r\n A 4-D float `Tensor` with shape`[batch, height, width, channels]`\r\n\r\n and same type as input image.\r\n\r\n Raises:\r\n\r\n ValueError: if height < 2 or width < 2 or the inputs have the wrong number\r\n\r\n of dimensions.\r\n\r\n \"\"\"\r\n\r\n image = image.unsqueeze(3) # add a single channel dimension to image tensor\r\n\r\n batch_size, height, width, channels = image.shape\r\n\r\n\r\n\r\n # The flow is defined on the image grid. 
Turn the flow into a list of query\r\n\r\n # points in the grid space.\r\n\r\n grid_x, grid_y = torch.meshgrid(\r\n\r\n torch.arange(width), torch.arange(height))\r\n\r\n\r\n\r\n stacked_grid = torch.stack((grid_y, grid_x), dim=2).float()\r\n\r\n\r\n\r\n batched_grid = stacked_grid.unsqueeze(-1).permute(3, 1, 0, 2)\r\n\r\n\r\n\r\n query_points_on_grid = batched_grid - flow\r\n\r\n query_points_flattened = torch.reshape(query_points_on_grid,\r\n\r\n [batch_size, height * width, 2])\r\n\r\n # Compute values at the query points, then reshape the result back to the\r\n\r\n # image grid.\r\n\r\n interpolated = interpolate_bilinear(image, query_points_flattened)\r\n\r\n interpolated = torch.reshape(interpolated,\r\n\r\n [batch_size, height, width, channels])\r\n\r\n return interpolated\r\n\r\n\r\n\r\n\r\n\r\ndef interpolate_bilinear(grid,\r\n\r\n query_points,\r\n\r\n name='interpolate_bilinear',\r\n\r\n indexing='ij'):\r\n\r\n \"\"\"Similar to Matlab's interp2 function.\r\n\r\n Finds values for query points on a grid using bilinear interpolation.\r\n\r\n Args:\r\n\r\n grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.\r\n\r\n query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.\r\n\r\n name: a name for the operation (optional).\r\n\r\n indexing: whether the query points are specified as row and column (ij),\r\n\r\n or Cartesian coordinates (xy).\r\n\r\n Returns:\r\n\r\n values: a 3-D `Tensor` with shape `[batch, N, channels]`\r\n\r\n Raises:\r\n\r\n ValueError: if the indexing mode is invalid, or if the shape of the inputs\r\n\r\n invalid.\r\n\r\n \"\"\"\r\n\r\n if indexing != 'ij' and indexing != 'xy':\r\n\r\n raise ValueError('Indexing mode must be \\'ij\\' or \\'xy\\'')\r\n\r\n\r\n\r\n shape = grid.shape\r\n\r\n if len(shape) != 4:\r\n\r\n msg = 'Grid must be 4 dimensional. 
Received size: '\r\n\r\n raise ValueError(msg + str(grid.shape))\r\n\r\n\r\n\r\n batch_size, height, width, channels = grid.shape\r\n\r\n\r\n\r\n shape = [batch_size, height, width, channels]\r\n\r\n query_type = query_points.dtype\r\n\r\n grid_type = grid.dtype\r\n\r\n\r\n\r\n num_queries = query_points.shape[1]\r\n\r\n\r\n\r\n alphas = []\r\n\r\n floors = []\r\n\r\n ceils = []\r\n\r\n index_order = [0, 1] if indexing == 'ij' else [1, 0]\r\n\r\n unstacked_query_points = query_points.unbind(2)\r\n\r\n\r\n\r\n for dim in index_order:\r\n\r\n queries = unstacked_query_points[dim]\r\n\r\n\r\n\r\n size_in_indexing_dimension = shape[dim + 1]\r\n\r\n\r\n\r\n # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1\r\n\r\n # is still a valid index into the grid.\r\n\r\n max_floor = torch.tensor(size_in_indexing_dimension - 2, dtype=query_type)\r\n\r\n min_floor = torch.tensor(0.0, dtype=query_type)\r\n\r\n maxx = torch.max(min_floor, torch.floor(queries))\r\n\r\n floor = torch.min(maxx, max_floor)\r\n\r\n int_floor = floor.long()\r\n\r\n floors.append(int_floor)\r\n\r\n ceil = int_floor + 1\r\n\r\n ceils.append(ceil)\r\n\r\n\r\n\r\n # alpha has the same type as the grid, as we will directly use alpha\r\n\r\n # when taking linear combinations of pixel values from the image.\r\n\r\n alpha = torch.tensor(queries - floor, dtype=grid_type)\r\n\r\n min_alpha = torch.tensor(0.0, dtype=grid_type)\r\n\r\n max_alpha = torch.tensor(1.0, dtype=grid_type)\r\n\r\n alpha = torch.min(torch.max(min_alpha, alpha), max_alpha)\r\n\r\n\r\n\r\n # Expand alpha to [b, n, 1] so we can use broadcasting\r\n\r\n # (since the alpha values don't depend on the channel).\r\n\r\n alpha = torch.unsqueeze(alpha, 2)\r\n\r\n alphas.append(alpha)\r\n\r\n\r\n\r\n flattened_grid = torch.reshape(\r\n\r\n grid, [batch_size * height * width, channels])\r\n\r\n batch_offsets = torch.reshape(\r\n\r\n torch.arange(batch_size) * height * width, [batch_size, 1])\r\n\r\n\r\n\r\n # This wraps array_ops.gather. We reshape the image data such that the\r\n\r\n # batch, y, and x coordinates are pulled into the first dimension.\r\n\r\n # Then we gather. Finally, we reshape the output back. It's possible this\r\n\r\n # code would be made simpler by using array_ops.gather_nd.\r\n\r\n def gather(y_coords, x_coords, name):\r\n\r\n linear_coordinates = batch_offsets + y_coords * width + x_coords\r\n\r\n gathered_values = torch.gather(flattened_grid.t(), 1, linear_coordinates)\r\n\r\n return torch.reshape(gathered_values,\r\n\r\n [batch_size, num_queries, channels])\r\n\r\n\r\n\r\n # grab the pixel values in the 4 corners around each query point\r\n\r\n top_left = gather(floors[0], floors[1], 'top_left')\r\n\r\n top_right = gather(floors[0], ceils[1], 'top_right')\r\n\r\n bottom_left = gather(ceils[0], floors[1], 'bottom_left')\r\n\r\n bottom_right = gather(ceils[0], ceils[1], 'bottom_right')\r\n\r\n\r\n\r\n interp_top = alphas[1] * (top_right - top_left) + top_left\r\n\r\n interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left\r\n\r\n interp = alphas[0] * (interp_bottom - interp_top) + interp_top\r\n\r\n\r\n\r\n return interp" ]
[ [ "torch.transpose", "torch.max", "numpy.linspace", "torch.cat", "torch.zeros", "torch.pow", "torch.ones", "numpy.reshape", "torch.solve", "torch.reshape", "torch.randn", "torch.sqrt", "numpy.stack", "torch.tensor", "torch.mul", "torch.square", "torch.arange", "torch.ones_like", "torch.floor", "torch.min", "torch.unsqueeze", "torch.log", "torch.stack", "numpy.meshgrid", "torch.matmul" ] ]
DrewRust/lambdata2-drewrust
[ "c2fcd57bf898f5564225e06f465a1b11671b08cf" ]
[ "my_lambdata/ds_utilities.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n# from sklearn.datasets import load_wine\n# from pdb import set_trace as breakpoint\n# from IPython.display import display\n\ndef enlarge(n):\n ''' \n This function will multiple the input by 100 \n '''\n return n * 100\n\nclass Test_Split_Class:\n\n def __init__(self, df, X, y):\n self.df = df\n self.X = X\n self.y = y\n\n def my_train_val_test_split(self):\n train_1, test = train_test_split(\n self.df, test_size=0.15, random_state=42)\n train, val = train_test_split(\n train_1, test_size=0.15, random_state=42)\n return train, val, test\n\n def example_train_val_test_split(\n self, train_size=0.7, val_size=0.1, test_size=0.2,\n random_state=None, shuffle=True):\n X_train_val, X_test, y_train_val, y_test = train_test_split(\n self.X, self.y, test_size=test_size, random_state=random_state,\n shuffle=shuffle)\n X_train, X_val, y_train, y_val = train_test_split(\n X_train_val, y_train_val, test_size=val_size/(train_size+val_size),\n random_state=random_state, shuffle=shuffle)\n return X_train, X_val, X_test, y_train, y_val, y_test\n\n\nclass Others_Class:\n def __init__(self, df):\n self.df = df\n\n def delete_col(self, col_to_del):\n new_df = self.df\n final_df = new_df.drop(columns=[col_to_del])\n return final_df\n\n def find_nulls_func(self):\n my_list = []\n new_df = self.df\n my_list = new_df.isnull().sum().tolist()\n new_series = pd.Series(my_list, index=new_df.columns)\n return new_series\n\n def drop_nulls_cols(self):\n new_df = self.df\n final_df = new_df.dropna(axis=0)\n return final_df\n\n def convert_to_dates(self, dateColumn):\n new_df = self.df\n new_df['date_dtype'] = pd.to_datetime(dateColumn)\n return new_df\n\n def enlarge(self, n):\n \"\"\" Will multiply the input by 100 \"\"\"\n return n * 100\n\n\nif __name__ == '__main__':\n\n \"\"\"\n This is the dataset used and it works here better.\n Originally I had it above defined in the class.\n That is not the best place for it otherwise it gets repeatedly called.\n \"\"\"\n url = \"https://raw.githubusercontent.com/DrewRust/Kepler_Planet_data/master/SoccerMatches.csv\"\n soccer_df = pd.read_csv(url)\n\n \"\"\" This would load a built in wine dataset \"\"\"\n # raw_data = load_wine()\n # df = pd.DataFrame(data=raw_data['data'],\n # columns=raw_data['feature_names'])\n # df['target'] = raw_data['target']\n\n \"\"\" This is for the vscode editor \"\"\"\n # breakpoint allows for debugging and data exploration\n # breakpoint()\n # print(df.shape)\n\n \"\"\" Creating an object of the Others_Class \"\"\"\n first_obj = Others_Class(soccer_df)\n first_instance = first_obj.enlarge(5)\n second_instance = first_obj.convert_to_dates(soccer_df['date'])\n third_instance = first_obj.delete_col('league_id')\n fourth_instance = first_obj.find_nulls_func()\n fifth_instance = first_obj.drop_nulls_cols()\n\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "sklearn.model_selection.train_test_split", "pandas.Series" ] ]
saxenam06/Approximate-Dynamic-Programming
[ "de613c10e087ae6b4a87a1730104c59442b33797" ]
[ "plot.py" ]
[ "from config import GeneralConfig, DynamicsConfig, PlotConfig\nimport numpy as np\nimport torch\nimport time\nimport os\nfrom network import Actor, Critic\nfrom solver import Solver\nfrom utils import idplot, numpy2torch, step_relative, recover_absolute_state, cm2inch\nimport matplotlib.pyplot as plt\n\nimport dynamics\nS_DIM = 4\nA_DIM = 1\n\n\ndef plot_comparison(simu_dir, methods):\n '''\n Plot comparison figure among ADP, MPC & open-loop solution.\n Trajectory, tracking error and control signal plot\n\n Parameters\n ----------\n picture_dir: string\n location of figure saved.\n\n '''\n num_methods = len(methods)\n legends = methods # ['MPC-3','MPC-5','MPC-10','ADP','Open-loop']\n picture_dir = simu_dir + \"/Figures\"\n if not os.path.exists(picture_dir): os.mkdir(picture_dir)\n config = DynamicsConfig()\n trajectory_data = []\n heading_angle = []\n error_data = []\n psi_error_data = []\n control_plot_data = []\n utilities_data = []\n dy = dynamics.VehicleDynamics()\n\n def load_data(method):\n if method.startswith('MPC'):\n pred_steps = method.split('-')[1]\n state_fname, control_fname = 'MPC_' + pred_steps + '_state.txt', \\\n 'MPC_' + pred_steps + '_control.txt'\n state = np.loadtxt(os.path.join(simu_dir, state_fname))\n control = np.loadtxt(os.path.join(simu_dir, control_fname))\n elif method.startswith('ADP'):\n state = np.loadtxt(os.path.join(simu_dir, 'ADP_state.txt'))\n control = np.loadtxt(os.path.join(simu_dir, 'ADP_control.txt'))\n elif method.startswith('OP'):\n state = np.loadtxt(os.path.join(simu_dir, 'Open_loop_state.txt'))\n control = np.loadtxt(os.path.join(simu_dir, 'Open_loop_control.txt'))\n else:\n raise KeyError('invalid methods')\n trajectory = (state[:, 4], state[:, 0])\n heading = (state[:, 4], 180 / np.pi * state[:, 2])\n ref = dy.reference_trajectory(numpy2torch(state[:, 4], state[:, 4].shape)).numpy()\n error = (state[:, 4], state[:, 0] - ref[:, 0])\n #if method.startswith('ADP'):\n # error[1][5:] = error[1][5:] + 0.0013\n # error[1][5:] = 0.98 * error[1][5:]\n psi_error = (state[:, 4], 180 / np.pi * (state[:, 2] - ref[:, 2]))\n control_tuple = (state[1:, 4], 180 / np.pi * control)\n utilities = 6 * (state[1:, 0]) ** 2 + 80 * control ** 2\n utilities_tuple = (state[1:, 4], utilities)\n\n trajectory_data.append(trajectory)\n heading_angle.append(heading)\n error_data.append(error)\n psi_error_data.append(psi_error)\n control_plot_data.append(control_tuple)\n utilities_data.append(utilities_tuple)\n\n for method in methods:\n load_data(method)\n idplot(trajectory_data, num_methods, \"xy\",\n fname=os.path.join(picture_dir, 'trajectory.png'),\n xlabel=\"Longitudinal position [m]\",\n ylabel=\"Lateral position [m]\",\n legend=legends,\n legend_loc=\"lower left\"\n )\n idplot(utilities_data, num_methods, \"xy\",\n fname=os.path.join(picture_dir, 'utilities.png'),\n xlabel=\"Longitudinal position [m]\",\n ylabel=\"Utilities\",\n legend=legends,\n legend_loc=\"lower left\"\n )\n idplot(heading_angle, num_methods, \"xy\",\n fname=os.path.join(picture_dir, 'trajectory_heading_angle.png'),\n xlabel=\"Longitudinal position [m]\",\n ylabel=r\"Heading angle [$\\degree$]\",\n legend=legends,\n legend_loc=\"lower left\"\n )\n idplot(error_data, num_methods, \"xy\",\n fname=os.path.join(picture_dir, 'trajectory_error.png'),\n xlabel=\"Longitudinal position [m]\",\n ylabel=\"Lateral position error [m]\",\n legend=legends,\n legend_loc=\"upper left\"\n )\n idplot(psi_error_data, num_methods, \"xy\",\n fname=os.path.join(picture_dir, 'head_angle_error.png'),\n 
xlabel=\"Longitudinal position [m]\",\n ylabel=r\"Head angle error [$\\degree$]\",\n legend=legends,\n legend_loc=\"lower left\"\n )\n idplot(control_plot_data, num_methods, \"xy\",\n fname=os.path.join(picture_dir, 'control.png'),\n xlabel=\"Longitudinal position [m]\",\n ylabel=r\"Steering angle [$\\degree$]\",\n legend=legends,\n legend_loc=\"upper left\"\n )\n\n\ndef adp_simulation_plot(simu_dir):\n '''\n Simulate and plot trajectory and control after ADP training algorithm.\n\n Parameters\n ----------\n simu_dir: string\n location of data and figures saved.\n\n '''\n state_history = np.loadtxt(os.path.join(simu_dir, 'ADP_state.txt'))\n control_history = np.loadtxt(os.path.join(simu_dir, 'ADP_control.txt'))\n trajectory = (state_history[:, -1], state_history[:, 0])\n figures_dir = simu_dir + \"/Figures\"\n os.makedirs(figures_dir, exist_ok=True)\n idplot(trajectory, 1, \"xy\",\n fname=os.path.join(figures_dir, 'adp_trajectory.png'),\n xlabel=\"longitudinal position [m]\",\n ylabel=\"Lateral position [m]\",\n legend=[\"trajectory\"],\n legend_loc=\"upper left\"\n )\n u_lat = (state_history[:, -1], state_history[:, 1])\n psi =(state_history[:, -1], state_history[:, 2])\n omega = (state_history[:, -1], state_history[:, 3])\n data = [u_lat, psi, omega]\n legend=[\"$u_{lat}$\", \"$\\psi$\", \"$\\omega$\"]\n idplot(data, 3, \"xy\",\n fname=os.path.join(figures_dir, 'adp_other_state.png'),\n xlabel=\"longitudinal position [m]\",\n legend=legend\n )\n control_history_plot = (state_history[1:, -1], 180 / np.pi * control_history)\n idplot(control_history_plot, 1, \"xy\",\n fname=os.path.join(figures_dir, 'adp_control.png'),\n xlabel=\"longitudinal position [m]\",\n ylabel=\"steering angle [degree]\"\n )\n\ndef plot_ref_and_state(log_dir, simu_dir, ref='angle', figsize_scalar=1, ms_size=2.0):\n '''\n\n Args:\n log_dir: str, model directory.\n simu_dir: str, simulation directory.\n ref: 'pos' or 'angle', which state to plot.\n\n Returns:\n\n '''\n config = GeneralConfig()\n S_DIM = config.STATE_DIM\n A_DIM = config.ACTION_DIM\n policy = Actor(S_DIM, A_DIM)\n value = Critic(S_DIM, A_DIM)\n config = DynamicsConfig()\n solver=Solver()\n load_dir = log_dir\n policy.load_parameters(load_dir)\n value.load_parameters(load_dir)\n statemodel_plt = dynamics.VehicleDynamics()\n\n # Open-loop reference\n x_init = [1.0, 0.0, 0.0, 0.0, 15 * np.pi]\n index = 0 if ref == 'pos' else 2\n for step in [3,4,5]:\n cal_time = 0\n state = torch.tensor([x_init])\n state.requires_grad_(False)\n x_ref = statemodel_plt.reference_trajectory(state[:, -1])\n state_r = state.detach().clone()\n state_r[:, 0:4] = state_r[:, 0:4] - x_ref\n\n state_r_history = state.detach().numpy()\n state_history = []\n control_history = []\n ref_history = []\n fig_size = (PlotConfig.fig_size * figsize_scalar, PlotConfig.fig_size * figsize_scalar)\n _, ax = plt.subplots(figsize=cm2inch(*fig_size), dpi=PlotConfig.dpi)\n\n for i in range(step): # plot_length\n x = state_r.tolist()[0]\n time_start = time.time()\n state_r_predict, control = solver.mpcSolver(x, 10)\n cal_time += time.time() - time_start\n u = np.array(control[0], dtype='float32').reshape(-1, config.ACTION_DIM)\n u = torch.from_numpy(u)\n\n state, state_r, x_ref =step_relative(statemodel_plt, state, u)\n\n\n state_predict, ref_predict = recover_absolute_state(state_r_predict, x_ref.numpy().squeeze())\n ref_history.append(ref_predict[0])\n state_r_history = np.append(state_r_history, np.expand_dims(state_r_predict[0], axis=0), axis=0)\n state_history.append(state_predict[0])\n if i < 
step - 1:\n plt.plot(state_r_predict[:, -1], state_predict[:, index], linestyle='--', marker='D', color='deepskyblue', ms=ms_size)\n plt.plot(state_r_predict[:, -1], ref_predict[:, index], linestyle='--', color='grey', marker='D', ms=ms_size)\n else:\n plt.plot(state_r_predict[:, -1], state_predict[:, index], linestyle='--', label='Predictive trajectory', color='deepskyblue', marker='D', ms=ms_size)\n plt.plot(state_r_predict[:, -1], ref_predict[:, index], linestyle='--', color='grey',label='Predictive reference', marker='D', ms=ms_size)\n\n ref_history = np.array(ref_history)\n state_history = np.array(state_history)\n plt.plot(state_r_history[1:, -1], state_history[:, index], color='blue', label='Real trajectory', marker='1', ms=ms_size)\n plt.plot(state_r_history[1:, -1], ref_history[:, index], linestyle='-.', color='black', label='Real reference',\n marker='1', ms=ms_size)\n\n plt.tick_params(labelsize=PlotConfig.tick_size)\n labels = ax.get_xticklabels() + ax.get_yticklabels()\n [label.set_fontname(PlotConfig.tick_label_font) for label in labels]\n plt.legend(loc='best', prop=PlotConfig.legend_font)\n plt.xlim([47, 57])\n if ref == 'pos':\n plt.ylim([0.990, 1.002])\n elif ref == 'angle':\n plt.ylim([-0.006, 0.0005])\n figures_dir = simu_dir + \"/Figures\"\n os.makedirs(figures_dir, exist_ok=True)\n fig_name = 'reference_' + ref + '_' + str(step) + '.png'\n fig_path = os.path.join(figures_dir, fig_name)\n plt.savefig(fig_path)\n\n\ndef plot_phase_plot(methods, log_dir, simu_dir, figsize_scalar=1, ms_size=2.0):\n '''\n\n Args:\n log_dir: str, model directory.\n simu_dir: str, simulation directory.\n ref: 'pos' or 'angle', which state to plot.\n\n Returns:\n\n '''\n config = GeneralConfig()\n S_DIM = config.STATE_DIM\n A_DIM = config.ACTION_DIM\n policy = Actor(S_DIM, A_DIM)\n value = Critic(S_DIM, A_DIM)\n config = DynamicsConfig()\n solver = Solver()\n load_dir = log_dir\n policy.load_parameters(load_dir)\n value.load_parameters(load_dir)\n statemodel_plt = dynamics.VehicleDynamics()\n\n # Open-loop reference\n x_init = [1.01, 0.0, 0.0, 0.0, 15 * np.pi]\n index = 2\n state = torch.tensor([x_init])\n for step in range(16):\n if step % 5 != 0:\n state, state_r, x_ref = step_relative(statemodel_plt, state, u)\n continue\n cal_time = 0\n state.requires_grad_(False)\n x_ref = statemodel_plt.reference_trajectory(state[:, -1])\n state_r = state.detach().clone()\n state_r[:, 0:4] = state_r[:, 0:4] - x_ref\n\n for i in range(1): # plot_length\n fig_size = (PlotConfig.fig_size * figsize_scalar, PlotConfig.fig_size * figsize_scalar)\n _, ax = plt.subplots(figsize=cm2inch(*fig_size), dpi=PlotConfig.dpi)\n for method in methods:\n if method.startswith('ADP'):\n state_r_predict = []\n ref_state = state_r[:, 0:4]\n for virtual_step in range(50):\n u = policy.forward(ref_state)\n state, _, _, _, _, _, _ = statemodel_plt.step(state, u)\n ref_state = state.detach().clone()[:, 0:4] - x_ref\n state_r_predict.append(ref_state.numpy().squeeze())\n state_r_predict = np.array(state_r_predict)\n label = 'ADP'\n color = 'deepskyblue'\n\n\n elif method.startswith('MPC'):\n pred_steps = int(method.split('-')[1])\n x = state_r.tolist()[0]\n time_start = time.time()\n state_r_predict, control = solver.mpcSolver(x, pred_steps)\n cal_time += time.time() - time_start\n u = np.array(control[0], dtype='float32').reshape(-1, config.ACTION_DIM)\n u = torch.from_numpy(u)\n label = 'MPC ' + str(pred_steps) + ' steps'\n color = 'red'\n # state_predict, ref_predict = recover_absolute_state(state_r_predict, 
x_ref.numpy().squeeze())\n\n else: continue\n\n plt.plot(state_r_predict[:, 0], state_r_predict[:, index], linestyle='--', label=label,\n marker='D', ms=ms_size)\n\n plt.scatter([0], [0], color='red',\n label='Ref point', marker='o', s= 4 * ms_size)\n plt.tick_params(labelsize=PlotConfig.tick_size)\n labels = ax.get_xticklabels() + ax.get_yticklabels()\n [label.set_fontname(PlotConfig.tick_label_font) for label in labels]\n plt.legend(loc='best', prop=PlotConfig.legend_font)\n figures_dir = simu_dir + \"/Figures\"\n os.makedirs(figures_dir, exist_ok=True)\n fig_name = 'phase_plot_' + str(step) + '.png'\n fig_path = os.path.join(figures_dir, fig_name)\n plt.xlabel(\"Lateral position [m]\", PlotConfig.label_font)\n plt.ylabel(\"Heading angle [deg]\", PlotConfig.label_font)\n plt.tight_layout(pad=PlotConfig.pad)\n plt.savefig(fig_path)\n\n state, state_r, x_ref = step_relative(statemodel_plt, state, u)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "numpy.expand_dims", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "torch.from_numpy", "matplotlib.pyplot.savefig", "torch.tensor", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.ylabel" ] ]
schackv/shapewarp
[ "36c69a641fc06239eda48b9e7011e3e86f9f7da0" ]
[ "shapewarp/ASM.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 18 11:18:20 2014\n\n@author: schackv\n\"\"\"\n\nfrom . import GPA\nimport numpy as np\n\n\nclass ASM:\n \n def build(self,landmarks):\n \"\"\"Build an active shape model from the landmarks given.\n Landmarks are expected to be a numpy N x 2*p array \n where p is the number of landmarks.\n \"\"\"\n \n # Do Generalized Procrustes analysis\n mu, S, Xnew = GPA.generalized_procrustes_2d(landmarks)\n \n self.k = len(mu)/2 # Number of points\n self.MeanShape = np.array(mu)\n self.Covariance = np.array(S)\n self.AlignedShapes = np.array(Xnew)\n \n # PCA on shapes\n eigvals, eigvecs = np.linalg.eig(S)\n eigvals = np.abs(eigvals)\n eigvecs = np.abs(eigvecs)\n idx = np.argsort(-eigvals) # Ensure descending sort\n eigvals = eigvals[idx]\n eigvecs = eigvecs[:,idx]\n \n self.Scores = np.array(Xnew.T * eigvecs)\n self.MeanScores = np.array(mu.T * eigvecs)\n self.VarianceExplained = np.array(np.cumsum(eigvals/np.sum(eigvals)))\n \n # Build modes for up to 95% variance\n npcs,_ = index_of_true(self.VarianceExplained>0.95)\n npcs += 1\n\n M = []\n for i in range(0,npcs-1):\n M.append(np.array(np.sqrt(eigvals[i]) * eigvecs[:,i]))\n self.PCModes = M\n \n \n\n\n \ndef index_of_true(arr):\n for index, item in enumerate(arr):\n if item == True:\n return index, item\n \n\n " ]
[ [ "numpy.abs", "numpy.sqrt", "numpy.linalg.eig", "numpy.argsort", "numpy.array", "numpy.sum" ] ]
anilgeorge04/learn-ds
[ "f1a9c638e29270d4d72fc3aed0af3ccea8c53350" ]
[ "python-play/hackerstat.py" ]
[ "# Hacker Statistics\r\n# In a 100 storey building, move up and down floors on the roll of dice\r\n# Move up +1 on getting a 3 or 4 or 5\r\n# Move down -1 on getting 1 or 2\r\n# On 6, roll die again and move +n steps (n is the number on second roll)\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nnp.random.seed(123)\r\n\r\n# Initialize all walks\r\nall_walks = []\r\n\r\n# 5000 random walks\r\nfor i in range(5000):\r\n # Random walk starting point\r\n random_walk = [0]\r\n # Roll the dice 100 times\r\n for x in range(100):\r\n step = random_walk[-1]\r\n dice = np.random.randint(1, 7)\r\n # Move up or down floors\r\n if dice <= 2:\r\n step = max(0, step - 1)\r\n elif dice <= 5:\r\n step += 1\r\n else:\r\n step += np.random.randint(1, 7)\r\n\r\n # There's a 0.1% chance of falling down\r\n if np.random.rand() <= 0.001:\r\n step = 0\r\n\r\n # Append taken step to Random Walk\r\n random_walk.append(step)\r\n # print(random_walk)\r\n # plt.plot(random_walk)\r\n # plt.show()\r\n all_walks.append(random_walk)\r\n\r\nnp_aw_t = np.transpose(np.array(all_walks))\r\nplt.plot(np_aw_t)\r\nplt.show()\r\nplt.clf()\r\n\r\nends = np_aw_t[-1, :]\r\nplt.hist(ends, bins=10)\r\nplt.show()" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.plot", "matplotlib.pyplot.clf", "numpy.random.rand", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "numpy.random.randint" ] ]
erykoff/redmapper_duster
[ "9058d84905535230c330803de18575670da03bf4" ]
[ "duster/pdfs.py" ]
[ "import numpy as np\n\n\ndef p_dust(rho_0, b, rho_min, rho_vals):\n \"\"\"Compute p_dust(rho | rho_0, b, rho_min)\n\n Parameters\n ----------\n rho_0 : `float`\n The value of rho_0\n b : `float`\n The value of b\n rho_min : `float`\n The value of rho_min\n rho_vals : `np.ndarray`\n Array of rho values to compute p_dust\n\n Returns\n -------\n p_dust : `np.ndarray`\n p_dust evaluated at rho_vals given rho0, b, rho_min.\n \"\"\"\n lo = (rho_vals <= rho_min)\n hi = ~lo\n\n B = (b + 1)/(rho_0*((rho_min/rho_0)**b)*(b + 1 + (rho_min/rho_0)))\n\n p_dust = np.zeros(rho_vals.size)\n p_dust[lo] = B*((rho_vals[lo]/rho_0)**b)\n p_dust[hi] = B*((rho_min/rho_0)**b)*(np.exp(-(rho_vals[hi] - rho_min)/rho_0))\n\n return p_dust\n\n\ndef compute_normalized_rho_pars(u, b):\n \"\"\"Compute normalized rho_0, rho_min given b, u=rho_min/rho_0\n\n Parameters\n ----------\n u : `float`\n u = rho_min/rho_0 parameter.\n b : `float`\n b parameter\n\n Returns\n -------\n rho_0, rho_min : (`float`, `float`)\n \"\"\"\n rho_0 = ((b + 2)/(b + 1))*((u + b + 1)/(u**2. + (b + 2)*(u + 1)))\n rho_min = rho_0*u\n\n return rho_0, rho_min\n" ]
[ [ "numpy.exp", "numpy.zeros" ] ]
zehuilu/Learning-from-Directional-Corrections
[ "762a05b0d169c0db12932b8bc3f5b4abfa5d6fb9" ]
[ "experiments/run_quad_realtime.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport sys\nsys.path.append(os.getcwd()+'/LFC')\nsys.path.append(os.getcwd()+'/JinEnv')\nsys.path.append(os.getcwd()+'/lib')\nimport numpy as np\nfrom casadi import *\nimport transforms3d\nfrom QuadAlgorithmRealtime import QuadAlgorithmRealtime\nfrom QuadStates import QuadStates\nfrom QuadPara import QuadPara\n\n\nTIME_SCALE = 3.5\nCASE_NUM = 2\n\n\nif __name__ == '__main__':\n # define the quadrotor dynamics parameters\n QuadParaInput = QuadPara(inertial_list=[1.0, 1.0, 1.0], mass=1.0, l=1.0, c=0.02)\n\n # define the initial condition\n R = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) # rotation matrix in numpy 2D array\n QuadInitialCondition = QuadStates()\n QuadInitialCondition.position = [-1.8, -0.9, 0.6]\n QuadInitialCondition.velocity = [0, 0, 0]\n QuadInitialCondition.attitude_quaternion = transforms3d.quaternions.mat2quat(R).tolist()\n QuadInitialCondition.angular_velocity = [0, 0, 0]\n\n # define the desired goal\n R = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) # rotation matrix in numpy 2D array\n QuadDesiredStates = QuadStates()\n QuadDesiredStates.position = [1.8, -0.9, 1.0]\n QuadDesiredStates.velocity = [0, 0, 0]\n QuadDesiredStates.attitude_quaternion = transforms3d.quaternions.mat2quat(R).tolist()\n QuadDesiredStates.angular_velocity = [0, 0, 0]\n\n # create the quadrotor algorithm solver\n Solver = QuadAlgorithmRealtime(QuadParaInput, time_step=0.1, time_scale=TIME_SCALE, case_num=CASE_NUM)\n\n # solve\n # horizon is number of steps\n Solver.run(QuadInitialCondition, QuadDesiredStates, iter_num=30, time_horizon=4, save_flag=True)\n" ]
[ [ "numpy.array" ] ]
AndySrb/ProracunOsvetljenjaUlica
[ "1eeeb5f92c24889819594646ee22639943035dd9" ]
[ "main.py" ]
[ "#!/usr/bin/env python3\n\nimport json\nimport math\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass vector2F:\n def __init__(self,x,y):\n self.x=x\n self.y=y\n\nclass vector3F:\n def __init__(self,x,y,z):\n self.x=x\n self.y=y\n self.z=z\n\nclass mesuringPoints:\n def __init__(self,x,y,a,tg,angle,cos,cos3,illuminated):\n self.x = x\n self.y = y\n self.a = a\n self.tg = tg\n self.angle = angle\n self.cos = cos\n self.cos3 = cos3\n self.illuminated = illuminated\n\nlampIlumCarac = [] #lamp iluminacion characteristics\n\nwith open('./data.json') as json_file:\n data = json.load(json_file)\n for p in data['angle']:\n print('from: ' + str(p['form']))\n print('to: ' + str(p['to']))\n print('value: ' + str(p['value']))\n print('const: ' + str(p['const']))\n lampIlumCarac.append([p['form'],p['to'],p['value'],p['const']])\n print('')\n\nlampFlow = data['lamp']['lampFlow']\nbulbFlow = data['lamp']['bulbFlow']\n\n\nprint(lampIlumCarac)\nprint(lampFlow)\nprint(bulbFlow)\n\nlampPos = []\n\nfor p in data['lampPos']:\n lampPos.append(vector3F(p['x'],p['y'],p['z']))\n\n\nroadLenght = data['roadLenght'] #Lenght betwen 2 lamps\nroadWidth = data['roadWidth'] #Road width\n\nmesuringDotsX= roadLenght / 16 #Mesuring Dots on road lenght \nmesuringDotsY= roadWidth / 8 #Mesuring Dots on road width\n\nx = 0\ny = 0\n\nmesuringDotsTable = []\n\nwhile roadWidth >= y:\n while roadLenght >= x:\n mesuringDotsTable.append(vector2F(x,y))\n x += mesuringDotsX\n y += mesuringDotsY\n x = 0\n\ndef getValue(angle):\n form = None\n to = None\n valueHigh = None\n const = None\n valueLow = None\n if angle > 180:\n print(\"Error\")\n exit()\n for i in lampIlumCarac:\n valueHigh = valueLow\n form = i[0]\n to = i[1]\n valueLow = i[2]\n const = i[3]\n if angle < i[1] and angle > i[0]:\n break\n\n if const:\n return valueLow\n\n return (abs((angle % form) - (to - form)) / (to - form)) * (valueHigh - valueLow) + valueLow\n\n\nmathPlotDataX = []\nmathPlotDataY = []\nmathPlotDataZ = [0] * len(mesuringDotsTable)\n\nfor i in mesuringDotsTable:\n mathPlotDataX.append(i.x)\n mathPlotDataY.append(i.y)\n\nfinalResult = 0\n\nfor lamp in lampPos:\n print(\"lampX= \"+str(lamp.x) + \" lampY= \" + str(lamp.y))\n i = 0\n for dots in mesuringDotsTable:\n print(\"x=\" + str(dots.x-lamp.x) + \"y=\" + str(dots.y-lamp.y))\n mountingHeight = lamp.z\n a=0\n if (dots.x-lamp.x) == 0 and (dots.y-lamp.y) == 0:\n a = math.sqrt(math.pow(0.001,2)+math.pow(0.001,2))\n else:\n a = math.sqrt(math.pow((dots.x-lamp.x),2)+math.pow((dots.y-lamp.y),2)) \n \n print(\"a=\" + str(a))\n\n tgAlpha = (a/mountingHeight)\n print(\"tg\\u03B1=\" + str(tgAlpha))\n\n angleAlpha = math.degrees(math.atan(tgAlpha))\n print(\"angle\\u03B1=\" + str(angleAlpha))\n\n cosAlpha =math.cos(math.radians(angleAlpha))\n print(\"cos\\u03B1=\" + str(cosAlpha))\n\n cos3Alpha = pow(math.cos(math.radians(angleAlpha)),3)\n print(\"cos3\\u03B1=\" + str(cos3Alpha))\n\n print(\"AngleLampCarac=\" + str(getValue(angleAlpha)))\n illuminatedLampValue= getValue(angleAlpha) * ( bulbFlow / lampFlow)\n\n result = (illuminatedLampValue / pow(mountingHeight,2)) * pow(math.cos(math.radians(angleAlpha)),3)\n print(\"Result= \" + str(result))\n\n finalResult += result\n #dataList[i] = mesuringPoints(dots.x,dots.y,a,tgAlpha,angleAlpha,cosAlpha,cos3Alpha,result) \n mathPlotDataZ[i] = mathPlotDataZ[i] + result\n i += 1\n print(\"\")\n\n\n\nfinalResult = finalResult / (len(mesuringDotsTable) * len(lampPos))\nprint(finalResult)\n\nf = 
open(\"results.csv\", \"w\")\ni = 0\nwhile i < len(mesuringDotsTable):\n f.write(str(mathPlotDataX[i]) + \",\" + str(mathPlotDataY[i]) + \",\" + str(mathPlotDataZ[i]) + \"\\n\")\n i = i +1\nf.close()\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_trisurf(mathPlotDataX, mathPlotDataY, mathPlotDataZ, cmap=cm.jet, linewidth=0.2)\nplt.show()\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
dibyajit30/Course-Works
[ "f8c275a61651a757cdac562d07d373f15d27f05c" ]
[ "Deep Reinforcement Learning/immitation learning/utils.py" ]
[ "import numpy as np\nfrom torch import argmax\n\nLEFT =1\nRIGHT = 2\nSTRAIGHT = 0\nACCELERATE =3\nBRAKE = 4\n\ndef one_hot(labels):\n \"\"\"\n this creates a one hot encoding from a flat vector:\n i.e. given y = [0,2,1]\n it creates y_one_hot = [[1,0,0], [0,0,1], [0,1,0]]\n \"\"\"\n classes = np.unique(labels)\n n_classes = classes.size\n one_hot_labels = np.zeros(labels.shape + (n_classes,))\n for c in classes:\n one_hot_labels[labels == c, c] = 1.0\n return one_hot_labels\n\ndef rgb2gray(rgb):\n \"\"\" \n this method converts rgb images to grayscale.\n \"\"\"\n gray = np.dot(rgb[...,:3], [0.2125, 0.7154, 0.0721])\n return gray.astype('float32') \n\n\ndef action_to_id(a):\n \"\"\" \n this method discretizes the actions.\n Important: this method only works if you recorded data pressing only one key at a time!\n \"\"\"\n if all(a == [-1.0, 0.0, 0.0]): return LEFT # LEFT: 1\n elif all(a == [1.0, 0.0, 0.0]): return RIGHT # RIGHT: 2\n elif all(a == [0.0, 1.0, 0.0]): return ACCELERATE # ACCELERATE: 3\n elif all(a == [0.0, 0.0, 0.2]): return BRAKE # BRAKE: 4\n else: \n return STRAIGHT # STRAIGHT = 0\n \ndef output_to_action(output):\n \"\"\"\n this method accepts the output of the CNN agent and provides continuous action. \n \"\"\"\n action_index = argmax(output)\n action_map = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.2]]\n result = np.array(action_map[action_index])\n return result\n" ]
[ [ "numpy.dot", "numpy.unique", "numpy.array", "numpy.zeros", "torch.argmax" ] ]