repo_name (string) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
jaryaman/ML_demos | [
"df270b58d35d1248079e4651988ded4074237bfc"
] | [
"Notebooks/utls.py"
] | [
"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport matplotlib.ticker\nfrom matplotlib.ticker import FormatStrFormatter\n\ndef reset_plots():\n plt.close('all')\n fontsize = 20\n legsize = 15\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n plt.rc('text', usetex=True)\n font = {'size' : fontsize}\n plt.rc('font', **font)\n rc={'axes.labelsize': fontsize,\n 'font.size': fontsize,\n 'axes.titlesize': fontsize,\n 'xtick.labelsize':fontsize,\n 'ytick.labelsize':fontsize,\n 'legend.fontsize': legsize}\n mpl.rcParams.update(**rc)\n mpl.rc('lines', markersize=10)\n plt.rcParams.update({'axes.labelsize': fontsize})\n mpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}']\n\n\ndef remove_tex_axis(ax, xtick_fmt = '%d', ytick_fmt = '%d'):\n\t\"\"\"\n\tMakes axes normal font in matplotlib.\n\tParams:\n\txtick_fmt : A string, defining the format of the x-axis\n\tytick_fmt : A string, defining the format of the y-axis\n\t\"\"\"\n\tfmt = matplotlib.ticker.StrMethodFormatter(\"{x}\")\n\tax.xaxis.set_major_formatter(fmt)\n\tax.yaxis.set_major_formatter(fmt)\n\tax.xaxis.set_major_formatter(FormatStrFormatter(xtick_fmt))\n\tax.yaxis.set_major_formatter(FormatStrFormatter(ytick_fmt))\n\ndef multivariate_gaussian(pos, mu, Sigma):\n \"\"\"Return the multivariate Gaussian distribution on array pos.\n\n pos is an array constructed by packing the meshed arrays of variables\n x_1, x_2, x_3, ..., x_k into its _last_ dimension.\n\n Source: https://scipython.com/blog/visualizing-the-bivariate-gaussian-distribution/\n \"\"\"\n\n n = mu.shape[0]\n Sigma_det = np.linalg.det(Sigma)\n Sigma_inv = np.linalg.inv(Sigma)\n N = np.sqrt((2*np.pi)**n * Sigma_det)\n # This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized\n # way across all the input variables.\n fac = np.einsum('...k,kl,...l->...', pos-mu, Sigma_inv, pos-mu)\n\n return np.exp(-fac / 2) / N\n\ndef standardize(X):\n \"\"\"Z-transform an array\n\n param X: An N x D array where N is the number of examples and D is the number of features\n\n returns: An N x D array where every column has been rescaled to 0 mean and unit variance\n \"\"\"\n return (X - X.mean())/X.std(ddof=1)\n"
] | [
[
"matplotlib.pyplot.rc",
"numpy.einsum",
"numpy.linalg.inv",
"numpy.linalg.det",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.rc",
"numpy.exp",
"matplotlib.pyplot.rcParams.update",
"matplotlib.rcParams.update",
"matplotlib.pyplot.close",
"numpy.sqrt"
]
] |
emtpb/dsch | [
"ed31b72a95e59335f338ae48bc0c7c0d011e889c"
] | [
"dsch/backends/inmem.py"
] | [
"\"\"\"dsch backend for in-memory data storage.\n\nThis backend stores all data in memory and cannot directly save to disk.\nFor temporary data that does not have to be stored, the in-memory backend\nprovides a clean way of data with dsch, without littering the workspace with\ntemporary files. Also, it can be used to collect and aggregate data *before*\nselecting a storage path, e.g. file name.\n\"\"\"\nimport numpy as np\n\nfrom .. import data, storage\n\n\nclass _ItemNode(data.ItemNode):\n \"\"\"Common base class for data nodes for the inmem backend.\"\"\"\n\n def replace(self, new_value):\n \"\"\"Completely replace the current node value.\n\n Instead of changing parts of the data (e.g. via numpy array slicing),\n replace the entire data object for this node.\n\n Args:\n new_value: New value to apply to the node, independent of the\n backend in use.\n \"\"\"\n # Directly store the given value, since there is no storage engine that\n # might require any data type changes etc.\n self._storage = new_value\n\n def _value(self):\n \"\"\"Return the actual node data, independent of the backend in use.\n\n Returns:\n Node data.\n \"\"\"\n # Directly return the given value, since there is no storage engine\n # that might require any data type changes etc.\n return self._storage\n\n\nclass Array(data.Array, _ItemNode):\n \"\"\"Array-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass Bytes(_ItemNode):\n \"\"\"Bytes-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass Bool(_ItemNode):\n \"\"\"Bool-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass Compilation(data.Compilation):\n \"\"\"Compilation-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass Date(data.Date, _ItemNode):\n \"\"\"Date-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass DateTime(data.DateTime, _ItemNode):\n \"\"\"DateTime-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass List(data.List):\n \"\"\"List-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass Scalar(data.Scalar, _ItemNode):\n \"\"\"Scalar-type data node for the inmem backend.\"\"\"\n\n def replace(self, new_value):\n \"\"\"Completely replace the current node value.\n\n Instead of changing parts of the data (e.g. via numpy array slicing),\n replace the entire data object for this node.\n\n Args:\n new_value: New value to apply to the node, independent of the\n backend in use.\n \"\"\"\n self._storage = np.dtype(self.schema_node.dtype).type(new_value)\n\n\n\nclass Storage(storage.Storage):\n \"\"\"Interface to the in-memory storage.\n\n Attributes:\n storage_path (str): Path to the current storage.\n schema_node: Top-level schema node used for the stored data.\n data: Top-level data node, providing access to all managed data.\n \"\"\"\n\n def __init__(self, storage_path='::inmem::', schema_node=None):\n \"\"\"Initialize the in-memory storage interface.\n\n .. note::\n The in-memory backend does not support saving data to disk, so no\n storage path must be given. Consequently, it does not support\n loading data, so the only possible operation is creating a new,\n empty storage.\n\n Args:\n storage_path: Only supported for API compatibility. If given, this\n must always be \"::inmem::\".\n schema_node: Top-level schema node for the data hierarchy.\n \"\"\"\n if storage_path != '::inmem::':\n raise ValueError('Invalid storage path for in-memory backend. 
'\n 'Must be the special string \"::inmem::\".')\n if not schema_node:\n raise ValueError('Top-level schema node must always be specified.')\n super().__init__(storage_path, schema_node)\n self.data = data.data_node_from_schema(self.schema_node,\n self.__module__, None)\n\n\nclass String(_ItemNode):\n \"\"\"String-type data node for the inmem backend.\"\"\"\n pass\n\n\nclass Time(data.Time, _ItemNode):\n \"\"\"Time-type data node for the inmem backend.\"\"\"\n pass\n"
] | [
[
"numpy.dtype"
]
] |
legaultmarc/pytorch-genotypes-dataloader | [
"0cb99f28cbfc5a2787d1d51f1c27e7e31dc23d05"
] | [
"pytorch_genotypes/vcf.py"
] | [
"from dataclasses import dataclass\nimport collections\nimport typing\nimport sqlite3\nimport os\n\nimport cyvcf2\nimport numpy as np\nimport torch\n\n\nVERBOSE = True\n\n\ndef set_verbose(b: bool):\n global VERBOSE\n VERBOSE = b\n\n\n@dataclass\nclass VCFChunk:\n chunk_id: int\n chrom: str\n start: int\n end: int\n\n @classmethod\n def new(cls, id, first_variant):\n return cls(id, first_variant.CHROM, first_variant.POS, None)\n\n def next_new(self, first_variant):\n return self.new(self.chunk_id + 1, first_variant)\n\n def sql_insert(self, cur):\n cur.execute(\n \"insert into chunks values (?, ?, ?, ?)\",\n (self.chunk_id, self.chrom, self.start, self.end)\n )\n\n def __repr__(self):\n return (\n f\"<Chunk #{self.chunk_id} - {self.chrom}:{self.start}-{self.end}>\"\n )\n\n\nclass FixedSizeVCFChunks(object):\n \"\"\"Create chunks of variants from a VCF.\"\"\"\n\n _Chunk = collections.namedtuple(\"_Chunk\", (\"id\", \"chrom\", \"start\", \"end\",\n \"n_variants\"))\n\n def __init__(self, vcf_filename, max_snps_per_chunk=200000, create=False):\n self.vcf_filename = vcf_filename\n\n db_filename = os.path.abspath(self._get_db_name())\n print(f\"Using DB: {db_filename}\")\n self.con = sqlite3.connect(db_filename)\n if create:\n self.create(max_snps_per_chunk)\n\n # Load the chunk regions in memory.\n self._load_chunks()\n\n def _load_chunks(self):\n cur = self.con.cursor()\n\n cur.execute(\n \"select chunks.*, counts.n \"\n \"from \"\n \" chunks inner join \"\n \" ( \"\n \" select chunk_id, count(*) as n \"\n \" from variants group by chunk_id \"\n \" ) counts on chunks.id=counts.chunk_id;\"\n )\n self.chunks = [self._Chunk(*tu) for tu in cur.fetchall()]\n\n def get_samples(self):\n try:\n vcf = cyvcf2.VCF(self.vcf_filename)\n samples = vcf.samples\n finally:\n vcf.close()\n\n return samples\n\n def get_chunk_meta(self, chunk_id):\n li = filter(lambda chunk: chunk.id == chunk_id, self.chunks)\n li = list(li)\n if len(li) > 1:\n raise ValueError()\n elif len(li) == 0:\n raise IndexError(chunk_id)\n\n return li[0]\n\n def _get_db_name(self):\n if \".vcf.gz\" in self.vcf_filename:\n return self.vcf_filename.replace(\".vcf.gz\", \".db\")\n\n elif \".vcf\" in self.vcf_filename:\n return self.vcf_filename.replace(\".vcf\", \".db\")\n\n else:\n return \"vcf_chunks.db\"\n\n def _sql_create(self):\n cur = self.con.cursor()\n\n cur.execute(\"drop table if exists chunks\")\n cur.execute(\n \"create table chunks ( \"\n \" id integer primary key, \"\n \" chrom text not null, \"\n \" start integer not null, \"\n \" end integer not null\"\n \");\"\n )\n\n cur.execute(\"drop table if exists variants\")\n cur.execute(\n \"create table variants ( \"\n \" chunk_id integer, \"\n \" chrom text not null, \"\n \" id text, \"\n \" pos integer not null, \"\n \" ref text not null, \"\n \" alt text not null, \"\n \" constraint chunk_fk \"\n \" foreign key (chunk_id) references chunks (id)\"\n \");\"\n )\n self.con.commit()\n\n def create(self, max_snps_per_chunk):\n self._sql_create()\n cur_chunk = None\n cur_n = 0\n buf = []\n vcf_iter = iter_vcf_wrapper(cyvcf2.VCF(self.vcf_filename, lazy=True))\n prev = None\n for v in vcf_iter:\n if cur_chunk is None:\n # Initialize first chunk.\n cur_chunk = VCFChunk.new(id=0, first_variant=v)\n\n if cur_chunk.chrom != v.CHROM or cur_n >= max_snps_per_chunk:\n self._close_chunk(cur_chunk, last_variant=prev)\n cur_chunk = cur_chunk.next_new(first_variant=v)\n cur_n = 0\n\n buf.append([cur_chunk.chunk_id, v.CHROM, v.ID, v.POS,\n v.REF, v.ALT[0]])\n cur_n += 1\n\n if len(buf) >= 1e6:\n 
buf = self._flush_buffer(buf)\n\n prev = v\n\n if buf:\n self._flush_buffer(buf)\n\n self._close_chunk(cur_chunk, last_variant=v)\n\n def _flush_buffer(self, buf):\n cur = self.con.cursor()\n cur.executemany(\"insert into variants values (?, ?, ?, ?, ?, ?)\", buf)\n return []\n\n def _close_chunk(self, cur_chunk: VCFChunk, last_variant):\n # Increment chunk counter and add entry to the db.\n cur_chunk.end = last_variant.POS\n cur = self.con.cursor()\n cur_chunk.sql_insert(cur)\n if VERBOSE:\n print(cur_chunk)\n self.con.commit()\n\n def iter_vcf_by_chunk_id(\n self,\n chunk_id: int\n ) -> typing.Generator[cyvcf2.Variant, None, None]:\n chunk = self.get_chunk_meta(chunk_id)\n return iter_vcf_wrapper(\n cyvcf2.VCF(self.vcf_filename)(\n f\"{chunk.chrom}:{chunk.start}-{chunk.end}\"\n )\n )\n\n def get_n_chunks(self):\n return len(self.chunks)\n\n def get_variant_metadata_for_chunk_id(self, chunk_id: int):\n import pandas as pd\n cur = self.con.cursor()\n\n cur.execute(\n \"select chrom, id, pos, ref, alt \"\n \" from variants where chunk_id=? order by pos asc;\",\n (chunk_id, )\n )\n\n results = cur.fetchall()\n if not results:\n raise ValueError(f\"No variants in chunk '{chunk_id}'.\")\n\n return pd.DataFrame(results, columns=[\"chrom\", \"id\", \"pos\",\n \"ref\", \"alt\"])\n\n def get_tensor_for_chunk_id(self, chunk_id):\n # Check how many samples and variants to pre-allocate memory.\n try:\n vcf = cyvcf2.VCF(self.vcf_filename)\n n_samples = len(vcf.samples)\n finally:\n vcf.close()\n\n chunk = self.get_chunk_meta(chunk_id)\n mat = np.empty((n_samples, chunk.n_variants), dtype=np.float32)\n\n for j, v in enumerate(self.iter_vcf_by_chunk_id(chunk_id)):\n mat[:, j] = parse_vcf_genotypes(v.genotypes, format=\"additive\")\n\n return torch.from_numpy(mat).to(torch.half)\n\n\ndef iter_vcf_wrapper(vcf, biallelic=True, snp=True):\n \"\"\"Wrapper over cyvcf2.VCF to unify filtering as needed.\"\"\"\n for v in vcf:\n # Filters\n if biallelic:\n if len(v.ALT) > 1:\n continue\n\n if snp:\n if not all(len(allele) == 1 for allele in [v.REF] + v.ALT):\n continue\n\n yield v\n\n\ndef parse_vcf_genotypes(genotypes, format=\"additive\"):\n if format == \"additive\":\n return _parse_vcf_genotypes_additive(genotypes)\n else:\n raise ValueError(format)\n\n\ndef _parse_vcf_genotypes_additive(genotypes):\n return np.fromiter(\n (a + b if a != -1 and b != -1 else np.nan\n for a, b, _ in genotypes),\n dtype=np.float32,\n count=len(genotypes)\n )\n"
] | [
[
"torch.from_numpy",
"pandas.DataFrame",
"numpy.empty"
]
] |
rivinduw/ntf_estimation | [
"1c4bc7015971081dd880f00940c79776ad1ea63d"
] | [
"validate.py"
] | [
"#!/usr/bin/env python3 -u\n#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\n\nfrom fairseq import checkpoint_utils, options, progress_bar, utils\n\n\ndef main(args, override_args=None):\n utils.import_user_module(args)\n\n use_fp16 = args.fp16\n use_cuda = torch.cuda.is_available() and not args.cpu\n\n if override_args is not None:\n overrides = vars(override_args)\n overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))\n else:\n overrides = None\n\n # Load ensemble\n print('| loading model(s) from {}'.format(args.path))\n models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(\n [args.path],\n arg_overrides=overrides,\n )\n model = models[0]\n\n # Move models to GPU\n for model in models:\n if use_fp16:\n model.half()\n if use_cuda:\n model.cuda()\n\n # Print args\n print(model_args)\n\n # Build criterion\n criterion = task.build_criterion(model_args)\n criterion.eval()\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for subset in args.valid_subset.split(','):\n try:\n task.load_dataset(subset, combine=False, epoch=0)\n dataset = task.dataset(subset)\n except KeyError:\n raise Exception('Cannot find dataset: ' + subset)\n\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=dataset,\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n *[m.max_positions() for m in models],\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.build_progress_bar(\n args, itr,\n prefix='valid on \\'{}\\' subset'.format(subset),\n no_progress_bar='simple'\n )\n\n log_outputs = []\n for i, sample in enumerate(progress):\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n _loss, _sample_size, log_output = task.valid_step(sample, model, criterion)\n progress.log(log_output, step=i)\n log_outputs.append(log_output)\n\n log_output = task.aggregate_logging_outputs(log_outputs, criterion)\n\n progress.print(log_output, tag=subset, step=i)\n\n\ndef cli_main():\n parser = options.get_validation_parser()\n args = options.parse_args_and_arch(parser)\n\n # only override args that are explicitly given on the command line\n override_parser = options.get_validation_parser()\n override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)\n\n main(args, override_args)\n\n\nif __name__ == '__main__':\n cli_main()\n"
] | [
[
"torch.cuda.is_available"
]
] |
piotlinski/ssdir | [
"907b4f00a0747b7e774dc0a342d5a40c4d296fb6"
] | [
"pytorch_ssdir/modeling/encoder.py"
] | [
"\"\"\"SSDIR encoder.\"\"\"\nfrom copy import deepcopy\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom pytorch_ssd.modeling.model import SSD\n\nfrom pytorch_ssdir.modeling.depth import DepthEncoder\nfrom pytorch_ssdir.modeling.present import PresentEncoder\nfrom pytorch_ssdir.modeling.what import WhatEncoder\nfrom pytorch_ssdir.modeling.where import WhereEncoder\n\n\nclass Encoder(nn.Module):\n \"\"\"Module encoding input image to latent representation.\n\n .. latent representation consists of:\n - $$z_{what} ~ N(\\\\mu^{what}, \\\\sigma^{what})$$\n - $$z_{where} in R^4$$\n - $$z_{present} ~ Bernoulli(p_{present})$$\n - $$z_{depth} ~ N(\\\\mu_{depth}, \\\\sigma_{depth})$$\n \"\"\"\n\n def __init__(\n self,\n ssd: SSD,\n z_what_size: int = 64,\n z_what_hidden: int = 2,\n z_what_scale_const: Optional[float] = None,\n z_depth_scale_const: Optional[float] = None,\n z_present_eps: float = 1e-3,\n square_boxes: bool = False,\n train_what: bool = True,\n train_where: bool = True,\n train_present: bool = True,\n train_depth: bool = True,\n train_backbone: bool = True,\n train_backbone_layers: int = -1,\n clone_backbone: bool = False,\n reset_non_present: bool = True,\n background: bool = True,\n normalize_z_present: bool = False,\n ):\n super().__init__()\n self.ssd_backbone = ssd.backbone.requires_grad_(train_backbone)\n self.clone_backbone = clone_backbone\n self.reset = reset_non_present\n self.background = background\n if self.clone_backbone:\n self.ssd_backbone_cloned = deepcopy(self.ssd_backbone).requires_grad_(True)\n if train_backbone_layers >= 0 and train_backbone:\n for module in list(self.ssd_backbone.children())[train_backbone_layers:][\n ::-1\n ]:\n module.requires_grad_(False)\n self.z_present_eps = z_present_eps\n self.what_enc = WhatEncoder(\n z_what_size=z_what_size,\n n_hidden=z_what_hidden,\n z_what_scale_const=z_what_scale_const,\n feature_channels=ssd.backbone.out_channels,\n feature_maps=ssd.backbone.feature_maps,\n background=background,\n ).requires_grad_(train_what)\n self.where_enc = WhereEncoder(\n ssd_box_predictor=ssd.predictor,\n ssd_anchors=ssd.anchors,\n ssd_center_variance=ssd.center_variance,\n ssd_size_variance=ssd.size_variance,\n square_boxes=square_boxes,\n ).requires_grad_(train_where)\n self.present_enc = PresentEncoder(\n ssd_box_predictor=ssd.predictor,\n normalize_probas=normalize_z_present,\n ).requires_grad_(train_present)\n self.depth_enc = DepthEncoder(\n feature_channels=ssd.backbone.out_channels,\n z_depth_scale_const=z_depth_scale_const,\n ).requires_grad_(train_depth)\n\n self.register_buffer(\n \"indices\",\n self.latents_indices(\n feature_maps=ssd.backbone.feature_maps,\n boxes_per_loc=ssd.backbone.boxes_per_loc,\n ),\n )\n self.register_buffer(\"empty_loc\", torch.tensor(0.0, dtype=torch.float))\n self.register_buffer(\"empty_scale\", torch.tensor(1.0, dtype=torch.float))\n\n @staticmethod\n def latents_indices(\n feature_maps: List[int], boxes_per_loc: List[int]\n ) -> torch.Tensor:\n \"\"\"Get indices for reconstructing images.\n\n .. 
Caters for the difference between z_what, z_depth and z_where, z_present.\n \"\"\"\n indices = []\n idx = 0\n for feature_map, n_boxes in zip(feature_maps, boxes_per_loc):\n for feature_map_idx in range(feature_map ** 2):\n indices.append(\n torch.full(size=(n_boxes,), fill_value=idx, dtype=torch.float)\n )\n idx += 1\n return torch.cat(indices, dim=0)\n\n def pad_latents(\n self,\n latents: Tuple[\n Tuple[torch.Tensor, torch.Tensor],\n torch.Tensor,\n torch.Tensor,\n Tuple[torch.Tensor, torch.Tensor],\n ],\n ) -> Tuple[\n Tuple[torch.Tensor, torch.Tensor],\n torch.Tensor,\n torch.Tensor,\n Tuple[torch.Tensor, torch.Tensor],\n ]:\n \"\"\"Pad latents according to Encoder's settings.\"\"\"\n (\n (z_what_loc, z_what_scale),\n z_where,\n z_present,\n (z_depth_loc, z_depth_scale),\n ) = latents\n # repeat rows to match z_where and z_present\n indices = self.indices.long()\n what_indices = indices\n if self.background:\n what_indices = torch.hstack((indices, indices.max() + 1))\n z_what_loc = z_what_loc.index_select(dim=1, index=what_indices)\n z_what_scale = z_what_scale.index_select(dim=1, index=what_indices)\n z_depth_loc = z_depth_loc.index_select(dim=1, index=indices)\n z_depth_scale = z_depth_scale.index_select(dim=1, index=indices)\n return (\n (z_what_loc, z_what_scale),\n z_where,\n z_present,\n (z_depth_loc, z_depth_scale),\n )\n\n def reset_non_present(\n self,\n latents: Tuple[\n Tuple[torch.Tensor, torch.Tensor],\n torch.Tensor,\n torch.Tensor,\n Tuple[torch.Tensor, torch.Tensor],\n ],\n ) -> Tuple[\n Tuple[torch.Tensor, torch.Tensor],\n torch.Tensor,\n torch.Tensor,\n Tuple[torch.Tensor, torch.Tensor],\n ]:\n \"\"\"Reset latents, whose z_present is 0.\n\n .. note: this will set all \"non-present\" locs to 0. and scales to 1.\n \"\"\"\n (\n (z_what_loc, z_what_scale),\n z_where,\n z_present,\n (z_depth_loc, z_depth_scale),\n ) = latents\n present_mask = torch.gt(z_present, self.z_present_eps)\n what_present_mask = present_mask\n if self.background:\n what_present_mask = torch.hstack(\n (\n present_mask,\n present_mask.new_full((1,), fill_value=True).expand(\n present_mask.shape[0], 1, 1\n ),\n )\n )\n z_what_loc = torch.where(what_present_mask, z_what_loc, self.empty_loc)\n z_what_scale = torch.where(what_present_mask, z_what_scale, self.empty_scale)\n z_where = torch.where(present_mask, z_where, self.empty_loc)\n z_depth_loc = torch.where(present_mask, z_depth_loc, self.empty_loc)\n z_depth_scale = torch.where(present_mask, z_depth_scale, self.empty_scale)\n return (\n (z_what_loc, z_what_scale),\n z_where,\n z_present,\n (z_depth_loc, z_depth_scale),\n )\n\n def forward(\n self, images: torch.Tensor\n ) -> Tuple[\n Tuple[torch.Tensor, torch.Tensor],\n torch.Tensor,\n torch.Tensor,\n Tuple[torch.Tensor, torch.Tensor],\n ]:\n \"\"\"Takes images tensors (batch_size x channels x image_size x image_size)\n .. and outputs latent representation tuple\n .. 
(z_what (loc & scale), z_where, z_present, z_depth (loc & scale))\n \"\"\"\n where_present_features = self.ssd_backbone(images)\n if self.clone_backbone:\n what_depth_features = self.ssd_backbone_cloned(images)\n else:\n what_depth_features = where_present_features\n z_where = self.where_enc(where_present_features)\n z_present = self.present_enc(where_present_features)\n z_what_loc, z_what_scale = self.what_enc(what_depth_features)\n z_depth_loc, z_depth_scale = self.depth_enc(what_depth_features)\n latents = (\n (z_what_loc, z_what_scale),\n z_where,\n z_present,\n (z_depth_loc, z_depth_scale),\n )\n padded_latents = self.pad_latents(latents)\n if self.reset:\n padded_latents = self.reset_non_present(padded_latents)\n return padded_latents\n"
] | [
[
"torch.tensor",
"torch.full",
"torch.where",
"torch.cat",
"torch.gt"
]
] |
ZwEin27/digoie-annotation | [
"edf01770e26a78267045bba33a54aef3376fa63f"
] | [
"digoie/core/ml/classifier/mla/decision_tree.py"
] | [
"\nfrom sklearn import tree\n\n\nfrom digoie.core.ml.classifier.mla.base import MLAlgorithm\n\n\nclass MLDecisionTree(MLAlgorithm):\n\n # ML_NAME = DECISION_TREE\n\n def __init__(self, training_dataset, training_label):\n super(MLDecisionTree, self).__init__(training_dataset, training_label)\n\n def generate(self):\n classifier = tree.DecisionTreeClassifier()\n super(MLDecisionTree, self).generate(classifier)\n # print 'model for (' + self.ML_NAME + ') has been generated'\n # save_model(self.ML_NAME, classifier)\n return classifier\n\n \n\n\n\n\n\n\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier"
]
] |
sujitmandal/Sales-Forecasting | [
"9052a30f2e3a386eafe36de3e5f1b1205bc1fdf1"
] | [
"LoadModel.py"
] | [
"#Import required libraries\nimport os\nimport pandas as pd\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\n\n#Github: https://github.com/sujitmandal\n#This programe is create by Sujit Mandal\n\"\"\"\nGithub: https://github.com/sujitmandal\nPypi : https://pypi.org/user/sujitmandal/\nLinkedIn : https://www.linkedin.com/in/sujit-mandal-91215013a/\n\"\"\"\n\nunknownData = pd.read_csv('dataset/unknownData.csv',delimiter=';',index_col='date')\nunknownDates = unknownData.index\n\nnew_model = keras.models.load_model('saved model/vscode/my_model.h5')\nnew_model.summary()\n\nunknownDatapredictions = new_model.predict(unknownData).flatten()\nprint('\\n')\nprint('Unknown Data Predictions :')\nprint(unknownDatapredictions)\n\nplt.title('Predicted Loans for unknown Data ')\nplt.plot(unknownDates, unknownDatapredictions)\nplt.xlabel('Date')\nplt.ylabel('loans')\nplt.legend(['Predicted'], loc='upper right')\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"tensorflow.keras.models.load_model",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
dangilman/LenstronomyWrapper | [
"7c3bb68ab1f982432cd16d570854df50466491e9"
] | [
"lenstronomywrapper/LensData/lensed_quasar.py"
] | [
"import numpy as np\nfrom lenstronomy.PointSource.point_source import PointSource\nimport itertools\nfrom copy import deepcopy\n\nclass LensedQuasar(object):\n\n def __init__(self, x_image, y_image, mag, t_arrival=None):\n\n \"\"\"\n Data class for a quadruply-imaged quasar\n\n :param x_image: x image positions\n :param y_image: y image positions\n :param mag: image magnifications or flux ratios\n :param t_arrvival: arrival times (optional)\n \"\"\"\n self.x, self.y = x_image, y_image\n self.m = mag\n self._nimg = len(x_image)\n self.t_arrival = t_arrival\n\n if t_arrival is not None:\n self.relative_arrival_times = self.t_arrival[1:] - self.t_arrival[0]\n\n if self._nimg == 4:\n pass\n else:\n raise Exception(str(self._nimg)+' lenses not yet incorporated.')\n\n point_amp = mag * 200 # multiply by intrinsic quasar brightness (in counts/s)\n\n self.kwargs_ps = [{'ra_image': self.x, 'dec_image': self.y,\n 'point_amp': point_amp}] # quasar point source position in the source plane and intrinsic brightness\n self.point_source_list = ['LENSED_POSITION']\n\n self.point_source_class = PointSource(point_source_type_list=self.point_source_list,\n fixed_magnification_list=[False])\n\n def update_kwargs_ps(self, new_kwargs):\n\n self.kwargs_ps = new_kwargs\n\n def flux_ratios(self, index=0):\n\n ref_flux = self.m[index]\n ratios = []\n for i, mi in enumerate(self.m):\n if i == index:\n continue\n ratios.append(mi / ref_flux)\n\n return np.array(ratios)\n\n def sort_by_pos(self, x, y):\n\n x_self = np.array(list(itertools.permutations(self.x)))\n y_self = np.array(list(itertools.permutations(self.y)))\n\n indexes = [0, 1, 2, 3]\n index_iterations = list(itertools.permutations(indexes))\n delta_r = []\n\n for i in range(0, int(len(x_self))):\n dr = 0\n for j in range(0, int(len(x_self[0]))):\n dr += (x_self[i][j] - x[j]) ** 2 + (y_self[i][j] - y[j]) ** 2\n\n delta_r.append(dr ** .5)\n\n min_indexes = np.array(index_iterations[np.argmin(delta_r)])\n self.x = self.x[min_indexes]\n self.y = self.y[min_indexes]\n self.m = self.m[min_indexes]\n\n if self.t_arrival is not None:\n self.t_arrival = self.t_arrival[min_indexes]\n self.relative_arrival_times = self.t_arrival[1:] - self.t_arrival[0]\n\n @property\n def prior(self):\n return []\n\n @property\n def fixed_models(self):\n return [{}]\n\n @property\n def param_init(self):\n return self.kwargs_ps\n\n @property\n def param_sigma(self):\n sigma = [{'ra_image': [0.005] * self._nimg, 'dec_image': [0.005] * self._nimg}]\n return sigma\n\n @property\n def param_lower(self):\n lower = [{'ra_image': -10 * np.ones_like(self.x), 'dec_image': -10 * np.ones_like(self.y)}]\n return lower\n\n @property\n def param_upper(self):\n lower = [{'ra_image': 10 * np.ones_like(self.x), 'dec_image': 10 * np.ones_like(self.y)}]\n return lower\n"
] | [
[
"numpy.array",
"numpy.argmin",
"numpy.ones_like"
]
] |
fangchao111/tensorpack | [
"87059de51331e540ca35eb295953ee16e3f59cee"
] | [
"examples/basics/cifar-convnet.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: cifar-convnet.py\n# Author: Yuxin Wu\nimport tensorflow as tf\nimport argparse\nimport os\n\nfrom tensorpack import *\nfrom tensorpack.tfutils.summary import *\nfrom tensorpack.dataflow import dataset\nfrom tensorpack.utils.gpu import get_num_gpu\n\n\"\"\"\nA small convnet model for Cifar10 or Cifar100 dataset.\n\nCifar10 trained on 1 GPU:\n 91% accuracy after 50k iterations.\n 79 itr/s on P100\n\nNot a good model for Cifar100, just for demonstration.\n\"\"\"\n\n\nclass Model(ModelDesc):\n def __init__(self, cifar_classnum):\n super(Model, self).__init__()\n self.cifar_classnum = cifar_classnum\n\n def inputs(self):\n return [tf.placeholder(tf.float32, (None, 30, 30, 3), 'input'),\n tf.placeholder(tf.int32, (None,), 'label')]\n\n def build_graph(self, image, label):\n is_training = get_current_tower_context().is_training\n keep_prob = tf.constant(0.5 if is_training else 1.0)\n\n if is_training:\n tf.summary.image(\"train_image\", image, 10)\n if tf.test.is_gpu_available():\n image = tf.transpose(image, [0, 3, 1, 2])\n data_format = 'channels_first'\n else:\n data_format = 'channels_last'\n\n image = image / 4.0 # just to make range smaller\n with argscope(Conv2D, activation=BNReLU, use_bias=False, kernel_size=3), \\\n argscope([Conv2D, MaxPooling, BatchNorm], data_format=data_format):\n logits = LinearWrap(image) \\\n .Conv2D('conv1.1', filters=64) \\\n .Conv2D('conv1.2', filters=64) \\\n .MaxPooling('pool1', 3, stride=2, padding='SAME') \\\n .Conv2D('conv2.1', filters=128) \\\n .Conv2D('conv2.2', filters=128) \\\n .MaxPooling('pool2', 3, stride=2, padding='SAME') \\\n .Conv2D('conv3.1', filters=128, padding='VALID') \\\n .Conv2D('conv3.2', filters=128, padding='VALID') \\\n .FullyConnected('fc0', 1024 + 512, activation=tf.nn.relu) \\\n .tf.nn.dropout(keep_prob) \\\n .FullyConnected('fc1', 512, activation=tf.nn.relu) \\\n .FullyConnected('linear', out_dim=self.cifar_classnum)()\n\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss')\n\n correct = tf.to_float(tf.nn.in_top_k(logits, label, 1), name='correct')\n # monitor training error\n add_moving_summary(tf.reduce_mean(correct, name='accuracy'))\n\n # weight decay on all W of fc layers\n wd_cost = regularize_cost('fc.*/W', l2_regularizer(4e-4), name='regularize_loss')\n add_moving_summary(cost, wd_cost)\n\n add_param_summary(('.*/W', ['histogram'])) # monitor W\n return tf.add_n([cost, wd_cost], name='cost')\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=1e-2, trainable=False)\n tf.summary.scalar('lr', lr)\n return tf.train.AdamOptimizer(lr, epsilon=1e-3)\n\n\ndef get_data(train_or_test, cifar_classnum):\n isTrain = train_or_test == 'train'\n if cifar_classnum == 10:\n ds = dataset.Cifar10(train_or_test)\n else:\n ds = dataset.Cifar100(train_or_test)\n if isTrain:\n augmentors = [\n imgaug.RandomCrop((30, 30)),\n imgaug.Flip(horiz=True),\n imgaug.Brightness(63),\n imgaug.Contrast((0.2, 1.8)),\n imgaug.MeanVarianceNormalize(all_channel=True)\n ]\n else:\n augmentors = [\n imgaug.CenterCrop((30, 30)),\n imgaug.MeanVarianceNormalize(all_channel=True)\n ]\n ds = AugmentImageComponent(ds, augmentors)\n ds = BatchData(ds, 128, remainder=not isTrain)\n if isTrain:\n ds = PrefetchDataZMQ(ds, 5)\n return ds\n\n\ndef get_config(cifar_classnum):\n # prepare dataset\n dataset_train = get_data('train', cifar_classnum)\n dataset_test = get_data('test', cifar_classnum)\n\n def 
lr_func(lr):\n if lr < 3e-5:\n raise StopTraining()\n return lr * 0.31\n return TrainConfig(\n model=Model(cifar_classnum),\n dataflow=dataset_train,\n callbacks=[\n ModelSaver(),\n InferenceRunner(dataset_test,\n ScalarStats(['accuracy', 'cost'])),\n StatMonitorParamSetter('learning_rate', 'validation_accuracy', lr_func,\n threshold=0.001, last_k=10, reverse=True),\n ],\n max_epoch=150,\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.', required=True)\n parser.add_argument('--load', help='load model')\n parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',\n type=int, default=10)\n args = parser.parse_args()\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n with tf.Graph().as_default():\n logger.set_logger_dir(os.path.join('train_log', 'cifar' + str(args.classnum)))\n config = get_config(args.classnum)\n if args.load:\n config.session_init = SaverRestore(args.load)\n\n num_gpu = get_num_gpu()\n trainer = QueueInputTrainer() if num_gpu <= 1 \\\n else SyncMultiGPUTrainerParameterServer(num_gpu)\n launch_train_with_config(config, trainer)\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.placeholder",
"tensorflow.add_n",
"tensorflow.summary.image",
"tensorflow.nn.in_top_k",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"tensorflow.Graph",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.get_variable",
"tensorflow.test.is_gpu_available"
]
] |
zamlz/dlcampjeju2018-I2A-cube | [
"85ae7a2084ca490ea685ff3d30e82720fb58c0ea"
] | [
"misc/cube_gif_test.py"
] | [
"\nimport gym\nimport sys\nimport cube_gym\nimport time\nfrom common.multiprocessing_env import SubprocVecEnv\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nfrom a2c import ActorCritic\nfrom policy import *\n\ndef env_fn():\n env = gym.make('cube-x3-v0')\n env.unwrapped._refreshScrambleParameters(1, 2, scramble_easy=True)\n return env\n\nactions = env_fn().unwrapped.action_list\n\nenvs = SubprocVecEnv([env_fn])\n\nobs = envs.reset()\nenvs.render(0)\n\naction_list = []\n\nfig = plt.figure()\nims = []\n\nim = plt.imshow(cube_gym.onehotToRGB(obs[0]))\nims.append([im])\n\nwith tf.Session() as sess:\n\n actor_critic = ActorCritic(sess, CnnPolicy,\n envs.observation_space.shape, envs.action_space, 1, 5,\n 0.5, 0.01, 0.5, 7e-4, 0.99, 1e-5, False)\n actor_critic.load(sys.argv[1])\n\n # sess.run(tf.global_variables_initializer())\n\n d = False\n while not d:\n print('-------------------------------------------------')\n print('Current Observation')\n envs.render(0)\n time.sleep(0.1)\n\n a, v, neg = actor_critic.act(obs, stochastic=True)\n print('')\n print('action: ', actions[a[0]])\n print('value: ', v)\n print('neglogp: ', neg)\n print('pd: ') \n for ac, pd in zip(actions, actor_critic.step_model.logits(obs)[0][0]):\n print('\\t', ac, pd)\n\n obs, r, d, sbo = envs.step(a)\n print('r: ', r)\n envs.render(0)\n time.sleep(0.1)\n\n if not d:\n im = plt.imshow(cube_gym.onehotToRGB(obs[0]))\n ims.append([im])\n else:\n print('DONE')\n im = plt.imshow(cube_gym.onehotToRGB(sbo[0]))\n ims.append([im])\n\n d = d[0]\n print(r)\n\n ani = animation.ArtistAnimation(fig, ims, interval=2000, blit=True, repeat_delay=4000)\n\n # plt.show()\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.animation.ArtistAnimation",
"tensorflow.Session"
]
] |
sanzgiri/covid19_india | [
"3a530fb107446bdba54dc6581c45f15f590b0a1e"
] | [
"covid19_model.py"
] | [
"\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\nimport pickle\nimport math\nimport scipy.optimize as optim\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\n \nimport covid19_prepare_data as prepare_data\n\nimport logging\nlogging.getLogger('fbprophet').setLevel(logging.WARNING)\n\n\ndef fetch_data():\n prepare_data.build_covid19_data()\n\n\n# Define function with the coefficients to estimate\ndef func_logistic(t, a, b, c):\n return c / (1 + a * np.exp(-b*t))\n\n\n# Hill sigmoidal function\ndef func_hill(t, a, b, c):\n return a * np.power(t, b) / (np.power(c, b) + np.power(t, b)) \n\n\ndef detect_growth(input_file, output_file, backtesting):\n countries_processed = 0\n countries_stabilized = 0\n countries_increasing = 0\n \n countries_list = []\n \n df = pd.read_csv(input_file, parse_dates=True)\n columns = df.columns.values\n for column in columns:\n if column.endswith('_cases'):\n data = pd.DataFrame(df[column].values)\n \n data = data.reset_index(drop=False)\n data.columns = ['Timestep', 'Total Cases']\n \n # Randomly initialize the coefficients\n p0 = np.random.exponential(size=3)\n\n # Set min bound 0 on all coefficients, and set different max bounds for each coefficient\n bounds = (0, [100000., 1000., 1000000000.])\n\n # Convert pd.Series to np.Array and use Scipy's curve fit to find the best Nonlinear Least Squares coefficients\n x = np.array(data['Timestep']) + 1\n y = np.array(data['Total Cases'])\n \n try:\n (a,b,c),cov = optim.curve_fit(func_logistic, x, y, bounds=bounds, p0=p0, maxfev=100000)\n \n # The time step at which the growth is fastest\n t_fastest = np.log(a) / b\n i_fastest = func_logistic(t_fastest, a, b, c)\n \n res_df = df[['Report_Date', column]].copy()\n res_df['fastest_grow_day'] = t_fastest\n res_df['fastest_grow_value'] = i_fastest\n res_df['growth_stabilized'] = t_fastest <= x[-1]\n res_df['timestep'] = x\n res_df['res_func_logistic'] = func_logistic(x, a, b, c)\n \n if t_fastest <= x[-1]:\n print('Growth stabilized:', column, '| Fastest grow day:', t_fastest, '| Infections:', i_fastest)\n res_df['cap'] = func_logistic(x[-1] + 10, a, b, c)\n countries_stabilized += 1\n else:\n print('Growth increasing:', column, '| Fastest grow day:', t_fastest, '| Infections:', i_fastest)\n res_df['cap'] = func_logistic(i_fastest + 10, a, b, c)\n countries_increasing += 1\n \n countries_processed += 1\n countries_list.append(column)\n \n res_df.to_csv(output_file + column + '.csv')\n except RuntimeError:\n print('No fit found for: ', column)\n \n if backtesting == False:\n d = {'countries_processed': [countries_processed], 'countries_stabilized': [countries_stabilized], 'countries_increasing': [countries_increasing]}\n df_c = pd.DataFrame(data=d)\n df_c.to_csv('data/covid19_stats_countries.csv')\n\n df_countries = pd.DataFrame(countries_list)\n df_countries.to_csv('data/covid19_countries_list.csv')\n\n# detect_growth('data/covid19_data.csv', 'data/covid19_processed_data_', False)\n# detect_growth('data/covid19_data_backtesting.csv', 'data/covid19_processed_backtesting_data_', True)\n\n\n# In[10]:\n\n\ndef construct_hill_growth(input_file, country, backtesting):\n df = pd.read_csv(input_file, parse_dates=True)\n columns = df.columns.values\n for column in columns:\n if column == country:\n data = pd.DataFrame(df[column].values)\n \n data = data.reset_index(drop=False)\n data.columns = ['Timestep', 'Total Cases']\n \n # Randomly initialize the coefficients\n p0 = np.random.exponential(size=3)\n\n # Set min bound 0 on all 
coefficients, and set different max bounds for each coefficient\n bounds = (0, [1000000., 100., 1000.])\n\n # Convert pd.Series to np.Array and use Scipy's curve fit to find the best Nonlinear Least Squares coefficients\n x = np.array(data['Timestep']) + 1\n y = np.array(data['Total Cases'])\n \n try:\n (a,b,c),cov = optim.curve_fit(func_hill, x, y, bounds=bounds, p0=p0, maxfev=1000)\n horizon = 21\n if backtesting == True:\n horizon = 26\n for day in range(x[-1] + 1, x[-1] + horizon):\n x = np.append(x, day)\n \n res_df = df[['Report_Date']].copy()\n future_range = pd.date_range(df['Report_Date'].iloc[-1], periods=horizon, freq='D')\n future_columns = {'Report_Date': future_range.strftime('%Y-%m-%d')}\n future_df = pd.DataFrame(future_columns)\n future_df = future_df.iloc[1:]\n res_df = res_df.append(future_df)\n\n res_df['y_hill'] = func_hill(x, a, b, c)\n res_df.columns = ['ds', 'y_hill']\n if backtesting == True:\n res_df.columns = ['ds', 'y_hill_b1']\n \n return res_df\n except RuntimeError:\n print('No fit found for: ', column)\n return None\n\n# construct_hill_growth('data/covid19_data.csv', 'Lithuania_cases')\n\n\n# In[16]:\n\n\ndef build_model(country):\n try:\n df = pd.read_csv('data/covid19_processed_data_' + country + '.csv', parse_dates=True)\n forecast_b1 = None\n try:\n df_b1 = pd.read_csv('data/covid19_processed_backtesting_data_' + country + '.csv', parse_dates=True)\n df_b1 = df_b1[['Report_Date', country, 'cap']].dropna()\n df_b1.columns = ['ds', 'y', 'cap']\n m_b1 = Prophet(growth=\"logistic\")\n m_b1.fit(df_b1)\n future_b1 = m_b1.make_future_dataframe(periods=25)\n future_b1['cap'] = df_b1['cap'].iloc[0]\n forecast_b1 = m_b1.predict(future_b1)\n forecast_b1 = forecast_b1[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].dropna()\n forecast_b1.columns = ['ds', 'yhat_b1', 'yhat_b1_lower', 'yhat_b1_upper']\n except FileNotFoundError:\n print('Skipping backtesing:', country)\n \n df_ = df.copy()\n df = df[['Report_Date', country, 'cap']].dropna()\n df.columns = ['ds', 'y', 'cap']\n\n m = Prophet(growth=\"logistic\")\n m.fit(df)\n\n future = m.make_future_dataframe(periods=20)\n future['cap'] = df['cap'].iloc[0]\n forecast = m.predict(future)\n\n res_df = forecast.set_index('ds')[['yhat', 'yhat_lower', 'yhat_upper']].join(df.set_index('ds').y).reset_index()\n\n res_hill = construct_hill_growth('data/covid19_data.csv', country, False)\n if res_hill is not None:\n res_df = res_df.set_index('ds')[['yhat', 'yhat_lower', 'yhat_upper', 'y']].join(res_hill.set_index('ds')[['y_hill']]).reset_index()\n res_hill_b1 = construct_hill_growth('data/covid19_data_backtesting.csv', country, True)\n if res_hill_b1 is not None:\n res_df = res_df.set_index('ds')[['yhat', 'yhat_lower', 'yhat_upper', 'y', 'y_hill']].join(res_hill_b1.set_index('ds')[['y_hill_b1']]).reset_index()\n\n if forecast_b1 is not None:\n res_df = res_df.set_index('ds')[['yhat', 'yhat_lower', 'yhat_upper', 'y', 'y_hill', 'y_hill_b1']].join(forecast_b1.set_index('ds')[['yhat_b1', 'yhat_b1_lower', 'yhat_b1_upper']]).reset_index()\n \n res_df['current_date'] = df['ds'].iloc[-1]\n res_df['fastest_growth_day'] = df_['fastest_grow_day'].iloc[-1]\n res_df['growth_stabilized'] = df_['growth_stabilized'].iloc[-1]\n res_df['current_day'] = df_['timestep'].iloc[-1]\n res_df['cap'] = df['cap'].iloc[0]\n\n res_df.to_csv('data/covid19_forecast_data_' + country + '.csv')\n\n print('Processed:', country)\n except FileNotFoundError:\n print('Skipping:', country)\n \n# fig1 = m.plot(forecast)\n# fig1.set_size_inches(18.5, 8.5)\n# datenow = 
datetime(2020, 4, 5)\n# dateend = datenow + timedelta(days=20)\n# datestart = dateend - timedelta(days=71)\n# plt.xlim([datestart, dateend])\n# plt.title(\"COVID19 forecast: \" + country, fontsize=20)\n# plt.xlabel(\"Day\", fontsize=20)\n# plt.ylabel(\"Infections\", fontsize=20)\n# plt.axvline(datenow, color=\"k\", linestyle=\":\")\n# plt.show()\n \n# print(res_df[['ds', 'y', 'yhat', 'yhat_lower', 'yhat_upper', 'current_date', 'fastest_growth_day', 'growth_stabilized', 'current_day']].tail(30))\n \n# build_model('Lithuania_cases')\n\n\n# In[17]:\n\n\ndef calculate_forecast():\n df = pd.read_csv('data/covid19_data.csv', parse_dates=True)\n columns = df.columns.values\n for column in columns:\n if column.endswith('_cases'):\n build_model(column)\n print('Forecast calculation completed')\n \n# calculate_forecast()\n\n"
] | [
[
"pandas.date_range",
"scipy.optimize.curve_fit",
"numpy.append",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.exp",
"numpy.power",
"numpy.log",
"numpy.random.exponential",
"numpy.array"
]
] |
makart19/daal4py | [
"8264fe81c772478c5530f38077a129f027f3677d"
] | [
"daal4py/sklearn/decomposition/_pca_0_23.py"
] | [
"#\n#*******************************************************************************\n# Copyright 2014-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#******************************************************************************/\n\nimport numpy as np\nimport numbers\n\nfrom sklearn import decomposition\nfrom sklearn.utils import check_array\n\nfrom sklearn.decomposition._pca import PCA as PCA_original\nfrom sklearn.decomposition._pca import (_infer_dimension, svd_flip)\n\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.extmath import stable_cumsum\nfrom scipy.sparse import issparse\n\nimport daal4py\nfrom .._utils import getFPType, method_uses_sklearn, method_uses_daal\nimport logging\n\n\ndef _daal4py_svd(X):\n X = check_array(X, dtype=[np.float64, np.float32])\n X_fptype = getFPType(X)\n alg = daal4py.svd(\n fptype=X_fptype,\n method='defaultDense',\n leftSingularMatrix='requiredInPackedForm',\n rightSingularMatrix='requiredInPackedForm'\n )\n res = alg.compute(X)\n s = res.singularValues\n U = res.leftSingularMatrix\n V = res.rightSingularMatrix\n return U, np.ravel(s), V\n\n\ndef _validate_n_components(n_components, n_samples, n_features):\n if n_components == 'mle':\n if n_samples < n_features:\n raise ValueError(\"n_components='mle' is only supported \"\n \"if n_samples >= n_features\")\n elif not 0 <= n_components <= min(n_samples, n_features):\n raise ValueError(\"n_components=%r must be between 0 and \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='full'\"\n % (n_components, min(n_samples, n_features)))\n elif n_components >= 1:\n if not isinstance(n_components, (numbers.Integral, np.integer)):\n raise ValueError(\"n_components=%r must be of type int \"\n \"when greater than or equal to 1, \"\n \"was of type=%r\"\n % (n_components, type(n_components)))\n\n\ndef _process_n_components_None(self_n_components, self_svd_solver, X_shape):\n # Handle n_components==None\n if self_n_components is None:\n if self_svd_solver != 'arpack':\n n_components = min(X_shape)\n else:\n n_components = min(X_shape) - 1\n else:\n n_components = self_n_components\n\n return n_components\n\n\ndef _n_components_from_fraction(explained_variance_ratio, frac):\n # number of components for which the cumulated explained\n # variance percentage is superior to the desired threshold\n # side='right' ensures that number of features selected\n # their variance is always greater than n_components float\n # passed. 
More discussion in issue: #15669\n ratio_cumsum = stable_cumsum(explained_variance_ratio)\n n_components = np.searchsorted(ratio_cumsum, frac,\n side='right') + 1\n return n_components\n \n\ndef _fit_full(self, X, n_components):\n \"\"\"Fit the model by computing full SVD on X\"\"\"\n n_samples, n_features = X.shape\n\n _validate_n_components(n_components, n_samples, n_features)\n\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n\n if X.shape[0] > X.shape[1] and (X.dtype == np.float64 or X.dtype == np.float32):\n U, S, V = _daal4py_svd(X)\n else:\n U, S, V = np.linalg.svd(X, full_matrices=False)\n # flip eigenvectors' sign to enforce deterministic output\n U, V = svd_flip(U, V)\n\n components_ = V\n\n # Get variance explained by singular values\n explained_variance_ = (S ** 2) / (n_samples - 1)\n total_var = explained_variance_.sum()\n explained_variance_ratio_ = explained_variance_ / total_var\n\n # Postprocess the number of components required\n if n_components == 'mle':\n n_components = \\\n _infer_dimension(explained_variance_, n_samples)\n elif 0 < n_components < 1.0:\n n_components = _n_components_from_fraction(\n explained_variance_ratio_, n_components)\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 12.46)\n if n_components < min(n_features, n_samples):\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = explained_variance_[:n_components]\n self.explained_variance_ratio_ = \\\n explained_variance_ratio_[:n_components]\n self.singular_values_ = S[:n_components]\n\n return U, S, V\n\n\n_fit_full_copy = _fit_full\n\nclass PCA_prev(PCA_original):\n __doc__ = PCA_original.__doc__\n\n def __init__(self, n_components=None, copy=True, whiten=False,\n svd_solver='auto', tol=0.0, iterated_power='auto',\n random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n self.svd_solver = svd_solver\n self.tol = tol\n self.iterated_power = iterated_power\n self.random_state = random_state\n\n def _fit_full(self, X, n_components):\n return _fit_full_copy(self, X, n_components)\n\n\nclass PCA(PCA_original):\n def __init__(self, n_components=None, copy=True, whiten=False,\n svd_solver='auto', tol=0.0, iterated_power='auto',\n random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n self.svd_solver = svd_solver\n self.tol = tol\n self.iterated_power = iterated_power\n self.random_state = random_state\n\n\n def _fit_daal4py(self, X, n_components):\n n_samples, n_features = X.shape\n n_sf_min = min(n_samples, n_features)\n\n _validate_n_components(n_components, n_samples, n_features)\n\n if n_components == 'mle':\n daal_n_components = n_features\n elif n_components < 1:\n daal_n_components = n_sf_min\n else:\n daal_n_components = n_components\n\n fpType = getFPType(X)\n centering_algo = daal4py.normalization_zscore(\n fptype=fpType, doScale=False)\n pca_alg = daal4py.pca(\n fptype=fpType,\n method='svdDense',\n normalization=centering_algo,\n resultsToCompute='mean|variance|eigenvalue',\n isDeterministic=True,\n nComponents=daal_n_components\n )\n pca_res = pca_alg.compute(X)\n\n self.mean_ = pca_res.means.ravel()\n variances_ = pca_res.variances.ravel()\n components_ = pca_res.eigenvectors\n explained_variance_ = 
pca_res.eigenvalues.ravel()\n tot_var = explained_variance_.sum()\n explained_variance_ratio_ = explained_variance_ / tot_var\n\n if n_components == 'mle':\n n_components = \\\n _infer_dimension(explained_variance_, n_samples)\n elif 0 < n_components < 1.0:\n n_components = _n_components_from_fraction(\n explained_variance_ratio_, n_components)\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 12.46)\n if n_components < n_sf_min:\n if explained_variance_.shape[0] == n_sf_min:\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n resid_var_ = variances_.sum()\n resid_var_ -= explained_variance_[:n_components].sum()\n self.noise_variance_ = resid_var_ / (n_sf_min - n_components)\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = explained_variance_[:n_components]\n self.explained_variance_ratio_ = \\\n explained_variance_ratio_[:n_components]\n self.singular_values_ = np.sqrt((n_samples - 1) * self.explained_variance_)\n\n\n def _transform_daal4py(self, X, whiten=False, scale_eigenvalues=True, check_X=True):\n check_is_fitted(self)\n\n X = check_array(X, dtype=[np.float64, np.float32], force_all_finite=check_X)\n fpType = getFPType(X)\n\n tr_data = dict()\n if self.mean_ is not None:\n tr_data['mean'] = self.mean_.reshape((1, -1))\n if whiten:\n if scale_eigenvalues:\n tr_data['eigenvalue'] = (self.n_samples_ - 1) * self.explained_variance_.reshape((1, -1))\n else:\n tr_data['eigenvalue'] = self.explained_variance_.reshape((1, -1))\n elif scale_eigenvalues:\n tr_data['eigenvalue'] = np.full(\n (1, self.explained_variance_.shape[0]),\n self.n_samples_ - 1.0, dtype=X.dtype)\n\n if X.shape[1] != self.n_features_:\n raise ValueError(\"The number of features of the input data, {}, is not \"\n \"equal to the number of features of the training data, {}\".format(\n X.shape[1], self.n_features_))\n tr_res = daal4py.pca_transform(\n fptype=fpType\n ).compute(X, self.components_, tr_data)\n\n return tr_res.transformedData\n\n\n def _fit_full_daal4py(self, X, n_components):\n n_samples, n_features = X.shape\n\n # due to need to flip components, need to do full decomposition\n self._fit_daal4py(X, min(n_samples, n_features))\n U = self._transform_daal4py(X, whiten=True, check_X=False, scale_eigenvalues=True)\n V = self.components_\n U, V = svd_flip(U, V)\n U = U.copy()\n V = V.copy()\n S = self.singular_values_.copy()\n\n if n_components == 'mle':\n n_components = \\\n _infer_dimension(self.explained_variance_, n_samples)\n elif 0 < n_components < 1.0:\n n_components = _n_components_from_fraction(\n self.explained_variance_ratio_, n_components)\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 
12.46)\n if n_components < min(n_features, n_samples):\n self.noise_variance_ = self.explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = self.components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = self.explained_variance_[:n_components]\n self.explained_variance_ratio_ = \\\n self.explained_variance_ratio_[:n_components]\n self.singular_values_ = self.singular_values_[:n_components]\n\n return U, S, V\n\n\n def _fit_full_vanilla(self, X, n_components):\n \"\"\"Fit the model by computing full SVD on X\"\"\"\n n_samples, n_features = X.shape\n\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n\n U, S, V = np.linalg.svd(X, full_matrices=False)\n # flip eigenvectors' sign to enforce deterministic output\n U, V = svd_flip(U, V)\n\n components_ = V\n\n # Get variance explained by singular values\n explained_variance_ = (S ** 2) / (n_samples - 1)\n total_var = explained_variance_.sum()\n explained_variance_ratio_ = explained_variance_ / total_var\n\n # Postprocess the number of components required\n if n_components == 'mle':\n n_components = \\\n _infer_dimension(explained_variance_, n_samples)\n elif 0 < n_components < 1.0:\n n_components = _n_components_from_fraction(\n explained_variance_ratio_, n_components)\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 12.46)\n if n_components < min(n_features, n_samples):\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = explained_variance_[:n_components]\n self.explained_variance_ratio_ = \\\n explained_variance_ratio_[:n_components]\n self.singular_values_ = S[:n_components]\n\n return U, S, V\n\n\n def _fit_full(self, X, n_components):\n n_samples, n_features = X.shape\n\n _validate_n_components(n_components, n_samples, n_features)\n\n if n_samples > n_features and (X.dtype == np.float64 or X.dtype == np.float32):\n logging.info(\"sklearn.decomposition.PCA.fit: \" + method_uses_daal)\n return self._fit_full_daal4py(X, n_components)\n logging.info(\"sklearn.decomposition.PCA.fit: \" + method_uses_sklearn)\n return self._fit_full_vanilla(X, n_components)\n\n\n def _fit(self, X):\n \"\"\"Dispatch to the right submethod depending on the chosen solver.\"\"\"\n\n # Raise an error for sparse input.\n # This is more informative than the generic one raised by check_array.\n if issparse(X):\n raise TypeError('PCA does not support sparse input. 
See '\n 'TruncatedSVD for a possible alternative.')\n\n X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True,\n copy=self.copy)\n\n # Handle n_components==None\n n_components = _process_n_components_None(\n self.n_components, self.svd_solver, X.shape)\n\n # Handle svd_solver\n self._fit_svd_solver = self.svd_solver\n if self._fit_svd_solver == 'auto':\n # Small problem or n_components == 'mle', just call full PCA\n if max(X.shape) <= 500 or n_components == 'mle':\n self._fit_svd_solver = 'full'\n elif n_components >= 1 and n_components < .8 * min(X.shape):\n self._fit_svd_solver = 'randomized'\n # This is also the case of n_components in (0,1)\n else:\n self._fit_svd_solver = 'full'\n\n # Call different fits for either full or truncated SVD\n if self._fit_svd_solver == 'full':\n return self._fit_full(X, n_components)\n elif self._fit_svd_solver in ['arpack', 'randomized']:\n logging.info(\"sklearn.decomposition.PCA.fit: \" + method_uses_sklearn)\n return self._fit_truncated(X, n_components, self._fit_svd_solver)\n elif self._fit_svd_solver == 'daal':\n if X.shape[0] < X.shape[1]:\n raise ValueError(\"svd_solver='daal' is applicable for tall and skinny inputs only.\")\n logging.info(\"sklearn.decomposition.PCA.fit: \" + method_uses_daal)\n return self._fit_daal4py(X, n_components)\n else:\n raise ValueError(\"Unrecognized svd_solver='{0}'\"\n \"\".format(self._fit_svd_solver))\n\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n if (self.svd_solver == 'daal' and isinstance(X, np.ndarray) and\n X.shape[0] >= X.shape[1]):\n # Handle n_components==None\n n_components = _process_n_components_None(\n self.n_components, self.svd_solver, X.shape)\n logging.info(\"sklearn.decomposition.PCA.fit: \" + method_uses_daal)\n self._fit_daal4py(X, n_components)\n logging.info(\"sklearn.decomposition.PCA.transform: \" + method_uses_daal)\n if self.n_components_ > 0:\n return self._transform_daal4py(X, whiten=self.whiten, check_X=False)\n return np.empty((self.n_samples_, 0), dtype=X.dtype)\n U, S, V = self._fit(X)\n U = U[:, :self.n_components_]\n\n logging.info(\"sklearn.decomposition.PCA.transform: \" + method_uses_sklearn)\n if self.whiten:\n # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)\n U *= np.sqrt(X.shape[0] - 1)\n else:\n # X_new = X * V = U * S * V^T * V = U * S\n U *= S[:self.n_components_]\n\n return U\n\n def transform(self, X):\n \"\"\"Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples is the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from sklearn.decomposition import IncrementalPCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> ipca = IncrementalPCA(n_components=2, batch_size=3)\n >>> ipca.fit(X)\n IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)\n >>> ipca.transform(X) # doctest: +SKIP\n \"\"\"\n check_is_fitted(self)\n\n X = 
check_array(X)\n if self.n_components_ > 0:\n logging.info(\"sklearn.decomposition.PCA.transform: \" + method_uses_daal)\n return self._transform_daal4py(X, whiten=self.whiten,\n check_X=False, scale_eigenvalues=False)\n logging.info(\"sklearn.decomposition.PCA.transform: \" + method_uses_sklearn)\n if self.mean_ is not None:\n X = X - self.mean_\n X_transformed = np.dot(X, self.components_.T)\n if self.whiten:\n X_transformed /= np.sqrt(self.explained_variance_)\n return X_transformed\n\nif (lambda s: (int(s[:4]), int(s[6:])))( daal4py.__daal_link_version__[:8] ) < (2019, 4):\n # with DAAL < 2019.4 PCA only optimizes fit, using DAAL's SVD\n class PCA(PCA_original):\n __doc__ = PCA_original.__doc__\n\n def __init__(self, n_components=None, copy=True, whiten=False,\n svd_solver='auto', tol=0.0, iterated_power='auto',\n random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n self.svd_solver = svd_solver\n self.tol = tol\n self.iterated_power = iterated_power\n self.random_state = random_state\n\n def _fit_full(self, X, n_components):\n return _fit_full_copy(self, X, n_components)\n"
] | [
[
"sklearn.utils.extmath.stable_cumsum",
"sklearn.utils.validation.check_is_fitted",
"numpy.empty",
"sklearn.decomposition._pca.svd_flip",
"numpy.searchsorted",
"scipy.sparse.issparse",
"sklearn.utils.check_array",
"numpy.linalg.svd",
"numpy.ravel",
"numpy.sqrt",
"sklearn.decomposition._pca._infer_dimension",
"numpy.dot",
"numpy.full",
"numpy.mean"
]
] |
HenryDayHall/awkward-1.0 | [
"4a860e775502f9adb953524c35c5a2de8f7a3181"
] | [
"tests/test_0056b-partitioned-array-numba.py"
] | [
"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport sys\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\nnumba = pytest.importorskip(\"numba\")\n\nak_numba = pytest.importorskip(\"awkward._connect._numba\")\nak_numba_arrayview = pytest.importorskip(\"awkward._connect._numba.arrayview\")\n\nak_numba.register_and_check()\n\n\ndef test_view():\n aslist = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n asarray = ak.repartition(ak.Array(aslist), 3)\n asview = ak_numba_arrayview.ArrayView.fromarray(asarray)\n\n for start in range(10):\n for stop in range(start, 10):\n asview.start = start\n asview.stop = stop\n assert ak.to_list(asview.toarray()) == aslist[start:stop]\n\n asarray = ak.repartition(ak.Array(aslist), [3, 2, 0, 1, 4])\n asview = ak_numba_arrayview.ArrayView.fromarray(asarray)\n\n for start in range(10):\n for stop in range(start, 10):\n asview.start = start\n asview.stop = stop\n assert ak.to_list(asview.toarray()) == aslist[start:stop]\n\n aslist = [[1, 2, 3], [], [4, 5], [6], [7, 8, 9, 10]]\n asarray = ak.repartition(ak.Array(aslist), 3)\n asview = ak_numba_arrayview.ArrayView.fromarray(asarray)\n\n for start in range(5):\n for stop in range(start, 5):\n asview.start = start\n asview.stop = stop\n assert ak.to_list(asview.toarray()) == aslist[start:stop]\n\n\ndef test_boxing1():\n asnumpy = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n assert sys.getrefcount(asnumpy) == 2\n\n aslayout = ak.layout.NumpyArray(asnumpy)\n aspart = ak.repartition(aslayout, 3, highlevel=False)\n asarray = ak.Array(aspart)\n aspart = asarray._layout\n\n assert (\n sys.getrefcount(asnumpy),\n sys.getrefcount(aslayout),\n sys.getrefcount(aspart),\n ) == (3, 2, 3)\n\n @numba.njit\n def f1(x):\n return 3.14\n\n for i in range(5):\n f1(asarray)\n assert (\n sys.getrefcount(asnumpy),\n sys.getrefcount(aslayout),\n sys.getrefcount(aspart),\n ) == (3, 2, 3)\n\n del asarray\n del aspart\n del aslayout\n import gc\n\n gc.collect()\n assert sys.getrefcount(asnumpy) == 2\n\n\ndef test_boxing2():\n asnumpy = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n assert sys.getrefcount(asnumpy) == 2\n\n aslayout = ak.layout.NumpyArray(asnumpy)\n aspart = ak.repartition(aslayout, 3, highlevel=False)\n asarray = ak.Array(aspart)\n aspart = asarray._layout\n\n assert (\n sys.getrefcount(asnumpy),\n sys.getrefcount(aslayout),\n sys.getrefcount(aspart),\n ) == (3, 2, 3)\n\n @numba.njit\n def f2(x):\n return x\n\n for i in range(10):\n out = f2(asarray)\n\n assert isinstance(out.layout, ak.partition.PartitionedArray)\n assert ak.to_list(out) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert (\n sys.getrefcount(asnumpy),\n sys.getrefcount(aslayout),\n sys.getrefcount(aspart),\n ) == (3, 2, 3)\n\n del out\n del asarray\n del aspart\n del aslayout\n import gc\n\n gc.collect()\n assert sys.getrefcount(asnumpy) == 2\n\n\ndef test_boxing3():\n asnumpy = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n assert sys.getrefcount(asnumpy) == 2\n\n aslayout = ak.layout.NumpyArray(asnumpy)\n aspart = ak.repartition(aslayout, 3, highlevel=False)\n asarray = ak.Array(aspart)\n aspart = asarray._layout\n\n assert (\n sys.getrefcount(asnumpy),\n sys.getrefcount(aslayout),\n sys.getrefcount(aspart),\n ) == (3, 2, 3)\n\n @numba.njit\n def f3(x):\n return x, x\n\n for i in range(10):\n out1, out2 = f3(asarray)\n assert isinstance(out1.layout, ak.partition.PartitionedArray)\n assert isinstance(out2.layout, ak.partition.PartitionedArray)\n assert 
ak.to_list(out1) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert ak.to_list(out2) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert (\n sys.getrefcount(asnumpy),\n sys.getrefcount(aslayout),\n sys.getrefcount(aspart),\n ) == (3, 2, 3)\n\n del out1\n del out2\n del asarray\n del aspart\n del aslayout\n import gc\n\n gc.collect()\n assert sys.getrefcount(asnumpy) == 2\n\n\ndef test_getitem_1a():\n array = ak.repartition(\n ak.Array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]), 3\n )\n\n @numba.njit\n def f1(x, i):\n return x[i]\n\n assert [f1(array, i) for i in range(10)] == [\n 0.0,\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n 6.6,\n 7.7,\n 8.8,\n 9.9,\n ]\n assert [f1(array, -i) for i in range(1, 11)] == [\n 9.9,\n 8.8,\n 7.7,\n 6.6,\n 5.5,\n 4.4,\n 3.3,\n 2.2,\n 1.1,\n 0.0,\n ]\n\n\ndef test_getitem_1b():\n asnumpy = np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n array = ak.repartition(ak.Array(asnumpy), 3)\n\n assert sys.getrefcount(asnumpy) == 3\n\n @numba.njit\n def f2(x, i1, i2):\n out = x[i1:i2]\n return out\n\n assert isinstance(f2(array, 0, 10).layout, ak.partition.PartitionedArray)\n assert isinstance(f2(array, 4, 5).layout, ak.partition.PartitionedArray)\n assert isinstance(f2(array, 5, 5).layout, ak.partition.PartitionedArray)\n\n for start in range(-10, 10):\n for stop in range(-10, 10):\n assert (\n ak.to_list(f2(array, start, stop))\n == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9][start:stop]\n )\n\n assert sys.getrefcount(asnumpy) == 3\n\n del array\n assert sys.getrefcount(asnumpy) == 2\n\n\ndef test_getitem_2():\n aslist = [\n {\"x\": 0.0, \"y\": []},\n {\"x\": 1.1, \"y\": [1]},\n {\"x\": 2.2, \"y\": [2, 2]},\n {\"x\": 3.3, \"y\": [3, 3, 3]},\n {\"x\": 4.4, \"y\": [4, 4, 4, 4]},\n {\"x\": 5.5, \"y\": [5, 5, 5]},\n {\"x\": 6.6, \"y\": [6, 6]},\n {\"x\": 7.7, \"y\": [7]},\n {\"x\": 8.8, \"y\": []},\n ]\n asarray = ak.repartition(ak.Array(aslist), 2)\n\n @numba.njit\n def f3a(x):\n return x[\"x\"]\n\n assert ak.to_list(f3a(asarray)) == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8]\n\n @numba.njit\n def f3b(x):\n return x.x\n\n assert ak.to_list(f3b(asarray)) == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8]\n\n @numba.njit\n def f4a(x):\n return x[\"y\"]\n\n assert ak.to_list(f4a(asarray)) == [\n [],\n [1],\n [2, 2],\n [3, 3, 3],\n [4, 4, 4, 4],\n [5, 5, 5],\n [6, 6],\n [7],\n [],\n ]\n\n @numba.njit\n def f4b(x):\n return x.y\n\n assert ak.to_list(f4b(asarray)) == [\n [],\n [1],\n [2, 2],\n [3, 3, 3],\n [4, 4, 4, 4],\n [5, 5, 5],\n [6, 6],\n [7],\n [],\n ]\n\n @numba.njit\n def f5a(x, i):\n return x[\"x\"][i]\n\n assert [f5a(asarray, i) for i in range(-9, 9)]\n\n @numba.njit\n def f5b(x, i):\n return x[i][\"x\"]\n\n assert [f5b(asarray, i) for i in range(-9, 9)]\n\n @numba.njit\n def f5c(x, i):\n return x.x[i]\n\n assert [f5c(asarray, i) for i in range(-9, 9)]\n\n @numba.njit\n def f5d(x, i):\n return x[i].x\n\n assert [f5d(asarray, i) for i in range(-9, 9)]\n\n @numba.njit\n def f6a(x, i):\n return x[\"y\"][i]\n\n assert ak.to_list(f6a(asarray, 6)) == [6, 6]\n assert ak.to_list(f6a(asarray, -3)) == [6, 6]\n\n @numba.njit\n def f6b(x, i):\n return x[i][\"y\"]\n\n assert ak.to_list(f6b(asarray, 6)) == [6, 6]\n assert ak.to_list(f6b(asarray, -3)) == [6, 6]\n\n @numba.njit\n def f6c(x, i):\n return x.y[i]\n\n assert ak.to_list(f6c(asarray, 6)) == [6, 6]\n assert ak.to_list(f6c(asarray, -3)) == [6, 6]\n\n @numba.njit\n def f6d(x, i):\n return x[i].y\n\n assert ak.to_list(f6d(asarray, 6)) == [6, 6]\n assert ak.to_list(f6d(asarray, -3)) == [6, 6]\n\n\ndef test_len():\n 
array = ak.repartition(ak.Array([1.1, 2.2, 3.3, 4.4, 5.5]), 3)\n\n @numba.njit\n def f1(x):\n return len(x)\n\n assert f1(array) == 5\n\n aslist = [\n {\"x\": 0.0, \"y\": []},\n {\"x\": 1.1, \"y\": [1]},\n {\"x\": 2.2, \"y\": [2, 2]},\n {\"x\": 3.3, \"y\": [3, 3, 3]},\n {\"x\": 4.4, \"y\": [4, 4, 4, 4]},\n {\"x\": 5.5, \"y\": [5, 5, 5]},\n {\"x\": 6.6, \"y\": [6, 6]},\n {\"x\": 7.7, \"y\": [7]},\n {\"x\": 8.8, \"y\": []},\n ]\n asarray = ak.repartition(ak.Array(aslist), 2)\n\n assert f1(asarray) == 9\n\n\ndef test_iter():\n asnumpy = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n assert sys.getrefcount(asnumpy) == 2\n\n array = ak.repartition(ak.Array(asnumpy), 3)\n\n assert sys.getrefcount(asnumpy) == 3\n\n @numba.njit\n def f1(x):\n out = 0\n for xi in x:\n out += xi\n return out\n\n for i in range(10):\n assert f1(array) == 45\n assert sys.getrefcount(asnumpy) == 3\n\n del array\n assert sys.getrefcount(asnumpy) == 2\n\n aslist = [\n {\"x\": 0.0, \"y\": []},\n {\"x\": 1.1, \"y\": [1]},\n {\"x\": 2.2, \"y\": [2, 2]},\n {\"x\": 3.3, \"y\": [3, 3, 3]},\n {\"x\": 4.4, \"y\": [4, 4, 4, 4]},\n {\"x\": 5.5, \"y\": [5, 5, 5]},\n {\"x\": 6.6, \"y\": [6, 6]},\n {\"x\": 7.7, \"y\": [7]},\n {\"x\": 8.8, \"y\": []},\n ]\n asarray = ak.repartition(ak.Array(aslist), 2)\n\n @numba.njit\n def f2(x):\n i = 0\n for xi in x:\n if i == 6:\n return xi[\"y\"]\n i += 1\n\n assert ak.to_list(f2(asarray)) == [6, 6]\n\n @numba.njit\n def f3(x):\n i = 0\n for xi in x:\n if i == 6:\n return xi\n i += 1\n\n assert ak.to_list(f3(asarray)) == {\"x\": 6.6, \"y\": [6, 6]}\n"
] | [
[
"numpy.array"
]
] |
georgezywang/BFT-RLForensics | [
"014be0b57f4edf44ed9d933d23df836cb46d8714"
] | [
"src/utils/logging.py"
] | [
"\"\"\"\nCode adapted from https://github.com/TonghanWang/ROMA\n\"\"\"\n\nfrom collections import defaultdict\nimport logging\nimport numpy as np\nimport torch\n\n\nclass Logger:\n def __init__(self, console_logger):\n self.console_logger = console_logger\n\n self.use_tb = False\n self.use_sacred = False\n self.use_hdf = False\n\n self.stats = defaultdict(lambda: [])\n\n def setup_tb(self, directory_name):\n # Import here so it doesn't have to be installed if you don't use it\n from tensorboard_logger import configure, log_value\n configure(directory_name)\n self.tb_logger = log_value\n self.use_tb = True\n\n from tensorboardX import SummaryWriter\n self.writer=SummaryWriter(directory_name+\"-game\")\n\n def setup_sacred(self, sacred_run_dict):\n self.sacred_info = sacred_run_dict.info\n self.use_sacred = True\n\n def log_stat(self, key, value, t, to_sacred=True):\n self.stats[key].append((t, value))\n\n if self.use_tb:\n self.tb_logger(key, value, t)\n\n if self.use_sacred and to_sacred:\n if key in self.sacred_info:\n self.sacred_info[\"{}_T\".format(key)].append(t)\n self.sacred_info[key].append(value)\n else:\n self.sacred_info[\"{}_T\".format(key)] = [t]\n self.sacred_info[key] = [value]\n\n def log_vec(self,mat,metadata,global_step,tag):\n if self.use_tb:\n self.writer.add_embedding(mat,metadata,global_step=global_step,tag=tag)\n\n def print_recent_stats(self):\n log_str = \"Recent Stats | t_env: {:>10} | Episode: {:>8}\\n\".format(*self.stats[\"episode\"][-1])\n i = 0\n for (k, v) in sorted(self.stats.items()):\n if k == \"episode\":\n continue\n i += 1\n window = 5 if k != \"epsilon\" else 1\n # print(\"what broke, huh? {}\".format(k))\n # self.stats[k][-window:] = [x.to(\"cpu\").numpy() for x in self.stats[k][-window:] if isinstance(x, torch.Tensor)]\n item = \"{:.4f}\".format(np.mean([x[1] for x in self.stats[k][-window:]]))\n log_str += \"{:<25}{:>8}\".format(k + \":\", item)\n log_str += \"\\n\" if i % 4 == 0 else \"\\t\"\n self.console_logger.info(log_str)\n\n\n# set up a custom logger\ndef get_logger():\n logger = logging.getLogger()\n logger.handlers = []\n ch = logging.StreamHandler()\n formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logger.setLevel('DEBUG')\n\n return logger\n\n"
] | [
[
"numpy.mean"
]
] |
RickyMexx/DeepRL-LTLf | [
"24cb3ac49e5bb9e07c37644d7226201ccb2b59a4"
] | [
"ra-gym/ra_gym/envs/ra_env.py"
] | [
"import numpy as np\nimport gym\nfrom gym import spaces\nimport time\nclass RAEnv(gym.Env):\n\tmetadata = {\n\t\t'render.modes': ['rgb_array'],\n\t\t'video.frames_per_second': 50\n\t}\n\tdef __init__(self):\n\t\tself.action_space = spaces.Box(low=np.array([-1., -1.]), high=np.array([1., 1.]), dtype=np.float32)\n\t\tself.observation_space = spaces.Box(low=np.array([0., 0., -np.inf, -np.inf, 0., 0.]), high=np.array([1., 1., np.inf, np.inf, 1., 1.]), dtype=np.float32)\n\t\tself.viewer = None\n\t\tself._max_episode_steps = 1000\n\tdef reset(self):\n\t\tself.h = 600\n\t\tself.w = 600\n\t\tself.r = 10\n\t\tself.tr = 30\n\t\tself.x = 0.5\n\t\tself.y = 1\n\t\tself.vx = 0\n\t\tself.vy = 0\n\t\tself.g = -0.5\n\t\tself.dt = 0.05\n\t\tself.ground_el = 1.1\n\t\tself.t1_x = 0.3\n\t\tself.t1_y = 0.3\n\t\tself.t2_x = 0.8\n\t\tself.t2_y = 0.6\n\t\tself.t1_crossed = False\n\t\tself.t2_crossed = False\n\t\tself.ball_circle = None\n\t\tself.ball_trans = None\n\t\tself.t1_circle = None\n\t\tself.t2_circle = None\n\t\tself.done = False\n\t\tself.episodes = 0\n\t\tif self.viewer is not None:\n\t\t\tself.viewer.close()\n\t\t\tself.viewer = None\n\t\treturn np.array([self.x, self.y, self.vx, self.vy, self.t1_crossed, self.t2_crossed])\n\n\tdef render(self, mode='rgb_array'):\n\t\tif self.viewer is None:\n\t\t\tfrom gym.envs.classic_control import rendering\n\t\t\tself.viewer = rendering.Viewer(self.w, self.h)\n\n\t\t\tself.t1_circle = rendering.make_circle(self.tr)\n\t\t\tself.t1_circle.set_color(0, 0, 1)\n\t\t\tt1_trans = rendering.Transform(translation=(self.t1_x*self.w, self.t1_y*self.h))\n\t\t\tself.t1_circle.add_attr(t1_trans)\n\t\t\tself.viewer.add_geom(self.t1_circle)\n\n\t\t\tself.t2_circle = rendering.make_circle(self.tr)\n\t\t\tself.t2_circle.set_color(0, 0, 1)\n\t\t\tt2_trans = rendering.Transform(translation=(self.t2_x*self.w, self.t2_y*self.h))\n\t\t\tself.t2_circle.add_attr(t2_trans)\n\t\t\tself.viewer.add_geom(self.t2_circle)\n\n\t\t\tself.ball_circle = rendering.make_circle(self.r)\n\t\t\tself.ball_circle.set_color(1, 0, 0)\n\t\t\tself.ball_trans = rendering.Transform(translation=(self.x*self.w, self.y*self.h))\n\t\t\tself.ball_circle.add_attr(self.ball_trans)\n\t\t\tself.viewer.add_geom(self.ball_circle)\n\n\t\tif self.t1_crossed:\n\t\t\tself.t1_circle.set_color(0, 1, 0)\n\t\tif self.t2_crossed:\n\t\t\tself.t2_circle.set_color(0, 1, 0)\n\n\t\tself.ball_trans.set_translation(self.x*self.w, self.y*self.h)\n\n\t\treturn self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n\n\tdef step(self, a):\n\t\treward = 0\n\n\t\tif not self.done:\n\t\t\tself.episodes += 1\n\t\t\treward = -1\n\t\t\tax, ay = np.clip(a, -1, 1)\n\n\t\t\tself.vx = self.vx + self.dt*ax\n\t\t\tself.vy = self.vy + self.dt*(ay + self.g)\n\n\t\t\tself.x = self.x + self.vx*self.dt + 0.5 * ax * self.dt**2\n\t\t\tself.y = self.y + self.vy*self.dt + 0.5 * (ay + self.g) * self.dt**2\n\n\t\t\tif self.episodes == self._max_episode_steps:\n\t\t\t\treward = -100\n\t\t\t\tself.done = True\n\t\t\tif self.x < 0 or self.x > 1:\n\t\t\t\treward = -100\n\t\t\t\tself.done = True\n\t\t\tif self.y < 0 or self.y > 1:\n\t\t\t\treward = -100\n\t\t\t\tself.done = True\n\n\t\t\tself.y = np.clip(self.y, 0, 1)\n\t\t\tself.x = np.clip(self.x, 0, 1)\n\n\t\t\tif (self.x - self.t1_x)**2 + (self.y - self.t1_y)**2 <= (self.tr/self.w + self.r/self.w)**2:\n\t\t\t\tif not self.t1_crossed: reward = 100\n\t\t\t\tself.t1_crossed = True\n\n\t\t\tif (self.x - self.t2_x)**2 + (self.y - self.t2_y)**2 <= (self.tr/self.w + self.r/self.w)**2 and self.t1_crossed:\n\t\t\t\tif 
not self.t2_crossed: reward = 100\n\t\t\t\tself.t2_crossed = True\n\n\t\t\tif self.t1_crossed and self.t2_crossed:\n\t\t\t\tself.done=True\n\n\t\treturn np.array([self.x, self.y, self.vx, self.vy, self.t1_crossed, self.t2_crossed]), reward, self.done, {}\n\n\tdef close(self):\n\t\tif self.viewer is not None:\n\t\t\tself.viewer.close()\n\t\t\tself.viewer = None"
] | [
[
"numpy.array",
"numpy.clip"
]
] |
ladsantos/blastoise | [
"30458f41c5432c97f71a54a73a89fa1193e28b38"
] | [
"sunburn/test_hst_observation.py"
] | [
"import numpy as np\nfrom . import hst_observation, spectroscopy\n\ndatasets = ['ld9m10ujq', 'ld9m10uyq']\nvisit1 = hst_observation.Visit(datasets, 'cos', prefix='data/')\n\nline_list = spectroscopy.COSFUVLineList(wavelength_shift=.0,\n range_factor=1.0).lines\n\ntr = 'Si III'\nline = 0\nref_wl = line_list[tr][line].central_wavelength\n\nshift = np.array([-23.623059845212502, -23.37932521889708])\n\norbit_list = [visit1.orbit['ld9m10ujq'], visit1.orbit['ld9m10uyq']]\ntest = hst_observation.CombinedSpectrum(\n orbit_list, ref_wl, 'cos', velocity_range=[-100, 100], doppler_corr=shift)\n"
] | [
[
"numpy.array"
]
] |
eltrompetero/coniii | [
"d698696c11e12a62fe3340eb2f4d3344145a96dd"
] | [
"coniii/ising_eqn/ising_eqn_2_sym.py"
] | [
"# MIT License\n# \n# Copyright (c) 2019 Edward D. Lee, Bryan C. Daniels\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Equations for 2-spin Ising model.\n\n# Written on 2019/09/19.\nfrom numpy import zeros, exp, array, prod, isnan\nfrom ..enumerate import fast_logsumexp\n\ndef calc_observables(params):\n \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n Returns all correlations.\n \"\"\"\n Cout = zeros((3))\n H = params[0:2]\n J = params[2:3]\n energyTerms = array([ +H[0]+H[1]+J[0], +H[0]-H[1]-J[0], -H[0]+H[1]-J[0], -H[0]-H[1]+J[0],])\n logZ = fast_logsumexp(energyTerms)[0]\n num = fast_logsumexp(energyTerms, [ 1, 1,-1,-1])\n Cout[0] = exp( num[0] - logZ ) * num[1]\n num = fast_logsumexp(energyTerms, [ 1,-1, 1,-1])\n Cout[1] = exp( num[0] - logZ ) * num[1]\n num = fast_logsumexp(energyTerms, [ 1,-1,-1, 1])\n Cout[2] = exp( num[0] - logZ ) * num[1]\n Cout[isnan(Cout)] = 0.\n return(Cout)\n\ndef p(params):\n \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n Returns probabilities of all configurations.\n \"\"\"\n Cout = zeros((3))\n H = params[0:2]\n J = params[2:3]\n H = params[0:2]\n J = params[2:3]\n Pout = zeros((4))\n energyTerms = array([ +H[0]+H[1]+J[0], +H[0]-H[1]-J[0], -H[0]+H[1]-J[0], -H[0]-H[1]+J[0],])\n logZ = fast_logsumexp(energyTerms)[0]\n Pout[0] = exp( +H[0]+H[1]+J[0] - logZ )\n Pout[1] = exp( +H[0]-H[1]-J[0] - logZ )\n Pout[2] = exp( -H[0]+H[1]-J[0] - logZ )\n Pout[3] = exp( -H[0]-H[1]+J[0] - logZ )\n\n Pout = Pout[::-1]\n return(Pout)\n"
] | [
[
"numpy.array",
"numpy.exp",
"numpy.zeros",
"numpy.isnan"
]
] |
yashtailor/preprocessy | [
"015b4653ef064e3c4db5884aa1b6d1b098383055"
] | [
"tests/test_encode.py"
] | [
"from collections import Counter\n\nimport pandas as pd\nimport pytest\nfrom preprocessy.encoding import Encoder\n\nord_dict = {\"Profession\": {\"Student\": 1, \"Teacher\": 2, \"HOD\": 3}}\n\n\n# test for empty input\ndef test_empty_df():\n params = {\"target_label\": \"Price\", \"ord_dict\": ord_dict}\n with pytest.raises(ValueError):\n encoder = Encoder()\n encoder.encode(params=params)\n\n\n# test for warning\ndef test_target_label_warning():\n train_csv = pd.read_csv(\"datasets/encoding/testnew.csv\")\n params = {\"train_df\": train_csv, \"ord_dict\": ord_dict}\n with pytest.warns(UserWarning):\n encoder = Encoder()\n encoder.encode(params=params)\n\n\n# test ordinal encoding\ndef test_mapping():\n train_csv = pd.read_csv(\"datasets/encoding/testnew.csv\")\n train_csv.drop([\"Price\"], axis=1, inplace=True)\n params = {\n \"train_df\": train_csv,\n \"target_label\": \"Price\",\n \"ord_dict\": ord_dict,\n }\n encoder = Encoder()\n encoder.encode(params=params)\n assert params[\"train_df\"][\"ProfessionEncoded\"].nunique() == 3\n assert params[\"train_df\"][\"ProfessionEncoded\"][2] == 3\n assert Counter(params[\"ord_dict\"][\"Profession\"].values()) == Counter(\n params[\"train_df\"][\"ProfessionEncoded\"].unique()\n )\n\n\ndef test_ordinal_vs_categorical():\n train_csv = pd.read_csv(\"datasets/encoding/testnew.csv\")\n params = {\n \"train_df\": train_csv,\n \"target_label\": \"Price\",\n \"cat_cols\": [\"Profession\"],\n \"ord_dict\": ord_dict,\n \"one_hot\": False,\n }\n\n encoder = Encoder()\n encoder.encode(params=params)\n assert params[\"train_df\"][\"ProfessionEncoded\"][0] == 1\n\n\ndef test_categorical_encoding():\n train_csv = pd.read_csv(\"datasets/encoding/testnew.csv\")\n params = {\n \"train_df\": train_csv,\n \"target_label\": \"Price\",\n \"cat_cols\": [\"Profession\"],\n \"one_hot\": False,\n }\n encoder = Encoder()\n encoder.encode(params=params)\n assert \"ProfessionEncoded\" in params[\"train_df\"].columns\n assert params[\"train_df\"][\"ProfessionEncoded\"][0] == 0\n\n\n# test for empty mapping\ndef test_empty_weight_mapping():\n train_csv = pd.read_csv(\"datasets/encoding/testnew.csv\")\n train_csv.drop([\"Price\"], axis=1, inplace=True)\n ord_dict1 = ord_dict.copy()\n ord_dict1[\"Size\"] = None\n params = {\n \"train_df\": train_csv,\n \"target_label\": \"Price\",\n \"ord_dict\": ord_dict1,\n }\n with pytest.raises(ValueError):\n encoder = Encoder()\n encoder.encode(params=params)\n\n\n# test for one-hot encoding\ndef test_one_hot_encoding():\n train_csv = pd.read_csv(\"datasets/encoding/testnew.csv\")\n params = {\n \"train_df\": train_csv,\n \"target_label\": \"Price\",\n \"cat_cols\": [\"Test\", \"Labels\"],\n \"ord_dict\": ord_dict,\n \"one_hot\": True,\n }\n encoder = Encoder()\n encoder.encode(params=params)\n assert \"Test_Tata\" in params[\"train_df\"].columns\n assert params[\"train_df\"][\"Test_Tata\"][1] == 1\n\n\ndef test_ignore_cat_col():\n train_csv = pd.read_csv(\"datasets/encoding/testnew.csv\")\n params = {\n \"train_df\": train_csv,\n \"target_label\": \"Price\",\n \"cat_cols\": [\"Profession\"],\n \"ord_dict\": ord_dict,\n \"one_hot\": True,\n }\n encoder = Encoder()\n encoder.encode(params=params)\n assert \"Profession_HOD\" not in params[\"train_df\"].columns\n\n\ndef test_parser():\n train_df = pd.DataFrame(\n {\n \"A\": [i for i in range(100)],\n \"B\": [\"hello\" if i % 2 == 0 else \"bye\" for i in range(100)],\n }\n )\n params = {\"train_df\": train_df, \"target_label\": \"C\"}\n encoder = Encoder()\n encoder.encode(params=params)\n 
assert \"B\" in params[\"cat_cols\"]\n"
] | [
[
"pandas.read_csv"
]
] |
hxri/mars | [
"f7864f00911883b94800b63856f0e57648d3d9b4"
] | [
"mars/tensor/arithmetic/tests/test_arithmetic.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nimport scipy.sparse as sps\n\nfrom mars.core import enter_mode, tile\nfrom mars.tensor.datasource import array, ones, tensor, empty\nfrom mars.tensor.fetch import TensorFetch\nfrom mars.tensor.arithmetic import add, subtract, truediv, log, frexp, \\\n around, isclose, isfinite, negative, cos, tree_add, tree_multiply, \\\n TensorAdd, TensorTreeAdd, TensorTreeMultiply, TensorSubtract, \\\n TensorLog, TensorIsclose, TensorGreaterThan\nfrom mars.tensor.linalg import matmul\nfrom mars.tensor.core import Tensor, SparseTensor\n\n\ndef test_add():\n t1 = ones((3, 4), chunk_size=2)\n t2 = ones(4, chunk_size=2)\n t3 = t1 + t2\n k1 = t3.key\n assert t3.op.gpu is False\n t1, t2, t3 = tile(t1, t2, t3)\n assert t3.key != k1\n assert t3.shape == (3, 4)\n assert len(t3.chunks) == 4\n assert t3.chunks[0].inputs == [t1.chunks[0].data, t2.chunks[0].data]\n assert t3.chunks[1].inputs == [t1.chunks[1].data, t2.chunks[1].data]\n assert t3.chunks[2].inputs == [t1.chunks[2].data, t2.chunks[0].data]\n assert t3.chunks[3].inputs == [t1.chunks[3].data, t2.chunks[1].data]\n assert t3.op.dtype == np.dtype('f8')\n assert t3.chunks[0].op.dtype == np.dtype('f8')\n\n t1 = ones((3, 4), chunk_size=2)\n t4 = t1 + 1\n t1, t4 = tile(t1, t4)\n assert t4.shape == (3, 4)\n assert len(t3.chunks) == 4\n assert t4.chunks[0].inputs == [t1.chunks[0].data]\n assert t4.chunks[0].op.rhs == 1\n assert t4.chunks[1].inputs == [t1.chunks[1].data]\n assert t4.chunks[1].op.rhs == 1\n assert t4.chunks[2].inputs == [t1.chunks[2].data]\n assert t4.chunks[2].op.rhs == 1\n assert t4.chunks[3].inputs == [t1.chunks[3].data]\n assert t4.chunks[3].op.rhs == 1\n\n t5 = add([1, 2, 3, 4], 1)\n tile(t5)\n assert t4.chunks[0].inputs == [t1.chunks[0].data]\n\n t2 = ones(4, chunk_size=2)\n t6 = ones((3, 4), chunk_size=2, gpu=True)\n t7 = ones(4, chunk_size=2, gpu=True)\n t8 = t6 + t7\n t9 = t6 + t2\n assert t8.op.gpu is True\n t8, t9 = tile(t8, t9)\n assert t8.chunks[0].op.gpu is True\n assert t9.op.gpu is None\n assert t9.chunks[0].op.gpu is None\n\n # sparse tests\n t1 = tensor([[0, 1, 0], [1, 0, 0]], chunk_size=2).tosparse()\n\n t = t1 + 1\n assert t.op.gpu is False\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is True\n\n t = t1 + 0\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t2 = tensor([[1, 0, 0]], chunk_size=2).tosparse()\n\n t = t1 + t2\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is True\n\n t3 = tensor([1, 1, 1], chunk_size=2)\n t = t1 + t3\n assert t.issparse() is False\n assert type(t) is Tensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is False\n\n\ndef test_add_order():\n raw_a = np.random.rand(4, 2)\n raw_b = np.asfortranarray(np.random.rand(4, 2))\n t1 = tensor(raw_a)\n t2 = tensor(raw_b)\n out = 
tensor(raw_b)\n\n # C + scalar\n assert (t1 + 1).flags['C_CONTIGUOUS'] == (raw_a + 1).flags['C_CONTIGUOUS']\n assert (t1 + 1).flags['F_CONTIGUOUS'] == (raw_a + 1).flags['F_CONTIGUOUS']\n # C + C\n assert (t1 + t1).flags['C_CONTIGUOUS'] == (raw_a + raw_a).flags['C_CONTIGUOUS']\n assert (t1 + t1).flags['F_CONTIGUOUS'] == (raw_a + raw_a).flags['F_CONTIGUOUS']\n # F + scalar\n assert (t2 + 1).flags['C_CONTIGUOUS'] == (raw_b + 1).flags['C_CONTIGUOUS']\n assert (t2 + 1).flags['F_CONTIGUOUS'] == (raw_b + 1).flags['F_CONTIGUOUS']\n # F + F\n assert (t2 + t2).flags['C_CONTIGUOUS'] == (raw_b + raw_b).flags['C_CONTIGUOUS']\n assert (t2 + t2).flags['F_CONTIGUOUS'] == (raw_b + raw_b).flags['F_CONTIGUOUS']\n # C + F\n assert (t1 + t2).flags['C_CONTIGUOUS'] == (raw_a + raw_b).flags['C_CONTIGUOUS']\n assert (t1 + t2).flags['F_CONTIGUOUS'] == (raw_a + raw_b).flags['F_CONTIGUOUS']\n # C + C + out\n assert add(t1, t1, out=out).flags['C_CONTIGUOUS'] == np.add(raw_a, raw_a, out=np.empty((4, 2), order='F')).flags['C_CONTIGUOUS']\n assert add(t1, t1, out=out).flags['F_CONTIGUOUS'] == np.add(raw_a, raw_a, out=np.empty((4, 2), order='F')).flags['F_CONTIGUOUS']\n\n with pytest.raises(TypeError):\n add(t1, 1, order='B')\n\n\ndef test_multiply():\n t1 = tensor([[0, 1, 0], [1, 0, 0]], chunk_size=2).tosparse()\n\n t = t1 * 10\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is True\n\n t2 = tensor([[1, 0, 0]], chunk_size=2).tosparse()\n\n t = t1 * t2\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is True\n\n t3 = tensor([1, 1, 1], chunk_size=2)\n t = t1 * t3\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is True\n\n\ndef test_divide():\n t1 = tensor([[0, 1, 0], [1, 0, 0]], chunk_size=2).tosparse()\n\n t = t1 / 10\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is True\n\n t2 = tensor([[1, 0, 0]], chunk_size=2).tosparse()\n\n t = t1 / t2\n assert t.issparse() is False\n assert type(t) is Tensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is False\n\n t3 = tensor([1, 1, 1], chunk_size=2)\n t = t1 / t3\n assert t.issparse() is False\n assert type(t) is Tensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is False\n\n t = t3 / t1\n assert t.issparse() is False\n assert type(t) is Tensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is False\n\n\ndef test_datatime_arith():\n t1 = array([np.datetime64('2005-02-02'), np.datetime64('2005-02-03')])\n t2 = t1 + np.timedelta64(1)\n\n assert isinstance(t2.op, TensorAdd)\n\n t3 = t1 - np.datetime64('2005-02-02')\n\n assert isinstance(t3.op, TensorSubtract)\n assert t3.dtype == (np.array(['2005-02-02', '2005-02-03'], dtype=np.datetime64) -\n np.datetime64('2005-02-02')).dtype\n\n t1 = array([np.datetime64('2005-02-02'), np.datetime64('2005-02-03')])\n subtract(t1, np.datetime64('2005-02-02'), out=empty(t1.shape, dtype=t3.dtype))\n\n t1 = array([np.datetime64('2005-02-02'), np.datetime64('2005-02-03')])\n add(t1, np.timedelta64(1, 'D'), out=t1)\n\n\ndef test_add_with_out():\n t1 = ones((3, 4), chunk_size=2)\n t2 = ones(4, chunk_size=2)\n\n t3 = add(t1, t2, out=t1)\n\n assert isinstance(t1.op, TensorAdd)\n assert t1.op.out.key == t1.op.lhs.key\n assert t3 is t1\n assert t3.shape == (3, 4)\n assert t3.op.lhs.extra_params.raw_chunk_size == 2\n assert t3.op.rhs is t2.data\n assert t3.key != t3.op.lhs.key\n\n t1, t3 = tile(t1, t3)\n\n 
assert isinstance(t1.chunks[0].op, TensorAdd)\n assert t1.chunks[0].op.out.key == t1.chunks[0].op.lhs.key\n\n with pytest.raises(TypeError):\n add(t1, t2, out=1)\n\n with pytest.raises(ValueError):\n add(t1, t2, out=t2)\n\n with pytest.raises(TypeError):\n truediv(t1, t2, out=t1.astype('i8'))\n\n t1 = ones((3, 4), chunk_size=2, dtype=float)\n t2 = ones(4, chunk_size=2, dtype=int)\n\n t3 = add(t2, 1, out=t1)\n assert t3.shape == (3, 4)\n assert t3.dtype == np.float64\n\n\ndef test_dtype_from_out():\n x = array([-np.inf, 0., np.inf])\n y = array([2, 2, 2])\n\n t3 = isfinite(x, y)\n assert t3.dtype == y.dtype\n\n\ndef test_log_without_where():\n t1 = ones((3, 4), chunk_size=2)\n\n t2 = log(t1, out=t1)\n\n assert isinstance(t2.op, TensorLog)\n assert t1.op.out.key == t1.op.input.key\n assert t2 is t1\n assert t2.op.input.extra_params.raw_chunk_size == 2\n assert t2.key != t2.op.input.key\n\n t3 = empty((3, 4), chunk_size=2)\n t4 = log(t1, out=t3, where=t1 > 0)\n assert isinstance(t4.op, TensorLog)\n assert t4 is t3\n assert t2.op.input.extra_params.raw_chunk_size == 2\n assert t2.key != t2.op.input.key\n\n\ndef test_copy_add():\n t1 = ones((3, 4), chunk_size=2)\n t2 = ones(4, chunk_size=2)\n t3 = t1 + t2\n t3 = tile(t3)\n\n c = t3.chunks[0]\n inputs = c.op.lhs, TensorFetch().new_chunk(\n c.op.rhs.inputs, shape=c.op.rhs.shape, index=c.op.rhs.index, _key=c.op.rhs.key)\n new_c = c.op.copy().reset_key().new_chunk(inputs, shape=c.shape, _key='new_key')\n assert new_c.key == 'new_key'\n assert new_c.inputs[1] is new_c.op.rhs\n assert isinstance(new_c.inputs[1].op, TensorFetch)\n\n\ndef test_compare():\n t1 = ones(4, chunk_size=2) * 2\n t2 = ones(4, chunk_size=2)\n t3 = t1 > t2\n t3 = tile(t3)\n assert len(t3.chunks) == 2\n assert isinstance(t3.op, TensorGreaterThan)\n\n\ndef test_unify_chunk_add():\n t1 = ones(4, chunk_size=2)\n t2 = ones(1, chunk_size=1)\n\n t3 = t1 + t2\n t1, t2, t3 = tile(t1, t2, t3)\n\n assert len(t3.chunks) == 2\n assert t3.chunks[0].inputs[0] == t1.chunks[0].data\n assert t3.chunks[0].inputs[1] == t2.chunks[0].data\n assert t3.chunks[1].inputs[0] == t1.chunks[1].data\n assert t3.chunks[1].inputs[1] == t2.chunks[0].data\n\n\ndef test_frexp():\n t1 = ones((3, 4, 5), chunk_size=2)\n t2 = empty((3, 4, 5), dtype=np.float_, chunk_size=2)\n op_type = type(t1.op)\n\n o1, o2 = frexp(t1)\n\n assert o1.op is o2.op\n assert o1.dtype != o2.dtype\n\n o1, o2 = frexp(t1, t1)\n\n assert o1 is t1\n assert o1.inputs[0] is not t1\n assert isinstance(o1.inputs[0].op, op_type)\n assert o2.inputs[0] is not t1\n\n o1, o2 = frexp(t1, t2, where=t1 > 0)\n\n op_type = type(t2.op)\n assert o1 is t2\n assert o1.inputs[0] is not t1\n assert isinstance(o1.inputs[0].op, op_type)\n assert o2.inputs[0] is not t1\n\n\ndef test_frexp_order():\n raw1 = np.asfortranarray(np.random.rand(2, 4))\n t = tensor(raw1)\n o1 = tensor(np.random.rand(2, 4))\n\n o1, o2 = frexp(t, out1=o1)\n\n assert o1.flags['C_CONTIGUOUS'] == np.frexp(raw1, np.empty((2, 4)))[0].flags['C_CONTIGUOUS']\n assert o1.flags['F_CONTIGUOUS'] == np.frexp(raw1, np.empty((2, 4)))[0].flags['F_CONTIGUOUS']\n assert o2.flags['C_CONTIGUOUS'] == np.frexp(raw1)[1].flags['C_CONTIGUOUS']\n assert o2.flags['F_CONTIGUOUS'] == np.frexp(raw1)[1].flags['F_CONTIGUOUS']\n\n\ndef test_dtype():\n t1 = ones((2, 3), dtype='f4', chunk_size=2)\n\n t = truediv(t1, 2, dtype='f8')\n\n assert t.dtype == np.float64\n\n with pytest.raises(TypeError):\n truediv(t1, 2, dtype='i4')\n\n\ndef test_negative():\n t1 = tensor([[0, 1, 0], [1, 0, 0]], chunk_size=2).tosparse()\n\n t = 
negative(t1)\n assert t.op.gpu is False\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n t = tile(t)\n assert t.chunks[0].op.sparse is True\n\n\ndef test_negative_order():\n raw1 = np.random.rand(4, 2)\n raw2 = np.asfortranarray(np.random.rand(4, 2))\n t1 = tensor(raw1)\n t2 = tensor(raw2)\n t3 = tensor(raw1)\n t4 = tensor(raw2)\n\n # C\n assert negative(t1).flags['C_CONTIGUOUS'] == np.negative(raw1).flags['C_CONTIGUOUS']\n assert negative(t1).flags['F_CONTIGUOUS'] == np.negative(raw1).flags['F_CONTIGUOUS']\n # F\n assert negative(t2).flags['C_CONTIGUOUS'] == np.negative(raw2).flags['C_CONTIGUOUS']\n assert negative(t2).flags['F_CONTIGUOUS'] == np.negative(raw2).flags['F_CONTIGUOUS']\n # C + out\n assert negative(t1, out=t4).flags['C_CONTIGUOUS'] == np.negative(raw1, out=np.empty((4, 2), order='F')).flags['C_CONTIGUOUS']\n assert negative(t1, out=t4).flags['F_CONTIGUOUS'] == np.negative(raw1, out=np.empty((4, 2), order='F')).flags['F_CONTIGUOUS']\n # F + out\n assert negative(t2, out=t3).flags['C_CONTIGUOUS'] == np.negative(raw1, out=np.empty((4, 2), order='C')).flags['C_CONTIGUOUS']\n assert negative(t2, out=t3).flags['F_CONTIGUOUS'] == np.negative(raw1, out=np.empty((4, 2), order='C')).flags['F_CONTIGUOUS']\n\n with pytest.raises(TypeError):\n negative(t1, order='B')\n\n\ndef test_cos():\n t1 = tensor([[0, 1, 0], [1, 0, 0]], chunk_size=2).tosparse()\n\n t = cos(t1)\n assert t.issparse() is True\n assert type(t) is SparseTensor\n\n\ndef test_around():\n t1 = ones((2, 3), dtype='f4', chunk_size=2)\n\n t = around(t1, decimals=3)\n\n assert t.issparse() is False\n assert t.op.decimals == 3\n\n t = tile(t)\n\n assert t.chunks[0].op.decimals == 3\n\n\ndef test_isclose():\n t1 = ones((2, 3), dtype='f4', chunk_size=2)\n\n atol = 1e-4\n rtol = 1e-5\n equal_nan = True\n\n t = isclose(t1, 2, atol=atol, rtol=rtol, equal_nan=equal_nan)\n\n assert isinstance(t.op, TensorIsclose)\n assert t.op.atol == atol\n assert t.op.rtol == rtol\n assert t.op.equal_nan == equal_nan\n\n t = tile(t)\n\n assert isinstance(t.chunks[0].op, TensorIsclose)\n assert t.chunks[0].op.atol == atol\n assert t.chunks[0].op.rtol == rtol\n assert t.chunks[0].op.equal_nan == equal_nan\n\n t1 = ones((2, 3), dtype='f4', chunk_size=2)\n t2 = ones((2, 3), dtype='f4', chunk_size=2)\n\n atol = 1e-4\n rtol = 1e-5\n equal_nan = True\n\n t = isclose(t1, t2, atol=atol, rtol=rtol, equal_nan=equal_nan)\n\n assert isinstance(t.op, TensorIsclose)\n assert t.op.atol == atol\n assert t.op.rtol == rtol\n assert t.op.equal_nan == equal_nan\n\n t = tile(t)\n\n assert isinstance(t.chunks[0].op, TensorIsclose)\n assert t.chunks[0].op.atol == atol\n assert t.chunks[0].op.rtol == rtol\n assert t.chunks[0].op.equal_nan == equal_nan\n\n\ndef test_matmul():\n a_data = [[1, 0], [0, 1]]\n b_data = [[4, 1], [2, 2]]\n\n a = tensor(a_data, chunk_size=1)\n b = tensor(b_data, chunk_size=1)\n\n t = matmul(a, b)\n\n assert t.shape == (2, 2)\n t = tile(t)\n assert t.shape == tuple(sum(s) for s in t.nsplits)\n\n b_data = [1, 2]\n b = tensor(b_data, chunk_size=1)\n\n t = matmul(a, b)\n\n assert t.shape == (2,)\n t = tile(t)\n assert t.shape == tuple(sum(s) for s in t.nsplits)\n\n t = matmul(b, a)\n\n assert t.shape == (2,)\n t = tile(t)\n assert t.shape == tuple(sum(s) for s in t.nsplits)\n\n a_data = np.arange(2 * 2 * 4).reshape((2, 2, 4))\n b_data = np.arange(2 * 2 * 4).reshape((2, 4, 2))\n\n a = tensor(a_data, chunk_size=1)\n b = tensor(b_data, chunk_size=1)\n\n t = matmul(a, b)\n\n assert t.shape == (2, 2, 2)\n t = tile(t)\n assert t.shape == 
tuple(sum(s) for s in t.nsplits)\n\n t = matmul(tensor([2j, 3j], chunk_size=1), tensor([2j, 3j], chunk_size=1))\n\n assert t.shape == ()\n t = tile(t)\n assert t.shape == tuple(sum(s) for s in t.nsplits)\n\n with pytest.raises(ValueError):\n matmul([1, 2], 3)\n\n with pytest.raises(ValueError):\n matmul(np.random.randn(2, 3, 4), np.random.randn(3, 4, 3))\n\n t = matmul(tensor(np.random.randn(2, 3, 4), chunk_size=2),\n tensor(np.random.randn(3, 1, 4, 3), chunk_size=3))\n assert t.shape == (3, 2, 3, 3)\n\n v = ones((100, 100), chunk_size=10)\n tv = matmul(v, v)\n assert tv.shape == (100, 100)\n tv = tile(tv)\n assert tv.shape == tuple(sum(s) for s in tv.nsplits)\n\n\ndef test_tree_arithmetic():\n raws = [np.random.rand(10, 10) for _ in range(10)]\n tensors = [tensor(a, chunk_size=3) for a in raws]\n\n t = tree_add(*tensors, combine_size=4)\n assert isinstance(t.op, TensorTreeAdd)\n assert t.issparse() is False\n assert len(t.inputs) == 3\n assert len(t.inputs[0].inputs) == 4\n assert len(t.inputs[-1].inputs) == 2\n\n t = tree_multiply(*tensors, combine_size=4)\n assert isinstance(t.op, TensorTreeMultiply)\n assert t.issparse() is False\n assert len(t.inputs) == 3\n assert len(t.inputs[0].inputs) == 4\n assert len(t.inputs[-1].inputs) == 2\n\n raws = [sps.random(5, 9, density=.1) for _ in range(10)]\n tensors = [tensor(a, chunk_size=3) for a in raws]\n\n t = tree_add(*tensors, combine_size=4)\n assert isinstance(t.op, TensorTreeAdd)\n assert t.issparse() is True\n assert len(t.inputs) == 3\n assert len(t.inputs[0].inputs) == 4\n assert len(t.inputs[-1].inputs) == 2\n\n t = tree_multiply(*tensors, combine_size=4)\n assert isinstance(t.op, TensorTreeMultiply)\n assert t.issparse() is True\n assert len(t.inputs) == 3\n assert len(t.inputs[0].inputs) == 4\n assert len(t.inputs[-1].inputs) == 2\n\n\ndef test_get_set_real():\n a_data = np.array([1+2j, 3+4j, 5+6j])\n a = tensor(a_data, chunk_size=2)\n\n with pytest.raises(ValueError):\n a.real = [2, 4]\n\n\ndef test_build_mode():\n t1 = ones((2, 3), chunk_size=2)\n assert t1 == 2\n\n with enter_mode(build=True):\n assert t1 != 2\n"
] | [
[
"scipy.sparse.random",
"numpy.empty",
"numpy.timedelta64",
"numpy.dtype",
"numpy.random.randn",
"numpy.arange",
"numpy.frexp",
"numpy.random.rand",
"numpy.negative",
"numpy.datetime64",
"numpy.array"
]
] |
kuke/models | [
"5d25e00c94943e50e64780a244136f88f13c0a88"
] | [
"fluid/PaddleCV/video/metrics/metrics_util.py"
] | [
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport logging\n\nimport numpy as np\nfrom metrics.youtube8m import eval_util as youtube8m_metrics\nfrom metrics.kinetics import accuracy_metrics as kinetics_metrics\nfrom metrics.multicrop_test import multicrop_test_metrics as multicrop_test_metrics\n\nlogger = logging.getLogger(__name__)\n\n\nclass Metrics(object):\n def __init__(self, name, mode, metrics_args):\n \"\"\"Not implemented\"\"\"\n pass\n\n def calculate_and_log_out(self, loss, pred, label, info=''):\n \"\"\"Not implemented\"\"\"\n pass\n\n def accumulate(self, loss, pred, label, info=''):\n \"\"\"Not implemented\"\"\"\n pass\n\n def finalize_and_log_out(self, info=''):\n \"\"\"Not implemented\"\"\"\n pass\n\n def reset(self):\n \"\"\"Not implemented\"\"\"\n pass\n\n\nclass Youtube8mMetrics(Metrics):\n def __init__(self, name, mode, metrics_args):\n self.name = name\n self.mode = mode\n self.num_classes = metrics_args['MODEL']['num_classes']\n self.topk = metrics_args['MODEL']['topk']\n self.calculator = youtube8m_metrics.EvaluationMetrics(self.num_classes,\n self.topk)\n\n def calculate_and_log_out(self, loss, pred, label, info=''):\n loss = np.mean(np.array(loss))\n hit_at_one = youtube8m_metrics.calculate_hit_at_one(pred, label)\n perr = youtube8m_metrics.calculate_precision_at_equal_recall_rate(pred,\n label)\n gap = youtube8m_metrics.calculate_gap(pred, label)\n logger.info(info + ' , loss = {0}, Hit@1 = {1}, PERR = {2}, GAP = {3}'.format(\\\n '%.6f' % loss, '%.2f' % hit_at_one, '%.2f' % perr, '%.2f' % gap))\n\n def accumulate(self, loss, pred, label, info=''):\n self.calculator.accumulate(loss, pred, label)\n\n def finalize_and_log_out(self, info=''):\n epoch_info_dict = self.calculator.get()\n logger.info(info + '\\tavg_hit_at_one: {0},\\tavg_perr: {1},\\tavg_loss :{2},\\taps: {3},\\tgap:{4}'\\\n .format(epoch_info_dict['avg_hit_at_one'], epoch_info_dict['avg_perr'], \\\n epoch_info_dict['avg_loss'], epoch_info_dict['aps'], epoch_info_dict['gap']))\n\n def reset(self):\n self.calculator.clear()\n\n\nclass Kinetics400Metrics(Metrics):\n def __init__(self, name, mode, metrics_args):\n self.name = name\n self.mode = mode\n self.calculator = kinetics_metrics.MetricsCalculator(name, mode.lower())\n\n def calculate_and_log_out(self, loss, pred, label, info=''):\n if loss is not None:\n loss = np.mean(np.array(loss))\n else:\n loss = 0.\n acc1, acc5 = self.calculator.calculate_metrics(loss, pred, label)\n logger.info(info + '\\tLoss: {},\\ttop1_acc: {}, \\ttop5_acc: {}'.format('%.6f' % loss, \\\n '%.2f' % acc1, '%.2f' % acc5))\n\n def accumulate(self, loss, pred, label, info=''):\n self.calculator.accumulate(loss, pred, label)\n\n def finalize_and_log_out(self, info=''):\n self.calculator.finalize_metrics()\n metrics_dict = self.calculator.get_computed_metrics()\n loss 
= metrics_dict['avg_loss']\n acc1 = metrics_dict['avg_acc1']\n acc5 = metrics_dict['avg_acc5']\n logger.info(info + '\\tLoss: {},\\ttop1_acc: {}, \\ttop5_acc: {}'.format('%.6f' % loss, \\\n '%.2f' % acc1, '%.2f' % acc5))\n\n def reset(self):\n self.calculator.reset()\n\n\nclass MulticropMetrics(Metrics):\n def __init__(self, name, mode, metrics_args):\n self.name = name\n self.mode = mode\n if mode == 'test':\n args = {}\n args['num_test_clips'] = metrics_args.TEST.num_test_clips\n args['dataset_size'] = metrics_args.TEST.dataset_size\n args['filename_gt'] = metrics_args.TEST.filename_gt\n args['checkpoint_dir'] = metrics_args.TEST.checkpoint_dir\n args['num_classes'] = metrics_args.MODEL.num_classes\n self.calculator = multicrop_test_metrics.MetricsCalculator(\n name, mode.lower(), **args)\n else:\n self.calculator = kinetics_metrics.MetricsCalculator(name,\n mode.lower())\n\n def calculate_and_log_out(self, loss, pred, label, info=''):\n if self.mode == 'test':\n pass\n else:\n if loss is not None:\n loss = np.mean(np.array(loss))\n else:\n loss = 0.\n acc1, acc5 = self.calculator.calculate_metrics(loss, pred, label)\n logger.info(info + '\\tLoss: {},\\ttop1_acc: {}, \\ttop5_acc: {}'.format('%.6f' % loss, \\\n '%.2f' % acc1, '%.2f' % acc5))\n\n def accumulate(self, loss, pred, label):\n self.calculator.accumulate(loss, pred, label)\n\n def finalize_and_log_out(self, info=''):\n if self.mode == 'test':\n self.calculator.finalize_metrics()\n else:\n self.calculator.finalize_metrics()\n metrics_dict = self.calculator.get_computed_metrics()\n loss = metrics_dict['avg_loss']\n acc1 = metrics_dict['avg_acc1']\n acc5 = metrics_dict['avg_acc5']\n logger.info(info + '\\tLoss: {},\\ttop1_acc: {}, \\ttop5_acc: {}'.format('%.6f' % loss, \\\n '%.2f' % acc1, '%.2f' % acc5))\n\n def reset(self):\n self.calculator.reset()\n\n\nclass MetricsZoo(object):\n def __init__(self):\n self.metrics_zoo = {}\n\n def regist(self, name, metrics):\n assert metrics.__base__ == Metrics, \"Unknow model type {}\".format(\n type(metrics))\n self.metrics_zoo[name] = metrics\n\n def get(self, name, mode, cfg):\n for k, v in self.metrics_zoo.items():\n if k == name:\n return v(name, mode, cfg)\n raise MetricsNotFoundError(name, self.metrics_zoo.keys())\n\n\n# singleton metrics_zoo\nmetrics_zoo = MetricsZoo()\n\n\ndef regist_metrics(name, metrics):\n metrics_zoo.regist(name, metrics)\n\n\ndef get_metrics(name, mode, cfg):\n return metrics_zoo.get(name, mode, cfg)\n\n\nregist_metrics(\"NEXTVLAD\", Youtube8mMetrics)\nregist_metrics(\"ATTENTIONLSTM\", Youtube8mMetrics)\nregist_metrics(\"ATTENTIONCLUSTER\", Youtube8mMetrics)\nregist_metrics(\"TSN\", Kinetics400Metrics)\nregist_metrics(\"TSM\", Kinetics400Metrics)\nregist_metrics(\"STNET\", Kinetics400Metrics)\nregist_metrics(\"NONLOCAL\", MulticropMetrics)\n"
] | [
[
"numpy.array"
]
] |
snap-stanford/GIB | [
"a5b625c38f65feda0413eba81b2ccf5dac8f1a98"
] | [
"experiments/GIB_node_exp.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"Script for experiments with standard learning with GNNs (including GIB-GAT, GAT, GCN and other baselines.)\"\"\"\nimport argparse\nfrom copy import deepcopy\nimport datetime\nimport matplotlib.pylab as plt\nimport numpy as np\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(\"__file__\"), '..'))\nsys.path.append(os.path.join(os.path.dirname(\"__file__\"), '..', '..'))\nfrom GIB.experiments.GIB_node_model import GNN, load_model_dict_GNN, get_data, train, test_model, train_baseline\nfrom GIB.pytorch_net.util import plot_matrices, to_np_array, Beta_Function, record_data, str2bool, make_dir, eval_tuple, to_string, filter_filename\nfrom GIB.util import sample_lognormal, add_distant_neighbors, uniform_prior, process_data_for_nettack, GIB_PATH\nfrom GIB.DeepRobust.deeprobust.graph.defense import GCNJaccard\nfrom GIB.DeepRobust.deeprobust.graph.defense import RGCN\n\n\n# ## Settings:\n\n# In[2]:\n\n\n\"\"\"\nTypical GIB-Cat setting: model_type=\"GAT\", beta1=0.001, beta2=0.1, struct_dropout_mode=(\"Nsampling\",'multi-categorical-sum',0.1,3) (or (\"DNsampling\",'multi-categorical-sum',0.1,3,2));\nTypical GIB-Bern setting:model_type=\"GAT\", beta1=0.001, beta2=0.1, struct_dropout_mode=(\"Nsampling\",'Bernoulli',0.1,0.5,\"norm\") (or (\"DNsampling\",'Bernoulli',0.1,0.5,\"norm\",2));\nStandard GAT setting: model_type=\"GAT\", beta1=-1, beta2=-1, struct_dropout_mode=(\"standard\",0.6);\nStandard GCN setting: model_type=\"GCN\", beta1=-1, beta2=-1\nRGCN setting: model_type=\"RGCN\"\nGCNJaccard setting: model_type=\"GCNJaccard\"\n\"\"\"\nexp_id = \"exp1.0\" # Experiment id, used for the directory name saving the experiment result files.\ndata_type = 'Cora' # Data type. Choose from \"Cora\", \"Pubmed\", \"citeseer\"\nmodel_type = 'GAT' # Name of the base model. Choose from \"GAT\", \"GCN\", 'GCNJaccard', 'RGCN'. \n # For GIB-Cat and GIB-Bern, still choose model_type=\"GAT\", but set either beta1 or beta2 nonzero.\nbeta1 = 0.001 # coefficient for the XIB term. If -1, this term will turn off.\nbeta2 = 0.1 # coefficient for the AIB term. If -1, this term will have 0 coefficent (but may still perform sampling, depending on \"struct_dropout_mode\")\nstruct_dropout_mode = (\"Nsampling\", 'multi-categorical-sum', 0.1, 3) # Mode for how the structural representation is generated. \n # For GIB-Cat, choose from (\"Nsampling\", 'multi-categorical-sum', 0.1, 3) (here 0.1 is temperature, k=3 is the number of sampled neighboring edges with replacement),\n # and (\"DNsampling\", 'multi-categorical-sum', 0.1, 3, 2) (similar as above, but with 2-hop neighbors)\n # For GIB-Bern, choose from (\"Nsampling\",'Bernoulli',0.1,0.5,\"norm\") (here 0.1 is temperature, 0.5 is the prior for the Bernoulli probability)\n # and (\"DNsampling\",'Bernoulli',0.1,0.5,\"norm\",2) (with 2-hop neighbors)\n # For standard GAT, choose from (\"standard\", 0.6) (where standard dropout used on the attention weights in GAT)\n # and (\"standard\", 0.6, 2) (with 2-hop neighbors)\ntrain_fraction = 1. # Fraction of training labels preserved for the training set. Default 1, meaning using the full training set in the standard split.\nadded_edge_fraction = 0. # Fraction of added random edges. Default 0.\nfeature_noise_ratio = 0. 
# Noise ratio for the additive independent Gaussian noise on the features.\nlatent_size = 16 # Latent dimension for GCN-based or GAT-based models.\nsample_size = 1 # How many Z sampled from each node X.\nnum_layers = 2 # Number of layers for the GNN.\nreparam_mode = \"diag\" # Reparameterization mode for XIB. Choose from \"None\", \"diag\" or \"full\"\nprior_mode = \"mixGau-100\" # Prior mode. Choose from \"Gaussian\" or \"mixGau-100\" (mixture of 100 Gaussian components)\nis_anneal_beta = True # Whether to anneal beta1 and beta2 from 0 up during training. Default True.\nval_use_mean = True # Whether during evaluation use the parameter value instead of sampling. If True, during evaluation,\n # XIB will use mean for prediction, and AIB will use the parameter of the categorical distribution for prediction.\nreparam_all_layers = (-2,) # Which layers to use XIB, e.g. (1,2,4). Default (-2,), meaning the second last layer. If True, use XIB for all layers.\nepochs = 2000 # Number of epochs. Default 2000\nlr = -1 # Learning rate. If -1, use default learning rate for each model\nweight_decay = -1 # weight decay. If -1, use default weight decay for each model\ndate_time = \"{0}-{1}\".format(datetime.datetime.now().month, datetime.datetime.now().day) # Today's month and day. Used for the directory name saving the experiment result files.\nseed = 0 # Random seed.\nidx = \"0\" # idx to differentiate different files. Only used if wanting to run the same setting for many times.\nsave_best_model = True # Whether to save the model with the best validation accuracy.\nskip_previous = False # If True, will skip the training if the same setting has already been trained.\nis_cuda = \"cuda:0\" # CUDA device. Choose from False, or \"cuda:${NUMBER}\", where the ${NUMBER} is the GPU id.\n\nthreshold = 0.05 # threshold for GCNJaccard.\ngamma = 0.5 # gamma for RGCN\n\ntry:\n # If the current envionrment is Jupyter notebook:\n get_ipython().run_line_magic('matplotlib', 'inline')\n import matplotlib.pylab as plt\n isplot = True\nexcept:\n # If the current envionrment is terminal, pass in settings from the command line:\n import matplotlib\n isplot = True\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_id', default=exp_id, help='experiment ID')\n parser.add_argument('--data_type', help='Data type: choose from PROTEINS.', required=True)\n parser.add_argument('--model_type', default=\"GAT\", help='Model type: GCN or GAT or GCNJaccard or RGCN')\n parser.add_argument('--train_fraction', type=float, default=1., help='train_fraction')\n parser.add_argument('--added_edge_fraction', type=float, default=0., help='Fraction of added edges.')\n parser.add_argument('--feature_noise_ratio', type=float, default=0., help='Relative amplitude of feature Gaussian noise')\n parser.add_argument('--beta1', type=float, default=0.001, help='beta1 value for feature IB, set a float value >= 0.')\n parser.add_argument('--beta2', type=float, default=0.1, help='beta2 value for structure IB, set a float value >= 0.')\n parser.add_argument('--latent_size', type=int, default=16, help='latent_size')\n parser.add_argument('--sample_size', type=int, default=1, help='sample_size')\n parser.add_argument('--num_layers', type=int, default=2, help='num_layers')\n parser.add_argument('--reparam_mode', default=\"diag\", help='diag, diagg, or full')\n parser.add_argument('--prior_mode', default=\"mixGau-100\", help='prior mode for VIB')\n parser.add_argument('--struct_dropout_mode', default=\"(standard, 0.6)\", help='mode for structure 
dropout.')\n '''\n 'Nsampling, categorical/subset/multi-categorical-sum/multi-categorical-max, temperature, sample-neighbor-size'\n 'Nsampling, Bernoulli, temperature, prior (0~1)', 'norm'/'none'\n 'DNsampling, categorical/subset/multi-categorical-sum/multi-categorical-max, temperature, sample-neighbor-size, hops'\n 'DNsampling, Bernoulli, temperature, prior (0~1), 'norm'/'none', hops'\n 'standard, 0.6'\n 'standard, 0.6, hops'\n '''\n parser.add_argument('--is_anneal_beta', type=str2bool, nargs='?', const=True, default=True, help='Whether to anneal beta.')\n parser.add_argument('--val_use_mean', type=str2bool, nargs='?', const=True, default=True, help='Whether to use mean of Z during validation.')\n parser.add_argument('--reparam_all_layers', type=str, default=\"\\(-2,\\)\", help='Whether to reparameterize all layers.')\n parser.add_argument('--epochs', type=int, default=2000, help=\"Number of epochs.\")\n parser.add_argument('--lr', type=float, default=-1, help=\"Learning rate.\")\n parser.add_argument('--weight_decay', type=float, default=-1, help=\"weight_decay.\")\n parser.add_argument('--threshold', type=float, default=0.05, help='threshold for GCNJaccard')\n parser.add_argument('--gamma', type=float, default=0.3, help='gamma for RGCN')\n parser.add_argument('--save_best_model', type=str2bool, nargs='?', const=True, default=True, help='Whether to save the best model.')\n parser.add_argument('--skip_previous', type=str2bool, nargs='?', const=True, default=False, help='Whether to skip previously trained model in the same directory.')\n parser.add_argument('--date_time', default=date_time, help=\"Current date and time.\")\n parser.add_argument('--seed', type=int, default=0, help='seed')\n parser.add_argument('--gpuid', help='an integer for the accumulator', required=True)\n parser.add_argument('--idx', default=\"0\", help='idx')\n args = parser.parse_args()\n\n\nif \"args\" in locals():\n exp_id = args.exp_id\n data_type = args.data_type\n model_type = args.model_type\n train_fraction = args.train_fraction\n added_edge_fraction = args.added_edge_fraction\n feature_noise_ratio = args.feature_noise_ratio\n beta1 = args.beta1\n beta2 = args.beta2\n latent_size = args.latent_size\n sample_size = args.sample_size\n num_layers = args.num_layers\n reparam_mode = args.reparam_mode\n prior_mode = args.prior_mode\n struct_dropout_mode = eval_tuple(args.struct_dropout_mode)\n is_anneal_beta = args.is_anneal_beta\n val_use_mean = args.val_use_mean\n reparam_all_layers = eval_tuple(args.reparam_all_layers)\n epochs = args.epochs\n lr = args.lr\n weight_decay = args.weight_decay\n threshold = args.threshold\n gamma = args.gamma\n save_best_model = args.save_best_model\n skip_previous = args.skip_previous\n date_time = args.date_time\n seed = args.seed\n idx = args.idx\n is_cuda = eval(args.gpuid)\n if not isinstance(is_cuda, bool):\n is_cuda = \"cuda:{}\".format(is_cuda)\n\nbaseline = model_type in ['GCNJaccard', 'RGCN']\ndevice = torch.device(is_cuda if isinstance(is_cuda, str) else \"cuda\" if is_cuda else \"cpu\")\n# Directory and filename:\ndirname = GIB_PATH + \"/{0}_{1}/\".format(exp_id, date_time)\nif baseline:\n filename = dirname + \"{0}_{1}_tr_{2}_ed_{3}_{4}_beta_{5}_{6}_lat_{7}_samp_{8}_lay_{9}_anl_{10}_mean_{11}_reall_{12}_epochs_{13}_lr_{14}_l2_{15}_seed_{16}_threshold_{17}_gamma_{18}_{19}_id_{20}\".format(\n data_type, model_type, train_fraction, added_edge_fraction, feature_noise_ratio, beta1, beta2, latent_size, sample_size, num_layers,\n is_anneal_beta, val_use_mean, 
to_string(reparam_all_layers, \"-\"), epochs, lr, weight_decay, seed, threshold, gamma, is_cuda, idx\n )\nelse:\n filename = dirname + \"{0}_{1}_tr_{2}_ed_{3}_{4}_beta_{5}_{6}_lat_{7}_samp_{8}_lay_{9}_reparam_{10}_prior_{11}_sdrop_{12}_anl_{13}_mean_{14}_reall_{15}_epochs_{16}_lr_{17}_l2_{18}_seed_{19}_{20}_id_{21}\".format(\n data_type, model_type, train_fraction, added_edge_fraction, feature_noise_ratio, beta1, beta2, latent_size, sample_size, num_layers, reparam_mode, prior_mode,\n to_string(struct_dropout_mode, \"-\"), is_anneal_beta, val_use_mean, to_string(reparam_all_layers, \"-\"), epochs, lr, weight_decay, seed, is_cuda, idx,\n )\n\n\n# In[3]:\n\n\n# Setting the seed:\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n\n# Setting default hyperparameters:\nif struct_dropout_mode[0] is None:\n struct_dropout_mode = (\"None\",)\nif lr == -1:\n lr = None\nif weight_decay == -1:\n weight_decay = None\nif beta1 == -1:\n beta1 = None\nif beta1 is None:\n beta1_list, reparam_mode, prior_mode = None, None, None\nelse:\n if is_anneal_beta:\n beta_init = 0\n init_length = int(epochs / 4)\n anneal_length = int(epochs / 4)\n beta_inter = Beta_Function(np.linspace(0,1,anneal_length),1,4)\n beta1_inter = beta_inter / 4 * (beta_init - beta1) + beta1\n beta1_list = np.concatenate([np.ones(init_length) * beta_init, beta1_inter, \n np.ones(epochs - init_length - anneal_length + 1) * beta1])\n else:\n beta1_list = np.ones(epochs + 1) * beta1\nif beta2 == -1:\n beta2_list = None\nelse:\n if is_anneal_beta:\n beta_init = 0\n init_length = int(epochs / 4)\n anneal_length = int(epochs / 4)\n beta_inter = Beta_Function(np.linspace(0,1,anneal_length),1,4)\n beta2_inter = beta_inter / 4 * (beta_init - beta2) + beta2\n beta2_list = np.concatenate([np.ones(init_length) * beta_init, beta2_inter, \n np.ones(epochs - init_length - anneal_length + 1) * beta2])\n else:\n beta2_list = np.ones(epochs + 1) * beta2\n\n# Get Dataset:\ndata, info = get_data(data_type,\n train_fraction=train_fraction,\n added_edge_fraction=added_edge_fraction,\n feature_noise_ratio=feature_noise_ratio,\n seed=seed,\n )\n\nif struct_dropout_mode[0] == 'DNsampling' or (struct_dropout_mode[0] == 'standard' and len(struct_dropout_mode) == 3):\n add_distant_neighbors(data, struct_dropout_mode[-1])\ndata = process_data_for_nettack(data).to(device)\ndata = data.to(device)\n\n\nif model_type == 'GCNJaccard':\n model = GCNJaccard(nfeat=data.features.shape[1], nclass=data.labels.max()+1,\n num_layers=num_layers,\n nhid=latent_size, device=device,\n weight_decay=weight_decay if weight_decay is not None else 5e-4,\n lr=lr if lr is not None else 0.01,\n )\nelif model_type == 'RGCN':\n model = RGCN(nnodes=data.adj.shape[0], nfeat=data.features.shape[1], nclass=data.labels.max()+1,\n num_layers=num_layers,\n nhid=latent_size, device=device,\n lr=lr if lr is not None else 0.01,\n gamma=gamma if gamma is not None else 0.5,\n beta1=beta1 if beta1 is not None else 5e-4,\n beta2=weight_decay if weight_decay is not None else 5e-4,\n )\nelse:\n # For GIB-GAT, GAT or GCN:\n model = GNN(\n model_type=model_type,\n num_features=info[\"num_features\"],\n num_classes=info[\"num_classes\"],\n normalize=True,\n reparam_mode=reparam_mode,\n prior_mode=prior_mode,\n latent_size=latent_size,\n sample_size=sample_size,\n num_layers=num_layers,\n struct_dropout_mode=struct_dropout_mode,\n dropout=True,\n val_use_mean=val_use_mean,\n reparam_all_layers=reparam_all_layers,\n is_cuda=is_cuda,\n )\n\n\n# In[ ]:\n\n\nprint(filename + \"\\n\")\n\nif skip_previous:\n filename_core 
= \"_\".join(filename.split(\"/\")[-1].split(\"_\")[:-3])\n if filename_core.endswith(\"cuda\"):\n filename_core = filename_core[:-5]\n cand_filename = filter_filename(dirname, include=filename_core)\n if len(cand_filename) == 0:\n skip_previous = False\nif skip_previous:\n print(\"File already exists at {} with {}\".format(dirname, cand_filename))\nelse:\n if baseline:\n data_record = train_baseline(model, model_type, data, device, threshold, filename, epochs, save_best_model=save_best_model, verbose=True)\n else:\n data_record = train(\n model=model,\n data=data,\n data_type=data_type,\n model_type=model_type,\n loss_type=info['loss'],\n beta1_list=beta1_list,\n beta2_list=beta2_list,\n epochs=epochs,\n inspect_interval=200 if isplot else 20,\n verbose=True,\n isplot=isplot,\n filename=filename,\n compute_metrics=None,\n lr=lr,\n weight_decay=weight_decay,\n save_best_model=save_best_model,\n )\n\n"
] | [
[
"numpy.ones",
"torch.manual_seed",
"numpy.linspace",
"numpy.random.seed"
]
] |
tdaylan/pcat | [
"b5ed1b88cd87baf8af8ab0f4f75d93dfe28cdbb9"
] | [
"pcat/main.py"
] | [
"# plotting\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# numpy\nimport numpy as np\n\n# scipy\nimport scipy as sp\nimport scipy.interpolate\nfrom scipy.special import erfinv, erf\nfrom scipy.stats import poisson as pss\nimport scipy.fftpack\nimport scipy.sparse\n\n# jit\nfrom numba import jit\n\nimport ctypes\n\nimport astropy\nimport astropy as ap\nfrom astropy.convolution import convolve_fft, AiryDisk2DKernel\n\nimport pickle\n\n# multiprocessing\nimport multiprocessing as mp\n\nfrom copy import deepcopy\n\n# utilities\nimport os, time, sys, glob, fnmatch, inspect, traceback, functools\n\n# HealPix\nimport healpy as hp\n\n# ignore warnings if not in diagnostic mode\nimport warnings\n \n#seterr(divide='raise', over='raise', invalid='raise')\n#seterr(all='raise')\n#seterr(under='ignore')\n#warnings.simplefilter('ignore')\n#np.set_printoptions(linewidth=180)\n#sns.set(context='poster', style='ticks', color_codes=True)\n\nimport h5py\n\n# utilities\n\n# secondaries\n## Symbolic Jacobian calculation\n#import sympy\n\n# tdpy\nimport tdpy\nfrom tdpy.util import summgene\n\n\n\n# photometry related\n\n### find the spectra of sources\ndef retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):\n \n if gdat.numbener == 1:\n spec = flux[None, :]\n else:\n if plot:\n meanener = gdat.meanpara.enerplot\n else:\n meanener = gdat.meanpara.ener\n\n if gmod.spectype == 'gaus':\n spec = 1. / edis[None, :] / np.sqrt(2. * pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis[None, :])**2)\n if gmod.spectype == 'voig':\n args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]\n spec = 1. / sigm[None, :] / np.sqrt(2. * pi) * flux[None, :] * real(scipy.special.wofz(args))\n if gmod.spectype == 'edis':\n edis = edisintp(elin)[None, :]\n spec = 1. / edis / np.sqrt(2. * pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)\n if gmod.spectype == 'pvoi':\n spec = 1. / edis / np.sqrt(2. * pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)\n if gmod.spectype == 'lore':\n spec = 1. / edis / np.sqrt(2. 
* pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)\n if gmod.spectype == 'powr':\n spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])\n if gmod.spectype == 'colr':\n if plot:\n spec = np.zeros((gdat.numbenerplot, flux.size))\n else:\n spec = np.empty((gdat.numbener, flux.size))\n for i in gdat.indxener:\n if i < gdat.indxenerpivt:\n spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])\n elif i == gdat.indxenerpivt:\n spec[i, :] = flux\n else:\n spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])\n if gmod.spectype == 'curv':\n spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])\n if gmod.spectype == 'expc':\n spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])\n \n return spec\n\n\n### find the surface brightness due to one point source\ndef retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):\n \n # calculate the distance to all pixels from each point source\n dist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)\n \n # interpolate the PSF onto the pixels\n if gdat.kernevaltype == 'ulip':\n psfntemp = psfnintp(dist)\n if gdat.kernevaltype == 'bspx':\n pass\n\n # scale by the PS spectrum\n sbrtpnts = spec[:, None, None] * psfntemp\n \n return sbrtpnts\n\n\ndef retr_psfnwdth(gdat, psfn, frac):\n '''\n Return the PSF width\n '''\n\n wdth = np.zeros((gdat.numbener, gdat.numbevtt))\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n psfntemp = psfn[i, :, m]\n indxanglgood = np.argsort(psfntemp)\n intpwdth = max(frac * np.amax(psfntemp), np.amin(psfntemp))\n if intpwdth >= np.amin(psfntemp[indxanglgood]) and intpwdth <= np.amax(psfntemp[indxanglgood]):\n wdthtemp = sp.interpolate.interp1d(psfntemp[indxanglgood], gdat.binspara.angl[indxanglgood], fill_value='extrapolate')(intpwdth)\n else:\n wdthtemp = 0.\n wdth[i, m] = wdthtemp\n \n return wdth\n\n\n# lensing-related\ndef samp_lgalbgalfromtmpl(gdat, probtmpl):\n \n indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)\n lgal = gdat.lgalgrid[indxpixldraw] + randn(gdat.sizepixl)\n bgal = gdat.bgalgrid[indxpixldraw] + randn(gdat.sizepixl)\n \n return lgal, bgal\n\n\n## custom random variables, pdfs, cdfs and icdfs\n### probability distribution functions\ndef retr_lprbpois(data, modl):\n \n lprb = data * np.log(modl) - modl - sp.special.gammaln(data + 1)\n \n return lprb\n \n \n### probability density functions\ndef pdfn_self(xdat, minm, maxm):\n \n pdfn = 1. / (maxm - minm)\n \n return pdfn\n\n\ndef pdfn_expo(xdat, maxm, scal):\n\n if (xdat > maxm).any():\n pdfn = 0.\n else:\n pdfn = 1. / scal / (1. - np.exp(-maxm / scal)) * np.exp(-xdat / scal)\n\n return pdfn\n\n\ndef pdfn_dexp(xdat, maxm, scal):\n \n pdfn = 0.5 * pdfn_expo(np.fabs(xdat), maxm, scal)\n\n return pdfn\n\n\ndef pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):\n \n if np.isscalar(xdat):\n xdat = np.array([xdat])\n \n faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \\\n (1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))\n facb = faca * brek**(sloplowr - slopuppr) / (1. 
- sloplowr)\n \n pdfn = np.empty_like(xdat)\n indxlowr = np.where(xdat <= brek)[0]\n indxuppr = np.where(xdat > brek)[0]\n if indxlowr.size > 0:\n pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)\n if indxuppr.size > 0:\n pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)\n \n return pdfn\n\n\ndef pdfn_powr(xdat, minm, maxm, slop):\n \n norm = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))\n \n pdfn = norm * xdat**(-slop)\n \n return pdfn\n\n\ndef pdfn_logt(xdat, minm, maxm):\n \n pdfn = 1. / (np.log(maxm) - np.log(minm)) / xdat\n \n return pdfn\n\n\ndef pdfn_igam(xdat, slop, cutf):\n \n pdfn = sp.stats.invgamma.pdf(xdat, slop - 1., scale=cutf)\n \n return pdfn\n\n\ndef pdfn_lnor(xdat, mean, stdv):\n \n pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)\n\n return pdfn\n\n\ndef pdfn_gaus(xdat, mean, stdv):\n \n pdfn = 1. / np.sqrt(2. * pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)\n\n return pdfn\n\n\ndef pdfn_lgau(xdat, mean, stdv):\n \n pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)\n\n return pdfn\n\n\ndef pdfn_atan(para, minmpara, maxmpara):\n\n pdfn = 1. / (para**2 + 1.) / (np.arctan(maxmpara) - np.arctan(minmpara))\n \n return pdfn\n\n\ndef cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):\n \n gmod = getattr(gdat, strgmodl)\n\n scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]\n \n if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':\n \n listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]\n factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]\n\n if scalparagenrbase == 'self':\n paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)\n elif scalparagenrbase == 'logt':\n paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)\n\n elif scalparagenrbase == 'atan':\n gmod.listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]\n paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, gmod.listmaxmparagenrscalbase)\n \n elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':\n gmod.listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]\n gmod.liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]\n if scalparagenrbase == 'eerr':\n gmod.cdfnlistminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]\n gmod.listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]\n paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase, \\\n gmod.cdfnlistminmparagenrscalbaseunit, gmod.listparagenrscalbaseunitdiff)\n else:\n paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase)\n\n elif scalparagenrbase == 'pois':\n paragenrscalbaseunit = paragenrscalbase\n \n if gdat.booldiagmode:\n if paragenrscalbaseunit == 0:\n print('Warning. 
CDF is zero.')\n\n return paragenrscalbaseunit\n\n\ndef icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):\n \n gmod = getattr(gdat, strgmodl)\n\n # tobechanged\n # temp -- change zeros to empty\n paragenrscalfull = np.zeros_like(paragenrunitfull)\n for scaltype in gdat.listscaltype:\n listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]\n if len(listindxparagenrbasescal) == 0:\n continue\n paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)\n \n if not np.isfinite(paragenrscalfull).all():\n raise Exception('')\n \n if indxparagenrfullelem is not None:\n for l in gmod.indxpopl:\n for g in gmod.indxparagenrelemsing[l]:\n indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]\n if indxparagenrfulltemp.size == 0:\n continue\n paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \\\n gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)\n \n if gdat.booldiagmode:\n if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():\n raise Exception('')\n\n if not np.isfinite(paragenrscalfull).all():\n raise Exception('')\n \n return paragenrscalfull\n\n \ndef icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):\n \n gmod = getattr(gdat, strgmodl)\n \n if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':\n minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]\n factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]\n\n if scaltype == 'self':\n paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)\n elif scaltype == 'logt':\n paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)\n elif scaltype == 'atan':\n listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]\n paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)\n elif scaltype == 'gaus' or scaltype == 'eerr':\n listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]\n liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]\n if scaltype == 'eerr':\n cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]\n listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]\n paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)\n else:\n paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)\n elif scaltype == 'pois':\n paragenrscalbase = paragenrunitbase\n \n if gdat.booldiagmode:\n if not np.isfinite(paragenrscalbase).all():\n print('scaltype')\n print(scaltype)\n print('paragenrscalbase')\n print(paragenrscalbase)\n print('type(paragenrscalbase)')\n print(type(paragenrscalbase))\n print('paragenrscalbase.dtype')\n print(paragenrscalbase.dtype)\n raise Exception('')\n\n return paragenrscalbase\n\n\ndef icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):\n \n gmod = getattr(gdat, strgmodl)\n \n if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':\n minm = getattr(gmod.minmpara, nameparagenrelem)\n \n if scalcomp != 'self':\n maxm = 
getattr(gmod.maxmpara, nameparagenrelem)\n \n if scalcomp == 'powr':\n slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]\n if gdat.booldiagmode:\n if not np.isfinite(slop):\n raise Exception('')\n if maxm < minm:\n raise Exception('')\n icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)\n\n if scalcomp == 'dpowslopbrek':\n distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]\n sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]\n slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]\n icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)\n \n if scalcomp == 'expo':\n sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)\n icdf = tdpy.icdf_expo(cdfn, maxm, sexp)\n \n if scalcomp == 'self':\n fact = getattr(gmod.factpara, nameparagenrelem)\n icdf = tdpy.icdf_self_fact(cdfn, minm, fact)\n \n if scalcomp == 'logt':\n icdf = tdpy.icdf_logt(cdfn, minm, fact)\n \n if scalcomp == 'dexp':\n scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]\n icdf = tdpy.icdf_dexp(cdfn, maxm, scal)\n \n if scalcomp == 'lnormeanstdv':\n distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]\n diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]\n icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)\n \n if scalcomp == 'igam':\n slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]\n cutf = getattr(gdat, 'cutf' + nameparagenrelem)\n icdf = tdpy.icdf_igam(cdfn, slop, cutf)\n \n if scalcomp == 'gaus':\n distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]\n diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]\n icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)\n \n if gdat.booldiagmode:\n if not np.isfinite(icdf).all():\n print('icdf')\n print(icdf)\n raise Exception('')\n\n return icdf\n\n\ndef cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):\n \n gmod = getattr(gdat, strgmodl)\n gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)\n \n gmod.listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]\n cdfn = np.empty_like(icdf)\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):\n \n if gmod.listscalparagenrelem[k] == 'self' or gmod.listscalparagenrelem[k] == 'dexp' or gmod.listscalparagenrelem[k] == 'expo' \\\n or gmod.listscalparagenrelem[k] == 'powr' or gmod.listscalparagenrelem[k] == 'dpowslopbrek':\n minm = getattr(gdat.fitt.minm, nameparagenrelem)\n if gmod.listscalparagenrelem[k] == 'powr':\n maxm = getattr(gdat.fitt.maxm, nameparagenrelem)\n slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]\n cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)\n elif gmod.listscalparagenrelem[k] == 'dpowslopbrek':\n maxm = getattr(gdat.fitt.maxm, nameparagenrelem)\n brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]\n sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]\n slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]\n cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)\n else:\n fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)\n cdfn[k] = 
cdfn_self(icdf[k], minm, fact)\n if gmod.listscalparagenrelem[k] == 'lnormeanstdv':\n distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]\n diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]\n cdfn[k] = cdfn_lnor(icdf[k], distmean, slop)\n if gmod.listscalparagenrelem[k] == 'igam':\n slop = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]\n cutf = getattr(gdat, 'cutf' + nameparagenrelem)\n cdfn[k] = cdfn_igam(icdf[k], slop, cutf)\n if gmod.listscalparagenrelem[k] == 'gaus':\n distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]\n diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]\n cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)\n \n return cdfn\n\n\n### update sampler state\ndef updt_stat(gdat, gdatmodi):\n \n if gdat.typeverb > 1:\n print('updt_stat()')\n \n # update the sample and the unit sample vectors\n gdatmodi.this.lpritotl = gdatmodi.next.lpritotl\n gdatmodi.this.lliktotl = gdatmodi.next.lliktotl\n gdatmodi.this.lpostotl = gdatmodi.next.lpostotl\n gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])\n gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])\n if gdatmodi.this.indxproptype > 0:\n gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)\n gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')\n\n\ndef initcompfromstat(gdat, gdatmodi, namerefr):\n \n for l in gmod.indxpopl:\n for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n minm = getattr(gdat.fitt.minmpara, nameparagenrelem)\n maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)\n try:\n comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]\n if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':\n fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)\n if gmod.listscalparagenrelem[l][g] == 'self':\n compunit = cdfn_self(comp, minm, fact)\n if gmod.listscalparagenrelem[l][g] == 'logt':\n compunit = cdfn_logt(comp, minm, fact)\n if gmod.listscalparagenrelem[l][g] == 'expo':\n scal = getattr(gdat.fitt, 'gangdistsexp')\n maxm = getattr(gdat.fitt.maxm, nameparagenrelem)\n compunit = cdfn_expo(icdf, maxm, scal)\n if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':\n slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]\n if gmod.listscalparagenrelem[l][g] == 'powr':\n compunit = cdfn_powr(comp, minm, maxm, slop)\n if gmod.listscalparagenrelem[l][g] == 'igam':\n cutf = getattr(gdat, 'cutf' + nameparagenrelem)\n compunit = cdfn_igam(comp, slop, cutf)\n if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':\n brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]\n sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]\n slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]\n compunit = cdfn_powr(comp, minm, maxm, brek, sloplowr, slopuppr)\n if 
gmod.listscalparagenrelem[l][g] == 'gaus':\n distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]\n diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]\n compunit = cdfn_gaus(comp, distmean, diststdv)\n except:\n if gdat.typeverb > 0:\n print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)\n compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))\n gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit\n\n\n### find the set of pixels in proximity to a position on the map\ndef retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):\n \n gmod = getattr(gdat, strgmodl)\n \n lgal = dictelem[l]['lgal']\n bgal = dictelem[l]['bgal']\n varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]\n \n if gmod.typeelemspateval[l] == 'locl':\n listindxpixlelem = [[] for k in range(lgal.size)]\n for k in range(lgal.size):\n indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])\n \n indxfluxproxtemp = np.digitize(varbampl[k], gdat.binspara.prox)\n if indxfluxproxtemp > 0:\n indxfluxproxtemp -= 1\n if indxfluxproxtemp == gdat.binspara.prox.size - 1:\n print('Warning! Index of the proximity pixel list overflew. Taking the largest list...')\n indxfluxproxtemp -= 1\n indxpixlelem = gdat.indxpixlprox[indxfluxproxtemp][indxpixlpnts]\n if isinstance(indxpixlelem, int):\n indxpixlelem = gdat.indxpixl\n listindxpixlelem[k] = indxpixlelem\n\n listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))\n else:\n listindxpixlelemconc = gdat.indxpixl\n listindxpixlelem = gdat.indxpixl\n \n return listindxpixlelem, listindxpixlelemconc\n\n\n### find the distance between two points on the map\ndef retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):\n \n if gdat.typepixl == 'heal':\n xdat, ydat, zaxi = retr_unit(lgal, bgal)\n anglcosi = gdat.xdatgrid[indxpixlelem] * xdat + gdat.ydatgrid[indxpixlelem] * ydat + gdat.zaxigrid[indxpixlelem] * zaxi\n \n if retranglcosi:\n return anglcosi\n else:\n angldist = np.arccos(anglcosi)\n return angldist\n \n else:\n angldist = np.sqrt((lgal - gdat.lgalgrid[indxpixlelem])**2 + (bgal - gdat.bgalgrid[indxpixlelem])**2)\n \n return angldist\n \n\n### find the pixel index of a point on the map\ndef retr_indxpixl(gdat, bgal, lgal):\n\n if gdat.typepixl == 'heal':\n indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]\n if gdat.booldiagmode:\n if (indxpixl == -1).any(): \n raise Exception('pixlcnvt went negative!')\n\n if gdat.typepixl == 'cart':\n indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)\n indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. 
/ gdat.maxmgangdata).astype(int)\n\n if np.isscalar(indxlgcr):\n if indxlgcr < 0:\n indxlgcr = 0\n if indxlgcr >= gdat.numbsidecart:\n indxlgcr = gdat.numbsidecart - 1\n else:\n indxlgcr[np.where(indxlgcr < 0)] = 0\n indxlgcr[np.where(indxlgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1\n \n if np.isscalar(indxbgcr):\n if indxbgcr < 0:\n indxbgcr = 0\n if indxbgcr >= gdat.numbsidecart:\n indxbgcr = gdat.numbsidecart - 1\n else:\n indxbgcr[np.where(indxbgcr < 0)] = 0\n indxbgcr[np.where(indxbgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1\n \n indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr\n \n # convert to an index of non-zero exposure pixels\n #indxpixl = gdat.indxpixlroficnvt[indxpixl]\n\n return indxpixl\n\n\n## obtain count maps\ndef retr_cntp(gdat, sbrt):\n \n cntp = sbrt * gdat.expo * gdat.apix\n if gdat.enerdiff:\n cntp *= gdat.deltener[:, None, None] \n \n return cntp\n\n\n## plotting\n### construct path for plots\ndef retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):\n \n if strgmodl == 'true' or strgstat == '':\n path = gdat.pathinit + nameinte + strgplot + '.pdf'\n elif strgstat == 'pdfn' or strgstat == 'mlik':\n path = gdat.pathplotrtag + strgpdfn + '/finl/' + nameinte + strgstat + strgplot + '.pdf'\n elif strgstat == 'this':\n path = gdat.pathplotrtag + strgpdfn + '/fram/' + nameinte + strgstat + strgplot + '_swep%09d.pdf' % gdatmodi.cntrswep\n \n return path\n\n\n### determine the marker size\ndef retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):\n \n gmod = getattr(gdat, strgmodl)\n minm = getattr(gdat.minmpara, nameparagenrelemampl) \n maxm = getattr(gdat.maxmpara, nameparagenrelemampl)\n mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize\n \n return mrkrsize\n\n\n## experiment specific\ndef retr_psfphubb(gmod):\n\n # temp\n gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact\n\n\ndef retr_psfpchan(gmod):\n\n # temp\n #gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact\n if gdat.numbenerfull == 5:\n gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])\n if gdat.numbenerfull == 2:\n gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])\n #gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()] \n #gmod.psfpexpr = np.array([0.25 / gdat.anglfact, \n # 0.30 / gdat.anglfacti\\\n # 0.40 / gdat.anglfacti\\\n # 0.60 / gdat.anglfacti\\\n # 0.70 / gdat.anglfacti\n #gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])\n #gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \\\n # 0.30 / gdat.anglfact, 1.0e-1, 2.0, \\\n # 0.40 / gdat.anglfact, 1.0e-1, 2.0, \\\n # 0.60 / gdat.anglfact, 1.0e-1, 2.0, \\\n # 0.70 / gdat.anglfact, 1.0e-1, 2.0])\n \n\ndef retr_psfpsdyn(gmod):\n\n gmod.psfpexpr = np.array([0.05])\n \n\ndef retr_psfpferm(gmod):\n \n if gdat.anlytype.startswith('rec8'):\n path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'\n else:\n path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'\n irfn = astropy.io.fits.getdata(path, 1)\n minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]\n maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]\n enerirfn = np.sqrt(minmener * maxmener)\n\n numbpsfpscal = 3\n numbpsfpform = 5\n \n fermscal = 
np.zeros((gdat.numbevtt, numbpsfpscal))\n fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))\n \n strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']\n for m in gdat.indxevtt:\n if gdat.anlytype.startswith('rec8'):\n irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])\n fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']\n else:\n if m == 1:\n path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'\n elif m == 0:\n path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'\n else:\n continue\n irfn = astropy.io.fits.getdata(path, 1)\n fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']\n for k in range(numbpsfpform):\n fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)\n # convert N_tail to f_core\n for m in gdat.indxevtt:\n for i in gdat.indxener:\n fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)\n\n # calculate the scale factor\n gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)\n \n # store the fermi PSF parameters\n gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)\n for m in gdat.indxevtt:\n for k in range(numbpsfpform):\n indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k\n gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]\n \n\ndef retr_refrchaninit(gdat):\n \n gdat.indxrefr = np.arange(gdat.numbrefr)\n \n gdat.dictrefr = []\n for q in gdat.indxrefr:\n gdat.dictrefr.append(dict())\n \n gdat.refr.namepara.elemsign = ['flux', 'magt']\n \n gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']\n \n gdat.listnamerefr += ['xu11', 'wo08']\n \n setattr(gdat, 'plotminmotyp', 0.)\n setattr(gdat, 'plottmaxmotyp', 1.)\n setattr(gmod.lablrootpara, 'otyp', 'O')\n setattr(gdat, 'scalotypplot', 'self')\n \n setattr(gmod.lablrootpara, 'otypxu11', 'O')\n for name in gdat.listnamerefr:\n setattr(gdat, 'plotminmotyp' + name, 0.)\n setattr(gdat, 'plotmaxmotyp' + name, 1.)\n \n if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':\n with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:\n for k, line in enumerate(thisfile):\n if k < 18:\n continue\n rasccand =line[2]\n declcand =line[2]\n \n gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']\n gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']\n\n\ndef retr_refrchanfinl(gdat):\n \n booltemp = False\n if gdat.anlytype.startswith('extr'):\n if gdat.numbsidecart == 300:\n gdat.numbpixllgalshft[0] = 1490\n gdat.numbpixlbgalshft[0] = 1430\n else:\n booltemp = True\n elif gdat.anlytype.startswith('home'):\n gdat.numbpixllgalshft[0] = 0\n gdat.numbpixlbgalshft[0] = 0\n \n if gdat.numbsidecart == 600:\n pass\n elif gdat.numbsidecart == 100:\n indxtile = int(gdat.anlytype[-4:])\n numbsidecntr = int(gdat.anlytype[8:12])\n numbtileside = numbsidecntr / gdat.numbsidecart\n indxtilexaxi = indxtile // numbtileside\n indxtileyaxi = indxtile % numbtileside\n gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart\n gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart\n elif gdat.numbsidecart == 300:\n gdat.numbpixllgalshft[0] += 150\n gdat.numbpixlbgalshft[0] += 150\n else:\n booltemp = True\n else:\n booltemp = True\n\n if booltemp:\n raise Exception('Reference elements cannot be aligned with the spatial axes!')\n \n 
## WCS object for rotating reference elements into the ROI\n if gdat.numbener == 2:\n gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'\n else:\n gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]\n \n # Xue et al. (2011)\n #with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:\n pathfile = gdat.pathinpt + 'Xue2011.fits'\n hdun = pf.open(pathfile)\n hdun.info()\n lgalchan = hdun[1].data['_Glon'] / 180. * pi\n bgalchan = hdun[1].data['_Glat'] / 180. * pi\n fluxchansoft = hdun[1].data['SFlux']\n fluxchanhard = hdun[1].data['HFlux']\n objttypechan = hdun[1].data['Otype']\n gdat.refrlumi[0][0] = hdun[1].data['Lx']\n \n # position\n gdat.refr.dictelem[0]['lgal'] = lgalchan\n gdat.refr.dictelem[0]['bgal'] = bgalchan\n\n # spectra\n gdat.refrspec = [[np.zeros((3, gdat.numbener, lgalchan.size))]]\n if gdat.numbener == 2:\n gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9\n gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.\n else:\n gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9\n gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]\n gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]\n \n # fluxes\n gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]\n\n # spectral indices\n if gdat.numbener > 1:\n gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))\n\n ## object type\n objttypechantemp = np.zeros(lgalchan.size) - 1.\n indx = np.where(objttypechan == 'AGN')[0]\n objttypechantemp[indx] = 0.165\n indx = np.where(objttypechan == 'Galaxy')[0]\n objttypechantemp[indx] = 0.495\n indx = np.where(objttypechan == 'Star')[0]\n objttypechantemp[indx] = 0.835\n gdat.refrotyp[0][0] = objttypechantemp\n\n # Wolf et al. (2011)\n path = gdat.pathdata + 'inpt/Wolf2008.fits'\n data = astropy.io.fits.getdata(path)\n gdat.refrlgal[1] = np.deg2rad(data['_Glon'])\n gdat.refrlgal[1] = ((gdat.refrlgal[1] - pi) % (2. * pi)) - pi\n gdat.refrbgal[1] = np.deg2rad(data['_Glat'])\n gdat.refrmagt[1][0] = data['Rmag']\n gdat.refrreds[1][0] = data['MCz']\n \n #listname = []\n #for k in range(data['MCclass'].size):\n # if not data['MCclass'][k] in listname:\n # listname.append(data['MCclass'][k])\n listname = ['Galaxy', 'Galaxy (Uncl!)', 'QSO (Gal?)', 'Galaxy (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']\n gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1. 
\n for k, name in enumerate(listname):\n indx = np.where(data['MCclass'] == name)[0]\n gdat.refrotyp[1][0][indx] = k / 10.\n \n # error budget\n for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:\n refrtile = [[] for q in gdat.indxrefr]\n refrfeat = getattr(gdat.refr, name)\n for q in gdat.indxrefr:\n if len(refrfeat[q]) > 0:\n refrtile[q] = np.tile(refrfeat[q], (3, 1))\n setattr(gdat.refr, name, refrtile)\n \n\ndef retr_refrferminit(gdat):\n \n gdat.listnamerefr += ['ac15', 'ma05']\n gdat.indxrefr = np.arange(gdat.numbrefr)\n \n gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']\n\n gdat.refr.namepara.elemsign = ['flux', 'flux0400']\n \n setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)\n setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')\n \n for name in gdat.listnamerefr:\n setattr(gdat.minmpara, 'curv' + name, -1.)\n setattr(gdat.maxmpara, 'curv' + name, 1.)\n setattr(gdat.minmpara, 'expc' + name, 0.1)\n setattr(gdat.maxmpara, 'expc' + name, 10.)\n \n gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']\n gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']\n\n\ndef retr_refrfermfinl(gdat):\n\n gdat.minmstyp = -0.5\n gdat.maxmstyp = 3.5\n gdat.lablstyp = 'S'\n gmod.scalstypplot = 'self'\n \n gdat.minmtvar = 0.\n gdat.maxmtvar = 400.\n gdat.labltvar = 'T'\n gmod.scaltvarplot = 'logt'\n \n # Acero+2015\n path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'\n fgl3 = astropy.io.fits.getdata(path)\n \n gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])\n gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))\n gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])\n \n gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size\n\n gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]\n gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]\n \n fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \\\n fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None] \n gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]\n gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]\n gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.\n \n gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]\n gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)\n gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)\n fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5\n fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]\n fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))\n fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))\n\n gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)\n for k in range(gdat.refr.dictelem[0]['lgal'].size):\n gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])\n gdat.refrtvar[0] = fgl3['Variability_Index']\n \n gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1\n 
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw ')] = 0\n gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola ')] = 1\n gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff ')] = 2\n gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3\n indx = np.where(gdat.refrstyp[0] == -1)[0]\n if indx.size > 0:\n raise Exception('')\n gdat.refrsind[0] = fgl3['Spectral_Index']\n gdat.refrcurv[0] = fgl3['beta']\n gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3\n \n gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.\n gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.\n \n gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1)) \n gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1)) \n gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1)) \n\n # Manchester+2005\n path = gdat.pathdata + 'inpt/Manchester2005.fits'\n data = astropy.io.fits.getdata(path)\n \n gdat.refrlgal[1] = np.deg2rad(data['glon'])\n gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi\n gdat.refrbgal[1] = np.deg2rad(data['glat'])\n \n gdat.refrper0[1] = data['P0']\n gdat.refrper1[1] = data['P1']\n gdat.refrflux0400[1] = data['S400']\n #gdat.refrdism[1] = data['DM']\n #gdat.refrdlos[1] = data['Dist']\n\n # error budget\n for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:\n refrtile = [[] for q in gdat.indxrefr]\n refrfeat = getattr(gdat.refr, name)\n for q in gdat.indxrefr:\n if len(refrfeat[q]) > 0:\n refrtile[q] = np.tile(refrfeat[q], (3, 1))\n setattr(gdat.refr, name, refrtile)\n\n\ndef retr_singgaus(scaldevi, sigc):\n \n psfn = 1. / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)\n\n return psfn\n\n\ndef retr_singking(scaldevi, sigc, gamc):\n \n psfn = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)\n\n return psfn\n\n\ndef retr_doubgaus(scaldevi, frac, sigc, sigt):\n \n psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)\n\n return psfn\n\n\ndef retr_gausking(scaldevi, frac, sigc, sigt, gamt):\n\n psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)\n \n return psfn\n\n\ndef retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):\n\n psfn = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc) + \\\n (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. 
/ gamt / sigt**2)**(-gamt)\n \n return psfn\n\n\ndef retr_lgalbgal(gang, aang):\n \n lgal = gang * np.cos(aang)\n bgal = gang * np.sin(aang)\n\n return lgal, bgal\n\n\ndef retr_gang(lgal, bgal):\n \n gang = np.arccos(np.cos(lgal) * np.cos(bgal))\n\n return gang\n\n\ndef retr_aang(lgal, bgal):\n\n aang = np.arctan2(bgal, lgal)\n\n return aang\n\n\ndef show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):\n \n gmod = getattr(gdat, strgmodl)\n gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)\n gmodstat = getattr(gdatobjt, strgstat)\n \n print('strgmodl: ' + strgmodl)\n print('strgstat: ' + strgstat)\n print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))\n for k in gmod.indxparagenrfull:\n \n if indxsampshow is not None and not k in indxsampshow:\n continue\n \n if gmod.numbparaelem > 0:\n \n booltemp = False\n for l in gmod.indxpopl:\n if k == gmod.indxparagenrelemsing[l][0]:\n booltemp = True\n if booltemp:\n print('')\n print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))\n \n\ndef prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):\n \n if gdat.typeverb > 1:\n print('prop_stat()')\n \n #indxproptype\n # within, birth, death, split, merge\n # 0, 1, 2, 3, 4\n gmod = getattr(gdat, strgmodl)\n gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)\n gmodthis = getattr(gdatobjt, 'this')\n gmodnext = getattr(gdatobjt, 'next')\n \n if gmod.numbparaelem > 0:\n if gdat.booldiagmode:\n for l in gmod.indxpopl:\n if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):\n raise Exception('Repeating entry in the element index list!')\n\n thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)\n setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)\n else:\n thisindxparagenrfullelem = None\n \n gdatmodi.this.boolpropfilt = True \n\n # index of the population in which a transdimensional proposal will be attempted\n if gmod.numbparaelem > 0:\n if thisindxpopl is None:\n gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)\n else:\n gdatmodi.indxpopltran = thisindxpopl\n numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]\n \n # forced death or birth does not check for the prior on the dimensionality on purpose!\n if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \\\n not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):\n\n if brth or deth or np.random.rand() < gdat.probbrde or \\\n numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:\n \n ## births and deaths\n if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:\n gdatmodi.this.indxproptype = 2\n elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:\n gdatmodi.this.indxproptype = 1\n else:\n if np.random.rand() < 0.5:\n gdatmodi.this.indxproptype = 1\n else:\n gdatmodi.this.indxproptype = 2\n\n else:\n ## splits and merges\n if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:\n gdatmodi.this.indxproptype = 3\n elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:\n gdatmodi.this.indxproptype = 4\n else:\n if np.random.rand() < 0.5:\n gdatmodi.this.indxproptype = 
3\n else:\n gdatmodi.this.indxproptype = 4\n else:\n \n if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():\n raise Exception('')\n\n thisindxparagenrfullelemconc = []\n for l in gmod.indxpopl:\n thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])\n\n # get the indices of the current parameter vector\n if gmod.numbparaelem > 0:\n thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)\n else:\n thisindxsampfull = gmod.indxparagenrbasestdv\n \n thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]\n if not np.isfinite(thisstdp).all():\n raise Exception('')\n gdatmodi.this.indxproptype = 0\n \n if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:\n raise Exception('')\n\n if gdat.typeverb > 1:\n print('gdatmodi.this.indxproptype')\n print(gdatmodi.this.indxproptype)\n\n if gdatmodi.this.indxproptype == 0:\n gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)\n if gmod.numbparaelem > 0:\n gmodnext.indxelemfull = gmodthis.indxelemfull\n if gdatmodi.this.indxproptype > 0:\n gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)\n gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)\n if gmod.numbparaelem > 0:\n gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)\n \n if gdatmodi.this.indxproptype == 0:\n \n ## proposal scale\n if False:\n # amplitude-dependent proposal scale\n for l in gmod.indxpopl:\n thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]\n compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]\n minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])\n thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]\n compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]\n if nameparagenrelem == gmod.nameparagenrelemampl[l]:\n # temp -- this only works if compampl is powr distributed\n gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.\n gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.\n gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. 
/ gdatmodi.this.stdv**2))\n else:\n gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5\n \n ## propose a step\n diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp\n gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull\n \n if gdat.booldiagmode:\n if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():\n raise Exception('')\n\n if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():\n raise Exception('')\n\n if not np.isfinite(gmodnext.paragenrunitfull).all():\n raise Exception('')\n\n indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]\n if indxsamplowr.size > 0:\n gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.\n \n if gdat.booldiagmode:\n if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():\n raise Exception('')\n\n if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():\n raise Exception('')\n\n indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]\n if indxsampuppr.size > 0:\n gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.\n \n if gdat.booldiagmode:\n if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():\n raise Exception('')\n\n if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():\n raise Exception('')\n\n if not np.isfinite(gmodnext.paragenrunitfull).all():\n raise Exception('')\n\n gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)\n\n if gdat.booldiagmode:\n if not np.isfinite(gmodnext.paragenrunitfull).all():\n raise Exception('')\n \n if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:\n raise Exception('')\n \n if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:\n raise Exception('')\n \n if not np.isfinite(gmodnext.paragenrscalfull).all():\n raise Exception('')\n \n if gdatmodi.this.indxproptype > 0:\n gdatmodi.indxsamptran = []\n if gdatmodi.this.indxproptype == 1:\n gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])\n elif gdatmodi.this.indxproptype != 2:\n gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])\n \n if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:\n \n # find an empty slot in the element list\n for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):\n if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:\n break\n gdatmodi.indxelemmodi = [u]\n gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]\n \n # sample indices to add the new element\n gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])\n gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)\n gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])\n if gdatmodi.this.indxproptype == 1:\n \n # sample auxiliary variables\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara\n \n # death\n if gdatmodi.this.indxproptype == 2:\n \n # occupied element index to be killed\n if thisindxelem is None:\n dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))\n else:\n dethindxindxelem = thisindxelem\n\n # element index to be killed\n 
gdatmodi.indxelemmodi = []\n gdatmodi.indxelemfullmodi = []\n if gdat.typeverb > 1:\n print('dethindxindxelem')\n print(dethindxindxelem)\n\n gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])\n gdatmodi.indxelemfullmodi.append(dethindxindxelem)\n # parameter indices to be killed\n indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])\n gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)\n \n gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]\n\n if gdatmodi.this.indxproptype > 2:\n gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])\n gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])\n gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])\n \n # split\n if gdatmodi.this.indxproptype == 3:\n \n # find the probability of splitting elements\n gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))\n gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]\n gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)\n gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)\n\n # sample indices for the first element\n gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.indxelemmodi[0])\n gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)\n \n # sample indices for the second element\n gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd\n \n # take the parent element parameters\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])\n \n # draw the auxiliary parameters\n for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n if gmod.boolcompposi[gdatmodi.indxpopltran][g]:\n gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr\n elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:\n gdatmodi.this.auxipara[g] = np.random.rand()\n else:\n gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \\\n gmod.namepara.genrelem[gdatmodi.indxpopltran][g], l)\n\n # determine the new parameters\n if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):\n gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]\n else:\n gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]\n gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. 
- gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]\n gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \\\n gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]\n if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):\n gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]\n else:\n gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]\n gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]\n gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \\\n gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]\n for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):\n if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:\n gdatmodi.compfrst[g] = gdatmodi.comppare[g]\n gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]\n \n # place the new parameters into the sample vector\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco\n \n # check for prior boundaries\n if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):\n if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:\n gdatmodi.this.boolpropfilt = False\n else:\n if np.fabs(gdatmodi.compfrst[0]) > maxmlgal or np.fabs(gdatmodi.compseco[0]) > maxmlgal or \\\n np.fabs(gdatmodi.compfrst[1]) > maxmbgal or np.fabs(gdatmodi.compseco[1]) > maxmbgal:\n gdatmodi.this.boolpropfilt = False\n if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \\\n gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):\n gdatmodi.this.boolpropfilt = False\n if gdat.typeverb > 1:\n if not gdatmodi.this.boolpropfilt:\n print('Rejecting the proposal due to a split that falls out of the prior...')\n \n if gdatmodi.this.indxproptype == 4:\n \n # determine the index of the primary element to be merged (in the full element list)\n gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))\n\n ## first element index to be merged\n gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]\n \n # find the probability of merging this element with the others \n probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)\n \n indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))\n if gdat.booldiagmode:\n if indxelemfulltemp.size < 2:\n raise Exception('')\n gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)\n gdatmodi.indxelemfullmodi = 
np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))\n \n # parameters of the first element to be merged\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n ## first\n gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]\n \n # determine indices of the modified elements in the sample vector\n ## first element\n # temp -- this would not work for multiple populations !\n gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemfrst)\n gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)\n\n ## second element index to be merged\n gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]\n \n ## second element\n gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemseco)\n gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)\n \n # parameters of the elements to be merged\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n ## second\n gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]\n\n # indices of the element to be merged\n gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]\n\n # auxiliary parameters\n if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):\n gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]\n else:\n gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]\n gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]\n gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \\\n (gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) \n for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:\n gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]\n\n # merged element\n gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \\\n gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]\n if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gdat, 'maxm' + gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):\n gdatmodi.this.boolpropfilt = False\n if gdat.typeverb > 1:\n print('Proposal rejected due to falling outside the prior.')\n return\n\n if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):\n gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])\n else:\n gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])\n gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])\n for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n if gmod.boolcompposi[gdatmodi.indxpopltran][g]:\n gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. 
- gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \\\n (gdatmodi.compseco[g] - gdatmodi.compfrst[g])\n elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:\n gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]\n else:\n gdatmodi.comppare[g] = gdatmodi.compfrst[g]\n\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare\n\n # calculate the proposed list of pairs\n if gdat.typeverb > 1:\n print('mergindxfrst: ', gdatmodi.mergindxelemfrst)\n print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)\n print('mergindxseco: ', gdatmodi.mergindxelemseco)\n print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)\n print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)\n print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)\n\n if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):\n \n if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):\n print('elinfrst: ', gdatmodi.compfrst[0])\n print('amplfrst: ', gdatmodi.compfrst[1])\n print('elinseco: ', gdatmodi.compseco[0])\n print('amplseco: ', gdatmodi.compseco[1])\n print('elinpare: ', gdatmodi.comppare[0])\n print('fluxpare: ', gdatmodi.comppare[1])\n print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])\n print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])\n else:\n print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])\n print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])\n print('amplfrst: ', gdatmodi.compfrst[2])\n print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])\n print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])\n print('amplseco: ', gdatmodi.compseco[2])\n print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])\n print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])\n print('fluxpare: ', gdatmodi.comppare[2])\n print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])\n print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])\n print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])\n \n if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:\n # change the number of elements\n if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:\n gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1\n if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:\n gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1\n gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]\n \n # remove the element from the occupied element list\n if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):\n for a, indxelem in enumerate(gdatmodi.indxelemmodi):\n if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:\n gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)\n \n if gdatmodi.this.indxproptype == 0:\n gdatmodi.indxsampmodi = thisindxsampfull\n else:\n if gdatmodi.this.indxproptype == 1:\n gdatmodi.indxsampmodi = 
np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))\n if gdatmodi.this.indxproptype == 2:\n gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]\n if gdatmodi.this.indxproptype == 3:\n gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \\\n gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))\n if gdatmodi.this.indxproptype == 4:\n gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))\n \n if gmod.numbparaelem > 0:\n if gdatmodi.this.indxproptype == 0:\n indxparagenrfullelem = thisindxparagenrfullelem\n else:\n indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)\n if gdat.typeverb > 1:\n print('gdatmodi.indxsampmodi')\n print(gdatmodi.indxsampmodi)\n if gmod.numbparaelem > 0:\n print('gmodthis.indxelemfull')\n print(gmodthis.indxelemfull)\n print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')\n print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))\n if gdatmodi.this.indxproptype > 0:\n print('gdatmodi.indxelemmodi')\n print(gdatmodi.indxelemmodi)\n print('gdatmodi.indxelemfullmodi')\n print(gdatmodi.indxelemfullmodi)\n print('gdatmodi.this.boolpropfilt')\n print(gdatmodi.this.boolpropfilt)\n print('indxparagenrfullelem')\n print(indxparagenrfullelem)\n \n if gdatmodi.this.indxproptype == 1:\n for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \\\n gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \\\n gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)\n\n if gdat.booldiagmode:\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):\n print('l')\n print(l)\n print('gmod.indxpara.numbelem')\n print(gmod.indxpara.numbelem)\n print('gmodthis.paragenrunitfull')\n print(gmodthis.paragenrunitfull)\n raise Exception('')\n if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):\n raise Exception('')\n if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):\n raise Exception('')\n if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):\n raise Exception('')\n\n if strgmodl == 'fitt':\n diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)\n #size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) 
& (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size\n size = np.where(diffparagenrscalfull != 0.)[0].size\n if gdatmodi.this.indxproptype == 1:\n if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:\n raise Exception('')\n \n\ndef calc_probprop(gdat, gdatmodi):\n \n gmod = gdat.fitt\n\n # calculate the factor to multiply the acceptance rate, i.e., \n ## probability of the auxiliary parameters,\n if gdatmodi.this.indxproptype == 0:\n gdatmodi.this.lpau = 0.\n elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:\n gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl\n lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]\n if gdatmodi.this.indxproptype == 1:\n gdatmodi.this.lpau += lpautemp\n if gdatmodi.this.indxproptype == 2:\n gdatmodi.this.lpau -= lpautemp\n elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:\n gdatmodi.this.lpau = 0.\n dictelemtemp = [dict()]\n for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):\n if gmod.gmod.boolcompposi[gdatmodi.indxpopltran][g]:\n gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2\n elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:\n dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]\n gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \\\n gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \\\n gdatmodi.this.paragenrscalfull, dictelemtemp, [1])\n if gdatmodi.this.indxproptype == 4:\n gdatmodi.this.lpau *= -1.\n\n if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:\n ## the ratio of the probability of the reverse and forward proposals, and\n if gdatmodi.this.indxproptype == 3:\n gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \\\n typeelem=gmod.typeelem)\n gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)\n\n else:\n gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \\\n typeelem=gmod.typeelem)\n \n gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)\n \n ## Jacobian\n if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):\n gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])\n else:\n gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])\n if gdatmodi.this.indxproptype == 4:\n gdatmodi.this.ljcb *= -1.\n \n else:\n gdatmodi.this.ljcb = 0.\n gdatmodi.this.ltrp = 0.\n \n for l in gmod.indxpopl:\n if gdatmodi.this.indxproptype > 0:\n setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)\n\n\ndef retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):\n\n gmod = getattr(gdat, strgmodl)\n \n ## element parameters\n if gmod.numbparaelem > 0:\n indxparagenrfullelem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]\n \n cntr = tdpy.cntr()\n \n indxparagenrfullelem[l] = dict()\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()\n 
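# Gloss on the next line: the 'full' entry lists the indices of every generative
# parameter of every element in this population. The per-element base offsets in
# indxparagenrfulltemp are repeated over the parameters of one element, while the
# within-element offsets gmod.indxparagenrelemsing[l] are tiled over the elements,
# so base offsets [10, 14] with 4 parameters per element yield [10..13, 14..17].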
indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))\n \n if gdat.booldiagmode:\n for l in gmod.indxpopl:\n if len(indxparagenrfullelem[l]['full']) > 0:\n if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:\n print('strgmodl')\n print(strgmodl)\n print('strgstat')\n print(strgstat)\n print('gmod.numbparagenrbase')\n print(gmod.numbparagenrbase)\n print('gmod.numbparagenrelem[l]')\n print(gmod.numbparagenrelem[l])\n print('indxparagenrfullelem[l][full]')\n summgene(indxparagenrfullelem[l]['full'])\n print('gdat.fitt.minmpara.numbelempop0')\n print(gdat.fitt.minmpara.numbelempop0)\n print('gdat.fitt.maxmpara.numbelempop0')\n print(gdat.fitt.maxmpara.numbelempop0)\n raise Exception('Element parameter indices are bad.') \n \n else:\n indxparagenrfullelem = None\n \n return indxparagenrfullelem\n \n\ndef retr_weigmergodim(gdat, elin, elinothr):\n \n weigmerg = np.exp(-0.5 * ((elin - elinothr) / gdat.radispmr)**2)\n \n return weigmerg\n\n\ndef retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):\n \n weigmerg = np.exp(-0.5 * (((lgal - lgalothr) / gdat.radispmr)**2 + ((bgal - bgalothr) / gdat.radispmr)**2))\n \n return weigmerg\n\n\ndef retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):\n \n # calculate the weights\n if strgtype == 'seco':\n numb = 1\n if strgtype == 'pair':\n numb = 2\n listweigmerg = []\n for a in range(numb):\n if gmod.typeelem[indxpopltran].startswith('lghtline'):\n elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]\n elin = elintotl[gdatmodi.indxelemfullmodi[0]]\n elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[0]+1:]))\n weigmerg = retr_weigmergodim(gdat, elin, elinothr)\n else:\n lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]\n bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]\n lgal = lgaltotl[gdatmodi.indxelemfullmodi[0]]\n bgal = bgaltotl[gdatmodi.indxelemfullmodi[0]]\n lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))\n bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))\n weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)\n listweigmerg.append(weigmerg) \n\n # determine the probability of merging the second element given the first element\n if strgtype == 'seco':\n probmerg = listweigmerg[0] / np.sum(listweigmerg[0])\n \n # determine the probability of merging the pair\n if strgtype == 'pair':\n if gmod.typeelem[indxpopltran].startswith('lghtline'):\n weigpair = retr_weigmergtdim(gdat, elin, elintotl[gdatmodi.indxelemfullmodi[1]])\n else:\n weigpair = retr_weigmergtdim(gdat, lgal, lgaltotl[gdatmodi.indxelemfullmodi[1]], bgal, bgaltotl[gdatmodi.indxelemfullmodi[1]])\n probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])\n \n if gdat.booldiagmode:\n if not np.isfinite(probmerg).all():\n raise Exception('Merge probability is infinite.')\n\n return probmerg\n\n \ndef retr_indxparaelem(gmod, l, u):\n\n indxsamppnts = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]\n\n return indxsamppnts\n\n\ndef gang_detr():\n\n gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')\n\n AB = 
sympy.matrices.Matrix([[a1*b1,a1*b2,a1*b3],[a2*b1,a2*b2,a2*b3],[a3*b1,a3*b2,a3*b3]])\n\n\ndef retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):\n\n gmod = getattr(gdat, strgmodl)\n \n indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])\n \n if gdat.typeexpr == 'ferm':\n scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]\n scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]\n else:\n scalangl = thisangl[None, :, None]\n \n if typemodlpsfn == 'singgaus':\n sigc = psfp[indxpsfpinit]\n sigc = sigc[:, None, :]\n psfn = retr_singgaus(scalangl, sigc)\n \n elif typemodlpsfn == 'singking':\n sigc = psfp[indxpsfpinit]\n gamc = psfp[indxpsfpinit+1]\n sigc = sigc[:, None, :]\n gamc = gamc[:, None, :]\n psfn = retr_singking(scalangl, sigc, gamc)\n \n elif typemodlpsfn == 'doubking':\n sigc = psfp[indxpsfpinit]\n gamc = psfp[indxpsfpinit+1]\n sigt = psfp[indxpsfpinit+2]\n gamt = psfp[indxpsfpinit+3]\n frac = psfp[indxpsfpinit+4]\n sigc = sigc[:, None, :]\n gamc = gamc[:, None, :]\n sigt = sigt[:, None, :]\n gamt = gamt[:, None, :]\n frac = frac[:, None, :]\n psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)\n if gdat.typeexpr == 'ferm':\n psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)\n \n # normalize the PSF\n if gdat.typeexpr == 'ferm':\n fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]\n psfn /= fact\n\n return psfn\n\n\ndef retr_unit(lgal, bgal):\n\n xdat = np.cos(bgal) * np.cos(lgal)\n ydat = -np.cos(bgal) * np.sin(lgal)\n zaxi = np.sin(bgal)\n\n return xdat, ydat, zaxi\n\n\ndef retr_psec(gdat, conv):\n\n # temp\n conv = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))\n psec = (abs(scipy.fftpack.fft2(conv))**2)[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf] * 1e-3\n psec = psec.flatten()\n\n return psec\n \n\ndef retr_psecodim(gdat, psec):\n \n psec = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))\n psecodim = np.zeros(gdat.numbsidecarthalf)\n for k in gdat.indxmpolodim:\n indxmpol = np.where((gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1]))\n psecodim[k] = np.mean(psec[indxmpol])\n psecodim *= gdat.meanpara.mpolodim**2\n \n return psecodim\n\n\ndef retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):\n \n cdfnminm = 0.5 * (sp.special.erf((minmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)\n cdfnmaxm = 0.5 * (sp.special.erf((maxmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)\n cdfndiff = cdfnmaxm - cdfnminm\n \n return cdfnminm, cdfndiff\n \n\ndef retr_condcatl(gdat):\n \n # setup\n ## number of stacked samples\n numbstks = 0\n indxtupl = []\n indxstks = []\n indxstksparagenrscalfull = []\n for n in gdat.indxsamptotl:\n indxstks.append([])\n indxstkssamptemp = []\n for l in gmod.indxpopl:\n indxstks[n].append([])\n for k in range(len(gdat.listpostindxelemfull[n][l])):\n indxstks[n][l].append(numbstks)\n indxstkssamptemp.append(numbstks)\n indxtupl.append([n, l, k])\n numbstks += 1\n indxstkssamp.append(np.array(indxstkssamptemp))\n \n if gdat.typeverb > 1:\n print('indxstks')\n print(indxstks)\n print('indxtupl')\n print(indxtupl)\n print('indxstkssamp')\n print(indxstksparagenrscalfull)\n print('numbstks')\n print(numbstks)\n\n cntr = 0 \n arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))\n for n in 
gdat.indxsamptotl:\n indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt') \n for l in gmod.indxpopl:\n for k in np.arange(len(gdat.listpostindxelemfull[n][l])):\n for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, gmodstat.indxparagenrfullelem[l][nameparagenrelem][k]]\n\n if gdat.typeverb > 0:\n print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])\n timeinit = gdat.functime()\n \n gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)\n for k, nameparagenrelem in enumerate(gmod.namepara.elem):\n # temp\n l = 0\n gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]\n \n # construct lists of samples for each proposal type\n listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]\n indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]\n indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]\n thisperc = 0\n cntr = 0\n for k in gmod.indxparagenrelemtotl:\n for n in range(numbstks):\n dist = np.fabs(arrystks[n, k] - arrystks[:, k])\n indxstks = np.where(dist < gdat.distthrs[k])[0]\n if indxstks.size > 0:\n for j in indxstks:\n cntr += 1\n listdisttemp[k].append(dist[j])\n indxstksrows[k].append(n)\n indxstkscols[k].append(j)\n \n nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)\n if nextperc > thisperc:\n thisperc = nextperc\n if cntr > 1e6:\n break\n \n listdisttemp[k] = np.array(listdisttemp[k])\n indxstksrows[k] = np.array(indxstksrows[k])\n indxstkscols[k] = np.array(indxstkscols[k])\n\n if cntr > 1e6:\n break\n \n listdist = [[] for k in range(gmod.numbparagenrelemtotl)]\n for k, nameparagenrelem in enumerate(gmod.namepara.elem):\n listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))\n \n listindxstkspair = []\n indxstksleft = []\n\n if gdat.typeverb > 0:\n timefinl = gdat.functime()\n \n indxstksleft = range(numbstks)\n\n # list of sample lists of the labeled element\n indxstksassc = []\n cntr = 0\n \n gdat.prvlthrs = 0.05\n\n while len(indxstksleft) > 0:\n \n # count number of associations\n numbdist = np.zeros(numbstks, dtype=int) - 1\n for p in range(len(indxstksleft)):\n indxindx = np.where((listdist[0][indxstksleft[p], :].tonp.array().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \\\n (listdist[1][indxstksleft[p], :].tonp.array().flatten() * 2. 
* gdat.maxmbgal < gdat.anglassc))[0]\n numbdist[indxstksleft[p]] = indxindx.size\n \n prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)\n \n if prvlmaxmesti < gdat.prvlthrs:\n break\n\n # determine the element with the highest number of neighbors\n indxstkscntr = np.argmax(numbdist)\n indxsamptotlcntr = indxtupl[indxstkscntr][0]\n indxpoplcntr = indxtupl[indxstkscntr][1]\n indxelemcntr = indxtupl[indxstkscntr][2]\n\n # add the central element sample\n indxstksassc.append([])\n indxstksassc[cntr].append(indxstkscntr)\n indxstksleft.remove(indxstkscntr)\n\n if gdat.typeverb > 1:\n print('Match step %d' % cntr)\n print('numbdist')\n print(numbdist)\n print('indxstkscntr')\n print(indxstkscntr)\n print('indxstksleft')\n print(indxstksleft)\n \n # add the associated element samples\n if len(indxstksleft) > 0:\n for n in gdat.indxsamptotl:\n \n indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])\n \n if n == indxsamptotlcntr:\n continue\n \n if indxstkstemp.size > 0:\n totl = np.zeros_like(indxstkstemp)\n for k in gmod.indxparagenrelemtotl:\n temp = listdist[k][indxstkscntr, indxstkstemp].tonp.array()[0]\n totl = totl + temp**2\n\n indxleft = np.argsort(totl)[0]\n \n indxstksthis = indxstkstemp[indxleft]\n \n thisbool = True\n for k in gmod.indxparagenrelemtotl:\n if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:\n thisbool = False\n\n if thisbool:\n indxstksassc[cntr].append(indxstksthis)\n indxstksleft.remove(indxstksthis)\n \n # temp\n #if gdat.makeplot:\n # gdatmodi = tdpy.gdatstrt()\n # gdatmodi.this.indxelemfull = deepcopy(listindxelemfull[n])\n # for r in range(len(indxstksassc)): \n # calc_poststkscond(gdat, indxstksassc)\n # gdatmodi.this.indxelemfull = [[] for l in gmod.indxpopl]\n # for indxstkstemp in indxstksleft:\n # indxsamptotlcntr = indxtupl[indxstkstemp][0]\n # indxpoplcntr = indxtupl[indxstkstemp][1]\n # indxelemcntr = indxtupl[indxstkstemp][2]\n # gdatmodi.this.paragenrscalfull = gdat.listparagenrscalfull[indxsamptotlcntr, :]\n # gdatmodi.this.indxelemfull[].append()\n\n # plot_genemaps(gdat, gdatmodi, 'this', 'cntpdata', strgpdfn, indxenerplot=0, indxevttplot=0, cond=True)\n \n cntr += 1\n \n gdat.dictglob['poststkscond'] = []\n gdat.dictglob['liststkscond'] = []\n # for each condensed element\n for r in range(len(indxstksassc)): \n gdat.dictglob['liststkscond'].append([])\n gdat.dictglob['liststkscond'][r] = {}\n gdat.dictglob['poststkscond'].append([])\n gdat.dictglob['poststkscond'][r] = {}\n for strgfeat in gmod.namepara.genrelem:\n gdat.dictglob['liststkscond'][r][strgfeat] = []\n\n # for each associated sample associated with the central stacked sample \n for k in range(len(indxstksassc[r])):\n indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]\n indxpoplcntr = indxtupl[indxstksassc[r][k]][1]\n indxelemcntr = indxtupl[indxstksassc[r][k]][2]\n \n for strgfeat in gmod.namepara.genrelem:\n temp = getattr(gdat, 'list' + strgfeat)\n if temp[indxsamptotlcntr][indxpoplcntr].size > 0:\n temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]\n gdat.dictglob['liststkscond'][r][strgfeat].append(temp)\n\n for r in range(len(gdat.dictglob['liststkscond'])):\n for strgfeat in gmod.namepara.genrelem:\n arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)\n gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))\n gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = median(arry, axis=0)\n gdat.dictglob['poststkscond'][r][strgfeat][1, ...] 
= percennp.tile(arry, 16., axis=0)\n gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = percennp.tile(arry, 84., axis=0)\n \n gdat.numbstkscond = len(gdat.dictglob['liststkscond'])\n\n gdat.indxstkscond = np.arange(gdat.numbstkscond)\n gdat.prvl = np.empty(gdat.numbstkscond)\n for r in gdat.indxstkscond:\n gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])\n gdat.prvl /= gdat.numbsamptotl\n gdat.minmprvl = 0.\n gdat.maxmprvl = 1.\n retr_axis(gdat, 'prvl')\n gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]\n if gdat.makeplot:\n pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')\n for k, nameparagenrelem in enumerate(gmod.namepara.elem):\n path = pathcond + 'histdist' + nameparagenrelem \n listtemp = np.copy(listdist[k].tonp.array()).flatten()\n listtemp = listtemp[np.where(listtemp != 1e20)[0]]\n tdpy.mcmc.plot_hist(path, listtemp, r'$\\Delta \\tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')\n path = pathcond + 'histprvl'\n tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')\n gdat.prvlthrs = 0.1 \n gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]\n gdat.numbprvlhigh = gdat.indxprvlhigh.size\n\n\ndef retr_conv(gdat, defl):\n \n defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))\n # temp\n conv = abs(np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0) + np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) / 2.\n conv = conv.flatten()\n \n return conv\n\n\ndef retr_invm(gdat, defl):\n \n # temp\n defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))\n invm = (1. - np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0)) * (1. - np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) - \\\n np.gradient(defl[:, :, 0], gdat.sizepixl, axis=1) * np.gradient(defl[:, :, 1], gdat.sizepixl, axis=0)\n invm = invm.flatten()\n return invm\n\n\ndef setp_indxswepsave(gdat):\n\n gdat.indxswep = np.arange(gdat.numbswep)\n gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)\n gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)\n gdat.boolsave[gdat.indxswepsave] = True\n gdat.indxsampsave = np.zeros(gdat.numbswep, dtype=int) - 1\n gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)\n \n\ndef retr_cntspnts(gdat, listposi, spec):\n \n cnts = np.zeros((gdat.numbener, spec.shape[1]))\n \n if gdat.boolbinsspat:\n lgal = listposi[0]\n bgal = listposi[1]\n indxpixlpnts = retr_indxpixl(gdat, bgal, lgal)\n else:\n elin = listposi[0]\n indxpixlpnts = np.zeros_like(elin, dtype=int)\n for k in range(spec.shape[1]):\n cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]\n if gdat.enerdiff:\n cnts *= gdat.deltener[:, None]\n cnts = np.sum(cnts, axis=0)\n\n return cnts\n\n\ndef retr_mdencrit(gdat, adissour, adishost, adishostsour):\n \n mdencrit = gdat.factnewtlght / 4. 
/ np.pi * adissour / adishostsour / adishost\n \n return mdencrit\n\n\ndef retr_massfrombein(gdat, adissour, adishost, adishostsour):\n\n mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)\n massfrombein = np.pi * adishost**2 * mdencrit\n\n return massfrombein\n\n\ndef retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):\n \n mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)\n \n fracacutasca = acut / asca\n \n factmcutfromdefs = np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(fracacutasca)\n\n return factmcutfromdefs\n\n\ndef retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):\n \n mscl = defs * np.pi * adishost**2 * mdencrit * asca\n fracacutasca = acut / asca\n mcut = mscl * retr_mcutfrommscl(fracacutasca)\n \n return mcut\n\n\ndef retr_mcutfrommscl(fracacutasca):\n \n mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))\n\n return mcut\n\n\ndef retr_negalogt(varb):\n \n negalogt = sign(varb) * np.log10(np.fabs(varb))\n \n return negalogt\n\n\ndef retr_gradmaps(gdat, maps):\n \n # temp -- this does not work with vanishing exposure\n maps = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))\n grad = np.dstack((np.gradient(maps, gdat.sizepixl, axis=0), np.gradient(maps, gdat.sizepixl, axis=1))).reshape((gdat.numbsidecart, gdat.numbsidecart, 2))\n grad = grad.reshape((gdat.numbpixlcart, 2))\n\n return grad\n\n\ndef retr_spatmean(gdat, inpt, boolcntp=False):\n \n listspatmean = [[] for b in gdat.indxspatmean]\n listspatstdv = [[] for b in gdat.indxspatmean]\n for b, namespatmean in enumerate(gdat.listnamespatmean):\n if boolcntp:\n cntp = inpt[gdat.listindxcubespatmean[b]]\n else:\n cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix\n if gdat.enerdiff:\n cntp *= gdat.deltener[:, None, None]\n spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix\n spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix\n if gdat.boolcorrexpo:\n spatmean /= gdat.expototlmean\n spatstdv /= gdat.expototlmean\n if gdat.enerdiff:\n spatmean /= gdat.deltener\n spatstdv /= gdat.deltener\n listspatmean[b] = spatmean\n listspatstdv[b] = spatstdv\n\n return listspatmean, listspatstdv\n\n\ndef retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):\n \n grad = retr_gradmaps(gdat, maps)\n \n defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)\n\n prod = grad * defl\n if cntpmodl is not None:\n prod /= cntpmodl[:, None]\n dotstemp = np.sum(prod, 1)\n if absv:\n dotstemp = np.fabs(dotstemp)\n else:\n dotstemp = dotstemp\n \n dots = np.mean(dotstemp)\n \n return dots\n\n\ndef retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):\n \n if strgvarb.startswith('cntpdata'):\n varb = getattr(gdat, strgvarb)\n elif strgvarb.startswith('histcntpdata'):\n varb = getattr(gdat, strgvarb)\n else:\n if strgmodl == 'true':\n gmod = getattr(gdat, strgmodl)\n gmodstat = getattr(gmod, strgstat)\n varb = getattr(gmodstat, strgvarb)\n if strgmodl == 'fitt':\n if strgstat == 'this':\n if strgmome == 'errr':\n varb = getattr(gdatmodi, strgstat + 'errr' + strgvarb)\n else:\n varb = getattr(gdatmodi, strgstat + strgvarb)\n if strgstat == 'pdfn':\n varb = getattr(gdat, strgmome + strgpdfn + strgvarb)\n\n if indxlist is not None:\n varb = varb[indxlist]\n\n if indxvarb is not None:\n if strgmome == 
'errr':\n varb = varb[[slice(None)] + indxvarb]\n else:\n varb = varb[indxvarb]\n\n return np.copy(varb)\n\n\ndef setp_indxpara(gdat, typesetp, strgmodl='fitt'):\n \n print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))\n\n gmod = getattr(gdat, strgmodl)\n \n if typesetp == 'init':\n \n if strgmodl == 'fitt':\n gmod.lablmodl = 'Model'\n if strgmodl == 'true':\n gmod.lablmodl = 'True'\n\n # transdimensional element populations\n \n gmod.numbpopl = len(gmod.typeelem)\n gmod.indxpopl = np.arange(gmod.numbpopl)\n \n if gdat.typeexpr != 'user':\n # background component\n gmod.numbback = 0\n gmod.indxback = []\n for c in range(len(gmod.typeback)):\n if isinstance(gmod.typeback[c], str):\n if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):\n namebfun = gmod.typeback[c][:8]\n ordrexpa = int(gmod.typeback[c][8:])\n numbexpa = 4 * ordrexpa**2\n indxexpa = np.arange(numbexpa)\n del gmod.typeback[c]\n for k in indxexpa:\n gmod.typeback.insert(c+k, namebfun + '%04d' % k)\n gmod.numbback = len(gmod.typeback)\n gmod.indxback = np.arange(gmod.numbback)\n gmod.numbbacktotl = np.sum(gmod.numbback)\n gmod.indxbacktotl = np.arange(gmod.numbbacktotl)\n \n # galaxy components\n gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)\n\n # name of the generative element parameter used for the amplitude\n gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]\n gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lghtpntspuls':\n gmod.nameparagenrelemampl[l] = 'per0'\n gmod.indxparagenrelemampl[l] = 2\n elif gmod.typeelem[l] == 'lghtpntsagnntrue':\n gmod.nameparagenrelemampl[l] = 'lum0'\n gmod.indxparagenrelemampl[l] = 2\n elif gmod.typeelem[l].startswith('lghtline'):\n gmod.nameparagenrelemampl[l] = 'flux'\n gmod.indxparagenrelemampl[l] = 1\n elif gmod.typeelem[l].startswith('lghtpnts'):\n gmod.nameparagenrelemampl[l] = 'flux'\n gmod.indxparagenrelemampl[l] = 2\n elif gmod.typeelem[l].startswith('lghtgausbgrd'):\n gmod.nameparagenrelemampl[l] = 'flux'\n gmod.indxparagenrelemampl[l] = 2\n if gmod.typeelem[l] == 'lens':\n gmod.nameparagenrelemampl[l] = 'defs'\n gmod.indxparagenrelemampl[l] = 2\n if gmod.typeelem[l].startswith('clus'):\n gmod.nameparagenrelemampl[l] = 'nobj'\n gmod.indxparagenrelemampl[l] = 2\n if gmod.typeelem[l] == 'lens':\n gmod.nameparagenrelemampl[l] = 'defs'\n if gmod.typeelem[l] == 'clus':\n gmod.nameparagenrelemampl[l] = 'nobj'\n if len(gmod.nameparagenrelemampl[l]) == 0:\n raise Exception('Amplitude feature undefined.')\n \n for featpara in gdat.listfeatpara:\n for strggrop in gdat.liststrggroppara:\n setattr(gmod, 'list' + featpara + 'para' + strggrop, [])\n \n if typesetp == 'finl':\n \n # number of elements in the current state of the true model\n if strgmodl == 'true':\n gmod.numbelem = np.zeros(gmod.numbpopl)\n for l in gmod.indxpopl:\n gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)\n gmod.numbelemtotl = np.sum(gmod.numbelem) \n \n # element setup\n ## flag to calculate the kernel approximation errors\n boolcalcerrr = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:\n # temp\n boolcalcerrr[l] = False\n else:\n boolcalcerrr[l] = False\n setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)\n \n # maximum number of elements for each population\n gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)\n for l in gmod.indxpopl:\n 
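# Each population's cap is stored as a separate scalar attribute named
# 'numbelempop<l>' on gmod.maxmpara; this loop collects those caps into the
# integer array gmod.maxmpara.numbelem indexed by population.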
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)\n \n # maximum number of elements summed over all populations\n gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem) \n\n ## sorting feature\n nameparaelemsort = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n # feature to be used to sort elements\n if gmod.typeelem[l].startswith('lght'):\n nameparaelemsort[l] = 'flux'\n if gmod.typeelem[l] == 'lens':\n nameparaelemsort[l] = 'defs'\n if gmod.typeelem[l].startswith('clus'):\n nameparaelemsort[l] = 'nobj'\n \n ## label extensions\n gmod.lablelemextn = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gdat.numbgrid > 1:\n if gmod.typeelem[l] == 'lghtpnts':\n gmod.lablelemextn[l] = r'\\rm{fps}'\n if gmod.typeelem[l] == 'lghtgausbgrd':\n gmod.lablelemextn[l] = r'\\rm{bgs}'\n else:\n if gmod.typeelem[l].startswith('lghtpntspuls'):\n gmod.lablelemextn[l] = r'\\rm{pul}'\n if gmod.typeelem[l].startswith('lghtpntsagnn'):\n gmod.lablelemextn[l] = r'\\rm{agn}'\n elif gmod.typeelem[l] == 'lghtpnts':\n gmod.lablelemextn[l] = r'\\rm{pts}'\n if gmod.typeelem[l] == 'lens':\n gmod.lablelemextn[l] = r'\\rm{sub}'\n if gmod.typeelem[l].startswith('clus'):\n gmod.lablelemextn[l] = r'\\rm{cls}'\n if gmod.typeelem[l].startswith('lghtline'):\n gmod.lablelemextn[l] = r'\\rm{lin}'\n \n gmod.indxpoplgrid = [[] for y in gdat.indxgrid]\n for y in gdat.indxgrid: \n for indx, typeelemtemp in enumerate(gmod.typeelem):\n # foreground grid (image plane) -- the one np.where the data is measured\n if y == 0:\n if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):\n gmod.indxpoplgrid[y].append(indx)\n # foreground mass grid\n if y == 1:\n if typeelemtemp.startswith('lens'):\n gmod.indxpoplgrid[y].append(indx)\n # background grid (source plane)\n if y == 2:\n if typeelemtemp.endswith('bgrd'):\n gmod.indxpoplgrid[y].append(indx)\n \n indxgridpopl = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n for y in gdat.indxgrid:\n if l in gmod.indxpoplgrid[y]:\n indxgridpopl[l] = y\n \n calcelemsbrt = False\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lghtpnts'):\n calcelemsbrt = True\n \n if 'lghtgausbgrd' in gmod.typeelem:\n calcelemsbrtbgrd = True\n else:\n calcelemsbrtbgrd = False\n\n if gmod.boollenssubh:\n calcelemdefl = True\n else:\n calcelemdefl = False\n\n ## element Boolean flags\n gmod.boolelemlght = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lght'):\n gmod.boolelemlght[l] = True\n else:\n gmod.boolelemlght[l] = False\n gmod.boolelemlghtanyy = True in gmod.boolelemlght\n \n gmod.boolelemlens = False\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lens'):\n gmod.boolelemlens = True\n \n gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):\n gmod.boolelemsbrtdfnc[l] = True\n else:\n gmod.boolelemsbrtdfnc[l] = False\n gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc\n\n gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lens':\n gmod.boolelemdeflsubh[l] = True\n else:\n gmod.boolelemdeflsubh[l] = False\n gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh\n\n gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lght') and 
gmod.typeelem[l].endswith('bgrd'):\n gmod.boolelemsbrtextsbgrd[l] = True\n else:\n gmod.boolelemsbrtextsbgrd[l] = False\n gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd\n \n if gmod.boolelemsbrtextsbgrdanyy:\n gmod.indxpopllens = 1\n else:\n gmod.indxpopllens = 0\n\n gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':\n gmod.boolelemsbrtpnts[l] = True\n else:\n gmod.boolelemsbrtpnts[l] = False\n gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts\n\n # temp -- because there is currently no extended source\n gmod.boolelemsbrt = gmod.boolelemsbrtdfnc\n \n gmod.boolelempsfn = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':\n gmod.boolelempsfn[l] = True\n else:\n gmod.boolelempsfn[l] = False\n gmod.boolelempsfnanyy = True in gmod.boolelempsfn\n \n spectype = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.boolelemlght[l]:\n spectype[l] = 'powr'\n else:\n spectype[l] = 'none'\n setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)\n \n minmgwdt = 2. * gdat.sizepixl\n maxmgwdt = gdat.maxmgangdata / 4.\n setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)\n setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')\n \n if gmod.boolelemlghtanyy:\n # flux\n if gdat.typeexpr == 'ferm':\n minmflux = 1e-9\n maxmflux = 1e-6\n if gdat.typeexpr == 'tess':\n minmflux = 1.\n maxmflux = 1e3\n if gdat.typeexpr == 'chan':\n if gdat.anlytype == 'spec':\n minmflux = 1e4\n maxmflux = 1e7\n else:\n minmflux = 3e-9\n maxmflux = 1e-6\n if gdat.typeexpr == 'gene':\n minmflux = 0.1\n maxmflux = 100.\n if gdat.typeexpr == 'hubb':\n minmflux = 1e-20\n maxmflux = 1e-17\n if gdat.typeexpr == 'fire':\n minmflux = 1e-20\n maxmflux = 1e-17\n setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)\n \n if gdat.typeexpr == 'ferm':\n setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)\n \n if gdat.boolbinsener:\n ### spectral parameters\n if gdat.typeexpr == 'ferm':\n sind = [1., 3.]\n minmsind = 1.\n maxmsind = 3.\n if gdat.typeexpr == 'chan':\n minmsind = 0.4\n maxmsind = 2.4\n sind = [0.4, 2.4]\n if gdat.typeexpr == 'hubb':\n minmsind = 0.5\n maxmsind = 2.5\n sind = [0.4, 2.4]\n if gdat.typeexpr != 'fire':\n setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)\n setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)\n setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)\n setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)\n #### standard deviations should not be too small\n setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)\n setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)\n setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)\n setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)\n setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)\n for i in gdat.indxenerinde:\n setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)\n setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)\n 
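# Each color spectral index sindcolrXXXX (one per index i in gdat.indxenerinde)
# gets broad limits of [-5., 10.], wider than the limits given to sindcolr0001
# and sindcolr0002 just above.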
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)\n \n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lghtpntspuls':\n setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)\n setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)\n setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)\n setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)\n setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)\n setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full')\n setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)\n if gmod.typeelem[l] == 'lghtpntsagnntrue':\n setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)\n setp_varb(gdat, 'dlosslop', limt=[-0.5, -3.], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)\n setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)\n \n # construct background surface brightness templates from the user input\n gmod.sbrtbacknorm = [[] for c in gmod.indxback]\n gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)\n for c in gmod.indxback:\n gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))\n if gmod.typeback[c] == 'data':\n gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)\n gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100\n elif isinstance(gmod.typeback[c], float):\n gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]\n elif isinstance(gmod.typeback[c], list) and isinstance(gmod.typeback[c], float):\n gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c]]), sind=np.array([gmod.typeback[c]]))[:, 0, None, None]\n elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:\n gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]\n elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):\n indxexpatemp = int(gmod.typeback[c][8:]) \n indxterm = indxexpatemp // ordrexpa**2\n indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1\n indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1\n if namebfun == 'bfunfour':\n ampl = 1.\n func = gdat.meanpara.bgalcart \n if namebfun == 'bfunwfou':\n functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)\n ampl = np.sqrt(functemp)\n func = functemp\n argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata\n argsbgal = 2. 
* np.pi * indxexpaydat * func / gdat.maxmgangdata\n if indxterm == 0:\n termfrst = np.sin(argslgal)\n termseco = ampl * np.sin(argsbgal)\n if indxterm == 1:\n termfrst = np.sin(argslgal)\n termseco = ampl * np.cos(argsbgal)\n if indxterm == 2:\n termfrst = np.cos(argslgal)\n termseco = ampl * np.sin(argsbgal)\n if indxterm == 3:\n termfrst = np.cos(argslgal)\n termseco = ampl * np.cos(argsbgal)\n gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \\\n np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))\n \n else:\n path = gdat.pathinpt + gmod.typeback[c]\n gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)\n \n if gdat.typepixl == 'cart':\n if not gdat.boolforccart:\n if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:\n raise Exception('Provided background template must have the chosen image dimensions.')\n \n gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))\n \n if gdat.typepixl == 'cart' and gdat.boolforccart:\n sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))\n for i in gdat.indxenerfull:\n for m in gdat.indxevttfull:\n sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \\\n numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \\\n minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \\\n minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()\n gmod.sbrtbacknorm[c] = sbrtbacknormtemp\n\n # determine spatially uniform background templates\n for i in gdat.indxenerfull:\n for m in gdat.indxevttfull:\n if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:\n gmod.boolunifback[c] = False\n\n boolzero = True\n gmod.boolbfun = False\n for c in gmod.indxback:\n if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):\n booltemp = False\n raise Exception('Background templates must be positive-definite every where.')\n \n if not np.isfinite(gmod.sbrtbacknorm[c]).all():\n raise Exception('Background template is not finite.')\n\n if np.amin(gmod.sbrtbacknorm[c]) > 0. 
or gmod.typeback[c] == 'data':\n boolzero = False\n \n if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):\n gmod.boolbfun = True\n \n if boolzero and not gmod.boolbfun:\n raise Exception('At least one background template must be positive everynp.where.')\n \n # temp -- does not take into account dark hosts\n gmod.boolhost = gmod.typeemishost != 'none'\n \n # type of PSF evaluation\n if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:\n if gmod.typeemishost != 'none' or not gmod.boolunifback.all():\n # the background is not convolved by a kernel and point sources exist\n typeevalpsfn = 'full'\n else:\n # the background is not convolved by a kernel and point sources exist\n typeevalpsfn = 'kern'\n else:\n if gmod.typeemishost != 'none' or not gmod.boolunifback.all():\n # the background is convolved by a kernel, no point source exists\n typeevalpsfn = 'conv'\n else:\n # the background is not convolved by a kernel, no point source exists\n typeevalpsfn = 'none'\n setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)\n \n if gdat.typeverb > 1:\n print('gmod.typeevalpsfn')\n print(gmod.typeevalpsfn)\n \n gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'\n\n ### PSF model\n if gmod.typeevalpsfn != 'none':\n \n if gmod.typemodlpsfn == 'singgaus':\n numbpsfpform = 1\n elif gmod.typemodlpsfn == 'singking':\n numbpsfpform = 2\n elif gmod.typemodlpsfn == 'doubgaus':\n numbpsfpform = 3\n elif gmod.typemodlpsfn == 'gausking':\n numbpsfpform = 4\n elif gmod.typemodlpsfn == 'doubking':\n numbpsfpform = 5\n \n gmod.numbpsfptotl = numbpsfpform\n \n if gdat.boolpriopsfninfo:\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]\n stdvsigc = meansigc * 0.1\n setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot='$\\sigma$', scal='gaus', \\\n strgmodl=strgmodl)\n \n if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':\n meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]\n stdvgamc = meangamc * 0.1\n setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)\n if gmod.typemodlpsfn == 'doubking':\n meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]\n stdvsigt = meansigt * 0.1\n setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)\n meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]\n stdvgamt = meangamt * 0.1\n setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)\n meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]\n stdvpsff = meanpsff * 0.1\n setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)\n else:\n if gdat.typeexpr == 'gene':\n minmsigm = 0.01 / gdat.anglfact\n maxmsigm = 0.1 / gdat.anglfact\n if gdat.typeexpr == 'ferm':\n minmsigm = 0.1\n maxmsigm = 10.\n if gdat.typeexpr == 'hubb':\n minmsigm = 0.01 / gdat.anglfact\n maxmsigm = 0.1 / gdat.anglfact\n if gdat.typeexpr == 'chan':\n minmsigm = 0.1 / gdat.anglfact\n maxmsigm = 2. 
/ gdat.anglfact\n minmgamm = 1.5\n maxmgamm = 20.\n setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot='$\\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)\n\n setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)\n setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)\n setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)\n \n setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)\n \n # background\n ## number of background parameters\n numbbacp = 0\n for c in gmod.indxback:\n if gmod.boolspecback[c]:\n numbbacp += 1\n else:\n numbbacp += gdat.numbener\n \n ## background parameter indices\n gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)\n indxenerbacp = np.zeros(numbbacp, dtype=int)\n cntr = 0\n for c in gmod.indxback:\n if gmod.boolspecback[c]:\n gmod.indxbackbacp[cntr] = c\n cntr += 1\n else:\n for i in gdat.indxener:\n indxenerbacp[cntr] = i\n gmod.indxbackbacp[cntr] = c\n cntr += 1\n \n # indices of background parameters for each background component\n gmod.indxbacpback = [[] for c in gmod.indxback]\n for c in gmod.indxback:\n gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]\n \n # list of names of diffuse components\n gmod.listnamediff = []\n for c in gmod.indxback:\n gmod.listnamediff += ['back%04d' % c]\n if gmod.typeemishost != 'none':\n for e in gmod.indxsersfgrd:\n gmod.listnamediff += ['hostisf%d' % e]\n if gmod.boollens:\n gmod.listnamediff += ['lens']\n \n # list of names of emission components\n listnameecom = deepcopy(gmod.listnamediff)\n for l in gmod.indxpopl:\n if gmod.boolelemsbrt[l]:\n if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:\n if not 'dfnc' in listnameecom:\n listnameecom += ['dfnc']\n if not 'dfncsubt' in listnameecom:\n listnameecom += ['dfncsubt']\n gmod.listnameecomtotl = listnameecom + ['modl']\n \n for c in gmod.indxback:\n setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)\n \n gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)\n if gmod.boollens:\n gmod.listnamegcom += ['bgrd']\n if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:\n gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']\n \n numbdiff = len(gmod.listnamediff)\n convdiff = np.zeros(numbdiff, dtype=bool)\n for k, namediff in enumerate(gmod.listnamediff):\n if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):\n if namediff.startswith('back'):\n indx = int(namediff[-4:])\n convdiff[k] = not gmod.boolunifback[indx] \n else:\n convdiff[k] = True\n \n # element parameters that correlate with the statistical significance of the element\n gmod.namepara.elemsign = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lght'):\n gmod.namepara.elemsign[l] = 'flux'\n if gmod.typeelem[l] == 'lens':\n gmod.namepara.elemsign[l] = 'defs'\n if gmod.typeelem[l].startswith('clus'):\n gmod.namepara.elemsign[l] = 'nobj'\n \n if gdat.typeverb > 0:\n if strgmodl == 'true':\n strgtemp = 'true'\n if strgmodl == 'fitt':\n strgtemp = 'fitting'\n print('Building elements for the %s model...' 
% strgtemp)\n \n # define the names and scalings of element parameters\n gmod.namepara.genrelem = [[] for l in gmod.indxpopl]\n gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n \n if gmod.typeelem[l].startswith('lghtline'):\n gmod.namepara.genrelem[l] = ['elin']\n gmod.listscalparagenrelem[l] = ['logt']\n elif gmod.typespatdist[l] == 'diskscal':\n gmod.namepara.genrelem[l] = ['lgal', 'bgal']\n gmod.listscalparagenrelem[l] = ['self', 'dexp']\n elif gmod.typespatdist[l] == 'gangexpo':\n gmod.namepara.genrelem[l] = ['gang', 'aang']\n gmod.listscalparagenrelem[l] = ['expo', 'self']\n elif gmod.typespatdist[l] == 'glc3':\n gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']\n gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']\n else:\n gmod.namepara.genrelem[l] = ['lgal', 'bgal']\n gmod.listscalparagenrelem[l] = ['self', 'self']\n \n # amplitude\n if gmod.typeelem[l] == 'lghtpntsagnntrue':\n gmod.namepara.genrelem[l] += ['lum0']\n gmod.listscalparagenrelem[l] += ['dpowslopbrek']\n elif gmod.typeelem[l] == 'lghtpntspuls':\n gmod.namepara.genrelem[l] += ['per0']\n gmod.listscalparagenrelem[l] += ['lnormeanstdv']\n elif gmod.typeelem[l].startswith('lght'):\n gmod.namepara.genrelem[l] += ['flux']\n gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]\n elif gmod.typeelem[l] == 'lens':\n gmod.namepara.genrelem[l] += ['defs']\n gmod.listscalparagenrelem[l] += ['powr']\n elif gmod.typeelem[l].startswith('clus'):\n gmod.namepara.genrelem[l] += ['nobj']\n gmod.listscalparagenrelem[l] += ['powr']\n \n # shape\n if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':\n gmod.namepara.genrelem[l] += ['gwdt']\n gmod.listscalparagenrelem[l] += ['powr']\n if gmod.typeelem[l] == 'lghtlinevoig':\n gmod.namepara.genrelem[l] += ['sigm']\n gmod.listscalparagenrelem[l] += ['logt']\n gmod.namepara.genrelem[l] += ['gamm']\n gmod.listscalparagenrelem[l] += ['logt']\n \n # others\n if gmod.typeelem[l] == 'lghtpntspuls':\n gmod.namepara.genrelem[l] += ['magf']\n gmod.listscalparagenrelem[l] += ['lnormeanstdv']\n gmod.namepara.genrelem[l] += ['geff']\n gmod.listscalparagenrelem[l] += ['self']\n elif gmod.typeelem[l] == 'lghtpntsagnntrue':\n gmod.namepara.genrelem[l] += ['dlos']\n gmod.listscalparagenrelem[l] += ['powr']\n\n if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):\n if gmod.spectype[l] == 'colr':\n for i in gdat.indxener:\n if i == 0:\n continue\n gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]\n gmod.listscalparagenrelem[l] += ['self']\n else:\n gmod.namepara.genrelem[l] += ['sind']\n gmod.listscalparagenrelem[l] += ['self']\n if gmod.spectype[l] == 'curv':\n gmod.namepara.genrelem[l] += ['curv']\n gmod.listscalparagenrelem[l] += ['self']\n if gmod.spectype[l] == 'expc':\n gmod.namepara.genrelem[l] += ['expc']\n gmod.listscalparagenrelem[l] += ['self']\n if gmod.typeelem[l] == 'lens':\n if gdat.variasca:\n gmod.namepara.genrelem[l] += ['asca']\n gmod.listscalparagenrelem[l] += ['self']\n if gdat.variacut:\n gmod.namepara.genrelem[l] += ['acut']\n gmod.listscalparagenrelem[l] += ['self']\n \n # names of element parameters for each scaling\n gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n for scaltype in gdat.listscaltype:\n gmod.namepara.genrelemscal[l][scaltype] = []\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n if scaltype == gmod.listscalparagenrelem[l][k]:\n gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)\n\n # variables for which whose marginal 
distribution and pair-correlations will be plotted\n gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])\n gmod.namepara.derielemodim[l] += ['deltllik']\n if gdat.boolbinsspat:\n if not 'lgal' in gmod.namepara.derielemodim[l]:\n gmod.namepara.derielemodim[l] += ['lgal']\n if not 'bgal' in gmod.namepara.derielemodim[l]:\n gmod.namepara.derielemodim[l] += ['bgal']\n if not 'gang' in gmod.namepara.derielemodim[l]:\n gmod.namepara.derielemodim[l] += ['gang']\n if not 'aang' in gmod.namepara.derielemodim[l]:\n gmod.namepara.derielemodim[l] += ['aang']\n if gmod.typeelem[l].startswith('lght'):\n gmod.namepara.derielemodim[l] += ['cnts']\n if gdat.typeexpr == 'ferm':\n gmod.namepara.derielemodim[l] + ['sbrt0018']\n \n if gmod.typeelem[l] == 'lghtpntsagnntrue':\n gmod.namepara.derielemodim[l] += ['reds']\n gmod.namepara.derielemodim[l] += ['lumi']\n gmod.namepara.derielemodim[l] += ['flux']\n if gmod.typeelem[l] == 'lghtpntspuls':\n gmod.namepara.derielemodim[l] += ['lumi']\n gmod.namepara.derielemodim[l] += ['flux']\n gmod.namepara.derielemodim[l] += ['mass']\n gmod.namepara.derielemodim[l] += ['dlos']\n if gmod.typeelem[l] == 'lens':\n gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']\n \n #for k in range(len(gmod.namepara.derielemodim[l])):\n # gmod.namepara.derielemodim[l][k] += 'pop%d' % l\n \n # check later\n # temp\n #if strgmodl == 'fitt':\n # for q in gdat.indxrefr: \n # if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:\n # gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])\n \n if gdat.typeverb > 1:\n print('gmod.namepara.derielemodim')\n print(gmod.namepara.derielemodim)\n \n # derived element parameters\n gmod.namepara.derielem = gmod.namepara.derielemodim[:]\n \n if gdat.typeverb > 1:\n print('gmod.namepara.derielem')\n print(gmod.namepara.derielem)\n \n # derived parameters\n gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]\n #gmod.listnameparaderitotl += gmod.namepara.scal\n \n for namediff in gmod.listnamediff:\n gmod.listnameparaderitotl += ['cntp' + namediff]\n \n if gdat.typeverb > 1:\n print('gmod.listnameparaderitotl')\n print(gmod.listnameparaderitotl)\n\n if strgmodl == 'fitt':\n # add reference element parameters that are not available in the fitting model\n gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n gmod.namepara.extrelem = [[] for l in gmod.indxpopl]\n for q in gdat.indxrefr: \n if gdat.refr.numbelem[q] == 0:\n continue\n for name in gdat.refr.namepara.elem[q]:\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):\n continue\n if gmod.typeelem[l] == ('lens') and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):\n continue\n if not name in gmod.namepara.derielemodim[l]:\n nametotl = name + gdat.listnamerefr[q]\n if name == 'etag':\n continue\n gmod.namepara.derielemodim[l].append(nametotl)\n \n if gdat.refr.numbelem[q] == 0:\n continue\n\n gdat.refr.namepara.elemonly[q][l].append(name)\n if not nametotl in gmod.namepara.extrelem[l]:\n gmod.namepara.extrelem[l].append(nametotl) \n #if name == 'reds':\n # for nametemp in ['lumi', 'dlos']:\n # nametemptemp = nametemp + gdat.listnamerefr[q]\n # if not nametemptemp in gmod.namepara.extrelem[l]:\n # gmod.namepara.derielemodim[l].append(nametemp + 
gdat.listnamerefr[q])\n # gmod.namepara.extrelem[l].append(nametemptemp)\n \n if gdat.typeverb > 1:\n print('gdat.refr.namepara.elemonly')\n print(gdat.refr.namepara.elemonly)\n \n if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lghtpnts':\n gmod.namepara.extrelem[l].append('lumiwo08')\n gmod.namepara.derielemodim[l].append('lumiwo08')\n \n if gdat.typeverb > 1:\n print('gmod.namepara.extrelem')\n print(gmod.namepara.extrelem)\n\n # defaults\n gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]\n gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lght'): \n if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:\n if l == 1:\n gmod.liststrgpdfnmodu[l] += ['tmplnfwp']\n gmod.namepara.genrelemmodu[l] += ['lgalbgal']\n if l == 2:\n gmod.liststrgpdfnmodu[l] += ['tmplnfwp']\n gmod.namepara.genrelemmodu[l] += ['lgalbgal']\n \n gmod.namepara.elem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:\n for strgthis in liststrg:\n if not strgthis in gmod.namepara.elem[l]:\n gmod.namepara.elem[l].append(strgthis)\n \n # temp\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lghtline'):\n gmod.namepara.genrelem[l] += ['spec']\n if gmod.typeelem[l].startswith('lght'):\n gmod.namepara.genrelem[l] += ['spec', 'specplot']\n if gmod.typeelem[l] == 'lens':\n gmod.namepara.genrelem[l] += ['deflprof']\n \n #gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]\n #for l in gmod.indxpopl:\n # if gmod.typeelem[l].startswith('clus'):\n # gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']\n # if gmod.typeelem[l] == 'clusvari':\n # gmod.namepara.genrelemeval[l] += ['gwdt']\n # if gmod.typeelem[l] == 'lens':\n # gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']\n # if gmod.typeelem[l].startswith('lghtline'):\n # gmod.namepara.genrelemeval[l] = ['elin', 'spec']\n # elif gmod.typeelem[l] == 'lghtgausbgrd':\n # gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']\n # elif gmod.typeelem[l].startswith('lght'):\n # gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']\n \n ## element legends\n lablpopl = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gdat.numbgrid > 1:\n if gmod.typeelem[l] == 'lghtpnts':\n lablpopl[l] = 'FPS'\n if gmod.typeelem[l] == 'lghtgausbgrd':\n lablpopl[l] = 'BGS'\n else:\n if gmod.typeelem[l] == 'lghtpntspuls':\n lablpopl[l] = 'Pulsar'\n elif gmod.typeelem[l].startswith('lghtpntsagnn'):\n lablpopl[l] = 'AGN'\n elif gmod.typeelem[l].startswith('lghtpnts'):\n lablpopl[l] = 'PS'\n if gmod.typeelem[l] == 'lens':\n lablpopl[l] = 'Subhalo'\n if gmod.typeelem[l].startswith('clus'):\n lablpopl[l] = 'Cluster'\n if gmod.typeelem[l].startswith('lghtline'):\n lablpopl[l]= 'Line'\n setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)\n\n if strgmodl == 'true':\n gmod.indxpoplassc = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':\n gmod.indxpoplassc[l] = [l]\n else:\n gmod.indxpoplassc[l] = gmod.indxpopl\n\n # variables for which two dimensional histograms will be calculated\n gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]\n if gdat.boolplotelemcorr:\n for l in gmod.indxpopl:\n for strgfeat in gmod.namepara.derielemodim[l]:\n gmod.namepara.genrelemcorr[l].append(strgfeat)\n \n # number of element parameters\n if gmod.numbpopl > 0:\n gmod.numbparagenrelemsing = 
np.zeros(gmod.numbpopl, dtype=int)\n gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)\n gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)\n gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)\n gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)\n gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)\n gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)\n gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)\n for l in gmod.indxpopl:\n # number of generative element parameters for a single element of a specific population\n gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])\n # number of derived element parameters for a single element of a specific population\n gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])\n # number of element parameters for a single element of a specific population\n gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])\n # number of generative element parameters for all elements of a specific population\n gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]\n # number of generative element parameters up to the beginning of a population\n gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])\n # number of generative element parameters up to the end of a population\n gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])\n # number of derived element parameters for all elements of a specific population\n gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.numbelem[l]\n # number of element parameters for all elements of a specific population\n gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.numbelem[l]\n # number of generative element parameters summed over all populations\n gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)\n # number of derived element parameters summed over all populations\n gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)\n # number of element parameters summed over all populations\n gmod.numbparaelemtotl = np.sum(gmod.numbparaderielem)\n \n gmod.indxparagenrelemsing = []\n for l in gmod.indxpopl:\n gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))\n \n gmod.indxparaderielemsing = []\n for l in gmod.indxpopl:\n gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))\n \n gmod.indxparaelemsing = []\n for l in gmod.indxpopl:\n gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))\n\n # size of the auxiliary variable propobability density vector\n if gmod.maxmpara.numbelemtotl > 0:\n gmod.numblpri = 3 + gmod.numbparagenrelem * gmod.numbpopl\n else:\n gmod.numblpri = 0\n if gdat.penalpridiff:\n gmod.numblpri += 1\n indxlpri = np.arange(gmod.numblpri)\n\n # append the population tags to element parameter names\n #for l in gmod.indxpopl:\n # gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]\n \n gmod.boolcompposi = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)\n if gmod.typeelem[l].startswith('lghtline'):\n gmod.boolcompposi[l][0] = True\n else:\n gmod.boolcompposi[l][0] = True\n gmod.boolcompposi[l][1] = True\n \n # list of strings across all populations\n ## all (generative and derived) element parameters\n gmod.numbparaelem = len(gmod.namepara.elem)\n gmod.indxparaelem = np.arange(gmod.numbparaelem)\n \n # flattened list of generative element parameters\n gmod.listnameparagenfelem = []\n 
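# the loop below tags each generative element parameter name with its population index, e.g. 'flux' in population 0 becomes 'fluxpop0'\n    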
for l in gmod.indxpopl:\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)\n \n # concatenated list of flattened generative and derived element parameters\n gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem\n\n gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)\n for l in gmod.indxpopl:\n gmod.numbparaelem[l] = len(gmod.namepara.elem[l])\n \n numbdeflsubhplot = 2\n numbdeflsingplot = numbdeflsubhplot\n if gmod.numbparaelem > 0:\n numbdeflsingplot += 3\n\n gmod.convdiffanyy = True in convdiff\n\n cntr = tdpy.cntr()\n \n if gmod.boollens:\n adishost = gdat.adisobjt(redshost)\n adissour = gdat.adisobjt(redssour)\n adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost\n massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)\n mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)\n \n # object of parameter indices\n gmod.indxpara = tdpy.gdatstrt()\n \n # define parameter indices\n if gmod.numbparaelem > 0:\n\n # number of elements\n #gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)\n for l in gmod.indxpopl:\n indx = cntr.incr()\n setattr(gmod.indxpara, 'numbelempop%d' % l, indx)\n #gmod.indxpara.numbelem[l] = indx\n \n # hyperparameters\n ## mean number of elements\n if gmod.typemodltran == 'pois':\n #gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)\n for l in gmod.indxpopl:\n if gmod.maxmpara.numbelem[l] > 0:\n indx = cntr.incr()\n setattr(gmod.indxpara, 'meanelempop%d' % l, indx)\n #gmod.indxpara.meanelem[l] = indx\n\n ## parameters parametrizing priors on element parameters\n liststrgvarb = []\n for l in gmod.indxpopl:\n if gmod.maxmpara.numbelem[l] > 0:\n for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):\n if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':\n liststrgvarb += [strgfeat + 'distscal']\n if strgpdfnelemgenr == 'powr':\n liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]\n if strgpdfnelemgenr == 'dpow':\n liststrgvarb += [strgfeat + 'distbrek']\n liststrgvarb += [strgfeat + 'sloplowr']\n liststrgvarb += [strgfeat + 'slopuppr']\n if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':\n liststrgvarb += [strgfeat + 'distmean']\n if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':\n liststrgvarb += [strgfeat + 'diststdv']\n if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':\n liststrgvarb += [nameparagenrelem + 'distmean', nameparagenrelem + 'diststdv']\n for strgvarb in liststrgvarb:\n setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)\n\n for l in gmod.indxpopl:\n strgpopl = 'pop%d' % l\n if gmod.maxmpara.numbelem[l] > 0:\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n \n if gmod.listscalparagenrelem[l][k] == 'self':\n continue\n indx = cntr.incr()\n\n if gmod.listscalparagenrelem[l][k] == 'dpow':\n for nametemp in ['brek', 'sloplowr', 'slopuppr']:\n strg = '%s' % nametemp + nameparagenrelem\n setattr(gmod.indxpara, strg, indx)\n setattr(gmod.indxpara, strg, indx)\n else:\n if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':\n strghypr = 'scal'\n if gmod.listscalparagenrelem[l][k] == 'powr':\n strghypr = 'slop'\n if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \\\n gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':\n 
strghypr = 'mean'\n if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \\\n gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':\n strghypr = 'stdv'\n strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l\n setattr(gmod.indxpara, strg, indx)\n \n # group PSF parameters\n if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':\n for m in gdat.indxevtt:\n for i in gdat.indxener:\n setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())\n if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':\n setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())\n if gmod.typemodlpsfn == 'doubking':\n setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())\n setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())\n setattr(gmod.indxpara, 'ffenen%02devt%d' % (i, m), cntr.incr())\n \n gmod.indxpara.psfp = []\n for strg, valu in gmod.indxpara.__dict__.items():\n if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):\n gmod.indxpara.psfp.append(valu)\n gmod.indxpara.psfp = np.array(gmod.indxpara.psfp) \n\n gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl\n gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl\n numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt\n indxpsfpform = np.arange(numbpsfpform)\n indxpsfptotl = np.arange(gmod.numbpsfptotl)\n \n indxpsfp = np.arange(numbpsfp)\n gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)\n gmod.indxparapsfpinit = gmod.indxpara.psfp[0]\n \n # group background parameters\n gmod.indxpara.bacp = []\n for c in gmod.indxback:\n if gmod.boolspecback[c]:\n indx = cntr.incr()\n setattr(gmod.indxpara, 'bacpback%04d' % c, indx)\n gmod.indxpara.bacp.append(indx)\n else:\n for i in gdat.indxener:\n indx = cntr.incr()\n setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)\n gmod.indxpara.bacp.append(indx)\n gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)\n\n # temp\n #gmod.indxpara.anglsour = []\n #gmod.indxpara.anglhost = []\n #gmod.indxpara.angllens = []\n \n if gmod.typeemishost != 'none':\n gmod.indxpara.specsour = []\n gmod.indxpara.spechost = []\n\n if gmod.boollens:\n gmod.indxpara.lgalsour = cntr.incr()\n gmod.indxpara.bgalsour = cntr.incr()\n gmod.indxpara.fluxsour = cntr.incr()\n if gdat.numbener > 1:\n gmod.indxpara.sindsour = cntr.incr()\n gmod.indxpara.sizesour = cntr.incr()\n gmod.indxpara.ellpsour = cntr.incr()\n gmod.indxpara.anglsour = cntr.incr()\n if gmod.typeemishost != 'none' or gmod.boollens:\n for e in gmod.indxsersfgrd: \n if gmod.typeemishost != 'none':\n setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())\n setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())\n setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())\n if gdat.numbener > 1:\n setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())\n setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())\n if gmod.boollens:\n setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())\n if gmod.typeemishost != 'none':\n setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())\n setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())\n setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())\n if gmod.boollens:\n gmod.indxpara.sherextr = cntr.incr()\n gmod.indxpara.sangextr = cntr.incr()\n gmod.indxpara.sour = []\n \n if gmod.boollens and gmod.typeemishost == 'none':\n raise Exception('Lensing cannot be modeled without 
host galaxy emission.')\n \n # collect groups of parameters\n if gdat.typeexpr == 'hubb':\n gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']\n for namecomplens in gmod.listnamecomplens:\n setattr(gmod, 'liststrg' + namecomplens, [])\n setattr(gmod.indxpara, namecomplens, [])\n if gmod.boollens or gmod.typeemishost != 'none':\n gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']\n gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']\n if gmod.typeemishost != 'none':\n gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']\n if gdat.numbener > 1:\n gmod.liststrghostlght += ['sindhost']\n if gmod.boollens:\n gmod.liststrghostlens += ['beinhost']\n gmod.liststrgextr += ['sherextr', 'sangextr']\n gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']\n if gdat.numbener > 1:\n gmod.liststrgsour += ['sindsour']\n \n for strg, valu in gmod.__dict__.items():\n \n if isinstance(valu, list) or isinstance(valu, np.ndarray):\n continue\n \n if gdat.typeexpr == 'hubb':\n for namecomplens in gmod.listnamecomplens:\n for strgtemp in getattr(gmod, 'liststrg' + namecomplens):\n if strg[12:].startswith(strgtemp):\n \n if isinstance(valu, list):\n for valutemp in valu:\n gmod['indxparagenr' + namecomplens].append(valutemp)\n else:\n gmod['indxparagenr' + namecomplens].append(valu)\n \n # remove indxpara. from strg\n strg = strg[12:]\n \n if strg.startswith('fluxsour') or strg.startswith('sindsour'):\n gmod.indxpara.specsour.append(valu)\n\n if strg.startswith('fluxhost') or strg.startswith('sindhost'):\n gmod.indxpara.spechost.append(valu)\n \n if gmod.boollens or gmod.boolhost:\n gmod.indxpara.host = gmod.indxparahostlght + gmod.indxparahostlens\n gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr\n\n ## number of model spectral parameters for each population\n #numbspep = np.empty(gmod.numbpopl, dtype=int)\n #liststrgspep = [[] for l in range(gmod.numbpopl)]\n #for l in gmod.indxpopl:\n # if gdat.numbener > 1:\n # liststrgspep[l] += ['sind']\n # if gmod.spectype[l] == 'expc':\n # liststrgspep[l] += ['expc']\n # if gmod.spectype[l] == 'curv':\n # liststrgspep[l] = ['curv']\n # numbspep[l] = len(liststrgspep[l]) \n \n\ndef setp_paragenrscalbase(gdat, strgmodl='fitt'):\n '''\n Setup labels and scales for base parameters\n '''\n \n print('setp_paragenrscalbase(): Building the %s model base paremeter names and scales...' 
% strgmodl)\n gmod = getattr(gdat, strgmodl)\n \n listlablback = []\n listlablback = []\n for nameback in gmod.listnameback:\n if nameback == 'isot':\n listlablback.append('Isotropic')\n listlablback.append(r'$\\mathcal{I}$')\n if nameback == 'fdfm':\n listlablback.append('FDM')\n listlablback.append(r'$\\mathcal{D}$')\n if nameback == 'dark':\n listlablback.append('NFW')\n listlablback.append(r'$\\mathcal{D}_{dark}$')\n if nameback == 'part':\n listlablback.append('Particle Back.')\n listlablback.append(r'$\\mathcal{I}_p$')\n\n # background templates\n listlablsbrt = deepcopy(listlablback)\n numblablsbrt = 0\n for l in gmod.indxpopl:\n if gmod.boolelemsbrt[l]:\n listlablsbrt.append(gmod.lablpopl[l])\n listlablsbrt.append(gmod.lablpopl[l] + ' subt')\n numblablsbrt += 2\n if gmod.boollens:\n listlablsbrt.append('Source')\n numblablsbrt += 1\n if gmod.typeemishost != 'none':\n for e in gmod.indxsersfgrd:\n listlablsbrt.append('Host %d' % e)\n numblablsbrt += 1\n if gmod.numbpopl > 0:\n if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:\n listlablsbrt.append('Uniform')\n numblablsbrt += 1\n \n listlablsbrtspec = ['Data']\n listlablsbrtspec += deepcopy(listlablsbrt)\n if len(listlablsbrt) > 1:\n listlablsbrtspec.append('Total Model')\n \n numblablsbrtspec = len(listlablsbrtspec)\n \n # number of generative parameters per element, depends on population\n #numbparaelem = gmod.numbparagenrelem + numbparaelemderi\n\n # maximum total number of parameters\n #numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem\n \n #numbparaelemkind = gmod.numbparagenrbase\n #for l in gmod.indxpopl:\n # numbparaelemkind += gmod.numbparagenrelemsing[l]\n \n #nameparagenrbase\n #gmod.namepara.genrelem\n \n #listnameparaderifixd\n #listnameparaderielem\n \n #gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem\n #listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem\n \n gmod.listindxparakindscal = {}\n for scaltype in gdat.listscaltype:\n gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]\n\n #\n ## stack\n ## gmod.listnameparastck\n #gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)\n #gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)\n #\n #gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase\n #gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase\n #for k in range(gmod.numbparaelem):\n # for l in gmod.indxpopl: \n # if k >= gmod.numbparagenrelemcuml[l]:\n # indxpopltemp = l\n # indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]\n # gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]\n # break\n # gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)\n # gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]\n #\n #\n #if np.where(gmod.listscalpara == 0)[0].size > 0:\n # print('gmod.listscalpara[gmod.indxparagenrbase]')\n # print(gmod.listscalpara[gmod.indxparagenrbase])\n # raise Exception('')\n #\n ## labels and scales for variables\n if gmod.boollens:\n setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\\rm{sub}}$')\n setattr(gmod.lablrootpara, 'masssubhdelt', r'$\\rho_{\\rm{sub}}$')\n setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\\rm{sub,E}}$')\n setattr(gmod.lablrootpara, 
'masssubhdeltbein', r'$\\rho_{\\rm{sub,E}}$')\n setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\\odot}$')\n setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\\odot}$/kpc')\n setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\\odot}$')\n setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\\odot}$/kpc')\n setattr(gmod.lablrootpara, 'fracsubhintg', r'f_{\\rm{sub}}')\n setattr(gmod.lablrootpara, 'fracsubhdelt', r'f_{\\rho,\\rm{sub}}')\n setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\\rm{sub,E}}$')\n setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\\rho,\\rm{sub,E}}$')\n for e in gmod.indxsersfgrd:\n setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\\rm{hst,%d,C}}$' % e)\n setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\\rm{hst,%d<}}$' % e)\n setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\\rm{hst,%d}}$' % e)\n setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\\rm{hst,E,%d<}}$' % e)\n setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\\rm{hst,E,%d}}$' % e)\n for namevarb in ['fracsubh', 'masssubh']:\n for strgcalcmasssubh in gdat.liststrgcalcmasssubh:\n for nameeval in ['', 'bein']:\n setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')\n for e in gmod.indxsersfgrd:\n setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')\n for strgcalcmasssubh in gdat.liststrgcalcmasssubh:\n for nameeval in ['', 'bein']:\n setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')\n \n # scalar variable setup\n gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'\n gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'\n gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'\n gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'\n \n gdat.lablbooldfncsubt = 'H'\n \n gdat.lablpriofactdoff = r'$\\alpha_{p}$'\n gmod.scalpriofactdoff = 'self'\n\n gdat.minmreds = 0.\n gdat.maxmreds = 1.5\n \n gdat.minmmagt = 19.\n gdat.maxmmagt = 28.\n\n gmod.scalpara.numbelem = 'logt'\n gmod.scalpara.lliktotl = 'logt'\n\n gdat.lablener = 'E'\n #gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)\n \n # width of the Gaussian clusters\n gdat.lablgwdt = r'\\sigma_G'\n \n gdat.lablgang = r'\\theta'\n gdat.lablaang = r'\\phi'\n gdat.labllgalunit = gdat.lablgangunit\n gdat.lablbgalunit = gdat.lablgangunit\n \n gdat.lablanglfromhost = r'\\theta_{\\rm{0,hst}}'\n gdat.lablanglfromhostunit = gdat.lablgangunit\n\n gdat.labldefs = r'\\alpha_s'\n gdat.lablflux = 'f'\n gdat.lablnobj = 'p'\n \n gdat.lablelin = r'\\mathcal{E}'\n \n gdat.lablsbrt = r'\\Sigma'\n \n gdat.labldeflprof = r'\\alpha_a'\n gdat.labldeflprofunit = u'$^{\\prime\\prime}$'\n \n gdat.strgenerkevv = 'keV'\n gdat.strgenergevv = 'GeV'\n gdat.strgenerergs = 'erg'\n gdat.strgenerimum = '\\mu m^{-1}'\n\n gdat.labldefsunit = u'$^{\\prime\\prime}$'\n gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'\n \n\n ### labels for derived fixed dimensional parameters\n if gdat.boolbinsener:\n for i in gdat.indxener:\n setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)\n else:\n gmod.lablrootpara.fracsdenmeandarkdfncsubt = 'f_{D/ST}'\n setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubt', 'f_{D/ST}')\n \n ### labels for background units\n if gdat.typeexpr == 'ferm':\n for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:\n \n for labltemptemp in ['flux', 'sbrt']:\n\n # define the label\n if nameenerscaltype == 'en00':\n strgenerscal = '%s' % labltemp\n if nameenerscaltype == 'en01':\n strgenerscal = 'E%s' % labltemp\n if nameenerscaltype 
== 'en02':\n strgenerscal = 'E^2%s' % labltemp\n if nameenerscaltype == 'en03':\n strgenerscal = '%s' % labltemp\n labl = '%s' % strgenerscal\n\n for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:\n \n strgenerunit = getattr(gdat, 'strgener' + nameenerunit)\n\n if nameenerscaltype == 'en00':\n strgenerscalunit = '%s$^{-1}$' % strgenerunit\n if nameenerscaltype == 'en01':\n strgenerscalunit = '' \n if nameenerscaltype == 'en02':\n strgenerscalunit = '%s' % strgenerunit\n if nameenerscaltype == 'en03':\n strgenerscalunit = '%s' % strgenerunit\n \n # define the label unit\n for namesoldunit in ['ster', 'degr']:\n if labltemptemp == 'flux':\n lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)\n setattr(gmod.lablunitpara, 'lablflux' + nameenerscaltype + nameenerunit + 'unit', lablunit)\n else:\n if namesoldunit == 'ster':\n lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)\n if namesoldunit == 'degr':\n lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)\n setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)\n\n if gdat.boolbinsener:\n gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')\n gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')\n\n gdat.lablexpo = r'$\\epsilon$'\n gdat.lablexpounit = 'cm$^2$ s'\n \n gdat.lablprvl = '$p$'\n \n gdat.lablreds = 'z'\n gdat.lablmagt = 'm_R'\n \n gdat.lablper0 = 'P_0'\n gmod.scalper0plot = 'logt'\n \n gdat.labldglc = 'd_{gc}'\n gmod.scaldglcplot = 'logt'\n \n gdat.labldlos = 'd_{los}'\n gmod.scaldlosplot = 'logt'\n if gdat.typeexpr == 'ferm':\n gdat.labldlosunit = 'kpc'\n gdat.labllumi = r'L_{\\gamma}'\n if gdat.typeexpr == 'chan':\n gdat.labldlosunit = 'Mpc'\n gdat.labllumi = r'L_{X}'\n gdat.labllum0 = r'L_{X, 0}'\n \n gdat.lablgeff = r'\\eta_{\\gamma}'\n gmod.scalgeffplot = 'logt'\n \n gmod.scallumiplot = 'logt'\n gdat.labllumiunit = 'erg s$^{-1}$'\n gdat.labllum0unit = 'erg s$^{-1}$'\n \n gdat.lablthet = r'\\theta_{gc}'\n gmod.scalthetplot = 'self'\n \n gdat.lablphii = r'\\phi_{gc}'\n gmod.scalphiiplot = 'self'\n \n setattr(gmod.lablrootpara, 'magf', 'B')\n setattr(gdat, 'scalmagfplot', 'logt')\n \n setattr(gmod.lablrootpara, 'per1', 'P_1')\n if gdat.typedata == 'inpt':\n gdat.minmpara.per0 = 1e-3\n gdat.maxmpara.per0 = 1e1\n gdat.minmpara.per1 = 1e-20\n gdat.maxmpara.per1 = 1e-10\n gdat.minmpara.per1 = 1e-20\n gdat.maxmpara.per1 = 1e-10\n gdat.minmpara.flux0400 = 1e-1\n gdat.maxmpara.flux0400 = 1e4\n setattr(gdat, 'scalper1plot', 'logt')\n setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')\n setattr(gdat, 'scalflux0400plot', 'logt')\n \n for q in gdat.indxrefr:\n setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], '\\Delta_{%d}' % q)\n gdat.lablsigm = '\\sigma_l'\n gdat.lablgamm = '\\gamma_l'\n\n gdat.lablbcom = '\\eta'\n \n gdat.lablinfopost = 'D_{KL}'\n gdat.lablinfopostunit = 'nat'\n gdat.lablinfoprio = 'D_{KL,pr}'\n gdat.lablinfopriounit = 'nat'\n \n gdat.labllevipost = '\\ln P(D)'\n gdat.labllevipostunit = 'nat'\n gdat.lablleviprio = '\\ln P_{pr}(D)'\n gdat.labllevipriounit = 'nat'\n \n gdat.lablsind = 's'\n if gdat.boolbinsener:\n for i in gdat.indxenerinde:\n setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)\n\n gdat.lablexpcunit = gdat.strgenerunit\n \n gdat.labllliktotl = r'\\ln P(D|M)'\n \n gdat.labllpripena = r'\\ln P(N)'\n \n gdat.lablasca = r'\\theta_s'\n gdat.lablascaunit = gdat.lablgangunit\n gdat.lablacut = r'\\theta_c'\n gdat.lablacutunit = gdat.lablgangunit\n 
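# labels for the cutoff mass of each element (M_{c,n}) and its corrected counterpart\n    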
\n gdat.lablmcut = r'M_{c,n}'\n gdat.lablmcutunit = r'$M_{\\odot}$'\n \n gdat.lablmcutcorr = r'\\bar{M}_{c,n}'\n gdat.lablmcutcorrunit = r'$M_{\\odot}$'\n \n gdat.lablspec = gdat.lablflux\n gdat.lablspecunit = gdat.lablfluxunit\n gdat.lablspecplot = gdat.lablflux\n gdat.lablspecplotunit = gdat.lablfluxunit\n gdat.lablcnts = 'C'\n gdat.labldeltllik = r'\\Delta_n \\ln P(D|M)'\n gdat.labldiss = r'\\theta_{sa}'\n gdat.labldissunit = gdat.lablgangunit\n \n gdat.lablrele = r'\\langle|\\vec{\\alpha}_n \\cdot \\vec{\\nabla} k_l| \\rangle'\n \n gdat.lablrelc = r'\\langle\\vec{\\alpha}_n \\cdot \\vec{\\nabla} k_l \\rangle'\n \n gdat.lablreld = r'\\langle|\\vec{\\alpha}_n \\cdot \\vec{\\nabla} k_d| \\rangle'\n \n gdat.lablreln = r'\\langle \\Delta \\theta_{pix} |\\hat{\\alpha}_n \\cdot \\vec{\\nabla} k_l| / \\alpha_{s,n} \\rangle'\n \n gdat.lablrelm = r'\\langle |\\vec{\\nabla}_{\\hat{\\alpha}} k_l| / \\alpha_{s,n} \\rangle'\n gdat.lablrelk = r'\\langle |\\vec{\\nabla}_{\\hat{\\alpha}} k_l| / \\alpha_{s,n} \\rangle'\n gdat.lablrelf = r'\\langle |\\vec{\\nabla}_{\\hat{\\alpha}} k_l| / \\alpha_{s,n} \\rangle / k_m'\n \n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))\n setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))\n \n if gdat.typeexpr == 'chan':\n if gdat.anlytype == 'spec':\n gdat.minmspec = 1e-2\n gdat.maxmspec = 1e1\n else:\n gdat.minmspec = 1e-11\n gdat.maxmspec = 1e-7\n else:\n gdat.minmspec = 1e-11\n gdat.maxmspec = 1e-7\n \n if gdat.typeexpr == 'ferm':\n gdat.minmlumi = 1e32\n gdat.maxmlumi = 1e36\n elif gdat.typeexpr == 'chan':\n if gdat.typedata == 'inpt':\n gdat.minmlum0 = 1e42\n gdat.maxmlum0 = 1e46\n gdat.minmlumi = 1e41\n gdat.maxmlumi = 1e45\n \n try:\n gdat.minmdlos\n except:\n if gdat.typeexpr == 'chan':\n gdat.minmdlos = 1e7\n gdat.maxmdlos = 1e9\n else:\n gdat.minmdlos = 6e3\n gdat.maxmdlos = 1.1e4\n \n if gdat.typeexpr == 'ferm':\n gdat.minmcnts = 1e1\n gdat.maxmcnts = 1e5\n if gdat.typeexpr == 'chan':\n if gdat.numbpixlfull == 1:\n gdat.minmcnts = 1e4\n gdat.maxmcnts = 1e8\n else:\n gdat.minmcnts = 1.\n gdat.maxmcnts = 1e3\n if gdat.typeexpr == 'hubb':\n gdat.minmcnts = 1.\n gdat.maxmcnts = 1e3\n if gdat.typeexpr == 'fire':\n gdat.minmcnts = 1.\n gdat.maxmcnts = 1e3\n\n gdat.minmspecplot = gdat.minmspec\n gdat.maxmspecplot = gdat.maxmspec\n \n gdat.minmdeltllik = 1.\n gdat.maxmdeltllik = 1e3\n gdat.minmdiss = 0.\n gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)\n \n gdat.minmrele = 1e-3\n gdat.maxmrele = 1e1\n\n gdat.minmreln = 1e-3\n gdat.maxmreln = 1.\n\n gdat.minmrelk = 1e-3\n gdat.maxmrelk = 1.\n\n gdat.minmrelf = 1e-5\n gdat.maxmrelf = 1e-1\n\n gdat.minmrelm = 1e-3\n gdat.maxmrelm = 1e1\n\n gdat.minmreld = 1e-3\n gdat.maxmreld = 1e1\n\n gdat.minmrelc = 1e-3\n gdat.maxmrelc = 1.\n\n gdat.minmmcut = 3e7\n gdat.maxmmcut = 2e9\n gdat.minmmcutcorr = gdat.minmmcut\n gdat.maxmmcutcorr = gdat.maxmmcut\n\n if gdat.boolbinsspat:\n gdat.minmbein = 0.\n gdat.maxmbein = 1. 
/ gdat.anglfact\n \n # scalar variables\n if gdat.boolbinsspat:\n gdat.minmdeflprof = 1e-3 / gdat.anglfact\n gdat.maxmdeflprof = 0.1 / gdat.anglfact\n \n #gdat.minmfracsubh = 0.\n #gdat.maxmfracsubh = 0.3\n #gmod.scalfracsubh = 'self'\n\n #gdat.minmmasshost = 1e10\n #gdat.maxmmasshost = 1e13\n #gmod.scalmasshost = 'self'\n #\n #gdat.minmmasssubh = 1e8\n #gdat.maxmmasssubh = 1e10\n #gmod.scalmasssubh = 'self'\n\n # collect groups of parameter indices into lists\n ## labels and scales for base parameters\n gmod.nameparagenrbase = []\n for name, k in gmod.indxpara.__dict__.items():\n if not np.isscalar(k):\n print('name')\n print(name)\n print('temp: no nonscalar should be here!')\n continue\n gmod.nameparagenrbase.append(name)\n gmod.numbparagenrbase = len(gmod.nameparagenrbase)\n gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)\n gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]\n ## list of scalar variable names\n gmod.namepara.scal = list(gmod.nameparagenrbase) \n gmod.namepara.scal += ['lliktotl']\n\n # derived parameters\n print('Determining the list of derived, fixed-dimensional parameter names...')\n gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]\n gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]\n gmod.namepara.genrelemflat = []\n gmod.namepara.derielemflat = []\n gmod.namepara.genrelemextdflat = []\n gmod.namepara.derielemextdflat = []\n for l in gmod.indxpopl:\n for g in gmod.indxparagenrelemsing[l]:\n gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)\n for d in range(gmod.maxmpara.numbelem[l]):\n gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)\n gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])\n for k in gmod.indxparaderielemsing[l]: \n gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)\n for d in range(gmod.maxmpara.numbelem[l]):\n gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)\n gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])\n\n # list of element parameter names (derived and generative), counting label-degenerate element parameters only once \n gmod.namepara.elem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])\n gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])\n \n gmod.namepara.elemflat = []\n for l in gmod.indxpopl:\n gmod.namepara.elemflat.extend(gmod.namepara.elem[l])\n\n gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)\n if gmod.boolelemlghtanyy:\n for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:\n if not strgfeat in gmod.namepara.genrelemdefa:\n gmod.namepara.genrelemdefa.append(strgfeat)\n\n # list of flattened generative element parameter names, counting label-degenerate element parameters only once\n gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat\n gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)\n #gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)\n gmod.inxparagenrscalelemkind = tdpy.gdatstrt()\n \n gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)\n gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)\n\n # list of parameter names (derived and generative), counting label-degenerate element parameters only once, 
element lists flattened\n gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat\n \n gmod.numbparakind = len(gmod.namepara.kind)\n gmod.indxparakind = np.arange(gmod.numbparakind)\n\n # list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened\n gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat\n gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)\n gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)\n gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)\n\n # list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened\n gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat\n gmod.numbparagenr = len(gmod.listnameparagenrscal)\n gmod.indxparagenr = np.arange(gmod.numbparagenr)\n\n # list of parameter names (derived and generative), element lists flattened\n gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \\\n gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat\n \n gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)\n\n for e in gmod.indxsersfgrd:\n gmod.namepara.scal += ['masshost' + strgsersfgrd + 'bein']\n for strgcalcmasssubh in gdat.liststrgcalcmasssubh:\n gmod.namepara.scal += ['masshost' + strgsersfgrd + strgcalcmasssubh + 'bein']\n if gmod.numbparaelem > 0:\n if gmod.boollenssubh:\n for strgcalcmasssubh in gdat.liststrgcalcmasssubh:\n gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein'] \n if gmod.numbparaelem > 0:\n gmod.namepara.scal += ['lpripena']\n if False and gmod.boolelemsbrtdfncanyy:\n for strgbins in ['lowr', 'higr']:\n gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]\n gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]\n for i in gdat.indxener:\n gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]\n gmod.namepara.scal += ['booldfncsubt']\n if gmod.numbparaelem > 0:\n for q in gdat.indxrefr:\n if gdat.boolasscrefr[q]:\n for l in gmod.indxpopl:\n gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]\n gmod.namepara.scal += ['fdispop%dpop%d' % (q, l)]\n \n gmod.numbvarbscal = len(gmod.namepara.scal)\n gmod.indxvarbscal = np.arange(gmod.numbvarbscal)\n \n # determine total label\n gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat\n gmod.listnameparaglob += ['cntpmodl']\n for l in gmod.indxpopl:\n for g in gmod.indxparagenrelemsing[l]:\n if not gmod.namepara.genrelem[l][g] in gmod.listnameparaglob:\n gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])\n gmod.listnameparaglob.append(gmod.namepara.derielem[l][g])\n\n for name in gmod.listnameparaglob:\n lablroot = getattr(gmod.lablrootpara, name)\n lablunit = getattr(gmod.lablunitpara, name)\n labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)\n setattr(gmod.labltotlpara, name, labltotl)\n \n # define fact\n for l in gmod.indxpopl:\n for k in gmod.indxparakind:\n name = gmod.namepara.kind[k]\n scal = getattr(gmod.scalpara, name)\n if scal == 'self' or scal == 'logt':\n minm = getattr(gmod.minmpara, name)\n maxm = getattr(gmod.maxmpara, name)\n if scal == 'self':\n fact = maxm - minm\n if scal == 'logt':\n fact = np.log(maxm / minm)\n \n if fact == 0:\n print('name')\n print(name)\n raise Exception('')\n setattr(gmod.factpara, name, fact)\n\n if gmod.numbparaelem > 0:\n 
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1\n else:\n gmod.indxparagenrfulleleminit = -1\n \n\n ## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)\n for featpara in gdat.listfeatparalist:\n \n gmodfeat = getattr(gmod, featpara + 'para')\n \n ### elements\n #for strgtypepara in gdat.liststrgtypepara:\n # listname = getattr(gmod.namepara, strgtypepara + 'elem')\n # listfeat = [[] for l in gmod.indxpopl]\n # listfeatflat = []\n\n # for l in gmod.indxpopl:\n # \n # numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]\n # listfeat[l] = [[] for k in range(numb)]\n # for k in range(numb):\n # scal = getattr(gmod.scalpara, listname[l][k])\n # if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):\n # continue\n # if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):\n # continue\n # if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):\n # continue\n # \n # if strgtypepara == 'genr':\n # strgextn = 'pop%d' % l\n # else:\n # strgextn = ''\n # print('featpara')\n # print(featpara)\n # print('listname')\n # print(listname)\n # listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)\n # listfeatflat.append(listfeat[l][k])\n # setattr(gmodfeat, strgtypepara + 'elem', listfeat)\n # setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)\n \n ### groups of parameters inside the parameter vector\n ### 'base': all fixed-dimensional generative parameters\n ### 'full': all generative parameters\n for strggroppara in ['base', 'full']:\n indx = getattr(gmod, 'indxparagenr' + strggroppara)\n feat = [0. for k in indx]\n \n for attr, valu in gmod.indxpara.__dict__.items():\n \n if not np.isscalar(valu):\n continue\n\n scal = getattr(gmod.scalpara, attr)\n if not (scal == 'self' or scal == 'logt') and featpara == 'fact':\n continue\n\n if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):\n print('Mean or Std for non-Gaussian')\n continue\n \n if featpara == 'name':\n feat[valu] = attr\n else:\n feat[valu] = getattr(gmodfeat, attr)\n \n feat = np.array(feat)\n setattr(gmodfeat, 'genr' + strggroppara, feat)\n \n \n #print('gmod.minmpara')\n #for attr, varb in gmod.minmpara.__dict__.items():\n # print(attr, varb)\n #print('gmod.maxmpara')\n #for attr, varb in gmod.maxmpara.__dict__.items():\n # print(attr, varb)\n #print('gmod.scalpara')\n #for attr, varb in gmod.scalpara.__dict__.items():\n # print(attr, varb)\n #raise Exception('')\n\n ## population groups\n ### number of elements\n for strgvarb in ['numbelem', 'meanelem']:\n listindxpara = []\n if strgmodl == 'true':\n listpara = []\n for strg, valu in gmod.indxpara.__dict__.items():\n if strg.startswith(strgvarb + 'p'):\n listindxpara.append(valu)\n if strgmodl == 'true':\n listpara.append(getattr(gmod.this, strg))\n listindxpara = np.array(listindxpara)\n setattr(gmod.indxpara, strgvarb, listindxpara)\n if strgmodl == 'true':\n listpara = np.array(listpara)\n setattr(gmod, strgvarb, listpara)\n \n ### parameters of priors for element parameters\n gmod.indxpara.prioelem = []\n for strg, valu in gmod.indxpara.__dict__.items():\n if strg == 'dist' and np.isscalar(valu):\n gmod.indxpara.prioelem.append(valu)\n gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem) \n \n ### hyperparameters\n if gmod.typemodltran == 'pois':\n gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))\n else:\n gmod.indxpara.hypr = gmod.indxpara.prioelem\n \n ## generative base parameter indices for each scaling\n gmod.listindxparagenrbasescal = dict()\n 
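# filled below: for each scaling type (e.g. 'logt') this dictionary holds the indices of the base generative parameters with that scaling\n    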
for scaltype in gdat.listscaltype:\n gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]\n\n if gdat.booldiagmode:\n if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:\n raise Exception('')\n\n\ndef plot_lens(gdat):\n \n if gmod.boolelemdeflsubh:\n xdat = gdat.binspara.angl[1:] * gdat.anglfact\n lablxdat = gdat.labltotlpara.gang\n \n listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact\n listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact\n listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact\n listasym = [False, False, False]\n listydat = []\n for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):\n listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)\n \n for scalxdat in ['self', 'logt']:\n path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'\n tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \\\n lablydat=r'$\\alpha_n$ [$^{\\prime\\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])\n \n # pixel-convoltuion of the Sersic profile\n # temp -- y axis labels are wrong, should be per solid angle\n xdat = gdat.binspara.lgalsers * gdat.anglfact\n for n in range(gdat.numbindxsers + 1):\n for k in range(gdat.numbhalfsers + 1):\n if k != 5:\n continue\n path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)\n tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])\n #path = gdat.pathinitintr + 'sersprofcntr%04d%04d.pdf' % (n, k)\n #tdpy.plot_gene(path, xdat, gdat.sersprofcntr[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])\n path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)\n tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \\\n scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])\n path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)\n tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \\\n scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])\n \n xdat = gdat.binspara.angl * gdat.anglfact\n listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact\n listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact\n listindx = np.array([4., 2., 4., 10.])\n listydat = []\n listlabl = []\n for spec, size, indx in zip(listspec, listsize, listindx):\n listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))\n listlabl.append('$R_e = %.3g ^{\\prime\\prime}, n = %.2g$' % (size * gdat.anglfact, indx))\n path = gdat.pathinitintr + 'sersprof.pdf'\n tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \\\n listlegd=listlegd, listhlin=1e-7, limtydat=[1e-8, 1e0])\n \n minmredshost = 0.01\n maxmredshost = 0.4\n minmredssour = 0.01\n maxmredssour = 2.\n numbreds = 200\n retr_axis(gdat, 'redshost')\n retr_axis(gdat, 'redssour')\n \n gdat.meanpara.adishost = np.empty(numbreds)\n for k in range(numbreds):\n gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])\n \n asca = 0.1 / gdat.anglfact\n acut = 1. 
/ gdat.anglfact\n \n minmmass = np.zeros((numbreds + 1, numbreds + 1))\n maxmmass = np.zeros((numbreds + 1, numbreds + 1))\n for k, redshost in enumerate(gdat.binspara.redshost):\n for n, redssour in enumerate(gdat.binspara.redssour):\n if redssour > redshost:\n adishost = gdat.adisobjt(redshost)\n adissour = gdat.adisobjt(redssour)\n adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost\n factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)\n minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)\n maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)\n \n #valulevl = np.linspace(7.5, 9., 5)\n valulevl = [7.0, 7.3, 7.7, 8., 8.6]\n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, 10, colors='g', levels=valulevl)\n axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')\n axis.set_xlabel(r'$z_{\\rm{hst}}$')\n axis.set_ylabel(r'$z_{\\rm{src}}$')\n axis.set_title(r'$M_{c,min}$ [$M_{\\odot}$]')\n path = gdat.pathinitintr + 'massredsminm.pdf'\n plt.tight_layout()\n figr.savefig(path)\n plt.close(figr)\n \n valulevl = np.linspace(9., 11., 20)\n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)\n cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, 10, colors='g', levels=valulevl)\n axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')\n axis.set_xlabel('$z_{hst}$')\n axis.set_ylabel('$z_{src}$')\n axis.set_title(r'$M_{c,max}$ [$M_{\\odot}$]')\n path = gdat.pathinitintr + 'massredsmaxm.pdf'\n plt.colorbar(imag) \n plt.tight_layout()\n figr.savefig(path)\n plt.close(figr)\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)\n axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. 
* gdat.maxmgangdata * 1e-3)\n axis.set_xlabel('$z_h$')\n axis.set_yscale('log')\n axis.set_ylabel(r'$\\lambda$ [kpc]')\n path = gdat.pathinitintr + 'wlenreds.pdf'\n plt.tight_layout()\n figr.savefig(path)\n plt.close(figr)\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n fracacutasca = np.logspace(-1., 2., 20)\n mcut = retr_mcutfrommscl(fracacutasca)\n axis.lognp.log(fracacutasca, mcut)\n axis.set_xlabel(r'$\\tau_n$')\n axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')\n axis.axhline(1., ls='--')\n path = gdat.pathinitintr + 'mcut.pdf'\n plt.tight_layout()\n figr.savefig(path)\n plt.close(figr)\n \n\ndef retr_listrtagprev(strgcnfg, pathpcat):\n \n # list of PCAT run plot outputs\n pathimag = pathpcat + '/imag/'\n listrtag = fnmatch.filter(os.listdir(pathimag), '2*')\n \n listrtagprev = []\n for rtag in listrtag:\n strgstat = pathpcat + '/data/outp/' + rtag\n \n if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and strgcnfg + '_' + rtag[16:].split('_')[-1] == rtag[16:]:\n listrtagprev.append(rtag) \n \n listrtagprev.sort()\n\n return listrtagprev\n\n\ndef make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):\n \n hand, labl = axis.get_legend_handles_labels()\n legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)\n legd.get_frame().set_fill(True)\n legd.get_frame().set_facecolor('white')\n\n\ndef setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):\n \n if popl == 'full':\n indxpopltemp = gmod.indxpopl\n elif popl != 'none':\n indxpopltemp = [popl]\n \n if ener == 'full':\n indxenertemp = gdat.indxener\n elif ener != 'none':\n indxenertemp = [ener]\n \n if evtt == 'full':\n indxevtttemp = gdat.indxevtt\n elif evtt != 'none':\n indxevtttemp = [evtt]\n \n if back == 'full':\n gmod.indxbacktemp = gmod.indxback\n elif isinstance(back, int):\n gmod.indxbacktemp = np.array([back])\n \n liststrgvarb = []\n if iele != 'none':\n for l in gmod.indxpopl:\n if iele == 'full':\n listiele = np.arange(gmod.maxmpara.numbelem)\n else:\n listiele = [iele]\n for k in listiele:\n liststrgvarb.append(strgvarb + 'pop%d%04d' % (l, k))\n \n if popl != 'none' and ener == 'none' and evtt == 'none' and back == 'none' and iele == 'none':\n for l in indxpopltemp:\n liststrgvarb.append(strgvarb + 'pop%d' % l)\n \n if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr != 'none':\n for e in indxisfrtemp:\n liststrgvarb.append(strgvarb + 'isf%d' % e)\n \n if popl == 'none' and ener != 'none' and evtt != 'none' and back == 'none':\n for i in indxenertemp:\n for m in indxevtttemp:\n liststrgvarb.append(strgvarb + 'en%02devt%d' % (i, m))\n \n if popl == 'none' and ener != 'none' and evtt == 'none' and back != 'none':\n for c in gmod.indxbacktemp:\n for i in indxenertemp:\n liststrgvarb.append(strgvarb + 'back%04den%02d' % (c, i))\n \n if popl == 'none' and ener == 'none' and evtt == 'none' and back != 'none':\n for c in gmod.indxbacktemp:\n liststrgvarb.append(strgvarb + 'back%04d' % c)\n \n if popl == 'none' and ener != 'none' and evtt == 'none' and back == 'none':\n for i in indxenertemp:\n liststrgvarb.append(strgvarb + 'en%02d' % i)\n \n if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr == 'none':\n liststrgvarb.append(strgvarb)\n \n if gdat.booldiagmode:\n for strgvarb in liststrgvarb:\n if liststrgvarb.count(strgvarb) != 1:\n print('liststrgvarb')\n print(liststrgvarb)\n 
print('popl')\n print(popl)\n print('ener')\n print(ener)\n print('evtt')\n print(evtt)\n print('back')\n print(back)\n print('isfr')\n print(isfr)\n print('iele')\n print(iele)\n raise Exception('')\n \n return liststrgvarb\n\n\ndef setp_varb(gdat, strgvarbbase, valu=None, minm=None, maxm=None, scal='self', lablroot=None, lablunit='', mean=None, stdv=None, cmap=None, numbbins=10, \\\n popl='none', ener='none', evtt='none', back='none', isfr='none', iele='none', \\\n boolinvr=False, \\\n strgmodl=None, strgstat=None, \\\n ):\n '''\n Set up variable values across all models (true and fitting) as well as all populations, energy bins, \n event bins, background components, and Sersic components \n '''\n \n # determine the list of models\n if strgmodl is None:\n if gdat.typedata == 'mock':\n liststrgmodl = ['true', 'fitt', 'plot']\n else:\n liststrgmodl = ['fitt', 'plot']\n else:\n if strgmodl == 'true' or strgmodl == 'plot' or strgmodl == 'refr':\n liststrgmodl = [strgmodl]\n else:\n liststrgmodl = ['fitt', 'plot']\n print('liststrgmodl')\n print(liststrgmodl)\n for strgmodl in liststrgmodl:\n \n if strgmodl == 'plot':\n gmod = gdat.fitt\n gmodoutp = gdat\n else:\n gmod = getattr(gdat, strgmodl)\n gmodoutp = gmod\n\n # get the list of names of the variable\n liststrgvarbnone = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, 'none')\n \n if iele != 'none':\n liststrgvarb = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, iele)\n else:\n liststrgvarb = liststrgvarbnone\n\n # set the values of each variable in the list\n for strgvarb in liststrgvarb:\n if minm is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.minmpara, strgvarb, minm)\n \n if maxm is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.maxmpara, strgvarb, maxm)\n \n if mean is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, mean)\n \n if stdv is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, stdv)\n \n if valu is not None:\n if strgstat is None:\n print('strgvarb')\n print(strgvarb)\n print('strgmodl')\n print(strgmodl)\n print('valu')\n print(valu)\n print('')\n setp_varbcore(gdat, strgmodl, gmodoutp, strgvarb, valu)\n elif strgstat == 'this':\n setp_varbcore(gdat, strgmodl, gmodoutp.this, strgvarb, valu)\n \n if scal is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.scalpara, strgvarb, scal)\n\n if lablroot is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.lablrootpara, strgvarb, lablroot)\n\n if lablunit is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.lablunitpara, strgvarb, lablunit)\n\n if cmap is not None:\n setp_varbcore(gdat, strgmodl, gmodoutp.cmappara, strgvarb, cmap)\n\n setp_varbcore(gdat, strgmodl, gmodoutp.numbbinspara, strgvarb, numbbins)\n \n # create limt, bins, mean, and delt\n if minm is not None and maxm is not None or mean is not None and stdv is not None:\n # determine minima and maxima for Gaussian or log-Gaussian distributed parameters\n if mean is not None:\n minm = mean - gdat.numbstdvgaus * stdv\n maxm = mean + gdat.numbstdvgaus * stdv\n \n # uniformly-distributed\n if scal == 'self' or scal == 'pois' or scal == 'gaus':\n binsunif = np.linspace(minm, maxm, numbbins + 1)\n if scal == 'logt' or scal == 'powr':\n binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)\n if gdat.booldiagmode:\n if minm <= 0.:\n raise Exception('')\n if scal == 'asnh':\n binsunif = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numbbins + 1)\n \n if boolinvr:\n binsunif = 
binsunif[::-1]\n \n meanparaunif = (binsunif[1:] + binsunif[:-1]) / 2.\n \n if scal == 'self' or scal == 'pois' or scal == 'gaus':\n meanpara = meanparaunif\n bins = binsunif\n minmunif = minm\n maxmunif = maxm\n if scal == 'logt' or scal == 'powr':\n meanpara = 10**meanparaunif\n bins = 10**binsunif\n minmunif = np.log10(minm)\n maxmunif = np.log10(maxm)\n if scal == 'asnh':\n meanpara = np.sinh(meanparaunif)\n bins = np.sinh(binsunif)\n minmunif = np.arcsinh(minm)\n maxmunif = np.arcsinh(maxm)\n\n delt = np.diff(bins) \n limt = np.array([minm, maxm]) \n \n # 'self' is not yet defined\n if scal == 'asnh' or scal == 'logt' or scal == 'powr':\n listvalutickmajr, listlabltickmajr, listvalutickminr, listlabltickminr = tdpy.retr_valulabltick(minm, maxm, scal)\n setattr(gmodoutp.labltickmajrpara, strgvarb, listlabltickmajr)\n setattr(gmodoutp.valutickmajrpara, strgvarb, listvalutickmajr)\n setattr(gmodoutp.labltickminrpara, strgvarb, listlabltickminr)\n setattr(gmodoutp.valutickminrpara, strgvarb, listvalutickminr)\n \n #labltick = np.empty(gdat.numbtickcbar, dtype=object)\n #for k in range(gdat.numbtickcbar):\n # if scal == 'asnh':\n # valutick[k] = np.sinh(tickunif[k])\n # if scal == 'logt' or scal == 'powr':\n # valutick[k] = 10**(tickunif[k])\n\n # # avoid very small, but nonzero central values in the residual count color maps\n # if strgcbar == 'cntpresi' and np.fabs(valutick[k]) < 1e-5:\n # valutick[k] = 0.\n\n # if strgcbar == 'cntpdata' and np.amax(valutick) > 1e3:\n # labltick[k] = '%d' % valutick[k]\n # else:\n # labltick[k] = '%.3g' % valutick[k]\n\n setattr(gmodoutp.limtpara, strgvarb, limt)\n setattr(gmodoutp.binspara, strgvarb, bins)\n setattr(gmodoutp.meanpara, strgvarb, meanpara)\n setattr(gmodoutp.deltpara, strgvarb, delt)\n \n\ndef retr_ticklabltemp(gdat, strgcbar):\n \n minm = getattr(gdat.minmpara, strgcbar)\n maxm = getattr(gdat.maxmpara, strgcbar)\n scal = getattr(gdat.scalpara, strgcbar)\n numb = gdat.numbtickcbar - 1\n retr_axis(gdat, strgcbar, numb=numb)\n\n minmscal = minm\n if scal == 'asnh':\n minmscal = np.arcsinh(minmscal)\n if scal == 'logt':\n minmscal = np.log10(minmscal)\n maxmscal = maxm\n if scal == 'asnh':\n maxmscal = np.arcsinh(maxmscal)\n if scal == 'logt':\n maxmscal = np.log10(maxmscal)\n\n tickscal = np.linspace(minmscal, maxmscal, gdat.numbtickcbar)\n labl = np.empty(gdat.numbtickcbar, dtype=object)\n tick = np.copy(tickscal)\n for k in range(gdat.numbtickcbar):\n if scal == 'asnh':\n tick[k] = np.sinh(tickscal[k])\n elif scal == 'logt':\n tick[k] = 10**(tickscal[k])\n\n # avoid very small, but nonzero central values in the residual count color maps\n if strgcbar == 'cntpresi' and np.fabs(tick[k]) < 1e-5:\n tick[k] = 0.\n\n if strgcbar == 'cntpdata' and np.amax(tick) > 1e3:\n labl[k] = '%d' % tick[k]\n else:\n labl[k] = '%.3g' % tick[k]\n setattr(gdat.tickpara, strgcbar, tick)\n\n\ndef retr_axistemp(gdat, strgvarb, strgmodl=None, boolinvr=False):\n \n if strgmodl is None:\n listgdattemp = [gdat]\n for strgmodl in gdat.liststrgmodl:\n listgdattemp.append(getattr(gdat, strgmodl))\n elif strgmodl == 'fitt' or strgmodl == 'true':\n listgdattemp = [getattr(gdat, strgmodl)]\n elif strgmodl == 'allm':\n listgdattemp = []\n for strgmodl in gdat.liststrgmodl:\n listgdattemp = getattr(gdat, strgmodl)\n \n for gdattemp in listgdattemp:\n minm = getattr(gdattemp.minmpara, strgvarb)\n maxm = getattr(gdattemp.maxmpara, strgvarb)\n numb = getattr(gdattemp.numbbinspara, strgvarb)\n scal = getattr(gdattemp.scalpara, strgvarb)\n\n if scal == 'self' or scal == 'pois' or 
scal == 'gaus':\n binsscal = np.linspace(minm, maxm, numb + 1)\n if scal == 'logt':\n print('minm')\n print(minm)\n print('maxm')\n print(maxm)\n print('strgvarb')\n print(strgvarb)\n binsscal = np.linspace(np.log10(minm), np.log10(maxm), numb + 1)\n print('')\n if gdat.booldiagmode:\n if minm <= 0.:\n raise Exception('')\n\n if scal == 'asnh':\n binsscal = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numb + 1)\n \n if boolinvr:\n binsscal = binsscal[::-1]\n \n meanvarbscal = (binsscal[1:] + binsscal[:-1]) / 2.\n \n if scal == 'self' or scal == 'pois' or scal == 'gaus':\n meanvarb = meanvarbscal\n bins = binsscal\n if scal == 'logt':\n meanvarb = 10**meanvarbscal\n bins = 10**binsscal\n if scal == 'asnh':\n meanvarb = np.sinh(meanvarbscal)\n bins = np.sinh(binsscal)\n\n delt = np.diff(bins) \n limt = np.array([np.amin(bins), np.amax(bins)]) \n \n setattr(gdattemp.limtpara, strgvarb, limt)\n setattr(gdattemp.binspara, strgvarb, bins)\n setattr(gdattemp.meanpara, strgvarb, meanvarb)\n setattr(gdattemp.deltpara, strgvarb, delt)\n\n\ndef setp_varbcore(gdat, strgmodl, gdattemp, strgvarbtemp, valu):\n \n # check if the variable is defined by the user\n try:\n valutemp = getattr(gdattemp, strgvarbtemp)\n if valutemp is None:\n raise\n if gdat.typeverb > 0:\n print('Received custom value for %s, %s: %s' % (strgvarbtemp, strgmodl, valutemp))\n # if not defined or defined as None, define it\n except:\n \n setattr(gdattemp, strgvarbtemp, valu)\n\n\ndef intp_sinc(gdat, lgal, bgal):\n\n intpsinc = 4. * gdat.numbsidepsfn**2 * np.sum(gdat.temppsfn * sinc(gdat.numbsidepsfn * (gdat.gridpsfnlgal + lgal) - gdat.gridpsfnlgal) * \\\n sinc(gdat.numbsidepsfn * (gdat.gridpsfnbgal + bgal) - gdat.gridpsfnbgal))\n\n return intpsinc\n\n\ndef retr_fluxbrgt(gdat, lgal, bgal, flux):\n\n if lgal.size == 0:\n fluxbrgt = np.array([0.])\n fluxbrgtassc = np.array([0.])\n else:\n indxbrgt = np.argmax(flux)\n fluxbrgt = flux[indxbrgt]\n\n return fluxbrgt, fluxbrgtassc\n\n\ndef init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot):\n \n figrsize = (gdat.sizeimag, gdat.sizeimag)\n figr, axis = plt.subplots(figsize=figrsize)\n \n nameplot = strgplot\n\n if gdat.numbener > 1:\n nameplot += 'en%02d' % gdat.indxenerincl[indxenerplot]\n \n if gdat.numbener > 1:\n if indxevttplot == -1:\n nameplot += 'evtA'\n else:\n nameplot += 'evt%d' % gdat.indxevttincl[indxevttplot]\n \n if gdat.fitt.numbpopl > 1:\n if indxpoplplot == -1:\n nameplot += 'popA'\n else:\n nameplot += 'pop%d' % indxpoplplot\n\n path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)\n \n print('gdat.fitt.labltotlpara.lgalpop0')\n print(gdat.fitt.labltotlpara.lgalpop0)\n print('gdat.fitt.labltotlpara.bgalpop0')\n print(gdat.fitt.labltotlpara.bgalpop0)\n axis.set_xlabel(gdat.fitt.labltotlpara.lgalpop0)\n axis.set_ylabel(gdat.fitt.labltotlpara.bgalpop0)\n titl = ''\n if indxenerplot is not None and gdat.numbener > 1 and strgplot.endswith('cnts'):\n titl = gdat.strgener[indxenerplot]\n if indxevttplot is not None and gdat.numbevtt > 1 and strgplot.endswith('cnts'):\n titl += ' ' + gdat.strgevtt[indxevttplot]\n axis.set_title(titl)\n\n return figr, axis, path\n\n\ndef draw_frambndr(gdat, axis):\n \n outr = max(gdat.frambndrmodl, gdat.frambndrdata)\n axis.set_xlim([-outr, outr])\n axis.set_ylim([-outr, outr])\n innr = min(gdat.frambndrmodl, gdat.frambndrdata)\n axis.axvline(innr, ls='--', alpha=gdat.alphbndr, color='black')\n axis.axvline(-innr, ls='--', alpha=gdat.alphbndr, color='black')\n 
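# --- Illustrative sketch (standalone) ---
# setp_varbcore() above only fills in an attribute when it is missing or None, so a
# value supplied by the user is never overwritten.  The same "user override wins"
# pattern can be written compactly with getattr(); the helper name is made up.
def _sketch_setdefaultattr(objt, name, valu):
    if getattr(objt, name, None) is None:
        setattr(objt, name, valu)
    return getattr(objt, name)
# usage:
#   class _Bag: pass
#   bag = _Bag()
#   _sketch_setdefaultattr(bag, 'numbbins', 10)   # sets 10
#   _sketch_setdefaultattr(bag, 'numbbins', 20)   # keeps 10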
axis.axhline(innr, ls='--', alpha=gdat.alphbndr, color='black')\n axis.axhline(-innr, ls='--', alpha=gdat.alphbndr, color='black')\n\n\ndef retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot=None, indxevttplot=-1, booltdim=False, imag=None):\n \n draw_frambndr(gdat, axis)\n \n # take the relevant energy and PSF bins\n if indxenerplot is not None:\n if indxevttplot == -1:\n maps = np.sum(maps[indxenerplot, ...], axis=1)\n else:\n maps = maps[indxenerplot, :, indxevttplot]\n \n # project the map to 2D\n if gdat.typepixl == 'heal':\n maps = tdpy.retr_cart(maps, indxpixlrofi=gdat.indxpixlrofi, numbsideinpt=gdat.numbsideheal, \\\n minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \\\n minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata)\n \n if gdat.typepixl == 'cart':\n shap = [gdat.numbsidecart] + list(maps.shape)\n shap[1] = gdat.numbsidecart\n shapflat = list(maps.shape)\n shapflat[0] = gdat.numbpixlfull\n mapstemp = np.zeros(shapflat)\n if maps.size == gdat.indxpixlrofi.size:\n mapstemp[gdat.indxpixlrofi, ...] = maps\n else:\n mapstemp[:, ...] = maps\n maps = mapstemp.reshape(shap).swapaxes(0, 1)\n\n # temp -- this is needed to bring the Fermi-LAT map to the right direction\n #maps = fliplr(maps)\n\n # rescale the map\n if strgmodl is not None:\n gmod = getattr(gdat, strgmodl)\n else:\n gmod = gdat\n\n scal = getattr(gdat.scalpara, strgcbar)\n cmap = getattr(gdat.cmappara, strgcbar)\n vmin = getattr(gdat.minmpara, strgcbar)\n vmax = getattr(gdat.maxmpara, strgcbar)\n if scal == 'asnh':\n maps = np.arcsinh(maps)\n if scal == 'logt':\n maps = np.log10(maps)\n if imag is None:\n imag = axis.imshow(maps, cmap=cmap, origin='lower', extent=gdat.exttrofi, interpolation='nearest', vmin=vmin, vmax=vmax, alpha=gdat.alphmaps)\n return imag\n else:\n imag.set_data(maps)\n \n\ndef make_cbar(gdat, axis, imag, strgvarb):\n\n # make a color bar\n valutickmajr = getattr(gdat.valutickmajrpara, strgvarb)\n labltickmajr = getattr(gdat.labltickmajrpara, strgvarb)\n \n print('valutickmajr')\n print(valutickmajr)\n print('labltickmajr')\n print(labltickmajr)\n cbar = plt.colorbar(imag, ax=axis, fraction=0.05, aspect=15)\n cbar.set_ticks(valutickmajr)\n cbar.set_ticklabels(labltickmajr)\n \n return cbar\n\n\ndef make_legdmaps(gdat, strgstat, strgmodl, axis, mosa=False, assc=False):\n \n gmod = getattr(gdat, strgmodl)\n \n # transdimensional elements\n if strgmodl == 'fitt' and (strgstat == 'pdfn' and gdat.boolcondcatl or strgstat == 'this') and gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n colr = retr_colr(gdat, strgstat, strgmodl, l)\n if strgstat == 'pdfn':\n labl = 'Condensed %s %s' % (gmod.legd, gmod.legdpopl[l])\n else:\n labl = 'Sample %s %s' % (gmod.legd, gmod.legdpopl[l])\n if not gmod.maxmpara.numbelem[l] == 0:\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \\\n label=labl, marker=gmod.listelemmrkr[l], lw=gdat.mrkrlinewdth, color=colr)\n \n for q in gdat.indxrefr:\n if not np.amax(gdat.refr.numbelem[q]) == 0:\n if assc:\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \\\n label=gdat.refr.lablhits[q], marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \\\n label=gdat.refr.lablmiss[q], 
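# --- Illustrative sketch (standalone; assumes only numpy and matplotlib) ---
# retr_imag() above stretches the map before display: arcsinh for the 'asnh' scaling
# and log10 for 'logt', then shows it with imshow and a colorbar whose ticks are
# computed on the same stretched axis.  A minimal version of that stretch-and-show:
import numpy as np
import matplotlib.pyplot as plt

def _sketch_showmaps(maps, scal='asnh'):
    if scal == 'asnh':
        maps = np.arcsinh(maps)
    elif scal == 'logt':
        maps = np.log10(maps)
    figr, axis = plt.subplots()
    imag = axis.imshow(maps, origin='lower', interpolation='nearest')
    figr.colorbar(imag, ax=axis)
    return figr, axis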
marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])\n else:\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \\\n label=gdat.refr.lablelem[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])\n \n # fixed-dimensional objects\n if strgmodl == 'fitt':\n if gmod.boollens:\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \\\n label='%s Source' % gmod.lablmodl, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)\n \n if gmod.typeemishost != 'none':\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \\\n label='%s Host' % gmod.lablmodl, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)\n \n if gdat.typedata == 'mock':\n if gmod.boollens:\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \\\n label='%s Source' % gdat.refr.labl, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)\n \n if gmod.typeemishost != 'none':\n axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \\\n label='%s Host' % gdat.refr.labl, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)\n \n temphand, temp = axis.get_legend_handles_labels()\n numblabl = len(temp)\n \n if numblabl == 4:\n numbcols = 2\n else:\n numbcols = 3\n if mosa:\n axis.legend(bbox_to_anchor=[1., 1.15], loc='center', ncol=numbcols)\n else:\n axis.legend(bbox_to_anchor=[0.5, 1.15], loc='center', ncol=numbcols)\n \n\ndef supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot=-1, assc=False):\n \n gmod = getattr(gdat, strgmodl)\n gmodstat = getattr(gmod, strgstat)\n \n # associations with the reference elements\n for q in gdat.indxrefr:\n if gdat.refr.numbelem[q] > 0:\n if indxpoplplot == -1:\n listindxpoplplot = gmod.indxpopl\n else:\n listindxpoplplot = [indxpoplplot]\n for l in listindxpoplplot:\n reframpl = gdat.refr.dictelem[q][gdat.refr.nameparagenrelemampl[q]][0, :]\n mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl, gdat.refr.nameparagenrelemampl[q])\n lgal = np.copy(gdat.refr.dictelem[q]['lgal'][0, :])\n bgal = np.copy(gdat.refr.dictelem[q]['bgal'][0, :])\n numbelem = int(gdat.refr.numbelem[q])\n \n if gdatmodi is not None and gmod.numbparaelem > 0 and assc: \n ### hit\n indx = gdatmodi.this.indxelemrefrasschits[q][l]\n if indx.size > 0:\n axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, label=gdat.refr.lablhits, \\\n marker=gdat.refrlistmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])\n ### missed\n indx = gdatmodi.this.indxelemrefrasscmiss[q][l]\n else:\n indx = np.arange(lgal.size)\n \n if indx.size > 0: \n axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, facecolor='none', \\\n label=gdat.refr.listlablmiss, marker=gdat.refr.listmrkrmiss[q], \\\n lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])\n \n sizexoff = gdat.maxmgangdata * 0.05 * gdat.anglfact\n sizeyoff = gdat.maxmgangdata * 0.05 * gdat.anglfact\n if 'etag' in gdat.refr.namepara.elem[q]:\n for k in range(indx.size):\n axis.text(gdat.anglfact * lgal[indx[k]] + sizexoff, gdat.anglfact * bgal[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], 
\\\n verticalalignment='center', horizontalalignment='center', \\\n color='red', fontsize=1)\n\n # temp -- generalize this to input refrlgalhost vs.\n if gdat.typedata == 'mock':\n ## host galaxy position\n if gmod.typeemishost != 'none':\n for e in gmod.indxsersfgrd:\n lgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]\n bgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]\n axis.scatter(gdat.anglfact * lgalhost, gdat.anglfact * bgalhost, facecolor='none', alpha=0.7, \\\n label='%s Host %d' % (gdat.refr.labl, e), s=300, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)\n if gmod.boollens:\n ## host galaxy Einstein radius\n for e in gmod.indxsersfgrd:\n truelgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]\n truebgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]\n truebeinhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]\n axis.add_patch(plt.Circle((gdat.anglfact * truelgalhost, \\\n gdat.anglfact * truebgalhost), \\\n gdat.anglfact * truebeinhost, \\\n edgecolor=gdat.refr.colr, facecolor='none', lw=gdat.mrkrlinewdth))\n \n if gmod.boollens:\n ## source galaxy position\n axis.scatter(gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.lgalsour], \\\n gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.bgalsour], \\\n facecolor='none', \\\n alpha=0.7, \\\n #alpha=gdat.alphelem, \\\n label='%s Source' % gdat.refr.labl, s=300, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)\n \n # model catalog\n if indxpoplplot == -1:\n listindxpoplplot = gmod.indxpopl\n else:\n listindxpoplplot = [indxpoplplot]\n for l in listindxpoplplot:\n if gdatmodi is not None:\n if gmod.numbparaelem > 0:\n colr = retr_colr(gdat, strgstat, strgmodl, l)\n mrkrsize = retr_mrkrsize(gdat, strgmodl, gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[gmod.nameparagenrelemampl[l]][l]], gmod.nameparagenrelemampl[l])\n if 'lgal' in gdatmodi.this.indxparagenrfullelem:\n lgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['lgal']]\n bgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['bgal']]\n else:\n gang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['gang']]\n aang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['aang']]\n lgal, bgal = retr_lgalbgal(gang, aang)\n axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, alpha=gdat.alphelem, label='Sample', marker=gmod.listelemmrkr[l], \\\n lw=gdat.mrkrlinewdth, color=colr)\n\n ## source\n if gmod.boollens:\n lgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.lgalsour]\n bgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.bgalsour]\n axis.scatter(gdat.anglfact * lgalsour, gdat.anglfact * bgalsour, facecolor='none', \\\n alpha=gdat.alphelem, \\\n label='%s Source' % gmod.lablpara, s=300, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)\n \n if gmod.typeemishost != 'none':\n ## host\n lgalhost = [[] for e in gmod.indxsersfgrd]\n bgalhost = [[] for e in gmod.indxsersfgrd]\n for e in gmod.indxsersfgrd:\n lgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]\n bgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]\n axis.scatter(gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e], facecolor='none', \\\n alpha=gdat.alphelem, \\\n label='%s Host' % gmod.lablpara, s=300, marker='s', 
lw=gdat.mrkrlinewdth, color=gmod.colr)\n if gmod.boollens:\n beinhost = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]\n axis.add_patch(plt.Circle((gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e]), \\\n gdat.anglfact * beinhost, edgecolor=gmod.colr, facecolor='none', \\\n lw=gdat.mrkrlinewdth, ls='--'))\n \n # temp\n if strgstat == 'pdfn' and gdat.boolcondcatl and gmod.numbparaelem > 0:\n lgal = np.zeros(gdat.numbprvlhigh)\n bgal = np.zeros(gdat.numbprvlhigh)\n ampl = np.zeros(gdat.numbprvlhigh)\n cntr = 0\n for r in gdat.indxstkscond:\n if r in gdat.indxprvlhigh:\n lgal[cntr] = gdat.dictglob['poststkscond'][r]['lgal'][0]\n bgal[cntr] = gdat.dictglob['poststkscond'][r]['bgal'][0]\n # temp -- this does not allow sources with different spectra to be assigned to the same stacked sample\n ampl[cntr] = gdat.dictglob['poststkscond'][r][gmod.nameparagenrelemampl[l]][0]\n cntr += 1\n mrkrsize = retr_mrkrsize(gdat, strgmodl, ampl, gmod.nameparagenrelemampl[l])\n \n colr = retr_colr(gdat, strgstat, strgmodl, l)\n axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \\\n label='Condensed', marker=gmod.listelemmrkr[l], color='black', lw=gdat.mrkrlinewdth)\n for r in gdat.indxstkscond:\n lgal = np.array([gdat.dictglob['liststkscond'][r]['lgal']])\n bgal = np.array([gdat.dictglob['liststkscond'][r]['bgal']])\n axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \\\n marker=gmod.listelemmrkr[l], color='black', alpha=0.1, lw=gdat.mrkrlinewdth)\n\n\ndef retr_colr(gdat, strgstat, strgmodl, indxpopl=None):\n \n if strgmodl == 'true':\n if indxpopl is None:\n colr = gdat.refr.colr\n else:\n colr = gdat.refr.colrelem[indxpopl]\n if strgmodl == 'fitt':\n if strgstat == 'this' or strgstat == 'pdfn':\n if indxpopl is None:\n colr = gmod.colr\n else:\n colr = gmod.colrelem[indxpopl]\n if strgstat == 'mlik':\n colr = 'r'\n \n return colr\n\n\ndef retr_levipost(listllik):\n \n minmlistllik = np.amin(listllik)\n levipost = np.log(np.mean(1. 
/ np.exp(listllik - minmlistllik))) + minmlistllik\n \n return levipost\n\n\ndef retr_infofromlevi(pmeallik, levi):\n \n info = pmeallik - levi\n\n return info\n\n\ndef retr_jcbn():\n \n fluxpare, lgalpare, bgalpare, fluxauxi, lgalauxi, bgalauxi = sympy.symbols('fluxpare lgalpare bgalpare fluxauxi lgalauxi bgalauxi')\n \n matr = sympy.Matrix([[ fluxpare, fluxauxi, 0, 0, 0, 0], \\\n [-fluxpare, 1 - fluxauxi, 0, 0, 0, 0], \\\n [-lgalauxi, 0, 1, 1 - fluxauxi, 0, 0], \\\n [-lgalauxi, 0, 1, -fluxauxi, 0, 0], \\\n [-bgalauxi, 0, 0, 0, 1, 1 - fluxauxi], \\\n [-bgalauxi, 0, 0, 0, 1, -fluxauxi]])\n\n jcbn = matr.det()\n\n return jcbn\n\n# f1 = uf f0\n# f2 = (1 - uf) f0\n# x1 = x0 + (1 - uf) ux\n# x2 = x0 - uf ux\n# y1 = y0 + (1 - uf) uy\n# y2 = y0 - uf uy\n\n# f1/uf f1/f0 f1/x0 f1/ux f1/y0 f1/uy\n# f2/uf f2/f0 f2/x0 f2/ux f2/y0 f2/uy\n# x1/uf x1/f0 x1/x0 x1/ux x1/y0 x1/uy\n# x2/uf x2/f0 x2/x0 x2/ux x2/y0 x2/uy\n# y1/uf y1/f0 y1/x0 y1/ux y1/y0 y1/uy\n# y2/uf y2/f0 y2/x0 y2/ux y2/y0 y2/uy\n\n# f0 uf 0 0 0 0\n# -f0 1 - uf 0 0 0 0\n# -ux 0 1 1 - uf 0 0\n# -ux 0 1 -uf 0 0\n# -uy 0 0 0 1 1 - uf\n# -uy 0 0 0 1 -uf\n\n# f0\n#retr_jcbn()\n\ndef retr_angldist(gdat, lgalfrst, bgalfrst, lgalseco, bgalseco):\n \n # temp -- heal does not work when the dimension of lgalfrst is 1\n if gdat.typepixl == 'heal':\n dir1 = np.array([lgalfrst, bgalfrst])\n dir2 = np.array([lgalseco, bgalseco])\n angldist = hp.rotator.angdist(dir1, dir2)\n else:\n angldist = np.sqrt((lgalfrst - lgalseco)**2 + (bgalfrst - bgalseco)**2)\n\n return angldist\n\n\ndef retr_deflextr(gdat, indxpixlelem, sher, sang):\n \n factcosi = sher * np.cos(2. * sang)\n factsine = sher * np.cos(2. * sang)\n defllgal = factcosi * gdat.lgalgrid[indxpixlelem] + factsine * gdat.bgalgrid[indxpixlelem]\n deflbgal = factsine * gdat.lgalgrid[indxpixlelem] - factcosi * gdat.bgalgrid[indxpixlelem]\n \n return np.vstack((defllgal, deflbgal)).T \n\n\ndef readfile(path):\n\n print('Reading %s...' 
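# --- Illustrative sketch (standalone) ---
# retr_deflextr() above evaluates the deflection of a constant external shear.  Under
# the usual lensing convention the two shear components are g1 = sher*cos(2*sang) and
# g2 = sher*sin(2*sang), giving defl_x = g1*x + g2*y and defl_y = g2*x - g1*y; note
# that the function above computes both factors with np.cos, and whether that is
# intentional or a typo for np.sin in the second factor cannot be told from this file
# alone.  Standard-convention version for comparison:
import numpy as np

def _sketch_deflextr(lgalgrid, bgalgrid, sher, sang):
    factcosi = sher * np.cos(2. * sang)
    factsine = sher * np.sin(2. * sang)
    defllgal = factcosi * lgalgrid + factsine * bgalgrid
    deflbgal = factsine * lgalgrid - factcosi * bgalgrid
    return np.vstack((defllgal, deflbgal)).T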
% path)\n\n filepick = open(path + '.p', 'rb')\n filearry = h5py.File(path + '.h5', 'r')\n gdattemptemp = pickle.load(filepick)\n \n for attr in filearry:\n setattr(gdattemptemp, attr, filearry[attr][()])\n\n filepick.close()\n filearry.close()\n \n if 'gdatfinl' in path or 'gdatinit' in path:\n if hasattr(gdattemptemp, 'edis') and gdattemptemp.edis is not None and hasattr(gdattemptemp, 'binsener'):\n gdattemptemp.edisintp = sp.interpolate.interp1d(gdattemptemp.binsener, gdattemptemp.edis, fill_value='extrapolate')\n gdattemptemp.adisobjt = sp.interpolate.interp1d(gdattemptemp.redsintp, gdattemptemp.adisintp, fill_value='extrapolate')\n gdattemptemp.redsfromdlosobjt = sp.interpolate.interp1d(gdattemptemp.adisintp * gdattemptemp.redsintp, \\\n gdattemptemp.redsintp, fill_value='extrapolate')\n \n return gdattemptemp\n\n\ndef init_stat(gdat):\n \n # construct the initial state\n if gdat.typeverb > 0:\n print('Initializing the sampler state...')\n print('inittype')\n print(gdat.inittype)\n \n gmod = gdat.fitt\n \n ## initialization\n ### initialize the unit sample vector randomly\n gmod.this.paragenrunitfull = np.random.rand(gmod.numbparagenrfull)\n gmod.this.paragenrscalfull = np.empty(gmod.numbparagenrfull)\n\n ## impose user-specified initial state\n ### number of elements\n ## create dummy indxparagenrfullelem \n gmod.this.indxparagenrfullelem = None\n if gmod.numbparaelem > 0:\n if gdat.inittype == 'refr':\n for l in gmod.indxpopl:\n gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gmod.paragenrunitfull[gmod.indxpara.numbelem[l]]\n else:\n for l in gmod.indxpopl:\n if gmod.typemodltran == 'pois':\n meanelemtemp = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, \\\n gmod.this.indxparagenrfullelem)[gmod.indxpara.meanelem[l]]\n \n print('temp -- user input is not working for numbelem')\n #namevarb = 'numbelempop%d' % l\n #initvalu = getattr(gmod.init, namevarb)\n #if initvalu > gmod.maxmpara.numbelem[l] or initvalu < gmod.minmpara.numbelem[l]:\n # raise Exception('Bad initial number of elements...')\n #gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = initvalu\n \n if gmod.typemodltran == 'pois':\n gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = np.random.poisson(meanelemtemp)\n gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = round(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]])\n gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \\\n min(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.maxmpara.numbelem[l])\n gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \\\n max(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.minmpara.numbelem[l])\n gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]] = gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]]\n \n if gdat.booldiagmode:\n if gdat.typedata == 'mock' and gdat.inittype == 'refr':\n for l in gmod.indxpopl:\n if gmod.paragenrunitfull[gmod.indxpara.numbelem[l]] > gmod.maxmpara.numbelem[l]:\n raise Exception('')\n\n if gmod.numbparaelem > 0:\n gmod.this.indxelemfull = []\n for l in gmod.indxpopl:\n gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))\n gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')\n\n if gdat.inittype == 'reco':\n if gdat.namerecostat is not None:\n strgcnfg = gdat.namerecostat\n else:\n strgcnfg = gdat.strgcnfg\n path = gdat.pathoutp + 'stat_' + strgcnfg + '.h5'\n if os.path.exists(path):\n boolinitreco = True\n thisfile = 
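# --- Illustrative sketch (standalone; the _Bag class and the file stem are made up) ---
# readfile() above (and writfile() further down) split a state object across two
# files: numpy arrays live in an HDF5 file ('.h5') and everything else is pickled
# ('.p'); on load the arrays are re-attached as attributes.  Minimal round trip:
import pickle
import h5py
import numpy as np

class _Bag(object):
    pass

def _sketch_writ(objt, stem):
    slim = _Bag()
    with h5py.File(stem + '.h5', 'w') as filearry:
        for attr, valu in vars(objt).items():
            if isinstance(valu, np.ndarray):
                filearry.create_dataset(attr, data=valu)
            else:
                setattr(slim, attr, valu)
    with open(stem + '.p', 'wb') as filepick:
        pickle.dump(slim, filepick, protocol=pickle.HIGHEST_PROTOCOL)

def _sketch_read(stem):
    with open(stem + '.p', 'rb') as filepick:
        objt = pickle.load(filepick)
    with h5py.File(stem + '.h5', 'r') as filearry:
        for attr in filearry:
            setattr(objt, attr, filearry[attr][()])
    return objt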
h5py.File(path, 'r')\n if gdat.typeverb > 0:\n print('Initializing from the state %s...' % path)\n print('Likelihood:')\n print(thisfile['lliktotl'][...])\n \n # find the number of populations provided\n maxmindxpopl = 0\n for l in range(10):\n for attr in thisfile:\n if attr.startswith('lgalpop'):\n gmod.indxpopl = int(attr[7])\n if gmod.indxpopl > maxmindxpopl:\n maxmindxpopl = gmod.indxpopl\n numbpoplinpt = maxmindxpopl + 1\n \n if numbpoplinpt != gmod.numbpopl:\n print('State file and fitting metamodel have different number of populations.')\n \n # find the number of elements provided\n cntr = np.zeros(gmod.numbpoplinpt, dtype=int)\n for attr in thisfile:\n if attr.startswith('lgalpop'):\n gmod.indxpopl = int(attr[7])\n cntr[indxpopl] += 1\n if gdat.typeverb > 0:\n print('Number of elements found:')\n print(cntr)\n\n for attr in thisfile:\n for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):\n if gmod.nameparagenrbase == attr:\n if gmod.nameparagenrbase.startswith('numbelem'):\n try:\n indxpopltemp = int(gmod.nameparagenrbase[-1])\n initnumbelem = getattr(gdat, 'initnumbelempop%d' % indxpopltemp)\n print('Initial condition for the number of elements conflicts with the state file. Defaulting to the argument...')\n except:\n initnumbelem = thisfile[attr][()]\n gmod.this.paragenrunitfull[k] = initnumbelem\n else:\n gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', thisfile[attr][()], k)\n if gmod.this.paragenrunitfull[k] == 0.:\n print('Warning CDF is zero.')\n if not np.isfinite(thisfile[attr][()]):\n raise Exception('Retreived state parameter is not finite.')\n if (gmod.numbparaelem == 0 or gmod.numbparaelem > 0 and not k in gmod.indxpara.numbelem) and \\\n (not np.isfinite(gmod.this.paragenrunitfull[k]) or gmod.this.paragenrunitfull[k] < 0. 
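# --- Illustrative sketch (standalone) ---
# When no state file is used, init_stat() draws the starting number of elements of a
# population from a Poisson distribution with the model mean and clips it to the
# allowed range before proceeding.  That single step, in isolation:
import numpy as np

def _sketch_init_numbelem(meanelem, minmnumbelem, maxmnumbelem):
    numbelem = np.random.poisson(meanelem)
    return int(min(max(numbelem, minmnumbelem), maxmnumbelem))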
or \\\n gmod.this.paragenrunitfull[k] > 1.):\n raise Exception('CDF of the retreived state parameter is bad.')\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n maxm.numbelem = getattr(gdat.fitt.maxm, 'numbelempop%d' % l)\n if gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] > maxm.numbelem:\n gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = maxm.numbelem\n if gdat.typeverb > 0:\n print('Tapering off the element list...')\n\n gmod.this.indxelemfull = []\n for l in gmod.indxpopl:\n gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))\n if gdat.typeverb > 0:\n print('gmod.this.paragenrunitfull[gmod.indxpara.numbelem]')\n print(gmod.this.paragenrunitfull[gmod.indxpara.numbelem])\n \n gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')\n gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)\n \n if (gmod.this.paragenrunitfull == 0).all():\n raise Exception('Bad initialization.')\n \n if gmod.numbparaelem > 0 and gmod.this.indxparagenrfullelem is not None:\n for nameparagenrelem in gmod.namepara.elem:\n initcomp = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n initcomp[l] = np.empty(len(gmod.this.indxelemfull[l]))\n for k in range(len(gmod.this.indxelemfull[l])):\n namefiel = '%spop%d%04d' % (nameparagenrelem, l, k)\n for attr in thisfile:\n if namefiel == attr:\n initcomp[l][k] = thisfile[namefiel][()]\n setattr(gdat, 'init' + nameparagenrelem, initcomp)\n initcompfromstat(gdat, gdatmodi, 'init')\n thisfile.close()\n else:\n boolinitreco = False\n if gdat.typeverb > 0:\n print('Could not find the state file, %s, to initialize the sampler.' % path)\n \n if gdat.inittype == 'refr':\n if gdat.typedata == 'inpt':\n for l in gmod.indxpopl:\n gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gdat.refr.numbelem[l]\n if gdat.typedata == 'mock':\n for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):\n if not (gdat.inittype == 'pert' and gmod.nameparagenrbase.startswith('numbelem')) and \\\n gmod.nameparagenrbase in gmod.nameparagenrbase:\n gmod.indxpara.true = np.where(gmod.nameparagenrbase == gmod.nameparagenrbase)[0]\n gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmodstat.paragenrscalfull[gmod.indxpara.true], k)\n if gmod.numbparaelem > 0:\n gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')\n if gdat.typeverb > 1:\n show_paragenrscalfull(gdat, gdatmodi)\n if gmod.this.indxparagenrfullelem is not None:\n print('Initializing elements from the reference element parameters...')\n show_paragenrscalfull(gdat, gdatmodi)\n gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)\n show_paragenrscalfull(gdat, gdatmodi)\n initcompfromstat(gdat, gdatmodi, 'refr')\n gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)\n \n ## impose user-specified individual initial values\n for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):\n if gmod.nameparagenrbase.startswith('numbelem'):\n continue\n if gdat.inittype == 'reco' or gdat.inittype == 'refr' or gdat.inittype == 'pert':\n try:\n getattr(gdat, 'init' + gmod.nameparagenrbase)\n print('Conflicting initial state arguments detected, init keyword takes precedence.')\n except:\n pass\n try:\n raise Exception('')\n initvalu = 
getattr(gdat, 'init' + gmod.nameparagenrbase)\n gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', initvalu, k)\n if gdat.typeverb > 0:\n print('Received initial condition for %s: %.3g' % (gmod.nameparagenrbase, initvalu))\n except:\n pass\n \n ## PSF\n if gdat.initpsfp is not None:\n print('Initializing the metamodel PSF from the provided initial state...')\n if gdat.initpsfp.size != gmod.indxpara.psfp.size:\n raise Exception('')\n for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):\n if k in gmod.indxpara.psfp:\n gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.initpsfp[k-gmod.indxpara.psfp[0]], k)\n if gdat.initpsfprefr:\n print('Initializing the metamodel PSF from the reference state...')\n for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):\n if k in gmod.indxpara.psfp:\n gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmod.psfpexpr[k-gmod.indxpara.psfp[0]], k)\n\n if gdat.inittype == 'rand' or gdat.inittype == 'reco' and not boolinitreco:\n if gdat.typeverb > 0:\n print('Initializing from a random state...')\n gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)\n \n if gmod.numbparaelem > 0:\n gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')\n\n # check the initial unit sample vector for bad entries\n if gmod.numbparaelem > 0:\n indxsampdiff = np.setdiff1d(gmod.indxparagenrfull, gmod.indxpara.numbelem)\n \n if np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])).any():\n raise Exception('')\n indxsampbaddlowr = np.where((gmod.this.paragenrunitfull[indxsampdiff] <= 0.) | np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])))[0]\n indxsampbadduppr = np.where(gmod.this.paragenrunitfull[indxsampdiff] >= 1.)[0]\n indxsampbaddlowr = indxsampdiff[indxsampbaddlowr]\n indxsampbadduppr = indxsampdiff[indxsampbadduppr]\n else:\n indxsampbaddlowr = np.where(gmod.this.paragenrunitfull <= 0.)[0]\n indxsampbadduppr = np.where(gmod.this.paragenrunitfull >= 1.)[0]\n \n indxsampbadd = np.concatenate((indxsampbaddlowr, indxsampbadduppr))\n if indxsampbadd.size > 0:\n print('Initial value caused unit sample vector to go outside the unit interval...')\n show_paragenrscalfull(gdat, gdatmodi, indxsampshow=indxsampbadd)\n gmod.this.paragenrunitfull[indxsampbadd] = np.random.rand(indxsampbadd.size)\n raise Exception('')\n \n gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)\n indxbadd = np.where(np.logical_not(np.isfinite(gmod.this.paragenrscalfull)))[0]\n if indxbadd.size > 0:\n raise Exception('')\n\n\ndef writfile(gdattemp, path):\n \n filepick = open(path + '.p', 'wb')\n filearry = h5py.File(path + '.h5', 'w')\n \n gdattemptemp = tdpy.gdatstrt()\n for attr, valu in gdattemp.__dict__.items():\n if attr.endswith('psfnintp'):\n continue\n \n if isinstance(valu, np.ndarray) and valu.dtype != np.dtype('O') and valu.dtype != np.dtype('<U4'):# or isinstance(valu, str) or \\\n #isinstance(valu, float) or isinstance(valu, bool) or isinstance(valu, int) or isinstance(valu, np.float):\n \n filearry.create_dataset(attr, data=valu)\n else:\n # temp -- make sure interpolation objects are not written.\n if attr != 'adisobjt' and attr != 'redsfromdlosobjt' and attr != 'edisintp':\n setattr(gdattemptemp, attr, valu)\n \n print('Writing to %s...' 
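# --- Illustrative sketch (standalone) ---
# Before the inverse-CDF transform, init_stat() above flags entries of the unit
# sample vector that are non-finite or outside (0, 1); the code re-draws them and
# then raises.  A compact version of just the detect-and-redraw step:
import numpy as np

def _sketch_clean_unitvect(paragenrunitfull):
    paragenrunitfull = np.copy(paragenrunitfull)
    indxsampbadd = np.where((paragenrunitfull <= 0.) | (paragenrunitfull >= 1.) |
                            ~np.isfinite(paragenrunitfull))[0]
    if indxsampbadd.size > 0:
        paragenrunitfull[indxsampbadd] = np.random.rand(indxsampbadd.size)
    return paragenrunitfull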
% path)\n\n pickle.dump(gdattemptemp, filepick, protocol=pickle.HIGHEST_PROTOCOL)\n filepick.close()\n filearry.close()\n \n\ndef retr_deflcutf(angl, defs, asca, acut, asym=False):\n\n fracanglasca = angl / asca\n \n deflcutf = defs / fracanglasca\n \n # second term in the NFW deflection profile\n fact = np.ones_like(fracanglasca)\n indxlowr = np.where(fracanglasca < 1.)[0]\n indxuppr = np.where(fracanglasca > 1.)[0]\n fact[indxlowr] = np.arccosh(1. / fracanglasca[indxlowr]) / np.sqrt(1. - fracanglasca[indxlowr]**2)\n fact[indxuppr] = np.arccos(1. / fracanglasca[indxuppr]) / np.sqrt(fracanglasca[indxuppr]**2 - 1.)\n \n if asym:\n deflcutf *= np.log(fracanglasca / 2.) + fact\n else:\n fracacutasca = acut / asca\n factcutf = fracacutasca**2 / (fracacutasca**2 + 1)**2 * ((fracacutasca**2 + 1. + 2. * (fracanglasca**2 - 1.)) * fact + \\\n np.pi * fracacutasca + (fracacutasca**2 - 1.) * np.log(fracacutasca) + np.sqrt(fracanglasca**2 + fracacutasca**2) * (-np.pi + (fracacutasca**2 - 1.) / fracacutasca * \\\n np.log(fracanglasca / (np.sqrt(fracanglasca**2 + fracacutasca**2) + fracacutasca))))\n deflcutf *= factcutf\n \n return deflcutf\n\n\ndef initchro(gdat, gdatmodi, name):\n \n if gdatmodi is not None: \n setattr(gdatmodi.this, 'chro' + name, gdat.functime())\n \n\ndef stopchro(gdat, gdatmodi, name):\n \n if gdatmodi is not None: \n setattr(gdatmodi.this, 'chro' + name, gdat.functime() - getattr(gdatmodi.this, 'chro' + name))\n\n\ndef retr_defl(gdat, indxpixlelem, lgal, bgal, angllens, ellp=None, angl=None, rcor=None, asca=None, acut=None):\n \n # translate the grid\n lgaltran = gdat.lgalgrid[indxpixlelem] - lgal\n bgaltran = gdat.bgalgrid[indxpixlelem] - bgal\n \n if acut is not None:\n defs = angllens\n angl = np.sqrt(lgaltran**2 + bgaltran**2)\n defl = retr_deflcutf(angl, defs, asca, acut)\n defllgal = lgaltran / angl * defl\n deflbgal = bgaltran / angl * defl\n\n else:\n bein = angllens\n\n # rotate the grid\n lgalrttr = np.cos(angl) * lgaltran - np.sin(angl) * bgaltran\n bgalrttr = np.sin(angl) * lgaltran + np.cos(angl) * bgaltran\n \n axisrati = 1. - ellp\n facteccc = np.sqrt(1. 
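# --- Illustrative sketch (standalone) ---
# The piecewise factor used by retr_deflcutf() above is the standard NFW building
# block F(x) = arccosh(1/x)/sqrt(1-x^2) for x < 1 and arccos(1/x)/sqrt(x^2-1) for
# x > 1, with F(1) = 1; the truncation term then multiplies it.  In isolation:
import numpy as np

def _sketch_nfwfact(fracanglasca):
    fracanglasca = np.atleast_1d(np.asarray(fracanglasca, dtype=float))
    fact = np.ones_like(fracanglasca)
    indxlowr = np.where(fracanglasca < 1.)[0]
    indxuppr = np.where(fracanglasca > 1.)[0]
    fact[indxlowr] = np.arccosh(1. / fracanglasca[indxlowr]) / np.sqrt(1. - fracanglasca[indxlowr]**2)
    fact[indxuppr] = np.arccos(1. / fracanglasca[indxuppr]) / np.sqrt(fracanglasca[indxuppr]**2 - 1.)
    return fact
# e.g. _sketch_nfwfact([0.5, 1., 2.]) -> approximately [1.52, 1.00, 0.60]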
- axisrati**2)\n factrcor = np.sqrt(axisrati**2 * lgalrttr**2 + bgalrttr**2)\n defllgalrttr = bein * axisrati / facteccc * np.arctan(facteccc * lgalrttr / factrcor)\n deflbgalrttr = bein * axisrati / facteccc * np.arctanh(facteccc * bgalrttr / factrcor)\n \n # totate back vector to original basis\n defllgal = np.cos(angl) * defllgalrttr + np.sin(angl) * deflbgalrttr\n deflbgal = -np.sin(angl) * defllgalrttr + np.cos(angl) * deflbgalrttr\n \n defl = np.vstack((defllgal, deflbgal)).T\n \n return defl\n\n\ndef retr_lpriselfdist(gdat, strgmodl, feat, strgfeat):\n \n minm = getattr(gmod.minmpara, strgfeat)\n maxm = getattr(gmod.maxmpara, strgfeat)\n \n lpri = np.sum(np.log(pdfn_self(feat, minm, maxm)))\n \n return lpri\n\n\ndef retr_lprilogtdist(gdat, strgmodl, feat, strgfeat):\n \n minm = getattr(gmod.minmpara, strgfeat)\n maxm = getattr(gmod.maxmpara, strgfeat)\n \n lpri = np.sum(np.log(pdfn_logt(feat, minm, maxm)))\n \n return lpri\n\n\ndef retr_lpripowrdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):\n \n gmod = getattr(gdat, strgmodl)\n \n minm = getattr(gmod.minmpara, strgfeat)\n maxm = getattr(gmod.maxmpara, strgfeat)\n \n slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + strgfeat + 'pop%d' % l)]\n \n lpri = np.sum(np.log(pdfn_powr(feat, minm, maxm, slop)))\n \n return lpri\n\n\ndef retr_lpridpowdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):\n \n minm = getattr(gmod.minmpara, strgfeat)\n maxm = getattr(gmod.maxmpara, strgfeat)\n \n brek = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distbrek')[l]]\n sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + strgfeat)[l]]\n slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + strgfeat)[l]]\n \n lpri = np.sum(np.log(pdfn_dpow(feat, minm, maxm, brek, sloplowr, slopuppr)))\n \n return lpri\n\n\ndef retr_lprigausdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):\n \n distmean = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distmean')[l]]\n diststdv = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'diststdv')[l]]\n \n lpri = np.sum(np.log(pdfn_gaus(feat, distmean, diststdv)))\n \n return lpri\n\n\ndef retr_lpriigamdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):\n \n slop = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'slop')[l]]\n cutf = getattr(gmod, 'cutf' + strgfeat)\n \n lpri = np.sum(np.log(pdfn_igam(feat, slop, cutf)))\n\n return lpri\n\n\ndef traptdim(gdat, arry):\n \n s1 = arry[0, 0] + arry[-1, 0] + arry[0, -1] + arry[-1, -1]\n s2 = np.sum(arry[1:-1, 0]) + np.sum(arry[1:-1, -1]) + np.sum(arry[0, 1:-1]) + np.sum(arry[-1, 1:-1])\n s3 = np.sum(arry[1:-1, 1:-1])\n summ = (s1 + 2*s2 + 4*s3) * gdat.apix\n \n return summ\n\n\ndef retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons=None):\n \n pdfnspatprio = pdfnspatpriotemp\n if spatdistcons is not None:\n pdfnspatprio += spatdistcons\n\n summ = traptdim(gdat, pdfnspatprio)\n pdfnspatprio /= summ\n lpdfspatprio = np.log(pdfnspatprio)\n lpdfspatprioobjt = sp.interpolate.RectBivariateSpline(gdat.binspara.bgalcart, gdat.binspara.lgalcart, lpdfspatprio)\n \n return lpdfspatprio, lpdfspatprioobjt\n\n\ndef retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=False):\n \n if strgmodl == 'true':\n gdatobjt = gdat.true\n elif strgmodl == 'fitt' and boolinit:\n gdatobjt = gdat.fitt\n else:\n gdatobjt = gdatmodi\n\n return gdatobjt\n\n\ndef proc_samp(gdat, gdatmodi, strgstat, strgmodl, fast=False, boolinit=False):\n \n gmod = getattr(gdat, strgmodl)\n \n gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl, 
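# --- Illustrative sketch (standalone; uses numpy's trapezoid rule instead of traptdim) ---
# retr_spatprio() above normalises the spatial prior template over the image and then
# builds a RectBivariateSpline of its logarithm so the prior can be evaluated at
# arbitrary element positions.  Minimal version of those two steps:
import numpy as np
import scipy.interpolate

def _sketch_spatprio(binsbgal, binslgal, pdfnspatpriotemp):
    # normalise so the prior integrates to unity over the grid
    norm = np.trapz(np.trapz(pdfnspatpriotemp, binslgal, axis=1), binsbgal)
    pdfnspatprio = pdfnspatpriotemp / norm
    lpdfspatprio = np.log(pdfnspatprio)
    lpdfspatprioobjt = scipy.interpolate.RectBivariateSpline(binsbgal, binslgal, lpdfspatprio)
    return lpdfspatprio, lpdfspatprioobjt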
boolinit=boolinit)\n gmodstat = getattr(gdatobjt, strgstat)\n \n initchro(gdat, gdatmodi, 'pars')\n\n # grab the sample vector\n indxpara = np.arange(gmodstat.paragenrscalfull.size) \n\n if gdat.booldiagmode:\n if not np.isfinite(gmodstat.paragenrscalfull).all():\n raise Exception('')\n\n if gmod.typeevalpsfn != 'none' and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):\n psfp = gmodstat.paragenrscalfull[gmod.indxpara.psfp]\n if gdat.booldiagmode:\n if np.where(psfp == 0)[0].size == psfp.size:\n raise Exception('')\n setattr(gmodstat, 'psfp', psfp)\n bacp = gmodstat.paragenrscalfull[gmod.indxpara.bacp]\n \n if gmod.numbparaelem > 0:\n \n # temp -- this may slow down execution\n gmodstat.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodstat.indxelemfull, strgmodl)\n\n gmodstat.numbelem = np.empty(gmod.numbpopl, dtype=int)\n indxelem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)\n indxelem[l] = np.arange(gmodstat.numbelem[l])\n gmodstat.numbelem[l] = np.sum(gmodstat.numbelem[l])\n gmodstat.numbelemtotl = np.sum(gmodstat.numbelem) \n\n gmodstat.dictelem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gmodstat.dictelem[l] = dict()\n for strgfeat in gmod.namepara.genrelemdefa:\n gmodstat.dictelem[l][strgfeat] = []\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n gmodstat.dictelem[l][nameparagenrelem] = gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]\n if gdat.booldiagmode:\n if ((abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) < 1e-100 ) & (abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) > 0.)).any():\n raise Exception('')\n\n if gmodstat.numbelem[l] != len(gmodstat.dictelem[l][nameparagenrelem]):\n print('l')\n print(l)\n print('numbelem')\n print(numbelem)\n print('gmodstat.dictelem')\n print(gmodstat.dictelem)\n print('nameparagenrelem')\n print(nameparagenrelem)\n raise Exception('')\n \n if gdat.boolbinsener:\n if gdat.typeverb > 2:\n print('Calculating element spectra...')\n initchro(gdat, gdatmodi, 'spec')\n for l in gmod.indxpopl:\n for strgfeat in gmod.namepara.genrelem[l]:\n sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]\n gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \\\n expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])\n if gmod.typeelem[l].startswith('lghtline'):\n if gmod.typeelem[l] == 'lghtlinevoig':\n gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \\\n gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])\n else:\n gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], \\\n edisintp=gdat.edisintp, spectype=gmod.spectype[l])\n\n stopchro(gdat, gdatmodi, 'spec')\n \n if gdat.typeverb > 2:\n print('Element features:')\n for l in gmod.indxpopl:\n print('l')\n print(l)\n for strgfeat in gmod.namepara.genrelem[l]:\n print(strgfeat)\n print(gmodstat.dictelem[l][strgfeat])\n \n if gdat.booldiagmode:\n for l in gmod.indxpopl:\n for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n if (gmod.listscalparagenrelem[l][g] != 'gaus' and not gmod.listscalparagenrelem[l][g].startswith('lnor')) and \\\n 
(gmod.listscalparagenrelem[l][g] != 'expo' and (gmodstat.dictelem[l][nameparagenrelem] < getattr(gmod.minmpara, nameparagenrelem)).any()) or \\\n (gmodstat.dictelem[l][nameparagenrelem] > getattr(gmod.maxmpara, nameparagenrelem)).any():\n \n print('l, g')\n print(l, g)\n print('nameparagenrelem')\n print(nameparagenrelem)\n print('gmodstat.dictelem[l][nameparagenrelem]')\n summgene(gmodstat.dictelem[l][nameparagenrelem])\n print('getattr(gmod, minm + nameparagenrelem)')\n print(getattr(gmod.minmpara, nameparagenrelem))\n print('getattr(gmod, maxm + nameparagenrelem)')\n print(getattr(gmod.maxmpara, nameparagenrelem))\n print('gmod.listscalparagenrelem[l][g]')\n print(gmod.listscalparagenrelem[l][g])\n raise Exception('')\n \n # calculate element spectra\n # temp\n if gdat.booldiagmode:\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lens':\n if gdat.variasca:\n indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['acut']] < 0.)[0]\n if indx.size > 0:\n raise Exception('')\n if gdat.variacut:\n indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['asca']] < 0.)[0]\n if indx.size > 0:\n raise Exception('')\n \n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lght'):\n \n # evaluate horizontal and vertical position for elements whose position is a power law in image-centric radius\n if gmod.typespatdist[l] == 'glc3':\n gmodstat.dictelem[l]['dlos'], gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'] = retr_glc3(gmodstat.dictelem[l]['dglc'], \\\n gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'])\n \n if gmod.typespatdist[l] == 'gangexpo':\n gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], = retr_lgalbgal(gmodstat.dictelem[l]['gang'], \\\n gmodstat.dictelem[l]['aang'])\n \n if gdat.booldiagmode:\n if gmodstat.numbelem[l] > 0:\n if np.amin(gmodstat.dictelem[l]['lgal']) < gmod.minmlgal or \\\n np.amax(gmodstat.dictelem[l]['lgal']) > gmod.maxmlgal or \\\n np.amin(gmodstat.dictelem[l]['bgal']) < gmod.minmbgal or \\\n np.amax(gmodstat.dictelem[l]['bgal']) > gmod.maxmbgal:\n raise Exception('Bad coordinates!')\n\n if gmod.typespatdist[l] == 'los3':\n gmodstat.dictelem[l]['dglc'], gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'] = retr_los3(gmodstat.dictelem[l]['dlos'], \\\n gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])\n\n # evaluate flux for pulsars\n if gmod.typeelem[l] == 'lghtpntspuls':\n gmodstat.dictelem[l]['lumi'] = retr_lumipuls(gmodstat.dictelem[l]['geff'], gmodstat.dictelem[l]['magf'], gmodstat.dictelem[l]['per0'])\n if gmod.typeelem[l] == 'lghtpntsagnntrue':\n gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])\n gmodstat.dictelem[l]['lumi'] = gmodstat.dictelem[l]['lum0'] * (1. 
+ gmodstat.dictelem[l]['reds'])**4\n if gmod.typeelem[l] == 'lghtpntspuls' or gmod.typeelem[l] == 'lghtpntsagnntrue':\n gmodstat.dictelem[l]['flux'] = retr_flux(gdat, gmodstat.dictelem[l]['lumi'], gmodstat.dictelem[l]['dlos'])\n # evaluate spectra\n if gmod.typeelem[l].startswith('lghtline'):\n if gmod.typeelem[l] == 'lghtlinevoig':\n gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \\\n gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])\n else:\n gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], edisintp=gdat.edisintp, spectype=gmod.spectype[l])\n else:\n sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]\n gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \\\n expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])\n \n\n stopchro(gdat, gdatmodi, 'pars')\n \n ### loglikelihood\n initchro(gdat, gdatmodi, 'modl')\n \n if gmod.boollens:\n lgalsour = gmodstat.paragenrscalfull[gmod.indxpara.lgalsour]\n bgalsour = gmodstat.paragenrscalfull[gmod.indxpara.bgalsour]\n \n if gdat.typeverb > 2:\n print('Evaluating the likelihood...')\n \n # process a sample vector and the occupancy list to calculate secondary variables\n if gmod.boollens:\n fluxsour = gmodstat.paragenrscalfull[gmod.indxpara.fluxsour]\n if gdat.numbener > 1:\n sindsour = gmodstat.paragenrscalfull[gmod.indxpara.sindsour]\n sizesour = gmodstat.paragenrscalfull[gmod.indxpara.sizesour]\n ellpsour = gmodstat.paragenrscalfull[gmod.indxpara.ellpsour]\n anglsour = gmodstat.paragenrscalfull[gmod.indxpara.anglsour]\n if gmod.typeemishost != 'none':\n lgalhost = [[] for e in gmod.indxsersfgrd]\n bgalhost = [[] for e in gmod.indxsersfgrd]\n fluxhost = [[] for e in gmod.indxsersfgrd]\n if gdat.numbener > 1:\n sindhost = [[] for e in gmod.indxsersfgrd]\n sizehost = [[] for e in gmod.indxsersfgrd]\n for e in gmod.indxsersfgrd:\n lgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % e)]\n bgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % e)]\n fluxhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'fluxhostisf%d' % e)]\n if gdat.numbener > 1:\n sindhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sindhostisf%d' % e)]\n sizehost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sizehostisf%d' % e)]\n if gmod.boollens:\n beinhost = [[] for e in gmod.indxsersfgrd]\n for e in gmod.indxsersfgrd:\n beinhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % e)]\n if gmod.typeemishost != 'none':\n ellphost = [[] for e in gmod.indxsersfgrd]\n anglhost = [[] for e in gmod.indxsersfgrd]\n serihost = [[] for e in gmod.indxsersfgrd]\n for e in gmod.indxsersfgrd:\n ellphost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'ellphostisf%d' % e)]\n anglhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'anglhostisf%d' % e)]\n serihost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'serihostisf%d' % e)]\n if gmod.boollens:\n numbpixltemp = gdat.numbpixlcart\n defl = np.zeros((numbpixltemp, 2))\n \n # determine the indices of the pixels over which element kernels will be evaluated\n if gdat.boolbinsspat:\n if gmod.numbparaelem > 0:\n listindxpixlelem = [[] for l in gmod.indxpopl]\n listindxpixlelemconc = [[] for l in 
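# --- Illustrative sketch (standalone; the unit conventions of the pipeline are not reproduced) ---
# For pulsar- and AGN-like elements the block above converts an intrinsic luminosity
# and a distance into a flux via retr_flux(); the underlying relation is the usual
# inverse-square law, F = L / (4 pi d^2), with d the (luminosity) distance.
import numpy as np

def _sketch_flux(lumi, dlos):
    return lumi / (4. * np.pi * dlos**2)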
gmod.indxpopl]\n for l in gmod.indxpopl:\n if gmodstat.numbelem[l] > 0:\n listindxpixlelem[l], listindxpixlelemconc[l] = retr_indxpixlelemconc(gdat, strgmodl, gmodstat.dictelem, l)\n \n if gmod.boollens:\n sherextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sherextr')]\n sangextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sangextr')]\n \n ## host halo deflection\n initchro(gdat, gdatmodi, 'deflhost')\n deflhost = [[] for e in gmod.indxsersfgrd]\n \n indxpixlmiss = gdat.indxpixlcart\n\n for e in gmod.indxsersfgrd:\n if gdat.typeverb > 2:\n print('Evaluating the deflection field due to host galaxy %d' % e)\n print('lgalhost[e]')\n print(lgalhost[e])\n print('bgalhost[e]')\n print(bgalhost[e])\n print('beinhost[e]')\n print(beinhost[e])\n print('ellphost[e]')\n print(ellphost[e])\n print('anglhost[e]')\n print(anglhost[e])\n\n deflhost[e] = retr_defl(gdat, indxpixlmiss, lgalhost[e], bgalhost[e], beinhost[e], ellp=ellphost[e], angl=anglhost[e])\n \n if gdat.booldiagmode:\n indxpixltemp = slice(None)\n \n setattr(gmodstat, 'deflhostisf%d' % e, deflhost[e])\n \n if gdat.typeverb > 2:\n print('deflhost[e]')\n summgene(deflhost[e])\n \n defl += deflhost[e]\n if gdat.typeverb > 2:\n print('After adding the host deflection...')\n print('defl')\n summgene(defl)\n if gdat.booldiagmode:\n if not np.isfinite(deflhost).all():\n raise Exception('')\n \n stopchro(gdat, gdatmodi, 'deflhost')\n\n ## external shear\n initchro(gdat, gdatmodi, 'deflextr')\n deflextr = []\n indxpixltemp = gdat.indxpixlcart\n deflextr = retr_deflextr(gdat, indxpixltemp, sherextr, sangextr)\n defl += deflextr\n if gdat.typeverb > 2:\n print('After adding the external deflection...')\n print('defl')\n summgene(defl)\n stopchro(gdat, gdatmodi, 'deflextr')\n \n # Boolean flag to indicate that the object to convolve the image will be needed\n boolneedpsfnconv = gdat.typepixl == 'cart' and (gmod.typeevalpsfn == 'conv' or gmod.typeevalpsfn == 'full')\n \n ## Boolean flag to indicate that the object to convolve the image will be constructed\n boolcalcpsfnconv = strgmodl == 'true' or boolinit or gdat.boolmodipsfn\n \n # get the convolution object\n if boolneedpsfnconv and boolcalcpsfnconv:\n initchro(gdat, gdatmodi, 'psfnconv')\n if gdat.typeverb > 2:\n print('Evaluating the PSF convolution kernel...')\n psfnconv = [[[] for i in gdat.indxener] for m in gdat.indxevtt]\n if gdat.typepixl == 'cart':\n \n gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)\n fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)\n for mm, m in enumerate(gdat.indxevtt):\n for ii, i in enumerate(gdat.indxener):\n if gmod.typemodlpsfn == 'singgaus':\n sigm = psfp[i+m*gdat.numbener]\n else:\n sigm = fwhm[i, m] / 2.355\n gmodstat.psfnconv[mm][ii] = AiryDisk2DKernel(sigm / gdat.sizepixl)\n \n stopchro(gdat, gdatmodi, 'psfnconv')\n \n if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and gmod.numbparaelem > 0:\n if strgmodl == 'true' or boolinit or gdat.boolmodipsfn:\n if gdat.typepixl == 'heal':\n gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)\n gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')\n fwhm = 2. 
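# --- Illustrative sketch (standalone; assumes astropy.convolution is available) ---
# The convolution setup above derives a width from the model PSF (sigma = FWHM/2.355,
# expressed in pixel units) and wraps it in an astropy kernel (AiryDisk2DKernel in the
# code).  For a plain Gaussian beam the analogous construction and convolution are:
from astropy.convolution import Gaussian2DKernel, convolve

def _sketch_convpsfn(cntp, fwhm, sizepixl):
    sigm = fwhm / 2.355 / sizepixl  # FWHM -> standard deviation, in pixels
    return convolve(cntp, Gaussian2DKernel(sigm))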
* retr_psfnwdth(gdat, gmodstat.psfn, 0.5)\n if gdat.typepixl == 'cart':\n if gdat.kernevaltype == 'ulip':\n gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)\n gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')\n if gdat.booldiagmode:\n if not np.isfinite(gmodstat.psfnintp(0.05)).all():\n raise Exception('')\n\n if gdat.kernevaltype == 'bspx':\n \n gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.anglcart.flatten(), gmod.typemodlpsfn, strgmodl)\n \n # side length of the upsampled kernel\n gdat.numbsidekernusam = 100\n # side length of the original kernel\n gdat.numbsidekern = gdat.numbsidekernusam / factkernusam \n gdat.indxsidekern = np.arange(gdat.numbsidekern)\n\n \t \t# pad by one row and one column\n \t \t#psf = np.zeros((gdat.numbsidekernusam+1, gdat.numbsidekernusam+1))\n \t \t#psf[0:gdat.numbsidekernusam, 0:gdat.numbsidekernusam] = psf0\n\t\t \t\n \t \t# make design matrix for each factkernusam x factkernusam region\n nx = factkernusam + 1\n y, x = mgrid[0:nx, 0:nx] / float(factkernusam)\n x = x.flatten()\n y = y.flatten()\n kernmatrdesi = np.array([full(nx*nx, 1), x, y, x*x, x*y, y*y, x*x*x, x*x*y, x*y*y, y*y*y]).T\n \t \t\n # output np.array of coefficients\n gmodstat.psfnintp = np.empty((gdat.numbsidekern, gdat.numbsidekern, kernmatrdesi.shape[1]))\n\n \t \t# solve p = kernmatrdesi psfnintp for psfnintp\n for iy in gdat.indxsidekern:\n for ix in gdat.indxsidekern:\n p = psf[iy*factkernusam:(iy+1)*factkernusam+1, ix*factkernusam:(ix+1)*factkernusam+1].flatten()\n gmodstat.psfnintp[iy, ix, :] = dot(linalg.inv(dot(kernmatrdesi.T, kernmatrdesi)), dot(kernmatrdesi.T, p))\n else:\n gmodstat.psfnintp = gdat.fitt.this.psfnintp\n sbrt = dict()\n for name in gmod.listnamediff:\n sbrt[name] = []\n \n if gmod.numbparaelem > 0:\n if gmod.boolelemsbrtdfncanyy:\n sbrtdfnc = []\n if gmod.boolelemsbrtextsbgrdanyy:\n sbrtextsbgrd = []\n if gmod.boolelemdeflsubhanyy:\n deflsubh = []\n # retrieve or initialize state variable\n if gmod.boolelemsbrtdfncanyy:\n sbrtdfnc = np.zeros_like(gdat.expo)\n if gmod.boolelemdeflsubhanyy:\n deflsubh = np.zeros((gdat.numbpixl, 2))\n if gmod.boolelemsbrtextsbgrdanyy: \n sbrtextsbgrd = np.zeros_like(gdat.expo)\n \n # element kernel evaluation\n if gmod.boolelemsbrtdfncanyy:\n initchro(gdat, gdatmodi, 'elemsbrtdfnc')\n sbrt['dfnc'] = []\n for l in gmod.indxpopl:\n if gmod.boolelemsbrtdfnc[l]:\n for k in range(gmodstat.numbelem[l]):\n if gmod.boolelemlght[l]:\n varbamplextd = gmodstat.dictelem[l]['spec'][:, k]\n if gmod.typeelem[l].startswith('clus'):\n varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]\n if gmod.typeelem[l] == 'clusvari':\n sbrtdfnc[0, listindxpixlelem[l][k], 0] += gmodstat.dictelem[l]['nobj'][k] / 2. 
/ np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \\\n np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[listindxpixlelem[l][k]])**2 + \\\n (gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[listindxpixlelem[l][k]])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)\n \n if gmod.boolelempsfn[l]:\n print('sbrtdfnc')\n summgene(sbrtdfnc)\n sbrtdfnc[:, listindxpixlelem[l][k], :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], \\\n gmodstat.dictelem[l]['bgal'][k], varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])\n \n if gmod.typeelem[l].startswith('lghtline'):\n sbrtdfnc[:, 0, 0] += gmodstat.dictelem[l]['spec'][:, k]\n \n sbrt['dfnc'] = sbrtdfnc\n \n if gdat.booldiagmode:\n if not np.isfinite(sbrtdfnc).all():\n raise Exception('Element delta function brightness not finite.')\n\n setattr(gmodstat, 'sbrtdfnc', sbrt['dfnc'])\n\n if gdat.booldiagmode:\n cntppntschec = retr_cntp(gdat, sbrt['dfnc'])\n numbelemtemp = 0\n for l in gmod.indxpopl:\n if gmod.boolelemsbrtdfnc[l]:\n numbelemtemp += np.sum(gmodstat.numbelem[l])\n if np.amin(cntppntschec) < -0.1:\n raise Exception('Point source spectral surface brightness is not positive-definite.')\n \n stopchro(gdat, gdatmodi, 'elemsbrtdfnc')\n \n if gmod.boolelemdeflsubhanyy:\n initchro(gdat, gdatmodi, 'elemdeflsubh')\n if gdat.typeverb > 2:\n print('Perturbing subhalo deflection field')\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lens':\n for kk, k in enumerate(indxelem[l]):\n asca = gmodstat.dictelem[l]['asca'][k]\n acut = gmodstat.dictelem[l]['acut'][k]\n if gmod.typeelemspateval[l] == 'locl':\n indxpixl = listindxpixlelem[l][kk]\n else:\n indxpixl = gdat.indxpixl\n deflsubh[indxpixl, :] += retr_defl(gdat, indxpixl, \\\n gmodstat.dictelem[l]['lgal'][kk], gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['defs'][kk], \\\n asca=asca, acut=acut)\n \n # temp -- find out what is causing the features in the element convergence maps\n #for kk, k in enumerate(indxelem[l]):\n # indxpixlpnts = retr_indxpixl(gdat, gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['lgal'][kk])\n # if deflsubh[listindxpixlelem[l][kk], :]\n \n if gdat.typeverb > 2:\n print('deflsubh')\n summgene(deflsubh)\n setattr(gmodstat, 'deflsubh', deflsubh)\n \n if gdat.booldiagmode:\n if not np.isfinite(deflsubh).all():\n raise Exception('Element deflection is not finite.')\n\n defl += deflsubh\n if gdat.typeverb > 2:\n print('After adding subhalo deflection to the total deflection')\n print('defl')\n summgene(defl)\n\n stopchro(gdat, gdatmodi, 'elemdeflsubh')\n\n if gmod.boolelemsbrtextsbgrdanyy:\n initchro(gdat, gdatmodi, 'elemsbrtextsbgrd')\n if strgstat == 'this':\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lghtgausbgrd':\n for k in range(gmodstat.numbelem[l]):\n sbrtextsbgrd[:, listindxpixlelem[l][k], :] += gmodstat.dictelem[l]['spec'][:, k, None, None] / \\\n 2. 
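# --- Illustrative sketch (standalone) ---
# The extended-element terms added to sbrtdfnc and sbrtextsbgrd around here have the
# form amp / (2 pi s^2) * exp(-0.5 * ((x-x0)^2 + (y-y0)^2) / s^2), i.e. a circular
# 2-D Gaussian that integrates to `amp` over the plane.  Standalone evaluation:
import numpy as np

def _sketch_gauskern(lgalgrid, bgalgrid, lgal, bgal, ampl, gwdt):
    return ampl / (2. * np.pi * gwdt**2) * np.exp(
        -0.5 * ((lgalgrid - lgal)**2 + (bgalgrid - bgal)**2) / gwdt**2)

# quick normalisation check on a fine grid:
#   xgrd = ygrd = np.linspace(-10., 10., 401)
#   xxgrd, yygrd = np.meshgrid(xgrd, ygrd)
#   mapstemp = _sketch_gauskern(xxgrd, yygrd, 0., 0., 3., 0.5)
#   mapstemp.sum() * (xgrd[1] - xgrd[0])**2   # ~ 3.0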
/ np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \\\n np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[None, listindxpixlelem[l][k], None])**2 + \\\n (gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[None, listindxpixlelem[l][k], None])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)\n \n setattr(gmodstat, 'sbrtextsbgrd', sbrtextsbgrd)\n sbrt['extsbgrd'] = []\n sbrt['extsbgrd'] = sbrtextsbgrd\n \n if gdat.booldiagmode:\n cntppntschec = retr_cntp(gdat, sbrt['extsbgrd'])\n if np.amin(cntppntschec) < -0.1:\n raise Exception('Point source spectral surface brightness is not positive-definite.')\n \n stopchro(gdat, gdatmodi, 'elemsbrtextsbgrd')\n \n if gdat.typeverb > 2:\n print('Element related state variables after perturbations...')\n if gmod.boolelemsbrtdfncanyy:\n print('sbrtdfnc')\n summgene(sbrtdfnc)\n if gmod.boolelemdeflsubhanyy:\n print('deflsubh')\n summgene(deflsubh)\n if gmod.boolelemsbrtextsbgrdanyy:\n print('sbrtextsbgrd')\n summgene(sbrtextsbgrd)\n \n if gmod.boollens:\n \n # lensed surface brightness\n initchro(gdat, gdatmodi, 'sbrtlens')\n \n if gdat.typeverb > 2:\n print('Evaluating lensed surface brightness...')\n \n if strgstat == 'this' or gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:\n sbrt['bgrd'] = []\n if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:\n sbrt['bgrdgalx'] = []\n \n if gdat.numbener > 1:\n specsour = retr_spec(gdat, np.array([fluxsour]), sind=np.array([sindsour]))\n if gdat.typeverb > 2:\n print('sindsour')\n print(sindsour)\n else:\n specsour = np.array([fluxsour])\n \n if gdat.typeverb > 2:\n print('lgalsour')\n print(lgalsour)\n print('bgalsour')\n print(bgalsour)\n print('sizesour')\n print(sizesour)\n print('ellpsour')\n print(ellpsour)\n print('anglsour')\n print(anglsour)\n print('fluxsour')\n print(fluxsour)\n print('specsour')\n print(specsour)\n\n if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:\n \n if gdat.typeverb > 2:\n print('Interpolating the background emission...')\n\n sbrt['bgrdgalx'] = retr_sbrtsers(gdat, gdat.lgalgrid[indxpixlelem[0]], gdat.bgalgrid[indxpixlelem[0]], \\\n lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)\n \n if gdat.typeverb > 2:\n print('sbrt[bgrdgalx]')\n summgene(sbrt['bgrdgalx'])\n print('sbrtextsbgrd')\n summgene(sbrtextsbgrd)\n sbrt['bgrd'] = sbrt['bgrdgalx'] + sbrtextsbgrd\n \n sbrt['lens'] = np.empty_like(gdat.cntpdata)\n for ii, i in enumerate(gdat.indxener):\n for mm, m in enumerate(gdat.indxevtt):\n sbrtbgrdobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \\\n sbrt['bgrd'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)\n \n bgalprim = gdat.bgalgrid[indxpixlelem[0]] - defl[indxpixlelem[0], 1]\n lgalprim = gdat.lgalgrid[indxpixlelem[0]] - defl[indxpixlelem[0], 0]\n # temp -- T?\n sbrt['lens'][ii, :, m] = sbrtbgrdobjt(bgalprim, lgalprim, grid=False).flatten()\n else:\n if gdat.typeverb > 2:\n print('Not interpolating the background emission...')\n \n sbrt['lens'] = retr_sbrtsers(gdat, gdat.lgalgrid - defl[gdat.indxpixl, 0], \\\n gdat.bgalgrid - defl[gdat.indxpixl, 1], \\\n lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)\n \n sbrt['bgrd'] = retr_sbrtsers(gdat, gdat.lgalgrid, \\\n gdat.bgalgrid, \\\n lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)\n \n setattr(gmodthis, 'sbrtlens', sbrt['lens'])\n\n if gdat.booldiagmode:\n if not np.isfinite(sbrt['lens']).all():\n raise Exception('Lensed emission is not finite.')\n if (sbrt['lens'] == 0).all():\n raise Exception('Lensed emission is zero 
everynp.where.')\n\n stopchro(gdat, gdatmodi, 'sbrtlens')\n \n ### background surface brightness\n sbrtback = []\n # temp\n #sbrtback = np.empty((numbback, gdat.numbener, indxpixlelem[yy].size, gdat.numbevtt))\n \n # evaluate host galaxy surface brightness\n if gmod.typeemishost != 'none':\n initchro(gdat, gdatmodi, 'sbrthost')\n for e in gmod.indxsersfgrd:\n if gdat.typeverb > 2:\n print('Evaluating the host galaxy surface brightness...')\n if gdat.numbener > 1:\n spechost = retr_spec(gdat, np.array([fluxhost[e]]), sind=np.array([sindhost[e]]))\n else:\n spechost = np.array([fluxhost[e]])\n \n if gdat.typeverb > 2:\n print('lgalhost[e]')\n print(lgalhost[e] * gdat.anglfact)\n print('bgalhost[e]')\n print(bgalhost[e] * gdat.anglfact)\n print('spechost')\n print(spechost)\n print('sizehost[e]')\n print(sizehost[e])\n print('ellphost[e]')\n print(ellphost[e])\n print('anglhost[e]')\n print(anglhost[e])\n print('serihost[e]')\n print(serihost[e])\n \n sbrt['hostisf%d' % e] = retr_sbrtsers(gdat, gdat.lgalgrid, gdat.bgalgrid, lgalhost[e], \\\n bgalhost[e], spechost, sizehost[e], ellphost[e], anglhost[e], serihost[e])\n \n setattr(gmodstat, 'sbrthostisf%d' % e, sbrt['hostisf%d' % e])\n \n #sbrthost = sbrt['host']\n if gdat.typeverb > 2:\n for e in gmod.indxsersfgrd:\n print('e')\n print(e)\n print('sbrt[hostisf%d]')\n summgene(sbrt['hostisf%d' % e])\n stopchro(gdat, gdatmodi, 'sbrthost')\n \n ## model emission\n initchro(gdat, gdatmodi, 'sbrtmodl')\n if gdat.typeverb > 2:\n print('Summing up the model emission...')\n \n sbrt['modlraww'] = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))\n for name in gmod.listnamediff:\n if name.startswith('back'):\n gmod.indxbacktemp = int(name[4:8])\n \n if gdat.typepixl == 'heal' and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv') and not gmod.boolunifback[gmod.indxbacktemp]:\n sbrttemp = getattr(gmod, 'sbrtbackhealfull')[gmod.indxbacktemp]\n else:\n sbrttemp = gmod.sbrtbacknorm[gmod.indxbacktemp]\n \n if gmod.boolspecback[gmod.indxbacktemp]:\n sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp]]\n else:\n sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp][gdat.indxener]][:, None, None]\n \n sbrt['modlraww'] += sbrt[name]\n if gdat.booldiagmode:\n if np.amax(sbrttemp) == 0.:\n raise Exception('')\n\n if gdat.typeverb > 2:\n print('name')\n print(name)\n print('sbrt[name]')\n summgene(sbrt[name])\n if gdat.typeverb > 2:\n for ii, i in enumerate(gdat.indxener):\n print('ii, i')\n print(ii, i)\n for mm, m in enumerate(gdat.indxevtt):\n print('mm, m')\n print(mm, m)\n print('sbrt[modlraww][ii, :, mm]')\n summgene(sbrt['modlraww'][ii, :, mm])\n \n # convolve the model with the PSF\n if gmod.convdiffanyy and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'):\n sbrt['modlconv'] = []\n # temp -- isotropic background proposals are unnecessarily entering this clause\n if gdat.typeverb > 2:\n print('Convolving the model image with the PSF...') \n sbrt['modlconv'] = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))\n for ii, i in enumerate(gdat.indxener):\n for mm, m in enumerate(gdat.indxevtt):\n if gdat.strgcnfg == 'pcat_ferm_igal_mock_test':\n print('Convolving ii, i, mm, m')\n print(ii, i, mm, m)\n if gdat.typepixl == 'cart':\n if gdat.numbpixl == gdat.numbpixlcart:\n sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrt['modlraww'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)), \\\n psfnconv[mm][ii]).flatten()\n else:\n sbrtfull = np.zeros(gdat.numbpixlcart)\n sbrtfull[gdat.indxpixlrofi] = 
sbrt['modlraww'][ii, :, mm]\n sbrtfull = sbrtfull.reshape((gdat.numbsidecart, gdat.numbsidecart))\n sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrtfull, psfnconv[mm][ii]).flatten()[gdat.indxpixlrofi]\n indx = np.where(sbrt['modlconv'][ii, :, mm] < 1e-50)\n sbrt['modlconv'][ii, indx, mm] = 1e-50\n if gdat.typepixl == 'heal':\n sbrt['modlconv'][ii, :, mm] = hp.smoothing(sbrt['modlraww'][ii, :, mm], fwhm=fwhm[i, m])[gdat.indxpixlrofi]\n sbrt['modlconv'][ii, :, mm][np.where(sbrt['modlraww'][ii, :, mm] <= 1e-50)] = 1e-50\n \n setattr(gmodstat, 'sbrtmodlconv', sbrt['modlconv'])\n # temp -- this could be made faster -- need the copy() statement because sbrtdfnc gets added to sbrtmodl afterwards\n sbrt['modl'] = np.copy(sbrt['modlconv'])\n else:\n if gdat.typeverb > 2:\n print('Skipping PSF convolution of the model...')\n sbrt['modl'] = np.copy(sbrt['modlraww'])\n \n if gdat.typeverb > 2:\n print('sbrt[modl]')\n summgene(sbrt['modl'])\n\n ## add PSF-convolved delta functions to the model\n if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:\n if gdat.typeverb > 2:\n print('Adding delta functions into the model...')\n print('sbrt[dfnc]')\n summgene(sbrt['dfnc'])\n sbrt['modl'] += sbrt['dfnc']\n stopchro(gdat, gdatmodi, 'sbrtmodl')\n \n if gdat.typeverb > 2:\n print('sbrt[modl]')\n summgene(sbrt['modl'])\n\n ### count map\n initchro(gdat, gdatmodi, 'expo')\n cntp = dict()\n cntp['modl'] = retr_cntp(gdat, sbrt['modl'])\n \n if gdat.booldiagmode:\n setattr(gmodstat, 'cntpmodl', cntp['modl'])\n stopchro(gdat, gdatmodi, 'expo')\n\n # mock data specific\n if strgmodl == 'true' and strgstat == 'this':\n \n # generate count data\n cntptemp = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))\n for i in gdat.indxener:\n for j in gdat.indxpixl:\n for m in gdat.indxevtt:\n cntptemp[i, j, m] = np.random.poisson(cntp['modl'][i, j, m])\n setattr(gdat, 'cntpdata', cntptemp)\n \n if not gdat.boolsqzeexpo and np.amax(cntptemp) == 0:\n print('cntp[modl]')\n summgene(cntp['modl'])\n print('gdat.boolsqzeexpo')\n print(gdat.boolsqzeexpo)\n print('cntptemp')\n summgene(cntptemp)\n raise Exception('Data is zero.')\n \n proc_cntpdata(gdat)\n \n ## diagnostics\n if gdat.booldiagmode:\n frac = cntp['modl'] / np.mean(cntp['modl'])\n if np.amin(frac) < -1e-3 and np.amin(cntp['modl']) < -0.1:\n raise Exception('')\n \n indxcubebadd = np.where(cntp['modl'] < 0.)[0]\n if indxcubebadd.size > 0:\n print('Warning! Model prediction is negative. Correcting to 1e-20...')\n cntp['modl'][indxcubebadd] = 1e-20\n stopchro(gdat, gdatmodi, 'modl')\n\n # log-prior\n initchro(gdat, gdatmodi, 'lpri')\n if gdat.typeverb > 2:\n print('Evaluating the prior...')\n \n lpri = np.zeros(gmod.numblpri)\n if gmod.numbparaelem > 0:\n \n for l in gmod.indxpopl:\n lpri[0] -= 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[l] * gmodstat.numbelem[l]\n \n if gdat.penalpridiff:\n sbrtdatapnts = gdat.sbrtdata - sbrt['dfnc']\n if gdat.typepixl == 'heal':\n raise Exception('')\n if gdat.typepixl == 'cart':\n psecodimdatapnts = np.empty((gdat.numbener, gdat.numbsidecarthalf, gdat.numbevtt))\n psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)\n fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)\n sigm = fwhm / 2.355\n psecodimdatapntsprio = np.exp(-2. 
* gdat.meanpara.mpolodim[None, :, None] / (0.1 / sigm[:, None, :]))\n lpridiff = 0.\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n psecdatapnts = retr_psec(gdat, sbrtdatapnts[i, :, m])\n psecodimdatapnts[i, :, m] = retr_psecodim(gdat, psecdatapnts)\n psecodimdatapnts[i, :, m] /= psecodimdatapnts[i, 0, m]\n lpridiff += -0.5 * np.sum((psecodimdatapnts[i, :, m] - psecodimdatapntsprio[i, :, m])**2)\n setattr(gmodstat, 'psecodimdatapntsen%02devt%d' % (i, m), psecodimdatapnts[i, :, m])\n setattr(gmodstat, 'psecodimdatapntsprioen%02devt%d'% (i, m), psecodimdatapntsprio[i, :, m])\n lpri[1] = lpridiff \n setattr(gmodstat, 'lpridiff', lpridiff)\n \n if gmod.typemodltran == 'pois':\n meanelem = gmodstat.paragenrscalfull[gmod.indxpara.meanelem]\n for l in gmod.indxpopl:\n lpri[2] += retr_lprbpois(gmodstat.numbelem[l], meanelem[l])\n \n for l in gmod.indxpopl:\n for g, (strgfeat, strgpdfn) in enumerate(zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l])):\n indxlpritemp = 3 + l * gmod.numbparagenrelem + g\n lpri[indxlpritemp] = retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, gmodstat.paragenrscalfull, gmodstat.dictelem, gmodstat.numbelem)\n lpritotl = np.sum(lpri)\n \n if gdat.typeverb > 1:\n print('lpritotl')\n print(lpritotl)\n \n ### log-likelihood\n initchro(gdat, gdatmodi, 'llik')\n llik = retr_llik(gdat, strgmodl, cntp['modl'])\n \n if gdat.typeverb > 2:\n print('cntp[modl]')\n summgene(cntp['modl'])\n print('np.sum(cntp[modl], (1, 2))')\n print(np.sum(cntp['modl'], (1, 2)))\n print('np.sum(gdat.cntpdata, (1, 2))')\n print(np.sum(gdat.cntpdata, (1, 2)))\n\n if gdat.booldiagmode:\n if not np.isfinite(llik).all():\n raise Exception('Likelihood is not finite.')\n \n gmodstat.lliktotl = np.sum(llik)\n if gdat.booldiagmode:\n if isinstance(gmodstat.lliktotl, np.ndarray):\n raise Exception('')\n if not np.isfinite(gmodstat.lliktotl).all():\n raise Exception('')\n\n numbdoff = gdat.numbdata - gmod.numbparagenrbase\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n numbdoff -= len(gmodstat.indxparagenrfullelem[l]['full'])\n\n setattr(gmodstat, 'llik', llik)\n setattr(gmodstat, 'llikmean', gmodstat.lliktotl / gdat.numbdata) \n setattr(gmodstat, 'llikcmea', gmodstat.lliktotl / (gdat.numbdata - numbdoff)) \n\n if gdat.typeverb > 2:\n print('llik')\n summgene(llik)\n if gdat.typeverb > 1:\n print('gmodstat.lliktotl')\n print(gmodstat.lliktotl)\n stopchro(gdat, gdatmodi, 'llik')\n\n lpostotl = lpritotl + gmodstat.lliktotl\n if gdat.typeverb > 1:\n print('lpostotl')\n print(lpostotl)\n\n setattr(gmodstat, 'lpritotl', lpritotl) \n setattr(gmodstat, 'gmodstat.lliktotl', gmodstat.lliktotl)\n setattr(gmodstat, 'lpostotl', lpostotl) \n \n stopchro(gdat, gdatmodi, 'lpri')\n \n if strgstat == 'next':\n return\n\n initchro(gdat, gdatmodi, 'tert')\n \n setattr(gmodstat, 'lpri', lpri)\n \n if gmod.numbparaelem > 0:\n setattr(gmodstat, 'lpripena', lpri[0])\n \n dicttert = {}\n \n ## load necessary variables\n \n ## derived variables\n ## residual count map \n cntp['resi'] = []\n cntp['resi'] = gdat.cntpdata - cntp['modl']\n \n setattr(gmodstat, 'cntpmodl', cntp['modl'])\n setattr(gmodstat, 'cntpresi', cntp['resi'])\n setattr(gmodstat, 'llik', llik)\n #if gmod.boollens:\n # setattr(gmodstat, 'deflhost', deflhost)\n \n if gmod.boollens:\n \n setattr(gmodstat, 'defl', defl)\n for e in gmod.indxsersfgrd:\n masshostbein = massfrombein * beinhost[e]**2\n setattr(gmodstat, 'masshostisf%dbein' % e, masshostbein)\n ### sort with respect to deflection at scale radius\n if gmod.numbparaelem > 0:\n for l 
in gmod.indxpopl:\n if gmodstat.numbelem[l] > 0:\n indxelemsortampl = np.argsort(gmodstat.dictelem[l][nameparaelemsort[l]])[::-1]\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n gmodstat.dictelem[l][nameparagenrelem + 'sort'] = gmodstat.dictelem[l][nameparagenrelem][indxelemsortampl]\n\n deflsing = np.zeros((gdat.numbpixlcart, 2, numbdeflsingplot))\n conv = np.zeros((gdat.numbpixlcart))\n convpsec = np.zeros(((gdat.numbsidecarthalf)**2))\n convpsecodim = np.zeros((gdat.numbsidecarthalf))\n if gmod.numbparaelem > 0:\n if boolelemlens:\n gmod.indxpopllens = gmod.typeelem.index('lens')\n numbdeflsing = 2\n if gmod.numbparaelem > 0:\n if boolelemlens:\n if numbelem[indxpopllens] > 0:\n numbdeflsing += min(numbdeflsubhplot, numbelem[indxpopllens]) \n numbdeflsing += 1\n for k in range(numbdeflsing):\n indxpixltemp = gdat.indxpixlcart\n if k == 0:\n # temp -- should take other sersics into account\n deflsing[indxpixltemp, :, k] = deflhost[0]\n elif k == 1:\n deflsing[indxpixltemp, :, k] = deflextr\n elif k == 2:\n deflsing[indxpixltemp, :, k] = defl - deflextr - deflhost[0]\n else:\n asca = gmodstat.dictelem[indxpopllens]['ascasort'][None, k-3]\n acut = gmodstat.dictelem[indxpopllens]['acutsort'][None, k-3]\n deflsing[listindxpixlelem[indxpopllens][k], :, k] = retr_defl(gdat, listindxpixlelem[indxpopllens][k], \\\n gmodstat.dictelem[indxpopllens]['lgalsort'][None, k-3], gmodstat.dictelem[indxpopllens]['bgalsort'][None, k-3], \\\n gmodstat.dictelem[indxpopllens]['defssort'][None, k-3], asca=asca, acut=acut)\n\n # convergence\n ## total\n conv[:] = retr_conv(gdat, defl) \n convhost = np.zeros((gmod.numbsersfgrd, gdat.numbpixlcart))\n for e in gmod.indxsersfgrd:\n convhost[e, :] = retr_conv(gdat, deflhost[e]) \n \n ### power spectrum\n #### two dimensional\n convpsec[:] = retr_psec(gdat, conv[:])\n \n #### one dimensional\n convpsecodim[:] = retr_psecodim(gdat, convpsec[:]) \n setattr(gmodstat, 'convpsec', convpsec)\n setattr(gmodstat, 'convpsecodim', convpsecodim)\n setattr(gmodstat, 'conv', conv[...])\n for e in gmod.indxsersfgrd:\n setattr(gmodstat, 'convisf%d' % e, convhost[e, ...])\n \n ## subhalos\n if gmod.numbparaelem > 0:\n if boolelemlens:\n convelem = np.zeros((gdat.numbpixl))\n convpsecelem = np.zeros(((gdat.numbsidecarthalf)**2))\n convpsecelemodim = np.zeros((gdat.numbsidecarthalf))\n ### convergence\n convelem[:] = retr_conv(gdat, deflsubh) \n ### power spectrum\n ##### two dimensional\n convpsecelem[:] = retr_psec(gdat, convelem[:])\n ##### one dimensional\n convpsecelemodim[:] = retr_psecodim(gdat, convpsecelem[:]) \n setattr(gmodstat, 'convpsecelem', convpsecelem)\n setattr(gmodstat, 'convpsecelemodim', convpsecelemodim)\n setattr(gmodstat, 'convelem', convelem[...])\n setattr(gmodstat, 'defl', defl)\n \n ### magnification\n magn = np.empty((gdat.numbpixlcart))\n histdefl = np.empty((gdat.numbdefl))\n if gmod.numbparaelem > 0 and boolelemlens:\n histdeflsubh = np.empty((gdat.numbdefl))\n deflsingmgtd = np.zeros((gdat.numbpixlcart, numbdeflsingplot))\n magn[:] = 1. 
/ retr_invm(gdat, defl) \n histdefl[:] = np.histogram(defl, bins=gdat.binspara.defl)[0]\n if gmod.numbparaelem > 0:\n if boolelemlens:\n histdeflsubh[:] = np.histogram(deflsubh, bins=gdat.binspara.deflsubh)[0]\n deflsingmgtd[:, :] = np.sqrt(np.sum(deflsing[...]**2, axis=1))\n if gmod.numbparaelem > 0:\n if boolelemlens:\n setattr(gmodstat, 'histdeflsubh', histdeflsubh)\n setattr(gmodstat, 'histdefl', histdefl)\n setattr(gmodstat, 'magn', magn[...])\n setattr(gmodstat, 'deflsing', deflsing[...])\n setattr(gmodstat, 'deflsingmgtd', deflsingmgtd[...])\n \n ## element related\n if gmod.numbparaelem > 0:\n if gdat.numbpixl == 1:\n for l in gmod.indxpopl:\n for k in range(gmodstat.numbelem[l]):\n setattr(gmodstat, 'speclinepop%d%04d' % (l, k), gmodstat.dictelem[l]['spec'][:, k])\n \n if gdat.typedata == 'mock' and strgmodl == 'true' and gdat.numbpixl > 1:\n gdat.refrlgal = [[] for l in gmod.indxpopl]\n gdat.refrbgal = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gdat.refrlgal[l] = np.tile(gmodstat.dictelem[l]['lgal'], [3] + list(np.ones(gmodstat.dictelem[l]['lgal'].ndim, dtype=int)))\n gdat.refrbgal[l] = np.tile(gmodstat.dictelem[l]['bgal'], [3] + list(np.ones(gmodstat.dictelem[l]['bgal'].ndim, dtype=int)))\n \n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lghtpntspuls':\n gmodstat.dictelem[l]['per1'] = retr_per1(gmodstat.dictelem[l]['per0'], gmodstat.dictelem[l]['magf'])\n \n if gmod.numbparaelem > 0:\n if strgstat == 'this' or gdat.boolrefeforc and strgmodl == 'fitt':\n # correlate the fitting model elements with the reference elements\n if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:\n indxelemrefrasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n indxelemfittasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n if gdat.refr.numbelem[q] == 0:\n continue\n \n indxelemfittmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)\n indxelemrefrmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)\n matrdist = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]))\n for k in range(gmodstat.numbelem[l]):\n # construct a matrix of angular distances between reference and fitting elements\n if gmod.typeelem[l].startswith('lghtline'):\n matrdist[:, k] = abs(gdat.refrelin[q][0, :] - gmodstat.dictelem[l]['elin'][k]) / gdat.refrelin[q][0, :]\n else:\n matrdist[:, k] = retr_angldist(gdat, gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k])\n indxelemrefrmatr[:, k] = np.arange(gdat.refr.numbelem[q])\n indxelemfittmatr[:, k] = k\n matrdist = matrdist.flatten()\n indxelemrefrmatr = indxelemrefrmatr.flatten()\n indxelemfittmatr = indxelemfittmatr.flatten()\n\n # take only angular separations smaller than some threshold\n indxmatrthrs = np.where(matrdist < gdat.anglassc)\n matrdist = matrdist[indxmatrthrs]\n indxelemrefrmatr = indxelemrefrmatr[indxmatrthrs]\n indxelemfittmatr = indxelemfittmatr[indxmatrthrs]\n\n # sort the remaining associations with respect to distance\n indxmatrsort = np.argsort(matrdist)\n matrdist = matrdist[indxmatrsort]\n indxelemrefrmatr = indxelemrefrmatr[indxmatrsort]\n indxelemfittmatr = indxelemfittmatr[indxmatrsort]\n \n for c in range(matrdist.size):\n if indxelemrefrmatr[c] in indxelemrefrasschits[q][l] or indxelemfittmatr[c] in indxelemfittasschits[q][l]:\n continue\n 
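                                            # greedy one-to-one matching: candidate (reference, fitting) pairs are visited in order of increasing angular distance and a pair is recorded only when neither element has already been matched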
indxelemrefrasschits[q][l].append(indxelemrefrmatr[c])\n indxelemfittasschits[q][l].append(indxelemfittmatr[c])\n \n indxelemrefrasschits[q][l] = np.array(indxelemrefrasschits[q][l])\n indxelemfittasschits[q][l] = np.array(indxelemfittasschits[q][l])\n setattr(gmodstat, 'indxelemrefrasschits', indxelemrefrasschits)\n setattr(gmodstat, 'indxelemfittasschits', indxelemfittasschits)\n \n indxelemrefrasscmiss = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n indxelemfittasscfals = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n # indices of the reference elements not associated with the fitting model elements\n if gdat.refr.numbelem[q] > 0:\n indxelemrefrasscmiss[q][l] = np.setdiff1d(np.arange(gdat.refr.numbelem[q]), indxelemrefrasschits[q][l])\n # indices of the fitting model elements not associated with the reference elements\n if gmodstat.numbelem[l] > 0:\n indxelemfittasscfals[q][l] = np.setdiff1d(np.arange(gmodstat.numbelem[l]), indxelemfittasschits[q][l])\n setattr(gmodstat, 'indxelemrefrasscmiss', indxelemrefrasscmiss)\n setattr(gmodstat, 'indxelemfittasscfals', indxelemfittasscfals)\n \n for q in gdat.indxrefr:\n if gdat.refr.numbelem[q] == 0:\n continue\n for l in gmod.indxpopl:\n # collect the associated reference element parameter for each fitting element \n for strgfeat in gdat.refr.namepara.elemonly[q][l]:\n name = strgfeat + gdat.listnamerefr[q]\n if strgfeat != 'spec' and strgfeat != 'specplot':\n refrfeat = getattr(gdat.refr, strgfeat)\n gmodstat.dictelem[l][name] = np.zeros(gmodstat.numbelem[l])\n if len(refrfeat[q]) > 0 and len(indxelemrefrasschits[q][l]) > 0:\n gmodstat.dictelem[l][name][indxelemfittasschits[q][l]] = refrfeat[q][0, indxelemrefrasschits[q][l]]\n \n print('temp')\n continue\n\n # collect the error in the associated reference element amplitude\n for strgfeat in gdat.listnameparaetotlelemcomm[q][l]:\n refrfeat = getattr(gdat.refr, strgfeat)\n if strgfeat == gmod.nameparagenrelemampl[l] and len(indxelemfittasschits[q][l]) > 0:\n gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]] = np.zeros(gmodstat.numbelem[l])\n fittfeattemp = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]\n refrfeattemp = refrfeat[q][0, indxelemrefrasschits[q][l]]\n if gdat.booldiagmode:\n if not np.isfinite(refrfeattemp).all():\n raise Exception('')\n gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]][indxelemfittasschits[q][l]] = 100. * (fittfeattemp - refrfeattemp) / refrfeattemp\n \n if gdat.boolrefeforc and strgmodl == 'fitt':\n for l in gmod.indxpopl:\n for strgfeat in gmod.namepara.genrelem[l]:\n if strgfeat in gdat.refr.namepara.elem[gdat.indxrefrforc[l]]:\n if len(indxelemrefrasschits[gdat.indxrefrforc[l]][l]) == 0:\n continue\n refrfeat = getattr(gdat.refr, strgfeat)[gdat.indxrefrforc[l]][0, indxelemrefrasschits[gdat.indxrefrforc[l]][l]]\n if len(gmodstat.dictelem[l][strgfeat]) == 0:\n continue\n lpritotl += -2. 
* np.sum(1e6 * (gmodstat.dictelem[l][strgfeat][indxelemfittasschits[gdat.indxrefrforc[l]][l]] - refrfeat)**2 / refrfeat**2)\n\n # other tertiary variables continues\n ## number of degrees of freedom\n\n chi2doff = np.sum(cntp['resi']**2 / gdat.varidata) / numbdoff\n if gdat.booldiagmode:\n if not np.isfinite(cntp['resi']).all():\n raise Exception('')\n if not np.isfinite(numbdoff):\n raise Exception('')\n if not np.isfinite(chi2doff):\n raise Exception('')\n setattr(gmodstat, 'numbdoff', numbdoff)\n setattr(gmodstat, 'chi2doff', chi2doff)\n \n if gmod.boolelempsfn and gmod.numbparaelem > 0:\n gmodstat.fwhmpsfn = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)\n \n if gmod.numbparaelem > 0:\n \n ### derived parameters\n for l in gmod.indxpopl:\n\n # luminosity\n if gmod.boolelemlght[l] and 'flux' in gmod.namepara.genrelem[l]:\n for strgfeat in gmod.namepara.genrelem[l]:\n if strgfeat.startswith('reds') and strgfeat != 'reds':\n namerefr = strgfeat[-4:]\n gmodstat.dictelem[l]['lumi' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan\n gmodstat.dictelem[l]['dlos' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan\n reds = gmodstat.dictelem[l]['reds' + namerefr]\n indxgood = np.where(np.isfinite(gmodstat.dictelem[l]['reds' + namerefr]))[0]\n if indxgood.size > 0:\n # temp -- these units only work for energy units of keV\n dlos = gdat.adisobjt(reds)\n gmodstat.dictelem[l]['dlos' + namerefr][indxgood] = dlos\n lumi = retr_lumi(gdat, gmodstat.dictelem[l]['flux'], dlos, reds)\n gmodstat.dictelem[l]['lumi' + namerefr][indxgood] = lumi\n \n if gmod.typeelem[l] == 'lghtpntsagnntrue':\n gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])\n if gmod.typeelem[l] == 'lghtpntspuls':\n gmodstat.dictelem[l]['mass'] = full([numbelem[l]], 3.)\n\n if gdat.typeverb > 2:\n print('l')\n print(l)\n if gdat.boolbinsspat:\n #### radial and angular coordinates\n gmodstat.dictelem[l]['gang'] = retr_gang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])\n gmodstat.dictelem[l]['aang'] = retr_aang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])\n \n if gmod.boolelemlght[l]:\n #### number of expected counts\n if gdat.boolbinsspat:\n gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal']], gmodstat.dictelem[l]['spec'])\n else:\n gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['elin']], gmodstat.dictelem[l]['spec'])\n \n #### delta log-likelihood\n gmodstat.dictelem[l]['deltllik'] = np.zeros(gmodstat.numbelem[l])\n if not (strgmodl == 'true' and gdat.checprio): \n if gdat.typeverb > 2:\n print('Calculating log-likelihood differences when removing elements from the model.')\n for k in range(gmodstat.numbelem[l]):\n \n # construct gdatmodi\n gdatmoditemp = tdpy.gdatstrt()\n gdatmoditemp.this = tdpy.gdatstrt()\n gdatmoditemp.next = tdpy.gdatstrt()\n gdatmoditemp.this.indxelemfull = gmodstat.indxelemfull\n gdatmoditemp.this.paragenrscalfull = gmodstat.paragenrscalfull\n gdatmoditemp.this.paragenrunitfull = gmodstat.paragenrunitfull\n\n prop_stat(gdat, gdatmoditemp, strgmodl, deth=True, thisindxpopl=l, thisindxelem=k)\n proc_samp(gdat, gdatmoditemp, 'next', strgmodl)#, boolinit=boolinit)\n \n if gdat.booldiagmode:\n if not np.isfinite(gmodstat.lliktotl):\n raise Exception('')\n \n gdatobjttemp = retr_gdatobjt(gdat, gdatmoditemp, strgmodl)#, boolinit=boolinit)\n nextlliktotl = gdatobjttemp.next.lliktotl\n gmodstat.dictelem[l]['deltllik'][k] = gmodstat.lliktotl - nextlliktotl\n \n if gdat.typeverb 
> 2:\n print('deltllik calculation ended.')\n \n # more derived parameters\n if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):\n ### PSF FWHM\n if gdat.typepixl == 'cart':\n fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)\n setattr(gmodstat, 'fwhm', fwhm)\n \n if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:\n \n if gmod.numbparaelem > 0:\n sbrt['dfnctotl'] = np.zeros_like(gdat.expo)\n sbrt['dfncsubt'] = np.zeros_like(gdat.expo)\n sbrt['dfncsupt'] = np.zeros_like(gdat.expo)\n for l in gmod.indxpopl:\n if gmod.boolcalcerrr[l]:\n sbrt['dfncfull'] = np.zeros_like(gdat.expo)\n if gmod.boolelemsbrt[l]:\n for k in range(gmodstat.numbelem[l]):\n \n # read normalization from the element dictionary\n if gmod.boolelemlght[l]:\n varbamplextd = gmodstat.dictelem[l]['spec'][:, k]\n if gmod.typeelem[l].startswith('clus'):\n varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]\n \n # calculate imprint on the element surface brightness state variable\n if gmod.boolelempsfn[l]:\n sbrttemp = retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \\\n varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])\n indxpixltemp = listindxpixlelem[l][k]\n\n if gmod.typeelem[l].startswith('lghtline'):\n sbrttemp = gmodstat.dictelem[l]['spec'][:, k, None, None]\n \n # add it to the state variable depending on the significance\n sbrt['dfnctotl'][:, indxpixltemp, :] += sbrttemp\n if gmodstat.dictelem[l]['deltllik'][k] > 35:\n sbrt['dfncsupt'][:, indxpixltemp, :] += sbrttemp\n if gmodstat.dictelem[l]['deltllik'][k] < 35:\n sbrt['dfncsubt'][:, indxpixltemp, :] += sbrttemp\n \n # calculate imprint without PSF truncation to calculate approximation errors\n if gmod.boolcalcerrr[l]:\n sbrt['dfncfull'][:, :, :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \\\n varbamplextd, gmodstat.psfnintp, gdat.indxpixl)\n \n setattr(gmodstat, 'sbrtdfncsubtpop%d' % l, sbrt['dfncsubt'])\n \n if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:\n if gdat.booldiagmode:\n numbtemp = 0\n for l in gmod.indxpopl:\n if gmod.boolelemsbrtextsbgrd[l]:\n numbtemp += np.sum(gmodstat.numbelem[l])\n if numbtemp > 0 and (sbrtextsbgrd == 0.).all():\n raise Exception('')\n\n sbrt['bgrdexts'] = sbrtextsbgrd\n\n #### count maps\n cntp = dict()\n for name in gmod.listnamegcom:\n cntp[name] = retr_cntp(gdat, sbrt[name])\n setattr(gmodstat, 'cntp' + name, cntp[name])\n \n ### spatial averages\n sbrtmean = dict()\n sbrtstdv = dict()\n for name in gmod.listnamegcom:\n sbrtmean[name], sbrtstdv[name] = retr_spatmean(gdat, sbrt[name])\n for b in gdat.indxspatmean:\n setattr(gmodstat, 'sbrt%smea%d' % (name, b), sbrtmean[name][b])\n setattr(gmodstat, 'sbrt%sstd%d' % (name, b), sbrtstdv[name][b])\n \n if gmod.numbparaelem > 0:\n if gmod.boolelemsbrtdfncanyy:\n for i in gdat.indxener:\n if 'dark' in gmod.listnamegcom:\n fracsdenmeandarkdfncsubt = sbrtmean['dfncsubt'][0][0][i] / (sbrtmean['dfncsubt'][0][0][i] + sbrtmean['dark'][0][0][i])\n else:\n fracsdenmeandarkdfncsubt = 1.\n setattr(gmodstat, 'fracsdenmeandarkdfncsubten%02d' % i, np.array([fracsdenmeandarkdfncsubt]))\n \n if 'dark' in gmod.listnamegcom:\n booldfncsubt = float(np.where(sbrtmean['dfncsubt'][0][0] > sbrtmean['dark'][0][0])[0].any())\n else:\n booldfncsubt = 1.\n setattr(gmodstat, 'booldfncsubt', np.array([booldfncsubt]))\n\n # find the 1-point function of the count maps of all emission components including the total emission\n for name in 
gmod.listnamegcom:\n namehistcntp = 'histcntp' + name\n for m in gdat.indxevtt:\n if gdat.numbevtt > 1:\n namehistcntp += 'evt%d' % m\n for i in gdat.indxener: \n if gdat.numbener > 1:\n namehistcntp += 'en%02d' % i\n \n histcntp = np.histogram(cntp[name][i, :, m], bins=gdat.binspara.cntpmodl)[0]\n setattr(gmodstat, namehistcntp, histcntp)\n \n if False and i == 0 and m == 0 and (name == 'dfnc' or name == 'dfncsubt'):\n for strgbins in ['lowr', 'higr']:\n strgtemp = 'histcntp' + strgbins + name + 'en%02devt%d' % (i, m)\n if strgbins == 'lowr':\n setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[:gdat.numbtickcbar-1]))]))\n else:\n setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[gdat.numbtickcbar-1:]))]))\n else:\n histcntp = np.histogram(cntp[name][:, 0, m], bins=gdat.binspara.cntpmodl)[0]\n setattr(gmodstat, 'histcntp' + name + 'evt%d' % m, histcntp)\n\n if gmod.boollens:\n if strgmodl == 'true':\n s2nr = []\n s2nr = cntp['lens'] / np.sqrt(cntp['modl'])\n setattr(gmodstat, 's2nr', s2nr)\n cntplensgrad = np.empty((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt, 2))\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n cntplenstemp = np.zeros(gdat.numbpixlcart)\n cntplenstemp[gdat.indxpixlrofi] = cntp['lens'][i, :, m]\n cntplensgrad[i, :, m, :] = retr_gradmaps(gdat, cntplenstemp) * gdat.sizepixl\n \n cntplensgradmgtd = np.sqrt(np.sum(cntplensgrad**2, axis=3))\n cntplensgrad *= gdat.sizepixl\n indx = np.where(np.fabs(cntplensgrad) > 1. * gdat.sizepixl)\n cntplensgrad[indx] = np.sign(cntplensgrad[indx]) * 1. * gdat.sizepixl\n deflmgtd = np.sqrt(np.sum(defl**2, axis=1))\n setattr(gmodstat, 'deflmgtd', deflmgtd)\n setattr(gmodstat, 'cntplensgrad', cntplensgrad)\n setattr(gmodstat, 'cntplensgradmgtd', cntplensgradmgtd)\n\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n if gmod.boolelemlght[l]:\n #### spectra\n if gdat.boolbinsspat:\n sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]\n gmodstat.dictelem[l]['specplot'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], \\\n curv=gmodstat.dictelem[l]['curv'], expc=gmodstat.dictelem[l]['expc'], \\\n sindcolr=sindcolr, spectype=gmod.spectype[l], plot=True)\n \n if gdat.typedata == 'inpt':\n if gdat.typeexpr == 'ferm':\n # temp\n try:\n gmodstat.dictelem[l]['sbrt0018'] = gdat.sbrt0018objt(gmodstat.dictelem[l]['bgal'], gmodstat.dictelem[l]['lgal'])\n except:\n gmodstat.dictelem[l]['sbrt0018'] = gmodstat.dictelem[l]['bgal'] * 0.\n\n if gmod.typeelem[l] == 'lens':\n #### distance to the source\n if gmod.boollens:\n gmodstat.dictelem[l]['diss'] = retr_angldist(gdat, gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], lgalsour, bgalsour)\n \n if gmod.boollenssubh:\n gmodstat.dictelem[l]['deflprof'] = np.empty((gdat.numbanglfull, gmodstat.numbelem[l]))\n gmodstat.dictelem[l]['mcut'] = np.empty(gmodstat.numbelem[l])\n gmodstat.dictelem[l]['rele'] = np.empty(gmodstat.numbelem[l])\n gmodstat.dictelem[l]['reln'] = np.empty(gmodstat.numbelem[l])\n gmodstat.dictelem[l]['relk'] = np.empty(gmodstat.numbelem[l])\n gmodstat.dictelem[l]['relf'] = np.empty(gmodstat.numbelem[l])\n gmodstat.dictelem[l]['reld'] = np.empty(gmodstat.numbelem[l])\n gmodstat.dictelem[l]['relc'] = np.empty(gmodstat.numbelem[l])\n gmodstat.dictelem[l]['relm'] = np.empty(gmodstat.numbelem[l])\n\n # temp -- this can be placed earlier in the code\n cntplensobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \\\n cntp['lens'][ii, :, mm].reshape((gdat.numbsidecart, 
gdat.numbsidecart)).T)\n \n for k in np.arange(gmodstat.numbelem[l]):\n \n asca = gmodstat.dictelem[l]['asca'][k]\n acut = gmodstat.dictelem[l]['acut'][k]\n \n #### deflection profiles\n gmodstat.dictelem[l]['deflprof'][:, k] = retr_deflcutf(gdat.meanpara.anglfull, gmodstat.dictelem[l]['defs'][k], asca, acut)\n \n ### truncated mass \n gmodstat.dictelem[l]['mcut'][k] = retr_mcut(gdat, gmodstat.dictelem[l]['defs'][k], asca, acut, adishost, mdencrit)\n\n #### dot product with the source flux gradient\n # temp -- weigh the energy and PSF bins\n gmodstat.dictelem[l]['rele'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \\\n gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)\n \n gmodstat.dictelem[l]['relf'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \\\n gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, cntpmodl=cntp['modl'][0, :, 0])\n \n deflelem = retr_defl(gdat, gdat.indxpixl, gmodstat.dictelem[l]['lgal'][k], \\\n gmodstat.dictelem[l]['bgal'][k], gmodstat.dictelem[l]['defs'][k], asca=asca, acut=acut)\n bgalprim = gdat.bgalgrid - deflelem[:, 1]\n lgalprim = gdat.lgalgrid - deflelem[:, 0]\n gmodstat.dictelem[l]['relm'][k] = np.mean(abs(cntp['lens'][0, :, 0] - cntplensobjt(bgalprim, lgalprim, grid=False).flatten()))\n \n \n gmodstat.dictelem[l]['relk'][k] = gmodstat.dictelem[l]['relm'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl\n gmodstat.dictelem[l]['reln'][k] = gmodstat.dictelem[l]['rele'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl\n gmodstat.dictelem[l]['reld'][k] = retr_rele(gdat, gdat.cntpdata[0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \\\n gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)\n gmodstat.dictelem[l]['relc'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \\\n gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, absv=False) / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl\n \n ### distribution of element parameters and features\n #### calculate the model filter\n listindxelemfilt = [[[] for l in gmod.indxpopl] for namefilt in gdat.listnamefilt]\n for k, namefilt in enumerate(gdat.listnamefilt):\n for l in gmod.indxpopl:\n if namefilt == '':\n listindxelemfilt[k][l] = np.arange(gmodstat.numbelem[l])\n if namefilt == 'imagbndr':\n listindxelemfilt[k][l] = np.where((np.fabs(gmodstat.dictelem[l]['lgal']) < gdat.maxmgangdata) & (np.fabs(gmodstat.dictelem[l]['bgal']) < gdat.maxmgangdata))[0]\n if namefilt == 'deltllik':\n listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['deltllik'] > 0.5 * gmod.numbparagenrelemsing[l])[0]\n if namefilt == 'nrel':\n listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['reln'] > 0.3)[0]\n \n for l in gmod.indxpopl:\n # histograms of element parameters\n for namefrst in gmod.namepara.elem[l]:\n \n ## one dimensional\n if namefrst[:-4] == 'etag':\n continue\n if namefrst == 'specplot' or namefrst == 'deflprof':\n continue\n elif namefrst == 'spec':\n histfrst = np.zeros((gdat.numbbinsplot, gdat.numbener))\n for i in gdat.indxener:\n histfrst[:, i] = np.histogram(gmodstat.dictelem[l]['spec'][i, listindxelemfilt[0][l]], gdat.binspara.spec)[0]\n elif namefrst == 'cnts':\n histfrst = np.histogram(gmodstat.dictelem[l]['cnts'][listindxelemfilt[0][l]], gdat.binspara.cnts)[0]\n else:\n #elif not (namefrst == 'curv' and gmod.spectype[l] != 'curv' or namefrst == 'expc' \\\n # 
and gmod.spectype[l] != 'expc' or namefrst.startswith('sindarry') and \\\n # gmod.spectype[l] != 'colr'):\n binsfrst = getattr(gdat.binspara, namefrst)\n #if len(gmodstat.dictelem[l][namefrst]) > 0 and len(listindxelemfilt[0][l]) > 0:\n histfrst = np.histogram(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], binsfrst)[0]\n strgvarb = 'hist' + namefrst + 'pop%d' % l\n setattr(gmodstat, strgvarb, histfrst)\n \n #### two dimensional\n for nameseco in gmod.namepara.elem[l]:\n if namefrst == 'spec' or namefrst == 'specplot' or namefrst == 'deflprof' or \\\n nameseco == 'spec' or nameseco == 'specplot' or nameseco == 'deflprof':\n continue\n \n if not checstrgfeat(namefrst, nameseco):\n continue\n\n binsseco = getattr(gdat.binspara, nameseco)\n histtdim = np.histogram2d(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], \\\n gmodstat.dictelem[l][nameseco][listindxelemfilt[0][l]], [binsfrst, binsseco])[0]\n \n setattr(gmodstat, 'hist' + namefrst + nameseco + 'pop%d' % l, histtdim)\n \n ### priors on element parameters and features\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n xdat = gmodstat.dictelem[l][nameparagenrelem]\n minm = getattr(gmod.minmpara, nameparagenrelem + 'pop%d' % l)\n maxm = getattr(gmod.maxmpara, nameparagenrelem + 'pop%d' % l)\n scal = getattr(gmod.scalpara, nameparagenrelem + 'pop%d' % l)\n booltemp = False\n if scal.startswith('expo') or scal.startswith('dexp'):\n if scal.startswith('expo'):\n if scal == 'expo':\n sexp = getattr(gmod, 'gangdistsexppop%d' % l)\n else:\n sexp = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]\n pdfn = pdfn_expo(xdat, maxm, sexp)\n if scal.startswith('dexp'):\n pdfn = pdfn_dnp.exp(xdat, maxm, scal)\n booltemp = True\n if scal.startswith('self') or scal.startswith('logt'):\n if scal.startswith('self'):\n pdfn = 1. / (maxm - minm) + np.zeros_like(xdat)\n else:\n pdfn = 1. 
/ (np.log(maxm) - np.log(minm)) + np.zeros_like(xdat)\n booltemp = True\n # temp \n if scal.startswith('powr'):\n slop = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem + 'pop%d' % l)]\n pdfn = pdfn_powr(xdat, minm, maxm, slop)\n booltemp = True\n if scal.startswith('dpowslopbrek'):\n pdfn = pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr)\n booltemp = True\n if scal == 'lnormeanstdv':\n pdfn = pdfn_lnor(xdat, meanlnor, stdvlnor)\n booltemp = True\n if scal.startswith('igam'):\n cutf = getattr(gdat, 'cutf' + nameparagenrelem)\n pdfn = pdfn_igam(xdat, slop, cutf)\n booltemp = True\n if scal.startswith('gaus'):\n # this does not work for mismodeling\n meanvarb = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]\n stdv = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]\n if nameparagenrelem == 'expc' and gmod.spectype[l] == 'expc':\n pdfn = pdfn_gaus(xdat, meanvarb, stdv)\n else:\n pdfn = pdfn_gaus(xdat, meanvarb, stdv)\n booltemp = True\n \n # temp -- meanelem will not be defined\n #if booltemp:\n # gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'] = gmodstat.numbelem[l] * pdfn * np.interp(xdat, xdatplot, delt)\n \n #setattr(gmodstat, 'hist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])\n #if strgmodl == 'true':\n # setattr(gmodstat, 'refrhist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])\n \n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lens':\n if gmodstat.numbelem[l] > 0:\n ## total truncated mass of the subhalo as a cross check\n # temp -- generalize\n asca = gmodstat.dictelem[l]['asca']\n acut = gmodstat.dictelem[l]['acut']\n factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut) \n masssubh = np.array([np.sum(factmcutfromdefs * gmodstat.dictelem[l]['defs'])])\n \n ## derived variables as a function of other derived variables\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lghtpntspuls'):\n massshel = np.empty(gdat.numbanglhalf)\n for k in gdat.indxanglhalf:\n indxelemshel = np.where((gdat.binspara.anglhalf[k] < gmodstat.dictelem[l]['gang']) & (gmodstat.dictelem[l]['gang'] < gdat.binspara.anglhalf[k+1]))\n massshel[k] = np.sum(gmodstat.dictelem[l]['mass'][indxelemshel])\n setattr(gmodstat, 'massshelpop%d' % l, massshel)\n \n if gmod.boollens or gmod.numbparaelem > 0 and gmod.boollenssubh:\n # find the host, subhalo masses and subhalo mass fraction as a function of halo-centric radius\n listnametemp = gdat.liststrgcalcmasssubh\n listnamevarbmass = []\n listnamevarbmassscal = []\n listnamevarbmassvect = []\n for e in gmod.indxsersfgrd:\n if boolllenshost:\n listnamevarbmassscal += ['masshosttotl']\n for strgtemp in listnametemp:\n listnamevarbmassvect.append('masshostisf%d' % e + strgtemp)\n listnamevarbmassscal.append('masshostisf%d' % e + strgtemp + 'bein')\n if gmod.numbparaelem > 0 and gmod.boollenssubh:\n listnamevarbmassscal.append('masssubhtotl')\n listnamevarbmassscal.append('fracsubhtotl')\n for strgtemp in listnametemp:\n listnamevarbmassvect.append('masssubh' + strgtemp)\n listnamevarbmassvect.append('fracsubh' + strgtemp)\n listnamevarbmassscal.append('masssubh' + strgtemp + 'bein')\n listnamevarbmassscal.append('fracsubh' + strgtemp + 'bein')\n\n for name in listnamevarbmassvect:\n dicttert[name] = np.zeros(gdat.numbanglhalf)\n if 'isf' in name:\n 
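                    # recover the index of the Sersic foreground component from the digit that follows 'isf' in the variable name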
indxisfrtemp = int(name.split('isf')[1][0])\n angl = np.sqrt((gdat.meanpara.lgalcartmesh - lgalhost[indxisfrtemp])**2 + (gdat.meanpara.bgalcartmesh - bgalhost[indxisfrtemp])**2).flatten()\n for k in gdat.indxanglhalf:\n if name[4:8] == 'host':\n convtemp = conv[:]\n if name[4:8] == 'subh':\n convtemp = convelem[:]\n \n if name.endswith('delt'):\n indxpixl = np.where((gdat.binspara.anglhalf[k] < angl) & (angl < gdat.binspara.anglhalf[k+1]))[0]\n dicttert[name][k] = 1e6 * np.sum(convtemp[indxpixl]) * mdencrit * \\\n gdat.apix * adishost**2 / 2. / np.pi * gdat.deltanglhalf[k] / gdat.meanpara.anglhalf[k]\n if name.endswith('intg'):\n indxpixl = np.where(angl < gdat.meanpara.anglhalf[k])[0]\n dicttert[name][k] = np.sum(convtemp[indxpixl]) * mdencrit * gdat.apix * adishost**2\n \n if name[:4] == 'frac':\n masshosttotl = 0.\n for e in gmod.indxsersfgrd:\n masshosttotl += dicttert['masshostisf%d' % e + name[-4:]][k]\n if masshosttotl != 0.:\n dicttert['fracsubh' + name[8:]][k] = dicttert['masssubh' + name[8:]][k] / masshosttotl\n setattr(gmodstat, name, dicttert[name])\n \n # interpolate the host, subhalo masses and subhalo mass fraction at the Einstein radius and save it as a scalar variable\n dicttert[name + 'bein'] = np.interp(beinhost, gdat.meanpara.anglhalf, dicttert[name])\n setattr(gmodstat, name + 'bein', dicttert[name + 'bein'])\n \n #if gmod.numbparaelem > 0:\n # ## copy element parameters to the global object\n # feat = [[] for l in gmod.indxpopl]\n # for l in gmod.indxpopl:\n # feat[l] = dict()\n # for strgfeat in gmod.namepara.genrelem[l]:\n # if strgfeat[:-4] == 'etag':\n # continue\n # if len(gmodstat.dictelem[l][strgfeat]) > 0:\n # if strgmodl == 'true':\n # shap = list(np.ones(gmodstat.dictelem[l][strgfeat].ndim, dtype=int))\n # feat[l][strgfeat] = np.tile(gmodstat.dictelem[l][strgfeat], [3] + shap)\n # if strgmodl == 'fitt':\n # feat[l][strgfeat] = gmodstat.dictelem[l][strgfeat]\n # \n # #for strgfeat in gmod.namepara.elem:\n # # feattemp = [[] for l in gmod.indxpopl]\n # # for l in gmod.indxpopl:\n # # if strgfeat in gmod.namepara.genrelem[l]:\n # # if strgfeat in feat[l]:\n # # feattemp[l] = feat[l][strgfeat]\n # # else:\n # # feattemp[l] = np.array([])\n # # setattr(gmodstat, strgfeat, feattemp)\n \n # copy true state to the reference state\n #if strgmodl == 'true':\n # for name, valu in deepcopy(gdat.__dict__).items():\n # if name.startswith('true'):\n # #indx = name.find('pop')\n # #if indx != -1 and not name.endswith('pop') and name[indx+3].isdigit():\n # # namerefr = name.replace('pop%s' % name[indx+3], 'ref%s' % name[indx+3])\n # #else:\n # # namerefr = name\n # #namerefr = name\n # #namerefr = namerefr.replace('true', 'refr')\n # name = name.replace('true', 'refr')\n # setattr(gdat, name, valu)\n \n if gmod.numbparaelem > 0 and gdat.priofactdoff != 0.:\n if strgmodl == 'true':\n for q in gdat.indxrefr:\n for strgfeat in gdat.refr.namepara.elem[q]:\n \n if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':\n continue\n reca = np.zeros(gdat.numbbinsplot) - 1.\n \n indxelempars = np.where(gmodstat.dictelem[q]['deltllik'] > 2.5)[0]\n \n refrhistpars = np.zeros(gdat.numbbinsplot) - 1.\n \n histparaelem = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)\n indxrefrgood = np.where(histparaelem > 0)[0]\n reca[indxrefrgood] = 0.\n refrhistpars[indxrefrgood] = 0.\n refrhist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)\n\n bins = getattr(gdat.binspara, strgfeat)\n if len(indxelempars) > 0:\n refrhistpars = 
np.histogram(gmodstat.dictelem[q][strgfeat][indxelempars], bins=bins)[0].astype(float)\n if indxrefrgood.size > 0:\n reca[indxrefrgood] = refrhistpars[indxrefrgood] / refrhist[indxrefrgood]\n \n setattr(gmodstat, 'histpars' + strgfeat + 'pop%d' % q, refrhistpars)\n setattr(gmodstat, 'reca' + strgfeat + 'pop%d' % q, reca)\n \n print('gdat.rtagmock')\n print(gdat.rtagmock)\n if gdat.rtagmock is not None:\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n for strgfeat in gmod.namepara.genrelem[l]:\n if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':# or strgfeat.startswith('aerr'):\n continue\n if strgfeat in gmod.namepara.genrelem[l]:\n hist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % l)\n reca = getattr(gdat.true.this, 'reca' + strgfeat + 'pop%d' % l)\n histcorrreca = hist / reca\n setattr(gmodstat, 'histcorrreca' + strgfeat + 'pop%d' % l, histcorrreca)\n\n ### Exculusive comparison with the true state\n if strgmodl == 'fitt' and gdat.typedata == 'mock':\n if gmod.boollens:\n numbsingcomm = min(deflsing.shape[2], gmod.deflsing.shape[2])\n deflsingresi = deflsing[0, ..., :numbsingcomm] - gmod.deflsing[..., :numbsingcomm]\n deflsingresimgtd = np.sqrt(np.sum(deflsingresi**2, axis=1))\n deflsingresiperc = 100. * deflsingresimgtd / gmod.deflsingmgtd[..., :numbsingcomm]\n setattr(gmodstat, 'numbsingcomm', numbsingcomm)\n setattr(gmodstat, 'deflsingresi', deflsingresi)\n truedeflmgtd = getattr(gdat.true.this, 'deflmgtd')\n truedefl = getattr(gdat.true.this, 'defl')\n deflresi = defl - truedefl\n deflresimgtd = np.sqrt(np.sum(deflresi**2, axis=1))\n deflresiperc = 100. * deflresimgtd / truedeflmgtd\n setattr(gmodstat, 'deflresi', deflresi)\n setattr(gmodstat, 'deflresimgtd', deflresimgtd)\n if gmod.numbparaelem > 0:\n trueconvelem = getattr(gdat.true.this, 'convelem')\n convelemresi = convelem[:] - trueconvelem\n convelemresiperc = 100. * convelemresi / trueconvelem\n setattr(gmodstat, 'convelemresi', convelemresi)\n setattr(gmodstat, 'convelemresiperc', convelemresiperc)\n truemagn = getattr(gdat.true.this, 'magn')\n magnresi = magn[:] - truemagn\n magnresiperc = 100. * magnresi / truemagn\n setattr(gmodstat, 'magnresi', magnresi)\n setattr(gmodstat, 'magnresiperc', magnresiperc)\n \n if gmod.numbparaelem > 0:\n \n # correlate the catalog sample with the reference catalog\n if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:\n \n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n if gdat.refr.numbelem[q] > 0:\n cmpl = np.array([float(len(indxelemrefrasschits[q][l])) / gdat.refr.numbelem[q]])\n if gdat.booldiagmode:\n if cmpl > 1. or cmpl < 0.:\n raise Exception('')\n else:\n cmpl = np.array([-1.])\n setattr(gmodstat, 'cmplpop%dpop%d' % (l, q), cmpl)\n if gmodstat.numbelem[l] > 0:\n fdis = np.array([float(indxelemfittasscfals[q][l].size) / gmodstat.numbelem[l]])\n if gdat.booldiagmode:\n if fdis > 1. 
or fdis < 0.:\n raise Exception('')\n else:\n fdis = np.array([-1.])\n setattr(gmodstat, 'fdispop%dpop%d' % (q, l), fdis)\n \n # collect the associated fitting element parameter for each reference element\n featrefrassc = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n featrefrassc[q][l] = dict()\n for strgfeat in gdat.refr.namepara.elem[q]:\n if not strgfeat in gmod.namepara.genrelem[l] or strgfeat in gdat.refr.namepara.elemonly[q][l]:\n continue\n if isinstance(gmodstat.dictelem[l][strgfeat], np.ndarray) and gmodstat.dictelem[l][strgfeat].ndim > 1:\n continue\n featrefrassc[q][l][strgfeat] = np.zeros(gdat.refr.numbelem[q]) + np.nan\n if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][strgfeat]) > 0:\n featrefrassc[q][l][strgfeat][indxelemrefrasschits[q][l]] = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]\n name = strgfeat + 'asscpop%dpop%d' % (q, l)\n setattr(gmodstat, name, featrefrassc[q][l][strgfeat])\n \n # completeness\n for q in gdat.indxrefr:\n if gdat.refr.numbelem[q] == 0:\n continue\n \n l = gdat.refr.indxpoplfittassc[q]\n \n for nameparaelemfrst in gdat.refr.namepara.elem[q]:\n \n if nameparaelemfrst.startswith('etag'):\n continue\n \n if nameparaelemfrst == 'spec' or nameparaelemfrst == 'specplot':\n continue\n \n refrfeatfrst = gdat.refr.dictelem[q][nameparaelemfrst][0, :]\n binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)\n \n for nameparaelemseco in gdat.refr.namepara.elem[q]:\n if nameparaelemfrst == nameparaelemseco:\n continue\n \n if nameparaelemseco.startswith('etag'):\n continue\n \n if nameparaelemseco == 'spec' or nameparaelemseco == 'specplot':\n continue\n \n if not checstrgfeat(nameparaelemfrst, nameparaelemseco):\n continue\n \n # temp -- the size of the cmpl np.array should depend on strgmodl\n cmpltdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot)) - 1.\n \n if len(indxelemrefrasschits[q][l]) > 0:\n refrhistfeattdim = getattr(gdat.refr, 'hist%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q))\n refrfeatseco = gdat.refr.dictelem[q][nameparaelemseco][0, :]\n binsfeatseco = getattr(gdat.binspara, nameparaelemseco)\n \n refrhistfeattdimassc = np.histogram2d(refrfeatfrst[indxelemrefrasschits[q][l]], \\\n refrfeatseco[indxelemrefrasschits[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]\n indxgood = np.where(refrhistfeattdim != 0.)\n if indxgood[0].size > 0:\n cmpltdim[indxgood] = refrhistfeattdimassc[indxgood].astype(float) / refrhistfeattdim[indxgood]\n if gdat.booldiagmode:\n if np.where((cmpltdim[indxgood] > 1.) | (cmpltdim[indxgood] < 0.))[0].size > 0:\n raise Exception('')\n setattr(gmodstat, 'cmpl%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q), cmpltdim)\n\n cmplfrst = np.zeros(gdat.numbbinsplot) - 1.\n if len(indxelemrefrasschits[q][l]) > 0:\n refrhistfeatfrst = getattr(gdat.refr, 'hist' + nameparaelemfrst + 'pop%d' % q)\n binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)\n refrhistfeatfrstassc = np.histogram(refrfeatfrst[indxelemrefrasschits[q][l]], bins=binsfeatfrst)[0]\n indxgood = np.where(refrhistfeatfrst != 0.)[0]\n if indxgood.size > 0:\n cmplfrst[indxgood] = refrhistfeatfrstassc[indxgood].astype(float) / refrhistfeatfrst[indxgood]\n if gdat.booldiagmode:\n if np.where((cmplfrst[indxgood] > 1.) 
| (cmplfrst[indxgood] < 0.))[0].size > 0:\n raise Exception('')\n \n setattr(gmodstat, 'cmpl%spop%d' % (nameparaelemfrst, q), cmplfrst)\n \n # false discovery rate\n for l in gmod.indxpopl:\n q = gmod.indxpoplrefrassc[l]\n \n for nameparaelemfrst in gmod.namepara.elem[l]:\n \n binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)\n for nameparaelemseco in gmod.namepara.elem[l]:\n \n if not checstrgfeat(nameparaelemfrst, nameparaelemseco):\n continue\n \n # temp -- the size of the fdis np.array should depend on strgmodl\n fdistdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot))\n \n if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemseco]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0: \n strgfeattdim = nameparaelemfrst + nameparaelemseco + 'pop%d' % l\n fitthistfeattdim = getattr(gmodstat, 'hist' + strgfeattdim)\n binsfeatseco = getattr(gdat.binspara, nameparaelemseco)\n \n fitthistfeattdimfals = np.histogram2d(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], \\\n gmodstat.dictelem[l][nameparaelemseco][indxelemfittasscfals[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]\n indxgood = np.where(fitthistfeattdim != 0.)\n if indxgood[0].size > 0:\n fdistdim[indxgood] = fitthistfeattdimfals[indxgood].astype(float) / fitthistfeattdim[indxgood]\n if gdat.booldiagmode:\n if np.where((fdistdim[indxgood] > 1.) | (fdistdim[indxgood] < 0.))[0].size > 0:\n raise Exception('')\n \n setattr(gmodstat, 'fdis%s%spop%d' % (nameparaelemfrst, nameparaelemseco, l), fdistdim)\n \n fdisfrst = np.zeros(gdat.numbbinsplot)\n if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:\n binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)\n fitthistfeatfrstfals = np.histogram(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], bins=binsfeatfrst)[0]\n fitthistfeatfrst = getattr(gmodstat, 'hist' + nameparaelemfrst + 'pop%d' % l)\n indxgood = np.where(fitthistfeatfrst != 0.)[0]\n if indxgood.size > 0:\n fdisfrst[indxgood] = fitthistfeatfrstfals[indxgood].astype(float) / fitthistfeatfrst[indxgood]\n if gdat.booldiagmode:\n if np.where((fdisfrst[indxgood] > 1.) | (fdisfrst[indxgood] < 0.))[0].size > 0:\n raise Exception('')\n \n setattr(gmodstat, 'fdis%spop%d' % (nameparaelemfrst, l), fdisfrst)\n \n # temp\n if strgmodl == 'true' and gdat.typeverb > 0:\n for l in gmod.indxpopl:\n for strgfeat in gmod.namepara.genrelem[l]:\n minm = getattr(gmod.minmpara, strgfeat)\n maxm = getattr(gmod.maxmpara, strgfeat)\n if np.where(minm > gmodstat.dictelem[l][strgfeat])[0].size > 0 or np.where(maxm < gmodstat.dictelem[l][strgfeat])[0].size > 0:\n print('Warning: element parameter outside the plot limits.')\n print('l')\n print(l)\n print('Feature: ')\n print(strgfeat)\n print('Plot minmimum')\n print(minm)\n print('Plot maxmimum')\n print(maxm)\n if strgfeat == gmod.nameparagenrelemampl[l] and strgfeat in gmod.namepara.genrelem[l]:\n gmod.indxparagenrelemtemp = gmod.namepara.genrelem[l].index(strgfeat)\n if (gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp] != 'gaus' and not gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp].startswith('lnor')):\n raise Exception('')\n stopchro(gdat, gdatmodi, 'tert')\n \n \ndef retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, paragenrscalfull, dictelem, numbelem):\n \n gmod = getattr(gdat, strgmodl)\n \n if strgpdfn == 'self':\n minmfeat = getattr(gmod.minmpara, strgfeat)\n maxmfeat = getattr(gmod.maxmpara, strgfeat)\n lpri = numbelem[l] * np.log(1. 
/ (maxmfeat - minmfeat))\n if strgpdfn == 'logt':\n lpri = retr_lprilogtdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)\n if strgpdfn == 'gaus':\n lpri = retr_lprigausdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)\n if strgpdfn == 'dexp':\n maxmbgal = getattr(gmod, 'maxmbgal')\n gmod.indxpara.bgaldistscal = getattr(gmod.indxpara, 'bgaldistscalpop%d' % l)\n lpri = np.sum(np.log(pdfn_dnp.exp(dictelem[l]['bgal'], maxmbgal, paragenrscalfull[gmod.indxpara.bgaldistscal]))) \n if strgpdfn == 'expo':\n maxmgang = getattr(gmod, 'maxmgang')\n gang = retr_gang(dictelem[l]['lgal'], dictelem[l]['bgal'])\n gmod.indxpara.gangdistscal = getattr(gmod.indxpara, 'gangdistscalpop%d' % l)\n lpri = np.sum(np.log(pdfn_expo(gang, maxmgang, paragenrscalfull[gmod.indxpara.gangdistscal]))) \n lpri = -numbelem[l] * np.log(2. * pi) \n if strgpdfn == 'tmpl':\n lpri = np.sum(lpdfspatprioobjt(dictelem[l]['bgal'], dictelem[l]['lgal'], grid=False))\n if strgpdfn == 'powr':\n lpri = retr_lpripowrdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)\n if strgpdfn == 'dpowslopbrek':\n lpri = retr_lpridpowdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)\n if strgpdfn == 'dsrcexpo':\n lpri += -np.sum(np.sqrt((dictelem[l]['lgal'] - lgalsour)**2 + (dictelem[l]['bgal'] - bgalsour)**2) / \\\n getattr(gmod, 'dsrcdistsexppop%d' % l))\n if strgpdfn == 'tmpl':\n if strgpdfn.endswith('cons'):\n pdfnspatpriotemp = getattr(gmod, 'pdfnspatpriotemp')\n spatdistcons = paragenrscalfull[getattr(gmod.indxpara, 'spatdistcons')]\n lpdfspatprio, lpdfspatprioobjt = retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons)\n lpdfspatpriointp = lpdfspatprioobjt(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart)\n lpdfspatpriointp = lpdfspatpriointp.T\n setattr(gmodstat, 'lpdfspatpriointp', lpdfspatpriointp)\n setattr(gmodstat, 'lpdfspatprioobjt', lpdfspatprioobjt)\n else:\n lpdfspatprioobjt = gmod.lpdfspatprioobjt\n \n return lpri\n\n\ndef checstrgfeat(strgfrst, strgseco):\n\n numbfrst = len(strgfrst)\n numbseco = len(strgseco)\n numb = min(numbfrst, numbseco)\n if strgfrst[:numb] < strgseco[:numb]:\n booltemp = True\n elif strgfrst[:numb] == strgseco[:numb]:\n if numbfrst >= numbseco:\n booltemp = False\n else:\n booltemp = True\n else:\n booltemp = False\n\n return booltemp\n\n\ndef retr_pathoutprtag(pathpcat, rtag):\n \n pathoutprtag = pathpcat + '/data/outp/' + rtag + '/'\n \n return pathoutprtag\n\n\ndef proc_finl(gdat=None, rtag=None, strgpdfn='post', listnamevarbproc=None, forcplot=False):\n \n gdatmock = None\n \n print('proc_finl()')\n\n if rtag is None:\n rtag = gdat.rtag\n \n # determine if the final-processing if nominal or tiling\n if isinstance(rtag, list):\n listrtagmodi = rtag\n rtagfinl = tdpy.retr_strgtimestmp() + rtag[0][15:] + 'tile'\n booltile = True\n else:\n listrtagmodi = [rtag]\n rtagfinl = rtag\n booltile = False\n \n # determine of the gdatfinl object is available \n boolgdatfinl = chec_statfile(pathpcat, rtagfinl, 'gdatfinlpost')\n boolgdatfinlgood = False\n if boolgdatfinl:\n print('Final-processing has been performed previously.')\n pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)\n path = pathoutprtag + 'gdatfinl' + strgpdfn\n try:\n gdat = readfile(path) \n boolgdatfinlgood = True\n except:\n print('gdatfinl object is corrupted.')\n\n if boolgdatfinl and boolgdatfinlgood:\n # read gdatfinl\n pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)\n path = pathoutprtag + 'gdatfinl' + strgpdfn\n gdatfinl = readfile(path) \n \n 
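        # a previously final-processed global object was found and read back; for observed ('inpt') data with a fitting model that contains elements, the final object of the associated mock run is also loaded below when available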
if gdatfinl.fitt.numbparaelem > 0:\n if gdatfinl.typedata == 'inpt':\n if gdatfinl.boolcrex or gdatfinl.boolcrin:\n if gdatfinl.rtagmock is not None:\n path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'\n gdatmock = readfile(path)\n \n else:\n \n if booltile:\n gdatfinltile = tdpy.gdatstrt()\n \n indxrtaggood = []\n liststrgtile = []\n listrtaggood = []\n indxtiletemp = 0\n for n, rtagmodi in enumerate(listrtagmodi):\n \n # read gdatinit\n boolgdatinit = chec_statfile(pathpcat, rtagmodi, 'gdatinit')\n if not boolgdatinit:\n if booltile:\n print('Initial global object not found. Skipping...')\n continue\n else:\n print('Initial global object not found. Quitting...')\n return\n \n pathoutprtag = retr_pathoutprtag(pathpcat, rtagmodi)\n path = pathoutprtag + 'gdatinit'\n \n gdatinit = readfile(path) \n if booltile:\n gdatfinltile = gdatinit\n gdatfinl = gdatinit\n else:\n gdatfinl = gdatinit\n\n pathoutprtagmodi = retr_pathoutprtag(pathpcat, rtagmodi)\n listgdatmodi = []\n for k in gdatinit.indxproc:\n path = pathoutprtagmodi + 'gdatmodi%04d' % k + strgpdfn\n listgdatmodi.append(readfile(path))\n \n # erase\n gdatdictcopy = deepcopy(gdatinit.__dict__)\n for strg, valu in gdatdictcopy.items():\n if strg.startswith('fitt.indxpara.'):\n delattr(gdatinit, strg)\n\n if gdatinit.boolmockonly:\n print('Mock only run. Quitting final-processing...')\n return\n\n # read gdatmodi\n print('rtagmodi')\n print(rtagmodi)\n boolgdatmodi = chec_statfile(pathpcat, rtagmodi, 'gdatmodipost')\n if not boolgdatmodi:\n print('Modified global object not found. Quitting final-processing...')\n return\n \n ## list of other parameters to be flattened\n gdatinit.liststrgvarbarryflat = deepcopy(listgdatmodi[0].liststrgvarbarry)\n # temp\n #for strg in ['memoresi']:\n # gdatinit.liststrgvarbarryflat.remove(strg)\n \n listparagenrscalfull = np.empty((gdatinit.numbsamptotl, gdatinit.fitt.maxmnumbpara))\n \n if booltile:\n gdatfinltile.pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)\n numbsamptotlrsmp = gdatinit.numbsamptotl\n indxsamptotlrsmp = np.random.choice(gdatinit.indxsamptotl, size=gdatinit.numbsamptotl, replace=False)\n \n # aggregate samples from the chains\n if gdatinit.typeverb > 0:\n print('Reading gdatmodi objects from all processes...')\n timeinit = gdatinit.functime()\n \n if gdatinit.typeverb > 0:\n timefinl = gdatinit.functime()\n print('Done in %.3g seconds.' % (timefinl - timeinit))\n \n if gdatinit.fitt.numbparaelem > 0:\n if len(getattr(listgdatmodi[0], 'list' + strgpdfn + 'gmodstat.indxelemfull')) == 0:\n print('Found an empty element list. Skipping...')\n continue\n \n if gdatinit.typeverb > 0:\n print('Accumulating np.arrays...')\n timeinit = gdatinit.functime()\n \n for strgvarb in gdatinit.liststrgvarbarryflat:\n for k in gdatinit.indxproc:\n if k == 0:\n shap = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb).shape\n shap = [shap[0], gdatinit.numbproc] + list(shap[1:])\n temp = np.zeros(shap) - 1\n if len(shap) > 2:\n temp[:, k, :] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)\n else:\n temp[:, k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)\n setattr(gdatfinl, 'list' + strgpdfn + strgvarb, temp)\n \n if gdatfinl.typeverb > 0:\n timefinl = gdatfinl.functime()\n print('Done in %.3g seconds.' 
% (timefinl - timeinit))\n \n if gdatfinl.typeverb > 0:\n print('Accumulating lists...')\n timeinit = gdatfinl.functime()\n \n # lists of lists collected at each sample\n for strgvarb in listgdatmodi[0].liststrgvarblistsamp:\n listtemp = [[[] for k in gdatfinl.indxproc] for j in gdatfinl.indxsamp]\n for j in gdatfinl.indxsamp: \n for k in gdatfinl.indxproc:\n listtemp[j][k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)[j]\n setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)\n \n if gdatfinl.typeverb > 0:\n timefinl = gdatfinl.functime()\n print('Done in %.3g seconds.' % (timefinl - timeinit))\n \n if not booltile:\n ## np.maximum likelihood sample \n gdatfinl.maxmllikproc = np.empty(gdatfinl.numbproc)\n gdatfinl.indxswepmaxmllikproc = np.empty(gdatfinl.numbproc, dtype=int)\n gdatfinl.sampmaxmllikproc = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))\n for k in gdatfinl.indxproc:\n gdatfinl.maxmllikproc[k] = listgdatmodi[k].maxmllikswep\n gdatfinl.indxswepmaxmllikproc[k] = listgdatmodi[k].indxswepmaxmllik\n gdatfinl.sampmaxmllikproc[k, :] = listgdatmodi[k].sampmaxmllik\n \n listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')\n listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')\n\n # Gelman-Rubin test\n if gdatfinl.numbproc > 1:\n if gdatfinl.typeverb > 0:\n print('Computing the Gelman-Rubin TS...')\n timeinit = gdatfinl.functime()\n gdatfinl.gmrbparagenrscalbase = np.zeros(gdatfinl.fitt.numbparagenrbase)\n gdatfinl.gmrbstat = np.zeros((gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))\n for k in gdatfinl.fitt.indxparagenrbase:\n gdatfinl.gmrbparagenrscalbase[k] = tdpy.mcmc.gmrb_test(listparagenrscalfull[:, :, k])\n if not np.isfinite(gdatfinl.gmrbparagenrscalbase[k]):\n gdatfinl.gmrbparagenrscalbase[k] = 0.\n listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')\n for i in gdatfinl.indxener:\n for j in gdatfinl.indxpixl:\n for m in gdatfinl.indxevtt:\n gdatfinl.gmrbstat[i, j, m] = tdpy.mcmc.gmrb_test(listcntpmodl[:, :, i, j, m])\n if gdatfinl.typeverb > 0:\n timefinl = gdatfinl.functime()\n print('Done in %.3g seconds.' % (timefinl - timeinit))\n\n # calculate the autocorrelation of the chains\n if gdatfinl.typeverb > 0:\n print('Computing the autocorrelation of the chains...')\n timeinit = gdatfinl.functime()\n gdatfinl.atcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt, int(gdatfinl.numbparagenrfull / 2)))\n gdatfinl.timeatcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))\n gdatfinl.atcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara, int(gdatfinl.numbparagenrfull / 2)))\n gdatfinl.timeatcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))\n for k in gdatfinl.indxproc:\n gdatfinl.atcrpara[k, :, :], gdatfinl.timeatcrpara[k, :] = tdpy.mcmc.retr_timeatcr(listparagenrscalfull[:, k, :], typeverb=gdatfinl.typeverb)\n listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')\n gdatfinl.atcrcntp[k, :], gdatfinl.timeatcrcntp[k, :] = tdpy.mcmc.retr_timeatcr(listcntpmodl[:, k, :, :, :], typeverb=gdatfinl.typeverb)\n timeatcrcntpmaxm = np.amax(gdatfinl.timeatcrcntp)\n gdatfinl.timeatcrcntpmaxm = np.amax(timeatcrcntpmaxm)\n \n if gdatfinl.typeverb > 0:\n timefinl = gdatfinl.functime()\n print('Done in %.3g seconds.' 
% (timefinl - timeinit))\n \n setattr(gdatfinl, 'list' + strgpdfn + 'sampproc', np.copy(getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')))\n\n # flatten the list chains from different walkers\n for strgvarb in listgdatmodi[0].liststrgvarblistsamp:\n listtemp = []\n listinpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)\n for j in gdatfinl.indxsamp: \n for k in gdatfinl.indxproc:\n listtemp.append(listinpt[j][k])\n setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)\n \n # flatten the np.array chains from different walkers\n for strgvarb in gdatinit.liststrgvarbarryflat:\n inpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)\n shap = [inpt.shape[0] * inpt.shape[1]] + list(inpt.shape[2:])\n setattr(gdatfinl, 'list' + strgpdfn + strgvarb, inpt.reshape(shap))\n listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')\n listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')\n \n if booltile:\n\n liststrgtile.append(rtagmodi.split('_')[-2][-4:])\n listrtaggood.append(rtagmodi)\n indxrtaggood.append(n)\n indxtiletemp += 1\n \n if len(liststrgtile) == 1:\n for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:\n refrfeattile = [[] for q in gdatfinl.indxrefr]\n setattr(gdatfinl, 'refr' + strgfeat, refrfeattile)\n \n for strgvarb in gdatfinl.liststrgvarbarrysamp:\n if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:\n listvarb = []\n setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarb)\n else:\n hist = np.zeros_like(getattr(listgdatmodi[0], 'list' + strgpdfn + strgvarb))\n setattr(gdatfinl, 'list' + strgpdfn + strgvarb, hist)\n \n for name, valu in gdatfinl.__dict__.items():\n if name.startswith('refrhist'):\n setattr(gdatfinl, name, np.zeros_like(getattr(gdatfinl, name)))\n \n #for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:\n # refrfeattile = getattr(gdatfinl, 'refr' + strgfeat)\n # #refrfeat = getattr(gdatfinl, 'refr' + strgfeat)\n # refrfeat = [[] for q in gdatfinl.indxrefr]\n # for q in gdatfinl.indxrefr:\n # if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:\n # refrfeat[q].append(refrfeattile[q])\n \n for strgvarb in gdatfinl.liststrgvarbarrysamp:\n if strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:\n # temp\n if 'spec' in strgvarb:\n continue\n hist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)\n hist += getattr(gdatfinl, 'list' + strgpdfn + strgvarb)\n \n for name, valu in gdatfinl.__dict__.items():\n if name.startswith('refrhist'):\n hist = getattr(gdatfinl, name)\n hist += getattr(gdatfinl, name)\n\n print('Done with the tile number %d, run number %d...' 
% (indxtiletemp, n))\n \n if booltile:\n gdatfinl.pathplotrtag = gdatfinl.pathimag + rtagfinl + '/'\n make_fold(gdatfinl)\n indxrtaggood = np.array(indxrtaggood).astype(int)\n numbrtaggood = indxrtaggood.size\n numbtile = numbrtaggood\n print('Found %d tiles with run tags:' % numbrtaggood)\n for indxrtaggoodtemp in indxrtaggood:\n print(rtag[indxrtaggoodtemp])\n\n # np.concatenate reference elements from different tiles\n #for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:\n # refrfeat = getattr(gdatfinl, 'refr' + strgfeat, refrfeat)\n # for q in gdatfinl.indxrefr:\n # if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:\n # refrfeat[q] = np.concatenate(refrfeat[q], axis=1)\n \n for strgvarb in gdatfinl.liststrgvarbarrysamp:\n if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:\n listvarb = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)\n if 'assc' in strgvarb:\n numbrefrelemtotl = 0\n for k, varbrsmp in enumerate(listvarb):\n numbrefrelemtotl += varbrsmp.shape[1]\n shap = [gdatfinl.numbsamptotl, numbrefrelemtotl]\n listvarbtemp = np.empty(shap)\n cntr = 0\n for k, varb in enumerate(listvarb):\n listvarbtemp[:, cntr:cntr+varb.shape[1]] = varb\n cntr += varb.shape[1]\n else:\n shap = [gdatfinl.numbsamptotl * numbtile] + list(listvarb[0].shape[1:])\n listvarbtemp = np.empty(shap)\n for k, varb in enumerate(listvarb):\n listvarbtemp[k*gdatfinl.numbsamptotl:(k+1)*gdatfinl.numbsamptotl, ...] = varb\n setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarbtemp)\n else:\n # np.maximum likelihood sample\n if gdatfinl.fitt.numbparaelem > 0:\n listindxelemfull = getattr(gdatfinl, 'list' + strgpdfn + 'indxelemfull')\n listllik = getattr(gdatfinl, 'list' + strgpdfn + 'llik')\n listlliktotl = getattr(gdatfinl, 'list' + strgpdfn + 'lliktotl')\n indxsamptotlmlik = np.argmax(np.sum(np.sum(np.sum(listllik, 3), 2), 1))\n \n # copy the np.maximum likelihood sample\n for strgvarb in listgdatmodi[0].liststrgvarbarrysamp:\n setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik, ...])\n for strgvarb in listgdatmodi[0].liststrgvarblistsamp:\n setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik])\n\n # temp -- dont gdatfinl.listllik and gdatfinl.listparagenrscalfull have the same dimensions?\n gdatfinl.mlikparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')[indxsamptotlmlik, :]\n gdatfinl.mlikparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')[indxsamptotlmlik, :]\n #if gdatfinl.fitt.numbparaelem > 0:\n # gdatfinl.mlikindxelemfull = listindxelemfull[indxsamptotlmlik]\n gdatfinl.mlikparagenrscalbase = gdatfinl.mlikparagenrscalfull[gdatfinl.fitt.indxparagenrbase]\n for k, gmod.nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):\n setattr(gdatfinl, 'mlik' + gmod.nameparagenrbase, gdatfinl.mlikparagenrscalbase[k])\n\n # add execution times to the chain output\n gdatfinl.timereal = np.zeros(gdatfinl.numbproc)\n gdatfinl.timeproc = np.zeros(gdatfinl.numbproc)\n for k in gdatfinl.indxproc:\n gdatfinl.timereal[k] = listgdatmodi[k].timereal\n gdatfinl.timeproc[k] = listgdatmodi[k].timeproc\n \n # find the np.maximum likelihood and posterior over the chains\n gdatfinl.indxprocmaxmllik = np.argmax(gdatfinl.maxmllikproc)\n #gdatfinl.maxmlliktotl = gdatfinl.maxmllikproc[gdatfinl.indxprocmaxmllik]\n gdatfinl.indxswepmaxmllik = gdatfinl.indxprocmaxmllik * gdatfinl.numbparagenrfull + 
gdatfinl.indxswepmaxmllikproc[gdatfinl.indxprocmaxmllik]\n gdatfinl.sampmaxmllik = gdatfinl.sampmaxmllikproc[gdatfinl.indxprocmaxmllik, :]\n \n if strgpdfn == 'post':\n levipost = retr_levipost(listlliktotl)\n setattr(gdatfinl, strgpdfn + 'levipost', levipost)\n \n if strgpdfn == 'prio':\n leviprio = np.log(np.mean(np.exp(listlliktotl)))\n setattr(gdatfinl, strgpdfn + 'leviprio', leviprio)\n \n # parse the sample vector\n listparagenrscalbase = listparagenrscalfull[:, gdatfinl.fitt.indxparagenrbase]\n for k, gmod.nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):\n setattr(gdatfinl, 'list' + strgpdfn + gmod.nameparagenrbase, listparagenrscalbase[:, k])\n setattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalbase', listparagenrscalbase)\n\n if strgpdfn == 'post' and gdatfinl.checprio:\n pathoutprtag = retr_pathoutprtag(pathpcat, rtag)\n path = pathoutprtag + 'gdatfinlprio'\n try:\n gdatprio = readfile(path)\n except:\n proc_finl(gdat=gdatfinl, strgpdfn='prio', listnamevarbproc=listnamevarbproc, forcplot=forcplot)\n else:\n gdatprio = None\n \n # post process samples\n ## bin element parameters\n if gdatfinl.typeverb > 0:\n print('Binning the probabilistic catalog spatially...')\n timeinit = gdatfinl.functime()\n \n if not booltile:\n if gdatfinl.fitt.numbparaelem > 0:\n if gdatfinl.boolbinsspat:\n histlgalbgalelemstkd = [[] for l in gdatfinl.fittindxpopl]\n \n listlgal = getattr(gdatfinl, 'list' + strgpdfn + 'lgal')\n listbgal = getattr(gdatfinl, 'list' + strgpdfn + 'bgal')\n for l in gdatfinl.fittindxpopl:\n if gdatfinl.fitttypeelem[l] != 'lghtline':\n histlgalbgalelemstkd[l] = np.zeros((gdatfinl.numbbgalpntsprob, gdatfinl.numblgalpntsprob, gdatfinl.numbbinsplot, numb))\n temparry = np.concatenate([listlgal[n][l] for n in gdatfinl.indxsamptotl])\n temp = np.empty((len(temparry), 3))\n temp[:, 0] = temparry\n temp[:, 1] = np.concatenate([listbgal[n][l] for n in gdatfinl.indxsamptotl])\n temp[:, 2] = np.concatenate([getattr(gdatfinl, 'list' + strgpdfn + strgfeat)[n][l] for n in gdatfinl.indxsamptotl])\n bins = getattr(gdatfinl, 'bins' + strgfeat)\n histlgalbgalelemstkd[l][:, :, :, k] = np.histogramdd(temp, \\\n bins=(gdatfinl.binslgalpntsprob, gdatfinl.binsbgalpntsprob, bins))[0]\n setattr(gdatfinl, strgpdfn + 'histlgalbgalelemstkd', histlgalbgalelemstkd)\n\n if gdatfinl.typeverb > 0:\n timefinl = gdatfinl.functime()\n print('Done in %.3g seconds.' % (timefinl - timeinit))\n\n ## construct a condensed catalog of elements\n if gdatfinl.boolcondcatl and gdatfinl.fitt.numbparaelem > 0:\n \n if gdatfinl.typeverb > 0:\n print('Constructing a condensed catalog...')\n timeinit = gdatfinl.functime()\n \n retr_condcatl(gdatfinl)\n \n if gdatfinl.typeverb > 0:\n timefinl = gdatfinl.functime()\n print('Done in %.3g seconds.' 
% (timefinl - timeinit))\n\n # construct lists of samples for each proposal type\n listindxproptype = getattr(gdatfinl, 'list' + strgpdfn + 'indxproptype')\n listboolpropaccp = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropaccp')\n listboolpropfilt = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropfilt')\n listindxsamptotlproptotl = []\n listindxsamptotlpropfilt = []\n listindxsamptotlpropaccp = []\n listindxsamptotlpropreje = []\n for n in gdatfinl.indxproptype:\n indxsampproptype = np.where(listindxproptype == gdatfinl.indxproptype[n])[0]\n listindxsamptotlproptotl.append(indxsampproptype)\n listindxsamptotlpropaccp.append(np.intersect1d(indxsampproptype, np.where(listboolpropaccp)[0]))\n listindxsamptotlpropfilt.append(np.intersect1d(indxsampproptype, np.where(listboolpropfilt)[0]))\n listindxsamptotlpropreje.append(np.intersect1d(indxsampproptype, np.where(np.logical_not(listboolpropaccp))[0]))\n if listindxsamptotlproptotl[n].size == 0:\n accp = 0.\n else:\n accp = float(listindxsamptotlpropaccp[n].size) / listindxsamptotlproptotl[n].size\n setattr(gdatfinl, 'accp' + gdatfinl.nameproptype[n], accp)\n\n setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlproptotl', listindxsamptotlproptotl)\n setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropaccp', listindxsamptotlpropaccp)\n setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropreje', listindxsamptotlpropreje)\n \n if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':\n if gdatfinl.typedata == 'inpt':\n if gdatfinl.boolcrex or gdatfinl.boolcrin:\n if gdatfinl.rtagmock is not None:\n path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'\n gdatmock = readfile(path)\n \n # posterior corrections\n if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':\n\n ## perform corrections\n if gdatfinl.typedata == 'inpt':\n if gdatfinl.boolcrex or gdatfinl.boolcrin:\n\n for gmod.namepara.genrelemvarbhist in gdatfinl.liststrgvarbhist:\n strgvarb = gmod.namepara.genrelemvarbhist[0]\n\n if gmod.namepara.genrelemvarbhist[1].startswith('aerr') or len(gmod.namepara.genrelemvarbhist[2]) > 0 and gmod.namepara.genrelemvarbhist[2].startswith('aerr'):\n continue\n if gmod.namepara.genrelemvarbhist[1] == 'spec' or gmod.namepara.genrelemvarbhist[1] == 'deflprof' or gmod.namepara.genrelemvarbhist[1] == 'specplot':\n continue\n if len(gmod.namepara.genrelemvarbhist[2]) > 0 and (gmod.namepara.genrelemvarbhist[2] == 'spec' or \\\n gmod.namepara.genrelemvarbhist[2] == 'deflprof' or gmod.namepara.genrelemvarbhist[2] == 'specplot'):\n continue\n \n ## internal correction\n listhist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)\n \n for qq in gdatmock.indxrefr:\n l = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[1][0])\n qq = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[2][0])\n if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and \\\n (len(gmod.namepara.genrelemvarbhist[2]) == 0 or gmod.namepara.genrelemvarbhist[2][-4:] in gdatfinl.listnamerefr):\n listhistincr = listhist\n else:\n if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and len(gmod.namepara.genrelemvarbhist[2]) > 0:\n listcmpltrue = np.stack(gdatfinl.numbbinsplot * \\\n [getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (l, qq))], 2)\n listfdistrue = np.stack(gdatfinl.numbbinsplot * \\\n [getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (qq, l))], 2)\n elif len(gmod.namepara.genrelemvarbhist[2][:-4]) > 0 and gmod.namepara.genrelemvarbhist[2][-4:] in 
gdatfinl.listnamerefr:\n listcmpltrue = np.stack(gdatfinl.numbbinsplot * \\\n [getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (l, qq))], 1)\n listfdistrue = np.stack(gdatfinl.numbbinsplot * \\\n [getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (qq, l))], 1)\n else:\n listcmpltrue = getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[3][qq])\n listfdistrue = getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[3][qq])\n if len(gmod.namepara.genrelemvarbhist[2]) == 0:\n listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))\n listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))\n listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))\n for k in gdatfinl.indxbinsplot:\n listcmplboot[:, k] = np.random.choice(listcmpltrue[:, k], size=gdatfinl.numbsampboot)\n listfdisboot[:, k] = np.random.choice(listfdistrue[:, k], size=gdatfinl.numbsampboot)\n listhistboot[:, k] = np.random.choice(listhist[:, k], size=gdatfinl.numbsampboot)\n else:\n listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))\n listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))\n listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))\n for a in gdatfinl.indxbinsplot:\n for b in gdatfinl.indxbinsplot:\n listcmplboot[:, a, b] = np.random.choice(listcmpltrue[:, a, b], size=gdatfinl.numbsampboot)\n listfdisboot[:, a, b] = np.random.choice(listfdistrue[:, a, b], size=gdatfinl.numbsampboot)\n listhistboot[:, a, b] = np.random.choice(listhist[:, a, b], size=gdatfinl.numbsampboot)\n indxbadd = np.where(listcmplboot == -1)\n indxbaddzero = np.where(listcmplboot == 0.)\n listhistincr = listhistboot / listcmplboot * (1. 
- listfdisboot)\n listhistincr[indxbadd] = -1.5\n listhistincr[indxbaddzero] = 1.5\n \n listgdatmodi[0].liststrgchan += ['incr' + gmod.namepara.genrelemvarbhist[4][qq]]\n setattr(gdatfinl, 'listpostincr' + gmod.namepara.genrelemvarbhist[4][qq], listhistincr)\n \n ## external correction\n for q in gdatfinl.indxrefr:\n nametemp = gmod.namepara.genrelemvarbhist[1] \n if len(gmod.namepara.genrelemvarbhist[2]) > 0:\n nametemp += gmod.namepara.genrelemvarbhist[2]\n nametemp += 'pop%dpop%dpop%d' % (q, qq, l)\n crexhist = getattr(gdatfinl, 'crex' + nametemp)\n if crexhist is not None:\n \n listhistexcr = listhistincr * crexhist \n \n if crexhist.ndim == 1 and listhistincr.ndim == 3:\n raise Exception('')\n \n listgdatmodi[0].liststrgchan += ['excr' + nametemp]\n setattr(gdatfinl, 'listpostexcr' + nametemp, listhistexcr)\n \n # compute credible intervals\n if gdatfinl.typeverb > 0:\n print('Computing credible intervals...')\n timeinit = gdatfinl.functime()\n \n for strgchan in listgdatmodi[0].liststrgchan:\n \n if booltile:\n if strgchan in gdatfinl.liststrgvarbarryswep or strgchan in listgdatmodi[0].liststrgvarblistsamp:\n continue\n if not (strgchan.startswith('hist') or strgchan.startswith('incr') or strgchan.startswith('excr')):\n continue\n\n if gdatfinl.fitt.numbparaelem > 0 and strgchan in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:\n if 'spec' in strgchan:\n continue\n if strgchan == 'spec':\n continue\n\n listtemp = getattr(gdatfinl, 'list' + strgpdfn + strgchan)\n \n if isinstance(listtemp, list):\n \n if booltile:\n continue\n\n # ensure that transdimensional lists are not included\n # temp\n if strgchan in gdatfinl.fitt.namepara.genrelemtotl or strgchan == 'indxelemfull':\n continue\n\n pctltemp = []\n pmeatemp = []\n meditemp = []\n errrtemp = []\n stdvtemp = []\n numb = len(listtemp[0])\n \n for k in range(numb):\n if isinstance(listtemp[0][k], list):\n continue\n shap = [gdatfinl.numbsamptotl] + list(listtemp[0][k].shape)\n temp = np.zeros(shap)\n for n in gdatfinl.indxsamptotl:\n temp[n, ...] = listtemp[n][k]\n \n pctltempsing = tdpy.retr_pctlvarb(temp)\n pmeatempsing = np.mean(temp, axis=0)\n meditempsing = pctltempsing[0, ...]\n errrtempsing = tdpy.retr_errrvarb(pctltempsing)\n stdvtempsing = np.std(temp)\n \n pctltemp.append(pctltempsing)\n pmeatemp.append(pmeatempsing)\n meditemp.append(meditempsing)\n errrtemp.append(errrtempsing)\n stdvtemp.append(stdvtempsing)\n else:\n # this is needed for finding posterior moments of features of associated reference elements\n if 'asscref' in strgchan:\n if listtemp.ndim != 2:\n raise Exception('')\n pmeatemp = np.zeros(listtemp.shape[1])\n pctltemp = np.zeros([3] + [listtemp.shape[1]])\n # temp -- this only works for 2D listtemp\n for k in range(listtemp.shape[1]):\n indxassc = np.where(np.isfinite(listtemp[:, k]))[0]\n if indxassc.size > 0:\n pctltemp[:, k] = tdpy.retr_pctlvarb(listtemp[indxassc, k])\n pmeatemp[k] = np.mean(listtemp[indxassc, k])\n else:\n pctltemp = tdpy.retr_pctlvarb(listtemp)\n pmeatemp = np.mean(listtemp, axis=0)\n \n errrtemp = tdpy.retr_errrvarb(pctltemp)\n stdvtemp = np.std(pctltemp, axis=0)\n meditemp = pctltemp[0, ...]\n \n if strgchan in gdatfinl.listnamevarbcpct:\n cpcttemp = np.empty([gdatfinl.numbsampcpct] + [3] + list(listtemp.shape[1:]))\n for n in gdatfinl.indxsampcpct:\n cpcttemp[n, ...] 
= tdpy.retr_pctlvarb(listtemp[:n+1, ...])\n \n setattr(gdatfinl, 'pctl' + strgpdfn + strgchan, pctltemp)\n setattr(gdatfinl, 'medi' + strgpdfn + strgchan, meditemp)\n setattr(gdatfinl, 'pmea' + strgpdfn + strgchan, pmeatemp)\n setattr(gdatfinl, 'errr' + strgpdfn + strgchan, errrtemp)\n setattr(gdatfinl, 'stdv' + strgpdfn + strgchan, stdvtemp)\n if strgchan in gdatfinl.listnamevarbcpct:\n setattr(gdatfinl, 'cpct' + strgpdfn + strgchan, cpcttemp)\n \n if not booltile:\n pmealliktotl = getattr(gdatfinl, 'pmea' + strgpdfn + 'lliktotl')\n stdvlliktotl = getattr(gdatfinl, 'stdv' + strgpdfn + 'lliktotl')\n minmlliktotl = np.amin(listlliktotl)\n maxmlliktotl = np.amax(listlliktotl)\n skewlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**3)\n kurtlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**4)\n setattr(gdatfinl, 'minm' + strgpdfn + 'lliktotl', minmlliktotl)\n setattr(gdatfinl, 'maxm' + strgpdfn + 'lliktotl', maxmlliktotl)\n setattr(gdatfinl, 'skew' + strgpdfn + 'lliktotl', skewlliktotl)\n setattr(gdatfinl, 'kurt' + strgpdfn + 'lliktotl', kurtlliktotl)\n\n if strgpdfn == 'post':\n infopost = retr_infofromlevi(pmealliktotl, levipost)\n setattr(gdatfinl, strgpdfn + 'infopost', infopost)\n if strgpdfn == 'post' and gdatfinl.checprio:\n leviprio = getattr(gdatprio, 'prioleviprio')\n infoprio = retr_infofromlevi(pmealliktotl, leviprio)\n setattr(gdatfinl, strgpdfn + 'infoprio', infoprio)\n \n bcom = maxmlliktotl - pmealliktotl\n setattr(gdatfinl, strgpdfn + 'bcom', bcom)\n \n listnametemp = ['lliktotl']\n if gmod.numbparaelem > 0:\n listnametemp += ['lpripena']\n\n for namevarbscal in listnametemp:\n listtemp = getattr(gdatfinl, 'list' + strgpdfn + namevarbscal)\n minm = np.amin(listtemp)\n maxm = np.amax(listtemp)\n setattr(gdatfinl, 'minm' + namevarbscal, minm)\n setattr(gdatfinl, 'maxm' + namevarbscal, maxm)\n setattr(gdatfinl, 'scal' + namevarbscal, 'self')\n retr_axis(gdat, namevarbscal)\n \n if gdatfinl.checprio:\n for strgvarb in gdatfinl.listnamevarbscal:\n setp_pdfnvarb(gdatfinl, strgpdfn, strgvarb, strgvarb)\n for l0 in gdatfinl.fittindxpopl:\n for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:\n if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':\n continue\n setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)\n for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:\n if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':\n continue\n \n if not checstrgfeat(strgfeatfrst, strgfeatseco):\n continue\n \n setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)\n\n # calculate information gain\n if strgpdfn == 'post':\n for namevarbscal in gdatfinl.listnamevarbscal:\n setp_info(gdatfinl, gdatprio, namevarbscal, namevarbscal)\n for l0 in gdatfinl.fittindxpopl:\n for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:\n if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':\n continue\n setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)\n for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:\n if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':\n continue\n \n if not checstrgfeat(strgfeatfrst, strgfeatseco):\n continue\n \n setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)\n\n if gdatfinl.typeverb > 0:\n timefinl = 
gdatfinl.functime()\n print('Done in %.3g seconds.' % (timefinl - timeinit))\n \n # flatten the np.arrays which have been collected at each sweep\n #setattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'flat', getattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'totl').flatten())\n if not booltile:\n # memory usage\n listmemoresi = getattr(gdatfinl, 'list' + strgpdfn + 'memoresi')\n gdatfinl.meanmemoresi = np.mean(listmemoresi, 1)\n gdatfinl.derimemoresi = (gdatfinl.meanmemoresi[-1] - gdatfinl.meanmemoresi[0]) / gdatfinl.numbswep\n\n gdatfinl.timerealtotl = time.time() - gdatfinl.timerealtotl\n gdatfinl.timeproctotl = time.clock() - gdatfinl.timeproctotl\n gdatfinl.timeproctotlswep = gdatfinl.timeproctotl / gdatfinl.numbswep\n \n if gdatfinl.timeatcrcntpmaxm == 0.:\n gdatfinl.timeprocnorm = 0.\n else:\n gdatfinl.timeprocnorm = gdatfinl.timeproctotlswep / gdatfinl.timeatcrcntpmaxm\n \n # write the final gdat object\n path = gdatfinl.pathoutprtag + 'gdatfinl' + strgpdfn\n\n if gdatfinl.typeverb > 0:\n print('Writing gdatfinl to %s...' % path)\n writfile(gdatfinl, path) \n \n filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')\n filestat.write('gdatfinl%s written.\\n' % strgpdfn)\n filestat.close()\n \n if not booltile:\n if gdatfinl.typeverb > 0:\n for k in gdatfinl.indxproc:\n print('Process %d has been completed in %d real seconds, %d CPU seconds.' % (k, gdatfinl.timereal[k], gdatfinl.timeproc[k]))\n print('Parent process has run in %d real seconds, %d CPU seconds.' % (gdatfinl.timerealtotl, gdatfinl.timeproctotl))\n \n print('HACKING!!')\n gdatfinl.strgpdfn = 'post'\n\n print('Checking whether post-processing plots already exist.')\n booltemp = chec_statfile(pathpcat, rtagfinl, 'plotfinl')\n if booltemp:\n print('Final plots already exist. Skipping...')\n else:\n if strgpdfn == 'post' and gdatfinl.checprio:\n path = pathoutprtag + 'gdatfinlprio'\n gdatprio = readfile(path)\n else:\n gdatprio = None\n \n if gdatfinl.makeplot and getattr(gdatfinl, 'makeplotfinl' + strgpdfn) or forcplot:\n plot_finl(gdatfinl, gdatprio=gdatprio, strgpdfn=strgpdfn, gdatmock=gdatmock, booltile=booltile)\n filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')\n filestat.write('plotfinl%s written.\\n' % strgpdfn)\n filestat.close()\n\n\ndef retr_listgdat(listrtag, typegdat='finlpost'):\n \n listgdat = []\n for rtag in listrtag:\n pathoutprtag = retr_pathoutprtag(pathpcat, rtag)\n path = pathoutprtag + 'gdat%s' % typegdat\n listgdat.append(readfile(path))\n\n return listgdat\n\n\ndef make_fold(gdat):\n\n for strgpdfn in gdat.liststrgpdfn:\n setattr(gdat, 'path' + strgpdfn, gdat.pathplotrtag + strgpdfn + '/') \n path = getattr(gdat, 'path' + strgpdfn)\n\n for nameseco in ['finl', 'fram', 'anim', 'opti']:\n setattr(gdat, 'path' + strgpdfn + nameseco, path + nameseco + '/')\n \n for nameseco in ['diag', 'lpac', 'varbscal', 'cond', 'varbscalproc']:\n setattr(gdat, 'path' + strgpdfn + 'finl' + nameseco, path + 'finl/' + nameseco + '/')\n \n for n in gdat.indxproptype:\n setattr(gdat, 'path' + strgpdfn + 'finl' + gdat.nameproptype[n], path + 'finl/lpac/' + gdat.nameproptype[n] + '/')\n\n for namethrd in ['hist', 'trac', 'join', 'cova']:\n setattr(gdat, 'path' + strgpdfn + 'finlvarbscal' + namethrd, path + 'finl/varbscal/' + namethrd + '/')\n \n for strgphas in gdat.liststrgphas + ['init']:\n liststrgfold = getattr(gdat, 'liststrgfold' + strgphas)\n for nameseco in liststrgfold:\n if strgphas == 'init':\n if nameseco == 'assc' or nameseco.startswith('cmpl') or nameseco.startswith('fdis'):\n continue\n setattr(gdat, 
'path' + strgphas + nameseco[:-1], gdat.pathplotrtag + 'init/' + nameseco)\n else:\n setattr(gdat, 'path' + strgpdfn + strgphas + nameseco[:-1], path + strgphas + '/' + nameseco)\n gdat.pathinfo = gdat.pathplotrtag + 'info/'\n \n ## make the directories \n for attr, valu in gdat.__dict__.items():\n if attr.startswith('path'):\n os.system('mkdir -p %s' % valu)\n\n\ndef make_cmapdivg(strgcolrloww, strgcolrhigh):\n \n funccolr = mpl.colors.ColorConverter().to_rgb\n \n colrloww = funccolr(strgcolrloww)\n colrhigh = funccolr(strgcolrhigh)\n \n cmap = make_cmap([colrloww, funccolr('white'), 0.5, funccolr('white'), colrhigh])\n\n return cmap\n\n\ndef make_cmap(seq):\n \n seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\n cdict = {'red': [], 'green': [], 'blue': []}\n for i, item in enumerate(seq):\n if isinstance(item, float):\n r1, g1, b1 = seq[i - 1]\n r2, g2, b2 = seq[i + 1]\n cdict['red'].append([item, r1, r2])\n cdict['green'].append([item, g1, g2])\n cdict['blue'].append([item, b1, b2])\n \n return mpl.colors.LinearSegmentedColormap('CustomMap', cdict)\n\n\ndef setp_pdfnvarb(gdat, strgpdfn, name, namefull, nameseco=None):\n \n if listvarb.ndim == 1:\n shaptemp = [gdat.numbbinspdfn, 1]\n else:\n shaptemp = [gdat.numbbinspdfn] + list(listvarb.shape[1:])\n pdfn = np.empty(shaptemp)\n if listvarb.ndim == 1:\n binsvarb = getattr(gdat.binspara, name)\n deltvarb = getattr(gdat, 'delt' + name)\n pdfn[:, 0] = np.histogram(listvarb, bins=binsvarb)[0].astype(float)\n pdfn[:, 0] /= np.sum(pdfn[:, 0])\n pdfn[:, 0] /= deltvarb\n else:\n binsvarb = np.linspace(0, gmod.maxmpara.numbelemtotl, 51)\n \n if listvarb.ndim == 2:\n for k in range(listvarb.shape[1]):\n pdfn[:, k] = np.histogram(listvarb[:, k], bins=binsvarb)[0].astype(float)\n pdfn[:, k] /= np.sum(pdfn[:, k])\n pdfn *= 50.\n if listvarb.ndim == 3:\n for k in range(listvarb.shape[1]):\n for m in range(listvarb.shape[2]):\n pdfn[:, k, m] = np.histogram(listvarb[:, k, m], bins=binsvarb)[0].astype(float)\n pdfn[:, k, m] /= np.sum(pdfn[:, k, m])\n pdfn *= 2500.\n pdfn[np.where(pdfn < 1e-50)[0]] = 1e-50\n \n setattr(gdat, 'pdfn' + strgpdfn + namefull, pdfn)\n\n\ndef setp_info(gdat, gdatprio, name, namefull, nameseco=None, namesecofull=None):\n \n listpost = getattr(gdat, 'listpost' + namefull)\n listprio = getattr(gdatprio, 'listprio' + namefull)\n pdfnpost = getattr(gdat, 'pdfnpost' + namefull)\n pdfnprio = getattr(gdatprio, 'pdfnprio' + namefull)\n if listpost.ndim == 3:\n infodens = np.empty((gdat.numbbinspdfn, listpost.shape[1], listpost.shape[2]))\n info = np.empty((listpost.shape[1], listpost.shape[2]))\n pvks = np.empty((listpost.shape[1], listpost.shape[2]))\n else:\n if listpost.ndim == 1:\n numbtemp = 1\n else:\n numbtemp = listpost.shape[1]\n infodens = np.empty((gdat.numbbinspdfn, numbtemp))\n info = np.empty(numbtemp)\n pvks = np.empty(numbtemp)\n if listpost.ndim == 1:\n listpost = listpost[:, None]\n listprio = listprio[:, None]\n deltvarb = getattr(gdat, 'delt' + name)\n else:\n if listpost.ndim == 2:\n deltvarb = 1. / 50\n else:\n deltvarb = 1. 
/ 50**list2\n \n if listpost.ndim == 1 or listpost.ndim == 2:\n for k in range(listpost.shape[1]):\n infodens[:, k] = retr_infodens(pdfnpost[:, k], pdfnprio[:, k])\n info[k] = np.sum(infodens[:, k] * deltvarb)\n temp, pvks[k] = sp.stats.ks_2samp(listpost[:, k], listprio[:, k])\n if listpost.ndim == 3:\n for k in range(listpost.shape[1]):\n for m in range(listpost.shape[2]):\n infodens[:, k, m] = retr_infodens(pdfnpost[:, k, m], pdfnprio[:, k, m])\n info[k, m] = np.sum(infodens[:, k, m] * deltvarb)\n temp, pvks[k, m] = sp.stats.ks_2samp(listpost[:, k, m], listprio[:, k, m])\n \n setattr(gdat, 'pvks' + namefull, pvks)\n setattr(gdat, 'infodens' + namefull, infodens)\n setattr(gdat, 'info' + namefull, info)\n\n\n# check the state file\ndef chec_statfile(pathpcat, rtag, strggdat, typeverb=1):\n \n print('Checking the state file %s for %s...' % (strggdat, rtag))\n \n pathoutprtag = retr_pathoutprtag(pathpcat, rtag)\n \n # check the status file\n if not os.path.isfile(pathoutprtag + 'stat.txt'):\n if typeverb > 0:\n print('pathoutprtag')\n print(pathoutprtag)\n print('stat.txt not found.')\n return False\n\n # check the global object\n filestat = open(pathoutprtag + 'stat.txt', 'r')\n booltemp = False\n linesrch = strggdat + ' written.\\n'\n for line in filestat:\n if line == linesrch:\n booltemp = True\n\n filestat.close()\n if not booltemp:\n if typeverb > 0:\n print('bad %s status.' % (strggdat))\n return False\n else:\n return True\n\n\ndef retr_los3(dlos, lgal, bgal):\n\n dglc = np.sqrt(8.5e3**2 + dlos**2 - 2. * dlos * 8.5e3 * np.cos(bgal) * np.cos(lgal))\n thet = np.arccos(np.sin(bgal) * dlos / dglc)\n phii = np.arcsin(np.sqrt(np.cos(bgal)**2 * dlos**2 + 8.5e3**2 - 2 * dlos * np.cos(bgal) * 8.5e3) / dglc)\n \n return dglc, thet, phii\n\n\ndef retr_glc3(dglc, thet, phii):\n\n xpos = dglc * np.sin(thet) * np.cos(phii)\n ypos = dglc * np.sin(thet) * np.sin(phii)\n zpos = dglc * np.cos(thet)\n dlos = np.sqrt(zpos**2 + xpos**2 + (8.5e3 - ypos)**2)\n lgal = np.arctan2(8.5e3 - ypos, xpos) - np.pi / 2\n bgal = np.arcsin(zpos / dlos)\n \n return dlos, lgal, bgal\n\n\ndef retr_lumipuls(geff, magf, per0):\n\n # temp -- this is bolometric luminosity np.whereas dictelem[l]['flux'] is differential!\n lumi = 9.6e33 * (geff / 0.2) * (magf / 10**8.5)**2 * (3e-3 / per0)*4\n\n return lumi\n\n\ndef retr_lumi(gdat, flux, dlos, reds=None):\n\n lumi = flux * 4. * np.pi * dlos**2 * gdat.prsccmtr**2 / gdat.ergsgevv\n \n # temp\n # redshift correction\n if reds is not None:\n lumi *= (1. + reds)**2\n\n return lumi\n\n\ndef retr_flux(gdat, lumi, dlos, reds=None):\n\n flux = lumi / 4. / np.pi / dlos**2 / gdat.prsccmtr**2 * gdat.ergsgevv\n \n # temp\n # redshift correction\n if reds is not None:\n pass\n\n return flux\n\n\ndef retr_per1(per0, magf):\n\n per1 = 3.3e-20 * (magf / 10**8.5)**2 * (3e-3 / per0)\n\n return per1\n\n\ndef retr_dlosgalx(lgal, bgal, dglc):\n\n # temp -- this is obviously wrong\n dlos = 8.5e3 - dglc\n\n return dlos\n\n\ndef retr_arryfromlist(listtemp):\n \n shap = [len(listtemp)] + list(listtemp[0].shape)\n arry = np.empty(shap)\n for k in range(len(listtemp)):\n arry[k, ...] 
= listtemp[k]\n \n return arry\n\n\ndef proc_cntpdata(gdat):\n\n # exclude voxels with vanishing exposure\n ## data counts\n if gdat.typedata == 'inpt':\n gdat.cntpdata = retr_cntp(gdat, gdat.sbrtdata)\n \n # data variance\n gdat.varidata = np.maximum(gdat.cntpdata, 1.)\n\n # correct the likelihoods for the constant data dependent factorial\n gdat.llikoffs = -sp.special.gammaln(gdat.cntpdata + 1)\n\n ## spatial average\n gdat.sbrtdatamean, gdat.sbrtdatastdv = retr_spatmean(gdat, gdat.cntpdata, boolcntp=True)\n \n # data count limits\n minmcntpdata = np.amin(gdat.cntpdata)\n maxmcntpdata = np.amax(gdat.cntpdata)\n minm = minmcntpdata\n maxm = maxmcntpdata\n setp_varb(gdat, 'cntpdata', minm=minm, maxm=maxm, lablroot='$C_{D}$', scal='asnh', strgmodl='plot')\n \n maxm = maxmcntpdata\n minm = 1e-1 * minmcntpdata\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n setp_varb(gdat, 'cntpmodl', minm=minm, maxm=maxm, strgmodl=strgmodl, scal='asnh')\n \n print('gdat.labltickmajrpara.cntpmodl')\n print(gdat.labltickmajrpara.cntpmodl)\n\n # residual limits\n maxm = np.ceil(maxmcntpdata * 0.1)\n minm = -np.ceil(maxmcntpdata * 0.1)\n setp_varb(gdat, 'cntpresi', minm=minm, maxm=maxm, lablroot='$C_{R}$', scal='asnh', strgmodl='plot')\n\n # 1-point function of the data counts\n for m in gdat.indxevtt:\n if gdat.numbpixl > 1:\n for i in gdat.indxener: \n print('gdat.cntpdata[i, :, m]')\n summgene(gdat.cntpdata[i, :, m])\n print('gdat.binspara.cntpdata')\n summgene(gdat.binspara.cntpdata)\n histcntp = np.histogram(gdat.cntpdata[i, :, m], bins=gdat.binspara.cntpdata)[0]\n setattr(gdat, 'histcntpdataen%02devt%d' % (i, m), histcntp)\n else:\n histcntp = np.histogram(gdat.cntpdata[:, 0, m], bins=gdat.binspara.cntpdata)[0]\n setattr(gdat, 'histcntpdataevt%d' % m, histcntp)\n\n # obtain cartesian versions of the maps\n if gdat.typepixl == 'cart':\n ## data counts\n gdat.cntpdatacart = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))\n gdat.cntpdatacart[:, gdat.indxpixlrofi, :] = gdat.cntpdata\n gdat.cntpdatacart = gdat.cntpdatacart.reshape((gdat.numbener, gdat.numbsidecart, gdat.numbsidecart, gdat.numbevtt))\n \n\ndef retr_infodens(pdfnpost, pdfnprio):\n \n infodens = pdfnpost * np.log(pdfnpost / pdfnprio)\n\n return infodens\n\n\ndef retr_llik(gdat, strgmodl, cntpmodl):\n \n if gdat.liketype == 'pois':\n llik = gdat.cntpdata * np.log(cntpmodl) - cntpmodl\n if gdat.liketype == 'gaus':\n llik = -0.5 * (gdat.cntpdata - cntpmodl)**2 / gdat.varidata\n \n return llik\n\n\ndef retr_mapsgaus(gdat, lgal, bgal, spec, size, ellp, angl):\n \n rttrmatr = np.array([[np.cos(angl), -np.sin(angl)], [np.sin(angl), np.cos(angl)]])\n icovmatr = np.array([[1. / ((1. - ellp) * size)**2, 0.], [0., 1. / size**2]])\n\n posi = np.array([lgalgrid - lgal, bgalgrid - bgal])\n mapsgaus = flux * np.exp(-0.5 * np.sum(posi * tensordot(self.icovmatr, posi, (1,0)), 0)) / size**2 / (1. - ellp)\n \n return mapsgaus\n\n\ndef retr_sbrtsers(gdat, lgalgrid, bgalgrid, lgal, bgal, spec, size, ellp, angl, seri=np.array([4.])):\n \n lgalrttr = (1. 
- ellp) * (np.cos(angl) * (lgalgrid - lgal) - np.sin(angl) * (bgalgrid - bgal))\n bgalrttr = np.sin(angl) * (lgalgrid - lgal) + np.cos(angl) * (bgalgrid - bgal) \n angl = np.sqrt(lgalrttr**2 + bgalrttr**2)\n \n # interpolate pixel-convolved Sersic surface brightness\n if gdat.typesers == 'intp':\n\n shapinpt = angl.shape \n inpt = np.empty(list(shapinpt) + [3])\n inpt[..., 0] = angl\n inpt[..., 1] = size\n inpt[..., 2] = seri\n \n sbrtsers = spec[:, None, None] * sp.interpolate.interpn((gdat.binspara.lgalsers, gdat.binspara.halfsers, gdat.binspara.indxsers), gdat.sersprof, inpt)[None, :, None]\n \n # evaluate directly de Vaucouleurs\n if gdat.typesers == 'vauc':\n sbrtsers = spec[:, None, None] * retr_sbrtsersnorm(angl, size)[None, :, None]\n \n return sbrtsers\n\n\ndef retr_sbrtsersnorm(angl, halfsers, indxsers=4.):\n\n ## this approximation works for 0.5 < indx < 10\n factsers = 1.9992 * indxsers - 0.3271\n \n ## surface brightness profile at the half-light radius for a 1 erg cm^-2 s^-1 A^-1 source\n if indxsers == 4.:\n sbrthalf = 1. / 7.2 / np.pi / halfsers**2\n else:\n sbrthalf = 1. / 2. / np.pi / np.exp(factsers) * factsers**(2 * indxsers) / indxsers / sp.special.gamma(2. * indxsers) / halfsers**2\n \n ## surface brightness profile\n sbrtsers = sbrthalf * np.exp(-factsers * ((angl / halfsers)**(1. / indxsers) - 1.))\n \n return sbrtsers\n\n\ndef copytdgu(varb):\n \n if isinstance(varb, np.ndarray):\n return np.copy(varb)\n else:\n return deepcopy(varb)\n\n\ndef proc_anim(rtag):\n \n pathoutprtag = retr_pathoutprtag(pathpcat, rtag)\n \n print('Making animations of frame plots for %s...' % rtag)\n \n path = pathoutprtag + 'gdatinit'\n gdat = readfile(path)\n for strgpdfn in gdat.liststrgpdfn:\n for nameextn in gdat.liststrgfoldanim:\n \n pathframextn = gdat.pathimag + rtag + '/' + strgpdfn + '/fram/' + nameextn\n pathanimextn = gdat.pathimag + rtag + '/' + strgpdfn + '/anim/' + nameextn\n \n try:\n listfile = fnmatch.filter(os.listdir(pathframextn), '*_swep*.pdf')\n except:\n print('%s failed.' % pathframextn)\n continue\n \n listfiletemp = []\n for thisfile in listfile:\n listfiletemp.extend((thisfile.split('_')[0]).rsplit('/', 1))\n \n listname = list(set(listfiletemp))\n if len(listname) == 0:\n continue\n \n shuffle(listname)\n \n for name in listname:\n \n strgtemp = '%s*_swep*.pdf' % name\n listfile = fnmatch.filter(os.listdir(pathframextn), strgtemp)\n numbfile = len(listfile)\n liststrgextn = []\n for k in range(numbfile):\n liststrgextn.append((listfile[k].split(name)[1]).split('_')[0])\n \n liststrgextn = list(set(liststrgextn))\n \n for k in range(len(liststrgextn)):\n \n listfile = fnmatch.filter(os.listdir(pathframextn), name + liststrgextn[k] + '_swep*.pdf')\n numbfile = len(listfile)\n \n indxfilelowr = 0\n \n if indxfilelowr < numbfile:\n indxfileanim = np.arange(indxfilelowr, numbfile)\n else:\n continue\n \n indxfileanim = np.random.choice(indxfileanim, replace=False, size=indxfileanim.size)\n \n cmnd = 'convert -delay 20 -density 300 -quality 100 '\n for n in range(indxfileanim.size):\n cmnd += '%s%s ' % (pathframextn, listfile[indxfileanim[n]])\n \n namegiff = '%s%s.gif' % (pathanimextn, name + liststrgextn[k])\n cmnd += ' ' + namegiff\n print('Processing %s' % namegiff)\n if not os.path.exists(namegiff):\n print('Run: %s, pdf: %s' % (rtag, strgpdfn))\n print('Making %s animation...' 
% name)\n os.system(cmnd)\n else:\n print('GIF already exists.')\n pass\n \n pathoutprtag = retr_pathoutprtag(pathpcat, rtag)\n filestat = open(pathoutprtag + 'stat.txt', 'a')\n filestat.write('animfinl written.\\n')\n filestat.close()\n \n\ndef plot_samp(gdat, gdatmodi, strgstat, strgmodl, strgphas, strgpdfn='post', gdatmock=None, booltile=False):\n \n gmod = getattr(gdat, strgmodl)\n gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)\n gmodstat = getattr(gdatobjt, strgstat)\n \n if not booltile:\n \n if strgstat != 'pdfn':\n numbelem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)\n \n if gdatmodi is not None:\n strgswep = '_%09d' % gdatmodi.cntrswep\n else:\n strgswep = ''\n \n if not booltile:\n # data count maps\n if gdat.numbpixl > 1:\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n if gdat.boolmakeframcent and (i != gdat.numbener / 2 or m != gdat.numbevtt / 2):\n continue\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpdata', i, m)\n ## residual count maps\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n if gdat.boolmakeframcent and (i != gdat.numbener / 2 or m != gdat.numbevtt / 2):\n continue\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpresi', i, m)\n \n if gdat.numbpixl > 1:\n if gmod.numbparaelem > 0:\n if gmod.boolelemlens:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelem', booltdim=True)\n \n # temp -- restrict other plots to indxmodlelemcomp\n if gdat.boolbinsener:\n for specconvunit in gdat.listspecconvunit:\n if not gmod.boolbfun:\n plot_sbrt(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, specconvunit)\n \n if gmod.boolapplpsfn:\n plot_psfn(gdat, gdatmodi, strgstat, strgmodl)\n \n setp_indxswepsave(gdat)\n if gmod.numbparaelem > 0:\n # element parameter histograms\n if not (strgmodl == 'true' and gdat.typedata == 'inpt'):\n \n limtydat = gdat.limtydathistfeat\n\n for l in gmod.indxpopl:\n strgindxydat = 'pop%d' % l\n for nameparaderielemodim in gmod.namepara.derielemodim[l]:\n if not (nameparaderielemodim == 'flux' or nameparaderielemodim == 'mcut' or \\\n nameparaderielemodim == 'deltllik' or nameparaderielemodim == 'defs' or nameparaderielemodim == 'nobj'):\n continue\n \n if gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt':\n continue\n indxydat = [l, slice(None)]\n \n name = nameparaderielemodim\n namepopl = nameparaderielemodim + 'pop%d' % l\n lablxdat = getattr(gmod.labltotlpara, namepopl)\n scalxdat = getattr(gmod.scalpara, namepopl)\n limtxdat = getattr(gmod.limtpara, namepopl)\n meanxdat = getattr(gdat.meanpara, name)\n \n if gdat.numbpixl > 1:\n listydattype = ['totl', 'sden']\n else:\n listydattype = ['totl']\n for ydattype in listydattype:\n \n ## plot the surface density of elements\n if ydattype == 'sden':\n \n # plot the surface density of elements only for the amplitude feature\n if nameparaderielemodim != gmod.nameparagenrelemampl: \n continue\n \n if gdat.sdenunit == 'degr':\n lablydat = r'$\\Sigma_{%s}$ [deg$^{-2}$]' % gmod.lablelemextn[l]\n if gdat.sdenunit == 'ster':\n lablydat = r'$\\Sigma_{%s}$ [sr$^{-2}$]' % gmod.lablelemextn[l]\n \n ## plot the total number of elements\n if ydattype == 'totl':\n lablydat = r'$N_{%s}$' % gmod.lablelemextn[l]\n \n if ydattype == 'totl' and not gdat.rtagmock is None:\n listtypehist = ['hist', 'histcorrreca']\n else:\n listtypehist = ['hist']\n \n boolhistprio = not booltile\n for typehist in listtypehist:\n \n if typehist == 
'histcorrreca':\n \n if gmod.numbparaelem == 0 or gdat.priofactdoff == 0.:\n continue\n\n if nameparaderielemodim == 'specplot' or nameparaderielemodim == 'spec' or nameparaderielemodim == 'deflprof':\n continue\n \n if not nameparaderielemodim in gmod.namepara.genrelem[l]:\n continue\n \n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'hist' + nameparaderielemodim + 'pop%d' % l, \\\n 'mean' + nameparaderielemodim, scalydat='logt', lablxdat=lablxdat, \\\n lablydat=lablydat, histodim=True, ydattype=ydattype, \\\n scalxdat=scalxdat, meanxdat=meanxdat, limtydat=limtydat, \\\n limtxdat=limtxdat, boolhistprio=boolhistprio, \\\n #indxydat=indxydat, strgindxydat=strgindxydat, \\\n nameinte='histodim/', typehist=typehist)\n \n if not booltile:\n if gmod.numbparaelem > 0:\n # element parameter correlations\n for l in gmod.indxpopl:\n if strgmodl != 'true' and gdat.boolinforefr and gdat.boolasscrefr:\n for strgfeat in gmod.namepara.derielemodim[l]:\n if not (strgfeat == 'flux' or strgfeat == 'mass' or strgfeat == 'deltllik' or strgfeat == 'nobj') and \\\n (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):\n continue\n for q in gdat.indxrefr:\n if not l in gdat.refrindxpoplassc[q]:\n continue\n if gdat.refr.numbelem[q] == 0:\n continue\n if not strgfeat in gdat.refr.namepara.elem[q] or strgfeat in gdat.refr.namepara.elemonly[q][l]:\n continue\n plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat)\n plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat, plotdiff=True)\n \n if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):\n # plots\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n if gmod.numbpopl > 1:\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpdata', i, m, indxpoplplot=l)\n \n ## histograms of the number of counts per pixel\n limtxdat = [gdat.minmpara.cntpmodl, gdat.maxmpara.cntpmodl]\n for nameecom in gmod.listnameecomtotl:\n name = 'histcntp' + nameecom\n for m in gdat.indxevtt: \n for i in gdat.indxener:\n if gdat.numbener > 1:\n name += 'en%02d' % (i)\n if gdat.numbevtt > 1:\n name += 'evt%d' % (m)\n \n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \\\n name, 'meancntpdata', scalydat='logt', scalxdat='logt', lablxdat=gdat.lablcnts, histodim=True, \\\n lablydat='$N_{pix}$', limtydat=[0.5, gdat.numbener], limtxdat=limtxdat)\n\n \n ## highest amplitude element\n # temp\n if gmod.numbparaelem > 0:\n # completeness and false discovery rate\n if strgmodl != 'true' and gdat.boolasscrefr:\n for strgclas in ['cmpl', 'fdis']:\n nameinte = strgclas + 'odim/'\n limtydat = [getattr(gdat, 'minm' + strgclas), getattr(gdat, 'maxm' + strgclas)]\n for l in gmod.indxpopl:\n for q in gdat.indxrefr:\n if not l in gdat.refrindxpoplassc[q]:\n continue\n if gdat.refr.numbelem[q] == 0 and strgclas == 'cmpl' or gmod.numbparaelem == 0 and strgclas == 'fdis':\n continue\n if strgclas == 'cmpl':\n lablydat = getattr(gmod.lablpara, strgclas + 'pop%dpop%d' % (l, q))\n strgindxydat = 'pop%dpop%d' % (l, q)\n else:\n lablydat = getattr(gmod.lablpara, strgclas + 'pop%dpop%d' % (q, l))\n strgindxydat = 'pop%dpop%d' % (q, l)\n for strgfeat in gdat.refr.namepara.elem[q]:\n if strgfeat == 'etag':\n continue\n if strgclas == 'fdis' and not strgfeat in gmod.namepara.derielemodim[l]:\n continue\n if not strgfeat.startswith('spec') and not strgfeat.startswith('defl') \\\n and not strgfeat in gdat.refr.namepara.elemonly[q][l] and \\\n not (gdat.typedata == 
'mock' and (strgfeat.endswith('pars') or strgfeat.endswith('nrel'))):\n \n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgclas + strgfeat + strgindxydat, \\\n 'mean' + strgfeat, lablxdat=lablxdat, \\\n lablydat=lablydat, \\\n #plottype='errr', \\\n scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \\\n omittrue=True, nameinte=nameinte)\n\n if gmod.numbparaelem > 0:\n alph = 0.1\n if strgmodl == 'true':\n pathtemp = gdat.pathinit\n else:\n if strgstat == 'this':\n pathtemp = gdat.pathplotrtag + strgpdfn + '/fram/'\n elif strgstat == 'mlik':\n pathtemp = gdat.pathplotrtag + strgpdfn + '/finl/'\n elif strgstat == 'pdfn':\n pathtemp = gdat.pathplotrtag + strgpdfn + '/finl/'\n colr = retr_colr(gdat, strgstat, strgmodl, indxpopl=None)\n \n # transdimensional element parameters projected onto the data axes\n if not (strgstat == 'pdfn' and not gdat.boolcondcatl):\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lght':\n # PS spectra\n if strgstat == 'pdfn':\n specplot = [np.empty((gdat.numbenerplot, gdat.numbstkscond))]\n for r in gdat.indxstkscond:\n specplot[0][:, r] = gdat.dictglob['poststkscond'][r]['specplot'][0, :]\n \n listxdat = []\n listplottype = []\n \n for k in range(specplot[l].shape[-1]):\n listxdat.append(gdat.meanpara.enerplot)\n listplottype.append('lghtline')\n \n for specconvunit in gdat.listspecconvunit:\n listydat = []\n \n for k in range(specplot[l].shape[-1]):\n specplottemp = specplot[l]\n if strgmodl == 'true':\n specplottemp = np.copy(specplottemp[0, :, k])\n else:\n specplottemp = np.copy(specplottemp[:, k])\n if specconvunit[0] == 'en01':\n specplottemp *= gdat.meanpara.enerplot\n if specconvunit[0] == 'en02':\n specplottemp *= gdat.meanpara.enerplot**2\n if specconvunit[0] == 'en03':\n # temp\n pass\n listydat.append(specplottemp)\n \n lablydat = getattr(gmod.lablpara, 'flux' + specconvunit[0] + specconvunit[1] + 'totl')\n strgtemp = specconvunit[0] + specconvunit[1]\n if specconvunit[0] == 'en03':\n strgtemp += specconvunit[2]\n path = pathtemp + strgstat + 'specpop%d%s%s.pdf' % (l, strgtemp, strgswep)\n limtydat = [np.amin(gdat.minmspec), np.amax(gdat.maxmspec)]\n tdpy.plot_gene(path, listxdat, listydat, scalxdat='logt', scalydat='logt', \\\n lablxdat=gdat.lablenertotl, colr=colr, alph=alph, \\\n plottype=listplottype, limtxdat=[gdat.minmener, gdat.maxmener], lablydat=lablydat, \\\n limtydat=limtydat)\n \n if gmod.boollenssubh:\n\n ## deflection profiles\n if gdat.boolvariasca and gdat.boolvariacut:\n lablxdat = gdat.labltotlpara.gang\n if strgstat == 'pdfn':\n deflprof = [np.empty((gdat.numbanglfull, gdat.numbstkscond))]\n asca = [np.empty(gdat.numbstkscond)]\n acut = [np.empty(gdat.numbstkscond)]\n for r in gdat.indxstkscond:\n deflprof[0][:, r] = gdat.dictglob['poststkscond'][r]['deflprof'][0, :]\n asca[0][r] = gdat.dictglob['poststkscond'][r]['asca'][0]\n acut[0][r] = gdat.dictglob['poststkscond'][r]['acut'][0]\n\n for l in range(len(deflprof)):\n xdat = gdat.meanpara.anglfull * gdat.anglfact\n listydat = []\n listvlinfrst = []\n listvlinseco = []\n \n if 'deflprof' in gmod.typeelem[l]:\n\n if strgmodl == 'true':\n deflproftemp = deflprof[l][0, :, :]\n else:\n deflproftemp = deflprof[l]\n \n for k in range(deflprof[l].shape[-1]):\n listydat.append(deflproftemp[:, k] * gdat.anglfact)\n if strgmodl == 'true':\n ascatemp = asca[l][0, k]\n acuttemp = acut[l][0, k]\n else:\n ascatemp = asca[l][k]\n acuttemp = acut[l][k]\n listvlinfrst.append(ascatemp * gdat.anglfact) \n listvlinseco.append(acuttemp * gdat.anglfact)\n \n beinhost = 
retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'paragenrscalfull', strgpdfn, indxvarb=gmod.indxpara.beinhost)\n listydat.append(xdat * 0. + gdat.anglfact * beinhost)\n \n path = pathtemp + strgstat + 'deflsubhpop%d%s.pdf' % (l, strgswep)\n limtydat = [1e-3, 1.]\n limtxdat = [1e-3, 1.]\n tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', \\\n lablxdat=lablxdat, drawdiag=True, limtydat=limtydat, \\\n limtxdat=limtxdat, colr=colr, alph=alph, lablydat=r'$\\alpha$ [$^{\\prime\\prime}$]', \\\n listvlinfrst=listvlinfrst, listvlinseco=listvlinseco)\n \n if gdat.typedata == 'mock':\n # pulsar masses\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lghtpntspuls':\n lablxdat = gdat.labltotlpara.gang\n limtydat = [gdat.minmmassshel, gdat.maxmmassshel]\n lablydat = gdat.lablmassshel\n name = 'massshelpop%d' % l\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \\\n lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)\n\n if gmod.boollens:\n ## radial mass budget\n lablxdat = gdat.lablanglfromhosttotl\n for strgcalcmasssubh in gdat.liststrgcalcmasssubh:\n \n # host mass\n for e in gmod.indxsersfgrd:\n strgsersfgrd = 'isf%d' % e\n limtydat = [gdat.minmmcut, getattr(gdat, 'plotmaxmmasshost' + strgsersfgrd + strgcalcmasssubh + 'bein')]\n lablydat = getattr(gmod.lablpara, 'masshost' + strgsersfgrd + strgcalcmasssubh + 'totl')\n name = 'masshost%s%s' % (strgsersfgrd, strgcalcmasssubh)\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \\\n lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)\n \n if gmod.boolelemdeflsubhanyy:\n # subhalo masses\n limtydat = [gdat.minmmcut, getattr(gdat, 'plotmaxmmasssubh' + strgcalcmasssubh + 'bein')]\n lablydat = getattr(gmod.lablpara, 'masssubh' + strgcalcmasssubh + 'totl')\n name = 'masssubh%s' % (strgcalcmasssubh)\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \\\n lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)\n\n # subhalo mass fraction\n limtydat = [1e-3, 0.1]\n lablydat = getattr(gmod.lablpara, 'fracsubh' + strgcalcmasssubh + 'totl')\n name = 'fracsubh%s' % (strgcalcmasssubh)\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \\\n lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)\n\n alph = 0.1\n\n if gdat.boolmodipsfn and gmod.boolelempsfnanyy:\n ## PSF radial profile\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n indxydat = [i, slice(None), m]\n strgindxydat = 'en%02devt%d' % (i, m)\n lablxdat = gdat.labltotlpara.gang\n limtydat= np.array([1e-3, 1e3]) * gdat.anglfact**2\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'psfn', \\\n 'binsangl', indxydat=indxydat, strgindxydat=strgindxydat, scalydat='logt', \\\n lablxdat=lablxdat, lablydat=r'$\\mathcal{P}$', limtydat=limtydat)\n \n # internally and externally corrected element parameter histograms\n if gdat.typedata == 'inpt' and strgstat == 'pdfn' and gdat.rtagmock is not None:\n limtydat = gdat.limtydathistfeat\n for l in gmod.indxpopl:\n strgindxydat = 'pop%d' % l\n for strgfeat in gmod.namepara.derielemodim[l]:\n if strgfeat.startswith('aerr') or strgfeat == 'specplot' or strgfeat == 'spec' or strgfeat == 'deflprof':\n continue\n lablydat = r'$N_{%s}$' % gmod.lablelemextn[l]\n for namecorr in ['incr', 'excr']:\n nameinte = namecorr + 'odim/'\n for qq in gdatmock.indxrefr:\n if namecorr == 'excr':\n if not strgfeat in gmod.namepara.extrelem[l]:\n continue\n q = 
gdat.listnamerefr.index(strgfeat[-4:])\n if getattr(gdat, 'crex' + strgfeat + 'pop%dpop%dpop%d' % (q, qq, l)) is None:\n continue\n name = namecorr + strgfeat + 'pop%dpop%dpop%d' % (q, qq, l)\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'mean' + strgfeat, scalydat='logt', lablxdat=lablxdat, \\\n lablydat=lablydat, histodim=True, ydattype='totl', \\\n scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \\\n nameinte=nameinte)\n \n else:\n if strgfeat in gmod.namepara.extrelem[l]:\n continue\n name = namecorr + strgfeat + 'pop%dpop%d' % (qq, l)\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'mean' + strgfeat, scalydat='logt', lablxdat=lablxdat, \\\n lablydat=lablydat, histodim=True, ydattype='totl', \\\n scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \\\n nameinte=nameinte)\n\n\n if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):\n if gmod.numbparaelem > 0:\n # element parameter correlations\n liststrgelemtdimvarb = getattr(gdat, 'liststrgelemtdimvarb' + strgphas)\n for strgelemtdimtype in gdat.liststrgelemtdimtype:\n for strgelemtdimvarb in liststrgelemtdimvarb:\n if strgelemtdimvarb.startswith('cmpl'):\n continue\n for l0 in gmod.indxpopl:\n for strgfrst in gmod.namepara.genrelem[l0]:\n \n if strgfrst.startswith('spec') or strgfrst == 'specplot' or strgfrst == 'deflprof':\n continue\n\n for strgseco in gmod.namepara.genrelem[l0]:\n \n if strgseco.startswith('spec') or strgseco == 'specplot' or strgseco == 'deflprof':\n continue\n \n if not checstrgfeat(strgfrst, strgseco):\n continue\n \n if strgelemtdimvarb.startswith('hist'):\n \n strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%d' % l0\n plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \\\n l0, strgfrst + 'pop%d' % l0, \\\n strgseco + 'pop%d' % l0, \\\n strgtotl, strgpdfn=strgpdfn)\n else:\n if booltile:\n continue\n\n if strgfrst.startswith('aerr') or strgseco.startswith('aerr'):\n continue\n if strgelemtdimvarb.startswith('fdis'):\n for q in gdat.indxrefr:\n strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%d' % (q, l0)\n plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \\\n l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)\n elif strgelemtdimvarb.startswith('excr') or strgelemtdimvarb.startswith('incr'):\n for qq in gdatmock.indxrefr:\n if strgelemtdimvarb.startswith('excr'):\n for q in gdat.indxrefr:\n if getattr(gdat, 'crex' + strgfrst + strgseco + 'pop%dpop%dpop%d' % (q, qq, l0)) is None:\n continue\n strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%dpop%d' % (q, qq, l0)\n plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \\\n l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)\n else:\n if strgfrst[-4:] in gdat.listnamerefr and strgseco[-4:] in gdat.listnamerefr:\n continue\n strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%d' % (qq, l0)\n plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \\\n l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)\n \n if not (gdat.typedata == 'mock' and (gmod.numbelemtotl == 0 or gmod.maxmpara.numbelemtotl == 0)):\n \n\n for q in gdat.indxrefr:\n \n if strgphas == 'init' and gdat.typedata == 'mock':\n continue\n print('strgpdfn')\n print(strgpdfn)\n raise Exception('')\n\n if booltile:\n continue\n for l0 in gmod.indxpopl:\n for refrstrgfrst in gdat.refr.namepara.elem[q]:\n if refrstrgfrst == 'spec' or refrstrgfrst == 'specplot' or 
refrstrgfrst == 'deflprof' or refrstrgfrst == 'etag':\n continue\n if refrstrgfrst in gdat.refr.namepara.elemonly[q][l0]:\n continue\n for refrstrgseco in gdat.refr.namepara.elem[q]:\n if refrstrgseco in gdat.refr.namepara.elemonly[q][l0]:\n continue\n if refrstrgseco == 'spec' or refrstrgseco == 'specplot' or refrstrgseco == 'deflprof' or refrstrgseco == 'etag':\n continue\n \n if not checstrgfeat(refrstrgfrst, refrstrgseco):\n continue\n \n if refrstrgfrst.startswith('aerr') or refrstrgseco.startswith('aerr') or refrstrgfrst == 'specplot' or refrstrgseco == 'specplot':\n continue\n \n strgtotl = 'cmpl' + refrstrgfrst + refrstrgseco + 'pop%dpop%d' % (l0, q)\n \n plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, 'bind', 'cmpl', \\\n q, refrstrgfrst + 'pop%d' % l0, refrstrgseco + 'pop%d' % l0, strgtotl, strgpdfn=strgpdfn)\n \n if not booltile:\n if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):\n # data and model count scatter\n for m in gdat.indxevttplot:\n if gdat.numbpixl > 1:\n for i in gdat.indxener:\n plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, m, indxenerplot=i)\n else:\n plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, m)\n\n ## spatial priors\n # temp\n if gdat.numbpixl > 1:\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n for strgfeat, strgpdfn in zip(gmod.namepara.genrelemmodu[l], gmod.liststrgpdfnmodu[l]):\n if strgpdfn == 'tmplreln':\n plot_genemaps(gdat, gdatmodi, 'fitt', strgpdfn, 'lpdfspatpriointp', booltdim=True)\n if strgpdfn == 'tmplgaum':\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'lpdfspatpriointp', booltdim=True)\n \n # model count maps\n ## backgrounds\n if gdat.numbpixl > 1:\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n for c in gmod.indxback:\n if gmod.boolbfun:\n continue\n if not gmod.boolunifback[c]:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpback%04d' % c, i, m, strgcbar='cntpdata')\n \n ## count error\n if strgmodl != 'true':\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n if gmod.boolcalcerrr[l]:\n for i in gdat.indxener:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntperrr', i, -1, strgcbar='cntpresi')\n \n ## diffuse components \n for i in gdat.indxener:\n for k, name in enumerate(gmod.listnamediff):\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntp%s' % (name), i, strgcbar='cntpdata')\n \n ## model count maps\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpmodl', i, m, strgcbar='cntpdata')\n \n # likelihood\n if strgmodl != 'true':\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'llik', i, m, strgcbar='llikmaps')\n \n if gmod.boollens:\n ## lensing signal to noise\n if strgmodl == 'true':\n for i in gdat.indxener:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 's2nr', i, -1)\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magn', booltdim=True)\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'conv', booltdim=True)\n for i in gdat.indxener:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplens', i, strgcbar='cntpdata', booltdim=True)\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplensgradmgtd', i, strgcbar='cntpdata', booltdim=True)\n \n if gdat.penalpridiff:\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \\\n 
'psecodimdatapntsen%02devt%d' % (i, m), 'meanmpolodim', lablxdat='$l$', lablydat='$P_{resi}(l)$', \\\n limtydat=[1e-2, 2.], scalxdat='logt', scalydat='logt')\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'psecodimdatapntsprioen%02devt%d' % (i, m), 'meanmpolodim', lablxdat='$l$', \\\n lablydat='$P_{prio}(l)$', limtydat=[1e-2, 2.], scalxdat='logt', scalydat='logt')\n \n if gmod.boollens:\n indxydat = [slice(None)]\n strgindxydat = ''\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convpsecodim', 'meanwvecodim', lablxdat='$k$ [1/kpc]', lablydat='$P(k)$', limtydat=[1e-1, 1e2], \\\n scalxdat='logt', scalydat='logt', indxydat=indxydat, strgindxydat=strgindxydat)\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'histdefl', 'meandefl', \\\n scal='self', lablxdat=r'$\\alpha$ [arcsec]', lablydat=r'$N_{pix}$', \\\n strgindxydat=strgindxydat, indxydat=indxydat, histodim=True)\n if gmod.numbparaelem > 0 and gmod.boolelemdeflsubhanyy:\n indxydat = [slice(None)]\n strgindxydat = ''\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convpsecelemodim', 'meanwvecodim', lablxdat='$k$ [1/kpc]', lablydat='$P_{sub}(k)$', \\\n strgindxydat=strgindxydat, indxydat=indxydat, limtydat=[1e-5, 1e-1], scalxdat='logt', scalydat='logt')\n plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'histdeflsubh', 'meandeflsubh', scal='self', lablxdat=r'$\\alpha$ [arcsec]', \\\n strgindxydat=strgindxydat, indxydat=indxydat, lablydat=r'$N_{pix}$', histodim=True)\n \n if gmod.boollens:\n for i in gdat.indxener:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrd', i, -1, strgcbar='cntpdata')\n if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrdgalx', i, -1, strgcbar='cntpdata')\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrdexts', i, -1, strgcbar='cntpdata')\n \n # gradient of the lens emission\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplensgrad', indxenerplot=i, indxevttplot=m)\n \n if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):\n if gmod.boollens:\n # overall deflection field\n plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, multfact=0.1)\n \n # deflection field due to individual lenses\n for k in range(numbdeflsingplot): \n if k == 0:\n multfact = 0.1\n elif k == 1:\n multfact = 1.\n elif k >= 2:\n multfact = 10.\n plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, indxdefl=k, multfact=multfact)\n \n # residual deflection field\n if strgmodl == 'fitt' and gdat.typedata == 'mock':\n plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, nameparagenrelem='resi', multfact=100.)\n if strgstat != 'pdfn':\n for k in range(numbsingcomm):\n plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, nameparagenrelem='resi', indxdefl=k, multfact=100.)\n \n if gdat.numbpixl > 1:\n if gmod.numbparaelem > 0:\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelemresi', booltdim=True)\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelemresiperc', booltdim=True)\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magnresi', booltdim=True)\n plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magnresiperc', booltdim=True)\n \n\ndef dele_rtag(rtag):\n \n pathdata = pathpcat + '/data/outp/'\n pathimag = pathpcat + '/imag/'\n \n cmnd = 'rm -rf %s%s' % (pathdata, rtag)\n print(cmnd)\n os.system(cmnd)\n cmnd = 'rm -rf 
%s%s' % (pathimag, rtag)\n os.system(cmnd)\n print(cmnd)\n\n\ndef plot_infopvks(gdat, gdatprio, name, namefull, nameseco=None):\n \n pvks = getattr(gdat, 'pvks' + namefull)\n\n info = getattr(gdat, 'info' + namefull)\n\n path = gdat.pathinfo + 'info' + namefull\n\n if nameseco is not None:\n \n indxpoplfrst = int(namefull[-1])\n \n # information gain\n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n imag = axis.pcolor(varbfrst, varbseco, info, cmap='Greys')\n plt.colorbar(imag)\n plot_sigmcont(gdat.fitt, '', axis, name, indxpoplfrst, strgseco=nameseco)\n if scalfrst == 'logt':\n axis.set_xscale('log')\n if scalseco == 'logt':\n axis.set_yscale('log')\n axis.set_xlabel(getattr(gdat.labltotlpara, name))\n axis.set_ylabel(getattr(gdat.labltotlpara, nameseco))\n axis.set_xlim(limtfrst)\n axis.set_ylim(limtseco)\n plt.tight_layout()\n plt.savefig(path)\n plt.close(figr)\n\n # KS test p value\n pathpvkstdim = gdat.pathinfo + 'pvks' + namefull\n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n imag = axis.pcolor(varbfrst, varbseco, pvks, cmap='Greys')\n plt.colorbar(imag)\n plot_sigmcont(gdat.fitt, '', axis, name, indxpoplfrst, strgseco=nameseco)\n if scalfrst == 'logt':\n axis.set_xscale('log')\n if scalseco == 'logt':\n axis.set_yscale('log')\n axis.set_xlabel(getattr(gdat.labltotlpara, name))\n axis.set_ylabel(getattr(gdat.labltotlpara, nameseco))\n axis.set_xlim(limtfrst)\n axis.set_ylim(limtseco)\n plt.tight_layout()\n plt.savefig(pathpvkstdim)\n plt.close(figr)\n\n elif name != namefull:\n \n lablydat = '$D_{KL}$'\n lablxdat = getattr(gmod.lablpara, name + 'totl')\n xdat = getattr(gdat, 'mean' + name)\n ydat = getattr(gdat, 'info' + namefull)\n tdpy.mcmc.plot_plot(path, xdat, ydat, lablxdat, lablydat, scal)\n \n ydat = getattr(gdat, 'pvks' + namefull)\n pathpvks = gdat.pathinfo + 'pvks' + namefull\n tdpy.mcmc.plot_plot(pathpvks, xdat, ydat, lablxdat, '$p_{KS}$', scal)\n \n else:\n # horizontal axis\n xdat = getattr(gdat, 'mean' + name)\n lablxdat = getattr(gmod.lablpara, name + 'totl')\n \n # scaling\n scal = getattr(gdat, 'scal' + name) \n \n # common title\n titl = '$D_{KL} = %.3g$, KS = %.3g $\\sigma$' % (info, pvks)\n\n # DKL density\n pathdinf = gdat.pathinfo + 'dinf' + namefull\n ydat = getattr(gdat, 'infodens' + namefull)\n lablydat = r'$\\rho_{D_{KL}}$'\n tdpy.mcmc.plot_plot(pathdinf, xdat, ydat, lablxdat, lablydat, scal, titl=titl)\n \n # prior and posterior PDFs\n pathpdfn = gdat.pathinfo + 'pdfn' + namefull\n lablydat = r'$P$'\n ydat = [getattr(gdat, 'pdfnpost' + namefull), getattr(gdatprio, 'pdfnprio' + namefull)]\n legd = ['$P$(%s|$D$)' % lablxdat, '$P$(%s)' % lablxdat]\n tdpy.mcmc.plot_plot(pathpdfn, xdat, ydat, lablxdat, lablydat, scal, colr=['k', 'k'], linestyl=['-', '--'], legd=legd, titl=titl)\n\n\ndef plot_finl(gdat=None, gdatprio=None, rtag=None, strgpdfn='post', gdatmock=None, booltile=None):\n \n if gdat.typeverb > 0:\n print('plot_finl()')\n print('Producing postprocessing plots...')\n\n timetotlinit = gdat.functime()\n \n gdat.strgbest = 'ML'\n \n if not booltile:\n # terms in the log-acceptance probability\n listindxsamptotlproptotl = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlproptotl')\n listindxsamptotlpropaccp = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlpropaccp')\n listindxsamptotlpropreje = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlpropreje')\n for n in gdat.indxproptype:\n pathbase = getattr(gdat, 'path' + strgpdfn + 'finl%s' % gdat.nameproptype[n])\n for k in gdat.indxtermlacp:\n varb = getattr(gdat, 
'list' + strgpdfn + gdat.listnametermlacp[k])\n labl = gdat.listlabltermlacp[k]\n \n if listindxsamptotlproptotl[n].size > 0 and (varb[listindxsamptotlproptotl[n]] != 0.).any():\n path = pathbase + gdat.listnametermlacp[k] + 'totl'\n tdpy.mcmc.plot_trac(path, varb[listindxsamptotlproptotl[n]], labl, titl=gdat.nameproptype[n] + ', Total')\n \n if listindxsamptotlpropaccp[n].size > 0 and (varb[listindxsamptotlpropaccp[n]] != 0.).any():\n path = pathbase + gdat.listnametermlacp[k] + 'accp'\n tdpy.mcmc.plot_trac(path, varb[listindxsamptotlpropaccp[n]], labl, titl=gdat.nameproptype[n] + ', Accepted')\n \n if listindxsamptotlpropreje[n].size > 0 and (varb[listindxsamptotlpropreje[n]] != 0.).any():\n path = pathbase + gdat.listnametermlacp[k] + 'reje'\n tdpy.mcmc.plot_trac(path, varb[listindxsamptotlpropreje[n]], labl, titl=gdat.nameproptype[n] + ', Rejected')\n \n if gdat.checprio and strgpdfn == 'post' and not booltile:\n # this works only for scalar variables -- needs to be generalized to all variables\n if gdatprio is None:\n pathoutprtag = retr_pathoutprtag(pathpcat, rtag)\n path = pathoutprtag + 'gdatfinlprio'\n gdatprio = readfile(path)\n\n for namevarbscal in gmod.namepara.scal:\n plot_infopvks(gdat, gdatprio, namevarbscal, namevarbscal)\n for l in gmod.indxpopl:\n for strgfeatfrst in gmod.namepara.genrelem[l]:\n if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':\n continue\n plot_infopvks(gdat, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l)\n for strgfeatseco in gmod.namepara.genrelem[l]:\n if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':\n continue\n \n if not checstrgfeat(strgfeatfrst, strgfeatseco):\n continue\n \n plot_infopvks(gdat, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l, nameseco=strgfeatseco)\n \n listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')\n listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')\n listparagenrscalbase = getattr(gdat, 'list' + strgpdfn + 'paragenrscalbase')\n \n listboolpropfilt = getattr(gdat, 'list' + strgpdfn + 'boolpropfilt')\n listmemoresi = getattr(gdat, 'list' + strgpdfn + 'memoresi')\n listindxproptype = getattr(gdat, 'list' + strgpdfn + 'indxproptype')\n listsampproc = getattr(gdat, 'list' + strgpdfn + 'sampproc')\n \n # Gelman-Rubin test\n pathdiag = getattr(gdat, 'path' + strgpdfn + 'finldiag')\n if gdat.numbproc > 1:\n if np.isfinite(gdat.gmrbstat).all():\n if gdat.typeverb > 0:\n print('Gelman-Rubin TS...')\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n minm = min(np.amin(gdat.gmrbstat), np.amin(gdat.gmrbparagenrscalbase))\n maxm = max(np.amax(gdat.gmrbstat), np.amax(gdat.gmrbparagenrscalbase))\n bins = np.linspace(minm, maxm, 40)\n axis.hist(gdat.gmrbstat.flatten(), bins=bins, label='Data proj.')\n axis.hist(gdat.gmrbparagenrscalbase, bins=bins, label='Fixed dim.')\n axis.set_xlabel('PSRF')\n axis.set_ylabel('$N_{stat}$')\n plt.tight_layout()\n figr.savefig(pathdiag + 'gmrbhist.pdf')\n plt.close(figr)\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n axis.plot(gmod.indxparagenrbase, gdat.gmrbparagenrscalbase)\n axis.set_xticklabels(gmod.labltotlpara.genrbase)\n axis.set_ylabel('PSRF')\n plt.tight_layout()\n figr.savefig(pathdiag + 'gmrbparagenrscalbase.pdf')\n plt.close(figr)\n \n for i in gdat.indxener:\n for m in gdat.indxevtt:\n maps = gdat.gmrbstat[i, :, m]\n path = pathdiag + 'gmrbdataen%02devt%d.pdf' % (i, m)\n 
tdpy.plot_maps(path, maps, indxpixlrofi=gdat.indxpixlrofi, numbpixl=gdat.numbpixlfull, typepixl=gdat.typepixl, \\\n minmlgal=gdat.anglfact*gdat.minmlgal, maxmlgal=gdat.anglfact*gdat.maxmlgal, \\\n minmbgal=gdat.anglfact*gdat.minmbgal, maxmbgal=gdat.anglfact*gdat.maxmbgal)\n else:\n print('Inappropriate Gelman-Rubin test statistics encountered.')\n \n # plot autocorrelation\n if gdat.typeverb > 0:\n print('Autocorrelation...')\n tdpy.mcmc.plot_atcr(pathdiag, gdat.atcrcntp[0, 0, 0, 0, :], gdat.timeatcrcntp[0, 0, 0, 0], strgextn='cntp')\n tdpy.mcmc.plot_atcr(pathdiag, gdat.atcrpara[0, 0, :], gdat.timeatcrpara[0, 0], strgextn='para')\n print('Autocorrelation times:')\n for k, namepara in enumerate(gmod.namepara):\n print('%s %g' % (namepara, np.mean(gdat.timeatcrpara[:, k])))\n \n # plot proposal efficiency\n if gdat.typeverb > 0:\n print('Acceptance ratio...')\n numbtimemcmc = 20\n binstimemcmc = np.linspace(0., gdat.numbswep, numbtimemcmc)\n numbtick = 2\n sizefigrydat = 4. * gdat.numbproptype\n figr, axgr = plt.subplots(gdat.numbproptype, 1, figsize=(12., sizefigrydat), sharex='all')\n if gdat.numbproptype == 1:\n axgr = [axgr]\n for n, axis in enumerate(axgr):\n histtotl = axis.hist(listindxsamptotlproptotl[n], bins=binstimemcmc)[0]\n histaccp = axis.hist(listindxsamptotlpropaccp[n], bins=binstimemcmc)[0]\n axis.set_ylabel('%s' % gdat.nameproptype[n])\n if k == gdat.numbproptype - 1:\n axis.set_xlabel('$i_{samp}$')\n plt.tight_layout()\n figr.savefig(pathdiag + 'accpratiproptype.pdf')\n plt.close(figr)\n \n if gdat.typeverb > 0:\n print('Proposal execution times...')\n \n ## time performance\n #listchro = np.empty((gdat.numbswep, gdat.numbchro))\n #listchro = []\n #for k, name in enumerate(gdat.listnamechro):\n # #listchro[:, k] = getattr(gdat, 'list' + strgpdfn + 'chro' + name).flatten() * 1e3\n # listchro.append(getattr(gdat, 'list' + strgpdfn + 'chro' + name).flatten() * 1e3)\n #pathdiag = getattr(gdat, 'path' + strgpdfn + 'finldiag')\n #figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))\n #axis.violin(listchro)\n #axis.set_yscale('log')\n #axis.set_ylabel('$t$ [ms]')\n #axis.set_xticklabels(gdat.listlablchro)\n #axis.axvline(mean(chro), ls='--', alpha=0.2, color='black')\n #figr.savefig(pathdiag + 'chro.pdf' % gdat.listnamechro[k])\n #plt.close(figr)\n\n # temp\n gdat.lablpmea = 'Mean'\n\n # posterior versions of the frame plots\n plot_samp(gdat, None, 'pdfn', 'fitt', 'finl', strgpdfn=strgpdfn, gdatmock=gdatmock, booltile=booltile)\n \n if booltile:\n return\n\n if gmod.numbparaelem > 0:\n if gdat.typeverb > 0:\n print('A mosaic of samples...')\n \n ## mosaic of images of posterior catalogs\n if gdat.numbpixl > 1:\n plot_mosa(gdat, strgpdfn)\n \n ## randomly selected trandimensional parameters\n if gmod.numbparaelem > 0:\n if gdat.typeverb > 0:\n print('Transdimensional parameters...')\n \n # choose the parameters based on persistence\n stdvlistsamptran = np.std(listparagenrscalfull[:, gmod.indxsamptrap], axis=0)\n indxtrapgood = np.where(stdvlistsamptran > 0.)[0]\n gmod.numbparaelemgood = indxtrapgood.size\n gmod.numbparaelemplot = min(3, gmod.numbparaelemgood)\n if gmod.numbparaelemplot > 0:\n indxtrapplot = np.sort(np.random.choice(gmod.indxsamptrap[indxtrapgood], size=gmod.numbparaelemplot, replace=False))\n\n path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')\n tdpy.mcmc.plot_grid(path, 'listelemfrst', listparagenrscalfull[:, gmod.indxsamptrap[:3]], [gmod.lablpara[k] for k in gmod.indxsamptrap[:3]])\n path = getattr(gdat, 'path' + strgpdfn + 
'finlvarbscalcova')\n tdpy.mcmc.plot_grid(path, 'listsamp', listparagenrscalfull[:, indxtrapplot], ['%d' % k for k in indxtrapplot])\n path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')\n tdpy.mcmc.plot_grid(path, 'listsamp', listparagenrscalfull[:, indxtrapplot], [gmod.lablpara[k] for k in indxtrapplot])\n \n if gdat.typeverb > 0:\n print('Scalar variables...')\n # scalar variables\n ## trace and marginal distribution of each parameter\n for name in gmod.namepara.scal:\n \n if gdat.typeverb > 0:\n print('Working on %s...' % name)\n scal = getattr(gdat, 'scal' + name) \n corr = getattr(gdat, 'corr' + name)\n if corr is None:\n truepara = None\n else:\n truepara = getattr(gdat, 'corr' + name)\n \n listvarb = getattr(gdat, 'list' + strgpdfn + name)\n if listvarb.ndim != 1:\n if listvarb.shape[1] == 1:\n listvarb = listvarb[:, 0]\n else:\n raise Exception('')\n \n mlik = getattr(gdat, 'mlik' + name)\n path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscaltrac') + name\n tdpy.mcmc.plot_trac(path, listvarb, labltotl, truepara=truepara, scalpara=scal, listvarbdraw=[mlik], listlabldraw=[''], listcolrdraw=['r'])\n path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalhist') + name\n tdpy.mcmc.plot_hist(path, listvarb, labltotl, truepara=truepara, scalpara=scal, listvarbdraw=[mlik], listlabldraw=[''], listcolrdraw=['r'])\n \n for nameseco in gmod.namepara.scal:\n \n if name == nameseco:\n continue\n \n if gdat.typeverb > 0:\n print('Working on correlation of %s with %s...' % (name, nameseco))\n \n pathjoin = getattr(gdat, 'path' + strgpdfn + 'finlvarbscaljoin')\n if corrseco is None:\n trueparaseco = None\n else:\n trueparaseco = getattr(gdat, 'corr' + nameseco)\n \n if listvarbseco.ndim != 1:\n if listvarbseco.shape[1] == 1:\n listvarbseco = listvarbseco[:, 0]\n else:\n raise Exception('')\n \n listjoin = np.vstack((listvarb, listvarbseco)).T\n \n tdpy.mcmc.plot_grid(pathjoin, name + nameseco, listjoin, [labltotl, labltotlseco], scalpara=[scal, scalseco], truepara=[truepara, trueparaseco], \\\n join=True, listvarbdraw=[np.array([mlik, mlikseco])])\n\n if gdat.typeverb > 0:\n print('Fixed dimensional parameter covariance...')\n \n ### covariance\n ## overall\n path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')\n truepara = gmod.corrparagenrscalbase\n mlikpara = gdat.mlikparagenrscalbase\n tdpy.mcmc.plot_grid(path, 'paragenrscalbase', listparagenrscalbase, gmod.labltotlpara.genrbasetotl, truepara=truepara, listvarbdraw=[mlikpara])\n \n # stacked posteiors binned in position and flux\n if gmod.numbparaelem > 0 and gdat.numbpixl > 1:\n liststrgbins = ['quad', 'full']\n for l in gmod.indxpopl:\n plot_histlgalbgalelemstkd(gdat, strgpdfn, l, 'cumu')\n for strgbins in liststrgbins:\n plot_histlgalbgalelemstkd(gdat, strgpdfn, l, strgbins, namepara.elemsign[l])\n\n if gdat.typeverb > 0:\n print('Prior and likelihood...')\n \n for strgpdfntemp in ['lpritotl', 'lliktotl']:\n\n if strgpdfntemp == 'lpritotl':\n labltemp = '\\ln P(M)'\n if strgpdfntemp == 'lliktotl':\n labltemp = '\\ln P(D|M)'\n labl = r'$%s$' % labltemp\n\n path = getattr(gdat, 'path' + strgpdfn + 'finl') + strgpdfntemp\n \n varb = getattr(gdat, 'list' + strgpdfn + strgpdfntemp)\n tdpy.mcmc.plot_hist(path, varb, labl)\n listvarbdraw = []\n listlabldraw = []\n listcolrdraw = []\n if gdat.typedata == 'mock':\n listvarbdraw += [getattr(gdat.true, strgpdfntemp)]\n listlabldraw += ['True model']\n listcolrdraw += [gdat.refr.colr]\n \n tdpy.mcmc.plot_trac(path, getattr(gdat, 'list' + strgpdfn + strgpdfntemp), labl, \\\n 
listvarbdraw=listvarbdraw, listlabldraw=listlabldraw, listcolrdraw=listcolrdraw)\n \n # plot resident memory\n figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))\n axis.plot(gdat.indxswep, np.mean(listmemoresi, 1) / float(2**30))\n axis.set_ylabel(r'$M$ [GB]')\n axis.set_xlabel(r'$i_{samp}$')\n plt.tight_layout()\n figr.savefig(pathdiag + 'memoresi.pdf')\n plt.close(figr)\n\n timetotlfinl = gdat.functime()\n if gdat.typeverb > 0:\n print('Plots and animations are produced in %.3g seconds.' % (timetotlfinl - timetotlinit))\n\n\ndef plot_sbrt(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, specconvunit):\n \n gmod = getattr(gdat, strgmodl)\n gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)\n gmodstat = getattr(gdatobjt, strgstat)\n \n for b, namespatmean in enumerate(gdat.listnamespatmean):\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n \n # plot reference spectra\n if gdat.listprefsbrtlabltotl is not None:\n for k in range(len(gdat.listprefsbrtlabltotl)):\n if gdat.listprefsbrttype[k] == 'shad':\n factenerrefr = [[] for a in range(3)]\n for a in range(3):\n factenerrefr[a] = retr_factener(specconvunit[0], gdat.listprefsbrtener[k][a])\n axis.plot(gdat.listprefsbrtener[k][0], gdat.listprefsbrtsbrt[k][0] * factenerrefr[0], color='m', label=gdat.listprefsbrtlabltotl[k])\n enerpoly = np.empty(gdat.listprefsbrtener[k][1].size + gdat.listprefsbrtener[k][2].size)\n enerpoly[:gdat.listprefsbrtener[k][1].size] = gdat.listprefsbrtener[k][1]\n enerpoly[gdat.listprefsbrtener[k][1].size:] = gdat.listprefsbrtener[k][2][::-1]\n sbrtpoly = np.empty(gdat.listprefsbrtener[k][1].size + gdat.listprefsbrtener[k][2].size)\n sbrtpoly[:gdat.listprefsbrtener[k][1].size] = gdat.listprefsbrtsbrt[k][1] * factenerrefr[1]\n sbrtpoly[gdat.listprefsbrtener[k][1].size:] = gdat.listprefsbrtsbrt[k][2][::-1] * factenerrefr[2][::-1]\n axis.fill(enerpoly, sbrtpoly, color='m', alpha=0.5)\n else:\n factenerrefr = retr_factener(specconvunit[0], gdat.listprefsbrtener[k][1])\n axis.errorbar(gdat.listprefsbrtener[k][1], gdat.listprefsbrtsbrt[k][1] * factenerrefr, label=gdat.listprefsbrtlabltotl[k], color='m')\n \n if strgmodl == 'true':\n liststrgmodl = [strgmodl]\n listgdatobjt = [gdat]\n if strgmodl == 'fitt' and (strgstat == 'this' or strgstat == 'pdfn'):\n if gdat.typedata == 'mock':\n liststrgmodl = [strgmodl, 'true']\n listgdatobjt = [gdatobjt, gdat]\n else:\n liststrgmodl = [strgmodl]\n listgdatobjt = [gdatobjt]\n numbstrgstattemp = len(liststrgmodl)\n for a in range(numbstrgstattemp):\n \n indxploteleminit = []\n indxplotelemendd = []\n \n # number of transdimensional elements to be overplotted\n numbelemtemp = 0\n \n if gdat.numbpixl == 1 and strgstat != 'pdfn':\n if liststrgmodl[a] == 'fitt':\n numbelem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)\n numbelemtemp += np.sum(gmodstat.numbelem[l])\n else:\n for q in gdat.indxrefr:\n numbelemtemp += np.sum(gdat.refr.numbelem[q])\n \n numbplot = numblablsbrtspec + numbelemtemp\n listydat = np.zeros((numbplot, gdat.numbener))\n listyerr = np.zeros((2, numbplot, gdat.numbener))\n \n cntr = 0\n cntrdata = cntr\n\n ## data\n listydat[cntr, :] = gdat.sbrtdatamean[b]\n listyerr[:, cntr, :] = gdat.sbrtdatastdv[b]\n cntr += 1\n \n for c in gmod.indxback:\n listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtback%04dmea%d' % (c, b), strgpdfn)\n if strgstat == 'pdfn':\n listyerr[:, cntr, :] = retr_fromgdat(gdat, 
gdatmodi, strgstat, liststrgmodl[a], 'sbrtback%04dmea%d' % (c, b), strgpdfn, strgmome='errr')\n cntr += 1\n \n if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy and not (liststrgmodl[a] == 'true' and gdat.refr.numbelemtotl == 0):\n listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncmea%d' % (b), strgpdfn)\n if strgstat == 'pdfn':\n listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncmea%d' % (b), strgpdfn, strgmome='errr')\n cntr += 1\n \n listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncsubtmea%d' % (b), strgpdfn)\n if strgstat == 'pdfn':\n listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncsubtmea%d' % (b), strgpdfn, strgmome='errr')\n cntr += 1\n \n if gmod.typeemishost != 'none':\n for e in gmod.indxsersfgrd:\n listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrthostisf%dmea%d' % (e, b), strgpdfn)\n if strgstat == 'pdfn':\n listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], \\\n 'sbrthostisf%dmea%d' % (e, b), strgpdfn, strgmome='errr')\n cntr += 1\n \n if gmod.boollens:\n listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtlensmea%d' % (b), strgpdfn)\n if strgstat == 'pdfn':\n listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtlensmea%d' % (b), strgpdfn, strgmome='errr')\n cntr += 1\n \n if gdat.numbpixl == 1 and strgstat != 'pdfn':\n cntrline = cntr\n indxploteleminit.append(cntr)\n for l in gmod.indxpopl:\n if liststrgmodl[a] == 'true':\n for k in range(gmod.numbelem[l]):\n listydat[cntr, :] = getattr(listgdatobjt[a], liststrgmodl[a] + 'spec')[l][0, :, k]\n \n if cntr == cntrline:\n listlablsbrtspec = listlablsbrtspec[:cntr] + ['Lines'] + listlablsbrtspec[cntr:]\n else:\n listlablsbrtspec = listlablsbrtspec[:cntr] + [None] + listlablsbrtspec[cntr:]\n \n cntr += 1\n if k == gmod.numbelem[l] - 1:\n indxplotelemendd.append(k)\n else: \n for k in range(gmodstat.numbelem[l]):\n listydat[cntr, :] = getattr(listgdatobjt[a], strgstat + 'spec')[l][:, k]\n \n if cntr == cntrline:\n listlablsbrtspec = listlablsbrtspec[:cntr] + ['Lines'] + listlablsbrtspec[cntr:]\n else:\n listlablsbrtspec = listlablsbrtspec[:cntr] + [None] + listlablsbrtspec[cntr:]\n \n cntr += 1\n if k == gmodstat.numbelem[l] - 1:\n indxplotelemendd.append(k)\n ## total model\n if numblablsbrt > 1:\n listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtmodlmea%d' % (b), strgpdfn)\n if strgstat == 'pdfn':\n listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtmodlmea%d' % (b), strgpdfn, strgmome='errr')\n cntr += 1\n \n if liststrgmodl[a] == 'true':\n listyerr = np.zeros((2, numbplot, gdat.numbener))\n \n # plot energy spectra of the data, background model components and total background\n if gdat.numbener > 1:\n \n listmrkr = ['o', '>', 's', 'h', '*', 'p', 'x']\n for k in range(100):\n listmrkr.append('x')\n\n # determine the energy scaling factor\n if specconvunit[0] == 'en00':\n factener = 1.\n if specconvunit[0] == 'en01':\n factener = gdat.meanpara.ener\n if specconvunit[0] == 'en02':\n factener = gdat.meanpara.ener**2\n if specconvunit[0] == 'en03':\n # temp\n pass\n factener = 1.\n #indxenerintv = np.where((gdat.meanpara.ener < specconvunit[4]) & (gdat.meanpara.ener > specconvunit[3]))[0]\n #ener = np.concatenate((np.array([specconvunit[3]]), gdat.meanpara.ener[indxenerintv], 
np.array([specconvunit[4]])))\n #\n #for k in range(3):\n # if k == 0:\n # ydattemp = \n # ydatminmener = np.interp(specconvunit[3], gdat.meanpara.ener, ydat)\n # ydatmaxmener = np.interp(specconvunit[4], gdat.meanpara.ener, ydat)\n # ydat = np.concatenate((np.array([ydatminmener]), ydat[indxenerintv], np.array([ydatmaxmener])))\n # ydat = np.trapz(ydat, gdat.meanpara.ener)\n #\n #yerrminmener = np.interp(specconvunit[3], gdat.meanpara.ener, yerr, axis=1)\n #yerrmaxmener = np.interp(specconvunit[4], gdat.meanpara.ener, yerr, axis=1)\n #ydat = np.stack((np.array([yerrminmener]), ydat[indxenerintv], np.array([yerrmaxmener])))\n #\n #\n #yerr = np.trapz(yerr, gdat.meanpara.ener)\n\n\n xdat = gdat.meanpara.ener\n cntr = 0\n \n for k in range(listydat.shape[0]):\n mrkr = listmrkr[cntr]\n if k == cntrdata:\n colr = 'black'\n alph = 1.\n linestyl = '-'\n else:\n colr = retr_colr(gdat, strgstat, liststrgmodl[a], indxpopl=None)\n linestyl = '--'\n alph = 0.5\n \n ydat = np.copy(listydat[k, :])\n yerr = np.copy(listyerr[:, k, :])\n \n ydat *= factener\n yerr *= factener\n \n if k == cntrdata and a > 0:\n continue\n \n if liststrgmodl[a] == 'fitt':\n labl = listlablsbrtspec[k]\n else:\n labl = None\n \n temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, color=colr, marker=mrkr, ls=linestyl, markersize=10, alpha=alph, label=labl)\n for caps in listcaps:\n caps.set_markeredgewidth(1)\n\n if gdat.numbpixl == 1 and strgstat != 'pdfn':\n if cntr != cntrline or k in indxplotelemendd:\n cntr += 1\n else:\n cntr += 1\n\n if gdat.numbener > 1:\n axis.set_xlim([np.amin(gdat.binspara.ener), np.amax(gdat.binspara.ener)])\n \n if gdat.typeexpr == 'chan':\n factminm = 1e-1\n factmaxm = 1e2\n elif gdat.typeexpr == 'ferm':\n factminm = 1e1\n factmaxm = 1e-1\n else:\n factminm = 1e-4\n factmaxm = 1e0\n minmydat = factminm * gdat.factylimtbrt[0] * np.amax(listydat[cntrdata, :] * factener)\n maxmydat = factmaxm * gdat.factylimtbrt[1] * np.amax(listydat[cntrdata, :] * factener)\n limtydat = [minmydat, maxmydat]\n axis.set_ylim(limtydat)\n axis.set_yscale('log')\n axis.set_xlabel(gdat.lablenertotl)\n axis.set_xscale('log')\n labl = getattr(gmod.lablpara, 'sbrt' + specconvunit[0] + specconvunit[1] + 'stertotl')\n axis.set_ylabel(labl)\n make_legd(axis, numbcols=2)\n \n plt.tight_layout()\n path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'sdenmean%s%s%s' % (namespatmean, specconvunit[0], specconvunit[1]))\n figr.savefig(path)\n plt.close(figr)\n \n\ndef retr_factener(strgconvunit, ener):\n \n if strgconvunit == 'en00':\n factener = np.ones_like(ener)\n \n if strgconvunit == 'en01':\n factener = ener\n \n if strgconvunit == 'en02':\n factener = ener**2\n \n if strgconvunit == 'en03':\n # temp\n pass\n factener = np.ones_like(ener)\n \n return factener\n\n\ndef plot_pdfntotlflux():\n\n minm = 1e-9\n maxm = 10e-9\n numbvarb = 90\n numbparagenrfull = 100000\n numbbins = 40\n alph = 0.5\n \n binssing = np.linspace(minm, maxm, numbvarb + 1)\n meansing = (binssing[:-1] + binssing[1:]) / 2.\n deltsing = binssing[1:] - binssing[:-1]\n \n binsdoub = np.linspace(2. * minm, 2. * maxm, 2 * numbvarb)\n meandoub = (binsdoub[:-1] + binsdoub[1:]) / 2.\n deltdoub = binsdoub[1:] - binsdoub[:-1]\n \n bins = np.linspace(minm, 2. 
* maxm, 2 * numbvarb + 1)\n \n arry = np.empty((2, numbparagenrfull))\n \n minmslop = 1.5\n maxmslop = 3.\n numbslop = 4\n sloparry = np.linspace(minmslop, maxmslop, numbslop)\n for n in range(numbslop):\n slop = sloparry[n]\n for k in range(2):\n arry[k, :] = (np.random.rand(numbparagenrfull) * (maxm**(1. - slop) - minm**(1. - slop)) + minm**(1. - slop))**(1. / (1. - slop))\n \n totl = np.sum(arry, 0)\n \n powrprob = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop)) * meansing**(-slop)\n \n convprob = convolve(powrprob, powrprob) * deltdoub[0]\n \n indxdoub = np.where(meandoub <= maxm)[0]\n convprobpoly = polyval(polyfit(meandoub[indxdoub], convprob[indxdoub], 8), meandoub[indxdoub])\n \n figr, axis = plt.subplots()\n axis.hist(arry[k, :], bins=bins, alpha=alph, label='$f_1$ (Sampled)', color='b')\n axis.hist(totl, bins=bins, alpha=alph, label='$f_0$ (Sampled)', color='g')\n axis.plot(meansing, powrprob * numbparagenrfull * deltsing, label='$f_1$ (Analytic)', color='b')\n axis.plot(meandoub, convprob * numbparagenrfull * deltdoub[0], label='$f_0$ (Numerically convolved)', color='g')\n \n axis.plot(meandoub[indxdoub], convprobpoly * numbparagenrfull * deltdoub[indxdoub], label='$f_0$ (Fit)', color='r')\n \n axis.set_ylim([0.5, numbsamp])\n axis.set_xlabel('$f$')\n axis.set_xlim([np.amin(bins), np.amax(bins)])\n axis.set_xscale('log')\n axis.set_yscale('log')\n axis.set_ylabel('$N_{samp}$')\n make_legd(axis)\n plt.tight_layout()\n pathfold = os.environ[\"TDGU_DATA_PATH\"] + '/imag/powrpdfn/'\n figr.savefig(pathfold + 'powrpdfn%04d.pdf' % n)\n plt.close(figr)\n \n\ndef savefigr(gdat, gdatmodi, figr, path):\n \n #if gdatmodi is not None and gdat.numbproc > 1:\n # gdatmodi.lock.acquire()\n # print 'Process %d acquiring the lock...' % gdatmodi.indxprocwork \n \n plt.savefig(path)\n \n #if gdatmodi is not None and gdat.numbproc > 1:\n # gdatmodi.lock.release()\n # print 'Process %d releasing the lock...' 
% gdatmodi.indxprocwork \n \n\ndef plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, indxpoplfrst, strgfrst, \\\n strgseco, strgtotl, strgmome='pmea', strgpdfn='post'):\n \n gmod = getattr(gdat, strgmodl)\n \n sizelarg = 10\n sizesmll = 1\n \n if strgstat == 'pdfn':\n lablmome = getattr(gdat, 'labl' + strgmome)\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n if strgmodl == 'fitt':\n colrtemp = gmod.colrelem[indxpoplfrst]\n if strgstat == 'pdfn':\n labl = gdat.lablsampdist + ' ' + lablmome\n if strgelemtdimtype == 'bind':\n varb = getattr(gdat, strgmome + strgpdfn + strgtotl)\n varbfrst = gdat.binspara.strgfrst\n varbseco = getattr(gdat.binspara, strgseco)\n if strgtotl.startswith('hist') or strgtotl.startswith('exr') or strgtotl.startswith('incr') or np.amax(varb) <= 0.:\n normtdim = None\n else:\n normtdim = mpl.colors.LogNorm(0.5, vmax=np.amax(varb))\n imag = axis.pcolor(varbfrst, varbseco, varb.T, cmap='Blues', label=labl, norm=normtdim)\n make_cbar(gdat, axis, imag)\n \n else:\n if gdat.boolcondcatl:\n varbfrst = np.zeros(gdat.numbprvlhigh)\n varbseco = np.zeros(gdat.numbprvlhigh)\n cntr = 0\n for r in gdat.indxstkscond:\n if r in gdat.indxprvlhigh:\n varbfrst[cntr] = gdat.dictglob['poststkscond'][r][strgfrst][indxpoplfrst]\n varbseco[cntr] = gdat.dictglob['poststkscond'][r][strgseco][indxpoplfrst]\n cntr += 1\n axis.scatter(varbfrst, varbseco, alpha=gdat.alphelem, color=colrtemp, label=gdat.lablparagenrscalfull)\n \n if strgstat == 'this' or strgstat == 'mlik':\n if strgelemtdimtype == 'bind':\n meanfrst = getattr(gdat.binspara, strgfrst)\n meanseco = getattr(gdat.binspara, strgseco)\n hist = getattr(gdatmodi, strgstat + strgtotl)\n if strgtotl.startswith('hist') or strgtotl.startswith('exr') or strgtotl.startswith('incr') or np.amax(hist) <= 0.:\n normtdim = None\n else:\n normtdim = mpl.colors.LogNorm(0.5, vmax=np.amax(hist))\n imag = axis.pcolor(meanfrst, meanseco, hist.T, cmap='Blues', label=gdat.lablparagenrscalfull, alpha=gdat.alphhist, norm=normtdim)\n else:\n varbfrst = getattr(gdatmodi.this, strgfrst)[indxpoplfrst]\n varbseco = getattr(gdatmodi.this, strgseco)[indxpoplfrst]\n if len(varbfrst) == 0 or len(varbseco) == 0:\n varbfrst = np.array([limtfrst[0] * 0.1])\n varbseco = np.array([limtseco[0] * 0.1])\n axis.scatter(varbfrst, varbseco, alpha=gdat.alphelem, color=colrtemp, label=gdat.lablparagenrscalfull)\n \n # reference elements\n if strgfrst[-4:] in gdat.listnamerefr:\n strgfrsttemp = strgfrst[-4:]\n else:\n strgfrsttemp = strgfrst\n if strgseco[-4:] in gdat.listnamerefr:\n strgsecotemp = strgseco[-4:]\n else:\n strgsecotemp = strgseco\n if hasattr(gdat.refr, strgfrsttemp) and hasattr(gdat.refr, strgsecotemp):\n for q in gdat.indxrefr:\n if strgfrsttemp in gdat.refr.namepara.elem[q] and strgsecotemp in gdat.refr.namepara.elem[q]:\n refrvarbfrst = getattr(gdat.refr, strgfrsttemp)[q]\n refrvarbseco = getattr(gdat.refr, strgsecotemp)[q]\n if len(refrvarbfrst) == 0 or len(refrvarbseco) == 0:\n refrvarbfrst = np.array([limtfrst[0] * 0.1])\n refrvarbseco = np.array([limtseco[0] * 0.1])\n axis.scatter(refrvarbfrst, refrvarbseco, alpha=gdat.alphelem, color=gdat.refr.colrelem[q], label=gdat.refr.lablelem[q], s=sizelarg)\n\n plot_sigmcont(gdat, strgmodl, axis, strgfrst, indxpoplfrst, strgseco=strgseco)\n \n scalfrst = getattr(gmod.scalpara, strgfrst)\n scalseco = getattr(gmod.scalpara, strgseco)\n\n if scalfrst == 'logt':\n axis.set_xscale('log')\n if scalseco == 'logt':\n axis.set_yscale('log')\n \n 
axis.set_xlabel(getattr(gmod.labltotlpara, strgfrst))\n axis.set_ylabel(getattr(gmod.labltotlpara, strgseco))\n axis.set_xlim(getattr(gmod.limtpara, strgfrst))\n axis.set_ylim(getattr(gmod.limtpara, strgseco))\n \n make_legd(axis)\n\n plt.tight_layout()\n if strgstat == 'pdfn':\n strgmometemp = strgmome\n else:\n strgmometemp = ''\n \n nameinte = strgelemtdimvarb + 'tdim/'\n path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, '%s%s' % (strgmometemp, strgtotl), nameinte=nameinte)\n \n savefigr(gdat, gdatmodi, figr, path)\n plt.close(figr)\n \n\ndef plot_sigmcont(gdat, strgmodl, axis, strgfrst, indxpoplfrst, strgseco=None):\n \n if strgfrst == 'deltllik' or strgseco == 'deltllik':\n for pval in gdat.pvalcont:\n if strgfrst == 'deltllik':\n deltlliksigm = scipy.stats.chi2.ppf(1. - pval, gmod.numbparagenrelemsing[indxpoplfrst])\n axis.axvline(deltlliksigm, ls='--', color='black', alpha=0.2) \n if strgseco == 'deltllik':\n deltlliksigm = scipy.stats.chi2.ppf(1. - pval, gmod.numbparagenrelemsing[indxpoplfrst])\n axis.axhline(deltlliksigm, ls='--', color='black', alpha=0.2) \n \n\ndef plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgydat, strgxdat, typehist='hist', \\\n indxrefrplot=None, indxydat=None, strgindxydat=None, indxxdat=None, strgindxxdat=None, plottype='none', \\\n meanxdat=None, \\\n scal=None, scalxdat=None, scalydat=None, limtxdat=None, limtydat=None, omittrue=False, nameinte='', \\\n lablxdat='', lablydat='', histodim=False, offslegd=None, booltdim=False, ydattype='totl', boolhistprio=True):\n \n gmod = getattr(gdat, strgmodl)\n gmodstat = getattr(gmod, strgstat)\n\n if strgydat[-8:-5] == 'pop':\n boolelem = True\n else:\n boolelem = False\n\n if scal is None:\n if scalxdat is None:\n scalxdat = 'linr'\n if scalydat is None:\n scalydat = 'linr'\n else:\n scalxdat = scal\n scalydat = scal\n\n if histodim:\n figrsize = (gdat.plotsize, 0.8 * gdat.plotsize)\n else:\n figrsize = (gdat.plotsize, gdat.plotsize)\n\n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n \n if booltdim:\n xdat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgxdat, strgpdfn)\n ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn)\n else:\n xdat = getattr(gdat.meanpara, strgxdat[4:])\n if typehist == 'histcorrreca':\n ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], strgpdfn)\n else:\n ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn)\n \n if indxxdat is not None:\n xdat = xdat[indxxdat]\n if indxydat is not None:\n ydat = ydat[indxydat]\n \n xerr = np.zeros((2, xdat.size))\n \n if booltdim:\n axis.scatter(xdat, ydat, alpha=gdat.alphelem, color=colr, label=gdat.lablparagenrscalfull)\n else:\n if histodim:\n # temp\n if strgxdat[4:] in gmod.namepara.elem:\n deltxdat = getattr(gdat.deltpara, strgxdat[4:])\n binsxdat = getattr(gdat.binspara, strgxdat[4:])\n else:\n deltxdat = getattr(gdat.deltpara, strgxdat[4:])\n binsxdat = getattr(gdat.binspara, strgxdat[4:])\n\n xdattemp = binsxdat[:-1] + deltxdat / 2.\n \n if strgmodl == 'fitt':\n if boolelem:\n if strgydat.startswith('cmpl'):\n labl = gmod.lablelem[int(strgydat[-5])]\n colr = gmod.colrelem[int(strgydat[-5])]\n else:\n labl = gmod.lablelem[int(strgydat[-1])]\n colr = gmod.colrelem[int(strgydat[-1])]\n else:\n labl = gmod.labl\n colr = gmod.colr\n \n if strgstat == 'pdfn':\n if typehist == 'histcorrreca':\n yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], strgpdfn, strgmome='errr')\n 
else:\n yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn, strgmome='errr')\n if indxydat is not None:\n yerr = yerr[[slice(None)] + indxydat]\n \n # label\n if strgydat.startswith('hist'):\n ## element distribution\n labl = gdat.lablsampdist\n else:\n ## other\n labl = gdat.lablsampdist\n \n # draw points\n indxerrr = np.where((yerr[0, :] > 0.) | (yerr[1, :] > 0.))[0]\n if indxerrr.size > 0:\n labltemp = None\n else:\n labltemp = labl\n temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, xerr=xerr, label=labl, \\\n marker='o', ls='', markersize=5, color=colr, lw=1, capsize=5)\n\n # draw error-bar caps \n if indxerrr.size > 0:\n temp, listcaps, temp = axis.errorbar(xdat[indxerrr], ydat[indxerrr], yerr=yerr[:, indxerrr], xerr=xerr[:, indxerrr], \\\n marker='o', ls='', markersize=5, color=colr, lw=1, capsize=5)\n for caps in listcaps:\n caps.set_markeredgewidth(1)\n\n elif strgstat == 'this' or strgstat == 'mlik':\n \n if strgstat == 'this':\n labl = gdat.lablsamp\n else:\n labl = gdat.lablmlik\n\n if histodim:\n axis.bar(xdattemp, ydat, deltxdat, label=gdat.lablparagenrscalfull, alpha=0.5, linewidth=1, edgecolor=colr)\n else:\n if plottype == 'errr':\n yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn, strgmome='errr')\n\n if indxydat is not None:\n yerr = yerr[[slice(None)] + indxydat]\n temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, xerr=xerr, \\\n marker='o', ls='', markersize=5, label=labl, lw=1, capsize=5, color=colr)\n for caps in listcaps:\n caps.set_markeredgewidth(1)\n else:\n axis.plot(xdat, ydat, label=gdat.lablparagenrscalfull, alpha=0.5, color=colr)\n \n # reference histogram\n if not omittrue:\n for q in gdat.indxrefr:\n \n if boolelem:\n if strgydat[-12:-8] in gdat.listnamerefr:\n name = 'refr' + strgydat[:-12] + 'pop%d' % q + strgydat[-4:]\n else:\n name = 'refr' + strgydat[:-8] + 'pop%d' % q + strgydat[-4:]\n else:\n name = 'refr' + strgydat\n \n if not hasattr(gdat, name):\n continue\n \n ydattemp = getattr(gdat, name)\n \n ydat = ydattemp\n if indxydat is not None:\n ydat = ydat[indxydat]\n \n if strgydat[-8:-5] == 'pop':\n labl = gdat.refr.lablelem[q]\n colr = gdat.refr.colrelem[q]\n else:\n labl = gdat.refr.labl\n colr = gdat.refr.colr\n \n if histodim:\n axis.bar(xdattemp, ydat, deltxdat, color=colr, label=labl, alpha=gdat.alphhist, linewidth=1, edgecolor=colr)\n else:\n axis.plot(xdat, ydat, color=colr, label=labl, alpha=gdat.alphline)\n \n try:\n if histodim:\n if typehist == 'histcorrreca':\n reca = getattr(gdat.true, 'reca' + strgydat[4:])\n axis.plot(xdattemp, 10. 
* reca, color='purple', label='PTFN', alpha=gdat.alphline)\n except:\n pass\n\n if not boolelem:\n break\n \n # external reference histogram\n if histodim and strgydat == 'histfluxpop0':\n try:\n if gdat.listprefhistfluxlabl is not None:\n for k in range(len(gdat.listprefhistfluxlabl)):\n if gdat.listprefhistfluxtype[k] == 'shad':\n axis.plot(gdat.listprefhistfluxflux[k][0], gdat.listprefhistfluxhist[k][0], color='m', label=gdat.listprefhistfluxlabl[k])\n enerpoly = np.empty(gdat.listprefhistfluxflux[k][1].size + gdat.listprefhistfluxflux[k][2].size)\n enerpoly[:gdat.listprefhistfluxflux[k][1].size] = gdat.listprefhistfluxflux[k][1]\n enerpoly[gdat.listprefhistfluxflux[k][1].size:] = gdat.listprefhistfluxflux[k][2][::-1]\n sbrtpoly = np.empty(gdat.listprefhistfluxflux[k][1].size + gdat.listprefhistfluxflux[k][2].size)\n sbrtpoly[:gdat.listprefhistfluxflux[k][1].size] = gdat.listprefhistfluxhist[k][1]\n sbrtpoly[gdat.listprefhistfluxflux[k][1].size:] = gdat.listprefhistfluxhist[k][2][::-1]\n axis.fill(enerpoly, sbrtpoly, color='m', alpha=0.5)\n else:\n axis.errorbar(gdat.listprefhistfluxflux[k], gdat.listprefhistfluxhist[k], label=gdat.listprefhistfluxlabl[k], color='m')\n except:\n pass\n\n if strgydat.startswith('histcntp'):\n ydattemp = getattr(gmodstat, strgydat)\n axis.bar(xdattemp, ydattemp, deltxdat, color='black', label='Data', alpha=gdat.alphhist, linewidth=1, edgecolor='black')\n \n # axis scales\n if scalxdat == 'logt':\n axis.set_xscale('log')\n if scalydat == 'logt':\n if np.where(ydat > 0.)[0].size > 0:\n axis.set_yscale('log')\n \n # axis labels\n axis.set_xlabel(lablxdat)\n axis.set_ylabel(lablydat)\n\n # superimpose prior on the feature\n ptch = None\n line = None\n\n if strgydat.startswith('hist') and strgydat != 'histdefl' and strgydat != 'histdeflelem' and boolhistprio:\n if strgydat[-8:-5] == 'pop':\n strgtemp = strgydat[4:-8]\n if strgtemp in gmod.namepara.genrelem[int(strgydat[-5])]:\n xdatprio = getattr(gmod, strgxdat + 'prio')\n if gdat.typedata == 'mock' and not omittrue:\n for q in gdat.indxrefr:\n if gdat.refr.numbelem[q] == 0:\n continue\n if strgtemp in gmod.namepara.genrelem[q]:\n truexdatprio = getattr(gdat.true, strgxdat + 'prio')\n trueydatsupr = getattr(gdat.true, strgydat + 'prio')\n trueydatsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'true', strgydat + 'prio', strgpdfn)\n axis.plot(truexdatprio, trueydatsupr, ls='-', alpha=gdat.alphline, color=gdat.refr.colrelem[q])\n\n if strgmodl != 'true':\n ydatsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'fitt', strgydat + 'prio', strgpdfn)\n if strgstat == 'pdfn':\n yerrsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'fitt', strgydat + 'prio', strgpdfn, strgmome='errr')\n labl = gdat.lablsampdist + ' hyper-distribution'\n ptch, line = tdpy.plot_braz(axis, xdatprio, ydatsupr, yerr=yerrsupr, lcol='lightgrey', dcol='grey', labltotl=labltotl)\n else:\n axis.plot(xdatprio, ydatsupr, ls='--', alpha=gdat.alphline, color=gmod.colrelem[int(strgydat[-5])])\n \n for name, valu in gdat.refr.__dict__.items():\n if name[8:12] == 'hist' and name[12:16] == strgydat[4:] and name[16:19] == 'pop' and int(name[-1]) == indxpopltemp:\n colr = getattr(gdat, name + 'colr')\n linestyl = getattr(gdat, name + 'linestyl')\n axis.plot(valu[0, :], valu[1, :], ls=linestyl, color=colr)\n\n if strgydat.startswith('hist') and strgydat[4:-8] == 'deltllik':\n plot_sigmcont(gdat, strgmodl, axis, strgxdat[4:], int(strgydat[-1]))\n \n if indxydat is not None:\n strgydat += strgindxydat\n \n if indxxdat is not None:\n strgxdat += strgindxxdat\n \n if 
limtxdat is not None:\n axis.set_xlim(limtxdat)\n else:\n axis.set_xlim([np.amin(xdat), np.amax(xdat)])\n if limtydat is not None:\n axis.set_ylim([limtydat[0], limtydat[1]])\n else:\n axis.set_ylim([np.amin(ydat), np.amax(ydat)])\n \n if ydattype != 'totl':\n strgydat += ydattype\n \n try:\n make_legd(axis, offs=offslegd, ptch=ptch, line=line)\n except:\n print('Legend failed when')\n print('strgstat')\n print(strgstat)\n print('strgmodl')\n print(strgmodl)\n print('strgydat')\n print(strgydat)\n raise Exception('')\n\n plt.tight_layout()\n if typehist == 'histcorrreca':\n path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], nameinte=nameinte)\n else:\n path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgydat, nameinte=nameinte)\n savefigr(gdat, gdatmodi, figr, path)\n plt.close(figr)\n\n\ndef plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat, plotdiff=False):\n \n if plotdiff:\n figrsize = (gdat.plotsize, 0.7 * gdat.plotsize)\n else:\n figrsize = (gdat.plotsize, gdat.plotsize)\n figr, axis = plt.subplots(1, 1, figsize=figrsize)\n \n # prepare data to be plotted\n xdat = np.copy(getattr(gdat.refr, strgfeat)[q][0, :])\n xerr = tdpy.retr_errrvarb(getattr(gdat.refr, strgfeat)[q])\n \n ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgfeat + 'asscpop%dpop%d' % (q, l), strgpdfn)\n \n yerr = np.zeros((2, ydat.size))\n if strgstat == 'pdfn':\n yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgfeat + 'asscpop%dpop%d' % (q, l), strgpdfn, strgmome='errr')\n \n if plotdiff:\n ydat = 100. * (ydat - xdat) / xdat\n \n # handle the case when there is a single reference element\n if yerr.ndim == 1:\n ydat = np.array([ydat])\n yerr = yerr[:, None]\n \n # plot all associations\n if plotdiff:\n indx = np.where(ydat > -100.)[0]\n else:\n indx = np.where(ydat > 0.)[0]\n if indx.size > 0:\n axis.errorbar(xdat[indx], ydat[indx], ls='', yerr=yerr[:, indx], xerr=xerr[:, indx], lw=1, marker='o', markersize=5, color='black')\n \n # temp -- plot associations inside the comparison area\n if plotdiff:\n axis.axhline(0., ls='--', alpha=gdat.alphline, color='black')\n else:\n axis.plot(binsplot, binsplot, ls='--', alpha=gdat.alphline, color='black')\n \n lablxdat = getattr(gmod.lablpara, strgfeat + 'refr')\n lablydat = getattr(gmod.lablpara, strgfeat + 'paragenrscalfull')\n axis.set_xlabel(lablxdat)\n axis.set_ylabel(lablydat)\n boollogtxaxi = False\n boollogtyaxi = False\n if indx.size > 0 and scal == 'logt':\n if not plotdiff:\n axis.set_yscale('log')\n boollogtyaxi = True\n axis.set_xscale('log')\n boollogtaxis = True\n \n if plotdiff:\n limtydat = np.array([-100., 100.])\n else:\n limtydat = np.array([minmplot, maxmplot])\n limtxdat = [minmplot, maxmplot]\n \n # overplot text\n if 'etag' in gdat.refr.namepara.elem[q]:\n for k in range(indx.size):\n if boollogtxaxi:\n sizexoff = 0.01 * xdat[indx[k]]\n else:\n sizexoff = 0.01 * (limtxdat[1] - limtxdat[0])\n if boollogtyaxi:\n sizeyoff = 0.01 * ydat[indx[k]]\n else:\n sizeyoff = 0.01 * (limtydat[1] - limtydat[0])\n axis.text(xdat[indx[k]] + sizexoff, ydat[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], verticalalignment='center', horizontalalignment='center', \\\n color='red', fontsize=1)\n\n axis.set_ylim(limtydat)\n axis.set_xlim(limtxdat)\n \n plt.tight_layout()\n if plotdiff:\n strgtype = 'diff'\n else:\n strgtype = ''\n path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'scatassc' + strgfeat + '%spop%dpop%d' % (strgtype, q, l), 
nameinte='assc')\n savefigr(gdat, gdatmodi, figr, path)\n plt.close(figr)\n\n\ndef plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, indxevttplot, indxenerplot=None):\n \n gmod = getattr(gdat, strgmodl)\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'cntpmodl', strgpdfn)\n if indxenerplot is None:\n xdat = gdat.cntpdata[:, :, indxevttplot].flatten()\n ydat = ydat[:, :, indxevttplot].flatten()\n nameplot = 'scatcntpevt%d' % (indxevttplot)\n if strgstat == 'pdfn':\n indxvarb = [slice(None), slice(None), indxevttplot]\n else:\n xdat = gdat.cntpdata[indxenerplot, :, indxevttplot]\n ydat = ydat[indxenerplot, :, indxevttplot]\n nameplot = 'scatcntpen%02devt%d' % (indxenerplot, indxevttplot)\n if strgstat == 'pdfn':\n indxvarb = [indxenerplot, slice(None), indxevttplot]\n if strgstat == 'pdfn':\n yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'cntpmodl', strgpdfn, strgmome='errr', indxvarb=indxvarb)\n colr = gmod.colr\n\n if strgstat == 'pdfn':\n axis.errorbar(xdat, ydat, yerr=yerr, marker='o', ls='', markersize=5, color=gmod.colr, capsize=5)\n else:\n axis.plot(xdat, ydat, marker='o', ls='', markersize=5, color=gmod.colr)\n gdat.limtcntpdata = [gdat.binspara.cntpdata[0], gdat.binspara.cntpdata[-1]]\n axis.set_xlim(gdat.limtcntpdata)\n axis.set_ylim(gdat.limtcntpdata)\n axis.set_ylabel('$k^{modl}$')\n axis.set_xlabel('$k^{data}$')\n axis.set_xscale('log')\n axis.set_yscale('log')\n plt.tight_layout()\n\n path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)\n savefigr(gdat, gdatmodi, figr, path)\n plt.close(figr)\n \n \ndef plot_indxprox(gdat):\n\n numbbins = 40\n numbfluxprox = len(gdat.indxpixlprox)\n bins = np.empty((gdat.numbprox, numbbins + 1))\n indxpixlproxsize = np.empty((numbfluxprox, gdat.numbpixlfull))\n for h in gdat.indxprox:\n for j in gdat.indxpixlfull:\n try:\n indxpixlproxsize[h, j] = gdat.indxpixlprox[h][j].size\n except:\n indxpixlproxsize[h, j] = gdat.numbpixlfull\n bins[h, :] = np.logspace(np.log10(np.amin(indxpixlproxsize[h, :])), np.log10(np.amax(indxpixlproxsize[h, :])), numbbins + 1)\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n for h in gdat.indxprox:\n axis.hist(indxpixlproxsize[h, :], bins=bins[h, :], log=True, label='Flux bin %d' % h, alpha=gdat.alphhist)\n axis.set_xscale('log')\n axis.axvline(gdat.numbpixlfull, label='ROI', ls='--')\n axis.set_xlabel('Number of pixels')\n axis.set_ylabel(\"Number of tables\")\n make_legd(axis)\n plt.tight_layout()\n figr.savefig(gdat.pathplotrtag + 'init/indxprox.pdf')\n plt.close()\n \n \ndef plot_psfn_type():\n \n devi = np.linspace(0., 5., 100)\n y = np.zeros((x.size, 5))\n\n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n singgaus = retr_singgaus(devi, 0.25)\n axis.plot(devi, singgaus, label='Single Gaussian')\n\n singking = retr_singking(devi, 0.25, 10.)\n axis.plot(devi, singking, label='Single King')\n\n doubgaus = retr_doubgaus(devi, 0.1, 0.25, 1.)\n axis.plot(devi, doubgaus, label='Double Gaussian')\n\n gausking = retr_gausking(devi, 0.1, 0.25, 1., 10.)\n axis.plot(devi, gausking, label='Gaussian + King')\n\n doubking = retr_doubking(devi, 0.1, 0.25, 10., 1., 5.)\n axis.plot(devi, doubking, label='Double King')\n\n make_legd(axis)\n axis.set_xscale('log')\n axis.set_yscale('log')\n axis.set_ylim([1e-3, None])\n \n \ndef plot_evidtest():\n \n minmgain = -1.\n maxmgain = 5.\n minmdevi = 0.\n maxmdevi = 5.\n gain = np.linspace(minmgain, maxmgain, 100)\n 
devi = np.linspace(minmdevi, maxmdevi, 100)\n\n evid = np.log(np.sqrt(1. + np.exp(2. * gain[None, :])) * np.exp(-devi[:, None]**2 / 2. / (1. + 1. / np.exp(2. * gain[None, :]))))\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n figr.suptitle('Log-Bayesian Evidence For Lower-Dimension Model', fontsize=18)\n imag = axis.imshow(evid, extent=[minmgain, maxmgain, minmdevi, maxmdevi], cmap='winter', origin='lower')\n cset1 = plt.contourf(gain, devi, evid, cmap='winter')\n axis.set_xlabel('Information gain')\n axis.set_ylabel('Goodness of fit')\n plt.colorbar(imag, ax=axis, fraction=0.03)\n\n plt.tight_layout()\n figr.savefig(gdat.pathplotrtag + 'evidtest.pdf')\n plt.close(figr)\n \n \ndef plot_histlgalbgalelemstkd(gdat, strgpdfn, indxpoplplot, strgbins, strgfeat=None):\n \n if strgfeat is not None:\n numbparaplot = gdat.numbbinsplot\n else:\n numbparaplot = 1\n\n if strgbins == 'cumu':\n numbrows = 1\n numbcols = 1\n else:\n numbcols = 2\n if strgbins == 'full':\n numbrows = numbparaplot / 2\n else:\n numbrows = 2\n \n histlgalbgalelemstkd = getattr(gdat, strgpdfn + 'histlgalbgalelemstkd')\n\n figr, axgr = plt.subplots(numbrows, numbcols, figsize=(numbcols * gdat.plotsize, numbrows * gdat.plotsize), sharex='all', sharey='all')\n if numbrows == 1:\n axgr = [axgr] \n for a, axrw in enumerate(axgr):\n if numbcols == 1:\n axrw = [axrw]\n for b, axis in enumerate(axrw):\n if strgfeat is not None:\n h = a * 2 + b\n if strgbins == 'full':\n indxlowr = h\n indxuppr = h + 1\n elif strgbins == 'cumu':\n indxlowr = 0\n indxuppr = numbparaplot\n else:\n if h < 3:\n indxlowr = 2 * h\n indxuppr = 2 * (h + 1)\n else:\n indxlowr = 2 * h\n indxuppr = numbparaplot\n temp = np.sum(histlgalbgalelemstkd[indxpoplplot][:, :, indxlowr:indxuppr], 2).T\n else:\n temp = np.sum(np.sum(histlgalbgalelemstkd[indxpoplplot], 2), 2).T\n \n if np.where(temp > 0.)[0].size > 0:\n imag = axis.imshow(temp, interpolation='nearest', origin='lower', cmap='BuPu', \\\n extent=gdat.exttrofi, norm=mpl.colors.LogNorm(vmin=0.5, vmax=None))\n else:\n imag = axis.imshow(temp, interpolation='nearest', origin='lower', cmap='BuPu', extent=gdat.exttrofi)\n \n if strgfeat is not None:\n bins = getattr(gdat.binspara, strgfeat)\n \n # superimpose reference elements\n for q in gdat.indxrefr:\n if gdat.refr.numbelem[q] == 0:\n continue\n # temp -- backcomp\n reframpl = getattr(gdat.refr, gdat.refr.nameparagenrelemampl[q])\n if strgfeat in gdat.refr.namepara.elem[q]:\n refrfeat = getattr(gdat.refr, strgfeat)[q]\n if len(refrfeat) > 0:\n indxelem = np.where((bins[indxlowr] < refrfeat[0, :]) & (refrfeat[0, :] < bins[indxuppr]))[0]\n else:\n indxelem = np.array([])\n else:\n indxelem = np.arange(gdat.refr.numbelem[q])\n # temp -- backcomp\n try:\n mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl[q][0, indxelem], gdat.refr.nameparagenrelemampl[q])\n except:\n mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl[q][0, indxelem], gdat.refr.nameparagenrelemampl[q])\n\n if indxelem.size > 0:\n axis.scatter(gdat.anglfact * gdat.refr.dictelem[q]['lgal'][0, indxelem], gdat.anglfact * gdat.refr.dictelem[q]['bgal'][0, indxelem], \\\n s=mrkrsize, alpha=gdat.alphelem, marker=gdat.refrlistmrkrhits[q], lw=2, color=gdat.refr.colrelem[q])\n\n if a == numbrows - 1:\n axis.set_xlabel(gdat.labllgaltotl)\n else:\n axis.set_xticklabels([])\n if b == 0:\n axis.set_ylabel(gdat.lablbgaltotl)\n else:\n axis.set_yticklabels([])\n\n draw_frambndr(gdat, axis)\n \n if strgbins != 'cumu':\n titl = tdpy.mexp(bins[indxlowr]) + ' < $%s$ < ' % lablfeat + 
tdpy.mexp(bins[indxuppr])\n axis.set_title(titl)\n \n if strgfeat is not None:\n lablfeattotl = getattr(gmod.lablpara, strgfeat + 'totl')\n plt.figtext(0.5, 0.95, '%s' % lablfeattotl, ha='center', va='center')\n axiscomm = figr.add_axes([0.87, 0.2, 0.02, 0.6])\n cbar = figr.colorbar(imag, cax=axiscomm)\n\n plt.subplots_adjust()\n #plt.subplots_adjust(left=0.18, top=.9, right=0.82, bottom=0.15, hspace=0.08, wspace=0.08)\n if strgbins == 'cumu':\n strgtemp = ''\n else:\n strgtemp = strgfeat\n path = getattr(gdat, 'path' + strgpdfn + 'finl') + 'histlgalbgalelemstkd%s%spop%d' % (strgbins, strgtemp, indxpoplplot) + '.pdf'\n figr.savefig(path)\n plt.close(figr)\n \n \ndef plot_king(gdat):\n\n angl = rad2deg(gdat.binspara.angl)\n\n figr, axgr = plt.subplots(1, 2, figsize=(2 * gdat.plotsize, gdat.plotsize))\n figr.suptitle('King Function', fontsize=20)\n for k, axis in enumerate(axgr):\n if k == 0:\n sigmlist = [0.25]\n gammlist = [1.01, 2.5, 10.]\n else:\n sigmlist = [0.1, 0.25, 1.]\n gammlist = [2.]\n for sigm in sigmlist:\n for gamm in gammlist:\n axis.plot(angl, retr_singking(angl, sigm, gamm), label=r'$\\sigma = %.4g, \\gamma = %.3g$' % (sigm, gamm))\n make_legd(axis)\n axis.set_yscale('log')\n axis.set_xlabel(gdat.labltotlpara.gang)\n axis.set_xlabel(r'$\\mathcal{K}$')\n \n plt.tight_layout()\n figr.savefig(gdat.pathplotrtag + 'king.pdf')\n plt.close(figr)\n \n \ndef plot_intr(gdat):\n \n if gdat.typeverb > 0:\n print('Making PCAT introductory plots...')\n\n #plot_grap(plottype='meta', typeverb=1)\n plot_grap(plottype='lght0000', typeverb=1)\n #plot_grap(plottype='lght0001', typeverb=1)\n #plot_grap(plottype='lght0002', typeverb=1)\n #plot_grap(plottype='lght0003', typeverb=1)\n #plot_grap(plottype='lens0000', typeverb=1)\n plot_grap(plottype='lens0001', typeverb=1)\n \n with plt.xkcd():\n\n from matplotlib import patheffects\n mpl.rcParams['path.effects'] = [patheffects.withStroke(linewidth=0)]\n\n figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))\n\n catl = np.arange(80)\n probcatl = pss.pmf(catl, 30.) 
+ 0.5 * pss.pmf(catl, 60.)\n axis.plot(catl, probcatl)\n axis.set_xticks([10, 30, 60])\n axis.set_xticklabels([\"Crackpot's Catalog\", \"Best-fit catalog\", \"Not-so-best-fit catalog\"])\n axis.set_yticks([])\n text = axis.set_title(\"Exploring the catalog space with Probabilistic cataloging\")\n text.set_position([.5, 1.05])\n axis.set_xlabel('Catalog index')\n axis.set_ylabel(\"Probability\")\n \n axis.tick_params(axis='x', colors='#B6E954')\n axis.tick_params(axis='y', colors='#B6E954')\n axis.spines['bottom'].set_color('#B6E954')\n axis.spines['top'].set_color('#B6E954') \n axis.spines['right'].set_color('#B6E954')\n axis.spines['left'].set_color('#B6E954')\n axis.yaxis.label.set_color('#B6E954')\n axis.xaxis.label.set_color('#B6E954')\n axis.title.set_color('#B6E954')\n\n axis.set_axis_bgcolor('black')\n figr.set_facecolor('black')\n plt.tight_layout()\n figr.savefig(gdat.pathimag + 'talkintr.pdf', facecolor=figr.get_facecolor())\n plt.close() \n \n \ndef plot_psfn(gdat, gdatmodi, strgstat, strgmodl):\n\n gmod = getattr(gdat, strgmodl)\n gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)\n gmodstat = getattr(gdatobjt, strgstat)\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n for k in range(gdat.numbprox + 1):\n if k == 0 or k == gdat.numbprox:\n alph = 1.\n colr = 'b'\n if k == 0:\n labl = 'Dimmest PS'\n else:\n labl = 'Brightest PS'\n else:\n alph = 0.2\n labl = None\n colr = 'black'\n axis.plot(gdat.binspara.angl * gdat.anglfact, gdat.binspara.prox[k] * gmodstat.psfn[i, :, m], label=labl, color=colr, alpha=alph)\n axis.set_xlim([np.amin(gdat.binspara.angl) * gdat.anglfact, np.amax(gdat.binspara.angl) * gdat.anglfact])\n if k > 0:\n axis.axvline(gdat.anglfact * gdat.maxmangleval[k-1], ls='--', alpha=alph, color=colr)\n axis.set_yscale('log')\n axis.set_xlabel(gdat.labltotlpara.gang)\n axis.set_ylabel(gdat.lablsbrttotl)\n \n limt = gdat.specfraceval * np.amax(gdat.binspara.prox[0] * gmodstat.psfn[i, :, m])\n\n if limt != 0.:\n axis.axhline(limt, color='red', ls=':', label='Flux floor')\n \n make_legd(axis)\n\n plt.tight_layout()\n name = 'psfn'\n if gdat.numbener > 1:\n name += 'en%02d' % i\n if gdat.numbevtt > 1:\n name += 'evt%d' % m\n figr.savefig(gdat.pathinit + name + '.pdf')\n plt.close(figr)\n\n\ndef plot_mosa(gdat, strgpdfn):\n\n # empty global object\n gdatmodi = tdpy.gdatstrt()\n \n listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')\n listparagenrunitfull = getattr(gdat, 'list' + strgpdfn + 'paragenrunitfull')\n\n numbrows = 3\n numbcols = 2\n numbsampmosa = numbrows * numbcols\n if numbsampmosa <= gdat.numbsamptotl:\n indxsampmosa = np.random.choice(gdat.indxsamptotl, size=numbsampmosa, replace=False)\n for l in gmod.indxpopl:\n for i in gdat.indxener:\n for m in gdat.indxevttplot:\n \n figr, axgr = plt.subplots(numbrows, numbcols, figsize=(numbcols * gdat.plotsize, numbrows * gdat.plotsize))\n for a, axrw in enumerate(axgr):\n for b, axis in enumerate(axrw):\n \n n = indxsampmosa[numbcols*a+b]\n gdatmodi.this.paragenrscalfull = listparagenrscalfull[n, :].flatten()\n gdatmodi.this.paragenrunitfull = listparagenrunitfull[n, :].flatten()\n if gmod.numbparaelem > 0:\n gdatmodi.this.indxelemfull = getattr(gdat, 'list' + strgpdfn + 'indxelemfull')[n]\n proc_samp(gdat, gdatmodi, 'this', 'fitt')\n\n if a == numbrows - 1:\n axis.set_xlabel(gdat.labllgaltotl)\n else:\n axis.set_xticklabels([])\n if b == 0:\n axis.set_ylabel(gdat.lablbgaltotl)\n else:\n axis.set_yticklabels([])\n \n 
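# for posterior sample n, draw the data count map in this energy bin and PSF class and\n # overlay the elements of population l of the current ('this') sample via supr_fram()\n 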
imag = retr_imag(gdat, axis, gdat.cntpdata, '', 'fitt', 'cntpdata', i, m)\n supr_fram(gdat, gdatmodi, 'this', 'fitt', axis, l)\n \n if gdat.boolbinsener:\n plt.figtext(0.5, 0.93, gdat.strgener[i], ha='center', va='center')\n axiscomm = figr.add_axes([0.92, 0.1, 0.02, 0.8])\n cbar = figr.colorbar(imag, cax=axiscomm)\n cbar.set_ticks(gdat.valutickmajrpara.cntpdata)\n cbar.set_ticklabels(gdat.labltickmajrpara.cntpdata)\n plt.subplots_adjust()\n #plt.subplots_adjust(left=0.1, top=.91, hspace=0.03, wspace=0.1, bottom=0.09)\n if l == 1:\n strg = ''\n else:\n strg = 'pop%d' % l\n pathfinl = getattr(gdat, 'path' + strgpdfn + 'finl')\n if m is None:\n path = pathfinl + 'mosa' + strg + 'en%02dA.pdf' % (gdat.indxenerincl[i])\n else:\n path = pathfinl + 'mosa' + strg + 'en%02devtt%d.pdf' % (gdat.indxenerincl[i], gdat.indxevttincl[m])\n figr.savefig(path)\n plt.close(figr)\n else:\n if gdat.typeverb > 0:\n print('Skipping the mosaic plot...')\n\n\ndef plot_grap(plottype, typeverb=0):\n \n import networkx as nx\n\n figr, axis = plt.subplots(figsize=(6, 6))\n\n grap = nx.DiGraph()\n if plottype == 'meta':\n listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'magenta']\n\n\n if plottype == 'lens0001':\n listcolr = ['olive', 'olive', 'black', 'magenta', 'magenta', 'magenta', 'magenta', 'magenta', 'olive', 'olive', 'olive', 'olive', 'olive', \\\n r'black', 'olive', 'black']\n\n if plottype == 'lght0000':\n listcolr = [r'olive', r'black', r'magenta', r'magenta', 'magenta', r'magenta', r'olive', r'olive', r'black', r'olive', r'olive', r'black', r'olive']\n \n\n\n\n if plottype == 'lght0001':\n listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'olive', 'olive', 'magenta', 'magenta', 'magenta', 'magenta', 'black']\n\n if plottype == 'lght0002':\n listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'olive', 'olive', 'olive', 'magenta', \\\n 'magenta', 'magenta', 'magenta', 'magenta', 'black']\n if plottype == 'lght0003':\n listcolr = ['black', 'black', 'black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', \\\n 'olive', 'olive', 'magenta', 'magenta', 'magenta', 'magenta']\n \n if plottype == 'lens0000':\n listcolr = ['olive', 'black', 'black', 'olive', 'olive', 'olive', 'olive', 'black', 'olive', 'magenta', 'magenta', 'magenta']\n\n\n if plottype.startswith('meta'):\n grap.add_edges_from([ \\\n ('meanelem', 'numbelem'), \\\n ('modl','data'), \\\n ('psfp', 'modl'), \\\n ('feat','modl'), \\\n ('numbelem','feat'), \\\n ('amplslop', 'ampl'), \\\n ])\n \n if plottype.startswith('lght') or plottype.startswith('lens'):\n grap.add_edges_from([ \\\n ('meanelem', 'numbelem'), \\\n ('modl','data'), \\\n ('psfp', 'modl'), \\\n ('bacp', 'modl'), \\\n ('lgal','modl'), \\\n ('bgal','modl'), \\\n ('numbelem','lgal'), \\\n ('numbelem','bgal'), \\\n ])\n \n if plottype.startswith('lght'):\n grap.add_edges_from([ \\\n ('amplslop', 'ampl'), \\\n ('ampl', 'modl'), \\\n ('numbelem','ampl'), \\\n ('numbelem', 'sind'), \\\n ('sind','modl'), \\\n ])\n \n if plottype.startswith('lens'):\n grap.add_edges_from([ \\\n ('lenp', 'modl'), \\\n ('defsslop', 'defs'), \\\n ('defs', 'modl'), \\\n ('numbelem','defs'), \\\n ])\n \n if plottype == 'lens0001':\n grap.add_edges_from([ \\\n ('asca', 'modl'), \\\n ('numbelem','asca'), \\\n ('acut', 'modl'), \\\n ('numbelem','acut'), \\\n ])\n \n if plottype == 'lght0001' or plottype == 'lght0002':\n grap.add_edges_from([ \\\n ('sinddistmean', 'sind'), \\\n ])\n \n if plottype == 'lght0002':\n grap.add_edges_from([ \\\n 
('numbelem', 'expc'), \\\n ('expc', 'modl'), \\\n ])\n \n if plottype == 'lght0003':\n grap.add_edges_from([ \\\n ('spatdistcons', 'lgal'), \\\n ('spatdistcons', 'bgal'), \\\n ])\n \n labl = {}\n if plottype.startswith('lens'):\n nameelem = r'\\rm{sub}'\n else:\n nameelem = r'\\rm{pts}'\n if plottype.startswith('lght') and (plottype == 'lght0001' or plottype == 'lght0002'):\n labl['numbelem'] = r'$\\vec{N}_{%s}$' % nameelem\n labl['meanelem'] = r'$\\vec{\\mu}_{%s}$' % nameelem\n else:\n labl['numbelem'] = '$N_{%s}$' % nameelem\n labl['meanelem'] = r'$\\mu_{%s}$' % nameelem\n \n if plottype.startswith('lght'):\n if plottype == 'lght0000' or plottype == 'lght0003':\n labl['amplslop'] = r'$\\alpha$'\n else:\n labl['amplslop'] = r'$\\vec{\\alpha}$'\n if plottype.startswith('lens'):\n labl['defsslop'] = r'$\\beta$'\n \n if plottype == 'lght0001' or plottype == 'lght0002':\n labl['sinddistmean'] = r'$\\vec{\\beta}$'\n \n if plottype == 'lght0003':\n labl['spatdistcons'] = r'$\\gamma$'\n if plottype.startswith('lens'):\n labl['lenp'] = r'$\\vec{\\chi}$'\n labl['psfp'] = r'$\\vec{\\eta}$'\n labl['bacp'] = r'$\\vec{A}$'\n labl['lgal'] = r'$\\vec{\\theta_1}$'\n labl['bgal'] = r'$\\vec{\\theta_2}$'\n if plottype.startswith('meta'):\n labl['feat'] = r'$\\vec{\\xi}$'\n else:\n if plottype.startswith('lght'):\n labl['sind'] = r'$\\vec{s}$'\n labl['ampl'] = r'$\\vec{f}$'\n else:\n labl['defs'] = r'$\\vec{\\alpha_{\\rm{s}}}$'\n if plottype == 'lens0001':\n labl['asca'] = r'$\\vec{\\theta_{\\rm{s}}}$'\n labl['acut'] = r'$\\vec{\\theta_{\\rm{c}}}$'\n \n if plottype == 'lght0002':\n labl['expc'] = r'$\\vec{E_{\\rm{c}}}$'\n labl['modl'] = r'$M_D$'\n labl['data'] = r'$D$'\n \n posi = nx.circular_layout(grap)\n posi['sinddistmean'] = np.array([0.4, 0.15])\n if plottype == 'lght0003':\n posi['spatdistcons'] = np.array([-0.2, 0.15])\n if plottype.startswith('lght'):\n posi['numbelem'] = np.array([0., 0.075])\n posi['meanelem'] = np.array([0., 0.15])\n posi['amplslop'] = np.array([0.2, 0.15])\n if plottype.startswith('lens'):\n posi['numbelem'] = np.array([-0.1, 0.075])\n posi['meanelem'] = np.array([-0.1, 0.15])\n posi['defsslop'] = np.array([0.1, 0.15])\n \n if plottype.startswith('lght'):\n if plottype == 'lght0002':\n posi['psfp'] = np.array([0.7, -0.0])\n posi['bacp'] = np.array([0.9, -0.0])\n else:\n posi['psfp'] = np.array([0.5, -0.0])\n posi['bacp'] = np.array([0.7, -0.0])\n if plottype == 'lens0000':\n posi['psfp'] = np.array([0.3, -0.0])\n posi['bacp'] = np.array([0.5, -0.0])\n posi['lenp'] = np.array([0.7, -0.0])\n if plottype == 'lens0001':\n posi['psfp'] = np.array([0.7, -0.0])\n posi['bacp'] = np.array([0.9, -0.0])\n posi['lenp'] = np.array([1.1, -0.0])\n posi['lgal'] = np.array([-0.3, -0.0])\n posi['bgal'] = np.array([-0.1, -0.0])\n if plottype.startswith('lght'):\n posi['ampl'] = np.array([0.1, -0.0])\n posi['sind'] = np.array([0.3, -0.0])\n if plottype == 'lght0002':\n posi['expc'] = np.array([0.5, -0.0])\n\n if plottype.startswith('lens'):\n posi['defs'] = np.array([0.1, -0.0])\n if plottype == 'lens0001':\n posi['asca'] = np.array([0.3, -0.0])\n posi['acut'] = np.array([0.5, -0.0])\n posi['modl'] = np.array([0., -0.075])\n posi['data'] = np.array([0., -0.15])\n \n if typeverb > 0:\n numb = max(len(grap.edges()), len(listcolr))\n for k in range(numb):\n try:\n print('%15s %15s %15s' % (grap.edges()[k][0], grap.edges()[k][1], listcolr[k]))\n except:\n print('unequal')\n\n size = 1000\n nx.draw(grap, posi, labels=labl, ax=axis, edgelist=[], nodelist=[])\n nx.draw_networkx_edges(grap, posi, ax=axis, 
labels=labl, edge_color=listcolr)\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['modl', 'data'], node_color='grey', node_size=size)\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['numbelem'], node_color='b', node_size=size)\n if plottype.startswith('lght'):\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['meanelem', 'amplslop'], node_color='r', node_size=size)\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'ampl', 'sind'], node_color='g', node_size=size)\n if plottype == 'lght0001' or plottype == 'lght0002':\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['sinddistmean'], node_color='r', node_size=size)\n if plottype == 'lght0002':\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['expc'], node_color='g', node_size=size)\n if plottype == 'lght0003':\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['spatdistcons'], node_color='r', node_size=size)\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['psfp', 'bacp'], node_color='y', node_size=size)\n if plottype.startswith('lens'):\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['meanelem', 'defsslop'], node_color='r', node_size=size)\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lenp'], node_color='y', node_size=size)\n if plottype == 'lens0000':\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'defs'], node_color='g', node_size=size)\n if plottype == 'lens0001':\n nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'defs', 'asca', 'acut'], node_color='g', node_size=size)\n \n pathplot = pathpcat + '/imag/'\n plt.tight_layout()\n figr.savefig(pathplot + 'grap%s.pdf' % plottype)\n plt.close(figr)\n\n\ndef plot_3fgl_thrs(gdat):\n\n path = pathpcat + '/detthresh_P7v15source_4years_PL22.fits'\n fluxthrs = astropy.io.fits.getdata(path, 0)\n\n bgalfgl3 = np.linspace(-90., 90., 481)\n lgalfgl3 = np.linspace(-180., 180., 960)\n\n bgalexpo = np.linspace(-90., 90., 400)\n lgalexpo = np.linspace(-180., 180., 800)\n\n #fluxthrs = interp2d(lgalfgl3, bgalfgl3, fluxthrs)(lgalexpo, bgalexpo)\n fluxthrs = griddata([lgalfgl3, bgalfgl3], fluxthrs, [gdat.lgalheal])\n\n cntsthrs = fluxthrs * gdat.expo\n\n jbgal = np.where(abs(bgalexpo) < 10.)[0]\n jlgal = np.where(abs(lgalexpo) < 10.)[0]\n extent = [-10, 10, -10, 10]\n \n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n axis.set_xlabel(gdat.labllgaltotl)\n axis.set_ylabel(gdat.lablbgaltotl)\n\n imag = plt.imshow(fluxthrs[np.amin(jbgal):np.amax(jbgal)+1, np.amin(jlghprofi):np.amax(jlghprofi)+1], origin='lower', cmap='Reds', extent=gdat.exttrofi)\n plt.colorbar(imag, fraction=0.05)\n plt.tight_layout()\n figr.savefig(gdat.pathplotrtag + 'thrs.pdf')\n plt.close(figr)\n \n\ndef plot_init(gdat):\n \n print('Making initial plots...')\n\n gmod = gdat.fitt\n\n # make initial plots\n if gdat.makeplot:\n \n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n if (gmod.typeelemspateval[l] == 'locl' and gmod.maxmpara.numbelem[l] > 0) and gdat.numbpixl > 1:\n plot_indxprox(gdat)\n \n for i in gdat.indxener:\n for m in gdat.indxevtt:\n if gdat.typedata == 'mock' and gmod.boollens:\n figr, axis, path = init_figr(gdat, None, 'post', 'cntpmodlraww', 'this', 'true', i, m, -1)\n imag = retr_imag(gdat, axis, gmod.cntpmodlraww, 'this', 'true', 'cntpdata', i, m, booltdim=True)\n make_cbar(gdat, axis, imag, 0, 
tick=gdat.valutickmajrpara.cntpdata, labltotl=gdat.lablcntpdata)\n plt.tight_layout()\n figr.savefig(path)\n plt.close(figr)\n\n if gdat.boolcorrexpo:\n gdat.lablnumbpixl = r'$N_{\\rm{pix}}$'\n gdat.limtexpo = [gdat.minmpara.expo, gdat.maxmpara.expo]\n if gdat.boolbinsener:\n path = gdat.pathinit + 'expototlmean.pdf'\n tdpy.plot_gene(path, gdat.meanpara.ener, gdat.expototlmean, scalxdat='logt', scalydat='logt', lablxdat=gdat.lablenertotl, \\\n lablydat=gdat.lablexpototl, limtydat=gdat.limtexpo)\n \n for m in gdat.indxevtt:\n for i in gdat.indxener:\n figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))\n axis.hist(gdat.expo[i, :, m], gdat.binspara.expo)\n axis.set_xlabel(gdat.labltotlpara.expo)\n axis.set_ylabel(gdat.labltotlpara.numbpixl)\n axis.set_xscale('log')\n axis.set_yscale('log')\n plt.tight_layout()\n name = 'histexpo'\n if gdat.numbener > 1:\n name += 'en%02d' % i\n if gdat.numbevtt > 1:\n name += 'evt%d' % m\n path = gdat.pathinit + name + '.pdf'\n figr.savefig(path)\n plt.close(figr)\n \n if gdat.numbpixl > 1:\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n figr, axis, path = init_figr(gdat, None, 'post', 'expo', '', '', i, m, -1)\n imag = retr_imag(gdat, axis, gdat.expo, None, None, 'expo', i, m)\n make_cbar(gdat, axis, imag, i)\n plt.tight_layout()\n figr.savefig(path)\n plt.close(figr)\n \n\ndef plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \\\n strgvarb='defl', nameparagenrelem='', indxdefl=None, indxpoplplot=-1, multfact=1., indxenerplot=None, indxevttplot=None):\n\n if indxdefl is not None:\n strgvarb += 'sing'\n strgvarb = strgvarb + nameparagenrelem\n \n defl = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn)\n \n defl *= multfact\n \n if indxenerplot is not None:\n defl = defl[indxenerplot, :, indxevttplot, ...]\n\n if indxdefl is not None:\n defl = defl[..., indxdefl]\n strgvarb += '%04d' % indxdefl\n defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))\n\n figr, axis, path = init_figr(gdat, gdatmodi, strgpdfn, strgvarb, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot)\n make_legdmaps(gdat, strgstat, strgmodl, axis)\n draw_frambndr(gdat, axis)\n \n defllgal = defl[:, :, 0]\n deflbgal = defl[:, :, 1]\n fact = 4\n ptch = axis.quiver(gdat.anglfact * gdat.lgalgridcart[::fact, ::fact], gdat.anglfact * gdat.bgalgridcart[::fact, ::fact], \\\n gdat.anglfact * defllgal[::fact, ::fact], gdat.anglfact * deflbgal[::fact, ::fact], scale_units='xy', angles='xy', scale=1)\n supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis)\n plt.subplots_adjust(left=0.2, bottom=0.15, top=0.75, right=0.85)\n plt.subplots_adjust()\n savefigr(gdat, gdatmodi, figr, path)\n plt.close(figr)\n \n\ndef plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgvarb, indxenerplot=None, indxevttplot=-1, strgcbar=None, \\\n booltdim=False, indxpoplplot=-1, strgmome='pmea'):\n \n gmod = getattr(gdat, strgmodl)\n \n if strgcbar is None:\n strgcbar = strgvarb\n \n # construct the string for the map\n if strgvarb == 'cntpdata':\n strgplot = strgvarb\n else:\n if strgstat == 'post':\n strgtemp = strgmome + strgpdfn\n else:\n strgtemp = ''\n strgplot = strgtemp + strgvarb\n \n figr, axis, path = init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot)\n \n maps = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn)\n \n imag = retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot, indxevttplot, booltdim=booltdim)\n \n make_cbar(gdat, axis, imag, 
strgvarb)\n \n make_legdmaps(gdat, strgstat, strgmodl, axis)\n if gdat.boolsuprelem:\n supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot)\n\n print('strgvarb')\n print(strgvarb)\n plt.tight_layout()\n savefigr(gdat, gdatmodi, figr, path)\n plt.close(figr)\n\n\ndef init( \\\n # user interaction\n ## type of verbosity\n typeverb=1, \\\n\n ## path in which PCAT data lives\n pathpcat=None, \\\n \n # miscelleneaous\n ## type of PDF to sample from\n strgpdfn='post', \\\n\n # data\n ## type of data\n ### 'mock': simulated data\n ### 'inpt': input data\n ### 'real': real data retrieved from databases\n typedata=None, \\\n ## type of experiment\n typeexpr='user', \\\n\n # diagnostics\n ## Boolean to enter the diagnostic mode\n booldiagmode=True, \\\n ## squeeze exposure to check the low sample limit \n boolsqzeexpo=False, \\\n ### explode exposure to check the large sample limit \n boolexplexpo=False, \\\n ## squeeze proposal scale to check the acceptance ratio\n boolsqzeprop=False, \\\n ## explode proposal scale to check the acceptance ratio\n boolexplprop=False, \\\n ## Boolean to thin down the data\n boolthindata=False, \\\n ## factor by which to thin down the data\n factthin=None, \\\n \n # reference catalog\n ## Boolean to use the reference catalogs to associate\n boolasscrefr=None, \\\n \n # sampling\n ## Boolean flag to make burn-in tempered\n boolburntmpr=False, \\\n ## number of sweeps\n numbswep=100000, \\\n ## number of samples\n numbsamp=None, \\\n ## number of initial sweeps to be burned\n numbburn=None, \\\n \n # output\n ## Boolean to make condensed catalog\n boolcondcatl=True, \\\n \n refrlabltotl=None, \\\n refrlablpopl=None, \\\n fittlablpopl=None, \\\n \n # numpy RNG seed\n seedtype=0, \\\n ## Boolean flag to re-seed each chain separately\n boolseedchan=True, \\\n ## optional deterministic seed for sampling element parameters\n seedelem=None, \\\n \n indxevttincl=None, \\\n indxenerincl=None, \\\n \n listmask=None, \\\n \n # number of samples for Bootstrap\n numbsampboot=None, \\\n\n listnamefeatsele=None, \\\n \n # type of mask for the exposure map\n typemaskexpo='ignr', \\\n \n # type of exposure\n ## 'cons': constant\n ## 'file': provided in a file\n typeexpo='cons', \\\n\n # maximum spatial distance out to which element kernel will be evaluated\n maxmangleval=None, \\\n \n # initial state\n initpsfprefr=False, \\\n initpsfp=None, \\\n \n # evaluate the likelihood inside circles around elements\n typeelemspateval=None, \\\n \n namestattrue=None, \\\n \n # plotting\n ## Boolean flag to make the frame plots short\n boolshrtfram=True, \\\n \n boolrefeforc=False, \\\n indxrefrforc=None, \\\n\n ## Boolean to overplot the elements\n boolsuprelem=True, \\\n \n ## Boolean to plot the correlation between elements\n boolplotelemcorr=True, \\\n \n ## Boolean flag to vary the PSF\n boolmodipsfn=False, \\\n\n # name of the configuration\n strgcnfg=None, \\\n \n # model\n ## number of spatial dimensions\n numbspatdims=2, \\\n # hyperparameters\n fittampldisttype=None, \\\n # metamodel settings\n \n ## PSF evaluation type\n ## kernel evaluation type\n kernevaltype='ulip', \\\n\n # photometric model\n \n ## base parameters\n ### Sersic type\n typesers='vauc', \\\n ## transdimensional parameters (elements)\n ### vary projected scale radius\n variasca=True, \\\n ### vary projected cutoff radius\n variacut=True, \\\n\n # prior\n penalpridiff=False, \\\n priotype='logt', \\\n priofactdoff=None, \\\n\n # initialization\n ## initialization type\n inittype=None, \\\n \n 
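## Boolean flag to load previously saved parameter (proposal) variances -- meaning assumed from the name and the 'opti' machinery, not documented here\n 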
loadvaripara=False, \\\n \n # save the state of the MCMC\n savestat=False, \\\n namesavestat=None, \\\n # recover the state from a previous run\n namerecostat=None, \\\n forcsavestat=False, \\\n\n # proposals\n ## Boolean flag to turn on proposals on element parameters\n boolpropcomp=True, \\\n boolpropcova=True, \\\n propwithsing=True, \\\n # type of covariance estimation\n typeopti='none', \\\n \n # modes of operation\n ## only generate and plot mock data\n boolmockonly=False, \\\n ## perform an additional run sampling from the prior\n checprio=False, \\\n\n strgexprsbrt=None, \\\n anglassc=None, \\\n nameexpr=None, \\\n \n # likelihood dependent\n ## exposure map\n expo=None, \\\n\n lgalprio=None, \\\n bgalprio=None, \\\n minmcntpdata=None, \\\n strgexpo=None, \\\n \n # number of processors\n numbproc=None, \\\n \n # likelihood function\n liketype='pois', \\\n # user-defined likelihood function\n retr_llik=None, \\\n \n anlytype=None, \\\n \n lgalcntr=0., \\\n bgalcntr=0., \\\n \n maxmangl=None, \\\n \n # spatial grid\n ## type of spatial pixelization\n typepixl=None, \\\n ## Boolean flag to force Cartesian spatial grid\n boolforccart=False, \\\n # number of pixels on a side in the Cartesian grid\n numbsidecart=None, \\\n # Nside in Healpix\n numbsideheal=256, \\\n \n allwfixdtrue=True, \\\n asscmetrtype='dist', \\\n\n # plotting\n numbswepplot=None, \\\n # Boolean flagt to make the frame plots only for the central energy and PSF bin\n boolmakeframcent=True, \\\n makeplot=True, \\\n makeplotinit=True, \\\n makeplotfram=True, \\\n makeplotfinlprio=True, \\\n makeplotfinlpost=True, \\\n \n makeplotintr=False, \\\n scalmaps='asnh', \\\n makeanim=True, \\\n strgenerfull=None, \\\n strgexprname=None, \\\n strganglunit=None, \\\n strganglunittext=None, \\\n anglfact=None, \\\n \n limtydathistfeat=None, \\\n \n # model\n # emission\n ## elements\n\n ## PSF\n specfraceval=None, \\\n numbangl=1000, \\\n binsangltype='logt', \\\n numbsidepntsprob=100, \\\n \n listprefsbrtsbrt=None, \\\n listprefsbrtener=None, \\\n listprefsbrtlabltotl=None, \\\n\n lablgangunit=None, \\\n labllgal=None, \\\n lablbgal=None, \\\n lablfluxunit=None, \\\n lablflux=None, \\\n strgenerunit=None, \\\n indxenerfull=None, \\\n indxevttfull=None, \\\n binsenerfull=None, \\\n asymfluxprop=False, \\\n \n ## Boolean flag to make the PSF model informed\n boolpriopsfninfo=False, \\\n \n ## spectral\n\n # lensing\n fittrelnpowr=0., \\\n\n # temp\n margfactmodl=1., \\\n maxmgangdata=None, \\\n \n # proposals\n stdvprophypr=0.01, \\\n stdvproppsfp=0.1, \\\n stdvpropbacp=0.01, \\\n stdvproplenp=1e-4, \\\n stdvlgal=0.001, \\\n stdvbgal=0.001, \\\n stdvflux=0.001, \\\n stdvspep=0.001, \\\n stdvspmrsind=0.2, \\\n varistdvlbhl=True, \\\n \n rtagmock=None, \\\n \n ## transdimensional proposal probabilities\n probtran=None, \\\n probspmr=None, \\\n # when proposing from the covariance, fracproprand should be very small!\n fracproprand=0., \\\n # standard deviation of the Gaussian from which the angular splitting will be drawn for splits and merges\n radispmr=None, \\\n \n defa=False, \\\n **args \\\n ):\n\n # preliminary setup\n # construct the global object \n gdat = tdpy.gdatstrt()\n for attr, valu in locals().items():\n if '__' not in attr and attr != 'gdat':\n setattr(gdat, attr, valu)\n \n # copy all provided inputs to the global object\n for strg, valu in args.items():\n setattr(gdat, strg, valu)\n\n # PCAT folders\n if gdat.pathpcat is None:\n gdat.pathpcat = os.environ[\"PCAT_DATA_PATH\"] + '/'\n \n if gdat.pathpcat[-1] != '/':\n 
gdat.pathpcat += '/'\n gdat.pathdata = gdat.pathpcat + 'data/'\n gdat.pathdataopti = gdat.pathdata + 'opti/'\n gdat.pathimag = gdat.pathpcat + 'imag/'\n gdat.pathoutp = gdat.pathdata + 'outp/'\n gdat.pathinpt = gdat.pathdata + 'inpt/'\n \n # list of parameter groups\n gdat.liststrggroppara = ['genrbase', 'genrelem', 'derifixd', 'derielem', 'genrelemextd', 'derielemextd', 'kind', 'full']\n \n # list of parameter features to be turned into lists\n gdat.listfeatparalist = ['minm', 'maxm', 'fact', 'scal', 'lablroot', 'lablunit', 'stdv', 'labltotl', 'name']\n # list of parameter features\n gdat.listfeatpara = gdat.listfeatparalist + ['limt', 'bins', 'delt', 'numb', 'indx', 'cmap', 'mean', 'tick', 'numbbins', 'valutickmajr', 'labltickmajr', 'valutickminr', 'labltickminr']\n \n # run tag\n gdat.strgswep = '%d' % (gdat.numbswep)\n \n ## time stamp\n gdat.strgtimestmp = tdpy.retr_strgtimestmp()\n \n ## name of the configuration function\n if gdat.strgcnfg is None:\n gdat.strgcnfg = inspect.stack()[1][3]\n \n gdat.strgvers = 'v0.3'\n if gdat.typeverb > 0:\n print('PCAT %s started at %s.' % (gdat.strgvers, gdat.strgtimestmp))\n print('Configuration %s' % gdat.strgcnfg)\n \n # string describing the number of sweeps\n gdat.strgnumbswep = '%d' % gdat.numbswep\n \n # output paths\n gdat.rtag = retr_rtag(gdat.strgcnfg, gdat.strgnumbswep)\n gdat.pathoutprtag = retr_pathoutprtag(gdat.pathpcat, gdat.rtag)\n\n # physical constants\n gdat.prsccmtr = 3.086e18\n gdat.ergsgevv = 624.151\n gdat.factnewtlght = 2.09e13 # Msun / pc\n \n gdat.listnamepdir = ['forw', 'reve']\n gdat.listlablpdir = ['f', 'r']\n \n # number of standard deviations around mean of Gaussian-distributed variables\n gdat.numbstdvgaus = 4.\n \n # start the timer\n gdat.timerealtotl = time.time()\n gdat.timeproctotl = time.clock()\n \n # list of parameter types\n ## 'genr': generative parameters\n ## 'deri': derived parameters\n gdat.liststrgtypepara = ['genr', 'deri']\n \n booltemp = chec_statfile(gdat.pathpcat, gdat.rtag, 'gdatmodi')\n if booltemp:\n print('gdatmodi already exists. 
Skipping...')\n else:\n \n # create output folder for the run\n os.system('mkdir -p %s' % gdat.pathoutprtag)\n\n # write the list of arguments to file\n fram = inspect.currentframe()\n listargs, temp, temp, listargsvals = inspect.getargvalues(fram)\n fileargs = open(gdat.pathoutprtag + 'cmndargs.txt', 'w')\n fileargs.write('PCAT call arguments\\n')\n for args in listargs:\n fileargs.write('%s = %s\\n' % (args, listargsvals[args]))\n fileargs.close()\n \n # write the list of arguments to file\n fileargs = open(gdat.pathoutprtag + 'args.txt', 'w')\n fileargs.write('PCAT call arguments\\n')\n for args in listargs:\n fileargs.write('%20s %s\\n' % (args, listargsvals[args]))\n fileargs.close()\n \n # defaults\n if gdat.typedata is None:\n if gdat.strgexprsbrt is None:\n gdat.typedata = 'mock'\n else:\n gdat.typedata = 'inpt'\n print('gdat.typedata')\n print(gdat.typedata)\n\n # list of models\n gdat.liststrgmodl = []\n if gdat.typedata == 'mock':\n gdat.liststrgmodl += ['true']\n gdat.liststrgmodl += ['fitt']\n \n gdat.refr = tdpy.gdatstrt()\n \n gdat.listgmod = []\n for strgmodl in gdat.liststrgmodl + ['refr']:\n setattr(gdat, strgmodl, tdpy.gdatstrt())\n gmod = getattr(gdat, strgmodl)\n for strgstat in ['this', 'next']:\n setattr(gmod, strgstat, tdpy.gdatstrt())\n for strgfeatpara in gdat.listfeatpara:\n setattr(gmod, strgfeatpara + 'para', tdpy.gdatstrt())\n \n gdat.listgmod += [gmod]\n\n for strgfeatpara in gdat.listfeatpara:\n setattr(gdat, strgfeatpara + 'para', tdpy.gdatstrt())\n \n ## number of processes\n gdat.strgproc = os.uname()[1]\n if gdat.numbproc is None:\n if gdat.strgproc == 'fink1.rc.fas.harvard.edu' or gdat.strgproc == 'fink2.rc.fas.harvard.edu' or gdat.strgproc == 'wise':\n gdat.numbproc = 1\n else:\n gdat.numbproc = 1\n \n if gdat.typedata == 'inpt' and gdat.rtagmock is not None:\n print('Will use %s to account for selection effects.' 
% gdat.rtagmock)\n gdat.pathoutprtagmock = retr_pathoutprtag(gdat.pathpcat, gdat.rtagmock)\n\n ## number of burned sweeps\n if gdat.numbburn is None:\n print('gdat.numbswep')\n print(gdat.numbswep)\n gdat.numbburn = int(gdat.numbswep / 10)\n print('gdat.numbburn')\n print(gdat.numbburn)\n \n # burn-in\n gdat.factburntmpr = 0.75\n gdat.numbburntmpr = gdat.factburntmpr * gdat.numbburn\n \n if (gdat.boolsqzeprop or gdat.boolexplprop) and gdat.typeopti == 'hess':\n raise Exception('')\n\n print('gdat.boolpriopsfninfo')\n print(gdat.boolpriopsfninfo)\n \n print('gdat.typeexpr')\n print(gdat.typeexpr)\n \n ## factor by which to thin the sweeps to get samples\n if gdat.factthin is not None and gdat.numbsamp is not None:\n raise Exception('Both factthin and numbparagenrfull cannot be provided at the same time.')\n elif gdat.factthin is None and gdat.numbsamp is None:\n gdat.factthin = int(np.ceil(1e-3 * (gdat.numbswep - gdat.numbburn)))\n gdat.numbsamp = int((gdat.numbswep - gdat.numbburn) / gdat.factthin)\n elif gdat.numbsamp is not None:\n gdat.factthin = int((gdat.numbswep - gdat.numbburn) / gdat.numbsamp)\n elif gdat.factthin is not None:\n gdat.numbsamp = int((gdat.numbswep - gdat.numbburn) / gdat.factthin)\n if not isinstance(gdat.numbsamp, int) or not isinstance(gdat.factthin, int) or \\\n not isinstance(gdat.numbburn, int) or not isinstance(gdat.numbswep, int):\n print('gdat.numbsamp')\n print(gdat.numbsamp)\n print('gdat.factthin')\n print(gdat.factthin)\n print('gdat.numbburn')\n print(gdat.numbburn)\n print('gdat.numbswep')\n print(gdat.numbswep)\n raise Exception('Number of samples is not an integer.')\n\n # samples to be saved\n gdat.indxsamp = np.arange(gdat.numbsamp)\n \n # samples to be saved from all chains\n gdat.numbsamptotl = gdat.numbsamp * gdat.numbproc\n gdat.indxsamptotl = np.arange(gdat.numbsamptotl)\n gdat.numbsweptotl = gdat.numbswep * gdat.numbproc\n \n if gdat.typeverb > 0:\n print('%d samples will be taken, discarding the first %d. The chain will be thinned by a factor of %d.' % \\\n (gdat.numbswep, gdat.numbburn, gdat.factthin))\n print('The resulting chain will contain %d samples per chain and %d samples in total.' % (gdat.numbsamp, gdat.numbsamptotl))\n\n if gdat.anlytype is None:\n if gdat.typeexpr == 'chan':\n gdat.anlytype = 'home'\n elif gdat.typeexpr == 'ferm':\n gdat.anlytype = 'rec8pnts'\n else:\n gdat.anlytype = 'nomi'\n \n if gdat.priofactdoff is None:\n gdat.priofactdoff = 1.\n \n # experiment defaults\n if gdat.typeexpr == 'ferm':\n gdat.lablenerunit = 'GeV'\n if gdat.typeexpr == 'chan':\n gdat.lablenerunit = 'keV'\n if gdat.typeexpr == 'gene':\n gdat.lablenerunit = ''\n if gdat.typeexpr == 'fire':\n gdat.lablenerunit = '$\\mu$m^{-1}'\n \n if gdat.typeexpr == 'ferm':\n if gdat.anlytype[4:8] == 'pnts':\n bins = np.logspace(np.log10(0.3), np.log10(10.), 4)\n if gdat.anlytype[4:8] == 'back':\n bins = np.logspace(np.log10(0.3), np.log10(300.), 31)\n if gdat.typeexpr == 'chan':\n if gdat.anlytype.startswith('home'):\n bins = np.array([0.5, 0.91, 1.66, 3.02, 5.49, 10.])\n if gdat.anlytype.startswith('extr'):\n bins = np.array([0.5, 2., 8.])\n if gdat.anlytype.startswith('spec'):\n bins = np.logspace(np.log10(0.5), np.log10(10.), 21)\n if gdat.typeexpr == 'fire':\n bins = np.logspace(np.log10(1. / 2.5e-6), np.log10(1. 
/ 0.8e-6), 31)\n if gdat.typeexpr == 'hubb':\n # temp\n #bins = np.array([500., 750, 1000.])\n bins = np.array([750, 1000.])\n if gdat.typeexpr != 'gene':\n setp_varb(gdat, 'enerfull', bins=bins)\n \n setp_varb(gdat, 'numbpixl', lablroot='$N_{pix}$')\n \n if gdat.expo is not None:\n setp_varb(gdat, 'expo', minm=np.amin(gdat.expo), maxm=np.amax(gdat.expo), lablroot='$\\epsilon$', cmap='OrRd', scal='logt')\n \n # energy band string\n if gdat.strgenerfull is None:\n if gdat.typeexpr == 'tess':\n gdat.strgenerfull = ['T']\n if gdat.typeexpr == 'sdss':\n gdat.strgenerfull = ['z-band', 'i-band', 'r-band', 'g-band', 'u-band']\n if gdat.typeexpr == 'hubb':\n #gdat.strgenerfull = ['F606W', 'F814W']\n gdat.strgenerfull = ['F814W']\n if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'fire': \n gdat.strgenerfull = []\n for i in range(len(gdat.binspara.enerfull) - 1):\n gdat.strgenerfull.append('%.3g %s - %.3g %s' % (gdat.binspara.enerfull[i], gdat.lablenerunit, gdat.binspara.enerfull[i+1], gdat.lablenerunit))\n if gdat.typeexpr == 'gene':\n gdat.strgenerfull = ['']\n \n ## PSF class\n if gdat.indxevttfull is None:\n if gdat.typeexpr == 'ferm':\n gdat.indxevttfull = np.arange(2)\n else:\n gdat.indxevttfull = np.arange(1)\n \n if gdat.indxevttincl is None:\n if gdat.typeexpr == 'ferm':\n gdat.indxevttincl = np.array([0, 1])\n else:\n gdat.indxevttincl = np.arange(1)\n \n if gdat.indxevttincl is not None:\n gdat.evttbins = True\n else:\n gdat.evttbins = False\n if gdat.evttbins:\n gdat.numbevtt = gdat.indxevttincl.size\n gdat.numbevttfull = gdat.indxevttfull.size\n else:\n gdat.numbevtt = 1\n gdat.numbevttfull = 1\n gdat.indxevttincl = np.array([0])\n gdat.indxevtt = np.arange(gdat.numbevtt)\n \n # Boolean flag to indicate that the data are binned in energy\n if gdat.typeexpr == 'gene':\n gdat.boolbinsener = False\n else:\n gdat.boolbinsener = True\n \n if gdat.boolbinsener:\n gdat.numbenerfull = len(gdat.strgenerfull)\n else:\n gdat.numbenerfull = 1\n gdat.indxenerfull = np.arange(gdat.numbenerfull)\n\n if gdat.typepixl is None:\n if gdat.typeexpr == 'ferm':\n gdat.typepixl = 'heal'\n else:\n gdat.typepixl = 'cart'\n \n if gdat.boolbinsener:\n gdat.meanpara.enerfull = np.sqrt(gdat.binspara.enerfull[1:] * gdat.binspara.enerfull[:-1])\n \n setp_varb(gdat, 'boolmodipsfn', valu=False, strgmodl='fitt')\n \n # default values for model types\n print('Starting to determine the default values for model types using setp_varbvalu()...')\n if gdat.typeexpr == 'hubb':\n typeemishost = 'sers'\n else:\n typeemishost = 'none'\n setp_varb(gdat, 'typeemishost', valu=typeemishost)\n\n setp_varb(gdat, 'lliktotl', lablroot='$L$')\n \n ### background type\n #### template\n if gdat.typeexpr == 'ferm':\n if gdat.anlytype == 'bfun':\n gdat.ordrexpa = 10\n gdat.numbexpasing = gdat.ordrexpa**2\n gdat.numbexpa = gdat.numbexpasing * 4\n gdat.indxexpa = np.arange(gdat.numbexpa)\n typeback = ['bfun%04d' % k for k in gdat.indxexpa]\n else:\n typeback = [1., 'sbrtfdfmsmthrec8pntsnorm.fits']\n if gdat.typeexpr == 'chan':\n # particle background\n if gdat.anlytype.startswith('spec'):\n # temp -- this is fake!\n sbrtparttemp = np.array([70.04, 70.04, 12.12, 15.98, 10.79, 73.59, 73.59])\n binsenerpart = np.logspace(np.log10(0.5), np.log10(10.), 6)\n meanenerpart = np.sqrt(binsenerpart[:-1] * binsenerpart[1:])\n meanenerparttemp = np.concatenate((np.array([0.5]), meanenerpart, np.array([10.])))\n typebacktemp = interp(gdat.meanpara.enerfull, meanenerparttemp, sbrtparttemp)\n if gdat.anlytype.startswith('home') :\n 
typebacktemp = 1.\n #typebacktemp = np.array([70.04, 12.12, 15.98, 10.79, 73.59]) / 70.04\n if gdat.anlytype.startswith('extr'):\n #typebacktemp = 'sbrtchanback' + gdat.anlytype + '.fits'\n typebacktemp = 1.\n \n if gdat.anlytype.startswith('spec'):\n typeback = [[1e2, 2.], typebacktemp]\n else:\n typeback = [1., typebacktemp]\n \n if gdat.typeexpr == 'hubb':\n typeback = [1.]\n if gdat.typeexpr == 'tess':\n typeback = [1.]\n if gdat.typeexpr == 'gene':\n typeback = [1.]\n if gdat.typeexpr == 'fire':\n typeback = [1.]\n if gdat.typeexpr != 'user':\n setp_varb(gdat, 'typeback', valu=typeback)\n \n if gdat.typeexpr == 'hubb':\n numbsersfgrd = 1\n else:\n numbsersfgrd = 0\n setp_varb(gdat, 'numbsersfgrd', valu=numbsersfgrd)\n \n if gdat.typeexpr == 'gene':\n typeelem = ['clus']\n if gdat.typeexpr == 'ferm':\n typeelem = ['lghtpnts']\n if gdat.typeexpr == 'tess':\n typeelem = ['lghtpnts']\n if gdat.typeexpr == 'chan':\n typeelem = ['lghtpnts']\n if gdat.typeexpr == 'hubb':\n typeelem = ['lghtpnts', 'lens', 'lghtgausbgrd']\n if gdat.typeexpr == 'fire':\n typeelem = ['lghtlineabso']\n if gdat.typeexpr == 'user':\n typeelem = ['user']\n setp_varb(gdat, 'typeelem', valu=typeelem)\n print('gdat.fitt.typeelem')\n print(gdat.fitt.typeelem)\n\n ### PSF model\n #### angular profile\n if gdat.typeexpr == 'ferm':\n typemodlpsfn = 'doubking'\n if gdat.typeexpr == 'chan':\n typemodlpsfn = 'singking'\n if gdat.typeexpr == 'sdss':\n typemodlpsfn = 'singgaus'\n if gdat.typeexpr == 'hubb':\n typemodlpsfn = 'singgaus'\n if gdat.typeexpr == 'tess':\n typemodlpsfn = 'singgaus'\n if gdat.typeexpr == 'gene':\n typemodlpsfn = 'singgaus'\n if gdat.typeexpr == 'fire':\n typemodlpsfn = None\n if gdat.typeexpr != 'user':\n setp_varb(gdat, 'typemodlpsfn', valu=typemodlpsfn)\n \n #### background names\n listnameback = ['isot']\n if gdat.typeexpr == 'ferm':\n listnameback.append('fdfm')\n #if gdat.typeexpr == 'chan':\n # listnameback.append('part')\n setp_varb(gdat, 'listnameback', valu=listnameback)\n \n if gdat.strgpdfn == 'prio':\n gdat.lablsampdist = 'Prior'\n if gdat.strgpdfn == 'post':\n gdat.lablsampdist = 'Posterior'\n\n for strgmodl in gdat.liststrgmodl:\n # set up the indices of the model\n setp_indxpara(gdat, 'init', strgmodl=strgmodl)\n \n if gdat.numbswepplot is None:\n gdat.numbswepplot = 50000\n \n gdat.numbplotfram = gdat.numbswep / gdat.numbswepplot\n\n #setp_varb(gdat, 'colr', valu='mediumseagreen', strgmodl='refr')\n setp_varb(gdat, 'colr', valu='b', strgmodl='fitt')\n if gdat.typedata == 'mock':\n setp_varb(gdat, 'colr', valu='g', strgmodl='true')\n \n #gdat.refr.colr = 'mediumseagreen'\n #gdat.fitt.colr = 'deepskyblue'\n\n gdat.minmmass = 1.\n gdat.maxmmass = 10.\n \n if gdat.checprio:\n gdat.liststrgpdfn = ['prio', 'post']\n else:\n gdat.liststrgpdfn = ['post']\n\n gdat.lablmass = 'M'\n gdat.minmmassshel = 1e1\n gdat.maxmmassshel = 1e5\n gdat.lablmassshel = '$M_r$' \n\n gdat.lablcurv = r'\\kappa'\n gdat.lablexpc = r'E_{c}'\n \n gmod.scalcurvplot = 'self'\n gmod.scalexpcplot = 'self'\n \n #gdat.minmper0 = 1e-3 \n #gdat.maxmper0 = 1e1\n #\n #gdat.minmmagf = 10**7.5\n #gdat.maxmmagf = 10**16\n \n # temp -- automatize this eventually\n #gmod.minmper0 = gdat.minmper0\n #gmod.minmper0 = gdat.minmper0\n #gmod.maxmper0 = gdat.maxmper0\n #gmod.maxmper0 = gdat.maxmper0\n #gmod.minmmagf = gdat.minmmagf\n #gmod.minmmagf = gdat.minmmagf\n #gmod.maxmmagf = gdat.maxmmagf\n #gmod.maxmmagf = gdat.maxmmagf\n\n gdat.fitt.listelemmrkr = ['+', '_', '3']\n gdat.true.listmrkrhits = ['x', '|', '4']\n gdat.true.listmrkrmiss = 
['s', 'o', 'p']\n gdat.true.listlablmiss = ['s', 'o', 'p']\n \n # list of scalings\n gdat.listscaltype = ['self', 'logt', 'atan', 'gaus', 'pois', 'expo']\n \n # number of grids\n gdat.numbgrid = 1\n gdat.indxgrid = np.arange(gdat.numbgrid)\n\n if gdat.typepixl == 'heal' and gdat.boolforccart:\n raise Exception('Cartesian forcing can only used with cart typepixl')\n\n gdat.liststrgphas = ['fram', 'finl', 'anim']\n gdat.liststrgelemtdimtype = ['bind']\n \n # lensing\n ## list of strings indicating different methods of calculating the subhalo mass fraction\n gdat.liststrgcalcmasssubh = ['delt', 'intg']\n \n # input data\n if gdat.typedata == 'inpt':\n path = gdat.pathinpt + gdat.strgexprsbrt\n gdat.sbrtdata = astropy.io.fits.getdata(path)\n \n if gdat.typepixl == 'heal' or gdat.typepixl == 'cart' and gdat.boolforccart:\n if gdat.sbrtdata.ndim != 3:\n raise Exception('exprsbrtdata should be a 3D numpy np.array if pixelization is HealPix.')\n else:\n if gdat.sbrtdata.ndim != 4:\n raise Exception('exprsbrtdata should be a 4D numpy np.array if pixelization is Cartesian.')\n \n if gdat.typepixl == 'cart' and not gdat.boolforccart:\n gdat.sbrtdata = gdat.sbrtdata.reshape((gdat.sbrtdata.shape[0], -1, gdat.sbrtdata.shape[3]))\n \n gdat.numbenerfull = gdat.sbrtdata.shape[0]\n if gdat.typepixl == 'heal':\n gdat.numbpixlfull = gdat.sbrtdata.shape[1]\n elif gdat.boolforccart:\n gdat.numbpixlfull = gdat.numbsidecart**2\n else:\n gdat.numbpixlfull = gdat.sbrtdata.shape[1] * gdat.sbrtdata.shape[2]\n gdat.numbevttfull = gdat.sbrtdata.shape[2]\n \n if gdat.typepixl == 'heal':\n # temp\n gdat.numbsidecart = 100\n gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)\n gdat.numbsideheal = int(np.sqrt(gdat.numbpixlfull / 12))\n \n if gdat.typeexpr == 'hubb':\n gdat.hubbexpofact = 1.63050e-19\n \n if gdat.strgexpo is None:\n if gdat.typeexpr == 'ferm':\n gdat.strgexpo = 'expofermrec8pntsigal0256.fits'\n \n if gdat.typeexpo is None:\n if gdat.typeexpr == 'ferm':\n gdat.typeexpo = 'file'\n else:\n gdat.typeexpo = 'cons'\n \n print('strgexpo') \n print(strgexpo)\n \n ## generative model\n # the factor to convert radians (i.e., internal angular unit of PCAT) to the angular unit that will be used in the output (i.e., plots and tables)\n if gdat.anglfact is None:\n if gdat.typeexpr == 'ferm':\n gdat.anglfact = 180. / np.pi\n if gdat.typeexpr == 'tess':\n gdat.anglfact = 60 * 180. / np.pi\n if gdat.typeexpr == 'sdss' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'hubb':\n gdat.anglfact = 3600 * 180. 
/ np.pi\n if gdat.typeexpr == 'sche' or gdat.typeexpr == 'gene':\n gdat.anglfact = 1.\n \n if gdat.numbsidecart is not None and gdat.typepixl == 'cart' and not gdat.boolforccart and isinstance(strgexpo, str):\n raise Exception('numbsidecart argument should not be provided when strgexpo is a file name and pixelization is Cartesian.')\n \n if gdat.typepixl == 'heal' or gdat.typepixl == 'cart' and gdat.boolforccart:\n if gdat.numbsidecart is None:\n gdat.numbsidecart = 100\n \n # exposure\n gdat.boolcorrexpo = gdat.expo is not None\n if gdat.typeexpo == 'cons':\n if gdat.typedata == 'mock':\n if gdat.numbsidecart is None:\n gdat.numbsidecart = 100\n if gdat.typedata == 'mock':\n if gdat.typepixl == 'heal':\n gdat.expo = np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))\n if gdat.typepixl == 'cart':\n gdat.expo = np.ones((gdat.numbenerfull, gdat.numbsidecart**2, gdat.numbevttfull))\n if gdat.typedata == 'inpt':\n gdat.expo = np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))\n if gdat.typeexpo == 'file':\n path = gdat.pathinpt + gdat.strgexpo\n if gdat.typeverb > 0:\n print('Reading %s...' % path)\n gdat.expo = astropy.io.fits.getdata(path)\n \n if gdat.typepixl == 'cart':\n gdat.expo = gdat.expo.reshape((gdat.expo.shape[0], -1, gdat.expo.shape[-1]))\n \n if gdat.numbsidecart is None:\n # temp -- gdat.numbsidecart takes the value of the region 0\n if np.sqrt(gdat.expo.shape[1]) % 1. != 0.:\n raise Exception('')\n gdat.numbsidecart = int(np.sqrt(gdat.expo.shape[1]))\n \n if gdat.typedata == 'mock':\n if gdat.typepixl == 'cart':\n gdat.numbpixlfull = gdat.numbsidecart**2\n if gdat.typepixl == 'heal':\n gdat.numbpixlfull = 12 * gdat.numbsideheal**2\n \n # initialization type\n if gdat.inittype is None:\n gdat.inittype = 'rand'\n\n if gdat.typeexpr != 'user':\n \n # Boolean flag to indicate binning in space\n gdat.boolbinsspat = gdat.numbpixlfull != 1\n\n print('gdat.boolbinsspat')\n print(gdat.boolbinsspat)\n \n if gdat.boolcorrexpo and np.amin(gdat.expo) == np.amax(gdat.expo) and not isinstance(gdat.strgexpo, float):\n raise Exception('Bad input exposure map.')\n \n if gdat.boolbinsspat:\n if gdat.typepixl == 'cart' and isinstance(gdat.strgexpo, float) and gdat.typedata == 'inpt':\n if np.sqrt(gdat.sbrtdata.shape[1]) % 1. != 0.:\n raise Exception('')\n gdat.numbsidecart = int(np.sqrt(gdat.sbrtdata.shape[1]))\n \n gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)\n\n if gdat.typepixl == 'cart':\n gdat.numbpixlcart = gdat.numbsidecart**2\n \n ### spatial extent of the data\n if gdat.maxmgangdata is None:\n if gdat.typeexpr == 'chan':\n gdat.maxmgangdata = 0.492 / gdat.anglfact * gdat.numbsidecarthalf\n if gdat.typeexpr == 'ferm':\n gdat.maxmgangdata = 15. / gdat.anglfact\n if gdat.typeexpr == 'tess':\n gdat.maxmgangdata = 20. / gdat.anglfact\n if gdat.typeexpr == 'hubb':\n gdat.maxmgangdata = 2. / gdat.anglfact\n if gdat.typeexpr == 'gene':\n gdat.maxmgangdata = 1. / gdat.anglfact\n \n print('gdat.numbsidecart')\n print(gdat.numbsidecart)\n print('gdat.maxmgangdata')\n print(gdat.maxmgangdata)\n \n # pixelization\n if gdat.typepixl == 'cart':\n gdat.apix = (2. 
* gdat.maxmgangdata / gdat.numbsidecart)**2\n if gdat.typepixl == 'heal':\n temp, temp, temp, gdat.apix = tdpy.retr_healgrid(gdat.numbsideheal)\n gdat.sizepixl = np.sqrt(gdat.apix)\n \n # factor by which to multiply the y axis limits of the surface brightness plot\n if gdat.numbpixlfull == 1:\n gdat.factylimtbrt = [1e-4, 1e7]\n else:\n gdat.factylimtbrt = [1e-4, 1e3]\n\n # grid\n gdat.minmlgaldata = -gdat.maxmgangdata\n gdat.maxmlgaldata = gdat.maxmgangdata\n gdat.minmbgaldata = -gdat.maxmgangdata\n gdat.maxmbgaldata = gdat.maxmgangdata\n \n if gdat.typepixl == 'cart' and gdat.boolforccart:\n if gdat.typedata == 'inpt':\n sbrtdatatemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))\n for i in gdat.indxenerfull:\n for m in gdat.indxevttfull:\n sbrtdatatemp[i, :, m] = tdpy.retr_cart(gdat.sbrtdata[i, :, m], \\\n numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \\\n minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \\\n minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()\n gdat.sbrtdata = sbrtdatatemp\n\n if gdat.boolcorrexpo:\n expotemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))\n for i in gdat.indxenerfull:\n for m in gdat.indxevttfull:\n expotemp[i, :, m] = tdpy.retr_cart(gdat.expo[i, :, m], \\\n numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \\\n minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \\\n minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()\n gdat.expo = expotemp\n \n gdat.sdenunit = 'degr'\n\n gdat.factergskevv = 1.6e-9\n if gdat.typeexpr == 'ferm':\n gdat.listspecconvunit = [['en02', 'gevv']]\n if gdat.typeexpr == 'chan':\n gdat.listspecconvunit = [['en00', 'kevv'], ['en02', 'kevv'], ['en02', 'ergs'], ['en03', 'ergs', '0520', 0.5, 2.], \\\n ['en03', 'ergs', '0210', 2., 10.], \\\n ['en03', 'ergs', '0510', 0.5, 10.], \\\n ['en03', 'ergs', '0208', 2., 8.], \\\n ['en03', 'ergs', '0508', 0.5, 8.], \\\n ['en03', 'ergs', '0207', 2., 7.], \\\n ['en03', 'ergs', '0507', 0.5, 7.]]\n if gdat.typeexpr == 'hubb':\n gdat.listspecconvunit = [['en03', 'ergs']]\n if gdat.typeexpr == 'fire':\n gdat.listspecconvunit = [['en00', 'imum']]\n \n # temp\n #if gdat.typeexpr == 'chan' and (gdat.anlytype.startswith('home') or gdat.anlytype.startswith('extr')):\n # gmod.lablpopl = ['AGN', 'Galaxy']\n\n if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'fire':\n gdat.enerdiff = True\n if gdat.typeexpr == 'hubb' or gdat.typeexpr == 'gene' or gdat.typeexpr == 'tess':\n gdat.enerdiff = False\n \n if gdat.indxenerincl is None:\n \n # default\n if gdat.boolbinsener:\n gdat.indxenerincl = np.arange(gdat.binspara.enerfull.size - 1)\n \n if gdat.typeexpr == 'ferm':\n if gdat.anlytype[4:8] == 'pnts':\n gdat.indxenerincl = np.arange(3)\n if gdat.anlytype[4:8] == 'back':\n gdat.indxenerincl = np.arange(30)\n if gdat.typeexpr == 'chan':\n if gdat.anlytype.startswith('home'):\n gdat.indxenerincl = np.arange(5)\n if gdat.anlytype.startswith('extr'):\n gdat.indxenerincl = np.arange(2)\n if gdat.typeexpr == 'hubb':\n gdat.indxenerincl = np.array([0])\n #gdat.indxenerincl = np.array([1])\n #gdat.indxenerincl = np.array([0, 1])\n if gdat.typeexpr == 'gene':\n gdat.indxenerincl = np.array([0])\n \n if gdat.indxenerincl is None:\n gdat.numbener = 1\n else:\n gdat.numbener = gdat.indxenerincl.size\n gdat.indxener = np.arange(gdat.numbener, dtype=int)\n \n if 
gdat.indxenerincl is None:\n gdat.indxenerincl = gdat.indxener\n \n if gdat.boolbinsener:\n gdat.indxenerinclbins = np.empty(gdat.numbener+1, dtype=int)\n gdat.indxenerinclbins[0:-1] = gdat.indxenerincl\n gdat.indxenerinclbins[-1] = gdat.indxenerincl[-1] + 1\n gdat.indxenerpivt = 0\n gdat.numbenerplot = 100\n gdat.strgener = [gdat.strgenerfull[k] for k in gdat.indxenerincl]\n gdat.binspara.ener = gdat.binspara.enerfull[gdat.indxenerinclbins]\n gdat.meanpara.ener = np.sqrt(gdat.binspara.ener[1:] * gdat.binspara.ener[:-1])\n gdat.deltener = gdat.binspara.ener[1:] - gdat.binspara.ener[:-1]\n gdat.minmener = gdat.binspara.ener[0]\n gdat.maxmener = gdat.binspara.ener[-1]\n retr_axis(gdat, 'ener')\n\n gdat.limtener = [np.amin(gdat.binspara.ener), np.amax(gdat.binspara.ener)] \n if gdat.boolbinsener: \n if gdat.numbener > 1:\n gdat.enerpivt = gdat.meanpara.ener[gdat.indxenerpivt]\n # energy bin indices other than that of the pivot bin\n gdat.indxenerinde = np.setdiff1d(gdat.indxener, gdat.indxenerpivt)\n \n # temp\n if gdat.typeexpr == 'chan':\n gdat.edis = 0.3 * np.sqrt(gdat.binspara.ener) / 2.35\n gdat.edisintp = sp.interpolate.interp1d(gdat.binspara.ener, gdat.edis, fill_value='extrapolate')\n else:\n gdat.edis = None\n gdat.edisintp = None\n\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n \n setp_varb(gdat, 'cntpmodl', lablroot='$C_{M}$', scal='asnh', strgmodl=strgmodl)\n\n # number of elements\n if strgmodl == 'true':\n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lens':\n numbelem = 25\n else:\n numbelem = 5\n setp_varb(gdat, 'numbelem', minm=0, maxm=10, lablroot='N', scal='pois', valu=numbelem, popl=l, strgmodl=strgmodl, strgstat='this')\n if strgmodl == 'fitt':\n setp_varb(gdat, 'numbelem', minm=0, maxm=10, lablroot='N', scal='pois', popl='full', strgmodl=strgmodl)\n\n ## hyperparameters\n setp_varb(gdat, 'typemodltran', valu='drct', strgmodl=strgmodl)\n \n if gmod.typemodltran == 'pois':\n setp_varb(gdat, 'meanelem', minm=0.1, maxm=1000., scal='logt', popl='full', strgmodl=strgmodl)\n \n #### boolean flag background\n if gdat.typeexpr != 'user':\n if gdat.typeexpr == 'chan':\n if gdat.numbpixlfull == 1:\n boolspecback = [True, True]\n else:\n boolspecback = [False, False]\n else:\n boolspecback = [False for k in gmod.indxback]\n setp_varb(gdat, 'boolspecback', valu=boolspecback, strgmodl=strgmodl)\n \n typeelemspateval = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n # these element types slow down execution!\n if gmod.typeelem[l] == 'lens' or gmod.typeelem[l].startswith('lghtline') or gmod.typeelem[l] == 'clusvari' or gmod.typeelem[l] == 'lghtgausbgrd':\n typeelemspateval[l] = 'full'\n else:\n typeelemspateval[l] = 'locl'\n setp_varb(gdat, 'typeelemspateval', valu=typeelemspateval, strgmodl=strgmodl)\n \n gmod.minmpara.numbelem = np.empty(gmod.numbpopl, dtype=int)\n gmod.maxmpara.numbelem = np.empty(gmod.numbpopl, dtype=int)\n for l in gmod.indxpopl:\n gmod.maxmpara.numbelem[l] = int(getattr(gmod.maxmpara, 'numbelempop%d' % l))\n gmod.minmpara.numbelem[l] = int(getattr(gmod.minmpara, 'numbelempop%d' % l))\n gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)\n gmod.minmpara.numbelemtotl = np.sum(gmod.minmpara.numbelem)\n \n # spatial distribution type\n typespatdist = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n typespatdist[l] = 'unif'\n setp_varb(gdat, 'typespatdist', valu=typespatdist, strgmodl=strgmodl)\n \n # flux distribution type\n typeprioflux = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n # temp -- this can 
assign powr to populations whose flux is not drawn from a power law!\n if gmod.typeelem[l].startswith('lght'):\n typeprioflux[l] = 'powr'\n else:\n typeprioflux[l] = None\n setp_varb(gdat, 'typeprioflux', valu=typeprioflux, strgmodl=strgmodl)\n \n if gdat.strgexprname is None:\n if gdat.typeexpr == 'chan':\n gdat.strgexprname = 'Chandra'\n if gdat.typeexpr == 'ferm':\n gdat.strgexprname = 'Fermi-LAT'\n if gdat.typeexpr == 'hubb':\n gdat.strgexprname = 'HST'\n if gdat.typeexpr == 'sche':\n gdat.strgexprname = 'XXXXX'\n if gdat.typeexpr == 'gene':\n gdat.strgexprname = 'TGAS-RAVE'\n \n if gdat.lablgangunit is None:\n if gdat.typeexpr == 'ferm':\n gdat.lablgangunit = '$^o$'\n if gdat.typeexpr == 'gene':\n gdat.lablgangunit = ''\n if gdat.typeexpr == 'sdss' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'hubb':\n gdat.lablgangunit = '$^{\\prime\\prime}$'\n \n if gdat.labllgal is None:\n if gdat.typeexpr == 'gene':\n gdat.labllgal = r'L_{z}'\n else:\n if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0 and gdat.bgalcntr == 0:\n gdat.labllgal = r'l'\n else:\n gdat.labllgal = r'\\theta_1'\n if gdat.lablbgal is None:\n if gdat.typeexpr == 'gene':\n gdat.lablbgal = r'E_k'\n else:\n if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0 and gdat.bgalcntr == 0:\n gdat.lablbgal = r'b'\n else:\n gdat.lablbgal = r'\\theta_2'\n\n if gdat.strgenerunit is None:\n if gdat.typeexpr == 'ferm':\n gdat.strgenerunit = 'GeV'\n gdat.nameenerunit = 'gevv'\n if gdat.typeexpr == 'chan':\n gdat.strgenerunit = 'keV'\n gdat.nameenerunit = 'kevv'\n if gdat.typeexpr == 'gene':\n gdat.strgenerunit = ''\n gdat.nameenerunit = ''\n if gdat.typeexpr == 'hubb':\n gdat.strgenerunit = 'erg'\n gdat.nameenerunit = 'ergs'\n if gdat.typeexpr == 'fire':\n gdat.strgenerunit = '$\\mu$ m$^{-1}$'\n gdat.nameenerunit = 'imum'\n\n if gdat.nameexpr is None:\n if gdat.typeexpr == 'ferm':\n gdat.nameexpr = 'Fermi-LAT'\n if gdat.typeexpr == 'sdss':\n gdat.nameexpr = 'SDSS'\n if gdat.typeexpr == 'chan':\n gdat.nameexpr = 'Chandra'\n if gdat.typeexpr == 'hubb':\n gdat.nameexpr = 'HST'\n if gdat.typeexpr == 'gaia':\n gdat.nameexpr = 'Gaia'\n \n ## Lensing\n if gdat.radispmr is None:\n if gdat.typeexpr == 'ferm':\n gdat.radispmr = 0.6 / gdat.anglfact\n if gdat.typeexpr == 'hubb':\n gdat.radispmr = 0.15 / gdat.anglfact\n if gdat.typeexpr == 'tess':\n gdat.radispmr = 1. / gdat.anglfact\n if gdat.typeexpr == 'chan':\n if gdat.anlytype == 'spec':\n gdat.radispmr = 0.1\n else:\n gdat.radispmr = 0.2 / gdat.anglfact\n if gdat.typeexpr == 'sdss':\n gdat.radispmr = 0.5 / gdat.anglfact\n if gdat.typeexpr == 'gene':\n gdat.radispmr = 0.2\n \n print('gdat.radispmr')\n print(gdat.radispmr)\n\n if gdat.anglassc is None:\n gdat.anglassc = 5. 
* gdat.radispmr\n \n print('gdat.anglassc')\n print(gdat.anglassc)\n\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n \n if gdat.boolbinsspat:\n if gdat.typeexpr == 'chan' or gdat.typeexpr == 'sdss':\n numbpsfpform = 0\n gmod.numbpsfptotl = 0\n if gdat.typeexpr == 'chan':\n retr_psfpchan(gmod)\n if gdat.typeexpr == 'ferm':\n retr_psfpferm(gmod)\n if gdat.typeexpr == 'sdss':\n retr_psfpsdss(gmod)\n if gdat.typeexpr == 'hubb':\n retr_psfphubb(gmod)\n if gdat.typeexpr == 'tess':\n retr_psfptess(gmod)\n if gdat.typeexpr == 'gene':\n retr_psfpsdyn(gmod)\n \n\n # model evaluation approximation error tolerance in units of the fraction of the lowest PS flux\n if gdat.specfraceval is None:\n if gdat.typeexpr == 'ferm':\n gdat.specfraceval = 0.5\n else:\n gdat.specfraceval = 0.1\n\n gdat.binspara.lgalcart = np.linspace(gdat.minmlgaldata, gdat.maxmlgaldata, gdat.numbsidecart + 1)\n gdat.binspara.bgalcart = np.linspace(gdat.minmbgaldata, gdat.maxmbgaldata, gdat.numbsidecart + 1)\n gdat.meanpara.lgalcart = (gdat.binspara.lgalcart[0:-1] + gdat.binspara.lgalcart[1:]) / 2.\n gdat.meanpara.bgalcart = (gdat.binspara.bgalcart[0:-1] + gdat.binspara.bgalcart[1:]) / 2.\n \n # reference elements\n gdat.numbrefr = 0\n if gdat.typedata == 'mock':\n gdat.numbrefr = gmod.numbpopl\n if gdat.typedata == 'inpt':\n if gdat.typeexpr == 'ferm':\n gdat.numbrefr = 2\n if gdat.typeexpr == 'chan':\n gdat.numbrefr = 2\n print('gdat.numbrefr')\n print(gdat.numbrefr)\n\n gdat.indxrefr = np.arange(gdat.numbrefr)\n if gdat.boolasscrefr is None:\n gdat.boolasscrefr = [True for q in gdat.indxrefr]\n \n gdat.listnamerefr = [] \n gdat.refr.nameparagenrelemampl = [[] for q in gdat.indxrefr]\n gdat.refr.namepara.elem = [[] for q in gdat.indxrefr]\n gdat.refr.namepara.elemodim = [[] for q in gdat.indxrefr]\n gdat.boolinforefr = False\n gdat.listpathwcss = []\n \n gdat.numbpixllgalshft = []\n gdat.numbpixlbgalshft = []\n gdat.refrindxpoplassc = [[] for q in gdat.indxrefr] \n \n # temp -- this allows up to 3 reference populations\n gdat.true.colrelem = ['darkgreen', 'olivedrab', 'mediumspringgreen']\n # temp -- this allows up to 3 reference populations\n gdat.fitt.colrelem = ['royalblue', 'dodgerblue', 'navy']\n if gdat.typedata == 'mock':\n gdat.boolinforefr = True\n gdat.listnamerefr = ['moc%d' % l for l in gmod.indxpopl] \n gdat.indxrefr = np.arange(gdat.numbrefr)\n if gdat.typedata == 'inpt':\n if gdat.typeexpr == 'ferm':\n gdat.boolinforefr = True\n retr_refrferminit(gdat)\n for q in gdat.indxrefr:\n gdat.refrindxpoplassc[q] = gmod.indxpopl\n if gdat.typeexpr == 'chan':\n gdat.boolinforefr = True\n retr_refrchaninit(gdat)\n for q in gdat.indxrefr:\n gdat.refrindxpoplassc[q] = gmod.indxpopl\n \n for q in gdat.indxrefr:\n if 'lgal' in gdat.refr.namepara.elem[q] and 'bgal' in gdat.refr.namepara.elem[q]:\n gdat.refr.namepara.elem[q] += ['gang', 'aang']\n for strgfeat in gdat.refr.namepara.elem[q]:\n setattr(gdat.refr, strgfeat, [[] for q in gdat.indxrefr])\n \n if gdat.typeexpr == 'ferm':\n retr_refrfermfinl(gdat)\n if gdat.typeexpr == 'chan':\n retr_refrchanfinl(gdat)\n \n if gdat.typeexpr == 'hubb':\n boollenshost = True\n else:\n boollenshost = False\n setp_varb(gdat, 'boollenshost', valu=boollenshost)\n \n if gdat.typeexpr == 'hubb':\n boollenssubh = True\n else:\n boollenssubh = False\n setp_varb(gdat, 'boollenssubh', valu=boollenssubh)\n \n if gdat.typeexpr == 'hubb':\n boollens = True\n else:\n boollens = False\n setp_varb(gdat, 'boollens', valu=boollens)\n \n if gdat.typeexpr == 'hubb':\n boolemishost = 
True\n else:\n boolemishost = False\n setp_varb(gdat, 'boolemishost', valu=boolemishost)\n \n for strgmodl in gdat.liststrgmodl:\n \n gmod = getattr(gdat, strgmodl)\n\n ## names of the variables for which cumulative posteriors will be plotted\n if gmod.boollenssubh:\n gmod.listnamevarbcpct = ['convelem']\n else:\n gmod.listnamevarbcpct = []\n \n # the adis in the file is kpc\n fileh5py = h5py.File(gdat.pathdata + 'inpt/adis.h5','r')\n \n gdat.redsintp = fileh5py['reds'][()]\n gdat.adisintp = fileh5py['adis'][()] * 1e6 # [pc]\n\n gdat.adisobjt = sp.interpolate.interp1d(gdat.redsintp, gdat.adisintp, fill_value='extrapolate')\n\n gdat.redsfromdlosobjt = sp.interpolate.interp1d(gdat.adisintp * gdat.redsintp, gdat.redsintp, fill_value='extrapolate')\n fileh5py.close()\n \n setp_varb(gdat, 'lgal', minm=-10., maxm=10., lablroot='$l$')\n \n for strgmodl in gdat.liststrgmodl:\n \n gmod = getattr(gdat, strgmodl)\n \n if gdat.typedata == 'mock':\n if gmod.boollenshost:\n setp_varb(gdat, 'redshost', valu=0.2, strgmodl='true')\n setp_varb(gdat, 'redssour', valu=1., strgmodl='true')\n \n setp_indxpara(gdat, 'finl', strgmodl='true')\n \n ### background parameters\n if gdat.typeexpr == 'chan':\n if gdat.anlytype.startswith('extr'):\n meanbacpbac1 = 1.\n else:\n meanbacpbac1 = 70.04\n stdvbacpbac1 = 1e-5 * meanbacpbac1\n setp_varb(gdat, 'bacp', mean=meanbacpbac1, stdv=stdvbacpbac1, back=1, scal='gaus', strgmodl='true')\n\n if gdat.numbpixlfull == 1:\n bacp = [1e0, 1e2]\n setp_varb(gdat, 'bacp', limt=bacp, back=0)\n else:\n bacp = [1e-1, 1e3]\n setp_varb(gdat, 'bacp', limt=bacp, ener='full', back=0)\n if gdat.numbpixlfull == 1:\n bacp = 10.\n setp_varb(gdat, 'bacp', valu=bacp)\n else:\n setp_varb(gdat, 'bacp', valu=170., back=0, ener=0)\n setp_varb(gdat, 'bacp', valu=17.4, back=0, ener=1)\n setp_varb(gdat, 'bacp', valu=27., back=0, ener=2)\n setp_varb(gdat, 'bacp', valu=11.8, back=0, ener=3)\n setp_varb(gdat, 'bacp', valu=101., back=0, ener=4)\n if gdat.typeexpr == 'ferm':\n if 'ferm_bubb' in gdat.strgcnfg:\n setp_varb(gdat, 'bacp', limt=[1e-10, 1e10], ener='full', back='full')\n else:\n # isotropic + unresolved\n setp_varb(gdat, 'bacp', limt=[1e-7, 1e-2], ener=0, back=0)\n setp_varb(gdat, 'bacp', limt=[1e-9, 1e-3], ener=1, back=0)\n setp_varb(gdat, 'bacp', limt=[1e-10, 1e-4], ener=2, back=0)\n # diffuse\n setp_varb(gdat, 'bacp', limt=[1e-6, 1e-2], ener=0, back=1)\n setp_varb(gdat, 'bacp', limt=[1e-7, 1e-3], ener=1, back=1)\n setp_varb(gdat, 'bacp', limt=[1e-8, 1e-4], ener=2, back=1)\n # dark\n setp_varb(gdat, 'bacp', limt=[1e-11, 1e-4], ener=0, back=2)\n setp_varb(gdat, 'bacp', limt=[1e-11, 1e-4], ener=1, back=2)\n setp_varb(gdat, 'bacp', limt=[1e-11, 1e-4], ener=2, back=2)\n\n setp_varb(gdat, 'bacp', valu=5e-6, ener=0, back=0)\n setp_varb(gdat, 'bacp', valu=5e-6, ener=0, back=0)\n setp_varb(gdat, 'bacp', valu=2e-8, ener=1, back=0)\n setp_varb(gdat, 'bacp', valu=2e-9, ener=2, back=0)\n setp_varb(gdat, 'bacp', valu=1e-5, ener=4, back=0)\n setp_varb(gdat, 'bacp', valu=7e-7, ener=0, back=1)\n setp_varb(gdat, 'bacp', valu=1e-4, ener=0, back=1)\n setp_varb(gdat, 'bacp', valu=1e-5, ener=1, back=1)\n setp_varb(gdat, 'bacp', valu=7e-7, ener=2, back=1)\n setp_varb(gdat, 'bacp', valu=3e-8, ener=4, back=1)\n\n # Fourier basis\n for strgmodl in gdat.liststrgmodl:\n for c in gmod.indxback:\n if isinstance(typeback[c], str):\n if 'bfun' in typeback[c]:\n setp_varb(gdat, 'bacp', limt=[1e-10, 1e10], ener='full', back=c)\n\n if gdat.typeexpr == 'hubb':\n bacp = [1e-10, 1e-6]\n if gdat.typeexpr == 'gene':\n setp_varb(gdat, 
'bacp', minm=1e-1, maxm=1e3, valu=1e1, lablroot='$A$', scal='logt', ener=0, back=0, strgmodl=strgmodl)\n \n if gdat.typeexpr == 'fire':\n bacp = [1e-1, 1e1]\n if gdat.typeexpr == 'tess':\n bacp = [1e-1, 1e1]\n setp_varb(gdat, 'bacp', limt=bacp, ener='full', back=0)\n \n if gdat.typeexpr == 'hubb':\n bacp = 2e-7\n if gdat.typeexpr == 'chan':\n bacp = 1.\n if gdat.numbpixlfull == 1:\n setp_varb(gdat, 'bacp', valu=bacp, back=0)\n else:\n setp_varb(gdat, 'bacp', valu=bacp, ener='full', back=0)\n\n # particle background\n if gdat.typeexpr == 'chan':\n bacp = 70.04\n setp_varb(gdat, 'bacp', valu=bacp, back=1)\n \n # particle background\n #if gdat.typeexpr == 'chan':\n # if gdat.anlytype == 'spec':\n # bacp = [1e-8, 1e-6]\n # else:\n # bacp = [1e-1, 1e2]\n # setp_varb(gdat, 'bacp', limt=bacp, back=1)\n \n ### element parameter boundaries\n #### spatial\n if gdat.boolbinsspat:\n if gdat.typeexpr == 'ferm':\n minmgang = 1e-1 / gdat.anglfact\n else:\n minmgang = 1e-2 / gdat.anglfact\n setp_varb(gdat, 'minmgang', valu=minmgang, popl='full', strgmodl=strgmodl)\n \n # parameter defaults\n for l in gmod.indxpopl:\n if gmod.typeelem[l].startswith('lghtline'):\n enertemp = np.sqrt(gdat.limtener[0] * gdat.limtener[1])\n # temp -- these should depend on population index\n setp_varb(gdat, 'elin', limt=gdat.limtener, strgmodl=strgmodl)\n setp_varb(gdat, 'sigm', limt=np.array([1e-1, 1e0]) * enertemp, strgmodl=strgmodl)\n setp_varb(gdat, 'gamm', limt=np.array([1e-1, 1e0]) * enertemp, strgmodl=strgmodl)\n \n if gdat.boolbinsspat:\n minmdefs = 0.003 / gdat.anglfact\n setp_varb(gdat, 'minmdefs', valu=minmdefs, strgmodl=strgmodl)\n \n if gdat.typeexpr == 'ferm':\n setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)\n \n if gdat.boolbinsspat:\n maxmdefs = 1. / gdat.anglfact\n setp_varb(gdat, 'maxmdefs', valu=maxmdefs, strgmodl=strgmodl)\n \n # true model parameters\n if gdat.typedata == 'mock':\n gmod.numbelem = np.zeros(gmod.numbpopl, dtype=int)\n if gmod.typemodltran == 'pois':\n for l in gmod.indxpopl:\n setattr(gdat.true.this, 'meanelempop%d' % l, getattr(gdat.true.this, 'numbelempop%d' % l))\n gmod.numbelem[l] = getattr(gdat.true.this, 'numbelempop%d' % l)\n \n if gmod.numbelem[l] > gmod.maxmpara.numbelem[l]:\n raise Exception('True number of elements is larger than maximum.')\n\n gdat.stdvhostsour = 0.04 / gdat.anglfact\n \n ## distribution\n ### flux\n if gmod.boollenssubh:\n ### projected scale radius\n limtasca = np.array([0., 0.1]) / gdat.anglfact\n setp_varb(gdat, 'asca', minm=minmasca, maxm=maxmasca)\n ### projected cutoff radius\n limtacut = np.array([0., 2.]) / gdat.anglfact\n setp_varb(gdat, 'acut', minm=minmacut, maxm=maxmacut)\n\n if gdat.boolbinsspat:\n\n setp_varb(gdat, 'gangdisttype', valu=['self'], strgmodl=strgmodl)\n \n for l in gmod.indxpopl:\n if gmod.typespatdist[l] == 'gangexpo':\n setp_varb(gdat, 'maxmgang', valu=gmod.maxmlgal, strgmodl=strgmodl)\n if gdat.typeexpr == 'ferm':\n gangdistsexp = 5. 
/ gdat.anglfact\n setp_varb(gdat, 'gangdistsexp', valu=gangdistsexp, strgmodl=strgmodl, popl=l)\n if gmod.typespatdist[l] == 'dsrcexpo':\n if gdat.typeexpr == 'hubb':\n dsrcdistsexp = 0.5 / gdat.anglfact\n setp_varb(gdat, 'dsrcdistsexp', valu=dsrcdistsexp, strgmodl=strgmodl, popl=l)\n \n\n if strgmodl == 'true':\n if gmod.boollenshost or boolemishost:\n setp_varb(gdat, 'lgalhost', mean=0., stdv=gdat.stdvhostsour, strgmodl='true', isfr='full')\n setp_varb(gdat, 'bgalhost', mean=0., stdv=gdat.stdvhostsour, strgmodl='true', isfr='full')\n if gmod.boollens:\n setp_varb(gdat, 'lgalsour', mean=0., stdv=gdat.stdvhostsour, strgmodl='true')\n setp_varb(gdat, 'bgalsour', mean=0., stdv=gdat.stdvhostsour, strgmodl='true')\n if strgmodl == 'fitt':\n if gmod.boollenshost or boolemishost:\n setp_varb(gdat, 'lgalhost', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt', isfr='full')\n setp_varb(gdat, 'bgalhost', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt', isfr='full')\n if gmod.boollens:\n setp_varb(gdat, 'lgalsour', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt')\n setp_varb(gdat, 'bgalsour', limt=[-gdat.maxmgangdata, gdat.maxmgangdata], strgmodl='fitt')\n \n if gmod.boollens:\n setp_varb(gdat, 'redshost', limt=[0., 0.4], strgmodl=strgmodl)\n setp_varb(gdat, 'redssour', limt=[0.5, 1.5], strgmodl=strgmodl)\n setp_varb(gdat, 'fluxsour', limt=np.array([1e-22, 1e-17]), strgmodl=strgmodl)\n setp_varb(gdat, 'sindsour', limt=np.array([0., 4.]), strgmodl=strgmodl)\n setp_varb(gdat, 'sizesour', limt=[0.1 / gdat.anglfact, 2. / gdat.anglfact], strgmodl=strgmodl)\n setp_varb(gdat, 'ellpsour', limt=[0., 0.5], strgmodl=strgmodl)\n setp_varb(gdat, 'redshost', valu=0.2, strgmodl=strgmodl)\n setp_varb(gdat, 'redssour', valu=1., strgmodl=strgmodl)\n \n if gmod.boollenshost or boolemishost:\n setp_varb(gdat, 'fluxhost', limt=np.array([1e-20, 1e-15]), isfr='full', strgmodl=strgmodl)\n setp_varb(gdat, 'sindhost', limt=np.array([0., 4.]), isfr='full', strgmodl=strgmodl)\n setp_varb(gdat, 'sizehost', limt=[0.1 / gdat.anglfact, 4. / gdat.anglfact], isfr='full', strgmodl=strgmodl)\n setp_varb(gdat, 'beinhost', limt=[0.5 / gdat.anglfact, 2. 
/ gdat.anglfact], isfr='full', strgmodl=strgmodl)\n setp_varb(gdat, 'ellphost', limt=[0., 0.5], isfr='full', strgmodl=strgmodl)\n setp_varb(gdat, 'anglhost', limt=[0., np.pi], isfr='full', strgmodl=strgmodl)\n if strgmodl == 'fitt':\n setp_varb(gdat, 'serihost', limt=[1., 8.], isfr='full', strgmodl=strgmodl)\n if strgmodl == 'true':\n setp_varb(gdat, 'serihost', valu=4., isfr='full', strgmodl=strgmodl)\n setp_varb(gdat, 'serihost', limt=[1., 8.], isfr='full', strgmodl=strgmodl)\n \n if gmod.boollens:\n setp_varb(gdat, 'sherextr', limt=[0., 0.1], strgmodl=strgmodl)\n setp_varb(gdat, 'anglsour', limt=[0., np.pi], strgmodl=strgmodl)\n setp_varb(gdat, 'sangextr', limt=[0., np.pi], strgmodl=strgmodl)\n \n # temp -- to be removed\n #gmod.factlgal = gmod.maxmlgal - gmod.minmlgal\n #gmod.factbgal = gmod.maxmbgal - gmod.minmbgal\n #gmod.minmaang = -np.pi\n #gmod.maxmaang = pi\n \n # loglikelihood difference for each element\n setp_varb(gdat, 'deltllik', lablroot='$\\Delta \\log L$', minm=1., maxm=100., strgmodl=strgmodl)\n setp_varb(gdat, 'deltllik', lablroot='$\\Delta \\log L$', minm=1., maxm=100., popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'deltllik', lablroot='$\\Delta \\log L$', minm=1., maxm=100., popl=l, strgmodl=strgmodl, iele='full')\n \n for l in gmod.indxpopl:\n if gmod.typeelem[l] == 'lens':\n meanslop = 1.9\n stdvslop = 0.5\n scal = 'gaus'\n else:\n minmslop = 0.5\n maxmslop = 3.\n scal = 'logt'\n if scal == 'gaus':\n mean = meanslop\n stdv = stdvslop\n else:\n limt = [minmslop, maxmslop]\n \n if gmod.typeelem[l].startswith('clus'):\n valu = 2.\n name = 'slopprio' + gmod.nameparagenrelemampl[l]\n\n setp_varb(gdat, name, minm=minmslop, maxm=maxmslop, scal=scal, lablroot='$\\alpha$', popl=l, strgmodl=strgmodl)\n \n if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':\n setp_varb(gdat, 'gwdtslop', limt=[0.5, 4.], scal='logt', popl=l, strgmodl=strgmodl)\n \n if gdat.typeexpr != 'user':\n if gdat.boolbinsspat:\n setp_varb(gdat, 'spatdistcons', valu=1e-3, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'gangslop', valu=1.1, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'bgaldistscal', valu=2. / gdat.anglfact, popl=l, strgmodl=strgmodl)\n\n if gdat.typeexpr == 'ferm':\n setp_varb(gdat, 'sloplowrprioflux', valu=1.5, popl=l)\n setp_varb(gdat, 'slopupprprioflux', valu=2.5, popl=l)\n setp_varb(gdat, 'brekprioflux', valu=1e-9, popl=l)\n if gmod.typeelem[l] == 'lghtpnts':\n setp_varb(gdat, 'slopprioflux', valu=2.2, popl=l, strgmodl=strgmodl)\n if gmod.typeelem[l].startswith('lghtline'):\n setp_varb(gdat, 'slopprioflux', valu=2., popl=l, strgmodl=strgmodl)\n if gmod.typeelem[l] == 'lens':\n setp_varb(gdat, 'defsslop', valu=1.9, popl=l, strgmodl=strgmodl)\n\n if gmod.typeelem[l] == 'lens':\n setp_varb(gdat, 'ascadistmean', valu=0.05 / gdat.anglfact, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'ascadiststdv', valu=0.04 / gdat.anglfact, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'acutdistmean', valu=1. 
/ gdat.anglfact, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'acutdiststdv', valu=0.04 / gdat.anglfact, popl=l, strgmodl=strgmodl)\n \n if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':\n setp_varb(gdat, 'gwdtslop', valu=2., popl=l, strgmodl=strgmodl)\n \n if gdat.typeexpr == 'ferm':\n sinddistmean = 2.15\n if gdat.typeexpr == 'chan':\n sinddistmean = 1.\n if gdat.typeexpr == 'hubb':\n sinddistmean = 1.\n if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'hubb':\n setp_varb(gdat, 'sinddistmean', valu=sinddistmean, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'sinddiststdv', valu=0.5, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'curvdistmean', valu=2., popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'curvdiststdv', valu=0.2, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'expcdistmean', valu=2., popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'expcdiststdv', valu=0.2, popl=l, strgmodl=strgmodl)\n \n if gmod.typeelem[l] == 'lghtpntspuls':\n setp_varb(gdat, 'per0distmean', valu=3e-3, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'per0diststdv', valu=0.3, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'magfdistmean', valu=10**8.5, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'magfdiststdv', valu=0.7, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'dglcslop', valu=2., popl=l, strgmodl=strgmodl)\n elif gmod.typeelem[l] == 'lghtpntsagnntrue':\n setp_varb(gdat, 'dlosslop', valu=-2., popl=l, strgmodl=strgmodl)\n \n setp_varb(gdat, 'lum0sloplowr', valu=0.5, popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'lum0slopuppr', valu=1.5, popl=l, strgmodl=strgmodl)\n \n if gmod.boollenshost:\n setp_varb(gdat, 'beinhost', valu=1.5 / gdat.anglfact)\n setp_varb(gdat, 'sizesour', valu=0.3 / gdat.anglfact)\n setp_varb(gdat, 'sizehost', valu=1. 
/ gdat.anglfact)\n setp_varb(gdat, 'ellpsour', valu=0.2)\n setp_varb(gdat, 'fluxsour', valu=1e-18)\n setp_varb(gdat, 'sindsour', valu=1.5)\n setp_varb(gdat, 'fluxhost', valu=1e-16)\n setp_varb(gdat, 'sindhost', valu=2.5)\n setp_varb(gdat, 'ellphost', valu=0.2)\n setp_varb(gdat, 'sangextr', valu=np.pi / 2.)\n setp_varb(gdat, 'serihost', valu=4.)\n \n if gdat.typeexpr != 'user':\n if gdat.boolbinsspat:\n minm = -gdat.maxmgangdata\n maxm = gdat.maxmgangdata\n for l in gmod.indxpopl:\n setp_varb(gdat, 'lgal', minm=minm, maxm=maxm, lablroot='$l$', strgmodl=strgmodl)\n setp_varb(gdat, 'bgal', minm=minm, maxm=maxm, lablroot='$b$', strgmodl=strgmodl)\n setp_varb(gdat, 'lgal', minm=minm, maxm=maxm, lablroot='l_{gal}', popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'bgal', minm=minm, maxm=maxm, lablroot='b_{gal}', popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'lgal', minm=minm, maxm=maxm, lablroot='l_{gal}', popl=l, iele='full', strgmodl=strgmodl)\n setp_varb(gdat, 'bgal', minm=minm, maxm=maxm, lablroot='b_{gal}', popl=l, iele='full', strgmodl=strgmodl)\n \n minm = 0.1\n maxm = 10.\n for l in gmod.indxpopl:\n if strgmodl == 'fitt':\n setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N')\n setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N', strgmodl=strgmodl)\n setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N', popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'nobj', minm=minm, maxm=maxm, scal='powr', lablroot='N', popl=l, iele='full', strgmodl=strgmodl)\n \n if gdat.boolbinsspat:\n for l in gmod.indxpopl:\n setp_varb(gdat, 'aang', minm=-np.pi, maxm=np.pi, lablroot=r'$\\theta$', strgmodl=strgmodl)\n setp_varb(gdat, 'aang', minm=-np.pi, maxm=np.pi, lablroot=r'$\\theta$', popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'aang', minm=-np.pi, maxm=np.pi, lablroot=r'$\\theta$', popl=l, strgmodl=strgmodl, iele='full')\n setp_varb(gdat, 'gang', minm=0, maxm=gdat.maxmgangdata, lablroot=r'$\\psi$', strgmodl=strgmodl)\n setp_varb(gdat, 'gang', minm=0, maxm=gdat.maxmgangdata, lablroot=r'$\\psi$', popl=l, strgmodl=strgmodl)\n setp_varb(gdat, 'gang', minm=0, maxm=gdat.maxmgangdata, lablroot=r'$\\psi$', popl=l, strgmodl=strgmodl, iele='full')\n\n \n # copy the true model to the inference model if the inference model parameter has not been specified\n #temp = deepcopy(gdat.__dict__)\n #for strg, valu in temp.items():\n # if strg.startswith('true') and not strg[4:].startswith('indx'):\n # try:\n # valumodl = getattr(gdat.fitt, strg[4:])\n # if valumodl is None:\n # raise\n # if gdat.typeverb > 1:\n # print 'Received custom input for ' + strg[4:]\n # except:\n # setattr(gdat.fitt, strg[4:], getattr(gdat, strg))\n \n # check inputs\n if gdat.numbburn > gdat.numbswep:\n raise Exception('Bad number of burn-in sweeps.')\n if gdat.factthin > gdat.numbswep - gdat.numbburn or gdat.factthin < 1:\n raise Exception('Bad thinning factor.')\n if gdat.typepixl == 'heal' and gdat.numbspatdims > 2:\n raise Exception('More than 2 spatial dimensions require Cartesian binning.')\n \n if gdat.defa:\n return gdat\n \n if gdat.typeverb > 0:\n if gdat.boolburntmpr:\n print('Warning: Tempered burn-in.')\n \n if gdat.typedata == 'inpt':\n gdat.minmpara.sind = -1.\n gdat.maxmpara.sind = 2.\n gdat.minmpara.curv = -1.\n gdat.maxmpara.curv = 1.\n gdat.minmpara.expc = 0.1\n gdat.maxmpara.expc = 10.\n\n for q in gdat.indxrefr:\n for strgfeat in gdat.refr.namepara.elem[q]:\n if strgfeat == 'etag' or strgfeat == 'gang' or strgfeat == 'aang':\n continue\n refrfeat = getattr(gdat.refr, strgfeat)\n \n 
if len(refrfeat[q]) == 0 or refrfeat[q].ndim < 2:\n raise Exception('')\n \n if gdat.typedata != 'mock':\n gdat.refr.numbelem = np.zeros(gdat.numbrefr, dtype=int)\n \n for strgmodl in gdat.liststrgmodl:\n \n # set up the indices of the fitting model\n setp_indxpara(gdat, 'finl', strgmodl=strgmodl)\n \n # construct the model\n setp_paragenrscalbase(gdat, strgmodl=strgmodl)\n \n gmod = getattr(gdat, strgmodl)\n\n if strgmodl == 'true':\n # transfer the true model to the reference model \n #for strg, valu in gdat.true.__dict__.items():\n # setattr(gdat.refr, strg, valu)\n for name in ['listmrkrmiss', 'listlablmiss', 'colr', 'colrelem', 'namepara', 'nameparagenrelemampl', 'numbelem']:\n setattr(gdat.refr, name, getattr(gdat.true, name))\n gdat.refr.indxpoplfittassc = gdat.fitt.indxpopl\n gdat.fitt.indxpoplrefrassc = gdat.fitt.indxpopl\n\n # to be deleted\n # determine total label\n #for name in ['expo', 'numbpixl']:\n # lablroot = getattr(gdat.lablrootpara, name)\n # lablunit = getattr(gdat.lablunitpara, name)\n # labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)\n # setattr(gdat.labltotlpara, name, labltotl)\n \n # set the reference model to true model\n # derived lens parameter minima and maxima\n print('Defining minima and maxima for derived parameters...')\n for strgmodl in gdat.liststrgmodl:\n for e in gmod.indxsersfgrd:\n strgsersfgrd = 'isf%d' % e\n setp_varb(gdat, 'masshost' + strgsersfgrd + 'bein', limt=[1e7, 1e14], strgmodl=strgmodl)\n for strgcalcmasssubh in gdat.liststrgcalcmasssubh:\n setp_varb(gdat, 'masshost' + strgsersfgrd + strgcalcmasssubh + 'bein', limt=[1e7, 1e14], strgmodl=strgmodl)\n if gmod.numbparaelem > 0:\n if gmod.boollenssubh:\n for strgcalcmasssubh in gdat.liststrgcalcmasssubh:\n setp_varb(gdat, 'masssubh' + strgsersfgrd + 'bein', limt=[1e7, 1e10], strgmodl=strgmodl)\n setp_varb(gdat, 'fracsubh' + strgsersfgrd + 'bein', limt=[0., 1.], strgmodl=strgmodl)\n \n gdat.typeelem = []\n gdat.typeelemspateval = []\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n for typeelemtemp in gmod.typeelem:\n if not typeelemtemp in gdat.typeelem:\n gdat.typeelem.append(typeelemtemp)\n for typeelemspatevaltemp in typeelemspateval:\n if not typeelemspatevaltemp in gdat.typeelemspateval:\n gdat.typeelemspateval.append(typeelemspatevaltemp)\n \n for strgvarb in ['boolelempsfn']:\n varbcomm = False\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n varb = getattr(gmod, strgvarb)\n varbcomm = varbcomm or varb\n setattr(gdat, strgvarb + 'anyy', varbcomm) \n\n #gdat.fitt.namepara.genrelemtagg = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n #for q in gdat.indxrefr:\n # for strgfeat in gdat.refr.namepara.elem[q]:\n # for l in gmod.indxpopl:\n # gdat.fitt.namepara.genrelemtagg[q][l].append(strgfeat + gdat.listnamerefr[q])\n \n gdat.listnamevarbstat = ['paragenrscalfull', 'paragenrunitfull', 'indxelemfull', 'lliktotl', 'llik', 'lpritotl', 'lpri']\n if gdat.typepixl == 'cart' and (gmod.typeevalpsfn == 'conv' or gmod.typeevalpsfn == 'full'):\n gdat.listnamevarbstat += ['psfnconv']\n if gmod.boolelemsbrtdfncanyy:\n gdat.listnamevarbstat += ['sbrtdfnc']\n if gmod.boolelemsbrtextsbgrdanyy:\n gdat.listnamevarbstat += ['sbrtextsbgrd']\n if gmod.boollens:\n gdat.listnamevarbstat += ['sbrtlens']\n if gmod.boollens or gmod.typeemishost != 'none':\n for e in gmod.indxsersfgrd:\n if gmod.boollens:\n gdat.listnamevarbstat += ['deflhostisf%d' % e]\n if gmod.typeemishost != 'none':\n gdat.listnamevarbstat += ['sbrthostisf%d' % e]\n if gmod.convdiffanyy 
and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'):\n gdat.listnamevarbstat += ['sbrtmodlconv']\n if gmod.boolelemdeflsubhanyy:\n gdat.listnamevarbstat += ['deflsubh']\n \n # paths\n ## data\n gdat.pathpixlcnvt = gdat.pathdata + 'pixlcnvt/'\n gdat.pathprox = gdat.pathdata + 'prox/'\n ## plot\n gdat.pathplotrtag = gdat.pathimag + gdat.rtag + '/'\n gdat.pathinit = gdat.pathplotrtag + 'init/'\n gdat.pathinitintr = gdat.pathinit + 'intr/'\n \n if gdat.boolbinsspat:\n gdat.ascaglob = 0.05 / gdat.anglfact\n gdat.acutglob = 1. / gdat.anglfact\n gdat.cutfdefs = 3e-3 / gdat.anglfact\n\n # plotting\n gdat.lablsampdist = 'Posterior'\n gdat.lablparagenrscalfull = 'Sample'\n gdat.lablmlik = 'Maximum likelihood'\n gdat.lablmedi = 'Median'\n gdat.lablpmea = 'Mean'\n gdat.lablstdv = 'Std. dev.'\n \n # number of samples for which cumulative posterior will be calculated\n gdat.numbsampcpct = 10\n gdat.indxsampcpct = np.arange(gdat.numbsampcpct)\n \n # p value contours \n gdat.pvalcont = [0.317, 0.0455, 2.7e-3, 6e-5, 1.3e-6]\n\n ## number of bins in histogram plots\n gdat.numbbinsplot = 20\n gdat.indxbinsplot = np.arange(gdat.numbbinsplot)\n \n ## number of bins in hyperprior plots\n gdat.numbbinsplotprio = 100\n # temp\n if gdat.typedata == 'inpt':\n for l in gmod.indxpopl:\n for strgpdfn in gmod.listscalparagenrelem[l]:\n if strgpdfn.startswith('gaum') and gmod.lgalprio is None and gmod.bgalprio is None:\n raise Exception('If typespatdist is \"gaus\", spatial coordinates of the prior catalog should be provided via lgalprio and bgalprio.')\n \n # temp -- have these definitions separate for all features\n # feature plotting factors and scalings\n gdat.dictglob = {}\n \n gdat.listnamechro = ['totl', 'prop', 'diag', 'save', 'plot', 'proc', 'pars', 'modl', 'llik', 'sbrtmodl']\n gdat.listlablchro = ['Total', 'Proposal', 'Diagnostics', 'Save', 'Plot', 'Process', 'Parse', 'Model', 'Likelihood', 'Total emission']\n if gmod.numbparaelem > 0:\n gdat.listnamechro += ['spec']\n gdat.listlablchro += ['Spectrum calculation']\n if gmod.boollens:\n gdat.listnamechro += ['deflzero', 'deflhost', 'deflextr', 'sbrtlens', 'sbrthost']\n gdat.listlablchro += ['Array initialization', 'Host Deflection', 'External deflection', 'Lensed emission', 'Host emission']\n if gmod.boolelemsbrtdfncanyy:\n gdat.listnamechro += ['elemsbrtdfnc']\n gdat.listlablchro += ['Dfnc S Brght']\n if gmod.boolelemdeflsubhanyy:\n gdat.listnamechro += ['elemdeflsubh']\n gdat.listlablchro += ['Subh Defl']\n if gmod.boolelemsbrtextsbgrdanyy:\n gdat.listnamechro += ['elemsbrtextsbgrd']\n gdat.listlablchro += ['Bkg Exts S Brght']\n booltemp = False\n for strgmodl in gdat.liststrgmodl:\n booltemp = booltemp or gmod.typeevalpsfn\n if booltemp or gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'full':\n gdat.listnamechro += ['psfnconv']\n gdat.listlablchro += ['Img for PSF Conv.']\n \n gdat.listnamechro += ['expo', 'lpri', 'tert']\n gdat.listlablchro += ['Exposure', 'Prior', 'Tertiary']\n gdat.numbchro = len(gdat.listnamechro)\n \n if gdat.typedata != 'mock':\n if gmod.boolelemlghtanyy and gdat.typeexpr == 'ferm' and gdat.maxmgangdata == 20. 
/ gdat.anglfact:\n path = gdat.pathinpt + 'sbrt0018.png'\n gdat.sbrt0018 = sp.ndimage.imread(path, flatten=True)\n gdat.sbrt0018 -= np.amin(gdat.sbrt0018)\n gdat.sbrt0018 /= np.amax(gdat.sbrt0018)\n binslgaltemp = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.sbrt0018.shape[1])\n binsbgaltemp = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.sbrt0018.shape[0])\n gdat.sbrt0018objt = sp.interpolate.RectBivariateSpline(binsbgaltemp, binslgaltemp, gdat.sbrt0018)\n\n # log-prior register\n ## indices of split and merge term\n indxlprispme = -1\n ## number of elements\n numb = 0\n for l in gmod.indxpopl:\n numb += len(gmod.namepara.genrelem[l])\n \n # process index\n gdat.indxproc = np.arange(gdat.numbproc)\n\n if gmod.boollens or gdat.typedata == 'mock' and gmod.boollens:\n retr_axis(gdat, 'mcut')\n retr_axis(gdat, 'bein')\n\n # angular deviation\n gdat.numbanglhalf = 10\n gdat.indxanglhalf = np.arange(gdat.numbanglhalf)\n retr_axis(gdat, 'anglhalf')\n gdat.numbanglfull = 1000\n gdat.indxanglfull = np.arange(gdat.numbanglfull)\n gdat.minmpara.anglfull = 0.\n gdat.maxmpara.anglfull = 3. * gdat.maxmgangdata\n retr_axis(gdat, 'anglfull')\n \n # temp\n #gdat.binspara.anglcosi = np.sort(np.cos(gdat.binspara.angl))\n \n # temp\n #gdat.meshbackener = np.meshgrid(gdat.gmod.indxback, gdat.indxener, indexing='ij')\n \n # plotting\n ## the normalized offset for text annotation of point sources in the frames\n gdat.offstextimag = gdat.maxmgangdata * 0.05\n \n ## figure size\n gdat.plotsize = 6\n ## size of the images\n gdat.sizeimag = 1.3 * gdat.plotsize\n \n ## label of the models\n gdat.fitt.lablmodl = 'Model'\n if gdat.typedata == 'mock':\n gdat.refr.lablmodl = 'True'\n else:\n gdat.refr.lablmodl = 'Ref'\n \n # element parameters common between the fitting and reference models\n gdat.namepara.elemcomm = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]\n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n for strgfeat in gmod.listnameparatotlelem[l]:\n if strgfeat in gdat.refr.namepara.elem[q]:\n gdat.namepara.elemcomm[q][l].append(strgfeat)\n \n if gdat.typedata == 'mock':\n gdat.refr.indxpopl = gdat.true.indxpopl\n gdat.refr.lablpopl = gdat.true.lablpopl\n\n \n for strgmodl in ['refr', 'fitt']:\n \n gmod = getattr(gdat, strgmodl)\n \n print('strgmodl')\n print(strgmodl)\n # labels of elements\n lablelem = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n lablelem[l] = gmod.lablmodl + ' ' + gmod.lablpopl[l]\n setp_varb(gdat, 'lablelem', valu=lablelem, strgmodl=strgmodl)\n \n lablelemmiss = [[] for l in gmod.indxpopl]\n lablelemhits = [[] for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n lablelemmiss[l] = gmod.lablelem[l] + ' miss'\n lablelemhits[l] = gmod.lablelem[l] + ' hit'\n setp_varb(gdat, 'lablelemmiss', valu=lablelemmiss, strgmodl=strgmodl)\n setp_varb(gdat, 'lablelemhits', valu=lablelemhits, strgmodl=strgmodl)\n \n lablhost = gmod.lablmodl + ' host'\n setp_varb(gdat, 'lablhost', valu=lablhost, strgmodl=strgmodl)\n \n lablsour = gmod.lablmodl + ' sour'\n setp_varb(gdat, 'lablsour', valu=lablsour, strgmodl=strgmodl)\n\n ## PSF class indices for which images will be plotted\n if gdat.numbevtt == 1:\n gdat.indxevttplot = gdat.indxevtt\n else:\n gdat.indxevttplot = np.concatenate((np.array([-1]), gdat.indxevtt))\n \n gdat.numbenerevtt = gdat.numbener * gdat.numbevtt\n \n # temp\n gdat.boolintpanglcosi = False\n\n if gdat.boolthindata:\n gdat.factdatathin = 10\n if gdat.typepixl != 'cart' or gdat.numbsidecart % gdat.factdatathin != 0:\n raise Exception('Cannot thin the 
data.')\n #gdat.indxpixlkeep = gdat.indxpixlfull[::gdat.factdatathin]\n #gdat.numbpixlkeep = gdat.indxpixlkeep.size\n gdat.indxpixlkill = np.setdiff1d(gdat.indxpixlfull, gdat.indxpixlkeep)\n gdat.numbsidecart = gdat.numbsidecart / 10\n gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)\n gdat.lgalgrid = gdat.lgalgrid[gdat.indxpixlkeep]\n gdat.bgalgrid = gdat.bgalgrid[gdat.indxpixlkeep]\n gdat.indxpixlfull = gdat.indxpixlfull[gdat.indxpixlkeep]\n \n # the function to measure time\n # temp\n gdat.strgfunctime = 'clck'\n if gdat.strgfunctime == 'clck':\n gdat.functime = time.clock\n if gdat.strgfunctime == 'time':\n gdat.functime = time.time\n\n ## longitude\n gdat.numblgalpntsprob = gdat.numbsidepntsprob\n gdat.numbbgalpntsprob = gdat.numbsidepntsprob\n gdat.binspara.lgalpntsprob = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.numbsidepntsprob + 1)\n gdat.binspara.bgalpntsprob = np.linspace(-gdat.maxmgangdata, gdat.maxmgangdata, gdat.numbsidepntsprob + 1)\n gdat.indxlgalpntsprob = np.arange(gdat.numblgalpntsprob)\n gdat.indxbgalpntsprob = np.arange(gdat.numbbgalpntsprob)\n\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n if gmod.boollens or gdat.typedata == 'mock' and gmod.boollens:\n retr_axis(gdat, 'defl')\n retr_axis(gdat, 'deflsubh')\n\n # lensing problem setup\n ## number of deflection components to plot\n\n gdat.binspara.lgalcartmesh, gdat.binspara.bgalcartmesh = np.meshgrid(gdat.binspara.lgalcart, gdat.binspara.bgalcart, indexing='ij')\n gdat.meanpara.lgalcartmesh, gdat.meanpara.bgalcartmesh = np.meshgrid(gdat.meanpara.lgalcart, gdat.meanpara.bgalcart, indexing='ij')\n if gdat.typepixl == 'cart':\n gdat.sizepixl = np.sqrt(gdat.apix)\n gdat.indxsidecart = np.arange(gdat.numbsidecart)\n gdat.indxpixlrofi = np.arange(gdat.numbpixlcart)\n gdat.indxsidemesh = np.meshgrid(gdat.indxsidecart, gdat.indxsidecart, indexing='ij')\n gdat.lgalgrid = gdat.meanpara.lgalcart[gdat.indxsidemesh[0].flatten()]\n gdat.bgalgrid = gdat.meanpara.bgalcart[gdat.indxsidemesh[1].flatten()]\n gdat.shapcart = (gdat.numbsidecart, gdat.numbsidecart)\n gdat.lgalgridfull = np.copy(gdat.lgalgrid)\n gdat.bgalgridfull = np.copy(gdat.bgalgrid)\n gdat.lgalgridcart = gdat.lgalgrid.reshape(gdat.shapcart)\n gdat.bgalgridcart = gdat.bgalgrid.reshape(gdat.shapcart)\n gdat.indxpent = np.meshgrid(gdat.indxener, gdat.indxsidecart, gdat.indxsidecart, gdat.indxevtt, indexing='ij')\n if gdat.typepixl == 'heal':\n lgalheal, bgalheal, gdat.numbpixlfull, gdat.apix = tdpy.retr_healgrid(gdat.numbsideheal)\n lgalheal = np.deg2rad(lgalheal)\n bgalheal = np.deg2rad(bgalheal)\n \n gdat.indxpixlrofi = np.where((np.fabs(lgalheal) < gdat.maxmgangdata) & (np.fabs(bgalheal) < gdat.maxmgangdata))[0]\n \n gdat.indxpixlrofimarg = np.where((np.fabs(lgalheal) < 1.2 * gdat.maxmgangdata) & (np.fabs(bgalheal) < 1.2 * gdat.maxmgangdata))[0]\n\n gdat.lgalgrid = lgalheal\n gdat.bgalgrid = bgalheal\n \n gdat.indxpixlfull = np.arange(gdat.numbpixlfull)\n if gdat.typepixl == 'cart':\n gdat.indxpixlcart = np.arange(gdat.numbpixlcart)\n \n if gdat.evttbins:\n # PSF class string\n gdat.strgevtt = []\n for m in gdat.indxevtt:\n gdat.strgevtt.append('PSF%d' % gdat.indxevttincl[m])\n \n # power spectra\n if gdat.typepixl == 'cart':\n setp_varb(gdat, 'anglodim', minm=0., maxm=1., boolinvr=True)\n setp_varb(gdat, 'mpolodim', minm=0., maxm=1.)\n #retr_axis(gdat, 'anglodim', boolinvr=True)\n #retr_axis(gdat, 'mpolodim')\n \n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n \n gdat.numbwvecodim = gdat.numbsidecart\n 
gdat.minmanglodim = 0.\n gdat.maxmanglodim = 2. * gdat.maxmgangdata\n gdat.minmmpolodim = 0.\n gdat.maxmmpolodim = 1. / 2. / gdat.sizepixl\n\n if gmod.boollens or gdat.typedata == 'mock' and gmod.boollens:\n # temp -- this should minima, maxima of adishost and the true metamodel into account\n gdat.minmwvecodim = gdat.minmmpolodim / np.amax(gmod.adishost)\n gdat.maxmwvecodim = gdat.maxmmpolodim / np.amin(gmod.adishost)\n gdat.minmwlenodim = gdat.minmanglodim * np.amin(gmod.adishost)\n gdat.maxmwlenodim = gdat.maxmanglodim * np.amax(gmod.adishost)\n retr_axis(gdat, 'wvecodim', strgmodl=strgmodl)\n retr_axis(gdat, 'wlenodim', strgmodl=strgmodl, boolinvr=True)\n gdat.meanpara.wveclgal, gdat.meanpara.wvecbgal = np.meshgrid(gdat.meanpara.wvecodim, gdat.meanpara.wvecodim, indexing='ij')\n gdat.meanpara.wvec = np.sqrt(gdat.meanpara.wveclgal**2 + gdat.meanpara.wvecbgal**2)\n gdat.meanpara.mpollgal, gdat.meanpara.mpolbgal = np.meshgrid(gdat.meanpara.mpolodim, gdat.meanpara.mpolodim, indexing='ij')\n gdat.meanpara.mpol = np.sqrt(gdat.meanpara.mpollgal**2 + gdat.meanpara.mpolbgal**2)\n\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n \n # element parameter vector indices\n gmod.indxparagenrelemlgal = 0\n gmod.indxparagenrelembgal = 1\n gmod.indxparagenrelemflux = 2\n gmod.indxparagenrelemsind = 3\n gmod.indxparagenrelemcurv = 4\n gmod.indxparagenrelemexpc = 4\n\n # check the exposure map data structure\n if gdat.boolcorrexpo:\n booltemp = False\n if gdat.expo.ndim != 3:\n booltemp = True\n if gdat.typepixl == 'cart' and gdat.expo.shape[1] != gdat.numbpixlcart:\n booltemp = True\n if booltemp:\n raise Exception('Exposure does not have the right data structure. It should be a list of 3D np.arrays.')\n \n if gdat.boolsqzeexpo:\n gdat.expo *= 1e-10\n if gdat.boolexplexpo:\n gdat.expo *= 1e10\n \n if gdat.boolthindata:\n #gdat.expo[:, gdat.indxpixlkill, :] = 0.\n expotemp = np.copy(gdat.expo[:, gdat.indxpixlfull[::gdat.factdatathin], :])\n sbrttemp = np.copy(gdat.sbrtdata[:, gdat.indxpixlfull[::gdat.factdatathin], :])\n gdat.expo = expotemp \n gdat.sbrtdata = sbrttemp\n \n # only include desired energy and PSF class bins\n gdat.indxcubeincl = np.meshgrid(gdat.indxenerincl, gdat.indxpixlfull, gdat.indxevttincl, indexing='ij')\n \n ## exposure\n if gdat.boolcorrexpo:\n # temp -- for some reason lists of np.arrays require manual processing\n gdat.expo = gdat.expo[tuple(gdat.indxcubeincl)]\n if gdat.typedata == 'inpt':\n gdat.sbrtdata = gdat.sbrtdata[tuple(gdat.indxcubeincl)]\n \n ## backgrounds\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n gmod.sbrtbacknormincl = [[] for c in gmod.indxback]\n for c in gmod.indxback:\n gmod.sbrtbacknormincl[c] = gmod.sbrtbacknorm[c][tuple(gdat.indxcubeincl)]\n \n # obtain cartesian versions of the maps\n #if gdat.typepixl == 'cart':\n # gdat.expocart = gdat.expo.reshape((gdat.numbener, gdat.numbsidecart, gdat.numbsidecart, gdat.numbevtt))\n # for strgmodl in gdat.liststrgmodl:\n # gmod.sbrtbacknormcart = []\n # for c in getattr(gmod, 'gmod.indxback'):\n # gmod.sbrtbacknormcart.append(gmod.sbrtbacknorm[c].reshape((gdat.numbener, gdat.numbsidecart, gdat.numbsidecart, gdat.numbevtt)))\n \n # mask the exposure map\n if gdat.listmask is not None:\n for mask in gdat.listmask:\n if mask[0] == 'sqre':\n indxpixlmask = np.where((gdat.lgalgrid > mask[1]) & (gdat.lgalgrid < mask[2]) & (gdat.bgalgrid > mask[3]) & (gdat.bgalgrid < mask[4]))[0]\n if mask[0] == 'circ':\n indxpixlmask = np.where(np.sqrt((gdat.lgalgrid - mask[1])**2 + 
(gdat.bgalgrid - mask[2])**2) < mask[3])[0]\n if mask[0] == 'hstr':\n indxpixlmask = np.where((gdat.bgalgrid > mask[1]) & (gdat.bgalgrid < mask[2]))[0]\n if gdat.typemaskexpo == 'zero':\n gdat.expo[:, indxpixlmask, :] = 0.\n if gdat.typemaskexpo == 'ignr':\n gdat.expo[:, indxpixlmask, :] = 1e-49\n\n # plotting\n ## ROI\n if gdat.boolbinsspat:\n gdat.exttrofi = np.array([gdat.minmlgaldata, gdat.maxmlgaldata, gdat.minmbgaldata, gdat.maxmbgaldata])\n gdat.exttrofi *= gdat.anglfact \n gdat.frambndrdata = gdat.maxmgangdata * gdat.anglfact\n\n ## marker size\n gdat.minmmrkrsize = 100\n gdat.maxmmrkrsize = 500\n ## marker line width\n gdat.mrkrlinewdth = 3\n ## marker opacity\n gdat.alphhist = 0.5\n gdat.alphline = 0.5\n gdat.alphbndr = 0.5\n gdat.alphelem = 1.\n gdat.alphmaps = 1.\n \n # number of colorbar ticks in the maps\n gdat.numbtickcbar = 11\n \n ## color bars\n gdat.minmlpdfspatpriointp = np.log(1. / 2. / gdat.maxmgangdata) - 10.\n gdat.maxmlpdfspatpriointp = np.log(1. / 2. / gdat.maxmgangdata) + 10.\n gmod.scallpdfspatpriointp = 'self'\n gdat.cmaplpdfspatpriointp = 'PuBu'\n \n gdat.minmllikmaps = -10.\n gdat.maxmllikmaps = 0.\n gmod.scalllikmaps = 'asnh'\n gdat.cmapllikmaps = 'YlGn'\n \n gdat.minmperc = 0.\n gdat.maxmperc = 1e2\n gdat.scalperc = 'asnh'\n gdat.cmapperc = 'afmhot'\n \n gdat.minmpercresi = -1e2\n gdat.maxmpercresi = 1e2\n gdat.scalpercresi = 'asnh'\n gdat.cmappercresi = 'coolwarm'\n \n gdat.scalpara.cntpdata = 'logt'\n gdat.cmappara.cntpdata = 'Greys'\n \n gdat.scalpara.cntpmodl = 'logt'\n gdat.cmappara.cntpmodl = 'Greys'\n \n gdat.scalpara.cntpresi = 'asnh'\n gdat.cmappara.cntpresi = make_cmapdivg('Red', 'Orange')\n\n gdat.minmconv = 1e-2\n gdat.maxmconv = 10.\n gdat.scalconv = 'logt'\n gdat.cmapconv = 'Purples'\n \n gdat.minmconvelem = 1e-4\n gdat.maxmconvelem = 1e-1\n gdat.scalconvelem = 'logt'\n gdat.cmapconvelem = 'Purples'\n \n gdat.minms2nr = 0.\n gdat.maxms2nr = 10.\n gmod.scals2nr = 'asnh'\n gdat.cmaps2nr = 'magma'\n \n gdat.minmmagn = -1e2\n gdat.maxmmagn = 1e2\n gmod.scalmagn = 'asnh'\n gdat.cmapmagn = 'BrBG'\n \n gdat.minmdeflresiperc = -100.\n gdat.maxmdeflresiperc = 100.\n gmod.scaldeflresiperc = 'self'\n gdat.cmapdeflresiperc = 'Oranges'\n \n gdat.minmconvelemresi = -0.1\n gdat.maxmconvelemresi = 0.1\n gmod.scalconvelemresi = 'self'\n gdat.cmapconvelemresi = 'PiYG'\n \n gdat.minmconvelemresiperc = -100.\n gdat.maxmconvelemresiperc = 100.\n gmod.scalconvelemresiperc = 'self'\n gdat.cmapconvelemresiperc = 'PiYG'\n \n gdat.minmmagnresi = -10.\n gdat.maxmmagnresi = 10.\n gmod.scalmagnresi = 'self'\n gdat.cmapmagnresi = 'PRGn'\n \n gdat.minmmagnresiperc = -100.\n gdat.maxmmagnresiperc = 100.\n gmod.scalmagnresiperc = 'self'\n gdat.cmapmagnresiperc = 'PRGn'\n \n gdat.lgalgrid = gdat.lgalgrid[gdat.indxpixlrofi]\n gdat.bgalgrid = gdat.bgalgrid[gdat.indxpixlrofi]\n \n if gdat.boolcorrexpo:\n if np.amax(gdat.expo) <= 0.:\n raise Exception('Bad exposure.')\n\n # temp\n #gdat.expo[np.where(gdat.expo < 1e-50)] = 1e-50\n \n # exclude voxels with vanishing exposure\n if gdat.boolcorrexpo:\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n gdat.indxpixlrofi = np.intersect1d(gdat.indxpixlrofi, np.where(gdat.expo[i, :, m] > 0.)[0])\n \n gdat.indxcuberofi = np.meshgrid(gdat.indxener, gdat.indxpixlrofi, gdat.indxevtt, indexing='ij')\n gdat.numbpixl = gdat.indxpixlrofi.size\n gdat.indxpixl = np.arange(gdat.numbpixl)\n gdat.numbdata = gdat.numbener * gdat.numbevtt * gdat.numbpixl\n\n #gdat.lgalgridrofi = gdat.lgalgrid[gdat.indxpixlrofi]\n #gdat.bgalgridrofi = 
gdat.bgalgrid[gdat.indxpixlrofi]\n\n\n if gdat.typedata == 'inpt':\n gdat.sbrtdata = gdat.sbrtdata[tuple(gdat.indxcuberofi)]\n\n ## exposure\n if gdat.boolcorrexpo:\n gdat.expofull = np.copy(gdat.expo)\n gdat.expo = gdat.expo[tuple(gdat.indxcuberofi)]\n \n gdat.minmpara.expo = np.amin(gdat.expo[np.where(gdat.expo > 1e-100)])\n gdat.maxmpara.expo = np.amax(gdat.expo)\n gdat.minmpara.expo = np.amin(gdat.minmpara.expo)\n gdat.maxmpara.expo = np.amax(gdat.maxmpara.expo)\n \n # required to convert to an index of non-zero exposure pixels\n #if gdat.minmpara.expo > 0:\n # gdat.indxpixlroficnvt = np.arange(gdat.numbpixlfull)\n #else:\n # cntr = 0\n # gdat.indxpixlroficnvt = full(gdat.numbpixlfull, -1)\n # for j in gdat.indxpixlfull:\n # if j in gdat.indxpixlrofi:\n # gdat.indxpixlroficnvt[j] = cntr\n # cntr += 1\n #\n \n ## backgrounds\n for strgmodl in gdat.liststrgmodl:\n if gdat.typepixl == 'heal':\n sbrtbackhealfull = [[] for c in gmod.indxback]\n for c in gmod.indxback:\n sbrtbackhealfull[c] = np.copy(gmod.sbrtbacknorm[c])\n gmod.sbrtbacknormincl = [[] for c in gmod.indxback]\n for c in gmod.indxback:\n gmod.sbrtbacknormincl[c] = gmod.sbrtbacknorm[c][tuple(gdat.indxcuberofi)]\n \n if gdat.boolcorrexpo:\n gdat.expototl = []\n gdat.expototlmean = []\n gdat.expototl = np.sum(gdat.expo, axis=2)\n gdat.expototlmean = np.mean(gdat.expototl, axis=1)\n\n if gdat.typeelemspateval == 'locl':\n if gdat.typeexpr == 'gene':\n gdat.maxmangl = 1.\n if gdat.typeexpr == 'ferm':\n gdat.maxmangl = 20. / gdat.anglfact\n if gdat.typeexpr == 'tess':\n gdat.maxmangl = 25. / gdat.anglfact\n if gdat.typeexpr == 'chan':\n gdat.maxmangl = 15. / gdat.anglfact\n if gdat.typeexpr == 'hubb':\n gdat.maxmangl = 1. / gdat.anglfact\n else:\n gdat.maxmangl = gdat.maxmgangdata * np.sqrt(2.) * 2. * 1.1\n \n gdat.listnamespatmean = ['full']\n if gdat.typeexpr == 'ferm':\n gdat.listnamespatmean += ['innr']\n gdat.numbspatmean = len(gdat.listnamespatmean)\n gdat.indxspatmean = np.arange(gdat.numbspatmean)\n gdat.listindxcubespatmean = [[] for b in gdat.indxspatmean]\n gdat.indxcube = np.meshgrid(gdat.indxener, gdat.indxpixl, gdat.indxevtt, indexing='ij')\n for b, namespatmean in enumerate(gdat.listnamespatmean):\n if namespatmean == 'full':\n gdat.listindxcubespatmean[b] = gdat.indxcube\n if namespatmean == 'innr':\n gdat.indxpixlinnr = np.where(np.sqrt(gdat.lgalgrid**2 + gdat.bgalgrid**2) < 5. 
/ gdat.anglfact)[0]\n gdat.listindxcubespatmean[b] = np.meshgrid(gdat.indxener, gdat.indxpixlinnr, gdat.indxevtt, indexing='ij')\n \n if gdat.numbpixl > 1:\n # store pixels as unit vectors\n gdat.xdatgrid, gdat.ydatgrid, gdat.zaxigrid = retr_unit(gdat.lgalgrid, gdat.bgalgrid)\n \n # construct a lookup table for converting HealPix pixels to ROI pixels\n if gdat.typepixl == 'heal':\n path = gdat.pathpixlcnvt + 'pixlcnvt_%09g.p' % gdat.maxmgangdata\n\n if os.path.isfile(path):\n fobj = open(path, 'rb')\n gdat.pixlcnvt = pickle.load(fobj)\n fobj.close()\n else:\n gdat.pixlcnvt = np.zeros(gdat.numbpixlfull, dtype=int) - 1\n numbpixlmarg = gdat.indxpixlrofimarg.size\n for k in range(numbpixlmarg):\n dist = retr_angldistunit(gdat, lgalheal[gdat.indxpixlrofimarg[k]], bgalheal[gdat.indxpixlrofimarg[k]], gdat.indxpixl)\n gdat.pixlcnvt[gdat.indxpixlrofimarg[k]] = argmin(dist)\n fobj = open(path, 'wb')\n pickle.dump(gdat.pixlcnvt, fobj, protocol=pickle.HIGHEST_PROTOCOL)\n fobj.close()\n \n # dummy pixel indices for full (nonlocal) element kernel evaluation \n gdat.listindxpixl = []\n if gdat.typedata == 'mock':\n numb = max(np.sum(gmod.maxmpara.numbelem), np.sum(gmod.maxmpara.numbelem)) + 2\n else:\n numb = np.sum(gmod.maxmpara.numbelem) + 2\n for k in range(int(numb)):\n gdat.listindxpixl.append([])\n for kk in range(k):\n gdat.listindxpixl[k].append(gdat.indxpixl)\n \n # spatial averaging setup\n # temp\n \n # temp -- check if 1000 is too much\n gdat.numbanglelem = 1000\n \n # turn off relevant proposal types\n gdat.numbprop = 5\n gdat.indxprop = np.arange(gdat.numbprop)\n \n gdat.numbstdp = gmod.numbparagenrbase - gmod.numbpopl\n cntr = 0\n for l in gmod.indxpopl:\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n setattr(gdat.fitt.inxparagenrscalelemkind, nameparagenrelem + 'pop%d' % l, gdat.numbstdp)\n cntr += 1\n gdat.numbstdp += cntr\n \n gdat.lablstdp = np.copy(np.array(gmod.labltotlpara.genrbase[gmod.numbpopl:]))\n gdat.namestdp = np.copy(np.array(gmod.nameparagenrbase[gmod.numbpopl:]))\n for l in gmod.indxpopl:\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n gdat.lablstdp = np.append(gdat.lablstdp, getattr(gdat.fitt.labltotlpara, nameparagenrelem))\n gdat.namestdp = np.append(gdat.namestdp, nameparagenrelem + 'pop%d' % l)\n gdat.namestdp = gdat.namestdp.astype(object)\n gdat.lablstdp = list(gdat.lablstdp)\n gdat.indxstdp = np.arange(gdat.numbstdp)\n gdat.indxstdpprop = gdat.indxstdp\n \n # proposal scale indices for each parameter\n indxelemfull = [list(range(gmod.maxmpara.numbelem[l])) for l in gmod.indxpopl]\n gdat.fitt.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, indxelemfull, 'fitt')\n \n gdat.indxstdppara = np.zeros(gmod.numbparagenrfull, dtype=int) - 1\n cntr = 0\n gdat.indxstdppara[gmod.numbpopl:gmod.numbparagenrbase] = gmod.indxparagenrbase[gmod.numbpopl:] - gmod.numbpopl\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n for indx in gdat.fitt.this.indxparagenrfullelem[l][nameparagenrelem]:\n gdat.indxstdppara[indx] = cntr + gmod.numbparagenrbase - gmod.numbpopl\n cntr += 1\n \n # for the fitting model, define proposal type indices\n for name, valu in gmod.indxpara.__dict__.items():\n if not name.startswith('numbelem') and name != 'dist':\n if not isinstance(valu, int):\n continue\n indxstdp = gdat.indxstdppara[valu]\n setattr(gdat, 'indxstdp' + name, indxstdp)\n \n # for each parameter in the fitting model, determine if there is a corresponding parameter in the generative model\n 
gmod.corr = tdpy.gdatstrt()\n for k in gmod.indxvarbscal:\n name = gmod.namepara.scal[k]\n try:\n temp = getattr(gdat.true.this, name)\n except:\n temp = None\n setattr(gmod.corr, name, temp)\n\n gmod.corrparagenrscalbase = np.empty(gmod.numbparagenrbase)\n for k in gmod.indxparagenrbase:\n try:\n gmod.corrparagenrscalbase[k] = getattr(gdat.true, gmod.nameparagenrbase[k])\n except:\n gmod.corrparagenrscalbase[k] = None\n\n for namepara in gdat.fitt.listnameparaglob:\n setattr(gdat.labltotlpara, namepara, getattr(gdat.fitt.labltotlpara, namepara))\n\n # set parameter features common between true and fitting models\n for strgmodl in gdat.liststrgmodl:\n \n gmod = getattr(gdat, strgmodl)\n \n for namepara in gmod.namepara.kind:\n try:\n getattr(gdat.minmpara, namepara)\n getattr(gdat.maxmpara, namepara)\n except:\n try:\n setattr(gdat.minmpara, namepara, min(getattr(gdat.fitt.minmpara, namepara), getattr(gdat.true.minmpara, namepara)))\n setattr(gdat.maxmpara, namepara, max(getattr(gdat.fitt.maxmpara, namepara), getattr(gdat.true.maxmpara, namepara)))\n except:\n try:\n setattr(gdat.minmpara, namepara, getattr(gdat.fitt.minmpara, namepara))\n setattr(gdat.maxmpara, namepara, getattr(gdat.fitt.maxmpara, namepara))\n except:\n setattr(gdat.minmpara, namepara, getattr(gdat.true.minmpara, namepara))\n setattr(gdat.maxmpara, namepara, getattr(gdat.true.minmpara, namepara))\n \n # set plot limits for each model if not already set (for Gaussian, log-normal distributions)\n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n \n for namepara in gmod.namepara.kind:\n minm = getattr(gmod.minmpara, namepara)\n maxm = getattr(gmod.maxmpara, namepara)\n limt = np.array([minm, maxm])\n setattr(gmod.limtpara, namepara, limt)\n \n # construct bins for scalar parameters\n for namevarbscal in gmod.namepara.scal:\n \n # variables with only label and scaling\n if namevarbscal == 'lliktotl' or namevarbscal == 'lpripena':\n continue\n\n print('temp -- place here setp_varb for all variables')\n #retr_axis(gdat, namevarbscal)\n \n gmod = gdat.fitt\n # proposal scale\n if gmod.boollens or gdat.typedata == 'mock':\n \n gdat.stdp = 1e-4 + np.zeros(gdat.numbstdp)\n \n if gmod.typemodltran == 'pois' and gmod.numbpopl > 0:\n if gmod.maxmpara.numbelem[0] > 0:\n gdat.stdp[gdat.indxstdpmeanelempop0] = 1e-1\n \n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sigcen00evt0]] = 3e-2\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.bacpback0000en00]] = 1e-3\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.bacpback0000en00]] = 1e-1\n \n if gmod.boollens:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.lgalsour]] = 1e-3\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.bgalsour]] = 1e-3\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.fluxsour]] = 1e-2\n if gdat.numbener > 1:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sindsour]] = 1e-3\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sizesour]] = 1e-1\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.ellpsour]] = 1e-1\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.anglsour]] = 1e-1\n if gmod.typeemishost != 'none':\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.lgalhostisf0]] = 3e-4\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.bgalhostisf0]] = 3e-4\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.fluxhostisf0]] = 1e-3\n if gdat.numbener > 1:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sindhostisf0]] = 1e-3\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sizehostisf0]] = 3e-3\n \n if gmod.boollens:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.beinhostisf0]] = 1e-3\n if gmod.typeemishost != 'none':\n 
gdat.stdp[gdat.indxstdppara[gmod.indxpara.ellphostisf0]] = 1e-2\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.anglhostisf0]] = 1e-2\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.serihostisf0]] = 1e-2\n if gmod.boollens:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sherextr]] = 1e-1\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sangextr]] = 3e-2\n \n else:\n \n if gdat.typeexpr == 'ferm':\n gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)\n \n if gmod.typemodltran == 'pois' and gmod.numbparaelem > 0:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.meanelem]] = 4e-2\n \n for l in gmod.indxpopl:\n if gmod.typeprioflux[l] == 'powr':\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sloppriofluxpop0]] = 1e-1\n else:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.brekpriofluxpop0]] = 1e-1\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sloplowrpriofluxpop0]] = 1e-1\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.slopupprpriofluxpop0]] = 1e-1\n \n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 5e-3\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en01')]] = 1e-2\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en02')]] = 3e-2\n \n if 'fdfm' in gmod.listnameback: \n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0001en00')]] = 8e-4\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0001en01')]] = 1e-3\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0001en02')]] = 2e-3\n \n if 'dark' in gmod.listnameback: \n gmod.indxbackdark = gmod.listnameback.index('dark')\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback%04den00' % gmod.indxbackdark)]] = 2e-2\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback%04den01' % gmod.indxbackdark)]] = 2e-2\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback%04den02' % gmod.indxbackdark)]] = 3e-2\n \n if gmod.numbparaelem > 0:\n gdat.stdp[gdat.indxstdppop0flux] = 8e-2\n\n if gmod.spectype[0] == 'colr':\n gdat.stdp[gdat.indxstdppop0sindcolr0001] = 8e-2\n gdat.stdp[gdat.indxstdppop0sindcolr0002] = 2e-1\n \n if gdat.typeexpr == 'chan':\n gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)\n \n if gmod.typemodltran == 'pois' and gmod.numbparaelem > 0:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.meanelem]] = 2e-1\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.sloppriofluxpop0]] = 2e-1\n \n if gmod.numbparaelem > 0 and gdat.boolbinsspat:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.psfp]] = 4e-1\n \n if gdat.indxenerincl.size == 5 and (gdat.indxenerincl == np.arange(5)).all():\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 2e-2\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en01')]] = 3e-2\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en02')]] = 2e-2\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en03')]] = 2e-2\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en04')]] = 1e-2\n elif gdat.indxenerincl.size == 2 and (gdat.indxenerincl == np.array([2])).all():\n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 2e-2\n \n if gmod.numbparaelem > 0:\n if gdat.boolbinsspat:\n gdat.stdp[gdat.fitt.inxparagenrscalelemkind.lgalpop0] = 2e-2\n gdat.stdp[gdat.fitt.inxparagenrscalelemkind.bgalpop0] = 2e-2\n if gdat.numbener > 1:\n gdat.stdp[gdat.indxstdppop0sind] = 2e-1\n gdat.stdp[gdat.indxstdppop0flux] = 2e-1\n \n if gdat.typeexpr == 'gene':\n gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)\n \n if gmod.typemodltran == 'pois' and gmod.numbparaelem > 0:\n 
gdat.stdp[gdat.indxstdppara[gmod.indxpara.meanelem]] = 2e-1\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.slopprionobjpop0]] = 3e-1\n try:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.gwdtsloppop0]] = 3e-1\n except:\n pass\n\n if gmod.typeevalpsfn != 'none' and gdat.boolmodipsfn:\n gdat.stdp[gdat.indxstdppara[gmod.indxpara.psfp]] = 4e-1\n \n gdat.stdp[gdat.indxstdppara[getattr(gmod.indxpara, 'bacpback0000en00')]] = 2e-2\n \n if gmod.numbparaelem > 0:\n gdat.stdp[gdat.fitt.inxparagenrscalelemkind.lgalpop0] = 4e-2\n gdat.stdp[gdat.fitt.inxparagenrscalelemkind.bgalpop0] = 4e-2\n gdat.stdp[gdat.fitt.inxparagenrscalelemkind.nobjpop0] = 3e-1\n try:\n gdat.stdp[gdat.indxstdppop0gwdt] = 5e-1\n except:\n pass\n\n if gdat.typeexpr == 'fire':\n gdat.stdp = 1e-2 + np.zeros(gdat.numbstdp)\n \n if gdat.boolsqzeprop:\n gdat.stdp[:]= 1e-100\n \n if gdat.boolexplprop:\n gdat.stdp[:] = 1e100\n\n if (gdat.stdp > 1e100).any():\n raise Exception('')\n \n if (gdat.stdp == 0).any():\n raise Exception('')\n \n if gdat.stdp.size != gdat.numbstdp or gdat.indxstdp.size != gdat.stdp.size:\n print('gdat.stdp')\n summgene(gdat.stdp)\n print('gdat.numbstdp')\n print(gdat.numbstdp)\n print('gdat.indxstdp')\n print(gdat.indxstdp)\n raise Exception('')\n\n if gdat.typeverb > 1:\n # temp\n for strgmodl in gdat.liststrgmodl:\n print('strgmodl')\n print(strgmodl)\n print('Fixed dimensional parameters:')\n print('%20s%25s%5s%20s%20s' % ('name', 'labltotl', 'scal', 'minm', 'maxm'))\n for k in gmod.indxparagenrbase:\n print('%20s%25s%5s%20.6g%20.6g' % (gmod.nameparagenrbase[k], gmod.labltotlpara.genrbase[k], gmod.scalpara.genrbase[k], \\\n gmod.minmpara.genrbase[k], gmod.maxmpara.genrbase[k]))\n \n print('Element parameters')\n print('%20s%20s' % ('nameparagenrelem', 'scalcomp'))\n for l in gmod.indxpopl:\n for nameparagenrelem, scalcomp in zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l]):\n print('%20s%20s' % (nameparagenrelem, scalcomp))\n \n print('%20s%20s' % ('strgmodu', 'pdfnmodu'))\n for l in gmod.indxpopl:\n for strgmodu, pdfnmodu in zip(gmod.namepara.genrelemmodu[l], gmod.liststrgpdfnmodu[l]):\n print('%20s%20s' % (strgmodu, pdfnmodu))\n \n print('%20s%20s' % ('strgfeat', 'pdfnprio'))\n for l in gmod.indxpopl:\n for strgfeat, pdfnprio in zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l]):\n print('%20s%20s' % (strgfeat, pdfnprio))\n \n # proposals\n # terms in the log-acceptance probability\n gdat.listnametermlacp = []\n gdat.listlabltermlacp = []\n for l in gmod.indxpopl:\n if gmod.numbpopl > 1:\n strgpopl = '%d,' % l\n else:\n strgpopl = ''\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n labl = getattr(gmod.lablrootpara, nameparagenrelem)\n gdat.listlabltermlacp += ['$u_{%s%s}$' % (strgpopl, labl)]\n gdat.listnametermlacp += ['ltrp']\n gdat.listlabltermlacp += [u'$\\ln P(q)$']\n gdat.listnametermlacp += ['ljcb']\n gdat.listlabltermlacp += [r'$\\ln \\alpha_j$']\n \n gdat.numbtermlacp = len(gdat.listnametermlacp)\n gdat.indxtermlacp = np.arange(gdat.numbtermlacp)\n \n if gdat.probtran is None:\n if gmod.numbparaelem > 0:\n gdat.probtran = 0.4\n else:\n gdat.probtran = 0.\n if gdat.probspmr is None:\n if gmod.numbparaelem > 0:\n gdat.probspmr = gdat.probtran / 2.\n else:\n gdat.probspmr = 0.\n \n gdat.probbrde = 1. 
- gdat.probspmr\n\n if gdat.probbrde < 0:\n raise Exception('')\n gdat.lablproptype = ['Within']\n gdat.nameproptype = ['with']\n if gmod.numbparaelem > 0:\n gdat.lablproptype += ['Birth', 'Death', 'Split', 'Merge']\n gdat.nameproptype += ['brth', 'deth', 'splt', 'merg']\n gdat.numbproptype = len(gdat.lablproptype)\n gdat.nameproptype = np.array(gdat.nameproptype)\n cntr = tdpy.cntr()\n if gmod.numbparaelem > 0.:\n # birth\n gdat.indxproptypebrth = cntr.incr()\n # death\n gdat.indxproptypedeth = cntr.incr()\n if gdat.probspmr > 0.:\n # split\n gdat.indxproptypesplt = cntr.incr()\n # merge\n gdat.indxproptypemerg = cntr.incr()\n \n gdat.indxproptype = np.arange(gdat.numbproptype)\n gmod.indxpara.prop = np.arange(gmod.numbparagenrbase)\n gdat.numbstdpparagenrscalbase = gmod.numbparagenrbase - gmod.numbpopl\n #### filter for model elements\n gdat.listnamefilt = ['']\n if gdat.priofactdoff != 1.:\n gdat.listnamefilt += ['pars']\n #### model elements inside the image\n if gdat.boolelempsfnanyy:\n gdat.listnamefilt += ['bndr']\n #### model subhalos inside high normalized relevance region\n if 'lens' in gdat.typeelem:\n gdat.listnamefilt += ['nrel']\n \n if gdat.typedata == 'inpt':\n proc_cntpdata(gdat)\n \n # interpolated prior for models\n for strgmodl in gdat.liststrgmodl:\n \n gmod = getattr(gdat, strgmodl)\n \n lpdfprio = [None for l in gmod.indxpopl]\n lpdfprioobjt = [None for l in gmod.indxpopl]\n lpdfpriointp = [None for l in gmod.indxpopl]\n for l in gmod.indxpopl:\n for strgfeat, strgpdfn in zip(gmod.namepara.genrelem, gmod.listscalparagenrelem):\n if strgpdfn == 'tmplgrad':\n pdfnpriotemp = np.empty((gdat.numbsidecart + 1, gdat.numbsidecart + 1))\n lpdfprio, lpdfprioobjt = retr_spatprio(gdat, pdfnpriotemp)\n lpdfpriointp = lpdfprioobjt(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart)\n \n gdat.indxpoplcrin = 0\n if gmod.numbparaelem > 0:\n if gdat.rtagmock is not None:\n path = gdat.pathoutprtagmock + 'gdatfinlpost'\n gdatmock = readfile(path)\n gdat.liststrgvarbhist = []\n cntr = 0\n for l0 in gmod.indxpopl:\n for a, strgfeatfrst in enumerate(gmod.namepara.genrelem[l0]):\n if strgfeatfrst == 'spec':\n continue\n gdat.liststrgvarbhist.append([[] for k in range(5)])\n gdat.liststrgvarbhist[cntr][0] = 'hist' + strgfeatfrst + 'pop%d' % l\n gdat.liststrgvarbhist[cntr][1] = strgfeatfrst\n if gdat.rtagmock is not None:\n # cmpl\n gdat.liststrgvarbhist[cntr][3] = [[] for qq in gdatmock.indxrefr]\n # fdis\n gdat.liststrgvarbhist[cntr][4] = [[] for qq in gdatmock.indxrefr]\n booltemp = True\n if strgfeatfrst[-4:] in gdat.listnamerefr:\n q = gdat.listnamerefr.index(strgfeatfrst[-4:])\n booltemp = not strgfeatfrst in gdat.refr.namepara.elemonly[q][l]\n if booltemp:\n gdat.liststrgvarbhist[cntr][3][qq] = strgfeatfrst + 'pop%dpop%d' % (l, qq)\n gdat.liststrgvarbhist[cntr][4][qq] = strgfeatfrst + 'pop%dpop%d' % (qq, l)\n cntr += 1 \n for b, strgfeatseco in enumerate(gmod.namepara.genrelem[l0]):\n \n if strgfeatseco == 'spec':\n continue\n\n if not checstrgfeat(strgfeatfrst, strgfeatseco):\n continue\n \n gdat.liststrgvarbhist.append([[] for k in range(5)])\n gdat.liststrgvarbhist[cntr][0] = 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0\n gdat.liststrgvarbhist[cntr][1] = strgfeatfrst\n gdat.liststrgvarbhist[cntr][2] = strgfeatseco\n gdat.liststrgvarbhist[cntr][3] = [[] for qq in gdat.indxrefr]\n gdat.liststrgvarbhist[cntr][4] = [[] for qq in gdat.indxrefr]\n if gdat.rtagmock is not None:\n booltempfrst = True\n booltempseco = True\n if strgfeatfrst[-4:] in gdat.listnamerefr:\n q = 
gdat.listnamerefr.index(strgfeatfrst[-4:])\n booltempfrst = not strgfeatfrst in gdat.refr.namepara.elemonly[q][l]\n if strgfeatseco[-4:] in gdat.listnamerefr:\n q = gdat.listnamerefr.index(strgfeatseco[-4:])\n booltempseco = not strgfeatseco in gdat.refr.namepara.elemonly[q][l]\n for qq in gdatmock.indxrefr:\n if booltempfrst and booltempseco:\n gdat.liststrgvarbhist[cntr][3][qq] = strgfeatfrst + strgfeatseco + 'pop%dpop%d' % (l0, qq)\n gdat.liststrgvarbhist[cntr][4][qq] = strgfeatfrst + strgfeatseco + 'pop%dpop%d' % (qq, l0)\n elif booltempfrst:\n gdat.liststrgvarbhist[cntr][3][qq] = strgfeatfrst + 'pop%dpop%d' % (l0, qq)\n gdat.liststrgvarbhist[cntr][4][qq] = strgfeatfrst + 'pop%dpop%d' % (qq, l0)\n elif booltempseco:\n gdat.liststrgvarbhist[cntr][3][qq] = strgfeatseco + 'pop%dpop%d' % (l0, qq)\n gdat.liststrgvarbhist[cntr][4][qq] = strgfeatseco + 'pop%dpop%d' % (qq, l0)\n cntr += 1 \n \n # selection effects\n if gdat.typedata == 'inpt' and gmod.numbparaelem > 0:\n if gdat.numbsampboot is None:\n gdat.numbsampboot = gdat.numbsamp\n \n gdat.boolcrex = False\n if gdat.rtagmock is not None:\n for qq in gdatmock.indxrefr:\n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n for strgfeatfrst in gmod.namepara.genrelem[l]:\n \n if gdat.typeexpr == 'chan' and strgfeatfrst == 'redswo08':\n crex = (1. + gdat.meanpara.redswo08)**2\n else:\n crex = None\n \n setattr(gdat, 'crex' + strgfeatfrst + 'pop%dpop%dpop%d' % (q, qq, l), crex)\n \n for strgfeatseco in gmod.namepara.genrelem[l]:\n \n if not checstrgfeat(strgfeatfrst, strgfeatseco):\n continue\n \n if gdat.typeexpr == 'chan' and (strgfeatfrst == 'redswo08' or strgfeatseco == 'redswo08'):\n crex = np.empty((gdat.numbbinsplot, gdat.numbbinsplot))\n if strgfeatfrst == 'redswo08':\n crex[:, :] = (1. + gdat.meanpara.redswo08[:, None])**2\n else:\n crex[:, :] = (1. 
+ gdat.meanpara.redswo08[None, :])**2\n else:\n crex = None\n \n setattr(gdat, 'crex' + strgfeatfrst + strgfeatseco + 'pop%dpop%dpop%d' % (q, qq, l), crex)\n \n if gdat.refr.numbelemtotl > 0:\n for listtemp in gdat.liststrgvarbhist:\n strgvarb = listtemp[0]\n for qq in gdatmock.indxrefr:\n for q in gdat.indxrefr:\n nametemp = listtemp[1]\n if len(listtemp[2]) > 0:\n nametemp += listtemp[2]\n l = int(listtemp[4][qq].split('pop')[2][0])\n nametemp += 'pop%dpop%dpop%d' % (q, qq, l)\n crexhist = getattr(gdat, 'crex' + nametemp)\n if crexhist is not None:\n gdat.boolcrex = True\n \n ## internal correction\n gdat.boolcrin = gdat.typedata == 'inpt' and gdat.rtagmock is not None\n \n if gmod.numbparaelem > 0:\n # variables for which two dimensional functions will be plotted\n gdat.liststrgelemtdimvarbinit = ['hist']\n gdat.liststrgelemtdimvarbfram = deepcopy(gdat.liststrgelemtdimvarbinit)\n if gdat.boolinforefr:\n gdat.liststrgelemtdimvarbfram += ['cmpl', 'fdis']\n gdat.liststrgelemtdimvarbfinl = deepcopy(gdat.liststrgelemtdimvarbfram)\n if gdat.typedata == 'inpt':\n if gdat.boolcrex:\n gdat.liststrgelemtdimvarbfinl += ['excr']\n if gdat.boolcrin:\n gdat.liststrgelemtdimvarbfinl += ['incr']\n gdat.liststrgelemtdimvarbanim = deepcopy(gdat.liststrgelemtdimvarbfram)\n \n gdat.liststrgfoldinit = ['']\n if gmod.numbparaelem > 0 or gdat.typedata == 'mock' and gmod.numbparaelem > 0:\n gdat.liststrgfoldinit += ['', 'histodim/', 'histtdim/', 'scattdim/', 'cmpltdim/']\n gdat.liststrgfoldfram = ['']\n if gmod.numbparaelem > 0:\n gdat.liststrgfoldfram += ['scattdim/']\n gdat.liststrgfoldfinl = ['']\n if gdat.boolinforefr and gmod.numbparaelem > 0:\n gdat.liststrgfoldfram += ['assc']\n gdat.liststrgfoldfinl += ['assc']\n gdat.liststrgfoldanim = deepcopy(gdat.liststrgfoldfram)\n\n if gmod.numbparaelem > 0:\n for strgdims in ['odim/', 'tdim/']:\n for strgelemtdimvarb in gdat.liststrgelemtdimvarbfram:\n gdat.liststrgfoldfram += [strgelemtdimvarb + strgdims]\n for strgelemtdimvarb in gdat.liststrgelemtdimvarbfinl:\n gdat.liststrgfoldfinl += [strgelemtdimvarb + strgdims]\n\n # make folders\n #gdat.pathprio = gdat.pathplotrtag + 'prio/'\n #gdat.pathpost = gdat.pathplotrtag + 'post/'\n make_fold(gdat)\n\n setp_indxswepsave(gdat)\n \n if gdat.typeopti == 'hess':\n pathopti = gdat.pathoutprtag + 'opti.h5'\n if os.path.exists(pathopti):\n thisfile = h5py.File(pathopti, 'r')\n if thisfile['stdp'][()].size == gdat.stdp.size:\n print('Recovering the proposal scale from the previous run...')\n gdat.stdp = thisfile['stdp'][()]\n thisfile.close()\n\n if gdat.rtagmock is not None:\n if gdat.typedata == 'inpt':\n path = gdat.pathoutprtagmock + 'gdatfinlpost'\n booltemp = True\n try:\n gdatmock = readfile(path)\n except:\n booltemp = False\n gdat.rtagmock = None\n\n if booltemp:\n numbparaelem = gdatmock.true.numbparaelem\n if gdatmock.trueindxpopl != gmod.indxpopl:\n raise Exception('')\n for l in gmod.indxpopl:\n for strgfeat in gmod.namepara.genrelem[l]:\n if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':\n continue\n\n if strgfeat[-4:] in gdat.listnamerefr:\n continue\n reca = getattr(gdatmock.true, 'reca' + strgfeat + 'pop%d' % l)\n setattr(gdat.true, 'reca' + strgfeat + 'pop%d' % l, reca)\n gmod.namepara.genrelem = gdatmock.truegmod.namepara.genrelem\n \n if gmod.typeelemspateval[l] == 'locl' and gmod.numbparaelem > 0 or \\\n gdat.typedata == 'mock' and gmod.typeelemspateval[l] == 'locl' and gmod.numbparaelem > 0:\n gdat.numbprox = 3\n gdat.indxprox = np.arange(gdat.numbprox)\n 
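# the element amplitude range is split below into gdat.numbprox logarithmically spaced bins; each bin later gets its own maximum evaluation angle and nearby-pixel look-up table\n 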
minmparagenrscalelemampl = getattr(gdat.fitt.minmpara, gmod.nameparagenrelemampl[0])\n maxmparagenrscalelemampl = getattr(gdat.fitt.maxmpara, gmod.nameparagenrelemampl[0])\n gdat.binspara.prox = np.logspace(np.log10(minmparagenrscalelemampl), np.log10(maxmparagenrscalelemampl), gdat.numbprox + 1)\n \n # determine the maximum angle at which the contribution of the element will be computed\n if gdat.boolbinsspat:\n if gdat.maxmangleval is None:\n if gdat.typeexpr == 'chan':\n gdat.maxmangleval = np.array([5., 6., 9.]) / gdat.anglfact\n elif gdat.typeexpr == 'gene':\n gdat.maxmangleval = np.array([0.1, 0.2, 0.3]) / gdat.anglfact\n elif gdat.typeexpr == 'ferm':\n gdat.maxmangleval = np.array([7., 9., 15.]) / gdat.anglfact\n else:\n gdat.maxmangleval = np.empty(gdat.numbprox)\n for h in gdat.indxprox:\n if gdat.specfraceval == 0:\n gdat.maxmangleval[h] = 3. * gdat.maxmgang\n else: \n frac = min(1e-2, gdat.specfraceval * gdat.binspara.prox[0] / gdat.binspara.prox[h+1])\n psfnwdth = retr_psfnwdth(gdat, gmodstat.psfn, frac)\n gdat.indxmaxmangl = np.unravel_index(np.argmax(psfnwdth), psfnwdth.shape)\n gdat.maxmangleval[h] = psfnwdth[gdat.indxmaxmangl]\n \n if gdat.typeverb > 1:\n if gmod.typeelemspateval == 'locl':\n print('maxmangleval')\n print(gdat.anglfact * gdat.maxmangleval[l], ' [%s]' % gdat.strganglunit)\n\n setp_varb(gdat, 'angl', minm=0., maxm=10.)\n if gdat.boolelempsfnanyy and gdat.maxmpara.angl < np.amax(gdat.maxmangleval):\n print('gdat.maxmpara.angl')\n print(gdat.maxmpara.angl)\n print('gdat.maxmangleval')\n print(gdat.maxmangleval)\n \n raise Exception('Angular axis is too short.')\n\n # make a look-up table of nearby pixels for each pixel\n path = gdat.pathprox + 'indxprox_%08d_%s_%0.4g_%0.4g_%04d.p' % (gdat.numbpixl, gdat.typepixl, 1e2 * np.amin(gdat.maxmangleval), \\\n 1e2 * np.amax(gdat.maxmangleval), gdat.numbprox)\n \n if gdat.typeverb > 1:\n print('gdat.typepixl')\n print(gdat.typepixl)\n print('gdat.minmlgaldata')\n print(gdat.minmlgaldata)\n print('gdat.minmbgaldata')\n print(gdat.minmbgaldata)\n print('gdat.maxmlgaldata')\n print(gdat.maxmlgaldata)\n print('gdat.maxmbgaldata')\n print(gdat.maxmbgaldata)\n if gdat.typeverb > 0:\n print('Element evaluation will be performed up to')\n if gdat.boolbinsspat:\n print(gdat.maxmangleval * gdat.anglfact)\n\n if os.path.isfile(path):\n if gdat.typeverb > 0:\n print('Previously computed nearby pixel look-up table will be used.')\n print('Reading %s...' % path)\n fobj = open(path, 'rb')\n gdat.indxpixlprox = pickle.load(fobj)\n fobj.close()\n else:\n if gdat.typeverb > 0:\n print('Computing the look-up table...')\n gdat.indxpixlprox = [[] for h in gdat.indxprox]\n cntrsave = -1.\n # temp\n for j in gdat.indxpixl:\n dist = retr_angldistunit(gdat, gdat.lgalgrid[j], gdat.bgalgrid[j], gdat.indxpixl)\n dist[j] = 0.\n for h in gdat.indxprox:\n indxpixlproxtemp = np.where(dist < gdat.maxmangleval[h])[0]\n if indxpixlproxtemp.size > 2e4:\n indxpixlproxtemp = -1\n if gdat.maxmangl < np.sqrt(2.) 
* gdat.maxmgangdata:\n raise Exception('Angular axis used to interpolate the PSF should be longer.')\n \n if indxpixlproxtemp.size < 10:\n raise Exception('Pixel hash list should not have fewer than 10 pixels.')\n\n gdat.indxpixlprox[h].append(indxpixlproxtemp)\n cntrsave = tdpy.show_prog(j, gdat.numbpixl, cntrsave)\n fobj = open(path, 'wb')\n pickle.dump(gdat.indxpixlprox, fobj, protocol=pickle.HIGHEST_PROTOCOL)\n fobj.close()\n \n gdat.numbpixlprox = np.zeros(gdat.numbprox) \n for h in gdat.indxprox:\n for j in gdat.indxpixl:\n gdat.numbpixlprox[h] += len(gdat.indxpixlprox[h][j])\n gdat.numbpixlprox[h] /= len(gdat.indxpixlprox[h])\n \n if (gdat.numbpixlprox - np.mean(gdat.numbpixlprox) == 0.).all():\n raise Exception('Number of pixels in the hash lists should be different.')\n\n gdat.minmgang = 1e-3 * np.sqrt(2.) * gdat.maxmgangdata\n gdat.maxmgang = np.sqrt(2.) * gdat.maxmgangdata\n \n # try to pass true metamodel minima and maxima to common minima and maxima when that feature does not exist in the fitting metamodel\n if gdat.typedata == 'mock':\n for q in gdat.indxrefr:\n for strgfeat in gmod.namepara.genrelem[q]:\n booltemp = False\n for l in gmod.indxpopl:\n if strgfeat in gmod.namepara.genrelem[l]:\n booltemp = True\n if not booltemp:\n try:\n setattr(gdat.minmpara, 'minm' + strgfeat + gdat.listnamerefr[q], getattr(gdat.true.minm, strgfeat))\n setattr(gdat.maxmpara, 'maxm' + strgfeat + gdat.listnamerefr[q], getattr(gdat.true.maxm, strgfeat))\n except:\n pass\n\n ## reference spectra\n if gdat.listprefsbrtlabltotl is None:\n if gdat.typeexpr == 'chan' and gdat.boolbinsspat:\n gdat.listprefsbrtener = [[[] for k in range(3)]]\n gdat.listprefsbrtsbrt = [[[] for k in range(3)]]\n gdat.listprefsbrtlabltotl = ['Moretti+(2012)']\n gdat.listprefsbrttype = ['shad']\n \n for k, strgextn in enumerate(['', '_lower', '_higher']):\n path = gdat.pathinpt + 'Moretti2012%s.csv' % strgextn\n enerrefrplot = np.loadtxt(path, delimiter=',')[:, 0]\n sbrtrefrplot = np.loadtxt(path, delimiter=',')[:, 1] / gdat.factergskevv / enerrefrplot**2 * (180. / np.pi)**2\n gdat.listprefsbrtener[0][k] = enerrefrplot\n gdat.listprefsbrtsbrt[0][k] = sbrtrefrplot\n\n # temp\n if gdat.numbener > 1:\n if gdat.enerpivt == 0.:\n raise Exception('Pivot energy cannot be zero.')\n #if gdat.typeexpr != 'fire':\n # gdat.enerexpcfact = gdat.enerpivt - gdat.meanpara.ener\n #if gmod.numbparaelem > 0 and gdat.numbener > 1:\n # minmsinddistmeanpop0 = getattr(gmod, 'minmsinddistmeanpop0')\n # factspecener = (gdat.meanpara.ener / gdat.enerpivt)**(-np.sqrt(np.amin(minmsinddistmeanpop0) * np.amax(maxmsinddistmeanpop0)))\n else:\n pass\n #gdat.factspecener = np.array([1.])\n\n # temp -- this assumes square ROI\n if gdat.boolbinsspat:\n gdat.frambndrmodl = gdat.maxmlgaldata * gdat.anglfact\n \n if gmod.boollenshost or gdat.typedata == 'mock' and gmod.boollenshost:\n \n if gdat.typesers == 'intp':\n # construct pixel-convolved Sersic surface brightness template\n gdat.factsersusam = 10\n maxmlgal = 4. * np.sqrt(2.) 
* gdat.maxmlgal\n gdat.numblgalsers = int(np.ceil(maxmlgal / gdat.sizepixl))\n gdat.numblgalsersusam = (1 + gdat.numblgalsers) * gdat.factsersusam\n retr_axis(gdat, 'lgalsers')\n retr_axis(gdat, 'lgalsersusam')\n retr_axis(gdat, 'bgalsersusam')\n \n gdat.numbhalfsers = 20\n gdat.numbindxsers = 20\n \n retr_axis(gdat, 'halfsers')\n retr_axis(gdat, 'indxsers')\n \n gdat.binspara.lgalsersusammesh, gdat.binspara.bgalsersusammesh = np.meshgrid(gdat.binspara.lgalsersusam, gdat.binspara.bgalsersusam, indexing='ij')\n gdat.binspara.radisersusam = np.sqrt(gdat.binspara.lgalsersusammesh**2 + gdat.binspara.bgalsersusammesh**2)\n \n gdat.sersprofcntr = np.empty((gdat.numblgalsers + 1, gdat.numbhalfsers + 1, gdat.numbindxsers + 1))\n gdat.sersprof = np.empty((gdat.numblgalsers + 1, gdat.numbhalfsers + 1, gdat.numbindxsers + 1))\n \n for n in range(gdat.numbindxsers + 1):\n for k in range(gdat.numbhalfsers + 1):\n \n profusam = retr_sbrtsersnorm(gdat.binspara.radisersusam, gdat.binspara.halfsers[k], indxsers=gdat.binspara.indxsers[n])\n \n ## take the pixel average\n indxbgallowr = gdat.factsersusam * (gdat.numblgalsers + 1) / 2\n indxbgaluppr = gdat.factsersusam * (gdat.numblgalsers + 3) / 2\n for a in range(gdat.numblgalsers):\n indxlgallowr = gdat.factsersusam * a\n indxlgaluppr = gdat.factsersusam * (a + 1) + 1\n gdat.sersprofcntr[a, k, n] = profusam[(indxlgallowr+indxlgaluppr)/2, 0]\n gdat.sersprof[a, k, n] = np.mean(profusam[indxlgallowr:indxlgaluppr, :])\n \n temp, indx = unique(gdat.binspara.lgalsers, return_index=True)\n gdat.binspara.lgalsers = gdat.binspara.lgalsers[indx]\n gdat.sersprof = gdat.sersprof[indx, :, :]\n gdat.sersprofcntr = gdat.sersprofcntr[indx, :, :]\n \n indx = np.argsort(gdat.binspara.lgalsers)\n gdat.binspara.lgalsers = gdat.binspara.lgalsers[indx]\n gdat.sersprof = gdat.sersprof[indx, :, :]\n gdat.sersprofcntr = gdat.sersprofcntr[indx, :, :]\n\n #for strg, valu in gmod.cmappara.__dict__.items():\n # retr_ticklabl(gdat, strg)\n \n # generate true data\n if gdat.typedata == 'mock':\n \n if gdat.typeverb > 0:\n print('Generating mock data...')\n\n if gdat.seedtype == 'rand':\n np.random.seed()\n else:\n if gdat.typeverb > 0:\n print('Setting the seed for the RNG to %d...' 
% gdat.seedtype)\n np.random.seed(gdat.seedtype)\n \n ## unit sample vector\n gdat.true.this.paragenrunitfull = np.random.rand(gdat.true.numbparagenrfull)\n gdat.true.this.paragenrscalfull = np.zeros(gdat.true.numbparagenrfull)\n \n if gdat.true.numbparaelem > 0:\n gdat.true.this.numbelempopl = np.empty(gdat.true.maxmpara.numbelem[l], dtype=int)\n for l in gdat.true.indxpopl:\n gdat.true.this.paragenrunitfull[gdat.true.indxpara.numbelem[l]] = getattr(gdat.true.this, 'numbelempop%d' % l)\n gdat.true.this.numbelempopl[l] = getattr(gdat.true.this, 'numbelempop%d' % l)\n\n gdat.true.this.indxelemfull = [[] for l in gdat.true.indxpopl]\n for l in gdat.true.indxpopl:\n gdat.true.this.indxelemfull[l] = list(range(gdat.true.numbelem[l]))\n gdat.true.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.true.this.indxelemfull, 'true')\n else:\n gdat.true.this.indxelemfull = []\n gdat.true.this.indxparagenrfullelem = None\n\n if gdat.true.numbparaelem > 0:\n if gdat.seedelem is None:\n np.random.seed()\n else:\n np.random.seed(gdat.seedelem)\n gdat.true.this.paragenrunitfull[gdat.true.numbparagenrbase:] = np.random.rand(gdat.true.numbparagenrelemtotl)\n \n gdat.true.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'true', gdat.true.this.paragenrunitfull, gdat.true.this.indxparagenrfullelem)\n \n # impose true values (valu)\n for k in gdat.true.indxparagenr:\n \n if gdat.true.numbparaelem > 0 and (k in gdat.true.indxpara.numbelem or \\\n gdat.true.typemodltran == 'pois' and k in gdat.true.indxpara.meanelem):\n continue\n \n # assume the true PSF\n if gdat.true.typeevalpsfn != 'none' and gdat.numbpixl > 1 and k in gdat.true.indxpara.psfp:\n gdat.true.this.paragenrscalfull[k] = gdat.true.psfpexpr[k-gdat.true.indxpara.psfp[0]]\n else:\n ## read input mock model parameters\n try:\n # impose user-defined true parameter\n gdat.true.this.paragenrscalfull[k] = getattr(gdat.true, gdat.true.namepara.genrscalfull[k])\n except:\n pass\n \n if gdat.typeverb > 0:\n show_paragenrscalfull(gdat, None, strgmodl='true')\n\n if gmod.boollenshost:\n proc_samp(gdat, None, 'this', 'true', boolinit=True)\n \n #for strgmodl in gdat.liststrgmodl:\n # gmod = getattr(gdat, strgmodl)\n # print('gmod.minmpara.numbelempop0')\n # print(gmod.minmpara.numbelempop0)\n # print('gmod.minmpara.numbelem')\n # print(gmod.minmpara.numbelem)\n #raise Exception('')\n \n # construct bins for element parameters of the true model\n for strgmodl in ['true']:\n \n gmod = getattr(gdat, strgmodl)\n\n # list of names for element parameters, concatenated across all populations\n for l in gmod.indxpopl:\n if gmod.maxmpara.numbelem[l] > 0:\n # temp -- does not cover the case when different populations have parameters with the same name\n for strgfeat in gmod.listnameparaglob:\n #for strgfeat in gmod.namepara.genrelem[l]:\n if strgfeat[:-4] == 'etag':\n continue\n #retr_axis(gdat, strgfeat)\n #if strgfeat in gmod.namepara.elem:\n # retr_axis(gdat, strgfeat + 'prio')\n \n proc_samp(gdat, None, 'this', 'true', boolinit=True)\n \n # transfer current state of the true model to the reference model \n for strg, valu in gdat.true.this.__dict__.items():\n if strg == 'dictelem':\n # modify the current state of the element parameters of the true model to include uncertainty\n valutemp = [[] for l in gdat.true.indxpopl]\n for l in gdat.true.indxpopl:\n valutemp[l] = dict()\n for nameparaelem in gdat.true.this.dictelem[l]:\n valutemp[l][nameparaelem] = np.zeros((3, gdat.true.this.dictelem[l][nameparaelem].size))\n valutemp[l][nameparaelem][0, :] = 
gdat.true.this.dictelem[l][nameparaelem]\n else:\n valutemp = valu\n setattr(gdat.refr, strg, valutemp)\n if gdat.makeplot and gdat.makeplotinit:\n plot_samp(gdat, None, 'this', 'true', 'init')\n \n for strgmodl in gdat.liststrgmodl:\n gmod = getattr(gdat, strgmodl)\n print('gmod.minmpara.numbelempop0')\n print(gmod.minmpara.numbelempop0)\n print('gmod.minmpara.numbelem')\n print(gmod.minmpara.numbelem)\n \n ## initialization\n gdat.fitt.this = tdpy.gdatstrt()\n gdat.fitt.next = tdpy.gdatstrt()\n init_stat(gdat)\n \n # process the parameter vector\n proc_samp(gdat, None, 'this', 'fitt', boolinit=True)\n \n #liststrgcbar = ['llikmaps', 'perc', 'percresi', 'expo', 'lpdfspatpriointp', 'conv', 'magn', 'deflcomp', 'resiconvelem', 'resimagn']\n #for strgcbar in liststrgcbar:\n # retr_ticklabl(gdat, strgcbar)\n \n # temp\n #for strgmodl in gdat.liststrgmodl:\n # for namesele in gdat.listnamesele:\n # for namefeat in gdat.listnamefeatsele:\n # for strglimt in gdat.liststrglimt:\n # try:\n # getattr(gdat, strglimt + namefeat + namesele)\n # except:\n # setattr(gdat, strglimt + namefeat + namesele, getattr(gdat, strglimt + namefeat))\n\n # construct bins for element parameters of the fitting model\n #for strgmodl in ['fitt']:\n # \n # gmod = getattr(gdat, strgmodl)\n\n # # list of names for element parameters, concatenated across all populations\n # for l in gmod.indxpopl:\n # if gmod.maxmpara.numbelem[l] > 0:\n # # temp -- does not cover the case when different populations have parameters with the same name\n # for strgfeat in gmod.listnameparaglob:\n # #for strgfeat in gmod.namepara.genrelem[l]:\n # if strgfeat[:-4] == 'etag':\n # continue\n # #retr_axis(gdat, strgfeat)\n # #if strgfeat in gmod.namepara.elem:\n # # retr_axis(gdat, strgfeat + 'prio')\n \n gdat.numbbinspdfn = 50\n \n # scalar variable setup continued\n for strgbins in ['lowr', 'higr']:\n for strgecom in ['dfnc', 'dfncsubt']:\n setattr(gdat, 'scalhistcntp' + strgbins + strgecom + 'en00evt0', 'self')\n setattr(gdat, 'minmhistcntp' + strgbins + strgecom + 'en00evt0', 0.)\n setattr(gdat, 'maxmhistcntp' + strgbins + strgecom + 'en00evt0', gdat.numbpixl)\n setattr(gdat, 'facthistcntp' + strgbins + strgecom + 'en00evt0', 1.)\n for i in gdat.indxener:\n setattr(gdat, 'scalfracsdenmeandarkdfncsubten%02d' % i, 'self')\n setattr(gdat, 'minmfracsdenmeandarkdfncsubten%02d' % i, 0.)\n setattr(gdat, 'maxmfracsdenmeandarkdfncsubten%02d' % i, 1.)\n setattr(gdat, 'factfracsdenmeandarkdfncsubten%02d' % i, 1.)\n \n gmod.scalbooldfncsubt = 'self'\n gdat.minmbooldfncsubt = -0.5\n gdat.maxmbooldfncsubt = 1.5\n gdat.factbooldfncsubt = 1.\n\n #sys.stdout = logg(gdat)\n #gdat.log.close()\n\n # initial plots\n if gdat.makeplot and gdat.makeplotinit:\n plot_init(gdat)\n\n if gdat.typeverb > 0:\n sizetotl = 0.\n for root, dirs, listfile in os.walk(gdat.pathoutp):\n for thisfile in listfile:\n sizetotl += os.path.getsize(root + '/' + thisfile) / 2**30\n if sizetotl > 10.:\n print('Warning: PCAT data path size is %d GB' % sizetotl)\n\n if gdat.typedata == 'inpt':\n \n ## rotate element coordinates to the ROI center\n if gdat.typepixl == 'heal' and (gdat.lgalcntr != 0. or gdat.bgalcntr != 0.):\n for q in gdat.indxrefr:\n for l in gmod.indxpopl:\n rttr = hp.rotator.Rotator(rot=[rad2deg(gdat.lgalcntr), rad2deg(gdat.bgalcntr), 0.], deg=True, eulertype='ZYX')\n gdat.refr.dictelem[q]['bgal'][0, :], gdat.refrlgal[0, :] = rttr(pi / 2. - gdat.refrbgal[0, :], gdat.refrlgal[0, :])\n gdat.refr.dictelem[q]['bgal'][0, :] = pi / 2. 
- gdat.refrbgal[0, :]\n\n ## assign zero to nonspecified uncertainties for the reference element features\n for q in gdat.indxrefr:\n for strgfeat in gdat.refr.namepara.elem[q]:\n if strgfeat == 'gang' or strgfeat == 'aang':\n continue\n if strgfeat == 'etag':\n continue\n refrfeat = getattr(gdat.refr, strgfeat)\n if refrfeat[q].ndim == 1:\n refrfeat[q] = np.tile(refrfeat[q], (3, 1)) \n \n # temp\n #if gdat.refr.numbelem > 0:\n # gdat.refrfluxbrgt, gdat.refrfluxbrgtassc = retr_fluxbrgt(gdat, gdat.refrlgal, gdat.refrbgal, gdat.refrflux[0, :])\n \n print('gdat.liketype')\n print(gdat.liketype)\n\n print('Data settings')\n print('gdat.numbener')\n print(gdat.numbener)\n print('gdat.numbevtt')\n print(gdat.numbevtt)\n\n print('Model settings')\n print('gdat.fitt.numbpopl')\n print(gdat.fitt.numbpopl)\n print('gdat.fitt.numbparagenrbase')\n print(gdat.fitt.numbparagenrbase)\n \n for strgmodl in gdat.liststrgmodl:\n for l in gmod.indxpopl:\n for strgfeat, strgpdfn in zip(gmod.namepara.genrelemmodu[l], gmod.liststrgpdfnmodu[l]):\n if strgpdfn == 'tmpl':\n if gdat.lgalprio is None or gdat.bgalprio is None:\n gdat.lgalprio = np.concatenate((gmod.lgal))\n gdat.bgalprio = np.concatenate((gmod.bgal))\n gdat.numbspatprio = gdat.lgalprio.size\n \n # spatial template for the catalog prior\n # temp -- this should move outside the if\n gdat.pdfnspatpriotemp = np.zeros((gdat.numbsidecart + 1, gdat.numbsidecart + 1))\n for k in range(gdat.numbspatprio):\n gdat.pdfnspatpriotemp[:] += 1. / np.sqrt(2. * np.pi) / gdat.stdvspatprio * \\\n exp(-0.5 * (gdat.binspara.lgalcartmesh - gdat.lgalprio[k])**2 / gdat.stdvspatprio**2) * \\\n exp(-0.5 * (gdat.binspara.bgalcartmesh - gdat.bgalprio[k])**2 / gdat.stdvspatprio**2)\n gdat.pdfnspatpriotemp /= np.amax(gdat.pdfnspatpriotemp)\n \n if gdat.typedata == 'inpt':\n\n # rotate reference elements to the spatial coordinate system of PCAT\n # temp -- this does not rotate the uncertainties!\n\n if gdat.typeverb > 0:\n print('Rotating the reference elements...')\n for q in gdat.indxrefr:\n # temp -- this should depend on q\n if len(gdat.listpathwcss) > 0:\n listhdun = ap.io.fits.open(gdat.listpathwcss)\n wcso = ap.wcs.WCS(listhdun[0].header)\n skycobjt = ap.coordinates.SkyCoord(\"galactic\", l=gdat.refr.dictelem[q]['lgal'][0, :] * 180. / pi, b=gdat.refr.dictelem[q]['bgal'][0, :] * 180. 
/ pi, unit='deg')\n rasc = skycobjt.fk5.ra.degree\n decl = skycobjt.fk5.dec.degree\n lgal, bgal = wcso.wcs_world2pix(rasc, decl, 0)\n lgal -= gdat.numbpixllgalshft + gdat.numbsidecarthalf\n bgal -= gdat.numbpixlbgalshft + gdat.numbsidecarthalf\n lgal *= gdat.sizepixl\n bgal *= gdat.sizepixl\n gdat.refr.dictelem[q]['lgal'][0, :] = bgal\n gdat.refr.dictelem[q]['bgal'][0, :] = lgal\n\n ## preprocess reference element features\n for q in gdat.indxrefr:\n # temp -- this should depend on q\n # temp -- this does not properly calculate uncertainties\n gdat.refrgang[q] = np.zeros((3, gdat.refr.dictelem[q]['lgal'].shape[1]))\n gdat.refraang[q] = np.zeros((3, gdat.refr.dictelem[q]['lgal'].shape[1]))\n gdat.refrgang[q][:, :] = retr_gang(gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :])[None, :]\n gdat.refraang[q][:, :] = retr_aang(gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :])[None, :]\n\n # save all reference element features\n for strgfeat in gdat.refr.namepara.elemtotl:\n refrfeattotl = [[] for q in gdat.indxrefr]\n for q in gdat.indxrefr:\n for strgfeat in gdat.refr.namepara.elem[q]:\n refrfeat = getattr(gdat.refr, strgfeat)\n for l in gmod.indxpopl:\n if len(refrfeat[q]) > 0:\n refrfeattotl[q] = refrfeat[q]\n setattr(gdat.refr, strgfeat + 'totl', refrfeattotl)\n \n # find the reference elements inside the ROI\n gdat.indxrefrpntsrofi = [[] for q in gdat.indxrefr]\n for q in gdat.indxrefr:\n gdat.indxrefrpntsrofi[q] = np.where((np.fabs(gdat.refr.dictelem[q]['lgal'][0, :]) < gdat.maxmgangdata) & \\\n (np.fabs(gdat.refr.dictelem[q]['bgal'][0, :]) < gdat.maxmgangdata))[0]\n for strgfeat in gdat.refr.namepara.elemtotl:\n refrfeat = getattr(gdat.refr, strgfeat)\n refrfeatrofi = [[] for q in gdat.indxrefr]\n for q in gdat.indxrefr:\n if len(refrfeat[q]) > 0:\n refrfeatrofi[q] = refrfeat[q][..., gdat.indxrefrpntsrofi[q]]\n setattr(gdat.refr, strgfeat, refrfeatrofi)\n \n # temp -- gdat.refr.numbelem is defined twice, one before and one after the filter. The initial definition is needed for strgfeat definitions.\n gdat.refr.numbelem = [[] for q in gdat.indxrefr]\n gdat.refr.numbelemtotl = 0\n for q in gdat.indxrefr:\n gdat.refr.numbelem[q] = 0\n gdat.refr.numbelem[q] = gdat.refr.dictelem[q]['lgal'].shape[1]\n gdat.refr.numbelem[q] = np.sum(gdat.refr.numbelem[q])\n gdat.refr.numbelemtotl += np.sum(gdat.refr.numbelem[q]) \n \n ## check that all reference element features are finite\n for q in gdat.indxrefr:\n for strgfeat in gdat.refr.namepara.elem[q]:\n if strgfeat == 'etag':\n continue\n refrfeat = getattr(gdat.refr, strgfeat)\n if len(refrfeat[q]) > 0:\n indxbadd = np.where(np.logical_not(np.isfinite(refrfeat[q])))\n if indxbadd[0].size > 0:\n refrfeat[q][indxbadd] = 0.\n if gdat.typeverb > 0:\n print('Warning: Provided reference element feature is not finite. Defaulting to 0...')\n \n if refrfeat[q].size == 0:\n print('Warning! A reference element feature has length zero!')\n print('strgfeat')\n print(strgfeat)\n else:\n if np.amin(refrfeat[q]) == 0. and np.amax(refrfeat[q]) == 0.:\n print('Warning! 
A reference element feature is all np.zeros!')\n raise Exception('')\n \n ## element feature indices ordered with respect to the amplitude variable\n refrfeatsort = [[] for q in gdat.indxrefr]\n if not (gdat.typedata == 'mock' and gmod.numbparaelem == 0):\n for q in gdat.indxrefr:\n refrparagenrscalelemampl = getattr(gdat.refr, gdat.refr.nameparagenrelemampl[q])\n if len(refrparagenrscalelemampl[q]) > 0:\n indxelem = np.argsort(refrparagenrscalelemampl[q][0, :])[::-1]\n for strgfeat in gdat.refr.namepara.elem[q]:\n refrfeat = getattr(gdat.refr, strgfeat)\n if len(refrfeat[q]) > 0:\n refrfeatsort[q] = refrfeat[q][..., indxelem]\n setattr(gdat.refr, strgfeat, refrfeatsort)\n \n # bin reference element features\n for q in gdat.indxrefr:\n for strgfeatfrst in gdat.refr.namepara.elem[q]:\n if strgfeatfrst.startswith('etag'):\n continue\n refrfeatfrst = getattr(gdat.refr, strgfeatfrst)\n if len(refrfeatfrst[q]) > 0:\n binsfrst = getattr(gdat.binspara, strgfeatfrst)\n hist = np.histogram(refrfeatfrst[q][0, :], binsfrst)[0]\n setattr(gdat.refr, 'hist' + strgfeatfrst + 'pop%d' % q, hist)\n for strgfeatseco in gdat.refr.namepara.elem[q]:\n if strgfeatseco.startswith('etag'):\n continue\n refrfeatseco = getattr(gdat.refr, strgfeatseco)\n \n strgfeattdim = strgfeatfrst + strgfeatseco + 'pop%d' % q\n \n if not checstrgfeat(strgfeatfrst, strgfeatseco):\n continue\n \n if len(refrfeatseco[q]) > 0:\n binsseco = getattr(gdat.binspara, strgfeatseco)\n hist = np.histogram2d(refrfeatfrst[q][0, :], refrfeatseco[q][0, :], bins=(binsfrst, binsseco))[0]\n setattr(gdat.refr, 'hist' + strgfeattdim, hist)\n \n if gmod.numbparaelem > 0:\n # plot settings\n ## upper limit of histograms\n if gdat.limtydathistfeat is None:\n gdat.limtydathistfeat = [0.5, max(100., 10**np.ceil(np.log10(gdat.refr.numbelemtotl)))]\n #gdat.limtydathistfeat = [0.5, max(100., 10**np.ceil(np.log10(gmod.maxmpara.numbelemtotl)))]\n\n # initial plots\n if gdat.makeplot and gdat.makeplotinit:\n # problem-specific plots\n if gdat.makeplotintr:\n plot_intr(gdat)\n #plot_pert()\n #plot_king(gdat)\n plot_lens(gdat)\n #plot_3fgl_thrs(gdat)\n #if gdat.typeexpr == 'ferm':\n # plot_fgl3(gdat)\n \n # find the pixels at which data count maps have local maxima\n if gdat.typepixl == 'cart':\n for i in gdat.indxener:\n for m in gdat.indxevtt:\n # temp\n gdat.indxxdatmaxm, gdat.indxydatmaxm = tdpy.retr_indximagmaxm(gdat.cntpdatacart[i, :, m])\n \n if not gdat.boolsqzeexpo and np.amax(gdat.cntpdata) < 1.:\n raise Exception('Data counts per pixel is less than 1.')\n \n # check the data\n if (np.fabs(gdat.cntpdata - np.round(gdat.cntpdata)) > 1e-3).any():\n raise Exception('')\n if np.amin(gdat.cntpdata) < 0.:\n raise Exception('')\n \n # list of variables for which the posterior is collected at each proposal\n gdat.liststrgvarbarryswep = ['memoresi', 'accpprob', 'boolpropfilt', 'boolpropaccp', 'indxproptype', 'amplpert']\n for namechro in gdat.listnamechro:\n gdat.liststrgvarbarryswep += ['chro' + namechro]\n gdat.liststrgvarbarryswep += ['ltrp']\n if gdat.probtran > 0.:\n for l in gmod.indxpopl:\n gdat.liststrgvarbarryswep += ['auxiparapop%d' % l]\n gdat.liststrgvarbarryswep += ['ljcb']\n \n # write the numpy RNG state to file\n with open(gdat.pathoutprtag + 'stat.p', 'wb') as thisfile:\n \tpickle.dump(np.random.get_state(), thisfile)\n \n # process lock for simultaneous plotting\n lock = mp.Manager().Lock()\n \n if gdat.typeverb > 0:\n print('Writing the global state to the disc before spawning workers...')\n path = gdat.pathoutprtag + 'gdatinit'\n 
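# writfile() presumably serializes the global gdat object to the path constructed above, so that worker processes can later read it back with readfile()\n 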
writfile(gdat, path) \n gdat.filestat = open(gdat.pathoutprtag + 'stat.txt', 'w')\n gdat.filestat.write('gdatinit written.\\n')\n gdat.filestat.close()\n \n # exit before running the sampler\n if gdat.boolmockonly:\n if gdat.typeverb > 0:\n print('Mock dataset is generated. Quitting...')\n return gdat.rtag\n \n # perform an initial run, sampling from the prior\n if gdat.checprio:\n \n if gdat.typeverb > 0:\n print('Sampling from the prior...')\n \n ## perform sampling\n worksamp(gdat, lock, strgpdfn='prio')\n \n ## post process the samples\n proc_finl(gdat=gdat, strgpdfn='prio')\n \n if gdat.typeverb > 0:\n print('Sampling from the posterior...')\n \n # run the sampler\n worksamp(gdat, lock)\n \n # post process the samples\n proc_finl(gdat=gdat)\n \n # make animations\n if gdat.makeanim and gdat.numbplotfram > 1:\n proc_anim(gdat.rtag)\n\n if gdat.typeverb > 0:\n print('The output is at ' + gdat.pathoutprtag)\n if gdat.makeplot:\n print('The plots are at ' + gdat.pathplotrtag)\n print('PCAT has run successfully. Returning to the OS...')\n\n return gdat.rtag\n\n\ndef initarry( \\\n dictvarbvari, \\\n dictvarb, \\\n listnamecnfgextn, \\\n forcneww=False, \\\n forcprev=False, \\\n strgpara=False, \\\n \n # Boolean flag to execute the runs in parallel\n boolexecpara=True, \\\n \n strgcnfgextnexec=None, \\\n listnamevarbcomp=[], \\\n listscalvarbcomp=[], \\\n listlablvarbcomp=[], \\\n listtypevarbcomp=[], \\\n listpdfnvarbcomp=[], \\\n listgdatvarbcomp=[], \\\n \n # parameter name, axis label, tick values and scaling of the input variable changed across PCAT runs\n namexaxivari=None, \\\n lablxaxivari=None, \\\n tickxaxivari=None, \\\n scalxaxivari=None, \\\n ):\n \n print('Running PCAT in array mode...')\n \n numbiter = len(dictvarbvari)\n indxiter = np.arange(numbiter) \n \n cntrcomp = 0\n \n if boolexecpara:\n cntrproc = 0\n\n listrtag = []\n listpridchld = []\n for k, strgcnfgextn in enumerate(listnamecnfgextn):\n \n if strgcnfgextnexec is not None:\n if strgcnfgextn != strgcnfgextnexec:\n continue\n \n strgcnfg = inspect.stack()[1][3] + '_' + strgcnfgextn\n \n dictvarbtemp = deepcopy(dictvarb)\n for strgvarb, valu in dictvarbvari[strgcnfgextn].items():\n dictvarbtemp[strgvarb] = valu\n dictvarbtemp['strgcnfg'] = strgcnfg\n \n listrtagprev = retr_listrtagprev(strgcnfg, gdat.pathpcat)\n cntrcomp += 1\n\n if (not forcneww and strgcnfgextnexec is None or forcprev and strgcnfgextnexec is not None) and len(listrtagprev) > 0:\n print('Found at least one previous run with the configuration %s' % strgcnfg)\n print('Skipping...')\n listrtag.append(listrtagprev[-1])\n else:\n if len(listrtagprev) > 0:\n print('Found at least one previous run. 
But, repeating the run anways...')\n else:\n print('Did not find any previous run.')\n if boolexecpara and strgcnfgextnexec is None:\n cntrproc += 1\n prid = os.fork()\n if prid > 0:\n listpridchld.append(prid)\n else:\n print('Forking a child process to run the configuration extension...')\n rtag = init(**dictvarbtemp)\n os._exit(0)\n else:\n print('Calling the main PCAT function without forking a child...')\n listrtag.append(init(**dictvarbtemp))\n \n if boolexecpara and strgcnfgextnexec is None:\n for prid in listpridchld:\n os.waitpid(prid, 0)\n if cntrproc > 0:\n print('Exiting before comparion plots because of parallel execution...')\n return\n \n if cntrcomp == 0:\n print('Found no runs...')\n\n print('Final-processing run outputs...')\n for rtag in listrtag:\n print(rtag)\n proc_finl(rtag=rtag, strgpdfn='post')\n proc_anim(rtag)\n \n strgtimestmp = tdpy.retr_strgtimestmp()\n \n if strgcnfgextnexec is not None or namexaxivari is None: \n return\n \n print('Making plots to compare the output of different PCAT runs...')\n \n if 'boolmockonly' in dictvarb and dictvarb['boolmockonly']:\n listgdat = retr_listgdat(listrtag, typegdat='init')\n else:\n listgdat = retr_listgdat(listrtag)\n \n numbgdat = len(listgdat)\n\n for namevarbscal in listgdat[0].listnamevarbscal:\n booltemp = True\n for k in range(1, numbgdat - 1):\n if not namevarbscal in listgdat[k].listnamevarbscal:\n booltemp = False\n if booltemp:\n if namevarbscal in listnamevarbcomp:\n raise Exception('')\n listnamevarbcomp += [namevarbscal]\n listscalvarbcomp += [getattr(listgdat[0], 'scal' + namevarbscal)]\n listlablvarbcomp += [getattr(listgdat[0], 'labl' + namevarbscal + 'totl')]\n listtypevarbcomp += ['pctl']\n listpdfnvarbcomp += ['post']\n listgdatvarbcomp += ['post']\n \n # add others to the variable list\n listnamevarbcomp += ['lliktotl', 'lliktotl', 'infopost', 'bcom', 'lliktotl', 'lliktotl', 'lliktotl', 'levipost']\n listscalvarbcomp += ['self', 'self', 'self', 'self', 'self', 'self', 'self', 'self']\n listlablvarbcomp += ['$\\ln P(D|M_{min})$', '$\\ln P(D|M_{max})$', '$D_{KL}$', '$\\eta_B$', '$\\sigma_{P(D|M)}$', r'$\\gamma_{P(D|M)}$', \\\n r'$\\kappa_{P(D|M)}$', '$\\ln P_H(D)$']\n listtypevarbcomp += ['minm', 'maxm', '', '', 'stdv', 'skew', 'kurt', '']\n listpdfnvarbcomp += ['post', 'post', 'post', 'post', 'post', 'post', 'post', 'post']\n listgdatvarbcomp += ['post', 'post', 'post', 'post', 'post', 'post', 'post', 'post']\n \n arrytemp = np.array([len(listnamevarbcomp), len(listscalvarbcomp), len(listlablvarbcomp), len(listtypevarbcomp), len(listpdfnvarbcomp), len(listgdatvarbcomp)])\n if (arrytemp - np.mean(arrytemp) != 0.).all():\n raise Exception('')\n\n # add log-evidence to the variable list, if prior is also sampled\n booltemp = True\n for k in range(numbgdat):\n if not listgdat[k].checprio:\n booltemp = False\n \n if booltemp:\n listgdatprio = retr_listgdat(listrtag, typegdat='finlprio')\n \n listnamevarbcomp += ['leviprio']\n listscalvarbcomp += ['self']\n listlablvarbcomp += ['$\\ln P_{pr}(D)$']\n listtypevarbcomp += ['']\n listpdfnvarbcomp += ['prio']\n listgdatvarbcomp += ['prio']\n \n # time stamp\n strgtimestmp = tdpy.retr_strgtimestmp()\n \n dictoutp = dict()\n liststrgvarbtotl = []\n for (typevarbcomp, pdfnvarbcomp, namevarbcomp) in zip(listtypevarbcomp, listpdfnvarbcomp, listnamevarbcomp):\n strgtemp = typevarbcomp + pdfnvarbcomp + namevarbcomp\n liststrgvarbtotl.append(strgtemp)\n dictoutp[strgtemp] = [[] for k in range(numbiter)]\n \n for k in indxiter:\n for a, strgvarbtotl in 
enumerate(liststrgvarbtotl):\n if listgdatvarbcomp[a] == 'prio':\n gdattemp = listgdatprio[k]\n else:\n gdattemp = listgdat[k]\n dictoutp[strgvarbtotl][k] = getattr(gdattemp, strgvarbtotl)\n\n pathbase = '%s/imag/%s_%s/' % (gdat.pathpcat, strgtimestmp, inspect.stack()[1][3])\n cmnd = 'mkdir -p %s' % pathbase \n os.system(cmnd)\n cmnd = 'gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=%smrgd.pdf' % pathbase\n for strgvarbtotl, varboutp in dictoutp.items():\n \n figr, axis = plt.subplots(figsize=(6, 6))\n ydat = np.empty(numbiter)\n yerr = np.zeros((2, numbiter))\n \n indxlist = liststrgvarbtotl.index(strgvarbtotl)\n \n if listscalvarbcomp is None:\n scalyaxi = getattr(listgdat[0], 'scal' + listnamevarbcomp[indxlist])\n else:\n scalyaxi = listscalvarbcomp[indxlist]\n \n lablyaxi = listlablvarbcomp[indxlist]\n \n try:\n if listtypevarbcomp[indxlist] == 'pctl':\n trueyaxi = getattr(listgdat[0], 'true' + listnamevarbcomp[indxlist])\n else:\n trueyaxi = getattr(listgdat[0], 'true' + listtypevarbcomp[indxlist] + listnamevarbcomp[indxlist])\n except:\n trueyaxi = None\n \n for k in indxiter:\n \n if isinstance(varboutp[k], list) or isinstance(varboutp[k], np.ndarray) and varboutp[k].ndim > 2:\n raise Exception('')\n elif isinstance(varboutp[k], float):\n ydat[k] = varboutp[k]\n else:\n if listtypevarbcomp[indxlist] != 'pctl':\n yerr[:, k] = 0.\n if varboutp[k].ndim == 2:\n if varboutp[k].shape[1] != 1:\n raise Exception('varboutp format is wrong.')\n varboutp[k] = varboutp[k][:, 0]\n if listtypevarbcomp[indxlist] == 'pctl':\n yerr[:, k] = getattr(listgdat[k], 'errr' + listpdfnvarbcomp[indxlist] + listnamevarbcomp[indxlist])[:, 0]\n else:\n if listtypevarbcomp[indxlist] == 'pctl':\n yerr[:, k] = getattr(listgdat[k], 'errr' + listpdfnvarbcomp[indxlist] + listnamevarbcomp[indxlist])\n ydat[k] = varboutp[k][0]\n \n axis.errorbar(indxiter+1., ydat, yerr=yerr, color='b', ls='', markersize=15, marker='o', lw=3)\n indxrtagyerr = np.where((yerr[0, :] > 0.) | (yerr[1, :] > 0.))[0]\n if indxrtagyerr.size > 0:\n temp, listcaps, temp = axis.errorbar(indxiter[indxrtagyerr]+1., ydat[indxrtagyerr], yerr=yerr[:, indxrtagyerr], \\\n color='b', ls='', capsize=15, markersize=15, marker='o', lw=3)\n for caps in listcaps:\n caps.set_markeredgewidth(3)\n \n if trueyaxi is not None:\n axis.axhline(trueyaxi, ls='--', color='g')\n \n if lablxaxivari is None:\n lablxaxivari = getattr(listgdat[0], 'labl' + namexaxivari + 'totl')\n \n if scalxaxivari is None:\n scalxaxivari = getattr(listgdat[0], 'scal' + namexaxivari)\n \n axis.set_xlabel(lablxaxivari)\n axis.set_xticks(indxiter+1.)\n axis.set_xticklabels(tickxaxivari)\n \n axis.set_ylabel(lablyaxi)\n if scalyaxi == 'logt':\n axis.set_yscale('log')\n plt.tight_layout()\n \n pathfull = '%s%s_%s_%s.pdf' % (pathbase, strgtimestmp, inspect.stack()[1][3], liststrgvarbtotl[indxlist])\n print('Writing to %s...' % pathfull)\n plt.savefig(pathfull)\n plt.close(figr)\n \n cmnd += ' %s' % pathfull\n\n print(cmnd)\n os.system(cmnd)\n\n print('Making animations...')\n for rtag in listrtag:\n print('Working on %s...' 
% rtag)\n proc_anim(rtag=rtag)\n \n print('Compiling run plots...')\n cmnd = 'python comp_rtag.py'\n for rtag in listrtag: \n cmnd += ' %s' % rtag\n os.system(cmnd)\n\n return listrtag\n\n\ndef retr_rtag(strgcnfg, strgnumbswep):\n \n rtag = strgcnfg + '_' + strgnumbswep\n \n return rtag\n\n\nclass logg(object):\n \n def __init__(self, gdat):\n self.terminal = sys.stdout\n gdat.pathstdo = gdat.pathoutprtag + 'stdo.txt'\n self.log = open(gdat.pathstdo, 'a')\n pathlink = gdat.pathplotrtag + 'stdo.txt'\n os.system('ln -s %s %s' % (gdat.pathstdo, pathlink))\n \n def write(self, strg):\n self.terminal.write(strg)\n self.log.write(strg) \n\n def flush(self):\n pass\n\n\ndef worktrac(pathoutprtag, lock, strgpdfn, indxprocwork):\n\t\n try:\n return work(pathoutprtag, lock, strgpdfn, indxprocwork)\n except:\n raise Exception(\"\".join(traceback.format_exception(*sys.exc_info())))\n\n\ndef opti_hess(gdat, gdatmodi):\n \n gmod = gdat.fitt\n\n if gmod.numbparaelem > 0:\n cntr = 0\n for l in gmod.indxpopl:\n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n gdatmodi.indxparastdp[gmod.numbparagenrbase-gmod.numbpopl+cntr] = np.concatenate(gdatmodi.this.indxparagenrfullelem[nameparagenrelem])\n cntr += 1\n \n if gmod.numbparaelem > 0:\n gdatmodi.next.indxelemfull = gdatmodi.this.indxelemfull\n gdatmodi.next.indxparagenrfullelem = gdatmodi.this.indxparagenrfullelem\n else:\n gdatmodi.next.indxparagenrfullelem = None\n\n gdatmodi.stdpmatr = np.zeros((gdat.numbstdp, gdat.numbstdp)) \n gdatmodi.hess = np.zeros((gdat.numbstdp, gdat.numbstdp)) \n deltlpos = np.zeros((3, 3))\n diffpara = np.empty(gdat.numbstdp)\n for k, indxparatemp in enumerate(gdatmodi.indxparastdp):\n if len(indxparatemp) == 0:\n diffpara[k] = 0.\n else:\n diffpara[k] = min(min(np.amin(gdatmodi.this.paragenrunitfull[indxparatemp]) * 0.9, np.amin(1. 
- gdatmodi.this.paragenrunitfull[indxparatemp]) * 0.9), 1e-5)\n\n #gdatmodi.this.sampunitsave = np.copy(gdatmodi.this.paragenrunitfull)\n \n #if gmod.numbparaelem > 0:\n # gdatmodi.dictmodi = [[] for l in gmod.indxpopl]\n # for l in gmod.indxpopl:\n # gdatmodi.dictmodi[l] = dict()\n # gdatmodi.dictmodi[l][gmod.nameparagenrelemampl[l] + 'indv'] = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[gmod.nameparagenrelemampl[l]][l]]\n # for nameparagenrelem in gmod.namepara.genrelem[l]:\n # gdatmodi.dictmodi[l]['stdv' + nameparagenrelem + 'indv'] = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]]\n #if gmod.numbparaelem > 0:\n # gdatmodi.this.indxparagenrfullelemconc = np.concatenate([gdatmodi.this.indxparagenrfullelem[l]['full'] for l in gmod.indxpopl])\n #if gdat.boolpropcomp:\n # indxsamptranprop = gdatmodi.this.indxparagenrfullelemconc\n #else:\n # indxsamptranprop = []\n \n deltlpos[1, 1] = gdatmodi.this.lliktotl\n for indxstdpfrst in gdat.indxstdpprop:\n for indxstdpseco in gdat.indxstdpprop:\n \n if indxstdpfrst > indxstdpseco:\n continue\n \n if indxstdpfrst == indxstdpseco:\n \n #if gmod.numbparaelem > 0:\n # if k in gdatmodi.this.indxparagenrfullelemconc:\n # indxtrapmoditemp = k - gmod.indxparagenrfulleleminit\n # indxpoplmoditemp = np.array([np.amin(np.where(indxtrapmoditemp // gmod.numbparagenrelemcumr == 0))])\n # numbparapoplinittemp = indxtrapmoditemp - gmod.numbparagenrelemcuml[indxpoplmoditemp[0]]\n # indxelemmoditemp = [numbparapoplinittemp // gmod.numbparagenrelemsing[indxpoplmoditemp[0]]]\n # gmod.indxparagenrelemmoditemp = numbparapoplinittemp % gmod.numbparagenrelemsing[indxpoplmoditemp[0]]\n # nameparagenrelem = gmod.namepara.genrelem[indxpoplmoditemp[0]][gmod.indxparagenrelemmoditemp] \n # indxsampampltemp = k - gmod.indxparagenrelemmoditemp + gmod.indxparagenrelemampl[indxpoplmoditemp[0]]\n # #amplfact = gdatmodi.this.paragenrscalfull[indxsampampltemp] / getattr(gdat, 'minm' + gmod.nameparagenrelemampl[indxpoplmoditemp[0]])\n # stdv = 1. / np.sqrt(gdatmodi.hess[indxstdpfrst, indxstdpseco])\n # gdatmodi.stdpmatr[indxstdpfrst, indxstdpseco] += stdv\n # gdatmodi.dictmodi[indxpoplmoditemp[0]]['stdv' + nameparagenrelem + 'indv'][indxelemmoditemp[0]] = stdv\n # gdatmodi.dictmodi[indxpoplmoditemp[0]][gmod.nameparagenrelemampl[indxpoplmoditemp[0]] + 'indv'][indxelemmoditemp[0]] = \\\n # gdatmodi.this.paragenrscalfull[indxsampampltemp]\n \n if len(gdatmodi.indxparastdp[indxstdpseco]) == 0:\n continue\n \n for a in range(2):\n gdatmodi.next.paragenrunitfull = np.copy(gdatmodi.this.paragenrunitfull)\n if a == 0:\n gdatmodi.next.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] -= diffpara[indxstdpseco]\n if a == 1:\n gdatmodi.next.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] += diffpara[indxstdpseco]\n \n gdatmodi.next.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gdatmodi.next.paragenrunitfull, gdatmodi.next.indxparagenrfullelem)\n \n proc_samp(gdat, gdatmodi, 'next', 'fitt')\n if a == 0:\n deltlpos[0, 1] = gdatmodi.next.lliktotl\n if a == 1:\n deltlpos[2, 1] = gdatmodi.next.lliktotl\n \n gdatmodi.hess[indxstdpfrst, indxstdpseco] = 1. / 4. / diffpara[indxstdpseco]**2 * np.fabs(deltlpos[0, 1] + \\\n deltlpos[2, 1] - 2. 
* deltlpos[1, 1])\n else:\n # temp\n continue\n\n for a in range(4):\n gdatmodi.this.paragenrunitfull = np.copy(gdatmodi.this.sampunitsave)\n if a == 0:\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] -= diffpara\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] -= diffpara\n if a == 1:\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] += diffpara\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] += diffpara\n if a == 2:\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] -= diffpara\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] += diffpara\n if a == 3:\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpfrst]] += diffpara\n gdatmodi.this.paragenrunitfull[gdatmodi.indxparastdp[indxstdpseco]] -= diffpara\n proc_samp(gdat, gdatmodi, 'this', 'fitt')\n if a == 0:\n deltlpos[0, 0] = gdatmodi.this.lpostotl\n if a == 1:\n deltlpos[2, 2] = gdatmodi.this.lpostotl\n if a == 2:\n deltlpos[1, 2] = gdatmodi.this.lpostotl\n if a == 3:\n deltlpos[2, 1] = gdatmodi.this.lpostotl\n gdatmodi.hess[indxstdpfrst, indxstdpseco] = 1. / 4. / diffpara**2 * \\\n (deltlpos[2, 2] + deltlpos[0, 0] - deltlpos[1, 2] - deltlpos[2, 1])\n \n if not np.isfinite(gdatmodi.hess[indxstdpfrst, indxstdpseco]):\n raise Exception('')\n if gdat.booldiagmode and not np.isfinite(gdatmodi.next.paragenrscalfull).all():\n raise Exception('')\n if gdatmodi.hess[indxstdpfrst, indxstdpseco] == 0.:\n raise Exception('')\n\n gdatmodi.hess[np.where(gdatmodi.hess == 0)] = 10.\n\n # temp\n #gdatmodi.stdpmatr = np.sqrt(linalg.inv(gdatmodi.hess))\n numbdoffefff = gmod.numbparagenrbase\n if gmod.numbparaelem > 0:\n numbdoffefff += gmod.numbparagenrelem * 10\n gdatmodi.stdpmatr = np.sqrt(1. 
/ gdatmodi.hess) / np.sqrt(numbdoffefff)\n \n if (gdatmodi.stdpmatr == 0).any():\n raise Exception('')\n \n gdatmodi.stdp = gdatmodi.stdpmatr[gdat.indxstdp, gdat.indxstdp]\n \n\ndef worksamp(gdat, lock, strgpdfn='post'): \n \n pathorig = gdat.pathoutprtag + 'stat.txt'\n pathlink = gdat.pathplotrtag + 'stat.txt'\n os.system('ln -s %s %s' % (pathorig, pathlink))\n \n if gdat.numbproc == 1:\n worktrac(gdat.pathoutprtag, lock, strgpdfn, 0)\n else:\n if gdat.typeverb > 0:\n print('Forking the sampler...')\n\n # process pool\n pool = mp.Pool(gdat.numbproc)\n \n # spawn the processes\n workpart = functools.partial(worktrac, gdat.pathoutprtag, lock, strgpdfn)\n pool.map(workpart, gdat.indxproc)\n\n pool.close()\n pool.join()\n \n gdat.filestat = open(gdat.pathoutprtag + 'stat.txt', 'a')\n gdat.filestat.write('gdatmodi%s written.\\n' % strgpdfn)\n gdat.filestat.close()\n\n\ndef work(pathoutprtag, lock, strgpdfn, indxprocwork):\n \n print('Worker #%d' % indxprocwork)\n \n # read the initial global object, gdatinit\n path = pathoutprtag + 'gdatinit'\n gdat = readfile(path) \n \n gmod = gdat.fitt\n \n # define time functions\n timereal = time.time()\n timeproc = time.clock()\n \n # re-seed the random number generator for this chain\n if gdat.boolseedchan:\n np.random.seed(indxprocwork + 1000)\n\n # construct a global object for the walker\n gdatmodi = tdpy.gdatstrt()\n gdatmodi.this = tdpy.gdatstrt()\n gdatmodi.next = tdpy.gdatstrt()\n gdatmodi.indxprocwork = indxprocwork\n \n gdatmodi.this = gdat.fitt.this\n\n # path of gdatmodi\n gdatmodi.pathgdatmodi = gdat.pathoutprtag + 'gdatmodi%04d' % gdatmodi.indxprocwork + gdat.strgpdfn\n \n print('Determining the parameter indices of the fitting model with only the floating parameters...')\n\n gdatmodi.booldone = False\n gdatmodi.lock = lock\n gdatmodi.indxprocwork = indxprocwork\n \n # plotting factors for scalar variables\n for name in gmod.namepara.scal:\n if name in gmod.nameparagenrbase:\n gmod.indxpara.temp = np.where(gmod.nameparagenrbase == name)[0]\n \n # find the list of variables for which the posterior will be calculated\n if not gdat.boolmockonly:\n \n if gdat.typeverb > 1:\n print('gdatmodi.this.paragenrunitfull')\n print(gdatmodi.this.paragenrunitfull)\n show_paragenrscalfull(gdat, gdatmodi)\n proc_samp(gdat, gdatmodi, 'this', 'fitt')\n \n gdat.liststrgvarbarrysamp = []\n gdat.liststrgvarblistsamp = []\n for strg, valu in gdatmodi.this.__dict__.items():\n if not strg in gdat.liststrgvarbarryswep:\n if isinstance(valu, np.ndarray) or isinstance(valu, float):\n gdat.liststrgvarbarrysamp.append(strg)\n elif isinstance(valu, list) and strg != 'indxparagenrfullelem' and strg != 'psfnconv' and \\\n strg != 'trueindxelemasscmiss' and strg != 'trueindxelemasschits':\n gdat.liststrgvarblistsamp.append(strg)\n if gdat.typeverb == 2:\n print('gdat.liststrgvarbarrysamp')\n print(gdat.liststrgvarbarrysamp)\n print('gdat.liststrgvarblistsamp')\n print(gdat.liststrgvarblistsamp)\n \n gdat.liststrgvarblistswep = []\n if gdat.typeverb == 2:\n print('gdat.liststrgvarblistswep')\n print(gdat.liststrgvarblistswep)\n\n gdat.liststrgvarblist = gdat.liststrgvarblistsamp + gdat.liststrgvarblistswep\n\n gdatmodi.liststrgchan = gdat.liststrgvarbarryswep + ['paragenrscalbase'] + gmod.namepara.scal\n \n if gdat.typeverb == 2:\n print('gdatmodi.liststrgchan')\n print(gdatmodi.liststrgchan)\n \n gdat.liststrgvarbarry = gdat.liststrgvarbarrysamp + gdat.liststrgvarbarryswep\n \n ## sample index\n gdatmodi.cntrswep = 0\n \n if gdat.booldiagmode:\n if gdat.indxswepsave.size != 
gdat.numbsamp:\n raise Exception('Inappropriate number of samples.')\n\n # initialize the worker sampler\n \n # definitions required for the initial sample\n gdatmodi.this.boolpropfilt = True\n gdatmodi.this.boolpropaccp = True\n \n # dummy definitions required for logs\n gdatmodi.this.indxproptype = np.zeros(1, dtype=int)\n for l in gmod.indxpopl:\n setattr(gdatmodi.this, 'auxiparapop%d' % l, np.zeros(gmod.numbparagenrelemsing[l]))\n gdatmodi.this.lpri = np.zeros(gmod.numblpri)\n gdatmodi.this.lpau = np.zeros(1)\n gdatmodi.this.ltrp = np.zeros(1)\n gdatmodi.this.ljcb = np.zeros(1)\n gdatmodi.this.accpprob = np.zeros(1)\n gdatmodi.this.memoresi = np.zeros(1)\n gdatmodi.this.amplpert = np.zeros(1)\n \n # make sure the first sample derived variables are generated on gdatmodi\n proc_samp(gdat, gdatmodi, 'this', 'fitt')\n \n # log the initial state\n if False and gdat.typeverb > 1:\n tdpy.show_memo(gdatmodi, 'gdatmodi')\n \n for k, name in enumerate(gdat.listnamechro):\n setattr(gdatmodi.this, 'chro' + name, 0.)\n \n gdatmodi.stdp = np.copy(gdat.stdp)\n \n # indices of parameters corresping to each proposal scale\n gdatmodi.indxparastdp = [[] for k in gdat.indxstdp]\n for k in gmod.indxparagenrbase:\n if k < gmod.numbpopl:\n continue\n gdatmodi.indxparastdp[k-gmod.numbpopl] = [k]\n \n workdict = {}\n # list of variable names with type numpy array\n for strgvarb in gdat.liststrgvarbarry:\n valu = getattr(gdatmodi.this, strgvarb)\n if strgvarb in gdat.liststrgvarbarryswep:\n if isinstance(valu, dict):\n shap = [gdat.numbswep, len(valu.keys())]\n elif isinstance(valu, float) or isinstance(valu, bool):\n shap = [gdat.numbswep, 1]\n else:\n shap = [gdat.numbswep] + list(valu.shape)\n else:\n shap = [gdat.numbsamp] + list(valu.shape)\n workdict['list' + gdat.strgpdfn + strgvarb] = np.zeros(shap)\n \n # list of variable names with type list\n for strgvarb in gdat.liststrgvarblist:\n workdict['list' + gdat.strgpdfn + strgvarb] = []\n \n ## saved state of the sample index used for logging progress status\n gdatmodi.percswepsave = -1\n \n # store the initial sample as the best fit sample\n gdatmodi.maxmllikswep = np.sum(gdatmodi.this.llik)\n gdatmodi.indxswepmaxmllik = -1 \n gdatmodi.sampmaxmllik = np.copy(gdatmodi.this.paragenrscalfull)\n \n if gdat.typeverb > 0:\n print('Sampling...')\n print('gdat.stdp')\n for k in gdat.indxstdp:\n print('%04d %s %g' % (k, gdat.namestdp[k], gdat.stdp[k]))\n\n gdatmodi.this.stdp = np.copy(gdat.stdp)\n\n gdatmodi.optidone = False \n \n while gdatmodi.cntrswep < gdat.numbswep:\n \n initchro(gdat, gdatmodi, 'totl')\n \n # Boolean flag to indicate burn-in phase\n gdatmodi.boolburn = gdatmodi.cntrswep < gdat.numbburn\n \n # temp\n if gdat.typeopti == 'hess' and gdatmodi.cntrswep % gdat.numbstdp * 4 == 0 and gdatmodi.cntrswep < gdat.numbburn:\n if gdat.typeverb > 0:\n print('Optimizing proposal scale...')\n opti_hess(gdat, gdatmodi)\n \n if (gdatmodi.stdpmatr[gdat.indxstdp, gdat.indxstdp] < 0.5).all():\n gdatmodi.optidone = True\n \n if gdat.typeopti == 'hess' and gdatmodi.cntrswep == gdat.numbburn:\n path = gdat.pathoutprtag + 'opti.h5'\n if gdat.typeverb > 0:\n print('Writing the estimated covariance matrix to %s...' 
% path)\n thisfile = h5py.File(path, 'w')\n thisfile.create_dataset('stdp', data=gdatmodi.stdp)\n thisfile.close()\n \n if gdat.makeplot:\n \n xdat = gdat.indxstdp\n ydat = gdatmodi.stdp\n \n pathopti = getattr(gdat, 'path' + gdat.strgpdfn + 'opti')\n path = pathopti + 'stdv%d.pdf' % gdatmodi.indxprocwork\n tdpy.plot_gene(path, xdat, ydat, scalydat='logt', \\\n lablxdat='$i_{stdp}$', lablydat=r'$\\sigma$', plottype='hist', limtydat=[np.amin(ydat) / 2., 2. * np.amax(ydat)])\n \n # plot uncertainties of element parameters as a function of amplitude parameter\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n path = pathopti + 'stdv' + nameparagenrelem + 'pop%d.pdf' % l\n xdat = [gdatmodi.dictmodi[l][gmod.nameparagenrelemampl[l] + 'indv'], meanplot]\n \n if nameparagenrelem == gmod.nameparagenrelemampl[l]:\n ydat = [gdatmodi.dictmodi[l]['stdv' + nameparagenrelem + 'indv'], \\\n gdatmodi.stdp[getattr(gdat, 'indxstdp' + nameparagenrelem)] / (meanplot / minm)**2.]\n else:\n ydat = [gdatmodi.dictmodi[l]['stdv' + nameparagenrelem + 'indv'], \\\n gdatmodi.stdp[getattr(gdat, 'indxstdp' + nameparagenrelem)] / (meanplot / minm)**0.5]\n lablxdat = getattr(gmod.lablpara, gmod.nameparagenrelemampl[l] + 'totl')\n scalxdat = getattr(gdat, 'scal' + gmod.nameparagenrelemampl[l] + 'plot')\n limtxdat = np.array(getattr(gdat, 'limt' + gmod.nameparagenrelemampl[l]))\n tdpy.plot_gene(path, xdat, ydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, limtxdat=limtxdat, \\\n lablydat=r'$\\sigma_{%s}$' % getattr(gmod.lablpara, nameparagenrelem), plottype=['scat', 'lghtline'])\n #tdpy.plot_gene(path, xdat, ydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, limtxdat=limtxdat, \\\n # lablydat=r'$\\sigma_{%s}$%s' % (getattr(gmod.lablpara, nameparagenrelem), getattr(gmod.lablpara, nameparagenrelem + 'unit')), plottype=['scat', 'lghtline'])\n \n tdpy.plot_gene(path, xdat, ydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, limtxdat=limtxdat, \\\n lablydat=r'$\\sigma_{%s}$' % getattr(gmod.lablpara, nameparagenrelem), plottype=['scat', 'lghtline'])\n\n\n if gdat.typeverb > 1:\n print('-' * 10)\n print('Sweep %d' % gdatmodi.cntrswep)\n\n # decide whether to make a frame\n thismakefram = (gdatmodi.cntrswep % gdat.numbswepplot == 0) and \\\n gdatmodi.indxprocwork == int(float(gdatmodi.cntrswep) / gdat.numbswep * gdat.numbproc) \\\n and gdat.makeplotfram and gdat.makeplot\n \n # decide whether to make a log\n boollogg = False\n if gdat.typeverb > 0:\n gdatmodi.this.percswep = 5 * int(20. * gdatmodi.cntrswep / gdat.numbswep) \n if gdatmodi.this.percswep > gdatmodi.percswepsave or thismakefram:\n gdatmodi.percswepsave = gdatmodi.this.percswep\n minmswepintv = max(0, gdatmodi.cntrswep - 1000)\n maxmswepintv = gdatmodi.cntrswep + 1\n if maxmswepintv > minmswepintv:\n boollogg = True\n \n # propose the next sample\n if gdat.typeverb > 1: \n print('-----')\n print('thislliktotl')\n print(gdatmodi.this.lliktotl)\n print('thislpostotl')\n print(gdatmodi.this.lpostotl)\n print('Proposing...')\n \n if gdat.boolburntmpr and gdatmodi.cntrswep < gdat.numbburntmpr:\n gdatmodi.this.facttmpr = ((gdatmodi.cntrswep + 1.) / gdat.numbburntmpr)**4\n gdatmodi.this.tmprfactstdv = 1. / gdatmodi.this.facttmpr\n #gdatmodi.this.tmprlposelem = -1000. * (1. - gdatmodi.this.facttmpr) * np.concatenate(gdatmodi.this.indxparagenrfullelem['full']).size\n gdatmodi.this.tmprlposelem = 0.\n else:\n gdatmodi.this.tmprfactstdv = 1.\n gdatmodi.this.tmprlposelem = 0. 
\n \n # temp -- this can be faster\n for l in gmod.indxpopl:\n setattr(gdatmodi.this, 'auxiparapop%d' % l, np.empty(gmod.numbparagenrelemsing[l]))\n\n if gdat.typeverb > 1:\n show_paragenrscalfull(gdat, gdatmodi)\n \n # make a proposal\n initchro(gdat, gdatmodi, 'prop')\n prop_stat(gdat, gdatmodi, 'fitt')\n stopchro(gdat, gdatmodi, 'prop')\n\n if gdat.booldiagmode:\n \n for k in gmod.indxparagenrbase:\n if gmod.scalpara.genrbase[k] == 'logt' and gdatmodi.this.paragenrscalfull[k] < 0.:\n raise Exception('')\n\n if not np.isfinite(gdatmodi.next.paragenrscalfull).all():\n raise Exception('')\n \n if gdat.typeverb > 1:\n show_paragenrscalfull(gdat, gdatmodi, strgstat='next')\n \n if (thismakefram or gdat.boolsave[gdatmodi.cntrswep] or boollogg):\n # preprocess the current sample to calculate variables that are not updated\n proc_samp(gdat, gdatmodi, 'this', 'fitt')\n \n # diagnostics\n if gdat.booldiagmode:\n \n initchro(gdat, gdatmodi, 'diag')\n \n indxsampbadd = np.where((gdatmodi.this.paragenrunitfull[gmod.numbpopl:] > 1.) | (gdatmodi.this.paragenrunitfull[gmod.numbpopl:] < 0.))[0] + 1\n if indxsampbadd.size > 0:\n raise Exception('Unit sample vector went outside [0,1].')\n \n if not np.isfinite(gdatmodi.this.lliktotl):\n raise Exception('Log-likelihood is infinite!')\n \n #indxsampclos = np.where((gdatmodi.this.paragenrscalfull < 0.01) & (gdatmodi.this.paragenrscalfull % 1. != 0.))[0]\n #indxsampclos = list(indxsampclos)\n #for indxparagenrfulltemp in indxsampclos:\n # for l in gmod.indxpopl:\n # if not indxparagenrfulltemp in gdatmodi.this.indxparagenrfullelem[l]['full']:\n # indxsampclos.remove(indxparagenrfulltemp)\n #indxsampclos = np.array(indxsampclos)\n #if indxsampclos.size > 0:\n # print 'Warning! State is too close to 0!'\n # print gmod.namepara[indxsampclos]\n\n #indxsampclos = np.where((gdatmodi.this.paragenrscalfull > 0.99) & (gdatmodi.this.paragenrscalfull % 1. != 0.))[0]\n #indxsampclos = list(indxsampclos)\n #for indxparagenrfulltemp in indxsampclos:\n # for l in gmod.indxpopl:\n # if not indxparagenrfulltemp in gdatmodi.this.indxparagenrfullelem[l]['full']:\n # indxsampclos.remove(indxparagenrfulltemp)\n #indxsampclos = np.array(indxsampclos)\n #if indxsampclos.size > 0:\n # print 'Warning! State is too close to 1!'\n # print gmod.namepara[indxsampclos]\n\n if gdatmodi.cntrswep == 0:\n gdatmodi.this.lliktotlprev = gdatmodi.this.lliktotl\n \n lliktotldiff = gdatmodi.this.lliktotl - gdatmodi.this.lliktotlprev\n\n if gdatmodi.this.lliktotl - gdatmodi.this.lliktotlprev < -10.:\n raise Exception('loglikelihood drop is very unlikely!')\n gdatmodi.this.lliktotlprev = gdatmodi.this.lliktotl\n \n for strgstat in ['this', 'next']:\n for strgvarb in ['paragenrscalfull', 'paragenrunitfull']:\n varb = getattr(getattr(gdatmodi, strgstat), strgvarb)\n if not np.isfinite(varb).all():\n raise Exception('Sample vector is not finite.')\n \n if gmod.numbparaelem > 0:\n if gmod.boolelemsbrtdfncanyy:\n thissbrtdfnc = getattr(gdatmodi.this, 'sbrtdfnc')\n frac = np.amin(thissbrtdfnc) / np.mean(thissbrtdfnc)\n cntppntschec = retr_cntp(gdat, thissbrtdfnc)\n if np.amin(cntppntschec) < -0.1 and frac < -1e-3:\n raise Exception('thissbrtdfnc went negative by %.3g percent.' % (100. 
* frac))\n \n # check the population index\n if (gdatmodi.this.cntpmodl <= 0.).any() or not (np.isfinite(gdatmodi.this.cntpmodl)).all():\n raise Exception('Current flux model is not positive')\n\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n if gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]] != len(gdatmodi.this.indxelemfull[l]):\n raise Exception('Number of elements is inconsistent with the element index list.')\n\n if gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]] != len(gdatmodi.this.indxelemfull[l]):\n raise Exception('Number of elements is inconsistent across data structures.')\n \n for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):\n if gmod.listscalparagenrelem[l][k] == 'gaus' or gmod.listscalparagenrelem[l][k] == 'igam' \\\n or gmod.listscalparagenrelem[l][k] == 'expo':\n continue\n comp = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]]\n minm = getattr(gdat.fitt.minmpara, nameparagenrelem)\n maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)\n indxtemp = np.where((comp < minm) | (comp > maxm))[0]\n if indxtemp.size > 0:\n raise Exception('A component of an element went outside the prior range.')\n \n stopchro(gdat, gdatmodi, 'diag')\n \n # save the sample\n if gdat.boolsave[gdatmodi.cntrswep]:\n \n initchro(gdat, gdatmodi, 'save')\n \n if gdat.savestat:\n \n if gdat.namesavestat is not None:\n strgcnfg = gdat.namesavestat\n else:\n strgcnfg = gdat.strgcnfg\n path = gdat.pathoutp + 'stat_' + strgcnfg + '.h5'\n \n booltemp = False\n if os.path.isfile(path) and gdatmodi.indxprocwork == 0:\n thisfilechec = h5py.File(path, 'r')\n if thisfilechec['lliktotl'][...] > gdatmodi.this.lliktotl:\n if gdat.typeverb > 0:\n print('Not saving the state to %s because loglikelihood is lower...' % path)\n print('Likelihood in the file:')\n print(thisfilechec['lliktotl'][...])\n else:\n booltemp = True\n thisfilechec.close()\n else:\n booltemp = True\n if gdat.forcsavestat:\n booltemp = True\n if booltemp:\n if gdatmodi.indxprocwork > 0:\n continue\n if gdat.typeverb > 0:\n print('Saving the state to %s...' % path)\n \n thisfile = h5py.File(path, 'w')\n thisfile.create_dataset('lliktotl', data=gdatmodi.this.lliktotl)\n for gmod.nameparagenrbase in gmod.nameparagenrbase:\n valu = gdatmodi.this.paragenrscalfull[gmod.indxparagenrbase]\n thisfile.create_dataset(gmod.nameparagenrbase, data=valu)\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n for nameparagenrelem in gmod.namepara.genrelem[l]:\n comp = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]]\n for k in np.arange(comp.size):\n name = nameparagenrelem + 'pop%d%04d' % (l, k)\n thisfile.create_dataset(name, data=comp[k])\n thisfile.close()\n \n indxsampsave = gdat.indxsampsave[gdatmodi.cntrswep]\n \n # fill the sample lists\n for strgvarb in gdat.liststrgvarbarrysamp:\n valu = getattr(gdatmodi.this, strgvarb)\n workdict['list' + gdat.strgpdfn + strgvarb][indxsampsave, ...] = valu\n for strgvarb in gdat.liststrgvarblistsamp:\n workdict['list' + gdat.strgpdfn + strgvarb].append(deepcopy(getattr(gdatmodi.this, strgvarb)))\n stopchro(gdat, gdatmodi, 'save')\n\n # plot the current sample\n if thismakefram:\n \n initchro(gdat, gdatmodi, 'plot')\n \n writfile(gdatmodi, gdatmodi.pathgdatmodi) \n\n if gdat.typeverb > 0:\n print('Process %d is in queue for making a frame.' % gdatmodi.indxprocwork)\n \n if gdat.numbproc > 1:\n gdatmodi.lock.acquire()\n \n if gdat.typeverb > 0:\n print('Process %d started making a frame.' 
% gdatmodi.indxprocwork)\n \n plot_samp(gdat, gdatmodi, 'this', 'fitt', 'fram')\n \n if gdat.typeverb > 0:\n print('Process %d finished making a frame.' % gdatmodi.indxprocwork)\n \n if gdat.numbproc > 1:\n gdatmodi.lock.release()\n \n stopchro(gdat, gdatmodi, 'plot')\n \n # determine the acceptance probability\n if gdatmodi.this.boolpropfilt:\n \n initchro(gdat, gdatmodi, 'proc')\n proc_samp(gdat, gdatmodi, 'next', 'fitt')\n stopchro(gdat, gdatmodi, 'proc')\n \n calc_probprop(gdat, gdatmodi)\n \n if gdat.booldiagmode:\n if not gdatmodi.this.indxproptype > 2 and gdatmodi.this.ljcb != 0.:\n raise Exception('log Jacobian can only be be nonzero when a split or merge is proposed.')\n if not gdatmodi.this.indxproptype > 2 and gdatmodi.this.ltrp != 0.:\n raise Exception('log ratio proposal probability can only be be nonzero when a split or merge is proposed.')\n \n # evaluate the acceptance probability\n gdatmodi.this.deltlpostotl = gdatmodi.next.lpostotl - gdatmodi.this.lpostotl\n gdatmodi.this.accplprb = gdatmodi.this.deltlpostotl + gdatmodi.this.tmprlposelem - gdatmodi.this.lpau + gdatmodi.this.ltrp + gdatmodi.this.ljcb\n gdatmodi.this.accpprob[0] = np.exp(gdatmodi.this.accplprb)\n if gdat.typeverb > 1:\n print('gdatmodi.this.lpritotl')\n print(gdatmodi.this.lpritotl)\n print('gdatmodi.next.lpritotl')\n print(gdatmodi.next.lpritotl)\n print('gdatmodi.this.lliktotl')\n print(gdatmodi.this.lliktotl)\n print('gdatmodi.next.lliktotl')\n print(gdatmodi.next.lliktotl)\n print('gdatmodi.this.lpostotl')\n print(gdatmodi.this.lpostotl)\n print('gdatmodi.next.lpostotl')\n print(gdatmodi.next.lpostotl)\n \n print('gdatmodi.this.deltlpostotl')\n print(gdatmodi.this.deltlpostotl)\n print('gdatmodi.this.tmprlposelem')\n print(gdatmodi.this.tmprlposelem)\n print('gdatmodi.this.lpau')\n print(gdatmodi.this.lpau)\n print('gdatmodi.this.ltrp')\n print(gdatmodi.this.ltrp)\n print('gdatmodi.this.ljcb')\n print(gdatmodi.this.ljcb)\n \n print('gdatmodi.this.accplprb')\n print(gdatmodi.this.accplprb)\n else:\n gdatmodi.this.accpprob[0] = 0.\n \n # accept or reject the proposal\n booltemp = gdatmodi.this.accpprob[0] >= np.random.rand()\n \n if gdat.booldiagmode:\n if gdatmodi.this.indxproptype == 0:\n if gdat.boolsqzeprop and not booltemp:\n raise Exception('')\n\n if booltemp:\n if gdat.typeverb > 1:\n print('Accepted.')\n \n # update the current state\n updt_stat(gdat, gdatmodi)\n\n # check if the accepted sample has maximal likelihood\n if gdatmodi.this.lliktotl > gdatmodi.maxmllikswep:\n gdatmodi.maxmllikswep = gdatmodi.this.lliktotl\n gdatmodi.indxswepmaxmllik = gdatmodi.cntrswep\n gdatmodi.sampmaxmllik = np.copy(gdatmodi.this.paragenrscalfull)\n \n # register the sample as accepted\n gdatmodi.this.boolpropaccp = True\n\n # reject the sample\n else:\n\n if gdat.typeverb > 1:\n print('Rejected.')\n\n gdatmodi.this.boolpropaccp = False\n \n ## variables to be saved for each sweep\n for strg in gdat.liststrgvarbarryswep:\n workdict['list' + gdat.strgpdfn + strg][gdatmodi.cntrswep, ...] = getattr(gdatmodi.this, strg)\n \n workdict['list' + gdat.strgpdfn + 'accpprob'][gdatmodi.cntrswep, 0] = gdatmodi.this.accpprob[0]\n \n # log the progress\n if boollogg:\n \n print('--------------')\n print('Sweep number %d' % gdatmodi.cntrswep)\n print('%3d%% completed.' 
% gdatmodi.this.percswep)\n print('%30s %50s %10s' % ('Prop', 'Accp rate', 'Scale'))\n \n indxswepintv = np.arange(minmswepintv, maxmswepintv)\n for k in gdat.indxproptype:\n indxswepprop = indxswepintv[np.where(workdict['list' + gdat.strgpdfn + 'indxproptype'][indxswepintv, 0] == k)]\n boolproptype = workdict['list' + gdat.strgpdfn + 'indxproptype'][indxswepintv, 0] == k\n boolaccp = workdict['list' + gdat.strgpdfn + 'boolpropaccp'][indxswepintv, 0] == 1\n numbaccp = np.where(boolaccp & boolproptype)[0].size\n numbtotl = np.where(boolproptype)[0].size\n if numbtotl > 0:\n percaccp = 100. * numbaccp / float(numbtotl)\n else:\n percaccp = 0.\n if k in gdat.indxstdp:\n strgstdp = '%.3g' % gdat.stdp[k]\n else:\n strgstdp = ''\n print('%30s %50s' % (gdat.lablproptype[k], 'acceptance rate: %3d%% (%5d out of %5d)' % (percaccp, numbaccp, numbtotl)))\n \n if gdat.boolburntmpr and gdatmodi.cntrswep < gdat.numbburntmpr:\n print('Tempered burn-in')\n print('gdatmodi.this.facttmpr')\n print(gdatmodi.this.facttmpr)\n print \n numbpara = gmod.numbparagenrbase\n if gmod.numbparaelem > 0:\n for l in gmod.indxpopl:\n numbpara += gdatmodi.this.indxparagenrfullelem[l]['full'].size\n if gmod.numbparaelem > 0:\n print('Number of elements:')\n for l in gmod.indxpopl:\n print(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))\n print('Current number of parameters:')\n print(numbpara)\n print('gdatmodi.this.numbdoff')\n print(gdatmodi.this.numbdoff)\n for attr, valu in gdatmodi.__dict__.items():\n if isinstance(valu, np.ndarray):\n if 8 * valu.size * gdat.numbsamptotl > 1e9:\n print('Warning! %s has total length %d and size %s' % (attr, valu.size * gdat.numbsamptotl, \\\n tdpy.retr_strgmemo(8 * valu.size * gdat.numbsamptotl)))\n if gmod.numbparaelem > 0:\n if gmod.typemodltran == 'pois':\n print('Mean number of elements:')\n print(gdatmodi.this.paragenrscalfull[gmod.indxpara.meanelem])\n for l in gmod.indxpopl:\n if gmod.nameparagenrelemampl[l] == 'flux' and gmod.typeprioflux[l] == 'powr' or gmod.nameparagenrelemampl[l] != 'flux':\n print('Log-slope of the amplitude parameter distribution, population %d:' % l)\n indxparagenrbase = getattr(gmod.indxpara, 'slopprio' + gmod.nameparagenrelemampl[l] + 'pop%d' % l)\n print(gdatmodi.this.paragenrscalfull[indxparagenrbase])\n else:\n print('Flux distribution break:')\n print(gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'brek' + gmod.nameparagenrelemampl[l] + 'pop%d' % l)])\n print('Flux distribution lower slope:')\n print(gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'sloplowr' + gmod.nameparagenrelemampl[l] + 'pop%d' % l)])\n print('Flux distribution upper slope:')\n print(gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'slopuppr' + gmod.nameparagenrelemampl[l] + 'pop%d' % l)])\n print('Backgrounds')\n print(gdatmodi.this.paragenrscalfull[gmod.indxpara.bacp])\n if gmod.numbparaelem > 0:\n print('Log-prior penalization term: ')\n print(gdatmodi.this.lpripena)\n print('Completeness')\n for q in gdat.indxrefr:\n if gdat.refr.numbelem[q] == 0:\n continue\n l = gdat.refr.indxpoplfittassc[q]\n print('Reference Population %d, Fitting Population %d' % (q, l))\n #print('Total:')\n #print(getattr(gdatmodi.this, 'cmpl' + namevarb))\n print('Binned in significance feature:')\n print(getattr(gdatmodi.this, 'cmpl' + gdat.refr.namepara.elemsign[q] + 'pop%d' % q))\n print('False discovery rate')\n for l in gmod.indxpopl:\n if gdat.fitt.this.numbelem[l] == 0:\n continue\n q = gmod.indxpoplrefrassc[l]\n print('Fitting population %d, Reference 
Population %d' % (l, q))\n #print('Total:')\n #print(getattr(gdatmodi.this, 'fdis' + namevarb))\n print('Binned in significance feature:')\n print(getattr(gdatmodi.this, 'fdis' + gdat.fitt.namepara.elemsign[l] + 'pop%d' % l))\n \n print('gdatmodi.this.lliktotl')\n print(gdatmodi.this.lliktotl)\n print('Chi2 per degree of freedom')\n print(gdatmodi.this.chi2doff)\n \n # save the execution time for the sweep\n stopchro(gdat, gdatmodi, 'totl')\n \n if boollogg:\n print('Chronometers: ')\n for k, name in enumerate(gdat.listnamechro):\n #for name, valu in gdat.indxchro.items():\n #if valu == k:\n thischro = getattr(gdatmodi.this, 'chro' + name)\n print('%s: %.3g msec' % (name, thischro * 1e3))\n booltemp = False\n for l in gmod.indxpopl:\n if gmod.typeelemspateval[l] == 'locl' and gmod.maxmpara.numbelem[l] > 0:\n booltemp = True\n if name == 'llik' and gdat.numbpixl > 1 and gmod.numbparaelem > 0 and booltemp:\n print('%.3g per pixel' % (thischro * 1e3 / np.amin(gdat.numbpixlprox)))\n print \n\n if gdat.typeverb > 1:\n print('')\n print('')\n print('')\n print('')\n print('')\n print('')\n print('')\n print('')\n print('')\n print('')\n print('')\n print('')\n \n # update the sweep counter\n gdatmodi.cntrswep += 1\n \n for strgvarb in gdat.liststrgvarbarry + gdat.liststrgvarblistsamp:\n valu = workdict['list' + gdat.strgpdfn + strgvarb]\n setattr(gdatmodi, 'list' + gdat.strgpdfn + strgvarb, valu)\n\n gdatmodi.timereal = time.time() - timereal\n gdatmodi.timeproc = time.clock() - timeproc\n \n delattr(gdatmodi, 'lock')\n \n gdatmodi.booldone = True\n\n writfile(gdatmodi, gdatmodi.pathgdatmodi) \n\n"
] | [
[
"numpy.ones",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.arctanh",
"matplotlib.pyplot.subplots_adjust",
"numpy.stack",
"matplotlib.pyplot.xkcd",
"numpy.histogramdd",
"matplotlib.pyplot.figtext",
"matplotlib.pyplot.savefig",
"numpy.arccos",
"numpy.histogram2d",
"numpy.mean",
"scipy.special.gammaln",
"numpy.repeat",
"numpy.arcsin",
"numpy.arctan",
"numpy.floor",
"scipy.interpolate.RectBivariateSpline",
"numpy.exp",
"numpy.random.normal",
"numpy.array",
"numpy.round",
"matplotlib.pyplot.tight_layout",
"numpy.ones_like",
"numpy.random.seed",
"numpy.random.poisson",
"numpy.isscalar",
"numpy.vstack",
"scipy.stats.ks_2samp",
"matplotlib.use",
"numpy.tile",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"matplotlib.pyplot.colorbar",
"numpy.zeros_like",
"numpy.arctan2",
"numpy.interp",
"matplotlib.pyplot.Circle",
"numpy.amin",
"numpy.gradient",
"numpy.sqrt",
"matplotlib.pyplot.contourf",
"scipy.interpolate.interpn",
"numpy.sin",
"numpy.concatenate",
"numpy.sum",
"numpy.histogram",
"numpy.argsort",
"numpy.copy",
"numpy.log",
"numpy.amax",
"numpy.arccosh",
"numpy.append",
"numpy.empty_like",
"numpy.logical_not",
"numpy.random.rand",
"scipy.ndimage.imread",
"scipy.stats.invgamma.pdf",
"numpy.deg2rad",
"numpy.ceil",
"numpy.zeros",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.sort",
"matplotlib.patheffects.withStroke",
"numpy.random.randn",
"numpy.digitize",
"numpy.dtype",
"scipy.special.gamma",
"scipy.stats.poisson.pmf",
"numpy.meshgrid",
"numpy.isfinite",
"numpy.fabs",
"numpy.arcsinh",
"numpy.random.choice",
"numpy.cos",
"numpy.log10",
"numpy.where",
"numpy.linspace",
"numpy.minimum",
"numpy.random.get_state",
"numpy.setdiff1d",
"numpy.sinh",
"numpy.maximum",
"numpy.std",
"numpy.sign",
"numpy.empty",
"numpy.logspace",
"numpy.loadtxt"
]
] |
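The row above stores a Metropolis-style sampler in which `worksamp` forks one chain per process with `multiprocessing.Pool`, binding the shared arguments via `functools.partial` before mapping the per-worker index. Below is a minimal, self-contained sketch of that dispatch pattern only; the worker name `run_chain`, the output path, and `num_proc` are illustrative stand-ins, not part of the stored code.

```python
import functools
import multiprocessing as mp


def run_chain(path, strgpdfn, indxprocwork):
    # Toy stand-in for the row's worktrac(): shared prefix args plus a per-worker index.
    return '%s chain %d (%s)' % (path, indxprocwork, strgpdfn)


if __name__ == '__main__':
    num_proc = 4
    # Bind the arguments common to all workers, as worksamp() does with functools.partial.
    workpart = functools.partial(run_chain, '/tmp/outp/', 'post')
    with mp.Pool(num_proc) as pool:
        # Map the worker index over the pool, mirroring pool.map(workpart, gdat.indxproc).
        results = pool.map(workpart, range(num_proc))
    print(results)
```

The stored code additionally passes a lock and large state objects to each worker; the sketch drops those to keep the fork/map pattern visible.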
dataprofessor/st-write | [
"5cbd8608824a9c16560c8746393292c17ce6a792"
] | [
"streamlit_app.py"
] | [
"import numpy as np\nimport altair as alt\nimport pandas as pd\nimport streamlit as st\n\nst.header('st.write')\n\n# Example 1\nst.subheader('Display text')\nst.write('Hello, *World!* :sunglasses:')\n\n# Example 2\nst.subheader('Display numbers')\nst.write(1234)\n\n# Example 3\nst.subheader('Display DataFrame')\ndf = pd.DataFrame({\n 'first column': [1, 2, 3, 4],\n 'second column': [10, 20, 30, 40]\n })\nst.write(df)\n\n# Example 4\nst.subheader('Accept multiple arguments')\nst.write('Below is a DataFrame:', df, 'Above is a dataframe.')\n\n# Example 5\nst.subheader('Display charts')\ndf2 = pd.DataFrame(\n np.random.randn(200, 3),\n columns=['a', 'b', 'c'])\nc = alt.Chart(df2).mark_circle().encode(\n x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\nst.write(c)\n"
] | [
[
"pandas.DataFrame",
"numpy.random.randn"
]
] |
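The `streamlit_app.py` row above exercises `st.write` with text, numbers, a DataFrame, and an Altair chart, and its listed APIs are only `pandas.DataFrame` and `numpy.random.randn`. A minimal sketch of just that data-preparation step, runnable without Streamlit (the `print` call stands in for `st.write`; column names are the ones used in the row):

```python
import numpy as np
import pandas as pd

# Same random chart data the app passes to alt.Chart: 200 rows, 3 columns.
df2 = pd.DataFrame(np.random.randn(200, 3), columns=['a', 'b', 'c'])
print(df2.describe())
```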
wangwei2009/speechbrain | [
"ebbac4561a9c9101786e0ab0b1105017eb655fc8"
] | [
"recipes/windnoise/model/CRN.py"
] | [
"\"\"\"\nsingle channel speech enhancement for wind noise reduction.\n\nrefer to\n \"A Convolutional Recurrent Neural Network for Real-Time Speech Enhancement\" .\n\nAuthors\n * Wang Wei 2021\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nclass CNN_Block(torch.nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size=[3, 3],\n stride=(1,2),\n padding=(1,0)) -> None:\n super().__init__()\n self.layers = torch.nn.ModuleList()\n\n self.layers.append(nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding))\n self.layers.append(nn.BatchNorm2d(out_channels))\n self.layers.append(nn.ELU())\n\n def forward(self, x):\n for layer in self.layers:\n x = layer(x)\n\n return x\n\nclass RNN_Block(torch.nn.Module):\n def __init__(self,\n input_size=1792,\n hidden_size=1792,\n num_layers=2,\n rnn_type='LSTM',\n dropout=0.2) -> None:\n super().__init__()\n\n self.rnn_type = rnn_type\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.dropout = dropout\n\n if self.rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(self.input_size,\n self.hidden_size, self.num_layers,\n batch_first=True, dropout=self.dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]\n except KeyError:\n raise ValueError(\"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\n self.rnn = nn.RNN(self.input_size, self.hidden_size, self.num_layers, nonlinearity=nonlinearity, dropout=self.dropout)\n\n # self.hidden = self.init_hidden(batch_size)\n\n def init_hidden(self, batch_size=1):\n if self.rnn_type == 'GRU':\n return torch.zeros(self.num_layers * self.directions_count, batch_size, self.hidden_dim).to(self.device)\n elif self.rnn_type == 'LSTM':\n return (\n torch.zeros(self.num_layers * self.directions_count, batch_size, self.hidden_dim).to(self.device),\n torch.zeros(self.num_layers * self.directions_count, batch_size, self.hidden_dim).to(self.device))\n else:\n raise Exception('Unknown rnn_type. 
Valid options: \"gru\", \"lstm\"')\n\n def forward(self, x):\n self.rnn.flatten_parameters()\n x, _ = self.rnn(x)\n\n return x\n\nclass DeCNN_Block(torch.nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size=[3, 3],\n stride=(1,2),\n padding=(1,0),\n output_padding=0) -> None:\n super().__init__()\n self.layers = torch.nn.ModuleList()\n\n self.layers.append(\n nn.ConvTranspose2d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n output_padding=output_padding))\n self.layers.append(nn.BatchNorm2d(out_channels))\n self.layers.append(nn.ELU())\n\n def forward(self, x):\n for layer in self.layers:\n x = layer(x)\n\n return x\n\n\nclass Encoder(torch.nn.Module):\n def __init__(self, in_channels=1, channels=16, layers=5, scale=2) -> None:\n super().__init__()\n\n self.cnn_b1 = CNN_Block(1, channels)\n self.cnn_b2 = CNN_Block(channels, channels*2)\n self.cnn_b3 = CNN_Block(channels*2, channels*4)\n self.cnn_b4 = CNN_Block(channels*4, channels*8)\n self.cnn_b5 = CNN_Block(channels*8, channels*16)\n\n def forward(self, x):\n o1 = self.cnn_b1(x)\n o2 = self.cnn_b2(o1)\n o3 = self.cnn_b3(o2)\n o4 = self.cnn_b4(o3)\n o5 = self.cnn_b5(o4)\n\n return o1, o2, o3, o4, o5\n\n\nclass Decoder(torch.nn.Module):\n def __init__(self, in_channels=512, layers=5, scale=2) -> None:\n super().__init__()\n\n self.decnn_b5 = DeCNN_Block(512, 128)\n self.decnn_b4 = DeCNN_Block(256, 64)\n self.decnn_b3 = DeCNN_Block(128, 32)\n self.decnn_b2 = DeCNN_Block(64, 16, output_padding=(0,1))\n self.decnn_b1 = DeCNN_Block(32, 1)\n\n def forward(self, x, decoder_o5, decoder_o4, decoder_o3, decoder_o2, decoder_o1):\n o5 = self.decnn_b5(torch.cat((x, decoder_o5), 1))\n o4 = self.decnn_b4(torch.cat((o5, decoder_o4), 1))\n o3 = self.decnn_b3(torch.cat((o4, decoder_o3), 1))\n o2 = self.decnn_b2(torch.cat((o3, decoder_o2), 1))\n o = self.decnn_b1(torch.cat((o2, decoder_o1), 1))\n\n return o\n\n\n\nclass crn(torch.nn.Module):\n \"\"\"Basic RNN model with projection layers between RNN layers.\n\n Arguments\n ---------\n input_size : int\n Size of the expected input in the 3rd dimension.\n rnn_size : int\n Number of neurons to use in rnn (for each direction -> and <-).\n projection : int\n Number of neurons in projection layer.\n layers : int\n Number of RNN layers to use.\n \"\"\"\n\n def __init__(self, input_size=161, contex=0, bidir=False, rnn_size=128, projection=64, layers=2):\n super().__init__()\n self.layers = torch.nn.ModuleList()\n\n if input_size == 257:\n rnn_size = 1792\n elif input_size == 161:\n rnn_size = 1024\n\n self.encoder = Encoder()\n self.rnn = RNN_Block(input_size=rnn_size, hidden_size=rnn_size)\n self.decoder = Decoder()\n\n def forward(self, x: torch.Tensor):\n \"\"\"model forward\n\n Args:\n x (tensor): input tenosr, [N,T,F]\n\n Returns:\n [type]: [description]\n \"\"\"\n # N, T, F = x.size()\n if len(x.shape)==3:\n x = x.unsqueeze(1) # [N,T,F] to [N, 1, T, F]\n\n N, C, T, F = x.size()\n\n o1, o2, o3, o4, o5 = self.encoder(x)\n\n embeded_ch = o5.size(1)\n\n rnn_in = o5.transpose(1, 2)\n rnn_in = rnn_in.reshape(N, T, -1)\n rnn_out = self.rnn(rnn_in)\n rnn_out = rnn_out.unsqueeze(1)\n\n decoder_in = rnn_out.reshape(N, embeded_ch, T, -1)\n\n decoder_out = self.decoder(decoder_in, o5, o4, o3, o2, o1)\n\n return decoder_out.squeeze(1)\n\nif __name__ == \"__main__\":\n N, C, T, F = 10, 1, 100, 257\n data = torch.rand((N, T,F))\n print(data.shape)\n model = crn(input_size=F)\n output = model(data)\n print(output.shape)\n # input_size = 257\n # contex 
= 3\n # model = CustomModel(input_size, contex=contex)\n # # input_data = torch.rand(100, 20, input_size)\n from torchsummary import summary\n summary(model, (1, 100, 257))\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.rand",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.RNN",
"torch.zeros",
"torch.nn.ELU",
"torch.cat",
"torch.nn.ConvTranspose2d"
]
] |
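The CRN recipe stored above builds an encoder / RNN / decoder stack and, in its own `__main__` block, smoke-tests it on a random `[N, T, F]` tensor. A minimal sketch of that smoke test is below; the import path is an assumption (the row stores the file as `recipes/windnoise/model/CRN.py`, so it would need to be importable, e.g. as `CRN`).

```python
import torch
# Assumed import path for the module stored in this row.
from CRN import crn

N, T, F = 10, 100, 257          # batch, frames, frequency bins
data = torch.rand((N, T, F))    # random spectrogram-like input
model = crn(input_size=F)       # the class picks rnn_size=1792 for a 257-bin input
output = model(data)            # enhanced output, back at [N, T, F] after the decoder
print(data.shape, output.shape)
```

The five stride-(1,2) conv blocks reduce 257 frequency bins to 7 with 256 channels (7 × 256 = 1792), which is why the RNN width is tied to the input size in the constructor.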
ajavadia/qiskit-sdk-py | [
"a59e8e6be1793197e19998c1f7dcfc45e6f2f3af"
] | [
"qiskit/quantum_info/states/densitymatrix.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nDensityMatrix quantum state class.\n\"\"\"\n\nimport copy\nfrom numbers import Number\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.instruction import Instruction\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.states.quantum_state import QuantumState\nfrom qiskit.quantum_info.operators.tolerances import TolerancesMixin\nfrom qiskit.quantum_info.operators.op_shape import OpShape\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.scalar_op import ScalarOp\nfrom qiskit.quantum_info.operators.predicates import is_hermitian_matrix\nfrom qiskit.quantum_info.operators.predicates import is_positive_semidefinite_matrix\nfrom qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel\nfrom qiskit.quantum_info.operators.channel.superop import SuperOp\nfrom qiskit.quantum_info.states.statevector import Statevector\n\n\nclass DensityMatrix(QuantumState, TolerancesMixin):\n \"\"\"DensityMatrix class\"\"\"\n\n def __init__(self, data, dims=None):\n \"\"\"Initialize a density matrix object.\n\n Args:\n data (np.ndarray or list or matrix_like or QuantumCircuit or\n qiskit.circuit.Instruction):\n A statevector, quantum instruction or an object with a ``to_operator`` or\n ``to_matrix`` method from which the density matrix can be constructed.\n If a vector the density matrix is constructed as the projector of that vector.\n If a quantum instruction, the density matrix is constructed by assuming all\n qubits are initialized in the zero state.\n dims (int or tuple or list): Optional. The subsystem dimension\n of the state (See additional information).\n\n Raises:\n QiskitError: if input data is not valid.\n\n Additional Information:\n The ``dims`` kwarg can be None, an integer, or an iterable of\n integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` or ``None`` -- the leading dimension of the input matrix\n specifies the total dimension of the density matrix. 
If it is a\n power of two the state will be initialized as an N-qubit state.\n If it is not a power of two the state will have a single\n d-dimensional subsystem.\n \"\"\"\n if isinstance(data, (list, np.ndarray)):\n # Finally we check if the input is a raw matrix in either a\n # python list or numpy array format.\n self._data = np.asarray(data, dtype=complex)\n elif isinstance(data, (QuantumCircuit, Instruction)):\n # If the data is a circuit or an instruction use the classmethod\n # to construct the DensityMatrix object\n self._data = DensityMatrix.from_instruction(data)._data\n elif hasattr(data, 'to_operator'):\n # If the data object has a 'to_operator' attribute this is given\n # higher preference than the 'to_matrix' method for initializing\n # an Operator object.\n op = data.to_operator()\n self._data = op.data\n if dims is None:\n dims = op.output_dims()\n elif hasattr(data, 'to_matrix'):\n # If no 'to_operator' attribute exists we next look for a\n # 'to_matrix' attribute to a matrix that will be cast into\n # a complex numpy matrix.\n self._data = np.asarray(data.to_matrix(), dtype=complex)\n else:\n raise QiskitError(\"Invalid input data format for DensityMatrix\")\n # Convert statevector into a density matrix\n ndim = self._data.ndim\n shape = self._data.shape\n if ndim == 2 and shape[0] == shape[1]:\n pass # We good\n elif ndim == 1:\n self._data = np.outer(self._data, np.conj(self._data))\n elif ndim == 2 and shape[1] == 1:\n self._data = np.reshape(self._data, shape[0])\n else:\n raise QiskitError(\n \"Invalid DensityMatrix input: not a square matrix.\")\n super().__init__(op_shape=OpShape.auto(\n shape=self._data.shape, dims_l=dims, dims_r=dims))\n\n def __array__(self, dtype=None):\n if dtype:\n return np.asarray(self.data, dtype=dtype)\n return self.data\n\n def __eq__(self, other):\n return super().__eq__(other) and np.allclose(\n self._data, other._data, rtol=self.rtol, atol=self.atol)\n\n def __repr__(self):\n prefix = 'DensityMatrix('\n pad = len(prefix) * ' '\n return '{}{},\\n{}dims={})'.format(\n prefix, np.array2string(\n self._data, separator=', ', prefix=prefix),\n pad, self._op_shape.dims_l())\n\n @property\n def data(self):\n \"\"\"Return data.\"\"\"\n return self._data\n\n def is_valid(self, atol=None, rtol=None):\n \"\"\"Return True if trace 1 and positive semidefinite.\"\"\"\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n # Check trace == 1\n if not np.allclose(self.trace(), 1, rtol=rtol, atol=atol):\n return False\n # Check Hermitian\n if not is_hermitian_matrix(self.data, rtol=rtol, atol=atol):\n return False\n # Check positive semidefinite\n return is_positive_semidefinite_matrix(self.data, rtol=rtol, atol=atol)\n\n def to_operator(self):\n \"\"\"Convert to Operator\"\"\"\n dims = self.dims()\n return Operator(self.data, input_dims=dims, output_dims=dims)\n\n def conjugate(self):\n \"\"\"Return the conjugate of the density matrix.\"\"\"\n return DensityMatrix(np.conj(self.data), dims=self.dims())\n\n def trace(self):\n \"\"\"Return the trace of the density matrix.\"\"\"\n return np.trace(self.data)\n\n def purity(self):\n \"\"\"Return the purity of the quantum state.\"\"\"\n # For a valid statevector the purity is always 1, however if we simply\n # have an arbitrary vector (not correctly normalized) then the\n # purity is equivalent to the trace squared:\n # P(|psi>) = Tr[|psi><psi|psi><psi|] = |<psi|psi>|^2\n return np.trace(np.dot(self.data, self.data))\n\n def tensor(self, other):\n \"\"\"Return the tensor product state 
self ⊗ other.\n\n Args:\n other (DensityMatrix): a quantum state object.\n\n Returns:\n DensityMatrix: the tensor product operator self ⊗ other.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, DensityMatrix):\n other = DensityMatrix(other)\n ret = copy.copy(self)\n ret._data = np.kron(self._data, other._data)\n ret._op_shape = self._op_shape.tensor(other._op_shape)\n return ret\n\n def expand(self, other):\n \"\"\"Return the tensor product state other ⊗ self.\n\n Args:\n other (DensityMatrix): a quantum state object.\n\n Returns:\n DensityMatrix: the tensor product state other ⊗ self.\n\n Raises:\n QiskitError: if other is not a quantum state.\n \"\"\"\n if not isinstance(other, DensityMatrix):\n other = DensityMatrix(other)\n ret = copy.copy(self)\n ret._data = np.kron(other._data, self._data)\n ret._op_shape = self._op_shape.expand(other._op_shape)\n return ret\n\n def _add(self, other):\n \"\"\"Return the linear combination self + other.\n\n Args:\n other (DensityMatrix): a quantum state object.\n\n Returns:\n DensityMatrix: the linear combination self + other.\n\n Raises:\n QiskitError: if other is not a quantum state, or has\n incompatible dimensions.\n \"\"\"\n if not isinstance(other, DensityMatrix):\n other = DensityMatrix(other)\n self._op_shape._validate_add(other._op_shape)\n ret = copy.copy(self)\n ret._data = self.data + other.data\n return ret\n\n def _multiply(self, other):\n \"\"\"Return the scalar multiplied state other * self.\n\n Args:\n other (complex): a complex number.\n\n Returns:\n DensityMatrix: the scalar multiplied state other * self.\n\n Raises:\n QiskitError: if other is not a valid complex number.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n ret = copy.copy(self)\n ret._data = other * self.data\n return ret\n\n def evolve(self, other, qargs=None):\n \"\"\"Evolve a quantum state by an operator.\n\n Args:\n other (Operator or QuantumChannel\n or Instruction or Circuit): The operator to evolve by.\n qargs (list): a list of QuantumState subsystem positions to apply\n the operator on.\n\n Returns:\n QuantumState: the output quantum state.\n\n Raises:\n QiskitError: if the operator dimension does not match the\n specified QuantumState subsystem dimensions.\n \"\"\"\n if qargs is None:\n qargs = getattr(other, 'qargs', None)\n\n # Evolution by a circuit or instruction\n if isinstance(other, (QuantumCircuit, Instruction)):\n return self._evolve_instruction(other, qargs=qargs)\n\n # Evolution by a QuantumChannel\n if hasattr(other, 'to_quantumchannel'):\n return other.to_quantumchannel()._evolve(self, qargs=qargs)\n if isinstance(other, QuantumChannel):\n return other._evolve(self, qargs=qargs)\n\n # Unitary evolution by an Operator\n if not isinstance(other, Operator):\n other = Operator(other)\n return self._evolve_operator(other, qargs=qargs)\n\n def reverse_qargs(self):\n r\"\"\"Return a DensityMatrix with reversed subsystem ordering.\n\n For a tensor product state this is equivalent to reversing the order\n of tensor product subsystems. For a density matrix\n :math:`\\rho = \\rho_{n-1} \\otimes ... \\otimes \\rho_0`\n the returned state will be\n :math:`\\rho_0 \\otimes ... 
\\otimes \\rho_{n-1}`.\n\n Returns:\n DensityMatrix: the state with reversed subsystem order.\n \"\"\"\n ret = copy.copy(self)\n axes = tuple(range(self._op_shape._num_qargs_l - 1, -1, -1))\n axes = axes + tuple(len(axes) + i for i in axes)\n ret._data = np.reshape(np.transpose(\n np.reshape(self.data, self._op_shape.tensor_shape), axes),\n self._op_shape.shape)\n ret._op_shape = self._op_shape.reverse()\n return ret\n\n def expectation_value(self, oper, qargs=None):\n \"\"\"Compute the expectation value of an operator.\n\n Args:\n oper (Operator): an operator to evaluate expval.\n qargs (None or list): subsystems to apply the operator on.\n\n Returns:\n complex: the expectation value.\n \"\"\"\n if not isinstance(oper, Operator):\n oper = Operator(oper)\n return np.trace(Operator(self).dot(oper.adjoint(), qargs=qargs).data)\n\n def probabilities(self, qargs=None, decimals=None):\n \"\"\"Return the subsystem measurement probability vector.\n\n Measurement probabilities are with respect to measurement in the\n computation (diagonal) basis.\n\n Args:\n qargs (None or list): subsystems to return probabilities for,\n if None return for all subsystems (Default: None).\n decimals (None or int): the number of decimal places to round\n values. If None no rounding is done (Default: None).\n\n Returns:\n np.array: The Numpy vector array of probabilities.\n\n Examples:\n\n Consider a 2-qubit product state :math:`\\\\rho=\\\\rho_1\\\\otimes\\\\rho_0`\n with :math:`\\\\rho_1=|+\\\\rangle\\\\!\\\\langle+|`,\n :math:`\\\\rho_0=|0\\\\rangle\\\\!\\\\langle0|`.\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import DensityMatrix\n\n rho = DensityMatrix.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = rho.probabilities()\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring only qubit-0\n probs_qubit_0 = rho.probabilities([0])\n print('Qubit-0 probs: {}'.format(probs_qubit_0))\n\n # Probabilities for measuring only qubit-1\n probs_qubit_1 = rho.probabilities([1])\n print('Qubit-1 probs: {}'.format(probs_qubit_1))\n\n We can also permute the order of qubits in the ``qargs`` list\n to change the qubit position in the probabilities output\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import DensityMatrix\n\n rho = DensityMatrix.from_label('+0')\n\n # Probabilities for measuring both qubits\n probs = rho.probabilities([0, 1])\n print('probs: {}'.format(probs))\n\n # Probabilities for measuring both qubits\n # but swapping qubits 0 and 1 in output\n probs_swapped = rho.probabilities([1, 0])\n print('Swapped probs: {}'.format(probs_swapped))\n \"\"\"\n probs = self._subsystem_probabilities(\n np.abs(self.data.diagonal()), self._op_shape.dims_l(), qargs=qargs)\n if decimals is not None:\n probs = probs.round(decimals=decimals)\n return probs\n\n def reset(self, qargs=None):\n \"\"\"Reset state or subsystems to the 0-state.\n\n Args:\n qargs (list or None): subsystems to reset, if None all\n subsystems will be reset to their 0-state\n (Default: None).\n\n Returns:\n DensityMatrix: the reset state.\n\n Additional Information:\n If all subsystems are reset this will return the ground state\n on all subsystems. 
If only a some subsystems are reset this\n function will perform evolution by the reset\n :class:`~qiskit.quantum_info.SuperOp` of the reset subsystems.\n \"\"\"\n if qargs is None:\n # Resetting all qubits does not require sampling or RNG\n ret = copy.copy(self)\n state = np.zeros(self._op_shape.shape, dtype=complex)\n state[0, 0] = 1\n ret._data = state\n return ret\n\n # Reset by evolving by reset SuperOp\n dims = self.dims(qargs)\n reset_superop = SuperOp(ScalarOp(dims, coeff=0))\n reset_superop.data[0] = Operator(ScalarOp(dims)).data.ravel()\n return self.evolve(reset_superop, qargs=qargs)\n\n @classmethod\n def from_label(cls, label):\n r\"\"\"Return a tensor product of Pauli X,Y,Z eigenstates.\n\n .. list-table:: Single-qubit state labels\n :header-rows: 1\n\n * - Label\n - Statevector\n * - ``\"0\"``\n - :math:`\\begin{pmatrix} 1 & 0 \\\\ 0 & 0 \\end{pmatrix}`\n * - ``\"1\"``\n - :math:`\\begin{pmatrix} 0 & 0 \\\\ 0 & 1 \\end{pmatrix}`\n * - ``\"+\"``\n - :math:`\\frac{1}{2}\\begin{pmatrix} 1 & 1 \\\\ 1 & 1 \\end{pmatrix}`\n * - ``\"-\"``\n - :math:`\\frac{1}{2}\\begin{pmatrix} 1 & -1 \\\\ -1 & 1 \\end{pmatrix}`\n * - ``\"r\"``\n - :math:`\\frac{1}{2}\\begin{pmatrix} 1 & -i \\\\ i & 1 \\end{pmatrix}`\n * - ``\"l\"``\n - :math:`\\frac{1}{2}\\begin{pmatrix} 1 & i \\\\ -i & 1 \\end{pmatrix}`\n\n Args:\n label (string): a eigenstate string ket label (see table for\n allowed values).\n\n Returns:\n Statevector: The N-qubit basis state density matrix.\n\n Raises:\n QiskitError: if the label contains invalid characters, or the length\n of the label is larger than an explicitly specified num_qubits.\n \"\"\"\n return DensityMatrix(Statevector.from_label(label))\n\n @staticmethod\n def from_int(i, dims):\n \"\"\"Return a computational basis state density matrix.\n\n Args:\n i (int): the basis state element.\n dims (int or tuple or list): The subsystem dimensions of the statevector\n (See additional information).\n\n Returns:\n DensityMatrix: The computational basis state :math:`|i\\\\rangle\\\\!\\\\langle i|`.\n\n Additional Information:\n The ``dims`` kwarg can be an integer or an iterable of integers.\n\n * ``Iterable`` -- the subsystem dimensions are the values in the list\n with the total number of subsystems given by the length of the list.\n\n * ``Int`` -- the integer specifies the total dimension of the\n state. If it is a power of two the state will be initialized\n as an N-qubit state. 
If it is not a power of two the state\n will have a single d-dimensional subsystem.\n \"\"\"\n size = np.product(dims)\n state = np.zeros((size, size), dtype=complex)\n state[i, i] = 1.0\n return DensityMatrix(state, dims=dims)\n\n @classmethod\n def from_instruction(cls, instruction):\n \"\"\"Return the output density matrix of an instruction.\n\n The statevector is initialized in the state :math:`|{0,\\\\ldots,0}\\\\rangle` of\n the same number of qubits as the input instruction or circuit, evolved\n by the input instruction, and the output statevector returned.\n\n Args:\n instruction (qiskit.circuit.Instruction or QuantumCircuit): instruction or circuit\n\n Returns:\n DensityMatrix: the final density matrix.\n\n Raises:\n QiskitError: if the instruction contains invalid instructions for\n density matrix simulation.\n \"\"\"\n # Convert circuit to an instruction\n if isinstance(instruction, QuantumCircuit):\n instruction = instruction.to_instruction()\n # Initialize an the statevector in the all |0> state\n num_qubits = instruction.num_qubits\n init = np.zeros((2**num_qubits, 2**num_qubits), dtype=complex)\n init[0, 0] = 1\n vec = DensityMatrix(init, dims=num_qubits * (2, ))\n vec._append_instruction(instruction)\n return vec\n\n def to_dict(self, decimals=None):\n r\"\"\"Convert the density matrix to dictionary form.\n\n This dictionary representation uses a Ket-like notation where the\n dictionary keys are qudit strings for the subsystem basis vectors.\n If any subsystem has a dimension greater than 10 comma delimiters are\n inserted between integers so that subsystems can be distinguished.\n\n Args:\n decimals (None or int): the number of decimal places to round\n values. If None no rounding is done\n (Default: None).\n\n Returns:\n dict: the dictionary form of the DensityMatrix.\n\n Examples:\n\n The ket-form of a 2-qubit density matrix\n :math:`rho = |-\\rangle\\!\\langle -|\\otimes |0\\rangle\\!\\langle 0|`\n\n .. jupyter-execute::\n\n from qiskit.quantum_info import DensityMatrix\n\n rho = DensityMatrix.from_label('-0')\n print(rho.to_dict())\n\n For non-qubit subsystems the integer range can go from 0 to 9. For\n example in a qutrit system\n\n .. jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import DensityMatrix\n\n mat = np.zeros((9, 9))\n mat[0, 0] = 0.25\n mat[3, 3] = 0.25\n mat[6, 6] = 0.25\n mat[-1, -1] = 0.25\n rho = DensityMatrix(mat, dims=(3, 3))\n print(rho.to_dict())\n\n For large subsystem dimensions delimeters are required. The\n following example is for a 20-dimensional system consisting of\n a qubit and 10-dimensional qudit.\n\n .. 
jupyter-execute::\n\n import numpy as np\n from qiskit.quantum_info import DensityMatrix\n\n mat = np.zeros((2 * 10, 2 * 10))\n mat[0, 0] = 0.5\n mat[-1, -1] = 0.5\n rho = DensityMatrix(mat, dims=(2, 10))\n print(rho.to_dict())\n \"\"\"\n return self._matrix_to_dict(self.data,\n self._op_shape.dims_l(),\n decimals=decimals,\n string_labels=True)\n\n def _evolve_operator(self, other, qargs=None):\n \"\"\"Evolve density matrix by an operator\"\"\"\n # Get shape of output density matrix\n new_shape = self._op_shape.compose(other._op_shape, qargs=qargs)\n new_shape._dims_r = new_shape._dims_l\n new_shape._num_qargs_r = new_shape._num_qargs_l\n\n ret = copy.copy(self)\n if qargs is None:\n # Evolution on full matrix\n op_mat = other.data\n ret._data = np.dot(op_mat, self.data).dot(op_mat.T.conj())\n ret._op_shape = new_shape\n return ret\n\n # Reshape statevector and operator\n tensor = np.reshape(self.data, self._op_shape.tensor_shape)\n # Construct list of tensor indices of statevector to be contracted\n num_indices = len(self.dims())\n indices = [num_indices - 1 - qubit for qubit in qargs]\n # Left multiple by mat\n mat = np.reshape(other.data, other._op_shape.tensor_shape)\n tensor = Operator._einsum_matmul(tensor, mat, indices)\n # Right multiply by mat ** dagger\n adj = other.adjoint()\n mat_adj = np.reshape(adj.data, adj._op_shape.tensor_shape)\n tensor = Operator._einsum_matmul(tensor, mat_adj, indices, num_indices,\n True)\n # Replace evolved dimensions\n ret._data = np.reshape(tensor, new_shape.shape)\n ret._op_shape = new_shape\n return ret\n\n def _append_instruction(self, other, qargs=None):\n \"\"\"Update the current Statevector by applying an instruction.\"\"\"\n from qiskit.circuit.reset import Reset\n from qiskit.circuit.barrier import Barrier\n\n # Try evolving by a matrix operator (unitary-like evolution)\n mat = Operator._instruction_to_matrix(other)\n if mat is not None:\n self._data = self._evolve_operator(Operator(mat), qargs=qargs).data\n return\n\n # Special instruction types\n if isinstance(other, Reset):\n self._data = self.reset(qargs)._data\n return\n if isinstance(other, Barrier):\n return\n\n # Otherwise try evolving by a Superoperator\n chan = SuperOp._instruction_to_superop(other)\n if chan is not None:\n # Evolve current state by the superoperator\n self._data = chan._evolve(self, qargs=qargs).data\n return\n # If the instruction doesn't have a matrix defined we use its\n # circuit decomposition definition if it exists, otherwise we\n # cannot compose this gate and raise an error.\n if other.definition is None:\n raise QiskitError('Cannot apply Instruction: {}'.format(\n other.name))\n if not isinstance(other.definition, QuantumCircuit):\n raise QiskitError('{} instruction definition is {}; expected QuantumCircuit'.format(\n other.name, type(other.definition)))\n for instr, qregs, cregs in other.definition:\n if cregs:\n raise QiskitError(\n 'Cannot apply instruction with classical registers: {}'.\n format(instr.name))\n # Get the integer position of the flat register\n if qargs is None:\n new_qargs = [tup.index for tup in qregs]\n else:\n new_qargs = [qargs[tup.index] for tup in qregs]\n self._append_instruction(instr, qargs=new_qargs)\n\n def _evolve_instruction(self, obj, qargs=None):\n \"\"\"Return a new statevector by applying an instruction.\"\"\"\n if isinstance(obj, QuantumCircuit):\n obj = obj.to_instruction()\n vec = copy.copy(self)\n vec._append_instruction(obj, qargs=qargs)\n return vec\n\n def to_statevector(self, atol=None, rtol=None):\n 
\"\"\"Return a statevector from a pure density matrix.\n\n Args:\n atol (float): Absolute tolerance for checking operation validity.\n rtol (float): Relative tolerance for checking operation validity.\n\n Returns:\n Statevector: The pure density matrix's corresponding statevector.\n Corresponds to the eigenvector of the only non-zero eigenvalue.\n\n Raises:\n QiskitError: if the state is not pure.\n \"\"\"\n if atol is None:\n atol = self.atol\n if rtol is None:\n rtol = self.rtol\n\n if not is_hermitian_matrix(self._data, atol=atol, rtol=rtol):\n raise QiskitError(\"Not a valid density matrix (non-hermitian).\")\n\n evals, evecs = np.linalg.eig(self._data)\n\n nonzero_evals = evals[abs(evals) > atol]\n if len(nonzero_evals) != 1 or not np.isclose(nonzero_evals[0], 1,\n atol=atol, rtol=rtol):\n raise QiskitError(\"Density matrix is not a pure state\")\n\n psi = evecs[:, np.argmax(evals)] # eigenvectors returned in columns.\n return Statevector(psi)\n"
] | [
[
"numpy.allclose",
"numpy.zeros",
"numpy.array2string",
"numpy.dot",
"numpy.conj",
"numpy.reshape",
"numpy.isclose",
"numpy.asarray",
"numpy.argmax",
"numpy.trace",
"numpy.product",
"numpy.kron",
"numpy.linalg.eig"
]
] |
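The `DensityMatrix` class stored above documents `from_label` and `probabilities` in its docstrings. A short sketch reproducing the docstring's 2-qubit example, assuming a Qiskit installation where `qiskit.quantum_info.DensityMatrix` is importable as the row's own imports indicate:

```python
from qiskit.quantum_info import DensityMatrix

# rho = |+><+| (x) |0><0|, built from Pauli eigenstate labels (qubit 0 is the rightmost label).
rho = DensityMatrix.from_label('+0')

print(rho.probabilities())      # probabilities over both qubits
print(rho.probabilities([0]))   # marginal for qubit 0 -> [1.0, 0.0]
print(rho.probabilities([1]))   # marginal for qubit 1 -> [0.5, 0.5]
print(rho.purity())             # ~1 for a pure state (returned as a trace)
```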
alanhdu/metrics | [
"b168272eaf1ff08b9447e75338753f9c2abf0859"
] | [
"tests/classification/test_inputs.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pytest\nimport torch\nfrom torch import Tensor, rand, randint, tensor\n\nfrom tests.classification.inputs import Input\nfrom tests.classification.inputs import _input_binary as _bin\nfrom tests.classification.inputs import _input_binary_prob as _bin_prob\nfrom tests.classification.inputs import _input_multiclass as _mc\nfrom tests.classification.inputs import _input_multiclass_prob as _mc_prob\nfrom tests.classification.inputs import _input_multidim_multiclass as _mdmc\nfrom tests.classification.inputs import _input_multidim_multiclass_prob as _mdmc_prob\nfrom tests.classification.inputs import _input_multilabel as _ml\nfrom tests.classification.inputs import _input_multilabel_multidim as _mlmd\nfrom tests.classification.inputs import _input_multilabel_multidim_prob as _mlmd_prob\nfrom tests.classification.inputs import _input_multilabel_prob as _ml_prob\nfrom tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES, THRESHOLD\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.data import select_topk, to_onehot\nfrom torchmetrics.utilities.enums import DataType\n\ntorch.manual_seed(42)\n\n# Some additional inputs to test on\n_ml_prob_half = Input(_ml_prob.preds.half(), _ml_prob.target)\n\n_mc_prob_2cls_preds = rand(NUM_BATCHES, BATCH_SIZE, 2)\n_mc_prob_2cls_preds /= _mc_prob_2cls_preds.sum(dim=2, keepdim=True)\n_mc_prob_2cls = Input(_mc_prob_2cls_preds, randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)))\n\n_mdmc_prob_many_dims_preds = rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM, EXTRA_DIM)\n_mdmc_prob_many_dims_preds /= _mdmc_prob_many_dims_preds.sum(dim=2, keepdim=True)\n_mdmc_prob_many_dims = Input(\n _mdmc_prob_many_dims_preds,\n randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM, EXTRA_DIM)),\n)\n\n_mdmc_prob_2cls_preds = rand(NUM_BATCHES, BATCH_SIZE, 2, EXTRA_DIM)\n_mdmc_prob_2cls_preds /= _mdmc_prob_2cls_preds.sum(dim=2, keepdim=True)\n_mdmc_prob_2cls = Input(_mdmc_prob_2cls_preds, randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)))\n\n# Some utils\nT = Tensor\n\n\ndef _idn(x):\n return x\n\n\ndef _usq(x):\n return x.unsqueeze(-1)\n\n\ndef _thrs(x):\n return x >= THRESHOLD\n\n\ndef _rshp1(x):\n return x.reshape(x.shape[0], -1)\n\n\ndef _rshp2(x):\n return x.reshape(x.shape[0], x.shape[1], -1)\n\n\ndef _onehot(x):\n return to_onehot(x, NUM_CLASSES)\n\n\ndef _onehot2(x):\n return to_onehot(x, 2)\n\n\ndef _top1(x):\n return select_topk(x, 1)\n\n\ndef _top2(x):\n return select_topk(x, 2)\n\n\n# To avoid ugly black line wrapping\ndef _ml_preds_tr(x):\n return _rshp1(_thrs(x))\n\n\ndef _onehot_rshp1(x):\n return _onehot(_rshp1(x))\n\n\ndef _onehot2_rshp1(x):\n return _onehot2(_rshp1(x))\n\n\ndef _top1_rshp2(x):\n return _top1(_rshp2(x))\n\n\ndef _top2_rshp2(x):\n return _top2(_rshp2(x))\n\n\ndef _probs_to_mc_preds_tr(x):\n return _onehot2(_thrs(x))\n\n\ndef _mlmd_prob_to_mc_preds_tr(x):\n return 
_onehot2(_rshp1(_thrs(x)))\n\n\n########################\n# Test correct inputs\n########################\n\n\[email protected](\n \"inputs, num_classes, is_multiclass, top_k, exp_mode, post_preds, post_target\",\n [\n #############################\n # Test usual expected cases\n (_bin, None, False, None, \"multi-class\", _usq, _usq),\n (_bin, 1, False, None, \"multi-class\", _usq, _usq),\n (_bin_prob, None, None, None, \"binary\", lambda x: _usq(_thrs(x)), _usq),\n (_ml_prob, None, None, None, \"multi-label\", _thrs, _idn),\n (_ml, None, False, None, \"multi-dim multi-class\", _idn, _idn),\n (_ml_prob, None, None, None, \"multi-label\", _ml_preds_tr, _rshp1),\n (_ml_prob, None, None, 2, \"multi-label\", _top2, _rshp1),\n (_mlmd, None, False, None, \"multi-dim multi-class\", _rshp1, _rshp1),\n (_mc, NUM_CLASSES, None, None, \"multi-class\", _onehot, _onehot),\n (_mc_prob, None, None, None, \"multi-class\", _top1, _onehot),\n (_mc_prob, None, None, 2, \"multi-class\", _top2, _onehot),\n (_mdmc, NUM_CLASSES, None, None, \"multi-dim multi-class\", _onehot, _onehot),\n (_mdmc_prob, None, None, None, \"multi-dim multi-class\", _top1_rshp2, _onehot),\n (_mdmc_prob, None, None, 2, \"multi-dim multi-class\", _top2_rshp2, _onehot),\n (_mdmc_prob_many_dims, None, None, None, \"multi-dim multi-class\", _top1_rshp2, _onehot_rshp1),\n (_mdmc_prob_many_dims, None, None, 2, \"multi-dim multi-class\", _top2_rshp2, _onehot_rshp1),\n ###########################\n # Test some special cases\n # Make sure that half precision works, i.e. is converted to full precision\n (_ml_prob_half, None, None, None, \"multi-label\", lambda x: _ml_preds_tr(x.float()), _rshp1),\n # Binary as multiclass\n (_bin, None, None, None, \"multi-class\", _onehot2, _onehot2),\n # Binary probs as multiclass\n (_bin_prob, None, True, None, \"binary\", _probs_to_mc_preds_tr, _onehot2),\n # Multilabel as multiclass\n (_ml, None, True, None, \"multi-dim multi-class\", _onehot2, _onehot2),\n # Multilabel probs as multiclass\n (_ml_prob, None, True, None, \"multi-label\", _probs_to_mc_preds_tr, _onehot2),\n # Multidim multilabel as multiclass\n (_mlmd, None, True, None, \"multi-dim multi-class\", _onehot2_rshp1, _onehot2_rshp1),\n # Multidim multilabel probs as multiclass\n (_mlmd_prob, None, True, None, \"multi-label\", _mlmd_prob_to_mc_preds_tr, _onehot2_rshp1),\n # Multiclass prob with 2 classes as binary\n (_mc_prob_2cls, None, False, None, \"multi-class\", lambda x: _top1(x)[:, [1]], _usq),\n # Multi-dim multi-class with 2 classes as multi-label\n (_mdmc_prob_2cls, None, False, None, \"multi-dim multi-class\", lambda x: _top1(x)[:, 1], _idn),\n ],\n)\ndef test_usual_cases(inputs, num_classes, is_multiclass, top_k, exp_mode, post_preds, post_target):\n\n def __get_data_type_enum(str_exp_mode):\n return next(DataType[n] for n in dir(DataType) if DataType[n] == str_exp_mode)\n\n for exp_mode in (exp_mode, __get_data_type_enum(exp_mode)):\n preds_out, target_out, mode = _input_format_classification(\n preds=inputs.preds[0],\n target=inputs.target[0],\n threshold=THRESHOLD,\n num_classes=num_classes,\n is_multiclass=is_multiclass,\n top_k=top_k,\n )\n\n assert mode == exp_mode\n assert torch.equal(preds_out, post_preds(inputs.preds[0]).int())\n assert torch.equal(target_out, post_target(inputs.target[0]).int())\n\n # Test that things work when batch_size = 1\n preds_out, target_out, mode = _input_format_classification(\n preds=inputs.preds[0][[0], ...],\n target=inputs.target[0][[0], ...],\n threshold=THRESHOLD,\n num_classes=num_classes,\n 
is_multiclass=is_multiclass,\n top_k=top_k,\n )\n\n assert mode == exp_mode\n assert torch.equal(preds_out, post_preds(inputs.preds[0][[0], ...]).int())\n assert torch.equal(target_out, post_target(inputs.target[0][[0], ...]).int())\n\n\n# Test that threshold is correctly applied\ndef test_threshold():\n target = T([1, 1, 1]).int()\n preds_probs = T([0.5 - 1e-5, 0.5, 0.5 + 1e-5])\n\n preds_probs_out, _, _ = _input_format_classification(preds_probs, target, threshold=0.5)\n\n assert torch.equal(tensor([0, 1, 1], dtype=torch.int), preds_probs_out.squeeze().int())\n\n\n########################################################################\n# Test incorrect inputs\n########################################################################\n\n\[email protected](\"threshold\", [-0.5, 0.0, 1.0, 1.5])\ndef test_incorrect_threshold(threshold):\n preds, target = rand(size=(7, )), randint(high=2, size=(7, ))\n with pytest.raises(ValueError):\n _input_format_classification(preds, target, threshold=threshold)\n\n\[email protected](\n \"preds, target, num_classes, is_multiclass\",\n [\n # Target not integer\n (randint(high=2, size=(7, )), randint(high=2, size=(7, )).float(), None, None),\n # Target negative\n (randint(high=2, size=(7, )), -randint(high=2, size=(7, )), None, None),\n # Preds negative integers\n (-randint(high=2, size=(7, )), randint(high=2, size=(7, )), None, None),\n # Negative probabilities\n (-rand(size=(7, )), randint(high=2, size=(7, )), None, None),\n # is_multiclass=False and target > 1\n (rand(size=(7, )), randint(low=2, high=4, size=(7, )), None, False),\n # is_multiclass=False and preds integers with > 1\n (randint(low=2, high=4, size=(7, )), randint(high=2, size=(7, )), None, False),\n # Wrong batch size\n (randint(high=2, size=(8, )), randint(high=2, size=(7, )), None, None),\n # Completely wrong shape\n (randint(high=2, size=(7, )), randint(high=2, size=(7, 4)), None, None),\n # Same #dims, different shape\n (randint(high=2, size=(7, 3)), randint(high=2, size=(7, 4)), None, None),\n # Same shape and preds floats, target not binary\n (rand(size=(7, 3)), randint(low=2, high=4, size=(7, 3)), None, None),\n # #dims in preds = 1 + #dims in target, C shape not second or last\n (rand(size=(7, 3, 4, 3)), randint(high=4, size=(7, 3, 3)), None, None),\n # #dims in preds = 1 + #dims in target, preds not float\n (randint(high=2, size=(7, 3, 3, 4)), randint(high=4, size=(7, 3, 3)), None, None),\n # is_multiclass=False, with C dimension > 2\n (_mc_prob.preds[0], randint(high=2, size=(BATCH_SIZE, )), None, False),\n # Probs of multiclass preds do not sum up to 1\n (rand(size=(7, 3, 5)), randint(high=2, size=(7, 5)), None, None),\n # Max target larger or equal to C dimension\n (_mc_prob.preds[0], randint(low=NUM_CLASSES + 1, high=100, size=(BATCH_SIZE, )), None, None),\n # C dimension not equal to num_classes\n (_mc_prob.preds[0], _mc_prob.target[0], NUM_CLASSES + 1, None),\n # Max target larger than num_classes (with #dim preds = 1 + #dims target)\n (_mc_prob.preds[0], randint(low=NUM_CLASSES + 1, high=100, size=(BATCH_SIZE, NUM_CLASSES)), 4, None),\n # Max target larger than num_classes (with #dim preds = #dims target)\n (randint(high=4, size=(7, 3)), randint(low=5, high=7, size=(7, 3)), 4, None),\n # Max preds larger than num_classes (with #dim preds = #dims target)\n (randint(low=5, high=7, size=(7, 3)), randint(high=4, size=(7, 3)), 4, None),\n # Num_classes=1, but is_multiclass not false\n (randint(high=2, size=(7, )), randint(high=2, size=(7, )), 1, None),\n # is_multiclass=False, 
but implied class dimension (for multi-label, from shape) != num_classes\n (randint(high=2, size=(7, 3, 3)), randint(high=2, size=(7, 3, 3)), 4, False),\n # Multilabel input with implied class dimension != num_classes\n (rand(size=(7, 3, 3)), randint(high=2, size=(7, 3, 3)), 4, False),\n # Multilabel input with is_multiclass=True, but num_classes != 2 (or None)\n (rand(size=(7, 3)), randint(high=2, size=(7, 3)), 4, True),\n # Binary input, num_classes > 2\n (rand(size=(7, )), randint(high=2, size=(7, )), 4, None),\n # Binary input, num_classes == 2 and is_multiclass not True\n (rand(size=(7, )), randint(high=2, size=(7, )), 2, None),\n (rand(size=(7, )), randint(high=2, size=(7, )), 2, False),\n # Binary input, num_classes == 1 and is_multiclass=True\n (rand(size=(7, )), randint(high=2, size=(7, )), 1, True),\n ],\n)\ndef test_incorrect_inputs(preds, target, num_classes, is_multiclass):\n with pytest.raises(ValueError):\n _input_format_classification(\n preds=preds, target=target, threshold=THRESHOLD, num_classes=num_classes, is_multiclass=is_multiclass\n )\n\n\[email protected](\n \"preds, target, num_classes, is_multiclass, top_k\",\n [\n # Topk set with non (md)mc or ml prob data\n (_bin.preds[0], _bin.target[0], None, None, 2),\n (_bin_prob.preds[0], _bin_prob.target[0], None, None, 2),\n (_mc.preds[0], _mc.target[0], None, None, 2),\n (_ml.preds[0], _ml.target[0], None, None, 2),\n (_mlmd.preds[0], _mlmd.target[0], None, None, 2),\n (_mdmc.preds[0], _mdmc.target[0], None, None, 2),\n # top_k = 0\n (_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, None, 0),\n # top_k = float\n (_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, None, 0.123),\n # top_k =2 with 2 classes, is_multiclass=False\n (_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, False, 2),\n # top_k = number of classes (C dimension)\n (_mc_prob.preds[0], _mc_prob.target[0], None, None, NUM_CLASSES),\n # is_multiclass = True for ml prob inputs, top_k set\n (_ml_prob.preds[0], _ml_prob.target[0], None, True, 2),\n # top_k = num_classes for ml prob inputs\n (_ml_prob.preds[0], _ml_prob.target[0], None, True, NUM_CLASSES),\n ],\n)\ndef test_incorrect_inputs_topk(preds, target, num_classes, is_multiclass, top_k):\n with pytest.raises(ValueError):\n _input_format_classification(\n preds=preds,\n target=target,\n threshold=THRESHOLD,\n num_classes=num_classes,\n is_multiclass=is_multiclass,\n top_k=top_k,\n )\n"
] | [
[
"torch.manual_seed",
"torch.rand",
"torch.tensor",
"torch.randint"
]
] |
Naouali/holbertonschool-machine_learning | [
"60b7d50cd385bdf9c1372b6004308eec408d2670"
] | [
"supervised_learning/0x05-regularization/3-l2_reg_create_layer.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nL2 regularization layer\n\"\"\"\n\n\nimport tensorflow as tf\n\n\ndef l2_reg_create_layer(prev, n, activation, lambtha):\n \"\"\"\n l2 regularization layer\n \"\"\"\n reg = tf.contrib.layers.l2_regularizer(scale=lambtha)\n w = tf.contrib.layers.variance_scaling_initializer(mode='FAN_AVG')\n layer = tf.layers.Dense(\n units=n,\n kernel_initializer=w,\n kernel_regularizer=reg,\n activation=activation)\n return layer(prev)\n"
] | [
[
"tensorflow.layers.Dense",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.contrib.layers.variance_scaling_initializer"
]
] |
d-v-b/napari | [
"f0ab04af8bf3854325af1e44b5214c4710cab980"
] | [
"napari/layers/shapes/_shapes_models/rectangle.py"
] | [
"import numpy as np\nfrom .shape import Shape\nfrom .._shapes_utils import find_corners, rectangle_to_box\n\n\nclass Rectangle(Shape):\n \"\"\"Class for a single rectangle\n\n Parameters\n ----------\n data : (4, D) or (2, 2) array\n Either a (2, 2) array specifying the two corners of an axis aligned\n rectangle, or a (4, D) array specifying the four corners of a bounding\n box that contains the rectangle. These need not be axis aligned.\n edge_width : float\n thickness of lines and edges.\n edge_color : str | tuple\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3 or\n 4 elements.\n face_color : str | tuple\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3 or\n 4 elements.\n opacity : float\n Opacity of the shape, must be between 0 and 1.\n z_index : int\n Specifier of z order priority. Shapes with higher z order are displayed\n ontop of others.\n dims_order : (D,) list\n Order that the dimensions are to be rendered in.\n \"\"\"\n\n def __init__(\n self,\n data,\n *,\n edge_width=1,\n edge_color='black',\n face_color='white',\n opacity=1,\n z_index=0,\n dims_order=None,\n ndisplay=2,\n ):\n\n super().__init__(\n edge_width=edge_width,\n edge_color=edge_color,\n face_color=face_color,\n opacity=opacity,\n z_index=z_index,\n dims_order=dims_order,\n ndisplay=ndisplay,\n )\n\n self._closed = True\n self.data = data\n self.name = 'rectangle'\n\n @property\n def data(self):\n \"\"\"(4, D) array: rectangle vertices.\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n data = np.array(data).astype(float)\n\n if len(self.dims_order) != data.shape[1]:\n self._dims_order = list(range(data.shape[1]))\n\n if len(data) == 2 and data.shape[1] == 2:\n data = find_corners(data)\n\n if len(data) != 4:\n print(data)\n raise ValueError(\n f\"\"\"Data shape does not match a rectangle.\n Rectangle expects four corner vertices,\n {len(data)} provided.\"\"\"\n )\n\n self._data = data\n self._update_displayed_data()\n\n def _update_displayed_data(self):\n \"\"\"Update the data that is to be displayed.\"\"\"\n # Add four boundary lines and then two triangles for each\n self._set_meshes(self.data_displayed, face=False)\n self._face_vertices = self.data_displayed\n self._face_triangles = np.array([[0, 1, 2], [0, 2, 3]])\n self._box = rectangle_to_box(self.data_displayed)\n\n data_not_displayed = self.data[:, self.dims_not_displayed]\n self.slice_key = np.round(\n [\n np.min(data_not_displayed, axis=0),\n np.max(data_not_displayed, axis=0),\n ]\n ).astype('int')\n"
] | [
[
"numpy.array",
"numpy.max",
"numpy.min"
]
] |
banayoyo/yoolact | [
"9dd0ee01ce5d5d238f89fa0886e24627ec3ffbe6"
] | [
"eval.py"
] | [
"from data import COCODetection, get_label_map, MEANS, COLORS\nfrom yolact import Yolact\nfrom utils.augmentations import BaseTransform, FastBaseTransform, Resize\nfrom utils.functions import MovingAverage, ProgressBar\nfrom layers.box_utils import jaccard, center_size\nfrom utils import timer\nfrom utils.functions import SavePath\nfrom layers.output_utils import postprocess, undo_image_transformation\nimport pycocotools\n\nfrom data import cfg, set_cfg, set_dataset\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport argparse\nimport time\nimport random\nimport cProfile\nimport pickle\nimport json\nimport os\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom collections import OrderedDict\nfrom PIL import Image\n\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef parse_args(argv=None):\n parser = argparse.ArgumentParser(\n description='YOLACT COCO Evaluation')\n parser.add_argument('--trained_model',\n default='weights/ssd300_mAP_77.43_v2.pth', type=str,\n help='Trained state_dict file path to open. If \"interrupt\", this will open the interrupt file.')\n parser.add_argument('--top_k', default=5, type=int,\n help='Further restrict the number of predictions to parse')\n parser.add_argument('--cuda', default=True, type=str2bool,\n help='Use cuda to evaulate model')\n parser.add_argument('--fast_nms', default=True, type=str2bool,\n help='Whether to use a faster, but not entirely correct version of NMS.')\n parser.add_argument('--display_masks', default=True, type=str2bool,\n help='Whether or not to display masks over bounding boxes')\n parser.add_argument('--display_bboxes', default=True, type=str2bool,\n help='Whether or not to display bboxes around masks')\n parser.add_argument('--display_text', default=True, type=str2bool,\n help='Whether or not to display text (class [score])')\n parser.add_argument('--display_scores', default=True, type=str2bool,\n help='Whether or not to display scores in addition to classes')\n parser.add_argument('--display', dest='display', action='store_true',\n help='Display qualitative results instead of quantitative ones.')\n parser.add_argument('--shuffle', dest='shuffle', action='store_true',\n help='Shuffles the images when displaying them. Doesn\\'t have much of an effect when display is off though.')\n parser.add_argument('--ap_data_file', default='results/ap_data.pkl', type=str,\n help='In quantitative mode, the file to save detections before calculating mAP.')\n parser.add_argument('--resume', dest='resume', action='store_true',\n help='If display not set, this resumes mAP calculations from the ap_data_file.')\n parser.add_argument('--max_images', default=-1, type=int,\n help='The maximum number of images from the dataset to consider. 
Use -1 for all.')\n parser.add_argument('--output_coco_json', dest='output_coco_json', action='store_true',\n help='If display is not set, instead of processing IoU values, this just dumps detections into the coco json file.')\n parser.add_argument('--bbox_det_file', default='results/bbox_detections.json', type=str,\n help='The output file for coco bbox results if --coco_results is set.')\n parser.add_argument('--mask_det_file', default='results/mask_detections.json', type=str,\n help='The output file for coco mask results if --coco_results is set.')\n parser.add_argument('--config', default=None,\n help='The config object to use.')\n parser.add_argument('--output_web_json', dest='output_web_json', action='store_true',\n help='If display is not set, instead of processing IoU values, this dumps detections for usage with the detections viewer web thingy.')\n parser.add_argument('--web_det_path', default='web/dets/', type=str,\n help='If output_web_json is set, this is the path to dump detections into.')\n parser.add_argument('--no_bar', dest='no_bar', action='store_true',\n help='Do not output the status bar. This is useful for when piping to a file.')\n parser.add_argument('--display_lincomb', default=False, type=str2bool,\n help='If the config uses lincomb masks, output a visualization of how those masks are created.')\n parser.add_argument('--benchmark', default=False, dest='benchmark', action='store_true',\n help='Equivalent to running display mode but without displaying an image.')\n parser.add_argument('--no_sort', default=False, dest='no_sort', action='store_true',\n help='Do not sort images by hashed image ID.')\n parser.add_argument('--seed', default=None, type=int,\n help='The seed to pass into random.seed. Note: this is only really for the shuffle and does not (I think) affect cuda stuff.')\n parser.add_argument('--mask_proto_debug', default=False, dest='mask_proto_debug', action='store_true',\n help='Outputs stuff for scripts/compute_mask.py.')\n parser.add_argument('--no_crop', default=False, dest='crop', action='store_false',\n help='Do not crop output masks with the predicted bounding box.')\n parser.add_argument('--image', default=None, type=str,\n help='A path to an image to use for display.')\n parser.add_argument('--images', default=None, type=str,\n help='An input folder of images and output folder to save detected images. Should be in the format input->output.')\n parser.add_argument('--video', default=None, type=str,\n help='A path to a video to evaluate on. Passing in a number will use that index webcam.')\n parser.add_argument('--video_multiframe', default=1, type=int,\n help='The number of frames to evaluate in parallel to make videos play at higher fps.')\n parser.add_argument('--score_threshold', default=0, type=float,\n help='Detections with a score under this threshold will not be considered. This currently only works in display mode.')\n parser.add_argument('--dataset', default=None, type=str,\n help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')\n parser.add_argument('--detect', default=False, dest='detect', action='store_true',\n help='Don\\'t evauluate the mask branch at all and only do object detection. 
This only works for --display and --benchmark.')\n\n parser.set_defaults(no_bar=False, display=False, resume=False, output_coco_json=False, output_web_json=False, shuffle=False,\n benchmark=False, no_sort=False, no_hash=False, mask_proto_debug=False, crop=True, detect=False)\n\n global args\n args = parser.parse_args(argv)\n\n if args.output_web_json:\n args.output_coco_json = True\n \n if args.seed is not None:\n random.seed(args.seed)\n\niou_thresholds = [x / 100 for x in range(50, 100, 5)]\ncoco_cats = {} # Call prep_coco_cats to fill this\ncoco_cats_inv = {}\ncolor_cache = defaultdict(lambda: {})\n\ndef prep_display(dets_out, img, h, w, undo_transform=True, class_color=False, mask_alpha=0.45):\n \"\"\"\n Note: If undo_transform=False then im_h and im_w are allowed to be None.\n \"\"\"\n if undo_transform:\n img_numpy = undo_image_transformation(img, w, h)\n img_gpu = torch.Tensor(img_numpy).cuda()\n else:\n img_gpu = img / 255.0\n h, w, _ = img.shape\n \n with timer.env('Postprocess'):\n t = postprocess(dets_out, w, h, visualize_lincomb = args.display_lincomb,\n crop_masks = args.crop,\n score_threshold = args.score_threshold)\n #torch.cuda.synchronize()\n\n with timer.env('Copy'):\n if cfg.eval_mask_branch:\n # Masks are drawn on the GPU, so don't copy\n masks = t[3][:args.top_k]\n classes, scores, boxes = [x[:args.top_k].cpu().numpy() for x in t[:3]]\n\n num_dets_to_consider = min(args.top_k, classes.shape[0])\n for j in range(num_dets_to_consider):\n if scores[j] < args.score_threshold:\n num_dets_to_consider = j\n break\n \n if num_dets_to_consider == 0:\n # No detections found so just output the original image\n return (img_gpu * 255).byte().cpu().numpy()\n\n # Quick and dirty lambda for selecting the color for a particular index\n # Also keeps track of a per-gpu color cache for maximum speed\n def get_color(j, on_gpu=None):\n global color_cache\n color_idx = (classes[j] * 5 if class_color else j * 5) % len(COLORS)\n \n if on_gpu is not None and color_idx in color_cache[on_gpu]:\n return color_cache[on_gpu][color_idx]\n else:\n color = COLORS[color_idx]\n if not undo_transform:\n # The image might come in as RGB or BRG, depending\n color = (color[2], color[1], color[0])\n if on_gpu is not None:\n color = torch.Tensor(color).to(on_gpu).float() / 255.\n color_cache[on_gpu][color_idx] = color\n return color\n\n # First, draw the masks on the GPU where we can do it really fast\n # Beware: very fast but possibly unintelligible mask-drawing code ahead\n # I wish I had access to OpenGL or Vulkan but alas, I guess Pytorch tensor operations will have to suffice\n if args.display_masks and cfg.eval_mask_branch:\n # After this, mask is of size [num_dets, h, w, 1]\n masks = masks[:num_dets_to_consider, :, :, None]\n \n# ccc = torch.tensor(np.reshape(get_color(0, on_gpu=img.device.index),(1,1,1,3)))\n# for ii in range(1,3):\n# aaa = torch.tensor(np.reshape(get_color(ii, on_gpu=img.device.index),(1,1,1,3)))\n## bbb = torch.tensor(np.reshape(get_color(1, on_gpu=img.device.index),(1,1,1,3)))\n# ccc = torch.cat([ccc,aaa],dim=0)\n \n # Prepare the RGB images for each mask given their color (size [num_dets, h, w, 1])\n colors1 = torch.cat([torch.tensor(np.reshape(get_color(j, on_gpu=img.device.index),(1, 1, 1, 3))) for j in range(num_dets_to_consider)], dim=0)\n\n colors = colors1.float()\n masks_color = masks.repeat(1, 1, 1, 3) * colors * mask_alpha\n\n # This is 1 everywhere except for 1-mask_alpha where the mask is\n inv_alph_masks = masks * (-mask_alpha) + 1\n \n # I did the math for this on pen 
and paper. This whole block should be equivalent to:\n # for j in range(num_dets_to_consider):\n # img_gpu = img_gpu * inv_alph_masks[j] + masks_color[j]\n masks_color_summand = masks_color[0]\n if num_dets_to_consider > 1:\n inv_alph_cumul = inv_alph_masks[:(num_dets_to_consider-1)].cumprod(dim=0)\n masks_color_cumul = masks_color[1:] * inv_alph_cumul\n masks_color_summand += masks_color_cumul.sum(dim=0)\n\n img_gpu = img_gpu * inv_alph_masks.prod(dim=0) + masks_color_summand\n \n # Then draw the stuff that needs to be done on the cpu\n # Note, make sure this is a uint8 tensor or opencv will not anti alias text for whatever reason\n img_numpy = (img_gpu * 255).byte().cpu().numpy()\n \n if args.display_text or args.display_bboxes:\n for j in reversed(range(num_dets_to_consider)):\n x1, y1, x2, y2 = boxes[j, :]\n color = get_color(j)\n score = scores[j]\n\n if args.display_bboxes:\n cv2.rectangle(img_numpy, (x1, y1), (x2, y2), color, 1)\n\n if args.display_text:\n _class = cfg.dataset.class_names[classes[j]]\n text_str = '%s: %.2f' % (_class, score) if args.display_scores else _class\n\n font_face = cv2.FONT_HERSHEY_DUPLEX\n font_scale = 0.6\n font_thickness = 1\n\n text_w, text_h = cv2.getTextSize(text_str, font_face, font_scale, font_thickness)[0]\n\n text_pt = (x1, y1 - 3)\n text_color = [255, 255, 255]\n\n cv2.rectangle(img_numpy, (x1, y1), (x1 + text_w, y1 - text_h - 4), color, -1)\n cv2.putText(img_numpy, text_str, text_pt, font_face, font_scale, text_color, font_thickness, cv2.LINE_AA)\n \n return img_numpy\n\ndef prep_benchmark(dets_out, h, w):\n with timer.env('Postprocess'):\n t = postprocess(dets_out, w, h, crop_masks=args.crop, score_threshold=args.score_threshold)\n\n with timer.env('Copy'):\n classes, scores, boxes, masks = [x[:args.top_k].cpu().numpy() for x in t]\n \n #with timer.env('Sync'):\n # Just in case\n #torch.cuda.synchronize()\n\ndef prep_coco_cats():\n \"\"\" Prepare inverted table for category id lookup given a coco cats object. \"\"\"\n for coco_cat_id, transformed_cat_id_p1 in get_label_map().items():\n transformed_cat_id = transformed_cat_id_p1 - 1\n coco_cats[transformed_cat_id] = coco_cat_id\n coco_cats_inv[coco_cat_id] = transformed_cat_id\n\n\ndef get_coco_cat(transformed_cat_id):\n \"\"\" transformed_cat_id is [0,80) as indices in cfg.dataset.class_names \"\"\"\n return coco_cats[transformed_cat_id]\n\ndef get_transformed_cat(coco_cat_id):\n \"\"\" transformed_cat_id is [0,80) as indices in cfg.dataset.class_names \"\"\"\n return coco_cats_inv[coco_cat_id]\n\n\nclass Detections:\n\n def __init__(self):\n self.bbox_data = []\n self.mask_data = []\n\n def add_bbox(self, image_id:int, category_id:int, bbox:list, score:float):\n \"\"\" Note that bbox should be a list or tuple of (x1, y1, x2, y2) \"\"\"\n bbox = [bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1]]\n\n # Round to the nearest 10th to avoid huge file sizes, as COCO suggests\n bbox = [round(float(x)*10)/10 for x in bbox]\n\n self.bbox_data.append({\n 'image_id': int(image_id),\n 'category_id': get_coco_cat(int(category_id)),\n 'bbox': bbox,\n 'score': float(score)\n })\n\n def add_mask(self, image_id:int, category_id:int, segmentation:np.ndarray, score:float):\n \"\"\" The segmentation should be the full mask, the size of the image and with size [h, w]. 
\"\"\"\n rle = pycocotools.mask.encode(np.asfortranarray(segmentation.astype(np.uint8)))\n rle['counts'] = rle['counts'].decode('ascii') # json.dump doesn't like bytes strings\n\n self.mask_data.append({\n 'image_id': int(image_id),\n 'category_id': get_coco_cat(int(category_id)),\n 'segmentation': rle,\n 'score': float(score)\n })\n \n def dump(self):\n dump_arguments = [\n (self.bbox_data, args.bbox_det_file),\n (self.mask_data, args.mask_det_file)\n ]\n\n for data, path in dump_arguments:\n with open(path, 'w') as f:\n json.dump(data, f)\n \n def dump_web(self):\n \"\"\" Dumps it in the format for my web app. Warning: bad code ahead! \"\"\"\n config_outs = ['preserve_aspect_ratio', 'use_prediction_module',\n 'use_yolo_regressors', 'use_prediction_matching',\n 'train_masks']\n\n output = {\n 'info' : {\n 'Config': {key: getattr(cfg, key) for key in config_outs},\n }\n }\n\n image_ids = list(set([x['image_id'] for x in self.bbox_data]))\n image_ids.sort()\n image_lookup = {_id: idx for idx, _id in enumerate(image_ids)}\n\n output['images'] = [{'image_id': image_id, 'dets': []} for image_id in image_ids]\n\n # These should already be sorted by score with the way prep_metrics works.\n for bbox, mask in zip(self.bbox_data, self.mask_data):\n image_obj = output['images'][image_lookup[bbox['image_id']]]\n image_obj['dets'].append({\n 'score': bbox['score'],\n 'bbox': bbox['bbox'],\n 'category': cfg.dataset.class_names[get_transformed_cat(bbox['category_id'])],\n 'mask': mask['segmentation'],\n })\n\n with open(os.path.join(args.web_det_path, '%s.json' % cfg.name), 'w') as f:\n json.dump(output, f)\n \n\n \n\ndef mask_iou(mask1, mask2, iscrowd=False):\n \"\"\"\n Inputs inputs are matricies of size _ x N. Output is size _1 x _2.\n Note: if iscrowd is True, then mask2 should be the crowd.\n \"\"\"\n timer.start('Mask IoU')\n\n intersection = torch.matmul(mask1, mask2.t())\n area1 = torch.sum(mask1, dim=1).view(1, -1)\n area2 = torch.sum(mask2, dim=1).view(1, -1)\n union = (area1.t() + area2) - intersection\n\n if iscrowd:\n # Make sure to brodcast to the right dimension\n ret = intersection / area1.t()\n else:\n ret = intersection / union\n timer.stop('Mask IoU')\n return ret.cpu()\n\ndef bbox_iou(bbox1, bbox2, iscrowd=False):\n with timer.env('BBox IoU'):\n ret = jaccard(bbox1, bbox2, iscrowd)\n return ret.cpu()\n\ndef prep_metrics(ap_data, dets, img, gt, gt_masks, h, w, num_crowd, image_id, detections:Detections=None):\n \"\"\" Returns a list of APs for this image, with each element being for a class \"\"\"\n if not args.output_coco_json:\n with timer.env('Prepare gt'):\n gt_boxes = torch.Tensor(gt[:, :4])\n gt_boxes[:, [0, 2]] *= w\n gt_boxes[:, [1, 3]] *= h\n gt_classes = list(gt[:, 4].astype(int))\n gt_masks = torch.Tensor(gt_masks).view(-1, h*w)\n\n if num_crowd > 0:\n split = lambda x: (x[-num_crowd:], x[:-num_crowd])\n crowd_boxes , gt_boxes = split(gt_boxes)\n crowd_masks , gt_masks = split(gt_masks)\n crowd_classes, gt_classes = split(gt_classes)\n\n with timer.env('Postprocess'):\n classes, scores, boxes, masks = postprocess(dets, w, h, crop_masks=args.crop, score_threshold=args.score_threshold)\n\n if classes.size(0) == 0:\n return\n\n classes = list(classes.cpu().numpy().astype(int))\n scores = list(scores.cpu().numpy().astype(float))\n masks = masks.view(-1, h*w).cuda()\n boxes = boxes.cuda()\n\n\n if args.output_coco_json:\n with timer.env('JSON Output'):\n boxes = boxes.cpu().numpy()\n masks = masks.view(-1, h, w).cpu().numpy()\n for i in range(masks.shape[0]):\n # Make sure that 
the bounding box actually makes sense and a mask was produced\n if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] - boxes[i, 0]) > 0:\n detections.add_bbox(image_id, classes[i], boxes[i,:], scores[i])\n detections.add_mask(image_id, classes[i], masks[i,:,:], scores[i])\n return\n \n with timer.env('Eval Setup'):\n num_pred = len(classes)\n num_gt = len(gt_classes)\n\n mask_iou_cache = mask_iou(masks, gt_masks)\n bbox_iou_cache = bbox_iou(boxes.float(), gt_boxes.float())\n\n if num_crowd > 0:\n crowd_mask_iou_cache = mask_iou(masks, crowd_masks, iscrowd=True)\n crowd_bbox_iou_cache = bbox_iou(boxes.float(), crowd_boxes.float(), iscrowd=True)\n else:\n crowd_mask_iou_cache = None\n crowd_bbox_iou_cache = None\n\n iou_types = [\n ('box', lambda i,j: bbox_iou_cache[i, j].item(), lambda i,j: crowd_bbox_iou_cache[i,j].item()),\n ('mask', lambda i,j: mask_iou_cache[i, j].item(), lambda i,j: crowd_mask_iou_cache[i,j].item())\n ]\n\n timer.start('Main loop')\n for _class in set(classes + gt_classes):\n ap_per_iou = []\n num_gt_for_class = sum([1 for x in gt_classes if x == _class])\n \n for iouIdx in range(len(iou_thresholds)):\n iou_threshold = iou_thresholds[iouIdx]\n\n for iou_type, iou_func, crowd_func in iou_types:\n gt_used = [False] * len(gt_classes)\n \n ap_obj = ap_data[iou_type][iouIdx][_class]\n ap_obj.add_gt_positives(num_gt_for_class)\n\n for i in range(num_pred):\n if classes[i] != _class:\n continue\n \n max_iou_found = iou_threshold\n max_match_idx = -1\n for j in range(num_gt):\n if gt_used[j] or gt_classes[j] != _class:\n continue\n \n iou = iou_func(i, j)\n\n if iou > max_iou_found:\n max_iou_found = iou\n max_match_idx = j\n \n if max_match_idx >= 0:\n gt_used[max_match_idx] = True\n ap_obj.push(scores[i], True)\n else:\n # If the detection matches a crowd, we can just ignore it\n matched_crowd = False\n\n if num_crowd > 0:\n for j in range(len(crowd_classes)):\n if crowd_classes[j] != _class:\n continue\n \n iou = crowd_func(i, j)\n\n if iou > iou_threshold:\n matched_crowd = True\n break\n\n # All this crowd code so that we can make sure that our eval code gives the\n # same result as COCOEval. There aren't even that many crowd annotations to\n # begin with, but accuracy is of the utmost importance.\n if not matched_crowd:\n ap_obj.push(scores[i], False)\n timer.stop('Main loop')\n\n\nclass APDataObject:\n \"\"\"\n Stores all the information necessary to calculate the AP for one IoU and one class.\n Note: I type annotated this because why not.\n \"\"\"\n\n def __init__(self):\n self.data_points = []\n self.num_gt_positives = 0\n\n def push(self, score:float, is_true:bool):\n self.data_points.append((score, is_true))\n \n def add_gt_positives(self, num_positives:int):\n \"\"\" Call this once per image. \"\"\"\n self.num_gt_positives += num_positives\n\n def is_empty(self) -> bool:\n return len(self.data_points) == 0 and self.num_gt_positives == 0\n\n def get_ap(self) -> float:\n \"\"\" Warning: result not cached. \"\"\"\n\n if self.num_gt_positives == 0:\n return 0\n\n # Sort descending by score\n self.data_points.sort(key=lambda x: -x[0])\n\n precisions = []\n recalls = []\n num_true = 0\n num_false = 0\n\n # Compute the precision-recall curve. 
The x axis is recalls and the y axis precisions.\n for datum in self.data_points:\n # datum[1] is whether the detection a true or false positive\n if datum[1]: num_true += 1\n else: num_false += 1\n \n precision = num_true / (num_true + num_false)\n recall = num_true / self.num_gt_positives\n\n precisions.append(precision)\n recalls.append(recall)\n\n # Smooth the curve by computing [max(precisions[i:]) for i in range(len(precisions))]\n # Basically, remove any temporary dips from the curve.\n # At least that's what I think, idk. COCOEval did it so I do too.\n for i in range(len(precisions)-1, 0, -1):\n if precisions[i] > precisions[i-1]:\n precisions[i-1] = precisions[i]\n\n # Compute the integral of precision(recall) d_recall from recall=0->1 using fixed-length riemann summation with 101 bars.\n y_range = [0] * 101 # idx 0 is recall == 0.0 and idx 100 is recall == 1.00\n x_range = np.array([x / 100 for x in range(101)])\n recalls = np.array(recalls)\n\n # I realize this is weird, but all it does is find the nearest precision(x) for a given x in x_range.\n # Basically, if the closest recall we have to 0.01 is 0.009 this sets precision(0.01) = precision(0.009).\n # I approximate the integral this way, because that's how COCOEval does it.\n indices = np.searchsorted(recalls, x_range, side='left')\n for bar_idx, precision_idx in enumerate(indices):\n if precision_idx < len(precisions):\n y_range[bar_idx] = precisions[precision_idx]\n\n # Finally compute the riemann sum to get our integral.\n # avg([precision(x) for x in 0:0.01:1])\n return sum(y_range) / len(y_range)\n\ndef badhash(x):\n \"\"\"\n Just a quick and dirty hash function for doing a deterministic shuffle based on image_id.\n\n Source:\n https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key\n \"\"\"\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = ((x >> 16) ^ x) & 0xFFFFFFFF\n return x\n\ndef evalimage(net:Yolact, path:str, save_path:str=None):\n frame = torch.from_numpy(cv2.imread(path)).cpu().float()\n #reshape\n batch = FastBaseTransform()(frame.unsqueeze(0))\n preds = net(batch)\n #preds\n #ipdb> preds[0]['box'].shape\n #torch.Size([100, 4])\n #\n #ipdb> preds[0]['mask'].shape\n #torch.Size([100, 32])\n #\n #ipdb> preds[0]['class'].shape\n #torch.Size([100])\n #\n #ipdb> preds[0]['score'].shape\n #torch.Size([100])\n #\n #ipdb> preds[0]['proto'].shape\n #torch.Size([138, 138, 32])\n\n\n img_numpy = prep_display(preds, frame, None, None, undo_transform=False)\n \n if save_path is None:\n img_numpy = img_numpy[:, :, (2, 1, 0)]\n\n if save_path is None:\n plt.imshow(img_numpy)\n plt.title(path)\n plt.show()\n else:\n cv2.imwrite(save_path, img_numpy)\n\ndef evalimages(net:Yolact, input_folder:str, output_folder:str):\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n\n print()\n for p in Path(input_folder).glob('*'): \n path = str(p)\n name = os.path.basename(path)\n name = '.'.join(name.split('.')[:-1]) + '.png'\n out_path = os.path.join(output_folder, name)\n\n evalimage(net, path, out_path)\n print(path + ' -> ' + out_path)\n print('Done.')\n\nfrom multiprocessing.pool import ThreadPool\n\nclass CustomDataParallel(torch.nn.DataParallel):\n \"\"\" A Custom Data Parallel class that properly gathers lists of dictionaries. 
\"\"\"\n def gather(self, outputs, output_device):\n # Note that I don't actually want to convert everything to the output_device\n return sum(outputs, [])\n\ndef evalvideo(net:Yolact, path:str):\n # If the path is a digit, parse it as a webcam index\n if path.isdigit():\n vid = cv2.VideoCapture(int(path))\n else:\n vid = cv2.VideoCapture(path)\n \n if not vid.isOpened():\n print('Could not open video \"%s\"' % path)\n exit(-1)\n \n net = CustomDataParallel(net).cuda()\n transform = torch.nn.DataParallel(FastBaseTransform()).cuda()\n frame_times = MovingAverage()\n fps = 0\n # The 0.8 is to account for the overhead of time.sleep\n frame_time_target = 0.8 / vid.get(cv2.CAP_PROP_FPS)\n\n def cleanup_and_exit():\n print()\n pool.terminate()\n vid.release()\n cv2.destroyAllWindows()\n exit()\n\n def get_next_frame(vid):\n return [vid.read()[1] for _ in range(args.video_multiframe)]\n\n def transform_frame(frames):\n with torch.no_grad():\n frames = [torch.from_numpy(frame).cuda().float() for frame in frames]\n return frames, transform(torch.stack(frames, 0))\n\n def eval_network(inp):\n with torch.no_grad():\n frames, imgs = inp\n return frames, net(imgs)\n\n def prep_frame(inp):\n with torch.no_grad():\n frame, preds = inp\n return prep_display(preds, frame, None, None, undo_transform=False, class_color=True)\n\n extract_frame = lambda x, i: (x[0][i] if x[1][i] is None else x[0][i].to(x[1][i]['box'].device), [x[1][i]])\n\n # Prime the network on the first frame because I do some thread unsafe things otherwise\n print('Initializing model... ', end='')\n eval_network(transform_frame(get_next_frame(vid)))\n print('Done.')\n\n # For each frame the sequence of functions it needs to go through to be processed (in reversed order)\n sequence = [prep_frame, eval_network, transform_frame]\n pool = ThreadPool(processes=len(sequence) + args.video_multiframe)\n\n active_frames = []\n\n print()\n while vid.isOpened():\n start_time = time.time()\n\n # Start loading the next frames from the disk\n next_frames = pool.apply_async(get_next_frame, args=(vid,))\n \n # For each frame in our active processing queue, dispatch a job\n # for that frame using the current function in the sequence\n for frame in active_frames:\n frame['value'] = pool.apply_async(sequence[frame['idx']], args=(frame['value'],))\n \n # For each frame whose job was the last in the sequence (i.e. 
for all final outputs)\n for frame in active_frames:\n if frame['idx'] == 0:\n # Wait here so that the frame has time to process and so that the video plays at the proper speed\n time.sleep(frame_time_target)\n\n cv2.imshow(path, frame['value'].get())\n if cv2.waitKey(1) == 27: # Press Escape to close\n cleanup_and_exit()\n\n # Remove the finished frames from the processing queue\n active_frames = [x for x in active_frames if x['idx'] > 0]\n\n # Finish evaluating every frame in the processing queue and advanced their position in the sequence\n for frame in list(reversed(active_frames)):\n frame['value'] = frame['value'].get()\n frame['idx'] -= 1\n\n if frame['idx'] == 0:\n # Split this up into individual threads for prep_frame since it doesn't support batch size\n active_frames += [{'value': extract_frame(frame['value'], i), 'idx': 0} for i in range(1, args.video_multiframe)]\n frame['value'] = extract_frame(frame['value'], 0)\n\n \n # Finish loading in the next frames and add them to the processing queue\n active_frames.append({'value': next_frames.get(), 'idx': len(sequence)-1})\n \n # Compute FPS\n frame_times.add(time.time() - start_time)\n fps = args.video_multiframe / frame_times.get_avg()\n\n print('\\rAvg FPS: %.2f ' % fps, end='')\n \n cleanup_and_exit()\n\ndef savevideo(net:Yolact, in_path:str, out_path:str):\n\n vid = cv2.VideoCapture(in_path)\n\n target_fps = round(vid.get(cv2.CAP_PROP_FPS))\n frame_width = round(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = round(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n num_frames = round(vid.get(cv2.CAP_PROP_FRAME_COUNT))\n \n out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*\"mp4v\"), target_fps, (frame_width, frame_height))\n\n transform = FastBaseTransform()\n frame_times = MovingAverage()\n progress_bar = ProgressBar(30, num_frames)\n\n try:\n for i in range(num_frames):\n timer.reset()\n with timer.env('Video'):\n frame = torch.Tensor(vid.read()[1]).float().cuda()\n batch = transform(frame.unsqueeze(0))\n preds = net(batch)\n processed = prep_display(preds, frame, None, None, undo_transform=False, class_color=True)\n\n out.write(processed)\n \n if i > 1:\n frame_times.add(timer.total_time())\n fps = 1 / frame_times.get_avg()\n progress = (i+1) / num_frames * 100\n progress_bar.set_val(i+1)\n\n print('\\rProcessing Frames %s %6d / %6d (%5.2f%%) %5.2f fps '\n % (repr(progress_bar), i+1, num_frames, progress, fps), end='')\n except KeyboardInterrupt:\n print('Stopping early.')\n \n vid.release()\n out.release()\n print()\n\n\ndef evaluate(net:Yolact, dataset, train_mode=False):\n net.detect.use_fast_nms = args.fast_nms\n cfg.mask_proto_debug = args.mask_proto_debug\n\n if args.image is not None:\n if ':' in args.image:\n inp, out = args.image.split(':')\n evalimage(net, inp, out)\n else:\n evalimage(net, args.image)\n return\n elif args.images is not None:\n inp, out = args.images.split(':')\n evalimages(net, inp, out)\n return\n elif args.video is not None:\n if ':' in args.video:\n inp, out = args.video.split(':')\n savevideo(net, inp, out)\n else:\n evalvideo(net, args.video)\n return\n\n print('Others... 
\\n')\n\n frame_times = MovingAverage()\n dataset_size = len(dataset) if args.max_images < 0 else min(args.max_images, len(dataset))\n progress_bar = ProgressBar(30, dataset_size)\n\n\n if not args.display and not args.benchmark:\n # For each class and iou, stores tuples (score, isPositive)\n # Index ap_data[type][iouIdx][classIdx]\n ap_data = {\n 'box' : [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds],\n 'mask': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds]\n }\n detections = Detections()\n else:\n timer.disable('Load Data')\n\n dataset_indices = list(range(len(dataset)))\n \n if args.shuffle:\n random.shuffle(dataset_indices)\n elif not args.no_sort:\n # Do a deterministic shuffle based on the image ids\n #\n # I do this because on python 3.5 dictionary key order is *random*, while in 3.6 it's\n # the order of insertion. That means on python 3.6, the images come in the order they are in\n # in the annotations file. For some reason, the first images in the annotations file are\n # the hardest. To combat this, I use a hard-coded hash function based on the image ids\n # to shuffle the indices we use. That way, no matter what python version or how pycocotools\n # handles the data, we get the same result every time.\n hashed = [badhash(x) for x in dataset.ids]\n dataset_indices.sort(key=lambda x: hashed[x])\n\n dataset_indices = dataset_indices[:dataset_size]\n\n try:\n # Main eval loop\n for it, image_idx in enumerate(dataset_indices):\n timer.reset()\n\n with timer.env('Load Data'):\n img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(image_idx)\n\n # Test flag, do not upvote\n if cfg.mask_proto_debug:\n with open('scripts/info.txt', 'w') as f:\n f.write(str(dataset.ids[image_idx]))\n np.save('scripts/gt.npy', gt_masks)\n\n batch = Variable(img.unsqueeze(0))\n if args.cuda:\n batch = batch.cuda()\n\n with timer.env('Network Extra'):\n preds = net(batch)\n\n # Perform the meat of the operation here depending on our mode.\n if args.display:\n img_numpy = prep_display(preds, img, h, w)\n elif args.benchmark:\n prep_benchmark(preds, h, w)\n else:\n prep_metrics(ap_data, preds, img, gt, gt_masks, h, w, num_crowd, dataset.ids[image_idx], detections)\n \n # First couple of images take longer because we're constructing the graph.\n # Since that's technically initialization, don't include those in the FPS calculations.\n if it > 1:\n frame_times.add(timer.total_time())\n \n if args.display:\n if it > 1:\n print('Avg FPS: %.4f' % (1 / frame_times.get_avg()))\n plt.imshow(img_numpy)\n plt.title(str(dataset.ids[image_idx]))\n plt.show()\n elif not args.no_bar:\n if it > 1: fps = 1 / frame_times.get_avg()\n else: fps = 0\n progress = (it+1) / dataset_size * 100\n progress_bar.set_val(it+1)\n print('\\rProcessing Images %s %6d / %6d (%5.2f%%) %5.2f fps '\n % (repr(progress_bar), it+1, dataset_size, progress, fps), end='')\n\n\n\n if not args.display and not args.benchmark:\n print()\n if args.output_coco_json:\n print('Dumping detections...')\n if args.output_web_json:\n detections.dump_web()\n else:\n detections.dump()\n else:\n if not train_mode:\n print('Saving data...')\n with open(args.ap_data_file, 'wb') as f:\n pickle.dump(ap_data, f)\n\n return calc_map(ap_data)\n elif args.benchmark:\n print()\n print()\n print('Stats for the last frame:')\n timer.print_stats()\n avg_seconds = frame_times.get_avg()\n print('Average: %5.2f fps, %5.2f ms' % (1 / frame_times.get_avg(), 1000*avg_seconds))\n\n except KeyboardInterrupt:\n 
print('Stopping...')\n\n\ndef calc_map(ap_data):\n print('Calculating mAP...')\n aps = [{'box': [], 'mask': []} for _ in iou_thresholds]\n\n for _class in range(len(cfg.dataset.class_names)):\n for iou_idx in range(len(iou_thresholds)):\n for iou_type in ('box', 'mask'):\n ap_obj = ap_data[iou_type][iou_idx][_class]\n\n if not ap_obj.is_empty():\n aps[iou_idx][iou_type].append(ap_obj.get_ap())\n\n all_maps = {'box': OrderedDict(), 'mask': OrderedDict()}\n\n # Looking back at it, this code is really hard to read :/\n for iou_type in ('box', 'mask'):\n all_maps[iou_type]['all'] = 0 # Make this first in the ordereddict\n for i, threshold in enumerate(iou_thresholds):\n mAP = sum(aps[i][iou_type]) / len(aps[i][iou_type]) * 100 if len(aps[i][iou_type]) > 0 else 0\n all_maps[iou_type][int(threshold*100)] = mAP\n all_maps[iou_type]['all'] = (sum(all_maps[iou_type].values()) / (len(all_maps[iou_type].values())-1))\n \n print_maps(all_maps)\n return all_maps\n\ndef print_maps(all_maps):\n # Warning: hacky \n make_row = lambda vals: (' %5s |' * len(vals)) % tuple(vals)\n make_sep = lambda n: ('-------+' * n)\n\n print()\n print(make_row([''] + [('.%d ' % x if isinstance(x, int) else x + ' ') for x in all_maps['box'].keys()]))\n print(make_sep(len(all_maps['box']) + 1))\n for iou_type in ('box', 'mask'):\n print(make_row([iou_type] + ['%.2f' % x for x in all_maps[iou_type].values()]))\n print(make_sep(len(all_maps['box']) + 1))\n print()\n\n\n\nif __name__ == '__main__':\n parse_args()\n#false\n if args.config is not None:\n set_cfg(args.config)\n#two false\n if args.trained_model == 'interrupt':\n args.trained_model = SavePath.get_interrupt('weights/')\n elif args.trained_model == 'latest':\n args.trained_model = SavePath.get_latest('weights/', cfg.name)\n\n#ok\n if args.config is None:\n model_path = SavePath.from_str(args.trained_model)\n # TODO: Bad practice? Probably want to do a name lookup instead.\n args.config = model_path.model_name + '_config'\n print('Config not specified. Parsed %s from the file name.\\n' % args.config)\n set_cfg(args.config)\n#false\n if args.detect:\n cfg.eval_mask_branch = False\n#false\n if args.dataset is not None:\n set_dataset(args.dataset)\n\n with torch.no_grad():\n if not os.path.exists('results'): #false\n os.makedirs('results')\n#false\n if args.cuda:\n cudnn.benchmark = True\n cudnn.fastest = True\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n else:\n torch.set_default_tensor_type('torch.FloatTensor')\n#false\n if args.resume and not args.display:\n with open(args.ap_data_file, 'rb') as f:\n ap_data = pickle.load(f)\n calc_map(ap_data)\n exit()\n#false\n if args.image is None and args.video is None and args.images is None:\n dataset = COCODetection(cfg.dataset.valid_images, cfg.dataset.valid_info,\n transform=BaseTransform(), has_gt=cfg.dataset.has_gt)\n prep_coco_cats()\n else:\n dataset = None \n\n\n\n########## key process\n print('Loading model...\\n', end='')\n net = Yolact()\n print('Loading weight...\\n')\n net.load_weights(args.trained_model)\n #not know why add this\n net.eval()\n print('Net Done... \\n')\n\n if args.cuda:\n net = net.cuda()\n\n evaluate(net, dataset)\n print('Eval Done...\\n')\n\n"
] | [
[
"torch.sum",
"numpy.save",
"torch.stack",
"numpy.searchsorted",
"torch.no_grad",
"matplotlib.pyplot.title",
"torch.set_default_tensor_type",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"torch.from_numpy",
"numpy.array",
"torch.Tensor"
]
] |
devinllu/dataprep | [
"d56861e5bed3c608cace74983f797dc729072d0a"
] | [
"dataprep/clean/clean_text.py"
] | [
"\"\"\"\nClean a DataFrame column containing text data.\n\"\"\"\nimport re\nimport string\nfrom functools import partial, update_wrapper\nfrom typing import Any, Callable, Dict, List, Optional, Set, Union\nfrom unicodedata import normalize\n\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\n\nfrom ..assets.english_stopwords import english_stopwords\nfrom .utils import NULL_VALUES, to_dask\n\nREGEX_BRACKETS = {\n \"angle\": re.compile(r\"(\\<)[^<>]*(\\>)\"),\n \"curly\": re.compile(r\"(\\{)[^{}]*(\\})\"),\n \"round\": re.compile(r\"(\\()[^()]*(\\))\"),\n \"square\": re.compile(r\"(\\[)[^\\[\\]]*(\\])\"),\n}\nREGEX_DIGITS = re.compile(r\"\\d+\")\nREGEX_DIGITS_BLOCK = re.compile(r\"\\b\\d+\\b\")\nREGEX_HTML = re.compile(r\"<[A-Za-z/][^>]*>|&(?:[a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});\")\nREGEX_PUNCTUATION = re.compile(rf\"[{re.escape(string.punctuation)}]\")\nREGEX_URL = re.compile(r\"(?:https?://|www\\.)\\S+\")\nREGEX_WHITESPACE = re.compile(r\"[\\n\\t]|[ ]{2,}\")\n\n\ndef clean_text(\n df: Union[pd.DataFrame, dd.DataFrame],\n column: str,\n pipeline: Optional[List[Dict[str, Any]]] = None,\n stopwords: Optional[Set[str]] = None,\n) -> pd.DataFrame:\n \"\"\"\n Clean text data in a DataFrame column.\n\n Read more in the :ref:`User Guide <clean_text_user_guide>`.\n\n Parameters\n ----------\n df\n A pandas or Dask DataFrame containing the data to be cleaned.\n column\n The name of the column containing text data.\n pipeline\n A list of cleaning functions to be applied to the column. If None,\n use the default pipeline. See the :ref:`User Guide <clean_text_custom_pipeline>`\n for more information on customizing the pipeline.\n\n (default: None)\n stopwords\n A set of words to be removed from the column. If None, use NLTK's\n stopwords.\n\n (default: None)\n\n Examples\n --------\n Clean a column of text data using the default pipeline.\n\n >>> df = pd.DataFrame({\"text\": [\"This show was an amazing, fresh & innovative idea in the \\\n70's when it first aired.\"]})\n >>> clean_text(df, 'text')\n text\n 0 show amazing fresh innovative idea first aired\n \"\"\"\n df = to_dask(df)\n\n pipe = _get_default_pipeline(stopwords) if not pipeline else _get_custom_pipeline(pipeline)\n\n for func in pipe:\n df[column] = df[column].apply(func, meta=object)\n\n df = df.compute()\n\n return df\n\n\ndef default_text_pipeline() -> List[Dict[str, Any]]:\n \"\"\"\n Return a list of dictionaries representing the functions in the default pipeline.\n Use as a template for creating a custom pipeline.\n\n Read more in the :ref:`User Guide <clean_text_user_guide>`.\n\n Examples\n --------\n >>> default_text_pipeline()\n [{'operator': 'fillna'}, {'operator': 'lowercase'}, {'operator': 'remove_digits'},\n {'operator': 'remove_html'}, {'operator': 'remove_urls'}, {'operator': 'remove_punctuation'},\n {'operator': 'remove_accents'}, {'operator': 'remove_stopwords', 'parameters':\n {'stopwords': None}}, {'operator': 'remove_whitespace'}]\n \"\"\"\n return [\n {\"operator\": \"fillna\"},\n {\"operator\": \"lowercase\"},\n {\"operator\": \"remove_digits\"},\n {\"operator\": \"remove_html\"},\n {\"operator\": \"remove_urls\"},\n {\"operator\": \"remove_punctuation\"},\n {\"operator\": \"remove_accents\"},\n {\"operator\": \"remove_stopwords\", \"parameters\": {\"stopwords\": None}},\n {\"operator\": \"remove_whitespace\"},\n ]\n\n\ndef _get_default_pipeline(\n stopwords: Optional[Set[str]] = None,\n) -> List[Callable[..., Any]]:\n \"\"\"\n Return a list of functions defining the default pipeline.\n \"\"\"\n 
return [\n _fillna,\n _lowercase,\n _remove_digits,\n _remove_html,\n _remove_urls,\n _remove_punctuation,\n _remove_accents,\n lambda x: _remove_stopwords(x, stopwords),\n _remove_whitespace,\n ]\n\n\ndef _get_custom_pipeline(pipeline: List[Dict[str, Any]]) -> List[Callable[..., Any]]:\n \"\"\"\n Return a list of functions defining a custom pipeline.\n \"\"\"\n func_dict = _get_func_dict()\n custom_pipeline: List[Callable[..., Any]] = []\n\n for component in pipeline:\n # Check whether function is built in or user defined\n operator = (\n func_dict[component[\"operator\"]]\n if isinstance(component[\"operator\"], str)\n else component[\"operator\"]\n )\n # Append the function to the pipeline\n # If parameters are specified, create a partial function to lock in\n # the values and prevent them from being overwritten in subsequent loops\n if \"parameters\" in component:\n custom_pipeline.append(_wrapped_partial(operator, component[\"parameters\"]))\n else:\n custom_pipeline.append(operator)\n\n return custom_pipeline\n\n\ndef _get_func_dict() -> Dict[str, Callable[..., Any]]:\n \"\"\"\n Return a mapping of strings to function names.\n \"\"\"\n return {\n \"fillna\": _fillna,\n \"lowercase\": _lowercase,\n \"sentence_case\": _sentence_case,\n \"title_case\": _title_case,\n \"uppercase\": _uppercase,\n \"remove_accents\": _remove_accents,\n \"remove_bracketed\": _remove_bracketed,\n \"remove_digits\": _remove_digits,\n \"remove_html\": _remove_html,\n \"remove_prefixed\": _remove_prefixed,\n \"remove_punctuation\": _remove_punctuation,\n \"remove_stopwords\": _remove_stopwords,\n \"remove_urls\": _remove_urls,\n \"remove_whitespace\": _remove_whitespace,\n \"replace_bracketed\": _replace_bracketed,\n \"replace_digits\": _replace_digits,\n \"replace_prefixed\": _replace_prefixed,\n \"replace_punctuation\": _replace_punctuation,\n \"replace_stopwords\": _replace_stopwords,\n \"replace_text\": _replace_text,\n \"replace_urls\": _replace_urls,\n }\n\n\ndef _fillna(text: Any, value: Any = np.nan) -> Any:\n \"\"\"\n Replace all null values with NaN (default) or the supplied value.\n \"\"\"\n return value if text in NULL_VALUES else str(text)\n\n\ndef _lowercase(text: Any) -> Any:\n \"\"\"\n Convert all characters to lowercase.\n \"\"\"\n return str(text).lower() if pd.notna(text) else text\n\n\ndef _sentence_case(text: Any) -> Any:\n \"\"\"\n Convert first character to uppercase and remaining to lowercase.\n \"\"\"\n return str(text).capitalize() if pd.notna(text) else text\n\n\ndef _title_case(text: Any) -> Any:\n \"\"\"\n Convert first character of each word to uppercase and remaining to lowercase.\n \"\"\"\n return str(text).title() if pd.notna(text) else text\n\n\ndef _uppercase(text: Any) -> Any:\n \"\"\"\n Convert all characters to uppercase.\n \"\"\"\n return str(text).upper() if pd.notna(text) else text\n\n\ndef _remove_accents(text: Any) -> Any:\n \"\"\"\n Remove accents (diacritic marks).\n \"\"\"\n return (\n normalize(\"NFD\", str(text)).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n if pd.notna(text)\n else text\n )\n\n\ndef _remove_bracketed(text: Any, brackets: Union[str, Set[str]], inclusive: bool = True) -> Any:\n \"\"\"\n Remove text between brackets.\n\n Parameters\n ----------\n brackets\n The bracket style.\n - \"angle\": <>\n - \"curly\": {}\n - \"round\": ()\n - \"square\": []\n\n inclusive\n If True (default), remove the brackets along with the text in between.\n Otherwise, keep the brackets.\n \"\"\"\n if pd.isna(text):\n return text\n\n text = str(text)\n value = \"\" if 
inclusive else r\"\\g<1>\\g<2>\"\n if isinstance(brackets, set):\n for bracket in brackets:\n text = re.sub(REGEX_BRACKETS[bracket], value, text)\n else:\n text = re.sub(REGEX_BRACKETS[brackets], value, text)\n\n return text\n\n\ndef _remove_digits(text: Any) -> Any:\n \"\"\"\n Remove all digits.\n \"\"\"\n return re.sub(REGEX_DIGITS, \"\", str(text)) if pd.notna(text) else text\n\n\ndef _remove_html(text: Any) -> Any:\n \"\"\"\n Remove HTML tags.\n \"\"\"\n return re.sub(REGEX_HTML, \"\", str(text)) if pd.notna(text) else text\n\n\ndef _remove_prefixed(text: Any, prefix: Union[str, Set[str]]) -> Any:\n \"\"\"\n Remove substrings that start with the prefix(es).\n \"\"\"\n if pd.isna(text):\n return text\n\n text = str(text)\n if isinstance(prefix, set):\n for pre in prefix:\n text = re.sub(rf\"{pre}\\S+\", \"\", text)\n else:\n text = re.sub(rf\"{prefix}\\S+\", \"\", text)\n\n return text\n\n\ndef _remove_punctuation(text: Any) -> Any:\n \"\"\"\n Remove punctuation marks.\n \"\"\"\n return re.sub(REGEX_PUNCTUATION, \" \", str(text)) if pd.notna(text) else text\n\n\ndef _remove_stopwords(text: Any, stopwords: Optional[Set[str]] = None) -> Any:\n \"\"\"\n Remove a set of words from the text.\n If `stopwords` is None (default), use NLTK's stopwords.\n \"\"\"\n if pd.isna(text):\n return text\n\n stopwords = english_stopwords if not stopwords else stopwords\n return \" \".join(word for word in str(text).split() if word.lower() not in stopwords)\n\n\ndef _remove_urls(text: Any) -> Any:\n \"\"\"\n Remove URLS.\n \"\"\"\n return re.sub(REGEX_URL, \"\", str(text)) if pd.notna(text) else text\n\n\ndef _remove_whitespace(text: Any) -> Any:\n \"\"\"\n Remove extra spaces along with tabs and newlines.\n \"\"\"\n return re.sub(REGEX_WHITESPACE, \" \", str(text)).strip() if pd.notna(text) else text\n\n\ndef _replace_bracketed(\n text: Any, brackets: Union[str, Set[str]], value: str, inclusive: bool = True\n) -> Any:\n \"\"\"\n Replace text between brackets with the value.\n\n Parameters\n ----------\n brackets\n The bracket style.\n - \"angle\": <>\n - \"curly\": {}\n - \"round\": ()\n - \"square\": []\n\n value\n The value to replace the text between the brackets.\n\n inclusive\n If True (default), replace the brackets with the new text as well.\n Otherwise, keep the brackets.\n \"\"\"\n if pd.isna(text):\n return text\n\n text = str(text)\n value = value if inclusive else rf\"\\g<1>{value}\\g<2>\"\n if isinstance(brackets, set):\n for bracket in brackets:\n text = re.sub(REGEX_BRACKETS[bracket], value, text)\n else:\n text = re.sub(REGEX_BRACKETS[brackets], value, text)\n\n return text\n\n\ndef _replace_digits(text: Any, value: str, block: Optional[bool] = True) -> Any:\n \"\"\"\n Replace all digits with the value. 
If `block` is True (default),\n only replace blocks of digits.\n \"\"\"\n if pd.isna(text):\n return text\n\n return (\n re.sub(REGEX_DIGITS_BLOCK, value, str(text))\n if block\n else re.sub(REGEX_DIGITS, value, str(text))\n )\n\n\ndef _replace_prefixed(text: Any, prefix: Union[str, Set[str]], value: str) -> Any:\n \"\"\"\n Replace all substrings starting with the prefix(es) with the value.\n \"\"\"\n if pd.isna(text):\n return text\n\n text = str(text)\n if isinstance(prefix, set):\n for pre in prefix:\n text = re.sub(rf\"{pre}\\S+\", value, text)\n else:\n text = re.sub(rf\"{prefix}\\S+\", value, text)\n\n return text\n\n\ndef _replace_punctuation(text: Any, value: str) -> Any:\n \"\"\"\n Replace all punctuation marks with the value.\n \"\"\"\n return re.sub(REGEX_PUNCTUATION, value, str(text)) if pd.notna(text) else text\n\n\ndef _replace_stopwords(text: Any, value: str, stopwords: Optional[Set[str]] = None) -> Any:\n \"\"\"\n Replace a set of words in the text with the value.\n If `stopwords` is None (default), use NLTK's stopwords.\n \"\"\"\n if pd.isna(text):\n return text\n\n stopwords = english_stopwords if not stopwords else stopwords\n return \" \".join(word if word.lower() not in stopwords else value for word in str(text).split())\n\n\ndef _replace_text(text: Any, value: Dict[str, str], block: Optional[bool] = True) -> Any:\n \"\"\"\n Replace a sequence of characters with another according to the value mapping.\n If `block` is True (default), only replace standalone blocks of the sequence.\n \"\"\"\n if pd.isna(text):\n return text\n\n text = str(text)\n for old_value, new_value in value.items():\n text = (\n re.sub(rf\"\\b{old_value}\\b\", new_value, text, flags=re.IGNORECASE)\n if block\n else re.sub(rf\"{old_value}\", new_value, text, flags=re.IGNORECASE)\n )\n\n return text\n\n\ndef _replace_urls(text: Any, value: str) -> Any:\n \"\"\"\n Replace all URLs with the value.\n \"\"\"\n return re.sub(REGEX_URL, value, str(text)) if pd.notna(text) else text\n\n\ndef _wrapped_partial(\n func: Callable[..., Callable[..., Any]], params: Dict[str, Any]\n) -> Callable[..., Callable[..., Any]]:\n \"\"\"\n Return a partial function with a name and a doc attribute.\n \"\"\"\n partial_func = partial(func, **params)\n update_wrapper(partial_func, func)\n return partial_func\n"
] | [
[
"pandas.notna",
"pandas.isna"
]
] |
xiye17/transformers | [
"924989e70d9425e3276ca76f148a0fcd4bbd58cf"
] | [
"src/transformers/training_args.py"
] | [
"import dataclasses\nimport json\nimport os\nimport warnings\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required\nfrom .trainer_utils import EvaluationStrategy\nfrom .utils import logging\n\n\nif is_torch_available():\n import torch\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\nlogger = logging.get_logger(__name__)\n\n\ndef default_logdir() -> str:\n \"\"\"\n Same default as PyTorch\n \"\"\"\n import socket\n from datetime import datetime\n\n current_time = datetime.now().strftime(\"%b%d_%H-%M-%S\")\n return os.path.join(\"runs\", current_time + \"_\" + socket.gethostname())\n\n\n@dataclass\nclass TrainingArguments:\n \"\"\"\n TrainingArguments is the subset of the arguments we use in our example scripts\n **which relate to the training loop itself**.\n\n Using :class:`~transformers.HfArgumentParser` we can turn this class\n into argparse arguments to be able to specify them on the command line.\n\n Parameters:\n output_dir (:obj:`str`):\n The output directory where the model predictions and checkpoints will be written.\n overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):\n If :obj:`True`, overwrite the content of the output directory. Use this to continue training if\n :obj:`output_dir` points to a checkpoint directory.\n do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run training or not.\n do_eval (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run evaluation on the dev set or not.\n do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run predictions on the test set or not.\n evaluation_strategy(:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`\"no\"`):\n The evaluation strategy to adopt during training. Possible values are:\n\n * :obj:`\"no\"`: No evaluation is done during training.\n * :obj:`\"steps\"`: Evaluation is done (and logged) every :obj:`eval_steps`.\n * :obj:`\"epoch\"`: Evaluation is done at the end of each epoch.\n\n prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):\n When performing evaluation and predictions, only returns the loss.\n per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for training.\n per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for evaluation.\n gradient_accumulation_steps: (:obj:`int`, `optional`, defaults to 1):\n Number of updates steps to accumulate the gradients for, before performing a backward/update pass.\n\n .. warning::\n\n When using gradient accumulation, one step is counted as one step with backward pass. 
Therefore,\n logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training\n examples.\n learning_rate (:obj:`float`, `optional`, defaults to 5e-5):\n The initial learning rate for Adam.\n weight_decay (:obj:`float`, `optional`, defaults to 0):\n The weight decay to apply (if not zero).\n adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):\n Epsilon for the Adam optimizer.\n max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):\n Maximum gradient norm (for gradient clipping).\n num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):\n Total number of training epochs to perform (if not an integer, will perform the decimal part percents of\n the last epoch before stopping training).\n max_steps (:obj:`int`, `optional`, defaults to -1):\n If set to a positive number, the total number of training steps to perform. Overrides\n :obj:`num_train_epochs`.\n warmup_steps (:obj:`int`, `optional`, defaults to 0):\n Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.\n logging_dir (:obj:`str`, `optional`):\n Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.\n logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Wheter to log and evalulate the first :obj:`global_step` or not.\n logging_steps (:obj:`int`, `optional`, defaults to 500):\n Number of update steps between two logs.\n save_steps (:obj:`int`, `optional`, defaults to 500):\n Number of updates steps before two checkpoint saves.\n save_total_limit (:obj:`int`, `optional`):\n If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in\n :obj:`output_dir`.\n no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to not use CUDA even when it is available or not.\n seed (:obj:`int`, `optional`, defaults to 42):\n Random seed for initialization.\n fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.\n fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):\n For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details\n on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.\n local_rank (:obj:`int`, `optional`, defaults to -1):\n During distributed training, the rank of the process.\n tpu_num_cores (:obj:`int`, `optional`):\n When training on TPU, the mumber of TPU cores (automatically passed by launcher script).\n debug (:obj:`bool`, `optional`, defaults to :obj:`False`):\n When training on TPU, whether to print debug metrics or not.\n dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)\n or not.\n eval_steps (:obj:`int`, `optional`):\n Number of update steps between two evaluations if :obj:`evaluation_strategy=\"steps\"`. Will default to the\n same value as :obj:`logging_steps` if not set.\n dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):\n Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.\n past_index (:obj:`int`, `optional`, defaults to -1):\n Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc`XLNet <../model_doc/xlnet>` can\n make use of the past hidden states for their predictions. 
If this argument is set to a positive int, the\n ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model\n at the next training step under the keyword argument ``mems``.\n run_name (:obj:`str`, `optional`):\n A descriptor for the run. Notably used for wandb logging.\n disable_tqdm (:obj:`bool`, `optional`):\n Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set\n to warn or lower (default), :obj:`False` otherwise.\n remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model\n forward method.\n\n (Note: this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)\n label_names (:obj:`List[str]`, `optional`):\n The list of keys in your dictionary of inputs that correspond to the labels.\n\n Will eventually default to :obj:`[\"labels\"]` except if the model used is one of the\n :obj:`XxxForQuestionAnswering` in which case it will default to\n :obj:`[\"start_positions\", \"end_positions\"]`.\n load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to load the best model found during training at the end of training.\n\n .. note::\n\n When set to :obj:`True`, the parameters :obj:`save_steps` will be ignored and the model will be saved\n after each evaluation.\n metric_for_best_model (:obj:`str`, `optional`)\n Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different\n models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`\"eval_\"`.\n Will default to :obj:`\"loss\"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation\n loss).\n\n If you set this value, :obj:`greater_is_better` will defaut to :obj:`True`. Don't forget to set it to\n :obj:`False` if your metric is better when lower.\n greater_is_better (:obj:`bool`, `optional`)\n Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better\n models should have a greater metric or not. 
Will default to:\n\n - :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`\"loss\"` or\n :obj:`\"eval_loss\"`.\n - :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`\"loss\"` or :obj:`\"eval_loss\"`.\n \"\"\"\n\n output_dir: str = field(\n metadata={\"help\": \"The output directory where the model predictions and checkpoints will be written.\"}\n )\n overwrite_output_dir: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Overwrite the content of the output directory.\"\n \"Use this to continue training if output_dir points to a checkpoint directory.\"\n )\n },\n )\n\n do_train: bool = field(default=False, metadata={\"help\": \"Whether to run training.\"})\n do_eval: bool = field(default=False, metadata={\"help\": \"Whether to run eval on the dev set.\"})\n do_predict: bool = field(default=False, metadata={\"help\": \"Whether to run predictions on the test set.\"})\n evaluate_during_training: bool = field(\n default=False,\n metadata={\"help\": \"Run evaluation during training at each logging step.\"},\n )\n evaluation_strategy: EvaluationStrategy = field(\n default=\"no\",\n metadata={\"help\": \"Run evaluation during training at each logging step.\"},\n )\n prediction_loss_only: bool = field(\n default=False,\n metadata={\"help\": \"When performing evaluation and predictions, only returns the loss.\"},\n )\n\n per_device_train_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for training.\"}\n )\n per_device_eval_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for evaluation.\"}\n )\n\n per_gpu_train_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_train_batch_size` is preferred. \"\n \"Batch size per GPU/TPU core/CPU for training.\"\n },\n )\n per_gpu_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_eval_batch_size` is preferred.\"\n \"Batch size per GPU/TPU core/CPU for evaluation.\"\n },\n )\n\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\"help\": \"Number of updates steps to accumulate before performing a backward/update pass.\"},\n )\n\n learning_rate: float = field(default=5e-5, metadata={\"help\": \"The initial learning rate for Adam.\"})\n weight_decay: float = field(default=0.0, metadata={\"help\": \"Weight decay if we apply some.\"})\n adam_beta1: float = field(default=0.9, metadata={\"help\": \"Beta1 for Adam optimizer\"})\n adam_beta2: float = field(default=0.999, metadata={\"help\": \"Beta2 for Adam optimizer\"})\n adam_epsilon: float = field(default=1e-8, metadata={\"help\": \"Epsilon for Adam optimizer.\"})\n max_grad_norm: float = field(default=1.0, metadata={\"help\": \"Max gradient norm.\"})\n\n num_train_epochs: float = field(default=3.0, metadata={\"help\": \"Total number of training epochs to perform.\"})\n max_steps: int = field(\n default=-1,\n metadata={\"help\": \"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\"},\n )\n warmup_steps: int = field(default=0, metadata={\"help\": \"Linear warmup over warmup_steps.\"})\n\n logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={\"help\": \"Tensorboard log dir.\"})\n logging_first_step: bool = field(default=False, metadata={\"help\": \"Log and eval the first global_step\"})\n logging_steps: int = field(default=500, metadata={\"help\": \"Log every X updates steps.\"})\n save_steps: int = field(default=500, metadata={\"help\": \"Save checkpoint every X updates steps.\"})\n save_total_limit: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Limit the total amount of checkpoints.\"\n \"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints\"\n )\n },\n )\n no_cuda: bool = field(default=False, metadata={\"help\": \"Do not use CUDA even when it is available\"})\n seed: int = field(default=42, metadata={\"help\": \"random seed for initialization\"})\n\n fp16: bool = field(\n default=False,\n metadata={\"help\": \"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\"},\n )\n fp16_opt_level: str = field(\n default=\"O1\",\n metadata={\n \"help\": (\n \"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\"\n )\n },\n )\n local_rank: int = field(default=-1, metadata={\"help\": \"For distributed training: local_rank\"})\n\n tpu_num_cores: Optional[int] = field(\n default=None, metadata={\"help\": \"TPU: Number of TPU cores (automatically passed by launcher script)\"}\n )\n tpu_metrics_debug: bool = field(\n default=False,\n metadata={\"help\": \"Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics\"},\n )\n debug: bool = field(default=False, metadata={\"help\": \"Whether to print debug metrics on TPU\"})\n\n dataloader_drop_last: bool = field(\n default=False, metadata={\"help\": \"Drop the last incomplete batch if it is not divisible by the batch size.\"}\n )\n eval_steps: int = field(default=None, metadata={\"help\": \"Run an evaluation every X steps.\"})\n dataloader_num_workers: int = field(\n default=0,\n metadata={\n \"help\": \"Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.\"\n },\n )\n\n past_index: int = field(\n default=-1,\n metadata={\"help\": \"If >=0, uses the corresponding part of the output as the past state for next step.\"},\n )\n\n run_name: Optional[str] = field(\n default=None, metadata={\"help\": \"An optional descriptor for the run. 
Notably used for wandb logging.\"}\n )\n disable_tqdm: Optional[bool] = field(\n default=None, metadata={\"help\": \"Whether or not to disable the tqdm progress bars.\"}\n )\n\n remove_unused_columns: Optional[bool] = field(\n default=True, metadata={\"help\": \"Remove columns not required by the model when using an nlp.Dataset.\"}\n )\n label_names: Optional[List[str]] = field(\n default=None, metadata={\"help\": \"The list of keys in your dictionary of inputs that correspond to the labels.\"}\n )\n\n load_best_model_at_end: Optional[bool] = field(\n default=False,\n metadata={\"help\": \"Whether or not to load the best model found during training at the end of training.\"},\n )\n metric_for_best_model: Optional[str] = field(\n default=None, metadata={\"help\": \"The metric to use to compare two different models.\"}\n )\n greater_is_better: Optional[bool] = field(\n default=None, metadata={\"help\": \"Whether the `metric_for_best_model` should be maximized or not.\"}\n )\n\n def __post_init__(self):\n if self.disable_tqdm is None:\n self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN\n if self.evaluate_during_training is not None:\n self.evaluation_strategy = (\n EvaluationStrategy.STEPS if self.evaluate_during_training else EvaluationStrategy.NO\n )\n warnings.warn(\n \"The `evaluate_during_training` argument is deprecated in favor of `evaluation_strategy` (which has more options)\",\n FutureWarning,\n )\n else:\n self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)\n\n if self.eval_steps is None:\n self.eval_steps = self.logging_steps\n\n if self.load_best_model_at_end and self.metric_for_best_model is None:\n self.metric_for_best_model = \"loss\"\n if self.greater_is_better is None and self.metric_for_best_model is not None:\n self.greater_is_better = self.metric_for_best_model not in [\"loss\", \"eval_loss\"]\n\n @property\n def train_batch_size(self) -> int:\n \"\"\"\n The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_train_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future \"\n \"version. Using `--per_device_train_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size\n return per_device_batch_size * max(1, self.n_gpu)\n\n @property\n def eval_batch_size(self) -> int:\n \"\"\"\n The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_eval_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future \"\n \"version. Using `--per_device_eval_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size\n return per_device_batch_size * max(1, self.n_gpu)\n\n @cached_property\n @torch_required\n def _setup_devices(self) -> Tuple[\"torch.device\", int]:\n logger.info(\"PyTorch: setting up devices\")\n if self.no_cuda:\n device = torch.device(\"cpu\")\n n_gpu = 0\n elif is_torch_tpu_available():\n device = xm.xla_device()\n n_gpu = 0\n elif self.local_rank == -1:\n # if n_gpu is > 1 we'll use nn.DataParallel.\n # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`\n # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will\n # trigger an error that a device index is missing. 
Index 0 takes into account the\n # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`\n # will use the first GPU in that env, i.e. GPU#1\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n # Here, we'll use torch.distributed.\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend=\"nccl\")\n device = torch.device(\"cuda\", self.local_rank)\n n_gpu = 1\n\n if device.type == \"cuda\":\n torch.cuda.set_device(device)\n\n return device, n_gpu\n\n @property\n @torch_required\n def device(self) -> \"torch.device\":\n \"\"\"\n The device used by this process.\n \"\"\"\n return self._setup_devices[0]\n\n @property\n @torch_required\n def n_gpu(self):\n \"\"\"\n The number of GPUs used by this process.\n\n Note:\n This will only be greater than one when you have multiple GPUs available but are not using distributed\n training. For distributed training, it will always be 1.\n \"\"\"\n return self._setup_devices[1]\n\n def to_dict(self):\n \"\"\"\n Serializes this instance while replace `Enum` by their values (for JSON serialization support).\n \"\"\"\n d = dataclasses.asdict(self)\n for k, v in d.items():\n if isinstance(v, Enum):\n d[k] = v.value\n return d\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n \"\"\"\n return json.dumps(self.to_dict(), indent=2)\n\n def to_sanitized_dict(self) -> Dict[str, Any]:\n \"\"\"\n Sanitized serialization to use with TensorBoard’s hparams\n \"\"\"\n d = self.to_dict()\n d = {**d, **{\"train_batch_size\": self.train_batch_size, \"eval_batch_size\": self.eval_batch_size}}\n\n valid_types = [bool, int, float, str]\n if is_torch_available():\n valid_types.append(torch.Tensor)\n\n return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.set_device"
]
] |
LB0828/EmbedKGQA_Learning | [
"db4bb5a91f99db8a36efdf3ae4f668d60ba018d3"
] | [
"KGQA/RoBERTa/pruning_model.py"
] | [
"import torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.nn.init import xavier_normal_\nfrom transformers import *\n\nclass PruningModel(nn.Module):\n\n def __init__(self, rel2idx, idx2rel, ls):\n super(PruningModel, self).__init__()\n self.label_smoothing = ls\n self.rel2idx = rel2idx\n self.idx2rel = idx2rel\n\n self.roberta_pretrained_weights = 'roberta-base'\n self.roberta_model = RobertaModel.from_pretrained(self.roberta_pretrained_weights)\n\n self.roberta_dim = 768\n self.mid1 = 512\n self.mid2 = 512\n self.mid3 = 256\n self.mid4 = 256\n self.fcnn_dropout = torch.nn.Dropout(0.1)\n # self.lin1 = nn.Linear(self.roberta_dim, self.mid1)\n # self.lin2 = nn.Linear(self.mid1, self.mid2)\n # self.lin3 = nn.Linear(self.mid2, self.mid3)\n # self.lin4 = nn.Linear(self.mid3, self.mid4)\n # self.hidden2rel = nn.Linear(self.mid4, len(self.rel2idx))\n self.hidden2rel = nn.Linear(self.roberta_dim, len(self.rel2idx))\n\n self.loss = torch.nn.BCELoss(reduction='sum')\n\n self.logsoftmax = torch.nn.LogSoftmax(dim=-1) \n\n def applyNonLinear(self, outputs):\n # outputs = self.fcnn_dropout(self.lin1(outputs))\n # outputs = F.relu(outputs)\n # outputs = self.fcnn_dropout(self.lin2(outputs))\n # outputs = F.relu(outputs)\n # outputs = self.fcnn_dropout(self.lin3(outputs))\n # outputs = F.relu(outputs)\n # outputs = self.fcnn_dropout(self.lin4(outputs))\n # outputs = F.relu(outputs)\n outputs = self.hidden2rel(outputs)\n # outputs = self.hidden2rel_base(outputs)\n return outputs\n\n def getQuestionEmbedding(self, question_tokenized, attention_mask):\n roberta_last_hidden_states = self.roberta_model(question_tokenized, attention_mask=attention_mask)[0]\n states = roberta_last_hidden_states.transpose(1,0)\n cls_embedding = states[0]\n question_embedding = cls_embedding\n # question_embedding = torch.mean(roberta_last_hidden_states, dim=1)\n return question_embedding\n \n def forward(self, question_tokenized, attention_mask, rel_one_hot):\n question_embedding = self.getQuestionEmbedding(question_tokenized, attention_mask)\n prediction = self.applyNonLinear(question_embedding)\n prediction = torch.sigmoid(prediction)\n actual = rel_one_hot\n if self.label_smoothing:\n actual = ((1.0-self.label_smoothing)*actual) + (1.0/actual.size(1)) \n loss = self.loss(prediction, actual)\n return loss\n \n\n def get_score_ranked(self, question_tokenized, attention_mask):\n question_embedding = self.getQuestionEmbedding(question_tokenized.unsqueeze(0), attention_mask.unsqueeze(0))\n prediction = self.applyNonLinear(question_embedding)\n prediction = torch.sigmoid(prediction).squeeze()\n # top2 = torch.topk(scores, k=2, largest=True, sorted=True)\n # return top2\n return prediction\n \n\n\n\n\n"
] | [
[
"torch.nn.LogSoftmax",
"torch.nn.BCELoss",
"torch.sigmoid",
"torch.nn.Dropout"
]
] |
Labyrinthine-Unreal/lbrys_sub | [
"c1a5b7d168d107b4820d5c40fbcfc895bf124f86"
] | [
"model.py"
] | [
"# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import MinMaxScaler as mini\nfrom sklearn.model_selection import train_test_split\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\n\ndata=pd.read_csv('c_force_data.csv')\n#print('COLLECTEDFEATURESINCLUDEDINTHEDATASET')\nX=data.drop(['Deck1_damage'],axis=1)\ny=data['Deck1_damage']\n# y=labelencoder.fit_transform(y)\n# mini = mini()\n# X=mini.fit(X)\n# X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=.12)\n\n# mini = mini()\n# X = mini.fit(X)\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.13)\nfrom sklearn.linear_model import LinearRegression,LogisticRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(X_train, y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/c-force_model.pkl','wb'))\n\ndata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_ltcusd.csv')\ndf0,df1 = data.shape[0], data.shape[1]\nprint('{} Dates '.format(df0))\n# data= data.drop(['Date'], axis =1)\n# data = data.drop('Adj Close',axis=1)\n#Splitting Training and Test Set\n#Since we have a very small dataset, we will train our model with all availabe data.\nX= data.drop(['close'],axis=1)\ny= data['close']\nmini = mini()\nX = mini.fit_transform(X)\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(X_train, y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/lit_model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('models/lit_model.pkl','rb'))\nfuture_x = X\n# X = X[3295:3302]\n# future_x = X[-1]\n# x = X[:-1]\n# bata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_ltcusd.csv')\n# date = bata['time']\n# date = date.tail()\n# print(date)\n# bata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_ltcusd.csv')\n# date = bata['time']\n# print('PREDICTED Close')\n# y = model.predict(future_x)\n# print(y[-1:])\n\n\ndata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_btcusd.csv')\ndf0,df1 = data.shape[0], data.shape[1]\nprint('{} Dates '.format(df0))\n# data= data.drop(['Date'], axis =1)\n# data = data.drop('Adj Close',axis=1)\n#Splitting Training and Test Set\n#Since we have a very small dataset, we will train our model with all availabe data.\nX= data.drop(['close'],axis=1)\ny= data['close']\n\nX = mini.fit_transform(X)\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(X_train, y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/bit_model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('models/bit_model.pkl','rb'))\nfuture_x = X\n# X = X[3295:3302]\n# future_x = X[-1]\n# x = X[:-1]\nbata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_btcusd.csv')\ndate = bata['time']\ndate = date.tail()\nprint(date)\nbata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_btcusd.csv')\ndate = bata['time']\nprint('PREDICTED Close')\ny = 
model.predict(future_x)\nprint(y[-1:])\n\n\neth_data = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_ethusd.csv')\ndf0,df1 = eth_data.shape[0], eth_data.shape[1]\nprint('{} dates'.format(df0))\n# eth_data= eth_data.drop(['Date'], axis =1)\n# eth_data = eth_data.drop('Adj Close',axis=1)\neth_X= eth_data.drop(['close'],axis=1)\neth_y= eth_data['close']\neth_X = mini.fit_transform(eth_X)\neth_X_train,eth_X_test,eth_y_train,eth_y_test = train_test_split(eth_X,eth_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(eth_X_train, eth_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/eth_model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('models/eth_model.pkl','rb'))\neth_future_x = eth_X\n# eth_X = eth_X[-1:]\n # future_x = X[-1]\n # x = X[:-1]\neth_bata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_ethusd.csv')\neth_date = eth_bata['time']\neth_date = eth_date.tail()\neth_bata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_ethusd.csv')\neth_date = eth_bata['time']\nprint('PREDICTED Close')\neth_y = model.predict(eth_future_x)\nprint('accuracy {}'.format(model.score(eth_X_test,eth_y_test)))\neth_output =eth_y[-1:]\nprint(eth_output)\n\nxlm_data = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_xlmusd.csv')\ndf0,df1 = xlm_data.shape[0], xlm_data.shape[1]\nprint('{} dates'.format(df0))\n# xlm_data= xlm_data.drop(['Date'], axis =1)\n# xlm_data = xlm_data.drop('Adj Close',axis=1)\nxlm_X= xlm_data.drop(['close'],axis=1)\nxlm_y= xlm_data['close']\nxlm_X = mini.fit_transform(xlm_X)\nxlm_X_train,xlm_X_test,xlm_y_train,xlm_y_test = train_test_split(xlm_X,xlm_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(xlm_X_train, xlm_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/xlm_model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('models/xlm_model.pkl','rb'))\nxlm_future_x = xlm_X\n# xlm_X = xlm_X[-1:]\n # future_x = X[-1]\n # x = X[:-1]\nxlm_bata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_xlmusd.csv')\nxlm_date = xlm_bata['time']\nxlm_date = xlm_date.tail()\nxlm_bata = pd.read_csv('data/crypto/crypto_portfolio/1m/bitfinex_xlmusd.csv')\nxlm_date = xlm_bata['time']\nprint('PREDICTED Close')\nxlm_y = model.predict(xlm_future_x)\nprint('accuracy {}'.format(model.score(xlm_X_test,xlm_y_test)))\nxlm_output =xlm_y[-1:]\nprint(xlm_output)\n\nAAPL_data = pd.read_csv('data/stocks/stocks_portfolio/AAPL.csv')\ndf0,df1 = AAPL_data.shape[0], AAPL_data.shape[1]\nprint('{} dates'.format(df0))\nAAPL_data = AAPL_data.fillna(28.630752329973355)\nAAPL_data= AAPL_data.drop(['Date'], axis =1)\nAAPL_data = AAPL_data.drop('Adj Close',axis=1)\nAAPL_X= AAPL_data.drop(['Close'],axis=1)\nAAPL_y= AAPL_data['Close']\nAAPL_X = mini.fit_transform(AAPL_X)\nAAPL_X_train,AAPL_X_test,AAPL_y_train,AAPL_y_test = train_test_split(AAPL_X,AAPL_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(AAPL_X_train, AAPL_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/AAPL_model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('models/AAPL_model.pkl','rb'))\nAAPL_future_x = AAPL_X\n# AAPL_X = AAPL_X[9733:9740]\n # future_x = X[-1]\n # x = 
X[:-1]\nAAPL_bata = pd.read_csv('data/stocks/stocks_portfolio/AAPL.csv')\nAAPL_date = AAPL_bata['Date']\nAAPL_date = AAPL_date.tail()\nprint(AAPL_date)\nAAPL_bata = pd.read_csv('data/stocks/stocks_portfolio/AAPL.csv')\nAAPL_date = AAPL_bata['Date']\nprint('PREDICTED Close')\nAAPL_y = model.predict(AAPL_future_x)\nprint(AAPL_y[-1:])\nAAPL_output =AAPL_y[-1:]\n\n\nMSFT_data = pd.read_csv('data/stocks/stocks_portfolio/MSFT.csv')\ndf0,df1 = MSFT_data.shape[0], MSFT_data.shape[1]\nprint('{} dates'.format(df0))\nMSFT_data= MSFT_data.drop(['Date'], axis =1)\nMSFT_data = MSFT_data.drop('Adj Close',axis=1)\nMSFT_X= MSFT_data.drop(['Close'],axis=1)\nMSFT_y= MSFT_data['Close']\nMSFT_y.mean()\nMSFT_X = mini.fit_transform(MSFT_X)\nMSFT_X_train,MSFT_X_test,MSFT_y_train,MSFT_y_test = train_test_split(MSFT_X,MSFT_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(MSFT_X_train, MSFT_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/MSFT_model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('models/MSFT_model.pkl','rb'))\nMSFT_future_x = MSFT_X\n# MSFT_X = MSFT_X[8407:8414]\n # future_x = X[-1]\n # x = X[:-1]\nMSFT_bata = pd.read_csv('data/stocks/stocks_portfolio/MSFT.csv')\nMSFT_date = MSFT_bata['Date']\nMSFT_date = MSFT_date.tail()\nprint(MSFT_date)\nMSFT_bata = pd.read_csv('data/stocks/stocks_portfolio/MSFT.csv')\nMSFT_date = MSFT_bata['Date']\nprint('PREDICTED Close')\nMSFT_y = model.predict(MSFT_future_x)\nprint(MSFT_y[-1:])\n\n\nMSFT_output =MSFT_y[-1:]\n\n\n# # Importing the libraries\n# import numpy as np\n# import matplotlib.pyplot as plt\n# import pandas as pd\n# import pickle\n# from sklearn.linear_model import LinearRegression\n# from sklearn.preprocessing import MinMaxScaler as mini\n# from sklearn.model_selection import train_test_split\nroku_data = pd.read_csv('data/stocks/stocks_portfolio/ROKU.csv')\nroku_df0,roku_df1 = roku_data.shape[0], roku_data.shape[1]\nprint('{} dates'.format(roku_df0))\nroku_data= roku_data.drop(['Date'], axis =1)\nroku_data = roku_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small roku_dataset, we will train our model with all availabe roku_data.\nroku_X= roku_data.drop(['Close'],axis=1)\nroku_y= roku_data['Close']\nroku_X = mini.fit_transform(roku_X)\n# #\nroku_X_train,roku_X_test,roku_y_train,roku_y_test = train_test_split(roku_X,roku_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig roku_data\nregressor.fit(roku_X_train, roku_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/roku_model.pkl','wb'))\n#\n# # Loading model to compare the results\nroku_model = pickle.load(open('models/roku_model.pkl','rb'))\nroku_future_x = roku_X\n# roku_X = roku_X[:]\n# future_x = X[-1]\n# x = X[:-1]\nroku_bata = pd.read_csv('data/stocks/stocks_portfolio/ROKU.csv')\nroku_date = roku_bata['Date']\nroku_date = roku_date.tail()\nroku_bata = pd.read_csv('data/stocks/stocks_portfolio/ROKU.csv')\nroku_date = roku_bata['Date']\nprint(roku_date.tail())\nprint('PREDICTED Close')\nroku_y = roku_model.predict(roku_future_x)\nprint(roku_y[-1:])\n\ngspc_data = pd.read_csv('data/stocks/stocks_portfolio/^GSPC.csv')\ngspc_df0,gspc_df1 = gspc_data.shape[0], gspc_data.shape[1]\nprint('{} dates'.format(gspc_df0))\ngspc_data= gspc_data.drop(['Date'], axis =1)\ngspc_data = gspc_data.drop('Adj 
Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small gspc_dataset, we will train our model with all availabe gspc_data.\ngspc_X= gspc_data.drop(['Close'],axis=1)\ngspc_y= gspc_data['Close']\ngspc_X = mini.fit_transform(gspc_X)\n# #\ngspc_X_train,gspc_X_test,gspc_y_train,gspc_y_test = train_test_split(gspc_X,gspc_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig gspc_data\nregressor.fit(gspc_X_train, gspc_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/^GSPC_model.pkl','wb'))\n#\n# # Loading model to compare the results\ngspc_model = pickle.load(open('models/^GSPC_model.pkl','rb'))\ngspc_future_x = gspc_X\n# gspc_X = gspc_X[:]\n# future_x = X[-1]\n# x = X[:-1]\ngspc_bata = pd.read_csv('data/stocks/stocks_portfolio/^GSPC.csv')\ngspc_date = gspc_bata['Date']\ngspc_date = gspc_date.tail()\ngspc_bata = pd.read_csv('data/stocks/stocks_portfolio/^GSPC.csv')\ngspc_date = gspc_bata['Date']\nprint(gspc_date.tail())\nprint('PREDICTED Close')\ngspc_y = gspc_model.predict(gspc_future_x)\nprint(gspc_y[-1:])\n\nfb_data = pd.read_csv('data/stocks/stocks_portfolio/fb.csv')\nfb_df0,fb_df1 = fb_data.shape[0], fb_data.shape[1]\nprint('{} dates'.format(fb_df0))\nfb_data= fb_data.drop(['Date'], axis =1)\nfb_data = fb_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small fb_dataset, we will train our model with all availabe fb_data.\nfb_X= fb_data.drop(['Close'],axis=1)\nfb_y= fb_data['Close']\nfb_X = mini.fit_transform(fb_X)\n# #\nfb_X_train,fb_X_test,fb_y_train,fb_y_test = train_test_split(fb_X,fb_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig fb_data\nregressor.fit(fb_X_train, fb_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/fb_model.pkl','wb'))\n#\n# # Loading model to compare the results\nfb_model = pickle.load(open('models/fb_model.pkl','rb'))\nfb_future_x = fb_X\n# fb_X = fb_X[:]\n# future_x = X[-1]\n# x = X[:-1]\nfb_bata = pd.read_csv('data/stocks/stocks_portfolio/fb.csv')\nfb_date = fb_bata['Date']\nfb_date = fb_date.tail()\nfb_bata = pd.read_csv('data/stocks/stocks_portfolio/fb.csv')\nfb_date = fb_bata['Date']\nprint(fb_date.tail())\nprint('PREDICTED Close')\nfb_y = fb_model.predict(fb_future_x)\nprint(fb_y[-1:])\n\nar_data = pd.read_csv('data/stocks/stocks_portfolio/ar.csv')\nar_df0,ar_df1 = ar_data.shape[0], ar_data.shape[1]\nprint('{} dates'.format(ar_df0))\nar_data= ar_data.drop(['Date'], axis =1)\nar_data = ar_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small ar_dataset, we will train our model with all availabe ar_data.\nar_X= ar_data.drop(['Close'],axis=1)\nar_y= ar_data['Close']\nar_X = mini.fit_transform(ar_X)\n# #\nar_X_train,ar_X_test,ar_y_train,ar_y_test = train_test_split(ar_X,ar_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig ar_data\nregressor.fit(ar_X_train, ar_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/ar_model.pkl','wb'))\n#\n# # Loading model to compare the results\nar_model = pickle.load(open('models/ar_model.pkl','rb'))\nar_future_x = ar_X\n# ar_X = ar_X[:]\n# future_x = X[-1]\n# x = X[:-1]\nar_bata = pd.read_csv('data/stocks/stocks_portfolio/ar.csv')\nar_date = ar_bata['Date']\nar_date = ar_date.tail()\nar_bata = 
pd.read_csv('data/stocks/stocks_portfolio/ar.csv')\nar_date = ar_bata['Date']\nprint(ar_date.tail())\nprint('PREDICTED Close')\nar_y = ar_model.predict(ar_future_x)\nprint(ar_y[-1:]) \n\nchk_data = pd.read_csv('data/stocks/stocks_portfolio/chk.csv')\nchk_df0,chk_df1 = chk_data.shape[0], chk_data.shape[1]\nprint('{} dates'.format(chk_df0))\nchk_data= chk_data.drop(['Date'], axis =1)\nchk_data = chk_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small chk_dataset, we will train our model with all availabe chk_data.\nchk_X= chk_data.drop(['Close'],axis=1)\nchk_y= chk_data['Close']\nchk_X = mini.fit_transform(chk_X)\n# #\nchk_X_train,chk_X_test,chk_y_train,chk_y_test = train_test_split(chk_X,chk_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig chk_data\nregressor.fit(chk_X_train, chk_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/chk_model.pkl','wb'))\n#\n# # Loading model to compchke the results\nchk_model = pickle.load(open('models/chk_model.pkl','rb'))\nchk_future_x = chk_X\n# chk_X = chk_X[:]\n# future_x = X[-1]\n# x = X[:-1]\nchk_bata = pd.read_csv('data/stocks/stocks_portfolio/chk.csv')\nchk_date = chk_bata['Date']\nchk_date = chk_date.tail()\nchk_bata = pd.read_csv('data/stocks/stocks_portfolio/chk.csv')\nchk_date = chk_bata['Date']\nprint(chk_date.tail())\nprint('PREDICTED Close')\nchk_y = chk_model.predict(chk_future_x)\nprint(chk_y[-1:])\n\ngrpn_data = pd.read_csv('data/stocks/stocks_portfolio/grpn.csv')\ngrpn_df0,grpn_df1 = grpn_data.shape[0], grpn_data.shape[1]\nprint('{} dates'.format(grpn_df0))\ngrpn_data= grpn_data.drop(['Date'], axis =1)\ngrpn_data = grpn_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small grpn_dataset, we will train our model with all availabe grpn_data.\ngrpn_X= grpn_data.drop(['Close'],axis=1)\ngrpn_y= grpn_data['Close']\ngrpn_X = mini.fit_transform(grpn_X)\n# #\ngrpn_X_train,grpn_X_test,grpn_y_train,grpn_y_test = train_test_split(grpn_X,grpn_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig grpn_data\nregressor.fit(grpn_X_train, grpn_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/grpn_model.pkl','wb'))\n#\n# # Loading model to compgrpne the results\ngrpn_model = pickle.load(open('models/grpn_model.pkl','rb'))\ngrpn_future_x = grpn_X\n# grpn_X = grpn_X[:]\n# future_x = X[-1]\n# x = X[:-1]\ngrpn_bata = pd.read_csv('data/stocks/stocks_portfolio/grpn.csv')\ngrpn_date = grpn_bata['Date']\ngrpn_date = grpn_date.tail()\ngrpn_bata = pd.read_csv('data/stocks/stocks_portfolio/grpn.csv')\ngrpn_date = grpn_bata['Date']\nprint(grpn_date.tail())\nprint('PREDICTED Close')\ngrpn_y = grpn_model.predict(grpn_future_x)\nprint(grpn_y[-1:])\n\npcg_data = pd.read_csv('data/stocks/stocks_portfolio/pcg.csv')\npcg_df0,pcg_df1 = pcg_data.shape[0], pcg_data.shape[1]\nprint('{} dates'.format(pcg_df0))\npcg_data= pcg_data.drop(['Date'], axis =1)\npcg_data = pcg_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small pcg_dataset, we will train our model with all availabe pcg_data.\npcg_X= pcg_data.drop(['Close'],axis=1)\npcg_y= pcg_data['Close']\npcg_X = mini.fit_transform(pcg_X)\n# #\npcg_X_train,pcg_X_test,pcg_y_train,pcg_y_test = train_test_split(pcg_X,pcg_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = 
LinearRegression()\n#\n# #Fitting model with trainig pcg_data\nregressor.fit(pcg_X_train, pcg_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/pcg_model.pkl','wb'))\n#\n# # Loading model to comppcge the results\npcg_model = pickle.load(open('models/pcg_model.pkl','rb'))\npcg_future_x = pcg_X\n# pcg_X = pcg_X[:]\n# future_x = X[-1]\n# x = X[:-1]\npcg_bata = pd.read_csv('data/stocks/stocks_portfolio/pcg.csv')\npcg_date = pcg_bata['Date']\npcg_date = pcg_date.tail()\npcg_bata = pd.read_csv('data/stocks/stocks_portfolio/pcg.csv')\npcg_date = pcg_bata['Date']\nprint(pcg_date.tail())\nprint('PREDICTED Close')\npcg_y = pcg_model.predict(pcg_future_x)\nprint(pcg_y[-1:])\n\nspy_data = pd.read_csv('data/stocks/stocks_portfolio/spy.csv')\nspy_df0,spy_df1 = spy_data.shape[0], spy_data.shape[1]\nprint('{} dates'.format(spy_df0))\nspy_data= spy_data.drop(['Date'], axis =1)\nspy_data = spy_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small spy_dataset, we will train our model with all availabe spy_data.\nspy_X= spy_data.drop(['Close'],axis=1)\nspy_y= spy_data['Close']\nspy_X = mini.fit_transform(spy_X)\n# #\nspy_X_train,spy_X_test,spy_y_train,spy_y_test = train_test_split(spy_X,spy_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig spy_data\nregressor.fit(spy_X_train, spy_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/spy_model.pkl','wb'))\n#\n# # Loading model to compspye the results\nspy_model = pickle.load(open('models/spy_model.pkl','rb'))\nspy_future_x = spy_X\n# spy_X = spy_X[:]\n# future_x = X[-1]\n# x = X[:-1]\nspy_bata = pd.read_csv('data/stocks/stocks_portfolio/spy.csv')\nspy_date = spy_bata['Date']\nspy_date = spy_date.tail()\nspy_bata = pd.read_csv('data/stocks/stocks_portfolio/spy.csv')\nspy_date = spy_bata['Date']\nprint(spy_date.tail())\nprint('PREDICTED Close')\nspy_y = spy_model.predict(spy_future_x)\nprint(spy_y[-1:]) \n\ntsla_data = pd.read_csv('data/stocks/stocks_portfolio/tsla.csv')\ntsla_df0,tsla_df1 = tsla_data.shape[0], tsla_data.shape[1]\nprint('{} dates'.format(tsla_df0))\ntsla_data= tsla_data.drop(['Date'], axis =1)\ntsla_data = tsla_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small tsla_dataset, we will train our model with all availabe tsla_data.\ntsla_X= tsla_data.drop(['Close'],axis=1)\ntsla_y= tsla_data['Close']\ntsla_X = mini.fit_transform(tsla_X)\n# #\ntsla_X_train,tsla_X_test,tsla_y_train,tsla_y_test = train_test_split(tsla_X,tsla_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig tsla_data\nregressor.fit(tsla_X_train, tsla_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/tsla_model.pkl','wb'))\n#\n# # Loading model to comptslae the results\ntsla_model = pickle.load(open('models/tsla_model.pkl','rb'))\ntsla_future_x = tsla_X\n# tsla_X = tsla_X[:]\n# future_x = X[-1]\n# x = X[:-1]\ntsla_bata = pd.read_csv('data/stocks/stocks_portfolio/tsla.csv')\ntsla_date = tsla_bata['Date']\ntsla_date = tsla_date.tail()\ntsla_bata = pd.read_csv('data/stocks/stocks_portfolio/tsla.csv')\ntsla_date = tsla_bata['Date']\nprint(tsla_date.tail())\nprint('PREDICTED Close')\ntsla_y = tsla_model.predict(tsla_future_x)\nprint(tsla_y[-1:])\n\nxom_data = pd.read_csv('data/stocks/stocks_portfolio/xom.csv')\nxom_df0,xom_df1 = xom_data.shape[0], xom_data.shape[1]\nprint('{} 
dates'.format(xom_df0))\nxom_data= xom_data.drop(['Date'], axis =1)\nxom_data = xom_data.drop('Adj Close',axis=1)\n# #Splitting Training and Test Set\n# #Since we have a very small xom_dataset, we will train our model with all availabe xom_data.\nxom_X= xom_data.drop(['Close'],axis=1)\nxom_y= xom_data['Close']\nxom_X = mini.fit_transform(xom_X)\n# #\nxom_X_train,xom_X_test,xom_y_train,xom_y_test = train_test_split(xom_X,xom_y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n#\n# #Fitting model with trainig xom_data\nregressor.fit(xom_X_train, xom_y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/xom_model.pkl','wb'))\n#\n# # Loading model to compxome the results\nxom_model = pickle.load(open('models/xom_model.pkl','rb'))\nxom_future_x = xom_X\n# xom_X = xom_X[:]\n# future_x = X[-1]\n# x = X[:-1]\nxom_bata = pd.read_csv('data/stocks/stocks_portfolio/xom.csv')\nxom_date = xom_bata['Date']\nxom_date = xom_date.tail()\nxom_bata = pd.read_csv('data/stocks/stocks_portfolio/xom.csv')\nxom_date = xom_bata['Date']\nprint(xom_date.tail())\nprint('PREDICTED Close')\nxom_y = xom_model.predict(xom_future_x)\nprint(xom_y[-1:]) \n\ndata = pd.read_csv('data/stocks/stocks_portfolio/NIO.csv')\ndf0,df1 = data.shape[0], data.shape[1]\nprint('{} Dates '.format(df0))\ndata= data.drop(['Date'], axis =1)\ndata = data.drop('Adj Close',axis=1)\n#Splitting Training and Test Set\n#Since we have a very small dataset, we will train our model with all availabe data.\nX= data.drop(['Close'],axis=1)\ny= data['Close']\n# mini = MinMaxScaler()\n# X = mini.fit_transform(X)\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.64)\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\n\n#Fitting model with trainig data\nregressor.fit(X_train, y_train)\n\n# Saving model to disk\npickle.dump(regressor, open('models/nio_model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('models/nio_model.pkl','rb'))\nfuture_x = X\n# X = X[3295:3302]\n# future_x = X[-1:]\n# x = X[:-1]\nbata = pd.read_csv('data/stocks/stocks_portfolio/NIO.csv')\ndate = bata['Date']\ndate = date.tail()\nprint(date)\nbata = pd.read_csv('data/stocks/stocks_portfolio/NIO.csv')\ndate = bata['Date']\nprint('PREDICTED Close')\ny = model.predict(future_x)\nprint(y[-1:])"
] | [
[
"pandas.read_csv",
"sklearn.linear_model.LinearRegression",
"sklearn.model_selection.train_test_split"
]
] |
uncle-ben-z/cloudutils | [
"c1b5a751af3f9c322963be40f5a77dd282ef4257"
] | [
"projection.py"
] | [
"import os\nimport cv2\nimport numpy as np\nimport xml.etree.ElementTree as ET\nimport matplotlib\n\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot as plt\n\n\n# cf. nas/repos/cloud-colorizer/projection/... and repos/projection_utils/...\n\nclass View:\n \"\"\" View class representing the extrinsics and intrinsics of an image. \"\"\"\n\n def __init__(self, label, Rt, camera):\n self.label = label\n self.Rt = Rt\n self.camera = camera\n\n def __str__(self):\n txt = \"Label: \\t\" + self.label\n txt += \"\\nRt: \\n\"\n txt += str(self.Rt)\n txt += \"\\nCamera: \\t\" + str(self.camera)\n return txt\n\n @property\n def world_origin(self):\n \"\"\" Returns the origin in world coordinates. \"\"\"\n return (self.Rt @ np.array([0, 0, 0, 1]).T)[:3]\n\n @property\n def viewing_direction(self):\n # TODO: test\n \"\"\" Returns the viewing direction in the world coordinate system. \"\"\"\n return self.world_origin - (self.Rt @ np.array([0, 0, -1, 1]).T)[:3]\n\n def viewing_deviation(self, normal):\n # TODO: test\n \"\"\" Computes the angular deviation between the world viewing direction and a normal. \"\"\"\n nominator = np.dot(normal, self.world_viewing_direction)\n denominator = np.linalg.norm(normal) * np.linalg.norm(self.world_viewing_direction)\n return np.degrees(np.arccos(nominator / denominator))\n\n def _world2camera(self, p):\n p = np.append(p, 1)\n \"\"\" Transform from world to camera coordinate system. \"\"\"\n return np.linalg.inv(self.Rt) @ p\n\n def _camera2image(self, p, scale=1):\n \"\"\" Transform from camera to image coordinates. \"\"\"\n u, v = self.camera._camera2image(p, scale)\n return u, v\n\n def _world2image(self, p, scale=1):\n p = self._world2camera(p)\n return self._camera2image(p, scale)\n\n def project(self, p, scale=1):\n return np.int32(self._world2image(p, scale))\n\n\nclass Camera:\n \"\"\" Camera class representing the intrinsic properties of a camera. \"\"\"\n\n def __init__(self, f, k1, k2, k3=0, k4=0, p1=0, p2=0, p3=0, p4=0, b1=0, b2=0, cx=0, cy=0, w=0, h=0,\n pixel_width=1., pixel_height=1., focal_length=1.):\n self.f = f\n self.k1 = k1\n self.k2 = k2\n self.k3 = k3\n self.k4 = k4\n self.p1 = p1\n self.p2 = p2\n self.p3 = p3\n self.p4 = p4\n self.b1 = b1\n self.b2 = b2\n self.cx = cx\n self.cy = cy\n self.w = w\n self.h = h\n self.pixel_width = pixel_width\n self.pixel_height = pixel_height\n self.focal_length = focal_length\n\n def __str__(self):\n out = \"\\nf: \\t\" + str(self.f)\n out += \"\\ncx: \\t\" + str(self.cx)\n out += \"\\ncy: \\t\" + str(self.cy)\n out += \"\\nk1: \\t\" + str(self.k1)\n out += \"\\nk2: \\t\" + str(self.k2)\n out += \"\\nk3: \\t\" + str(self.k3)\n out += \"\\nk4: \\t\" + str(self.k4)\n out += \"\\np1: \\t\" + str(self.p1)\n out += \"\\np2: \\t\" + str(self.p2)\n out += \"\\np3: \\t\" + str(self.p3)\n out += \"\\np4: \\t\" + str(self.p4)\n out += \"\\nb1: \\t\" + str(self.b1)\n out += \"\\nb2: \\t\" + str(self.b2)\n out += \"\\nw: \\t\" + str(self.w)\n out += \"\\nh: \\t\" + str(self.h)\n return out\n\n def _radial_distortion(self, x, y):\n \"\"\" Corrects the radial distortion. 
\"\"\"\n r = np.sqrt(x ** 2 + y ** 2)\n x = x * (1 + self.k1 * r ** 2 + self.k2 * r ** 4 + self.k3 * r ** 6 + self.k4 * r ** 8) + (\n self.p1 * (r ** 2 + 2 * x ** 2) + 2 * self.p2 * x * y)\n y = y * (1 + self.k1 * r ** 2 + self.k2 * r ** 4 + self.k3 * r ** 6 + self.k4 * r ** 8) + (\n self.p2 * (r ** 2 + 2 * y ** 2) + 2 * self.p1 * x * y)\n\n # 1 0.997122 1 0.997102 1 0.997830 1 0.995084 1 944.612335\n return x, y\n\n def _camera2image(self, p, scale=1):\n \"\"\" Transforms point from camera to image coordinates. \"\"\"\n # project\n x, y, z = p[:3]\n x /= z\n y /= z\n\n # correct radial distortion\n x, y = self._radial_distortion(x, y)\n\n # transform to image coordinates\n u = self.w * 0.5 + self.cx + x * self.f + x * self.b1 + y * self.b2\n v = self.h * 0.5 + self.cy + y * self.f\n\n # apply scale\n u, v = scale * u, scale * v\n return u, v\n\n\ndef parse_bundler(path, w=0, h=0):\n \"\"\" Parse bundler cameras file. \"\"\"\n # parse image labels\n labels = [label.split('.')[0] for label in open(os.path.join(os.path.dirname(path), \"list.txt\"), 'r').readlines()]\n\n with open(path, 'r') as f:\n f.readline()\n num_cameras, num_points = np.int32(f.readline().split())\n\n # parse views\n views = {}\n for i in range(num_cameras):\n focal, k1, k2 = np.float32(f.readline().split())\n Rt = np.eye(4)\n Rt[0, :3] = np.float32(f.readline().split())\n Rt[1, :3] = np.float32(f.readline().split())\n Rt[2, :3] = np.float32(f.readline().split())\n Rt[3, :3] = -Rt[:3, :3].T @ np.float32(f.readline().split())\n\n Rt = Rt.T\n Rt[:, 1:3] *= -1\n\n views[labels[i]] = View(labels[i], Rt, Camera(focal, k1, k2, w=w, h=h))\n\n return views\n\n\ndef parse_agisoft_xml(path):\n \"\"\" Parse agisoft cameras xml. \"\"\"\n root = ET.parse(path).getroot()\n\n # parse intrinsics\n intrinsics = {}\n # loop over intrinsics\n for sensor in root.iter('sensor'):\n id = np.int32(sensor.attrib['id'])\n calib = sensor.find('calibration')\n f = float(calib.find('f').text)\n cx = float(calib.find('cx').text) if calib.find('cx') is not None else 0\n cy = float(calib.find('cy').text) if calib.find('cy') is not None else 0\n k1 = float(calib.find('k1').text) if calib.find('k1') is not None else 0\n k2 = float(calib.find('k2').text) if calib.find('k2') is not None else 0\n k3 = float(calib.find('k3').text) if calib.find('k3') is not None else 0\n k4 = float(calib.find('k4').text) if calib.find('k4') is not None else 0\n p1 = float(calib.find('p1').text) if calib.find('p1') is not None else 0\n p2 = float(calib.find('p2').text) if calib.find('p2') is not None else 0\n p3 = float(calib.find('p1').text) if calib.find('p1') is not None else 0\n p4 = float(calib.find('p2').text) if calib.find('p2') is not None else 0\n b1 = float(calib.find('b1').text) if calib.find('p1') is not None else 0\n b2 = float(calib.find('b2').text) if calib.find('p2') is not None else 0\n w = int(calib.find('resolution').attrib['width'])\n h = int(calib.find('resolution').attrib['height'])\n\n for prop in sensor.iter('property'):\n if prop.attrib['name'] == \"pixel_width\":\n pixel_width = float(prop.attrib['value'])\n elif prop.attrib['name'] == \"pixel_height\":\n pixel_height = float(prop.attrib['value'])\n elif prop.attrib['name'] == \"focal_length\":\n focal_length = float(prop.attrib['value'])\n\n # store intrinsics\n intrinsics[id] = Camera(f, k1, k2, k3, k4, p1, p2, p3, p4, b1, b2, cx, cy, w, h,\n pixel_width, pixel_height, focal_length)\n\n # parse chunk transform\n \"\"\" Get the transform for the agisoft chunk \"\"\"\n chunk_transform = np.eye(4)\n 
# rotation\n rotation = root.find('./chunk/transform/rotation').text\n rotation = np.array(rotation.split())\n rotation = rotation.astype(np.float).reshape((3, 3))\n rotation *= float(root.find('./chunk/transform/scale').text) # TODO: right?\n chunk_transform[:3, :3] = rotation\n # translation\n translation = root.find('./chunk/transform/translation').text\n translation = np.array(translation.split())\n translation = translation.astype(np.float)\n chunk_transform[:3, 3] = translation\n\n # parse views\n views = {}\n # find images and corresponding views\n for view in root.iter('camera'):\n # skip in case camera is disabled or no transform available\n if 'enabled' in view.attrib or view.find('transform') is None:\n continue\n\n # get the inverted camera matrix\n Rt = view.find('transform').text\n Rt = np.float32(Rt.split()).reshape((4, 4))\n # undo chunk transform\n Rt = chunk_transform @ Rt\n\n label = view.attrib['label']\n views[label] = View(label, Rt, intrinsics[np.int32(view.attrib['sensor_id'])])\n\n return views\n\n"
] | [
[
"numpy.sqrt",
"numpy.eye",
"numpy.append",
"numpy.array",
"numpy.linalg.inv",
"numpy.arccos",
"numpy.int32",
"matplotlib.use",
"numpy.dot",
"numpy.linalg.norm"
]
] |
sagarpahwa/qiskit-aer | [
"77e40c8d99fd0490d85285e96f87e4905017b646"
] | [
"qiskit/providers/aer/pulse/controllers/unitary_controller.py"
] | [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=no-name-in-module, import-error, invalid-name\n\n\"\"\"\nController for solving unitary evolution of a state-vector.\n\"\"\"\n\nimport time\nimport numpy as np\nfrom scipy.linalg.blas import get_blas_funcs\nfrom qiskit.tools.parallel import parallel_map, CPU_COUNT\nfrom .pulse_sim_options import PulseSimOptions\nfrom .pulse_de_solver import setup_de_solver\n\n# Imports from qutip_extra_lite\nfrom .pulse_utils import occ_probabilities, write_shots_memory\n\ndznrm2 = get_blas_funcs(\"znrm2\", dtype=np.float64)\n\n\ndef _full_simulation(exp, y0, pulse_sim_desc, pulse_de_model, solver_options=None):\n \"\"\"\n Set up full simulation, i.e. combining different (ideally modular) computational\n resources into one function.\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n psi, ode_t = unitary_evolution(exp, y0, pulse_de_model, solver_options)\n\n # ###############\n # do measurement\n # ###############\n rng = np.random.RandomState(exp['seed'])\n\n shots = pulse_sim_desc.shots\n # Init memory\n memory = np.zeros((shots, pulse_sim_desc.memory_slots), dtype=np.uint8)\n\n qubits = []\n memory_slots = []\n tlist = exp['tlist']\n for acq in exp['acquire']:\n if acq[0] == tlist[-1]:\n qubits += list(acq[1])\n memory_slots += list(acq[2])\n qubits = np.array(qubits, dtype='uint32')\n memory_slots = np.array(memory_slots, dtype='uint32')\n\n probs = occ_probabilities(qubits, psi, pulse_sim_desc.measurement_ops)\n rand_vals = rng.rand(memory_slots.shape[0] * shots)\n write_shots_memory(memory, memory_slots, probs, rand_vals)\n\n return [memory, psi, ode_t]\n\n\ndef run_unitary_experiments(pulse_sim_desc, pulse_de_model, solver_options=None):\n \"\"\" Runs unitary experiments for a given op_system\n\n Parameters:\n pulse_sim_desc (PulseSimDescription): description of pulse simulation\n pulse_de_model (PulseInternalDEModel): description of de model\n solver_options (PulseSimOptions): options\n\n Returns:\n tuple: two lists with experiment results\n\n Raises:\n Exception: if initial state is of incorrect format\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n if not pulse_sim_desc.initial_state.isket:\n raise Exception(\"Initial state must be a state vector.\")\n\n y0 = pulse_sim_desc.initial_state.full().ravel()\n\n # set num_cpus to the value given in settings if none in Options\n if not solver_options.num_cpus:\n solver_options.num_cpus = CPU_COUNT\n\n # setup seeds array\n seed = pulse_sim_desc.seed or np.random.randint(np.iinfo(np.int32).max - 1)\n prng = np.random.RandomState(seed)\n for exp in pulse_sim_desc.experiments:\n exp['seed'] = prng.randint(np.iinfo(np.int32).max - 1)\n\n map_kwargs = {'num_processes': solver_options.num_cpus}\n\n # run simulation on each experiment in parallel\n start = time.time()\n exp_results = parallel_map(_full_simulation,\n pulse_sim_desc.experiments,\n task_args=(y0, pulse_sim_desc, pulse_de_model, solver_options, ),\n **map_kwargs\n )\n 
end = time.time()\n exp_times = (np.ones(len(pulse_sim_desc.experiments)) *\n (end - start) / len(pulse_sim_desc.experiments))\n\n return exp_results, exp_times\n\n\ndef unitary_evolution(exp, y0, pulse_de_model, solver_options=None):\n \"\"\"\n Calculates evolution when there is no noise, or any measurements that are not at the end\n of the experiment.\n\n Parameters:\n exp (dict): dictionary containing experiment description\n y0 (array): initial state\n pulse_de_model (PulseInternalDEModel): container for de model\n solver_options (PulseSimOptions): options\n\n Returns:\n array: results of experiment\n\n Raises:\n Exception: if ODE solving has errors\n \"\"\"\n\n solver_options = PulseSimOptions() if solver_options is None else solver_options\n\n ODE = setup_de_solver(exp, y0, pulse_de_model, solver_options.de_options)\n\n tlist = exp['tlist']\n\n for t in tlist[1:]:\n ODE.integrate(t)\n if ODE.successful():\n psi = ODE.y / dznrm2(ODE.y)\n else:\n err_msg = 'ODE method exited with status: %s' % ODE.return_code()\n raise Exception(err_msg)\n\n # apply final rotation to come out of rotating frame\n psi_rot = np.exp(-1j * pulse_de_model.h_diag_elems * ODE.t)\n psi *= psi_rot\n\n return psi, ODE.t\n"
] | [
[
"numpy.zeros",
"numpy.exp",
"numpy.random.RandomState",
"scipy.linalg.blas.get_blas_funcs",
"numpy.iinfo",
"numpy.array"
]
] |
luqidndx/PyWake | [
"3d046eb14c4597de49ac2fee3771b8e0e68820ad"
] | [
"py_wake/tests/test_blockage_models/test_selfsimilarity.py"
] | [
"import pytest\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom py_wake.deficit_models import SelfSimilarityDeficit\nfrom py_wake.deficit_models.no_wake import NoWakeDeficit\nfrom py_wake.deficit_models.noj import NOJDeficit\nfrom py_wake.examples.data import hornsrev1\nfrom py_wake.examples.data.hornsrev1 import Hornsrev1Site\nfrom py_wake.superposition_models import LinearSum\nfrom py_wake.tests import npt\nfrom py_wake.wind_farm_models.engineering_models import All2AllIterative\n\n\[email protected](scope='module')\ndef setup():\n site = Hornsrev1Site()\n windTurbines = hornsrev1.HornsrevV80()\n ss = SelfSimilarityDeficit()\n return site, windTurbines, ss\n\n\ndef test_selfsimilarity_reference_figures(setup):\n ss = setup[2]\n ws = 10\n D = 80\n R = D / 2\n WS_ilk = np.array([[[ws]]])\n D_src_il = np.array([[D]])\n ct_ilk = np.array([[[.8]]])\n\n x1, y1 = -np.arange(200), np.array([0])\n deficit_centerline = ss.calc_deficit(WS_ilk=WS_ilk, D_src_il=D_src_il,\n dw_ijlk=x1.reshape((1, len(x1), 1, 1)),\n cw_ijlk=y1.reshape((1, len(y1), 1, 1)), ct_ilk=ct_ilk)[0, :, 0, 0]\n\n x2, y2 = np.array([-2 * R]), np.arange(200)\n deficit_radial = ss.calc_deficit(WS_ilk=WS_ilk, D_src_il=D_src_il,\n dw_ijlk=x2.reshape((1, len(x2), 1, 1)),\n cw_ijlk=y2.reshape((1, len(y2), 1, 1)), ct_ilk=ct_ilk)[0, :, 0, 0]\n\n r12 = np.sqrt(ss.lambda_ * (ss.eta + (x2 / R) ** 2)) # Eq. (13) from [1]\n\n if 0:\n plt.title('Fig 11 from [1]')\n plt.xlabel('x/R')\n plt.ylabel('a')\n plt.plot(x1 / R, deficit_centerline / ws)\n print(list(np.round(deficit_centerline[::20], 6)))\n\n plt.figure()\n plt.title('Fig 10 from [1]')\n print(list(np.round(deficit_radial[::20] / deficit_radial[0], 6)))\n plt.xlabel('y/R12 (epsilon)')\n plt.ylabel('f')\n plt.plot((y2 / R) / r12, deficit_radial / deficit_radial[0])\n plt.show()\n\n fig11_ref = np.array([[-0.025, -1, -2, -3, -4, -5], [0.318, 0.096, 0.035, 0.017, 0.010, 0.0071]]).T\n npt.assert_array_almost_equal(np.interp(-fig11_ref[:, 0], -x1 / R, deficit_centerline / ws), fig11_ref[:, 1], 1)\n npt.assert_array_almost_equal(deficit_centerline[::20], [0, 1.806478, 0.95716, 0.548851, 0.345007,\n 0.233735, 0.1677, 0.125738, 0.097573, 0.077819])\n\n fig10_ref = np.array([[0, 1, 2, 3], [1, .5, .15, .045]]).T\n npt.assert_array_almost_equal(np.interp(fig10_ref[:, 0], (y2 / R) / r12, deficit_radial / deficit_radial[0]),\n fig10_ref[:, 1], 1)\n npt.assert_array_almost_equal(deficit_radial[::20] / deficit_radial[0],\n [1.0, 0.933011, 0.772123, 0.589765, 0.430823, 0.307779,\n 0.217575, 0.153065, 0.107446, 0.075348])\n\n\ndef test_blockage_map(setup):\n site, windTurbines, ss = setup\n wm = All2AllIterative(site, windTurbines, wake_deficitModel=NoWakeDeficit(),\n superpositionModel=LinearSum(), blockage_deficitModel=ss)\n\n flow_map = wm(x=[0], y=[0], wd=[270], ws=[10]).flow_map()\n X_j, Y_j = flow_map.XY\n WS_eff = flow_map.WS_eff_xylk[:, :, 0, 0]\n\n if 0:\n plt.contourf(X_j, Y_j, WS_eff)\n plt.plot(X_j[200, ::50], Y_j[200, ::50], '.-')\n plt.plot(X_j[250, ::50], Y_j[250, ::50], '.-')\n print(list(np.round(WS_eff[200, ::50], 6)))\n print(list(np.round(WS_eff[250, ::50], 6)))\n ss.windTurbines.plot([0], [0], wd=[270])\n plt.show()\n\n npt.assert_array_almost_equal(WS_eff[200, ::50], [9.940967, 9.911659, 9.855934,\n 9.736016, 9.44199, 10.0, 10.0, 10.0, 10.0, 10.0])\n npt.assert_array_almost_equal(WS_eff[250, ::50], [9.937601, 9.90397, 9.834701,\n 9.659045, 9.049764, 10.0, 10.0, 10.0, 10.0, 10.0])\n\n\ndef test_wake_and_blockage(setup):\n site, windTurbines, ss = setup\n noj_ss = 
All2AllIterative(site, windTurbines, wake_deficitModel=NOJDeficit(),\n blockage_deficitModel=ss, superpositionModel=LinearSum())\n\n flow_map = noj_ss(x=[0], y=[0], wd=[270], ws=[10]).flow_map()\n X_j, Y_j = flow_map.XY\n WS_eff = flow_map.WS_eff_xylk[:, :, 0, 0]\n\n npt.assert_array_almost_equal(WS_eff[200, ::50], [9.940967, 9.911659, 9.855934, 9.736016, 9.44199, 4.560631,\n 5.505472, 6.223921, 6.782925, 7.226399])\n npt.assert_array_almost_equal(WS_eff[250, ::50], [9.937601, 9.90397, 9.834701, 9.659045, 9.049764, 4.560631,\n 5.505472, 6.223921, 6.782925, 7.226399])\n\n if 0:\n plt.contourf(X_j, Y_j, WS_eff)\n plt.plot(X_j[200, ::50], Y_j[200, ::50], '.-')\n plt.plot(X_j[250, ::50], Y_j[250, ::50], '.-')\n print(list(np.round(WS_eff[200, ::50], 6)))\n print(list(np.round(WS_eff[250, ::50], 6)))\n ss.windTurbines.plot([0], [0], wd=[270])\n plt.show()\n\n\ndef test_aep_two_turbines(setup):\n site, windTurbines, ss = setup\n\n nwm_ss = All2AllIterative(site, windTurbines, wake_deficitModel=NoWakeDeficit(),\n blockage_deficitModel=ss, superpositionModel=LinearSum())\n\n sim_res = nwm_ss(x=[0, 80 * 3], y=[0, 0])\n aep_no_blockage = sim_res.aep_ilk(with_wake_loss=False).sum(2)\n aep = sim_res.aep_ilk().sum(2)\n\n # blockage reduce aep(wd=270) by .5%\n npt.assert_almost_equal((aep_no_blockage[0, 270] - aep[0, 270]) / aep_no_blockage[0, 270] * 100, 0.4896853)\n\n if 0:\n plt.plot(sim_res.WS_eff_ilk[:, :, 7].T)\n plt.show()\n"
] | [
[
"numpy.sqrt",
"numpy.interp",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.contourf",
"numpy.round",
"matplotlib.pyplot.xlabel"
]
] |
NikolaySokolov152/Unet_multiclass | [
"d07f6809b422519097560b07f67d0f139e718381"
] | [
"splitImages.py"
] | [
"import skimage.io as io\nimport numpy as np\nimport os\n\ndef to_0_255_format_img(in_img):\n max_val = in_img[:,:].max()\n if max_val <= 1:\n out_img = np.round(in_img * 255)\n return out_img.astype(np.uint8)\n else:\n return in_img\n\ndef to_0_1_format_img(in_img):\n max_val = in_img[:,:].max()\n if max_val <= 1:\n return in_img\n else:\n out_img = in_img / 255\n return out_img\n\ndef split_image(img, tiled_name, save_dir = None, size = 256, overlap = 64, unique_area = 0):\n '''\n Split image to array of smaller images.\n \n Parameters\n ----------\n img : np.array\n Input image.\n tiled_name : list of np.arrays\n Tiled input imagr.\n save_dir : string, optional\n A folder with saved tiled images in png format. The default is \"split_test/\". If save_dir = None, tiles don't save to hard drive.\n size : int, optional\n Size of tile side. Tiles are always square. The default is 256.\n overlap : int, optional\n Overlap by two tiles. The default is 64.\n unique_area : int, optional\n If resulting overlap of two tiles is too big, you can skip one tile. Used when preparing a training test. In test process should be 0. The default is 0.\n\n Returns\n -------\n 1. Array of tiled images.\n 2. Number of tiles per column, number of images per line.\n\n '''\n \n tiled_img = []\n h, w = img.shape[0:2]\n step = size - overlap\n count = 0\n rows = 0\n for y in range(0, h, step):\n rows += 1\n start_y = y\n end_y = start_y + size\n \n if (h - y <= size):\n if(h - y <= unique_area):\n break\n start_y = h - size\n end_y = h\n \n for x in range(0, w, step):\n start_x = x\n end_x = x + size\n\n if (w - x <= size):\n if (w - x < unique_area):\n break\n start_x = w - size\n end_x = w\n \n tiled_img.append(img[start_y : end_y, start_x : end_x])\n if(save_dir != None):\n io.imsave( os.path.join(save_dir, (tiled_name + \"_\" + str(count) + \".png\")), to_0_255_format_img(img[start_y : end_y, start_x : end_x]))\n count += 1\n if(end_x == w): # reached the end of the line\n break\n if(end_y == h):# reached the end of the height\n break\n \n cols = int(count / rows)\n return tiled_img, (rows, cols)\n\ndef glit_image(img_arr, out_size, tile_info, overlap = 64):\n '''\n Glit array of images to one big image\n\n Parameters\n ----------\n img_arr : list of np.array\n Tiles.\n out_size : (int, int)\n Shape of original image.\n tile_info : (int, int)\n Information about splitting (rows, cols).\n overlap : int, optional\n Overlap value. 
The default is 64.\n\n Returns\n -------\n np.array\n Glitted image.\n\n '''\n size = img_arr[0].shape[0]\n h, w = out_size[0:2]\n #print(h,w)\n count_x = tile_info[1]\n count_y = tile_info[0]\n \n out = np.zeros(out_size)\n \n \n # corners\n out[0 : size, 0 : size] = img_arr[0]\n out[h - size : h, w - size : w] = img_arr[len(img_arr) - 1]\n out[0 : size, w - size : w] = img_arr[count_x - 1]\n out[h - size : h, 0 : size] = img_arr[len(img_arr) - count_x]\n \n half = int(overlap / 2)\n area = size - overlap\n \n for x in range(1, count_x - 1):\n #first row\n out[0 : size, half + x * area : half + (x + 1) * area] = img_arr[x][0 : size, half : half + area] \n #last row\n out[h - size : h, half + x * area : half + (x + 1) * area] = img_arr[(count_y - 1) * count_x + x][0 : size, half : size - half]\n\n for y in range(1, count_y - 1):\n # first column\n out[half + y * area : half + (y + 1) * area, 0 : size] = img_arr[y * count_x][half : size - half, 0 : size] \n # last column\n out[half + y * area : half + (y + 1) * area, w - size : w] = img_arr[(y + 1) * count_x - 1][half : size - half, 0 : size] \n \n \n # inner area\n for y in range(1, count_y - 1):\n for x in range(1, count_x - 1):\n out[half + y * area : half + (y + 1) * area, half + x * area : half + (x + 1) * area] = img_arr[y * count_x + x][half : size - half, half : size - half] \n \n \n return to_0_255_format_img(out)\n\ndef test_split(filepath, filename, tiled_save_folder = \"split_test\", tiledfilename = \"test\"):\n \n if not os.path.isdir(tiled_save_folder):\n print(\"create output directory:\" + tiled_save_folder)\n os.makedirs(tiled_save_folder)\n\n img = io.imread(os.path.join(filepath, filename), as_gray=True)\n img = to_0_1_format_img(img)\n arr, s = split_image(img, tiledfilename, save_dir = tiled_save_folder, size = 256, overlap = 64)\n\n print(\"x,y:\", s)\n\n out = glit_image(arr, img.shape, s, overlap = 64)\n io.imsave(os.path.join(filepath, \"test_out.png\"), out)\n\n print(\"img-glit_out:\", (to_0_255_format_img(img)-out).sum())\n\ndef test_glit(overlap=128, glit_save_folder = \"glit_test\", glitfilename = \"glit_test_white_black_square\"): #white_black_square\n\n if (overlap == 128): # (5,7) with overlap 128 and (4,5) with overlap 64\n count_x, count_y = (5, 7)\n elif (overlap == 64):\n count_x, count_y = (4, 5)\n else:\n print(\"no calculated data\")\n return\n\n test_list = []\n for i in range(count_x * count_y):\n if i % 2 == 0:\n test_list.append(np.zeros((256, 256), np.float32))\n else:\n test_list.append(np.ones((256, 256), np.float32))\n\n res_img = glit_image(test_list, (768, 1024), (count_x, count_y), overlap=overlap)\n\n if not os.path.isdir(glit_save_folder):\n print(\"create out_dir:\" + glit_save_folder)\n os.makedirs(glit_save_folder)\n io.imsave(os.path.join(glit_save_folder, glitfilename + \".png\"), res_img)\n\n\nif __name__ == \"__main__\":\n test_split('data\\\\test', \"testing.png\")\n test_glit()"
] | [
[
"numpy.round",
"numpy.ones",
"numpy.zeros"
]
] |
llv22/baal_tf2.4_mac | [
"6eed225f8b57e61d8d16b1868ea655384c566700"
] | [
"experiments/vgg_mcdropout_cifar10.py"
] | [
"import argparse\nimport random\nfrom copy import deepcopy\n\nimport torch\nimport torch.backends\nfrom torch import optim\nfrom torch.hub import load_state_dict_from_url\nfrom torch.nn import CrossEntropyLoss\nfrom torchvision import datasets\nfrom torchvision.models import vgg16\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\n\nfrom baal.active import get_heuristic, ActiveLearningDataset\nfrom baal.active.active_loop import ActiveLearningLoop\nfrom baal.bayesian.dropout import patch_module\nfrom baal import ModelWrapper\n\n\"\"\"\nMinimal example to use BaaL.\n\"\"\"\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epoch\", default=100, type=int)\n parser.add_argument(\"--batch_size\", default=32, type=int)\n parser.add_argument(\"--initial_pool\", default=1000, type=int)\n parser.add_argument(\"--query_size\", default=100, type=int)\n parser.add_argument(\"--lr\", default=0.001)\n parser.add_argument(\"--heuristic\", default=\"bald\", type=str)\n parser.add_argument(\"--iterations\", default=20, type=int)\n parser.add_argument(\"--shuffle_prop\", default=0.05, type=float)\n parser.add_argument(\"--learning_epoch\", default=20, type=int)\n return parser.parse_args()\n\n\ndef get_datasets(initial_pool):\n transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(30),\n transforms.ToTensor(),\n transforms.Normalize(3 * [0.5], 3 * [0.5]),\n ]\n )\n test_transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(3 * [0.5], 3 * [0.5]),\n ]\n )\n # Note: We use the test set here as an example. You should make your own validation set.\n train_ds = datasets.CIFAR10(\n \".\", train=True, transform=transform, target_transform=None, download=True\n )\n test_set = datasets.CIFAR10(\n \".\", train=False, transform=test_transform, target_transform=None, download=True\n )\n\n active_set = ActiveLearningDataset(train_ds, pool_specifics={\"transform\": test_transform})\n\n # We start labeling randomly.\n active_set.label_randomly(initial_pool)\n return active_set, test_set\n\n\ndef main():\n args = parse_args()\n use_cuda = torch.cuda.is_available()\n torch.backends.cudnn.benchmark = True\n random.seed(1337)\n torch.manual_seed(1337)\n if not use_cuda:\n print(\"warning, the experiments would take ages to run on cpu\")\n\n hyperparams = vars(args)\n\n active_set, test_set = get_datasets(hyperparams[\"initial_pool\"])\n\n heuristic = get_heuristic(hyperparams[\"heuristic\"], hyperparams[\"shuffle_prop\"])\n criterion = CrossEntropyLoss()\n model = vgg16(pretrained=False, num_classes=10)\n weights = load_state_dict_from_url(\"https://download.pytorch.org/models/vgg16-397923af.pth\")\n weights = {k: v for k, v in weights.items() if \"classifier.6\" not in k}\n model.load_state_dict(weights, strict=False)\n\n # change dropout layer to MCDropout\n model = patch_module(model)\n\n if use_cuda:\n model.cuda()\n optimizer = optim.SGD(model.parameters(), lr=hyperparams[\"lr\"], momentum=0.9)\n\n # Wraps the model into a usable API.\n model = ModelWrapper(model, criterion)\n\n logs = {}\n logs[\"epoch\"] = 0\n\n # for prediction we use a smaller batchsize\n # since it is slower\n active_loop = ActiveLearningLoop(\n active_set,\n model.predict_on_dataset,\n heuristic,\n hyperparams.get(\"query_size\", 1),\n batch_size=10,\n iterations=hyperparams[\"iterations\"],\n use_cuda=use_cuda,\n )\n # We will reset the weights at each 
active learning step.\n init_weights = deepcopy(model.state_dict())\n\n for epoch in tqdm(range(args.epoch)):\n # Load the initial weights.\n model.load_state_dict(init_weights)\n model.train_on_dataset(\n active_set,\n optimizer,\n hyperparams[\"batch_size\"],\n hyperparams[\"learning_epoch\"],\n use_cuda,\n )\n\n # Validation!\n model.test_on_dataset(test_set, hyperparams[\"batch_size\"], use_cuda)\n metrics = model.metrics\n should_continue = active_loop.step()\n if not should_continue:\n break\n\n val_loss = metrics[\"test_loss\"].value\n logs = {\n \"val\": val_loss,\n \"epoch\": epoch,\n \"train\": metrics[\"train_loss\"].value,\n \"labeled_data\": active_set.labelled,\n \"Next Training set size\": len(active_set),\n }\n print(logs)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.manual_seed",
"torch.cuda.is_available",
"torch.hub.load_state_dict_from_url",
"torch.nn.CrossEntropyLoss"
]
] |
KristianHolsheimer/keras-gym | [
"0296ddcc8685e1ce732c3173caaa0fd25af9ef58"
] | [
"keras_gym/policies/value_based.py"
] | [
"import numpy as np\n\nfrom ..base.mixins import RandomStateMixin\nfrom ..policies.base import BasePolicy\nfrom ..utils import argmax\n\n\n__all__ = (\n 'EpsilonGreedy',\n # 'BoltzmannPolicy', #TODO: implement\n)\n\n\nclass EpsilonGreedy(BasePolicy, RandomStateMixin):\n \"\"\"\n Value-based policy to select actions using epsilon-greedy strategy.\n\n Parameters\n ----------\n q_function : callable\n\n A state-action value function object.\n\n epsilon : float between 0 and 1\n\n The probability of selecting an action uniformly at random.\n\n random_seed : int, optional\n\n Sets the random state to get reproducible results.\n\n \"\"\"\n def __init__(self, q_function, epsilon=0.1, random_seed=None):\n self.q_function = q_function\n self.epsilon = epsilon\n self.random_seed = random_seed # sets self.random in RandomStateMixin\n\n def __call__(self, s):\n if self.random.rand() < self.epsilon:\n return self.q_function.env.action_space.sample()\n\n a = self.greedy(s)\n return a\n\n def set_epsilon(self, epsilon):\n \"\"\"\n Change the value for ``epsilon``.\n\n Parameters\n ----------\n epsilon : float between 0 and 1\n\n The probability of selecting an action uniformly at random.\n\n Returns\n -------\n self\n\n The updated instance.\n\n \"\"\"\n self.epsilon = epsilon\n return self\n\n def greedy(self, s):\n Q = self.q_function(s) # shape: [num_actions]\n a = argmax(Q)\n return a\n\n def dist_params(self, s):\n Q = self.q_function(s) # shape: [num_actions]\n a = argmax(Q)\n n = self.q_function.num_actions\n p = np.ones(n) * self.epsilon / n\n p[a] += 1 - self.epsilon\n assert p.sum() == 1\n return p\n"
] | [
[
"numpy.ones"
]
] |
Lab41/attalos | [
"43b5b61f6b2a2b5f4a49ef1286b4577f1bf4e140"
] | [
"attalos/evaluation/evaluation.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\nimport scipy as sp\n\nfrom sklearn import metrics\n\nclass Evaluation(object):\n \"\"\"\n Assumes:\n predicted: matrix of label prediction confidence [trial, tag]\n eval_data: matrix of ground truth classifications [trial, tag]\n \"\"\"\n def __init__(self, truth, predictions, k=0.5): \n self.predictions_raw = predictions\n\n self.set_k(k)\n\n self.ground_truth = truth\n self.ntrials = predictions.shape[0]\n self.ntags = predictions.shape[1]\n\n # Defaults are precision, recall, and F1.\n # May be extended by appending other metric functions to list\n self.metrics = [self.precision, self.recall, self.f1]\n\n def set_k(self, k):\n \"\"\"\n Performs two possible functions:\n If k is greater than 1, computes predictions for top-k\n from the raw predictions.\n If k is between 0 and 1, sets k as a threshold for\n positive predictions.\n \"\"\"\n if k <= 0:\n return\n elif k < 1 and k > 0:\n self.predictions = self.confidence_threshold(k).astype(int)\n return\n elif k > self.predictions_raw.shape[1]:\n return\n\n predictions = np.zeros(self.predictions_raw.shape)\n\n for raw_r, prediction_r in zip(self.predictions_raw, predictions):\n top_indices = np.argsort(raw_r)[-int(k):]\n prediction_r[top_indices] = 1\n\n self.predictions = predictions.astype(int)\n self.k = k\n\n def confidence_threshold(self, threshold):\n \"\"\"\n Sets the given float as a threshold for positive prediction,\n producing binary predictions from the raw confidence scores.\n \"\"\"\n temp = np.copy(self.predictions_raw)\n temp[np.abs(self.predictions_raw) <= threshold] = 0\n temp[np.abs(self.predictions_raw) > threshold] = 1\n self.k = threshold\n return temp\n\n # Beginning of metrics\n\n def precision(self):\n \"\"\"\n Unweighted mean precision score across predicted tags\n \"\"\"\n try:\n self.m_precision = metrics.precision_score(\n self.ground_truth, self.predictions, \n average='macro')\n except metrics.base.UndefinedMetricWarning:\n pass\n return self.m_precision\n\n def recall(self):\n \"\"\"\n Unweighted mean recall score across predicted tags\n \"\"\"\n try:\n self.m_recall = metrics.recall_score(\n self.ground_truth, self.predictions, \n average='macro')\n except metrics.base.UndefinedMetricWarning:\n pass\n return self.m_recall\n\n def f1(self):\n \"\"\"\n Computes recall score for top-k or above-threshold predictions\n \"\"\"\n self.f1 = metrics.f1_score(\n self.ground_truth, self.predictions, \n average='macro')\n return self.f1\n\n def roc_auc(self):\n \"\"\"\n Computes the area under the ROC curve, given the raw\n confidence scores.\n Assumes:\n each column has at least two values \n (i.e. each example tag appears more than once)\n \"\"\"\n try:\n self.roc_auc = metrics.roc_auc_score(\n self.ground_truth, self.predictions_raw, \n average='macro')\n return self.roc_auc\n except ValueError:\n return 'Area Under Curve could not be computed ...'\n\n def coverage_error(self):\n \"\"\"\n The coverage_error function computes the average number of labels \n that have to be included in the final prediction such that all true \n labels are predicted. This is useful if you want to know how many \n top-scored-labels you have to predict in average without missing any \n true one. 
The best value of this metrics is thus the average number \n of true labels.\n \"\"\"\n self.coverage_error = metrics.coverage_error(\n self.ground_truth, self.predictions_raw)\n avg_true_labels = np.count_nonzero(self.ground_truth) / self.ntrials\n ce_message = 'Coverage Error [' + str(avg_true_labels) + ', ~): '\n return ce_message + str(self.coverage_error)\n\n def ranking_precision(self):\n \"\"\"\n Label ranking average precision (LRAP) is the average over each \n ground truth label assigned to each sample, of the ratio of \n true vs. total labels with lower score. This metric will yield \n better scores if you are able to give better rank to the labels \n associated with each sample. The obtained score is always strictly \n greater than 0, and the best value is 1.\n \"\"\"\n self.ranking_precision = metrics.label_ranking_average_precision_score(\n self.ground_truth, self.predictions_raw)\n return self.ranking_precision\n\n def ranking_loss(self):\n \"\"\"\n Computes the ranking loss, which averages the number of\n incorrectly-ordered labels (i.e. true labels have a lower \n score than false labels, weighted by the the inverse number \n of false and true labels) based on raw precision scores.\n \"\"\"\n self.ranking_loss = metrics.label_ranking_loss(\n self.ground_truth, self.predictions_raw)\n return self.ranking_loss\n\n def spearman(self):\n \"\"\"\n Computes the average Spearman's correlation coefficient over\n the raw predictions and ground truths.\n \"\"\"\n scores = np.empty((self.ntags, 1))\n for tag_n in range(0, self.ntags):\n [spearman_value, p_value] = sp.stats.spearmanr(\n self.ground_truth[:,tag_n], self.predictions_raw[:,tag_n])\n if (math.isnan(spearman_value)):\n spearman_value = 0.0\n scores[tag_n] = spearman_value\n\n self.spearman = np.average(scores)\n return self.spearman\n\n # End of metrics\n\n def print_evaluation(self):\n \"\"\"\n Prints the results of evaluation metrics.\n \"\"\"\n print('---Evaluation---')\n if self.k >= 1:\n print('---(where k = ' \n + str(self.k) \n + ')---')\n else:\n print('---where confidence > ' \n + str(self.k) \n + ' is classified as positive---')\n for metric in self.metrics:\n print(metric())\n\n def evaluate(self):\n return [metric() for metric in self.metrics]\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(description='Test Evaluation')\n parser.add_argument('evaluation_dataset_filename',\n dest='evaluation_dataset_filename',\n type=str,\n help='Evaluation Dataset Filename')\n parser.add_argument('prediction_matrix_filename',\n dest='prediction_matrix_filename',\n type=str,\n help='Prediction Matrix Filename')\n \n args = parser.parse_args()\n\n evaluation_dataset = np.loadtxt(evaluation_dataset_filename)\n prediction_matrix = np.loadtxt(prediction_matrix_filename)\n\n evaluated = Evaluation(evaluation_dataset, prediction_matrix)\n\n evaluated.print_evaluation()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"sklearn.metrics.coverage_error",
"numpy.empty",
"numpy.zeros",
"numpy.argsort",
"numpy.abs",
"numpy.copy",
"numpy.count_nonzero",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_score",
"sklearn.metrics.label_ranking_average_precision_score",
"sklearn.metrics.label_ranking_loss",
"sklearn.metrics.recall_score",
"numpy.average",
"scipy.stats.spearmanr",
"numpy.loadtxt"
]
] |
syang1993/OpenNMT-tf | [
"eaeb2970c14dd6a6e1b9d261e15a645b589852e8"
] | [
"opennmt/decoders/rnn_decoder.py"
] | [
"\"\"\"Define RNN-based decoders.\"\"\"\n\nimport inspect\n\nimport tensorflow as tf\n\nfrom tensorflow.python.estimator.util import fn_args\n\nfrom opennmt.decoders.decoder import Decoder, logits_to_cum_log_probs, build_output_layer\nfrom opennmt.utils.cell import build_cell\n\n\nclass RNNDecoder(Decoder):\n \"\"\"A basic RNN decoder.\"\"\"\n\n def __init__(self,\n num_layers,\n num_units,\n bridge=None,\n cell_class=tf.contrib.rnn.LSTMCell,\n dropout=0.3,\n residual_connections=False):\n \"\"\"Initializes the decoder parameters.\n\n Args:\n num_layers: The number of layers.\n num_units: The number of units in each layer.\n bridge: A :class:`opennmt.layers.bridge.Bridge` to pass the encoder state\n to the decoder.\n cell_class: The inner cell class or a callable taking :obj:`num_units` as\n argument and returning a cell.\n dropout: The probability to drop units in each layer output.\n residual_connections: If ``True``, each layer input will be added to its\n output.\n \"\"\"\n self.num_layers = num_layers\n self.num_units = num_units\n self.bridge = bridge\n self.cell_class = cell_class\n self.dropout = dropout\n self.residual_connections = residual_connections\n\n def _init_state(self, zero_state, initial_state=None):\n if initial_state is None:\n return zero_state\n elif self.bridge is None:\n raise ValueError(\"A bridge must be configured when passing encoder state\")\n else:\n return self.bridge(initial_state, zero_state)\n\n def _build_cell(self,\n mode,\n batch_size,\n initial_state=None,\n memory=None,\n memory_sequence_length=None,\n dtype=None,\n alignment_history=False):\n _ = memory_sequence_length\n _ = alignment_history\n\n if memory is None and dtype is None:\n raise ValueError(\"dtype argument is required when memory is not set\")\n\n cell = build_cell(\n self.num_layers,\n self.num_units,\n mode,\n dropout=self.dropout,\n residual_connections=self.residual_connections,\n cell_class=self.cell_class)\n\n initial_state = self._init_state(\n cell.zero_state(batch_size, dtype or memory.dtype), initial_state=initial_state)\n\n return cell, initial_state\n\n def decode(self,\n inputs,\n sequence_length,\n vocab_size=None,\n initial_state=None,\n sampling_probability=None,\n embedding=None,\n output_layer=None,\n mode=tf.estimator.ModeKeys.TRAIN,\n memory=None,\n memory_sequence_length=None):\n _ = memory\n _ = memory_sequence_length\n\n batch_size = tf.shape(inputs)[0]\n\n if (sampling_probability is not None\n and (tf.contrib.framework.is_tensor(sampling_probability)\n or sampling_probability > 0.0)):\n if embedding is None:\n raise ValueError(\"embedding argument must be set when using scheduled sampling\")\n\n tf.summary.scalar(\"sampling_probability\", sampling_probability)\n helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(\n inputs,\n sequence_length,\n embedding,\n sampling_probability)\n else:\n helper = tf.contrib.seq2seq.TrainingHelper(inputs, sequence_length)\n\n cell, initial_state = self._build_cell(\n mode,\n batch_size,\n initial_state=initial_state,\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n dtype=inputs.dtype)\n\n if output_layer is None:\n output_layer = build_output_layer(self.num_units, vocab_size, dtype=inputs.dtype)\n\n # With TrainingHelper, project all timesteps at once.\n fused_projection = isinstance(helper, tf.contrib.seq2seq.TrainingHelper)\n\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell,\n helper,\n initial_state,\n output_layer=output_layer if not fused_projection else None)\n\n outputs, state, length = 
tf.contrib.seq2seq.dynamic_decode(decoder)\n\n if fused_projection and output_layer is not None:\n logits = output_layer(outputs.rnn_output)\n else:\n logits = outputs.rnn_output\n\n return (logits, state, length)\n\n def dynamic_decode(self,\n embedding,\n start_tokens,\n end_token,\n vocab_size=None,\n initial_state=None,\n output_layer=None,\n maximum_iterations=250,\n mode=tf.estimator.ModeKeys.PREDICT,\n memory=None,\n memory_sequence_length=None,\n dtype=None,\n return_alignment_history=False):\n batch_size = tf.shape(start_tokens)[0]\n\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n embedding,\n start_tokens,\n end_token)\n\n cell, initial_state = self._build_cell(\n mode,\n batch_size,\n initial_state=initial_state,\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n dtype=dtype,\n alignment_history=return_alignment_history)\n\n if output_layer is None:\n output_layer = build_output_layer(self.num_units, vocab_size, dtype=dtype or memory.dtype)\n\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell,\n helper,\n initial_state,\n output_layer=output_layer)\n\n outputs, state, length = tf.contrib.seq2seq.dynamic_decode(\n decoder, maximum_iterations=maximum_iterations)\n\n predicted_ids = outputs.sample_id\n log_probs = logits_to_cum_log_probs(outputs.rnn_output, length)\n\n # Make shape consistent with beam search.\n predicted_ids = tf.expand_dims(predicted_ids, 1)\n length = tf.expand_dims(length, 1)\n log_probs = tf.expand_dims(log_probs, 1)\n\n if return_alignment_history:\n alignment_history = _get_alignment_history(state)\n if alignment_history is not None:\n alignment_history = tf.expand_dims(alignment_history, 1)\n return (predicted_ids, state, length, log_probs, alignment_history)\n return (predicted_ids, state, length, log_probs)\n\n def dynamic_decode_and_search(self,\n embedding,\n start_tokens,\n end_token,\n vocab_size=None,\n initial_state=None,\n output_layer=None,\n beam_width=5,\n length_penalty=0.0,\n maximum_iterations=250,\n mode=tf.estimator.ModeKeys.PREDICT,\n memory=None,\n memory_sequence_length=None,\n dtype=None,\n return_alignment_history=False):\n if (return_alignment_history and\n \"reorder_tensor_arrays\" not in fn_args(tf.contrib.seq2seq.BeamSearchDecoder.__init__)):\n tf.logging.warn(\"The current version of tf.contrib.seq2seq.BeamSearchDecoder \"\n \"does not support returning the alignment history. None will \"\n \"be returned instead. 
Consider upgrading TensorFlow.\")\n alignment_history = False\n else:\n alignment_history = return_alignment_history\n\n batch_size = tf.shape(start_tokens)[0]\n\n # Replicate batch `beam_width` times.\n if initial_state is not None:\n initial_state = tf.contrib.seq2seq.tile_batch(\n initial_state, multiplier=beam_width)\n if memory is not None:\n memory = tf.contrib.seq2seq.tile_batch(\n memory, multiplier=beam_width)\n if memory_sequence_length is not None:\n memory_sequence_length = tf.contrib.seq2seq.tile_batch(\n memory_sequence_length, multiplier=beam_width)\n\n cell, initial_state = self._build_cell(\n mode,\n batch_size * beam_width,\n initial_state=initial_state,\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n dtype=dtype,\n alignment_history=alignment_history)\n\n if output_layer is None:\n output_layer = build_output_layer(self.num_units, vocab_size, dtype=dtype or memory.dtype)\n\n decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n cell,\n embedding,\n start_tokens,\n end_token,\n initial_state,\n beam_width,\n output_layer=output_layer,\n length_penalty_weight=length_penalty)\n\n outputs, beam_state, length = tf.contrib.seq2seq.dynamic_decode(\n decoder, maximum_iterations=maximum_iterations)\n\n predicted_ids = tf.transpose(outputs.predicted_ids, perm=[0, 2, 1])\n log_probs = beam_state.log_probs\n state = beam_state.cell_state\n\n if return_alignment_history:\n alignment_history = _get_alignment_history(state)\n if alignment_history is not None:\n alignment_history = tf.reshape(\n alignment_history, [batch_size, beam_width, -1, tf.shape(memory)[1]])\n return (predicted_ids, state, length, log_probs, alignment_history)\n return (predicted_ids, state, length, log_probs)\n\n\ndef _get_alignment_history(cell_state):\n \"\"\"Returns the alignment history from the cell state.\"\"\"\n if not hasattr(cell_state, \"alignment_history\") or cell_state.alignment_history == ():\n return None\n alignment_history = cell_state.alignment_history\n if isinstance(alignment_history, tf.TensorArray):\n alignment_history = alignment_history.stack()\n alignment_history = tf.transpose(alignment_history, perm=[1, 0, 2])\n return alignment_history\n\ndef _build_attention_mechanism(attention_mechanism,\n num_units,\n memory,\n memory_sequence_length=None):\n \"\"\"Builds an attention mechanism from a class or a callable.\"\"\"\n if inspect.isclass(attention_mechanism):\n kwargs = {}\n if \"dtype\" in fn_args(attention_mechanism):\n # For TensorFlow 1.5+, dtype should be set in the constructor.\n kwargs[\"dtype\"] = memory.dtype\n return attention_mechanism(\n num_units, memory, memory_sequence_length=memory_sequence_length, **kwargs)\n elif callable(attention_mechanism):\n return attention_mechanism(\n num_units, memory, memory_sequence_length)\n else:\n raise ValueError(\"Unable to build the attention mechanism\")\n\n\nclass AttentionalRNNDecoder(RNNDecoder):\n \"\"\"A RNN decoder with attention.\n\n It simple overrides the cell construction to add an attention wrapper.\n \"\"\"\n\n def __init__(self,\n num_layers,\n num_units,\n bridge=None,\n attention_mechanism_class=tf.contrib.seq2seq.LuongAttention,\n output_is_attention=True,\n cell_class=tf.contrib.rnn.LSTMCell,\n dropout=0.3,\n residual_connections=False):\n \"\"\"Initializes the decoder parameters.\n\n Args:\n num_layers: The number of layers.\n num_units: The number of units in each layer.\n bridge: A :class:`opennmt.layers.bridge.Bridge` to pass the encoder state\n to the decoder.\n attention_mechanism_class: A class 
inheriting from\n ``tf.contrib.seq2seq.AttentionMechanism`` or a callable that takes\n ``(num_units, memory, memory_sequence_length)`` as arguments and returns\n a ``tf.contrib.seq2seq.AttentionMechanism``.\n output_is_attention: If ``True``, the final decoder output (before logits)\n is the output of the attention layer. In all cases, the output of the\n attention layer is passed to the next step.\n cell_class: The inner cell class or a callable taking :obj:`num_units` as\n argument and returning a cell.\n dropout: The probability to drop units in each layer output.\n residual_connections: If ``True``, each layer input will be added to its\n output.\n \"\"\"\n super(AttentionalRNNDecoder, self).__init__(\n num_layers,\n num_units,\n bridge=bridge,\n cell_class=cell_class,\n dropout=dropout,\n residual_connections=residual_connections)\n self.attention_mechanism_class = attention_mechanism_class\n self.output_is_attention = output_is_attention\n\n def _build_cell(self,\n mode,\n batch_size,\n initial_state=None,\n memory=None,\n memory_sequence_length=None,\n dtype=None,\n alignment_history=False):\n attention_mechanism = _build_attention_mechanism(\n self.attention_mechanism_class,\n self.num_units,\n memory,\n memory_sequence_length=memory_sequence_length)\n\n cell, initial_cell_state = RNNDecoder._build_cell(\n self,\n mode,\n batch_size,\n initial_state=initial_state,\n dtype=memory.dtype)\n\n cell = tf.contrib.seq2seq.AttentionWrapper(\n cell,\n attention_mechanism,\n attention_layer_size=self.num_units,\n alignment_history=alignment_history,\n output_attention=self.output_is_attention,\n initial_cell_state=initial_cell_state)\n\n if mode == tf.estimator.ModeKeys.TRAIN and self.dropout > 0.0:\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, output_keep_prob=1.0 - self.dropout)\n\n initial_state = cell.zero_state(batch_size, memory.dtype)\n\n return cell, initial_state\n\n\nclass MultiAttentionalRNNDecoder(RNNDecoder):\n \"\"\"A RNN decoder with multi-attention.\n\n This decoder can attend the encoder outputs after multiple RNN layers using\n one or multiple attention mechanisms. Additionally, the cell state of this\n decoder is not initialized from the encoder state (i.e. a\n :class:`opennmt.layers.bridge.ZeroBridge` is imposed).\n \"\"\"\n\n def __init__(self,\n num_layers,\n num_units,\n attention_layers=None,\n attention_mechanism_class=tf.contrib.seq2seq.LuongAttention,\n cell_class=tf.contrib.rnn.LSTMCell,\n dropout=0.3,\n residual_connections=False):\n \"\"\"Initializes the decoder parameters.\n\n Args:\n num_layers: The number of layers.\n num_units: The number of units in each layer.\n attention_layers: A list of integers, the layers after which to add\n attention. If ``None``, attention will only be added after the last\n layer.\n attention_mechanism_class: A class or list of classes inheriting from\n ``tf.contrib.seq2seq.AttentionMechanism``. 
Alternatively, the class can\n be replaced by a callable that takes\n ``(num_units, memory, memory_sequence_length)`` as arguments and returns\n a ``tf.contrib.seq2seq.AttentionMechanism``.\n cell_class: The inner cell class or a callable taking :obj:`num_units` as\n argument and returning a cell.\n dropout: The probability to drop units in each layer output.\n residual_connections: If ``True``, each layer input will be added to its\n output.\n \"\"\"\n super(MultiAttentionalRNNDecoder, self).__init__(\n num_layers,\n num_units,\n cell_class=cell_class,\n dropout=dropout,\n residual_connections=residual_connections)\n\n attention_layers = attention_layers or [-1]\n attention_layers = [l % num_layers for l in attention_layers]\n\n if not isinstance(attention_mechanism_class, list):\n attention_mechanism_class = [attention_mechanism_class for _ in attention_layers]\n\n self.attention_mechanism_class = attention_mechanism_class\n self.attention_layers = attention_layers\n\n def _build_cell(self,\n mode,\n batch_size,\n initial_state=None,\n memory=None,\n memory_sequence_length=None,\n dtype=None,\n alignment_history=False):\n attention_mechanisms = [\n _build_attention_mechanism(\n attention_mechanism,\n self.num_units,\n memory,\n memory_sequence_length=memory_sequence_length)\n for attention_mechanism in self.attention_mechanism_class]\n\n cell = build_cell(\n self.num_layers,\n self.num_units,\n mode,\n dropout=self.dropout,\n residual_connections=self.residual_connections,\n cell_class=self.cell_class,\n attention_layers=self.attention_layers,\n attention_mechanisms=attention_mechanisms)\n\n initial_state = cell.zero_state(batch_size, memory.dtype)\n\n return cell, initial_state\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.logging.warn",
"tensorflow.shape",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.contrib.seq2seq.tile_batch",
"tensorflow.contrib.seq2seq.TrainingHelper",
"tensorflow.contrib.seq2seq.GreedyEmbeddingHelper",
"tensorflow.expand_dims",
"tensorflow.contrib.rnn.DropoutWrapper",
"tensorflow.python.estimator.util.fn_args",
"tensorflow.transpose",
"tensorflow.contrib.seq2seq.BeamSearchDecoder",
"tensorflow.contrib.seq2seq.AttentionWrapper",
"tensorflow.contrib.framework.is_tensor",
"tensorflow.contrib.seq2seq.ScheduledEmbeddingTrainingHelper",
"tensorflow.contrib.seq2seq.dynamic_decode"
]
] |
alexfmsu/pyquantum | [
"78b09987cbfecf549e67b919bb5cb2046b21ad44"
] | [
"plot_M_diff.py"
] | [
"from PyQuantum.Tools.CSV import *\nimport plotly.graph_objs as go\nimport numpy as np\nfrom PyQuantum.Tools.PlotBuilder2D import *\n\n# data = []\n\n# data.append(go.Scatter(\n# x=[1, 2, 3],\n# y=[4, 5, 6],\n# name=\"w_0['title']\",\n# ))\n\n# plot_builder = PlotBuilder2D({\n# 'title': 'M[p<sub>sink</sub>]<sub>|t<sub>0</sub>〉</sub> - M[p<sub>sink</sub>]<sub>|s<sub>2</sub>〉</sub>',\n# # 'title': '(M[p<sub>sink</sub>]<sub>|t<sub>0</sub>〉</sub> - M[p<sub>sink</sub>]<sub>|s<sub>2</sub>〉</sub>) (t)',\n\n# 'x_title': 'l/g',\n# 'y_title': 'ΔM, ',\n\n# 'data': data,\n\n# 'to_file': False,\n\n# 'html': 'M' + '.html',\n# 'online': False,\n# })\n\n# plot_builder.make_plot()\n\n\n# exit(0)\n# def df2(x, y):\n# f_xn = y[-1]\n# xn = x[-1]\n\n# DF = []\n# t = []\n\n# for i in range(1, len(y)-1):\n# df_ = (y[i+1]-y[i-1]) / (x[i+1] - x[i-1])\n# DF.append(df_)\n# t.append(x[i])\n\n# return DF, t\n\n\ndata = []\n\nM_str = ''\n\nM_list = {\n 's_2': [],\n 't_0': []\n}\n\nfor w_0 in [\n {\n 'name': 's_2',\n 'title': '|s<sub>2</sub>〉',\n },\n {\n 'name': 't_0',\n 'title': '|t<sub>0</sub>〉',\n }\n]:\n gamma = []\n\n # M_list = []\n\n for coeff in list(np.arange(0.01, 1.01, 0.01)) + list(np.arange(10.0, 210.0, 10.0)):\n # for coeff in np.arange(10.0, 210.0, 10.0):\n # for coeff in [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0]:\n # for coeff in [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0]:\n # for coeff in [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]:\n # for coeff in np.arange(0.01, 1.01, 0.01):\n coeff = np.round(coeff, 3)\n\n T_list = list_from_csv('MM/M_' + str(coeff) + '/T_' + w_0['name'] + '.csv')\n T_list = np.array(T_list)\n\n sink_list = list_from_csv('MM/M_' + str(coeff) + '/sink_' + w_0['name'] + '.csv')\n sink_list = np.array(sink_list)\n\n sink_list /= np.sum(sink_list)\n\n M = 0\n\n for i in range(len(T_list)):\n M += sink_list[i] * T_list[i]\n\n M_list[w_0['name']].append(M)\n\n gamma.append(coeff)\n\n\nM_diff = []\n\n# print(M_list['s_2'])\n# print(M_list['t_0'])\n\nfor j in range(len(M_list['s_2'])):\n M_diff.append(M_list['t_0'][j] - M_list['s_2'][j])\n # M_list = np.array(M_list)\n# print(M_diff)\n\nM_diff = np.array(M_diff)\n\nif max(M_diff) > 1e6:\n M_diff *= 1e-6\n M_str = '10<sup>6</sup>'\nelif max(M_diff) > 1e3:\n M_diff *= 1e-3\n M_str = '10<sup>3</sup>'\nelif max(M_diff) > 1e-3:\n M_diff *= 1e3\n # M_str = '10<sup>-3</sup>'\n M_str = 'ms'\nelif max(M_diff) > 1e-6:\n M_diff *= 1e6\n # M_str = '10<sup>-6</sup>'\n M_str = 'mks'\nelif max(M_diff) > 1e-9:\n M_diff *= 1e9\n M_str = 'ns'\n # M_str = '10<sup>-9</sup>'\n\ndata.append(go.Scatter(\n x=gamma,\n y=M_diff,\n name=w_0['title'],\n))\n\nplot_builder = PlotBuilder2D({\n 'title': 'M[p<sub>sink</sub>]<sub>|t<sub>0</sub>〉</sub> - M[p<sub>sink</sub>]<sub>|s<sub>2</sub>〉</sub>',\n # 'title': '(M[p<sub>sink</sub>]<sub>|t<sub>0</sub>〉</sub> - M[p<sub>sink</sub>]<sub>|s<sub>2</sub>〉</sub>) (t)',\n\n 'x_title': 'l/g',\n 'y_title': 'ΔM, ' + M_str,\n\n 'data': data,\n\n 'to_file': False,\n\n 'html': 'M' + '.html',\n 'online': False,\n})\n\nplot_builder.make_plot()\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.arange",
"numpy.round"
]
] |
lucaslehnert/rewardpredictive | [
"273da5a2566a263678159ed81dfb202b180a45a1"
] | [
"rewardpredictive/mdp.py"
] | [
"#\n# Copyright (c) 2020 Lucas Lehnert <[email protected]>\n#\n# This source code is licensed under an MIT license found in the LICENSE file in the root directory of this project.\n#\nfrom itertools import product, combinations\n\nimport numpy as np\nimport rlutils as rl\nfrom rlutils.environment.gridworld import pt_to_idx, idx_to_pt, \\\n generate_gridworld_transition_function, \\\n generate_gridworld_transition_function_with_barrier, \\\n generate_mdp_from_transition_and_reward_function\n\nfrom .utils import cluster_idx_to_phi_mat\nfrom .enumerate_partitions import enumerate_n_partitions\n\n\nclass InflatedTabularMDP(rl.environment.TabularMDP):\n def __init__(self, env, phi_mat, agg_weights=None, start_states=None, goal_states=None):\n '''\n\n :param env:\n :param phi_mat:\n :param agg_weights: If None, then uniform aggregation weights are used.\n '''\n num_a = env.num_actions()\n num_s = np.shape(phi_mat)[0]\n if agg_weights is None:\n uniform_weights = phi_mat.transpose()\n uniform_weights = uniform_weights / np.sum(uniform_weights, axis=-1, keepdims=True)\n uniform_weights = np.reshape(uniform_weights, [1, 1] + list(np.shape(uniform_weights)))\n agg_weights = np.concatenate([uniform_weights] * num_s, axis=1)\n agg_weights = np.concatenate([agg_weights] * num_a, axis=0)\n\n t_mat_lat, r_mat_lat = env.get_t_mat_r_mat()\n\n t_mat = np.zeros([num_a, num_s, num_s], dtype=np.float32)\n r_mat = np.zeros([num_a, num_s, num_s], dtype=np.float32)\n\n for s, a in product(range(num_s), range(num_a)):\n s_phi = np.where(phi_mat[s] == 1.)[0][0]\n t_mat[a, s] = np.matmul(t_mat_lat[a, s_phi], agg_weights[a, s])\n r_mat[a, s] = np.matmul(r_mat_lat[a, s_phi], agg_weights[a, s])\n\n if start_states is None:\n if len(env.start_state_list()) > 0:\n start_bits = [np.matmul(phi_mat, rl.one_hot(i, env.num_states())) for i in env.start_state_list()]\n start_states = np.concatenate([np.where(b == 1.)[0] for b in start_bits])\n else:\n start_states = []\n if goal_states is None:\n if len(env.goal_state_list()) > 0:\n goal_bits = [np.matmul(phi_mat, rl.one_hot(i, env.num_states())) for i in env.goal_state_list()]\n goal_states = np.concatenate([np.where(b == 1.)[0] for b in goal_bits])\n else:\n goal_states = []\n\n self.env_latent = env\n super().__init__(t_mat, r_mat, start_states, goal_states)\n\n def __str__(self):\n return 'InflatedTabularMDP({})'.format(str(self.env_latent))\n\n\nclass NaviA(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n barrier_idx_list = []\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 5), (10, 10))]\n goal_list_idx = [pt_to_idx((9, 0), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'NaviA'\n\n\nclass NaviB(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n barrier_idx_list = []\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 4), (10, 10))]\n goal_list_idx = [pt_to_idx((9, 9), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, 
r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'NaviB'\n\n\nclass MazeA(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n barrier_idx_list = []\n barrier_idx_list += [(pt_to_idx((1, i), (10, 10)), pt_to_idx((2, i), (10, 10))) for i in np.arange(0, 8)]\n barrier_idx_list += [(pt_to_idx((3, i), (10, 10)), pt_to_idx((4, i), (10, 10))) for i in np.arange(2, 10)]\n barrier_idx_list += [(pt_to_idx((5, i), (10, 10)), pt_to_idx((6, i), (10, 10))) for i in np.arange(0, 8)]\n barrier_idx_list += [(pt_to_idx((7, i), (10, 10)), pt_to_idx((8, i), (10, 10))) for i in np.arange(2, 10)]\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 0), (10, 10))]\n goal_list_idx = [pt_to_idx((9, 0), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'MazeA'\n\n\nclass MazeB(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n barrier_idx_list = []\n barrier_idx_list += [(pt_to_idx((1, i), (10, 10)), pt_to_idx((2, i), (10, 10))) for i in np.arange(2, 10)]\n barrier_idx_list += [(pt_to_idx((3, i), (10, 10)), pt_to_idx((4, i), (10, 10))) for i in np.arange(0, 8)]\n barrier_idx_list += [(pt_to_idx((5, i), (10, 10)), pt_to_idx((6, i), (10, 10))) for i in np.arange(2, 10)]\n barrier_idx_list += [(pt_to_idx((7, i), (10, 10)), pt_to_idx((8, i), (10, 10))) for i in np.arange(0, 8)]\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 9), (10, 10))]\n goal_list_idx = [pt_to_idx((9, 9), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'MazeB'\n\n\ndef color_right_half_10_by_10_grid(task):\n s_idx_unique = list(filter(lambda i: idx_to_pt(i, (10, 10))[0] < 5, range(100)))\n s_idx_duplicate = list(filter(lambda i: idx_to_pt(i, (10, 10))[0] >= 5, range(100)))\n partition_map = s_idx_unique + s_idx_duplicate + s_idx_duplicate + s_idx_duplicate\n phi_mat = cluster_idx_to_phi_mat(partition_map)\n return InflatedTabularMDP(task, phi_mat), phi_mat\n\n\ndef double_state_space(task):\n cluster_idx = np.concatenate([np.arange(task.num_states()), np.arange(task.num_states())])\n phi_mat = cluster_idx_to_phi_mat(cluster_idx)\n return InflatedTabularMDP(task, phi_mat), phi_mat\n\n\ndef identity_inflation(task):\n agg_weights_pinv = np.concatenate((np.eye(task.num_states(), dtype=np.float32),\n np.zeros((task.num_states(), task.num_states()), dtype=np.float32)), axis=0)\n agg_weights = np.linalg.pinv(agg_weights_pinv)\n agg_weights = np.stack([agg_weights for _ in range(task.num_states() * 2)])\n agg_weights = np.stack([agg_weights for _ in range(task.num_actions())])\n phi_mat = np.concatenate((np.eye(task.num_states(), dtype=np.float32),\n np.eye(task.num_states(), dtype=np.float32)), axis=0)\n\n mdp_infl = InflatedTabularMDP(task,\n phi_mat=phi_mat,\n 
agg_weights=agg_weights,\n start_states=task.start_state_list(),\n goal_states=task.goal_state_list())\n return mdp_infl, phi_mat\n\n\nclass TaskSequence(object):\n def __init__(self):\n self.task_sequence = []\n\n @classmethod\n def get_classname(cls):\n return cls.__name__\n\n\nclass NavigationTaskSequence(TaskSequence):\n def __init__(self):\n super().__init__()\n self.task_sequence = load_navigation_sequence()[0]\n\n\nclass MazeTaskSequence(TaskSequence):\n def __init__(self):\n super().__init__()\n self.task_sequence = load_maze_sequence()[0]\n\n\nclass ShortMazeTaskSequence(TaskSequence):\n def __init__(self):\n super().__init__()\n self.task_sequence = load_short_maze_sequence()[0]\n\n\ndef load_task_sequence_from_string(task_seq_name: str):\n return globals()[task_seq_name]()\n\n\ndef load_navigation_sequence():\n task_1, phi_mat_1 = double_state_space(NaviA())\n task_2, phi_mat_2 = color_right_half_10_by_10_grid(NaviB())\n task_3, phi_mat_3 = double_state_space(NaviA())\n task_4, phi_mat_4 = double_state_space(NaviB())\n task_5, phi_mat_5 = color_right_half_10_by_10_grid(NaviA())\n task_seq = [task_1, task_2, task_3, task_4, task_5]\n phi_mat_seq = [phi_mat_1, phi_mat_2, phi_mat_3, phi_mat_4, phi_mat_5]\n return task_seq, phi_mat_seq\n\n\ndef load_maze_sequence(slip_prob=0.05):\n task_1, phi_mat_1 = double_state_space(MazeA(slip_prob=slip_prob))\n task_2, phi_mat_2 = color_right_half_10_by_10_grid(MazeB(slip_prob=slip_prob))\n task_3, phi_mat_3 = double_state_space(MazeA(slip_prob=slip_prob))\n task_4, phi_mat_4 = double_state_space(MazeB(slip_prob=slip_prob))\n task_5, phi_mat_5 = color_right_half_10_by_10_grid(MazeA(slip_prob=slip_prob))\n\n task_seq = [task_1, task_2, task_3, task_4, task_5]\n phi_mat_seq = [phi_mat_1, phi_mat_2, phi_mat_3, phi_mat_4, phi_mat_5]\n return task_seq, phi_mat_seq\n\n\ndef load_short_maze_sequence():\n task_1, phi_mat_1 = double_state_space(MazeA())\n task_2, phi_mat_2 = color_right_half_10_by_10_grid(MazeB())\n task_3, phi_mat_3 = double_state_space(MazeB())\n task_seq = [task_1, task_2, task_3]\n phi_mat_seq = [phi_mat_1, phi_mat_2, phi_mat_3]\n return task_seq, phi_mat_seq\n\n\nclass ColumnWorld0(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05, dtype=np.float32):\n t_fn = generate_gridworld_transition_function(3, 3, slip_prob=slip_prob)\n\n def r_fn(s_1, a, s_2):\n x, y = idx_to_pt(s_2, (3, 3))\n if x == 0:\n return 1.\n else:\n return 0.\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(\n num_states=3 * 3,\n num_actions=4,\n transition_fn=t_fn,\n reward_fn=r_fn,\n reward_matrix=True,\n dtype=dtype\n )\n super().__init__(t_mat, r_mat, np.arange(9), [])\n\n\nclass ColumnWorld1(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05, dtype=np.float32):\n t_fn = generate_gridworld_transition_function(3, 3, slip_prob=slip_prob)\n\n def r_fn(s_1, a, s_2):\n x, y = idx_to_pt(s_2, (3, 3))\n if x == 1:\n return 1.\n else:\n return 0.\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(\n num_states=3 * 3,\n num_actions=4,\n transition_fn=t_fn,\n reward_fn=r_fn,\n reward_matrix=True,\n dtype=dtype\n )\n super().__init__(t_mat, r_mat, np.arange(9), [])\n\n\nclass ColumnWorld2(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05, dtype=np.float32):\n t_fn = generate_gridworld_transition_function(3, 3, slip_prob=slip_prob)\n\n def r_fn(s_1, a, s_2):\n x, y = idx_to_pt(s_2, (3, 3))\n if x == 2:\n return 1.\n else:\n return 0.\n\n t_mat, r_mat = 
generate_mdp_from_transition_and_reward_function(\n num_states=3 * 3,\n num_actions=4,\n transition_fn=t_fn,\n reward_fn=r_fn,\n reward_matrix=True,\n dtype=dtype\n )\n super().__init__(t_mat, r_mat, np.arange(9), [])\n\n\ndef load_column_world_sequence():\n return [\n ColumnWorld0(),\n ColumnWorld1(),\n ColumnWorld2()\n ]\n\n\nclass GridWord3x3WithGoals(rl.environment.TabularMDP):\n def __init__(self, goal_state_idx_list, slip_prob=0.05, dtype=np.float32):\n assert np.min(goal_state_idx_list) >= 0\n assert np.max(goal_state_idx_list) < 9\n\n t_fn = generate_gridworld_transition_function(3, 3, slip_prob=slip_prob)\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_state_idx_list\n if s_2 in goal_state_idx_list:\n return 1.\n else:\n return 0.\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(\n num_states=3 * 3,\n num_actions=4,\n transition_fn=t_fn,\n reward_fn=r_fn,\n reward_matrix=True,\n dtype=dtype\n )\n super().__init__(t_mat, r_mat, np.arange(9), [])\n\n\nclass GridWord3x3WithGoalsAndWalls(rl.environment.TabularMDP):\n @staticmethod\n def _get_wall_position_0():\n \"\"\"\n .|. .\n .|. .\n . . .\n \"\"\"\n barrier_idx_list = [\n (pt_to_idx((0, 0), (3, 3)), pt_to_idx((1, 0), (3, 3))),\n (pt_to_idx((0, 1), (3, 3)), pt_to_idx((1, 1), (3, 3)))\n ]\n return barrier_idx_list\n\n @staticmethod\n def _get_wall_position_1():\n \"\"\"\n . .|.\n . .|.\n . . .\n \"\"\"\n barrier_idx_list = [\n (pt_to_idx((1, 0), (3, 3)), pt_to_idx((2, 0), (3, 3))),\n (pt_to_idx((1, 1), (3, 3)), pt_to_idx((2, 1), (3, 3)))\n ]\n return barrier_idx_list\n\n @staticmethod\n def _get_wall_position_2():\n \"\"\"\n . . .\n .|. .\n .|. .\n \"\"\"\n barrier_idx_list = [\n (pt_to_idx((0, 1), (3, 3)), pt_to_idx((1, 1), (3, 3))),\n (pt_to_idx((0, 2), (3, 3)), pt_to_idx((1, 2), (3, 3)))\n ]\n return barrier_idx_list\n\n @staticmethod\n def _get_wall_position_3():\n \"\"\"\n . . .\n . .|.\n . 
.|.\n \"\"\"\n barrier_idx_list = [\n (pt_to_idx((1, 1), (3, 3)), pt_to_idx((2, 1), (3, 3))),\n (pt_to_idx((1, 2), (3, 3)), pt_to_idx((2, 2), (3, 3)))\n ]\n return barrier_idx_list\n\n def __init__(self, goal_state_idx_list, wall_position_idx, slip_prob=0.05, dtype=np.float32):\n assert np.min(goal_state_idx_list) >= 0\n assert np.max(goal_state_idx_list) < 9\n\n if wall_position_idx == 0:\n barrier_idx_list = GridWord3x3WithGoalsAndWalls._get_wall_position_0()\n elif wall_position_idx == 1:\n barrier_idx_list = GridWord3x3WithGoalsAndWalls._get_wall_position_1()\n elif wall_position_idx == 2:\n barrier_idx_list = GridWord3x3WithGoalsAndWalls._get_wall_position_2()\n elif wall_position_idx == 3:\n barrier_idx_list = GridWord3x3WithGoalsAndWalls._get_wall_position_3()\n\n t_fn = generate_gridworld_transition_function_with_barrier(3, 3, slip_prob, barrier_idx_list)\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_state_idx_list\n if s_2 in goal_state_idx_list:\n return 1.\n else:\n return 0.\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(\n num_states=3 * 3,\n num_actions=4,\n transition_fn=t_fn,\n reward_fn=r_fn,\n reward_matrix=True,\n dtype=dtype\n )\n super().__init__(t_mat, r_mat, np.arange(9), [])\n\n\ndef load_two_goal_gridworld_sequence():\n return [GridWord3x3WithGoals(g) for g in combinations(range(9), 2)]\n\n\ndef load_two_goal_with_wall_gridworld_sequence():\n return [GridWord3x3WithGoalsAndWalls(g, w) for g, w in product(combinations(range(9), 2), range(4))]\n\n\nclass RandomMDP(rl.environment.TabularMDP):\n def __init__(self, num_states, num_actions, scale=10.0, num_goal_cells=1):\n t_mat = np.random.uniform(size=[num_actions, num_states, num_states]).astype(dtype=np.float32)\n t_mat = np.exp(scale * t_mat)\n t_mat = t_mat / np.sum(t_mat, axis=-1, keepdims=True)\n assert not np.any(np.isnan(t_mat))\n\n r_idx_list = np.random.choice(np.arange(num_states), size=num_goal_cells, replace=False)\n r_state = np.zeros(num_states)\n for i in r_idx_list:\n r_state[i] = 1.\n\n def r_fn(s, a, s_next):\n return r_state[s_next]\n\n r_mat = np.zeros([num_actions, num_states, num_states])\n for a in range(num_actions):\n for s_1 in range(num_states):\n for s_2 in range(num_states):\n r_mat[a, s_1, s_2] = r_fn(s_1, a, s_2)\n\n super().__init__(t_mat, r_mat, idx_start_list=np.arange(num_states), idx_goal_list=[], name='RandomMDP')\n\n\ndef load_random_mdp_sequence():\n partition_list = enumerate_n_partitions(9, 3)\n partition_idx = np.random.randint(0, np.shape(partition_list)[0])\n phi_mat = cluster_idx_to_phi_mat(partition_list[partition_idx])\n\n latent_mdp_list = [RandomMDP(3, 3) for _ in range(100)]\n mdp_list = [InflatedTabularMDP(env, phi_mat) for env in latent_mdp_list]\n return mdp_list\n\n\nclass TaskASlightRewardChange(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n barrier_idx_list = []\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 9), (10, 10))]\n goal_list_idx = [pt_to_idx((8, 0), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'TaskASlightRewardChange'\n\n\nclass TaskBSlightRewardChange(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n 
barrier_idx_list = []\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 9), (10, 10))]\n goal_list_idx = [pt_to_idx((9, 1), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'TaskBSlightRewardChange'\n\n\nclass TaskASignificantRewardChange(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n barrier_idx_list = []\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 9), (10, 10))]\n goal_list_idx = [pt_to_idx((9, 9), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'TaskASignificantRewardChange'\n\n\nclass TaskBSignificantRewardChange(rl.environment.TabularMDP):\n def __init__(self, slip_prob=0.05):\n barrier_idx_list = []\n t_fn = generate_gridworld_transition_function_with_barrier(10, 10, slip_prob, barrier_idx_list)\n\n start_list_idx = [pt_to_idx((0, 9), (10, 10))]\n goal_list_idx = [pt_to_idx((0, 0), (10, 10))]\n\n def r_fn(s_1, a, s_2):\n nonlocal goal_list_idx\n if s_2 in goal_list_idx:\n return 1.0\n else:\n return 0.0\n\n t_mat, r_mat = generate_mdp_from_transition_and_reward_function(100, 4, t_fn, r_fn,\n reward_matrix=True,\n dtype=np.float32)\n super().__init__(t_mat, r_mat, start_list_idx, goal_list_idx)\n\n def __str__(self):\n return 'TaskBSignificantRewardChange'\n"
] | [
[
"numpy.random.uniform",
"numpy.sum",
"numpy.matmul",
"numpy.zeros",
"numpy.exp",
"numpy.arange",
"numpy.where",
"numpy.max",
"numpy.shape",
"numpy.min",
"numpy.isnan",
"numpy.linalg.pinv",
"numpy.concatenate"
]
] |
keithyipkw/InSync | [
"3744b45f31f713de2dfc8c30507e67db96915e07"
] | [
"benchmark/overhead.py"
] | [
"import sys\nimport numpy as np\nimport pandas as pd\n\ndef main():\n df = pd.read_csv(sys.argv[1], names=[\"Method\", \"Time\"])\n print(df.groupby(\"Method\").describe().to_csv())\n\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"pandas.read_csv"
]
] |
ProjectBlackFalcon/DatBot | [
"8b2cc64af78757b832d8bc6a1373fb74b7a4316f"
] | [
"ModelTests/test_Pathfinder.py"
] | [
"from Pathfinder import PathFinder\nimport numpy as np\nimport winsound\n\n\ndef generate_map_collage():\n maps_coords = pf.get_maps_coords()\n maps = []\n shape = (abs(end[1] - start[1]) + 1, abs(end[0] - start[0]) + 1)\n counter = 0\n for coord in maps_coords:\n map_infos = pf.llf.coord_fetch_map(coord, pf.worldmap)\n counter += 1\n print('{}/{}'.format(counter, shape[0]*shape[1]))\n if map_infos is not None and np.array(map_infos).shape == (40, 14):\n maps.append(map_infos)\n elif map_infos is not None and np.array(map_infos).shape != (40, 14):\n maps.append([[5]*14]*40)\n else:\n maps.append([[1] * 14] * 40)\n glued = pf.glue_maps(maps, shape)\n # print(glued)\n # print(pf.adapted_maps)\n pf.map_to_image(pf.adapt_shape_maps(glued), 1)\n\n\ndef generate_path():\n pf.get_path()\n pf.add_path_to_adapted_maps()\n pf.add_map_change_coords_to_adapted_maps()\n pf.map_to_image(pf.adapted_maps, 1)\n print(pf.path_cells)\n print(pf.get_map_change_cells())\n return(pf.get_map_change_cells())\n\nlel = [(4, -19), (-5, -23), (-13, -28), (-3, -42), (-17, -47), (-32, -56), (-27, -36), (-20, -20), (-16, 1), (-25, 12), (-15, 25), (-26, 35)]\n\nstart = (4, -19)\nend = (-5, -23)\nworldmap = 1\npf = PathFinder(start, end, None, None, worldmap)\npath = generate_path()\n\n\nprint('Done !')\nwinsound.PlaySound('../Utils/sound.wav', winsound.SND_FILENAME)\n\n\n__author__ = 'Alexis'\n\n"
] | [
[
"numpy.array"
]
] |
krunt/lean_transformer | [
"90abdb87bb08566eaba0a45bc29ec6a3220333ac"
] | [
"tests/test_utils.py"
] | [
"import pytest\nimport torch\nimport torch.nn.functional as F\nfrom lean_transformer.utils import pad_to_multiple, GELU\nimport numpy as np\n\n\[email protected]\ndef test_pad_to_multiple():\n x = torch.randn(3, 3)\n\n assert pad_to_multiple(x, multiple=3, dims=0) is x\n assert pad_to_multiple(x, multiple=3, dims=1) is x\n assert pad_to_multiple(x, multiple=2, dims=1) is not x\n assert pad_to_multiple(x, multiple=4, dims=1) is not x\n assert torch.allclose(pad_to_multiple(x, multiple=2, dims=1), pad_to_multiple(x, multiple=4, dims=1))\n assert pad_to_multiple(x, multiple=2, dims=0).shape == (4, 3)\n assert pad_to_multiple(x, multiple=4, dims=1).shape == (3, 4)\n assert pad_to_multiple(x, multiple=2, dims=[0, 1]).shape == (4, 4)\n assert torch.allclose(pad_to_multiple(x, multiple=4, dims=1).sum(), x.sum())\n assert pad_to_multiple(x, multiple=10, dims=0)[3:].norm() == 0\n assert pad_to_multiple(x, multiple=4, dims=[0, 1]).shape == (4, 4)\n assert pad_to_multiple(x, multiple=3, dims=[0, 1]) is x\n\[email protected]\ndef test_gelu():\n gelu_ours = GELU.apply(torch.linspace(-5, 5, 1000))\n gelu_ref = F.gelu(torch.linspace(-5, 5, 1000))\n assert abs(gelu_ours - gelu_ref).max().item() <= 5e-4"
] | [
[
"torch.randn",
"torch.linspace"
]
] |
cvxgrp/sccf | [
"3c5f65e1a6df1a1b9cf58b60dd2b41f5c46be42e"
] | [
"test.py"
] | [
"import unittest\n\nimport sccf\nimport cvxpy as cp\nimport numpy as np\n\n\nclass TestMinExpression(unittest.TestCase):\n def test(self):\n x = cp.Variable(10)\n x.value = np.zeros(10)\n expr = cp.sum_squares(x)\n with self.assertRaises(AssertionError):\n sccf.minimum(-expr, 1.0)\n min_expr = sccf.minimum(expr, 1.0)\n self.assertEqual(min_expr.value, 0.0)\n x.value = np.ones(10)\n self.assertEqual(min_expr.value, 1.0)\n\n\nclass TestSumOfMinExpressions(unittest.TestCase):\n def test(self):\n x = cp.Variable(10)\n x.value = np.zeros(10)\n expr1 = sccf.minimum(cp.sum_squares(x), 1.0)\n expr2 = sccf.minimum(cp.sum_squares(x - 1), 1.0)\n sum_exprs = expr1 + expr2\n self.assertEqual(len(sum_exprs.min_exprs), 2)\n self.assertEqual(sum_exprs.value, 1.0)\n sum_exprs += 1.0\n self.assertEqual(sum_exprs.value, 2.0)\n sum_exprs += cp.sum_squares(x - 1)\n self.assertEqual(sum_exprs.value, 12.0)\n\n\nclass TestProblem(unittest.TestCase):\n def test(self):\n x = cp.Variable(1)\n x.value = 2*np.ones(1)\n expr1 = sccf.minimum(cp.sum(cp.huber(x)), 1.0)\n expr2 = sccf.minimum(cp.sum(cp.huber(x - 1.0)), 1.0)\n obj = expr1 + expr2\n starting = obj.value\n prob = sccf.Problem(obj)\n ending = prob.solve()\n self.assertLessEqual(ending[\"final_objective_value\"], starting)\n\nif __name__ == '__main__':\n unittest.main()"
] | [
[
"numpy.ones",
"numpy.zeros"
]
] |
YuliusDennyPrabowo/pandas | [
"b74e2ce0f63f616474edc310897a67d501a9e32d"
] | [
"pandas/tests/arrays/test_datetimelike.py"
] | [
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pytest\n\nimport pandas.compat as compat\n\nimport pandas as pd\nfrom pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray\nimport pandas.util.testing as tm\n\n\n# TODO: more freq variants\[email protected](params=['D', 'B', 'W', 'M', 'Q', 'Y'])\ndef period_index(request):\n \"\"\"\n A fixture to provide PeriodIndex objects with different frequencies.\n\n Most PeriodArray behavior is already tested in PeriodIndex tests,\n so here we just test that the PeriodArray behavior matches\n the PeriodIndex behavior.\n \"\"\"\n freqstr = request.param\n # TODO: non-monotone indexes; NaTs, different start dates\n pi = pd.period_range(start=pd.Timestamp('2000-01-01'),\n periods=100,\n freq=freqstr)\n return pi\n\n\[email protected](params=['D', 'B', 'W', 'M', 'Q', 'Y'])\ndef datetime_index(request):\n \"\"\"\n A fixture to provide DatetimeIndex objects with different frequencies.\n\n Most DatetimeArray behavior is already tested in DatetimeIndex tests,\n so here we just test that the DatetimeArray behavior matches\n the DatetimeIndex behavior.\n \"\"\"\n freqstr = request.param\n # TODO: non-monotone indexes; NaTs, different start dates, timezones\n pi = pd.date_range(start=pd.Timestamp('2000-01-01'),\n periods=100,\n freq=freqstr)\n return pi\n\n\[email protected]\ndef timedelta_index(request):\n \"\"\"\n A fixture to provide TimedeltaIndex objects with different frequencies.\n Most TimedeltaArray behavior is already tested in TimedeltaIndex tests,\n so here we just test that the TimedeltaArray behavior matches\n the TimedeltaIndex behavior.\n \"\"\"\n # TODO: flesh this out\n return pd.TimedeltaIndex(['1 Day', '3 Hours', 'NaT'])\n\n\nclass SharedTests(object):\n index_cls = None\n\n def test_compare_len1_raises(self):\n # make sure we raise when comparing with different lengths, specific\n # to the case where one has length-1, which numpy would broadcast\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n\n idx = self.index_cls._simple_new(data, freq='D')\n arr = self.array_cls(idx)\n\n with pytest.raises(ValueError, match=\"Lengths must match\"):\n arr == arr[:1]\n\n # test the index classes while we're at it, GH#23078\n with pytest.raises(ValueError, match=\"Lengths must match\"):\n idx <= idx[[0]]\n\n def test_take(self):\n data = np.arange(100, dtype='i8') * 24 * 3600 * 10**9\n np.random.shuffle(data)\n\n idx = self.index_cls._simple_new(data, freq='D')\n arr = self.array_cls(idx)\n\n takers = [1, 4, 94]\n result = arr.take(takers)\n expected = idx.take(takers)\n\n tm.assert_index_equal(self.index_cls(result), expected)\n\n takers = np.array([1, 4, 94])\n result = arr.take(takers)\n expected = idx.take(takers)\n\n tm.assert_index_equal(self.index_cls(result), expected)\n\n def test_take_fill(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n\n idx = self.index_cls._simple_new(data, freq='D')\n arr = self.array_cls(idx)\n\n result = arr.take([-1, 1], allow_fill=True, fill_value=None)\n assert result[0] is pd.NaT\n\n result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan)\n assert result[0] is pd.NaT\n\n result = arr.take([-1, 1], allow_fill=True, fill_value=pd.NaT)\n assert result[0] is pd.NaT\n\n with pytest.raises(ValueError):\n arr.take([0, 1], allow_fill=True, fill_value=2)\n\n with pytest.raises(ValueError):\n arr.take([0, 1], allow_fill=True, fill_value=2.0)\n\n with pytest.raises(ValueError):\n arr.take([0, 1], allow_fill=True,\n fill_value=pd.Timestamp.now().time)\n\n def 
test_concat_same_type(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n\n idx = self.index_cls._simple_new(data, freq='D').insert(0, pd.NaT)\n arr = self.array_cls(idx)\n\n result = arr._concat_same_type([arr[:-1], arr[1:], arr])\n expected = idx._concat_same_dtype([idx[:-1], idx[1:], idx], None)\n\n tm.assert_index_equal(self.index_cls(result), expected)\n\n def test_unbox_scalar(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n result = arr._unbox_scalar(arr[0])\n assert isinstance(result, (int, compat.long))\n\n result = arr._unbox_scalar(pd.NaT)\n assert isinstance(result, (int, compat.long))\n\n with pytest.raises(ValueError):\n arr._unbox_scalar('foo')\n\n def test_check_compatible_with(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n\n arr._check_compatible_with(arr[0])\n arr._check_compatible_with(arr[:1])\n arr._check_compatible_with(pd.NaT)\n\n def test_scalar_from_string(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n result = arr._scalar_from_string(str(arr[0]))\n assert result == arr[0]\n\n def test_reduce_invalid(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n\n with pytest.raises(TypeError, match='cannot perform'):\n arr._reduce(\"not a method\")\n\n @pytest.mark.parametrize('method', ['pad', 'backfill'])\n def test_fillna_method_doesnt_change_orig(self, method):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n arr[4] = pd.NaT\n\n fill_value = arr[3] if method == 'pad' else arr[5]\n\n result = arr.fillna(method=method)\n assert result[4] == fill_value\n\n # check that the original was not changed\n assert arr[4] is pd.NaT\n\n def test_searchsorted(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n\n # scalar\n result = arr.searchsorted(arr[1])\n assert result == 1\n\n result = arr.searchsorted(arr[2], side=\"right\")\n assert result == 3\n\n # own-type\n result = arr.searchsorted(arr[1:3])\n expected = np.array([1, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.searchsorted(arr[1:3], side=\"right\")\n expected = np.array([2, 3], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n\n # Following numpy convention, NaT goes at the beginning\n # (unlike NaN which goes at the end)\n result = arr.searchsorted(pd.NaT)\n assert result == 0\n\n def test_setitem(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n\n arr[0] = arr[1]\n expected = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n expected[0] = expected[1]\n\n tm.assert_numpy_array_equal(arr.asi8, expected)\n\n arr[:2] = arr[-2:]\n expected[:2] = expected[-2:]\n tm.assert_numpy_array_equal(arr.asi8, expected)\n\n def test_setitem_raises(self):\n data = np.arange(10, dtype='i8') * 24 * 3600 * 10**9\n arr = self.array_cls(data, freq='D')\n val = arr[0]\n\n with pytest.raises(IndexError, match=\"index 12 is out of bounds\"):\n arr[12] = val\n\n with pytest.raises(TypeError, match=\"'value' should be a.* 'object'\"):\n arr[0] = object()\n\n\nclass TestDatetimeArray(SharedTests):\n index_cls = pd.DatetimeIndex\n array_cls = DatetimeArray\n\n def test_round(self, tz_naive_fixture):\n # GH#24064\n tz = tz_naive_fixture\n dti = pd.date_range('2016-01-01 01:01:00', periods=3, freq='H', tz=tz)\n\n result = 
dti.round(freq='2T')\n expected = dti - pd.Timedelta(minutes=1)\n tm.assert_index_equal(result, expected)\n\n def test_array_object_dtype(self, tz_naive_fixture):\n # GH#23524\n tz = tz_naive_fixture\n dti = pd.date_range('2016-01-01', periods=3, tz=tz)\n arr = DatetimeArray(dti)\n\n expected = np.array(list(dti))\n\n result = np.array(arr, dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n # also test the DatetimeIndex method while we're at it\n result = np.array(dti, dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_array(self, tz_naive_fixture):\n # GH#23524\n tz = tz_naive_fixture\n dti = pd.date_range('2016-01-01', periods=3, tz=tz)\n arr = DatetimeArray(dti)\n\n expected = dti.asi8.view('M8[ns]')\n result = np.array(arr)\n tm.assert_numpy_array_equal(result, expected)\n\n # check that we are not making copies when setting copy=False\n result = np.array(arr, copy=False)\n assert result.base is expected.base\n assert result.base is not None\n\n def test_array_i8_dtype(self, tz_naive_fixture):\n # GH#23524\n tz = tz_naive_fixture\n dti = pd.date_range('2016-01-01', periods=3, tz=tz)\n arr = DatetimeArray(dti)\n\n expected = dti.asi8\n result = np.array(arr, dtype='i8')\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.array(arr, dtype=np.int64)\n tm.assert_numpy_array_equal(result, expected)\n\n # check that we are not making copies when setting copy=False\n result = np.array(arr, dtype='i8', copy=False)\n assert result.base is expected.base\n assert result.base is not None\n\n def test_from_array_keeps_base(self):\n # Ensure that DatetimeArray._data.base isn't lost.\n arr = np.array(['2000-01-01', '2000-01-02'], dtype='M8[ns]')\n dta = DatetimeArray(arr)\n\n assert dta._data is arr\n dta = DatetimeArray(arr[:0])\n assert dta._data.base is arr\n\n def test_from_dti(self, tz_naive_fixture):\n tz = tz_naive_fixture\n dti = pd.date_range('2016-01-01', periods=3, tz=tz)\n arr = DatetimeArray(dti)\n assert list(dti) == list(arr)\n\n # Check that Index.__new__ knows what to do with DatetimeArray\n dti2 = pd.Index(arr)\n assert isinstance(dti2, pd.DatetimeIndex)\n assert list(dti2) == list(arr)\n\n def test_astype_object(self, tz_naive_fixture):\n tz = tz_naive_fixture\n dti = pd.date_range('2016-01-01', periods=3, tz=tz)\n arr = DatetimeArray(dti)\n asobj = arr.astype('O')\n assert isinstance(asobj, np.ndarray)\n assert asobj.dtype == 'O'\n assert list(asobj) == list(dti)\n\n @pytest.mark.parametrize('freqstr', ['D', 'B', 'W', 'M', 'Q', 'Y'])\n def test_to_perioddelta(self, datetime_index, freqstr):\n # GH#23113\n dti = datetime_index\n arr = DatetimeArray(dti)\n\n expected = dti.to_perioddelta(freq=freqstr)\n result = arr.to_perioddelta(freq=freqstr)\n assert isinstance(result, TimedeltaArray)\n\n # placeholder until these become actual EA subclasses and we can use\n # an EA-specific tm.assert_ function\n tm.assert_index_equal(pd.Index(result), pd.Index(expected))\n\n @pytest.mark.parametrize('freqstr', ['D', 'B', 'W', 'M', 'Q', 'Y'])\n def test_to_period(self, datetime_index, freqstr):\n dti = datetime_index\n arr = DatetimeArray(dti)\n\n expected = dti.to_period(freq=freqstr)\n result = arr.to_period(freq=freqstr)\n assert isinstance(result, PeriodArray)\n\n # placeholder until these become actual EA subclasses and we can use\n # an EA-specific tm.assert_ function\n tm.assert_index_equal(pd.Index(result), pd.Index(expected))\n\n @pytest.mark.parametrize('propname', pd.DatetimeIndex._bool_ops)\n def test_bool_properties(self, 
datetime_index, propname):\n # in this case _bool_ops is just `is_leap_year`\n dti = datetime_index\n arr = DatetimeArray(dti)\n assert dti.freq == arr.freq\n\n result = getattr(arr, propname)\n expected = np.array(getattr(dti, propname), dtype=result.dtype)\n\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize('propname', pd.DatetimeIndex._field_ops)\n def test_int_properties(self, datetime_index, propname):\n dti = datetime_index\n arr = DatetimeArray(dti)\n\n result = getattr(arr, propname)\n expected = np.array(getattr(dti, propname), dtype=result.dtype)\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_take_fill_valid(self, datetime_index, tz_naive_fixture):\n dti = datetime_index.tz_localize(tz_naive_fixture)\n arr = DatetimeArray(dti)\n\n now = pd.Timestamp.now().tz_localize(dti.tz)\n result = arr.take([-1, 1], allow_fill=True, fill_value=now)\n assert result[0] == now\n\n with pytest.raises(ValueError):\n # fill_value Timedelta invalid\n arr.take([-1, 1], allow_fill=True, fill_value=now - now)\n\n with pytest.raises(ValueError):\n # fill_value Period invalid\n arr.take([-1, 1], allow_fill=True, fill_value=pd.Period('2014Q1'))\n\n tz = None if dti.tz is not None else 'US/Eastern'\n now = pd.Timestamp.now().tz_localize(tz)\n with pytest.raises(TypeError):\n # Timestamp with mismatched tz-awareness\n arr.take([-1, 1], allow_fill=True, fill_value=now)\n\n def test_concat_same_type_invalid(self, datetime_index):\n # different timezones\n dti = datetime_index\n arr = DatetimeArray(dti)\n\n if arr.tz is None:\n other = arr.tz_localize('UTC')\n else:\n other = arr.tz_localize(None)\n\n with pytest.raises(AssertionError):\n arr._concat_same_type([arr, other])\n\n def test_concat_same_type_different_freq(self):\n # we *can* concatentate DTI with different freqs.\n a = DatetimeArray(pd.date_range('2000', periods=2, freq='D',\n tz='US/Central'))\n b = DatetimeArray(pd.date_range('2000', periods=2, freq='H',\n tz='US/Central'))\n result = DatetimeArray._concat_same_type([a, b])\n expected = DatetimeArray(pd.to_datetime([\n '2000-01-01 00:00:00', '2000-01-02 00:00:00',\n '2000-01-01 00:00:00', '2000-01-01 01:00:00',\n ]).tz_localize(\"US/Central\"))\n\n tm.assert_datetime_array_equal(result, expected)\n\n\nclass TestTimedeltaArray(SharedTests):\n index_cls = pd.TimedeltaIndex\n array_cls = TimedeltaArray\n\n def test_from_tdi(self):\n tdi = pd.TimedeltaIndex(['1 Day', '3 Hours'])\n arr = TimedeltaArray(tdi)\n assert list(arr) == list(tdi)\n\n # Check that Index.__new__ knows what to do with TimedeltaArray\n tdi2 = pd.Index(arr)\n assert isinstance(tdi2, pd.TimedeltaIndex)\n assert list(tdi2) == list(arr)\n\n def test_astype_object(self):\n tdi = pd.TimedeltaIndex(['1 Day', '3 Hours'])\n arr = TimedeltaArray(tdi)\n asobj = arr.astype('O')\n assert isinstance(asobj, np.ndarray)\n assert asobj.dtype == 'O'\n assert list(asobj) == list(tdi)\n\n def test_to_pytimedelta(self, timedelta_index):\n tdi = timedelta_index\n arr = TimedeltaArray(tdi)\n\n expected = tdi.to_pytimedelta()\n result = arr.to_pytimedelta()\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_total_seconds(self, timedelta_index):\n tdi = timedelta_index\n arr = TimedeltaArray(tdi)\n\n expected = tdi.total_seconds()\n result = arr.total_seconds()\n\n tm.assert_numpy_array_equal(result, expected.values)\n\n @pytest.mark.parametrize('propname', pd.TimedeltaIndex._field_ops)\n def test_int_properties(self, timedelta_index, propname):\n tdi = timedelta_index\n arr = 
TimedeltaArray(tdi)\n\n result = getattr(arr, propname)\n expected = np.array(getattr(tdi, propname), dtype=result.dtype)\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_take_fill_valid(self, timedelta_index):\n tdi = timedelta_index\n arr = TimedeltaArray(tdi)\n\n td1 = pd.Timedelta(days=1)\n result = arr.take([-1, 1], allow_fill=True, fill_value=td1)\n assert result[0] == td1\n\n now = pd.Timestamp.now()\n with pytest.raises(ValueError):\n # fill_value Timestamp invalid\n arr.take([0, 1], allow_fill=True, fill_value=now)\n\n with pytest.raises(ValueError):\n # fill_value Period invalid\n arr.take([0, 1], allow_fill=True, fill_value=now.to_period('D'))\n\n\nclass TestPeriodArray(SharedTests):\n index_cls = pd.PeriodIndex\n array_cls = PeriodArray\n\n def test_from_pi(self, period_index):\n pi = period_index\n arr = PeriodArray(pi)\n assert list(arr) == list(pi)\n\n # Check that Index.__new__ knows what to do with PeriodArray\n pi2 = pd.Index(arr)\n assert isinstance(pi2, pd.PeriodIndex)\n assert list(pi2) == list(arr)\n\n def test_astype_object(self, period_index):\n pi = period_index\n arr = PeriodArray(pi)\n asobj = arr.astype('O')\n assert isinstance(asobj, np.ndarray)\n assert asobj.dtype == 'O'\n assert list(asobj) == list(pi)\n\n @pytest.mark.parametrize('how', ['S', 'E'])\n def test_to_timestamp(self, how, period_index):\n pi = period_index\n arr = PeriodArray(pi)\n\n expected = DatetimeArray(pi.to_timestamp(how=how))\n result = arr.to_timestamp(how=how)\n assert isinstance(result, DatetimeArray)\n\n # placeholder until these become actual EA subclasses and we can use\n # an EA-specific tm.assert_ function\n tm.assert_index_equal(pd.Index(result), pd.Index(expected))\n\n @pytest.mark.parametrize('propname', PeriodArray._bool_ops)\n def test_bool_properties(self, period_index, propname):\n # in this case _bool_ops is just `is_leap_year`\n pi = period_index\n arr = PeriodArray(pi)\n\n result = getattr(arr, propname)\n expected = np.array(getattr(pi, propname))\n\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize('propname', PeriodArray._field_ops)\n def test_int_properties(self, period_index, propname):\n pi = period_index\n arr = PeriodArray(pi)\n\n result = getattr(arr, propname)\n expected = np.array(getattr(pi, propname))\n\n tm.assert_numpy_array_equal(result, expected)\n"
] | [
[
"numpy.random.shuffle",
"pandas.date_range",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.Period",
"pandas.core.arrays.DatetimeArray",
"pandas.Timestamp.now",
"pandas.core.arrays.TimedeltaArray",
"pandas.core.arrays.PeriodArray",
"pandas.TimedeltaIndex",
"pandas.Timedelta",
"numpy.arange",
"pandas.to_datetime",
"pandas.util.testing.assert_index_equal",
"numpy.array",
"pandas.util.testing.assert_datetime_array_equal",
"pandas.core.arrays.DatetimeArray._concat_same_type",
"pandas.Index",
"pandas.Timestamp"
]
] |
zhanzheng8585/biprop | [
"ce6a364c8323f102bd41ebb332e1e841ec78c79d"
] | [
"models/resnet_BinAct.py"
] | [
"import torch.nn as nn\n\nfrom utils.builder import get_builder\nfrom args import args\n\n\nfrom collections import OrderedDict\n\n# Binary activation function with gradient estimator\nimport torch\nclass F_BinAct(torch.autograd.Function):\n @staticmethod\n def forward(ctx, inp):\n # Save input for backward\n ctx.save_for_backward(inp)\n # Unscaled sign function\n return torch.sign(inp)\n\n @staticmethod\n def backward(ctx, grad_out):\n # Get input from saved ctx\n inp, = ctx.saved_tensors\n # Clone grad_out\n grad_input = grad_out.clone()\n # Gradient approximation from quadratic spline\n inp = torch.clamp(inp, min=-1.0, max=1.0)\n inp = 2*(1 - torch.abs(inp))\n # Return gradient\n return grad_input * inp\n\nclass BiRealAct(nn.Module):\n def __init__(self):\n super(BiRealAct, self).__init__()\n\n def forward(self, input):\n return F_BinAct.apply(input)\n\n\n# BasicBlock {{{\nclass BasicBlock(nn.Module):\n M = 2\n expansion = 1\n\n def __init__(self, builder, inplanes, planes, stride=1, downsample=None, base_width=64):\n super(BasicBlock, self).__init__()\n if base_width / 64 > 1:\n raise ValueError(\"Base width >64 does not work for BasicBlock\")\n\n self.conv1 = builder.conv3x3(inplanes, planes, stride)\n self.bn1 = builder.batchnorm(planes)\n self.relu = (lambda: BiRealAct())()\n self.conv2 = builder.conv3x3(planes, planes)\n self.bn2 = builder.batchnorm(planes, last_bn=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n if self.bn1 is not None:\n out = self.bn1(out)\n\n out = self.relu(out)\n\n out = self.conv2(out)\n\n if self.bn2 is not None:\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\n# BasicBlock }}}\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, builder, inplanes, planes, stride, groups, base_width, widen_factor, downsample=None):\n \"\"\" Constructor\n Args:\n inplanes: input channel dimensionality\n planes: output channel dimensionality\n stride: conv stride. 
Replaces pooling layer.\n groups: num of convolution groups.\n base_width: base number of channels in each group.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n width_ratio = planes / (widen_factor * 64.)\n D = groups * int(base_width * width_ratio)\n self.conv1 = builder.conv1x1(inplanes, D)\n self.bn1 = builder.batchnorm(D)\n self.conv2 = builder.group_conv3x3(D, D, groups=groups)\n self.bn2 = builder.batchnorm(D)\n self.conv3 = builder.conv1x1(D, planes)\n self.bn3 = builder.batchnorm(planes, last_bn=True)\n self.relu = (lambda: BiRealAct())()\n self.downsample = downsample\n self.stride = stride\n\n self.shortcut = nn.Sequential()\n if inplanes != planes:\n self.shortcut.add_module('shortcut_conv',\n builder.conv1x1(inplanes, planes))\n self.shortcut.add_module('shortcut_bn', builder.batchnorm(planes))\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n out = self.relu(out)\n\n return out\n\n # bottleneck = self.conv_reduce.forward(x)\n # bottleneck = self.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n # bottleneck = self.conv_conv.forward(bottleneck)\n # bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n # bottleneck = self.conv_expand.forward(bottleneck)\n # bottleneck = self.bn_expand.forward(bottleneck)\n # residual = self.shortcut.forward(x)\n # return F.relu(residual + bottleneck, inplace=True)\n\nclass Bottleneck2(nn.Module):\n M = 3\n expansion = 4\n\n def __init__(self, builder, inplanes, planes, groups, stride=1, downsample=None, base_width=64):\n super(Bottleneck, self).__init__()\n width = int(planes * base_width / 64)\n self.conv1 = builder.conv1x1(inplanes, width)\n self.bn1 = builder.batchnorm(width)\n self.conv2 = builder.conv3x3(width, width, stride=stride)\n self.bn2 = builder.batchnorm(width)\n self.conv3 = builder.conv1x1(width, planes * self.expansion)\n self.bn3 = builder.batchnorm(planes * self.expansion, last_bn=True)\n self.relu = (lambda: BiRealAct())()\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n out = self.relu(out)\n\n return out\n\n# Bottleneck {{{\nclass Bottleneck(nn.Module):\n M = 3\n expansion = 4\n\n def __init__(self, builder, inplanes, planes, stride=1, downsample=None, base_width=64):\n super(Bottleneck, self).__init__()\n width = int(planes * base_width / 64)\n self.conv1 = builder.conv1x1(inplanes, width)\n self.bn1 = builder.batchnorm(width)\n self.conv2 = builder.conv3x3(width, width, stride=stride)\n self.bn2 = builder.batchnorm(width)\n self.conv3 = builder.conv1x1(width, planes * self.expansion)\n self.bn3 = builder.batchnorm(planes * self.expansion, last_bn=True)\n self.relu = (lambda: BiRealAct())()\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = 
self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n out = self.relu(out)\n\n return out\n\n\n# Bottleneck }}}\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, builder, groups, layers, num_classes=1000, base_width=64, widen_factor=4):\n \"\"\" Constructor\n Args:\n groups: number of convolution groups.\n layers: number of layers.\n num_classes: number of classes\n base_width: base number of channels in each group.\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.groups = groups\n self.layers = layers\n self.block_depth = (self.layers - 2) // 9\n self.base_width = base_width\n if self.base_width // 64 > 1:\n print(f\"==> Using {self.base_width // 64}x wide model\")\n\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.bn1 = builder.batchnorm(64)\n self.relu = (lambda: BiRealAct())()\n\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], self.groups[0], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], self.groups[0], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], self.groups[0], 2)\n self.classifier = nn.Linear(self.stages[3], num_classes)\n init.kaiming_normal(self.classifier.weight)\n\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] 
= 0\n\n def block(self, name, inplanes, planes, groups, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n inplanes: number of input channels\n planes: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(inplanes, planes, pool_stride, self.groups,\n self.base_width, self.widen_factor))\n else:\n block.add_module(name_,\n ResNeXtBottleneck(planes, planes, 1, self.groups, self.base_width,\n self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n x = self.stage_3.forward(x)\n x = F.avg_pool2d(x, 8, 1)\n x = x.view(-1, self.stages[3])\n return self.classifier(x)\n\n\nclass BasicBlock_C(nn.Module):\n \"\"\"\n increasing groups is a more effective way of\n gaining accuracy than going deeper or wider\n \"\"\"\n\n def __init__(self, builder, inplanes, bottleneck_width=4, groups=32, stride=1, expansion=2):\n super(BasicBlock_C, self).__init__()\n inner_width = groups * bottleneck_width\n width = int(inplanes * bottleneck_width / 64)\n width_ratio = inplanes / (expansion * 64.)\n D = groups * int(bottleneck_width * width_ratio)\n self.expansion = expansion\n self.relu = (lambda: BiRealAct())()\n self.basic = nn.Sequential(OrderedDict(\n [\n ('conv1_0', builder.conv1x1(inplanes, inner_width, stride)),\n ('bn1', builder.batchnorm(inner_width)),\n ('act0', (lambda: BiRealAct())()),\n ('conv3_0', builder.group_conv3x3(inner_width, inner_width, groups=groups, stride=stride)),\n ('bn2', builder.batchnorm(inner_width)),\n ('act1', (lambda: BiRealAct())()),\n ('conv1_1', builder.conv1x1(inner_width, inner_width * self.expansion)),\n ('bn3', builder.batchnorm(inner_width * self.expansion))]))\n self.shortcut = nn.Sequential()\n if stride != 1 or inplanes != inner_width * self.expansion:\n self.shortcut = nn.Sequential(\n builder.conv1x1(inplanes, inner_width * self.expansion)\n )\n self.bn0 = builder.batchnorm(self.expansion * inner_width)\n\n def forward(self, x):\n out = self.basic(x)\n out += self.shortcut(x)\n out = self.relu(self.bn0(out))\n return out\n\nclass ResNeXt_BinAct(nn.Module):\n def __init__(self, builder, layers, groups, bottleneck_width=64, expansion=2, num_classes=10):\n super(ResNeXt_BinAct, self).__init__()\n self.groups = groups\n self.bottleneck_width = bottleneck_width\n self.inplanes = 64\n self.expansion = expansion\n\n # self.conv0 = nn.Conv2d(3, self.in_planes, kernel_size=3, stride=1, padding=1)\n # self.bn0 = nn.BatchNorm2d(self.in_planes)\n # self.pool0 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n # self.layer1=self._make_layer(num_blocks[0],1)\n # self.layer2=self._make_layer(num_blocks[1],2)\n # self.layer3=self._make_layer(num_blocks[2],2)\n # self.layer4=self._make_layer(num_blocks[3],2)\n # self.linear = nn.Linear(self.groups * self.bottleneck_width, num_classes)\n if args.first_layer_dense:\n self.conv1 = nn.Conv2d(\n 3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False\n )\n else:\n self.conv1 = builder.conv7x7(3, 64, stride=2, first_layer=True)\n\n self.maxpool = 
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.bn1 = builder.batchnorm(64)\n self.relu = (lambda: BiRealAct())()\n self.layer1 = self._make_layer(builder, 64, layers[0])\n self.layer2 = self._make_layer(builder, 64*(self.expansion+1), layers[1], stride=2)\n self.layer3 = self._make_layer(builder, 128*(self.expansion+1), layers[2], stride=2)\n self.layer4 = self._make_layer(builder, 256*(self.expansion+1), layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n if args.last_layer_dense:\n self.fc = nn.Conv2d(512 * self.expansion, args.num_classes, 1)\n else:\n self.fc = builder.conv1x1(512 * self.expansion, num_classes)\n\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n # out = self.pool0(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = self.fc(out)\n out = out.view(out.size(0), -1)\n return out\n\n def _make_layer(self, builder, planes, num_blocks, stride=1):\n downsample = None\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(BasicBlock_C(builder, planes, self.bottleneck_width, self.groups, stride, self.expansion))\n self.inplanes = self.expansion * self.bottleneck_width * self.groups\n self.bottleneck_width *= 2\n return nn.Sequential(*layers)\n\n\n# ResNet_BinAct {{{\nclass ResNet_BinAct(nn.Module):\n def __init__(self, builder, block, layers, num_classes=1000, base_width=64):\n self.inplanes = 64\n super(ResNet_BinAct, self).__init__()\n\n self.base_width = base_width\n if self.base_width // 64 > 1:\n print(f\"==> Using {self.base_width // 64}x wide model\")\n\n if args.first_layer_dense:\n self.conv1 = nn.Conv2d(\n 3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False\n )\n else:\n self.conv1 = builder.conv7x7(3, 64, stride=2, first_layer=True)\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.bn1 = builder.batchnorm(64)\n self.relu = (lambda: BiRealAct())()\n self.layer1 = self._make_layer(builder, block, 64, layers[0])\n self.layer2 = self._make_layer(builder, block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(builder, block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(builder, block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n if args.last_layer_dense:\n self.fc = nn.Conv2d(512 * block.expansion, args.num_classes, 1)\n else:\n self.fc = builder.conv1x1(512 * block.expansion, num_classes)\n\n def _make_layer(self, builder, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n dconv = builder.conv1x1(\n self.inplanes, planes * block.expansion, stride=stride\n )\n dbn = builder.batchnorm(planes * block.expansion)\n if dbn is not None:\n downsample = nn.Sequential(dconv, dbn)\n else:\n downsample = dconv\n\n layers = []\n layers.append(block(builder, self.inplanes, planes, stride, downsample, base_width=self.base_width))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(builder, self.inplanes, planes, base_width=self.base_width))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n\n if self.bn1 is not None:\n x = self.bn1(x)\n x = self.maxpool(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = self.fc(x)\n x = x.view(x.size(0), -1)\n\n 
return x\n\n# ResNet_BinAct }}}\n\n\n# WideResNet_BinAct {{{\nclass WideResNet_BinAct(nn.Module):\n def __init__(self, builder, block, layers, num_classes=1000, base_width=64, widen_factor=1):\n self.inplanes = 64\n super(WideResNet_BinAct, self).__init__()\n\n self.base_width = base_width\n if self.base_width // 64 > 1:\n print(f\"==> Using {self.base_width // 64}x wide model\")\n\n if args.first_layer_dense:\n self.conv1 = nn.Conv2d(\n 3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False\n )\n else:\n self.conv1 = builder.conv7x7(3, 64, stride=2, first_layer=True)\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.bn1 = builder.batchnorm(64)\n self.relu = (lambda: BiRealAct())()\n self.layer1 = self._make_layer(builder, block, 64, layers[0])\n self.layer2 = self._make_layer(builder, block, 64*(widen_factor+1), layers[1], stride=2)\n self.layer3 = self._make_layer(builder, block, 128*(widen_factor+1), layers[2], stride=2)\n self.layer4 = self._make_layer(builder, block, 256*(widen_factor+1), layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n if args.last_layer_dense:\n self.fc = nn.Conv2d(256*(widen_factor+1) * block.expansion, args.num_classes, 1)\n else:\n self.fc = builder.conv1x1(256*(widen_factor+1) * block.expansion, num_classes)\n\n def _make_layer(self, builder, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n dconv = builder.conv1x1(\n self.inplanes, planes * block.expansion, stride=stride\n )\n dbn = builder.batchnorm(planes * block.expansion)\n if dbn is not None:\n downsample = nn.Sequential(dconv, dbn)\n else:\n downsample = dconv\n\n layers = []\n layers.append(block(builder, self.inplanes, planes, stride, downsample, base_width=self.base_width))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(builder, self.inplanes, planes, base_width=self.base_width))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n\n if self.bn1 is not None:\n x = self.bn1(x)\n x = self.maxpool(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = self.fc(x)\n x = x.view(x.size(0), -1)\n\n return x\n\n# WideResNet_BinAct }}}\n\n\n# Imagenet Networks\ndef ResNet18_BinAct(pretrained=False):\n return ResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 1000)\n\ndef ResNet34_BinAct(pretrained=False):\n return ResNet_BinAct(get_builder(), BasicBlock, [3, 4, 6, 3], 1000)\n\ndef ResNet50_BinAct(pretrained=False):\n return ResNet_BinAct(get_builder(), Bottleneck, [3, 4, 6, 3], 1000)\n\ndef ResNet101_BinAct(pretrained=False):\n return ResNet_BinAct(get_builder(), Bottleneck, [3, 4, 23, 3], 1000)\n\n\ndef WideResNet18_2_BinAct(pretrained=False):\n return WideResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 1000, widen_factor=2)\n\ndef WideResNet18_3_BinAct(pretrained=False):\n return WideResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 1000, widen_factor=2.5)\n\ndef WideResNet34_2_BinAct(pretrained=False):\n return WideResNet_BinAct(get_builder(), BasicBlock, [3, 4, 6, 3], 1000, widen_factor=2)\n\ndef WideResNet34_3_BinAct(pretrained=False):\n return WideResNet_BinAct(get_builder(), BasicBlock, [3, 4, 6, 3], 1000, widen_factor=3)\n\ndef WideResNet50_2_BinAct(pretrained=False):\n return ResNet_BinAct(\n get_builder(), Bottleneck, [3, 4, 6, 3], num_classes=1000, 
base_width=64 * 2\n )\n\n\n\n# CIFAR-10 Networks\ndef ResNext_BinAct(pretrained=False):\n return ResNeXt_BinAct(get_builder(), [1, 2, 6, 2], groups=4, expansion=2)\n\n\ndef cifarResNet18_BinAct(pretrained=False):\n return ResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 10)\n\ndef cifarWideResNet18_2_BinAct(pretrained=False):\n return ResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 10, widen_factor=2)\n\ndef cifarWideResNet18_3_BinAct(pretrained=False):\n return ResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 10, widen_factor=3)\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.sign",
"torch.nn.Conv2d",
"torch.abs",
"torch.nn.Sequential",
"torch.clamp"
]
] |
lioncorpo/sfm.lion-judge-corporation | [
"95fb11bff263c3faab62269cc907eec18b527e22"
] | [
"opensfm/pairs_selection.py"
] | [
"import logging\nfrom collections import defaultdict\n\nimport numpy as np\nimport scipy.spatial as spatial\nfrom opensfm import bow, context, feature_loader, vlad\nfrom opensfm.dataset import DataSetBase\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef has_gps_info(exif):\n return (\n exif\n and \"gps\" in exif\n and \"latitude\" in exif[\"gps\"]\n and \"longitude\" in exif[\"gps\"]\n )\n\n\ndef match_candidates_by_distance(\n images_ref, images_cand, exifs, reference, max_neighbors, max_distance\n):\n \"\"\"Find candidate matching pairs by GPS distance.\n\n The GPS altitude is ignored because we want images of the same position\n at different altitudes to be matched together. Otherwise, for drone\n datasets, flights at different altitudes do not get matched.\n \"\"\"\n if len(images_cand) == 0:\n return set()\n\n if max_neighbors <= 0 and max_distance <= 0:\n return set()\n max_neighbors = max_neighbors or 99999999\n max_distance = max_distance or 99999999.0\n k = min(len(images_cand), max_neighbors)\n\n points = np.zeros((len(images_cand), 3))\n for i, image in enumerate(images_cand):\n gps = exifs[image][\"gps\"]\n points[i] = reference.to_topocentric(gps[\"latitude\"], gps[\"longitude\"], 0)\n\n tree = spatial.cKDTree(points)\n\n pairs = set()\n for image_ref in images_ref:\n nn = k + 1 if image_ref in images_cand else k\n\n gps = exifs[image_ref][\"gps\"]\n point = reference.to_topocentric(gps[\"latitude\"], gps[\"longitude\"], 0)\n distances, neighbors = tree.query(\n point, k=nn, distance_upper_bound=max_distance\n )\n\n if type(neighbors) == int: # special case with only one NN\n neighbors = [neighbors]\n\n for j in neighbors:\n if j >= len(images_cand):\n continue\n image_cand = images_cand[j]\n if image_cand != image_ref:\n pairs.add(tuple(sorted((image_ref, image_cand))))\n return pairs\n\n\ndef match_candidates_with_bow(\n data: DataSetBase,\n images_ref,\n images_cand,\n exifs,\n reference,\n max_neighbors,\n max_gps_distance,\n max_gps_neighbors,\n enforce_other_cameras,\n):\n \"\"\"Find candidate matching pairs using BoW-based distance.\n\n If max_gps_distance > 0, then we use first restrain a set of\n candidates using max_gps_neighbors neighbors selected using\n GPS distance.\n\n If enforce_other_cameras is True, we keep max_neighbors images\n with same cameras AND max_neighbors images from any other different\n camera.\n \"\"\"\n if max_neighbors <= 0:\n return set()\n\n results = compute_bow_affinity(\n data,\n images_ref,\n images_cand,\n exifs,\n reference,\n max_gps_distance,\n max_gps_neighbors,\n )\n\n return construct_pairs(results, max_neighbors, exifs, enforce_other_cameras)\n\n\ndef compute_bow_affinity(\n data: DataSetBase,\n images_ref,\n images_cand,\n exifs,\n reference,\n max_gps_distance,\n max_gps_neighbors,\n):\n \"\"\"Compute afinity scores between references and candidates\n images using BoW-based distance.\n \"\"\"\n preempted_candidates, need_load = preempt_candidates(\n images_ref, images_cand, exifs, reference, max_gps_neighbors, max_gps_distance\n )\n\n # construct BoW histograms\n logger.info(\"Computing %d BoW histograms\" % len(need_load))\n histograms = load_histograms(data, need_load)\n\n # parallel VLAD neighbors computation\n args, processes, batch_size = create_parallel_matching_args(\n data, preempted_candidates, histograms\n )\n logger.info(\"Computing BoW candidates with %d processes\" % processes)\n return context.parallel_map(match_bow_unwrap_args, args, processes, batch_size)\n\n\ndef match_candidates_with_vlad(\n data: 
DataSetBase,\n images_ref,\n images_cand,\n exifs,\n reference,\n max_neighbors,\n max_gps_distance,\n max_gps_neighbors,\n enforce_other_cameras,\n):\n \"\"\"Find candidate matching pairs using VLAD-based distance.\n If max_gps_distance > 0, then we use first restrain a set of\n candidates using max_gps_neighbors neighbors selected using\n GPS distance.\n\n If enforce_other_cameras is True, we keep max_neighbors images\n with same cameras AND max_neighbors images from any other different\n camera.\n\n If enforce_other_cameras is False, we keep max_neighbors images\n from all cameras.\n \"\"\"\n if max_neighbors <= 0:\n return set()\n\n results = compute_vlad_affinity(\n data,\n images_ref,\n images_cand,\n exifs,\n reference,\n max_gps_distance,\n max_gps_neighbors,\n )\n\n return construct_pairs(results, max_neighbors, exifs, enforce_other_cameras)\n\n\ndef compute_vlad_affinity(\n data: DataSetBase,\n images_ref,\n images_cand,\n exifs,\n reference,\n max_gps_distance,\n max_gps_neighbors,\n):\n \"\"\"Compute afinity scores between references and candidates\n images using VLAD-based distance.\n \"\"\"\n preempted_candidates, need_load = preempt_candidates(\n images_ref, images_cand, exifs, reference, max_gps_neighbors, max_gps_distance\n )\n\n # construct VLAD histograms\n logger.info(\"Computing %d VLAD histograms\" % len(need_load))\n histograms = vlad_histograms(need_load, data)\n\n # parallel VLAD neighbors computation\n args, processes, batch_size = create_parallel_matching_args(\n data, preempted_candidates, histograms\n )\n logger.info(\"Computing VLAD candidates with %d processes\" % processes)\n return context.parallel_map(match_vlad_unwrap_args, args, processes, batch_size)\n\n\ndef preempt_candidates(\n images_ref, images_cand, exifs, reference, max_gps_neighbors, max_gps_distance\n):\n \"\"\"Preempt candidates using GPS to reduce set of images\n from which to load data to save RAM.\n \"\"\"\n\n # preempt candidates images using GPS\n preempted_cand = {im: images_cand for im in images_ref}\n if max_gps_distance > 0 or max_gps_neighbors > 0:\n gps_pairs = match_candidates_by_distance(\n images_ref,\n images_cand,\n exifs,\n reference,\n max_gps_neighbors,\n max_gps_distance,\n )\n preempted_cand = defaultdict(list)\n for p in gps_pairs:\n if p[0] in images_ref:\n preempted_cand[p[0]].append(p[1])\n if p[1] in images_ref:\n preempted_cand[p[1]].append(p[0])\n\n # reduce sets of images from which to load histograms (RAM saver)\n need_load = set(preempted_cand.keys())\n for k, v in preempted_cand.items():\n need_load.update(v)\n need_load.add(k)\n return preempted_cand, need_load\n\n\ndef construct_pairs(results, max_neighbors, exifs, enforce_other_cameras):\n \"\"\"Construct final sets of pairs to match\"\"\"\n pairs = {}\n for im, distances, other in results:\n order = np.argsort(distances)\n if enforce_other_cameras:\n pairs.update(\n pairs_from_neighbors(im, exifs, distances, order, other, max_neighbors)\n )\n else:\n for i in order[:max_neighbors]:\n pairs[tuple(sorted((im, other[i])))] = distances[i]\n return pairs\n\n\ndef create_parallel_matching_args(data: DataSetBase, preempted_cand, histograms):\n \"\"\"Create arguments to matching function\"\"\"\n args = list(match_histogram_arguments(preempted_cand, histograms))\n\n # parallel VLAD neighbors computation\n per_process = 512\n processes = context.processes_that_fit_in_memory(\n data.config[\"processes\"], per_process\n )\n batch_size = max(1, len(args) / (2 * processes))\n return args, processes, batch_size\n\n\ndef 
match_histogram_arguments(candidates, histograms):\n \"\"\" Generate arguments for parralel processing of BoW \"\"\"\n for im, cands in candidates.items():\n yield (im, cands, histograms)\n\n\ndef match_bow_unwrap_args(args):\n \"\"\" Wrapper for parralel processing of BoW \"\"\"\n image, other_images, histograms = args\n return bow_distances(image, other_images, histograms)\n\n\ndef match_vlad_unwrap_args(args):\n \"\"\" Wrapper for parralel processing of VLAD \"\"\"\n image, other_images, histograms = args\n return vlad.vlad_distances(image, other_images, histograms)\n\n\ndef match_candidates_by_time(images_ref, images_cand, exifs, max_neighbors):\n \"\"\"Find candidate matching pairs by time difference.\"\"\"\n if max_neighbors <= 0:\n return set()\n k = min(len(images_cand), max_neighbors)\n\n times = np.zeros((len(images_cand), 1))\n for i, image in enumerate(images_cand):\n times[i] = exifs[image][\"capture_time\"]\n\n tree = spatial.cKDTree(times)\n\n pairs = set()\n for image_ref in images_ref:\n nn = k + 1 if image_ref in images_cand else k\n\n time = exifs[image_ref][\"capture_time\"]\n distances, neighbors = tree.query([time], k=nn)\n\n if type(neighbors) == int: # special case with only one NN\n neighbors = [neighbors]\n\n for j in neighbors:\n if j >= len(images_cand):\n continue\n image_cand = images_cand[j]\n if image_ref != image_cand:\n pairs.add(tuple(sorted((image_ref, image_cand))))\n return pairs\n\n\ndef match_candidates_by_order(images_ref, images_cand, max_neighbors):\n \"\"\"Find candidate matching pairs by sequence order.\"\"\"\n if max_neighbors <= 0:\n return set()\n n = (max_neighbors + 1) // 2\n\n pairs = set()\n for i, image_ref in enumerate(images_ref):\n a = max(0, i - n)\n b = min(len(images_cand), i + n)\n for j in range(a, b):\n image_cand = images_cand[j]\n if image_ref != image_cand:\n pairs.add(tuple(sorted([image_ref, image_cand])))\n return pairs\n\n\ndef match_candidates_from_metadata(\n images_ref, images_cand, exifs, data: DataSetBase, config_override\n):\n \"\"\"Compute candidate matching pairs between between images_ref and images_cand\n\n Returns a list of pairs (im1, im2) such that (im1 in images_ref) is true.\n Returned pairs are unique given that (i, j) == (j, i).\n \"\"\"\n\n overriden_config = data.config.copy()\n overriden_config.update(config_override)\n\n max_distance = overriden_config[\"matching_gps_distance\"]\n gps_neighbors = overriden_config[\"matching_gps_neighbors\"]\n time_neighbors = overriden_config[\"matching_time_neighbors\"]\n order_neighbors = overriden_config[\"matching_order_neighbors\"]\n bow_neighbors = overriden_config[\"matching_bow_neighbors\"]\n bow_gps_distance = overriden_config[\"matching_bow_gps_distance\"]\n bow_gps_neighbors = overriden_config[\"matching_bow_gps_neighbors\"]\n bow_other_cameras = overriden_config[\"matching_bow_other_cameras\"]\n vlad_neighbors = overriden_config[\"matching_vlad_neighbors\"]\n vlad_gps_distance = overriden_config[\"matching_vlad_gps_distance\"]\n vlad_gps_neighbors = overriden_config[\"matching_vlad_gps_neighbors\"]\n vlad_other_cameras = overriden_config[\"matching_vlad_other_cameras\"]\n\n if not data.reference_lla_exists():\n data.invent_reference_lla()\n reference = data.load_reference()\n\n if not all(map(has_gps_info, exifs.values())):\n if gps_neighbors != 0:\n logger.warn(\n \"Not all images have GPS info. 
\" \"Disabling matching_gps_neighbors.\"\n )\n gps_neighbors = 0\n max_distance = 0\n\n images_ref.sort()\n\n if (\n max_distance\n == gps_neighbors\n == time_neighbors\n == order_neighbors\n == bow_neighbors\n == vlad_neighbors\n == 0\n ):\n # All pair selection strategies deactivated so we match all pairs\n d = set()\n t = set()\n o = set()\n b = set()\n v = set()\n pairs = {\n tuple(sorted([i, j])) for i in images_ref for j in images_cand if i != j\n }\n else:\n d = match_candidates_by_distance(\n images_ref, images_cand, exifs, reference, gps_neighbors, max_distance\n )\n t = match_candidates_by_time(images_ref, images_cand, exifs, time_neighbors)\n o = match_candidates_by_order(images_ref, images_cand, order_neighbors)\n b = match_candidates_with_bow(\n data,\n images_ref,\n images_cand,\n exifs,\n reference,\n bow_neighbors,\n bow_gps_distance,\n bow_gps_neighbors,\n bow_other_cameras,\n )\n v = match_candidates_with_vlad(\n data,\n images_ref,\n images_cand,\n exifs,\n reference,\n vlad_neighbors,\n vlad_gps_distance,\n vlad_gps_neighbors,\n vlad_other_cameras,\n )\n pairs = d | t | o | set(b) | set(v)\n\n pairs = ordered_pairs(pairs, images_ref)\n\n report = {\n \"num_pairs_distance\": len(d),\n \"num_pairs_time\": len(t),\n \"num_pairs_order\": len(o),\n \"num_pairs_bow\": len(b),\n \"num_pairs_vlad\": len(v),\n }\n return pairs, report\n\n\ndef bow_distances(image, other_images, histograms):\n \"\"\"Compute BoW-based distance (L1 on histogram of words)\n between an image and other images.\n \"\"\"\n if image not in histograms:\n return image, [], []\n\n distances = []\n other = []\n h = histograms[image]\n for im2 in other_images:\n if im2 != image and im2 in histograms:\n h2 = histograms[im2]\n distances.append(np.fabs(h - h2).sum())\n other.append(im2)\n return image, distances, other\n\n\ndef load_histograms(data: DataSetBase, images):\n \"\"\" Load BoW histograms of given images \"\"\"\n min_num_feature = 8\n\n histograms = {}\n bows = bow.load_bows(data.config)\n for im in images:\n filtered_words = feature_loader.instance.load_words(data, im, masked=True)\n if filtered_words is None:\n logger.error(\"No words in image {}\".format(im))\n continue\n if len(filtered_words) <= min_num_feature:\n logger.warning(\n \"Too few filtered features in image {}: {}\".format(\n im, len(filtered_words)\n )\n )\n continue\n\n histograms[im] = bows.histogram(filtered_words[:, 0])\n return histograms\n\n\ndef vlad_histograms(images, data: DataSetBase):\n \"\"\"Construct VLAD histograms from the image features.\n\n Returns a dictionary of VLAD vectors for the images.\n \"\"\"\n vlads = {}\n for im in images:\n im_vlad = vlad.instance.vlad_histogram(data, im)\n if im_vlad is not None:\n vlads[im] = im_vlad\n return vlads\n\n\ndef pairs_from_neighbors(image, exifs, distances, order, other, max_neighbors):\n \"\"\"Construct matching pairs given closest ordered neighbors.\n\n Pairs will of form (image, im2), im2 being the closest max_neighbors\n given by (order, other) having the same cameras OR the closest max_neighbors\n having from any other camera.\n \"\"\"\n same_camera, other_cameras = [], []\n for i in order:\n im2 = other[i]\n d = distances[i]\n if exifs[im2][\"camera\"] == exifs[image][\"camera\"]:\n if len(same_camera) < max_neighbors:\n same_camera.append((im2, d))\n else:\n if len(other_cameras) < max_neighbors:\n other_cameras.append((im2, d))\n if len(same_camera) + len(other_cameras) >= 2 * max_neighbors:\n break\n\n pairs = {}\n for im2, d in same_camera + other_cameras:\n 
pairs[tuple(sorted((image, im2)))] = d\n return pairs\n\n\ndef ordered_pairs(pairs, images_ref):\n \"\"\"Image pairs that need matching skipping duplicates.\n\n Returns a list of pairs (im1, im2) such that (im1 in images_ref) is true.\n \"\"\"\n per_image = defaultdict(list)\n for im1, im2 in pairs:\n per_image[im1].append(im2)\n per_image[im2].append(im1)\n\n ordered = set()\n remaining = set(images_ref)\n if len(remaining) > 0:\n next_image = remaining.pop()\n while next_image:\n im1 = next_image\n next_image = None\n\n for im2 in per_image[im1]:\n if (im2, im1) not in ordered:\n ordered.add((im1, im2))\n if not next_image and im2 in remaining:\n next_image = im2\n remaining.remove(im2)\n\n if not next_image and remaining:\n next_image = remaining.pop()\n\n return list(ordered)\n"
] | [
[
"numpy.fabs",
"scipy.spatial.cKDTree",
"numpy.argsort"
]
] |
NickKaparinos/Kaggle-Dogs-vs.-Cats-Redux | [
"8bc0296648f7e376c97dba3eeadf5872e7499656"
] | [
"main_pytorch.py"
] | [
"\"\"\"\nNick Kaparinos\nDogs vs. Cats\nKaggle Competition\nGrid Search using pytorch\n\"\"\"\n\nimport pandas as pd\nfrom random import seed\nfrom utilities import *\nfrom torch.utils.data import DataLoader\nfrom torchvision.models import vgg16\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch\nimport time\n\n# Options\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\ndo_preprocess = False\nuse_vgg = False\nIMG_SIZE = 75\nstart = time.perf_counter()\nseed(0)\nnp.random.seed(0)\ntf.random.set_seed(0)\ntorch.manual_seed(0)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Using: {device}\")\n\n# Tensorboard\nLOG_DIR = 'logs/pytorch'\nwriter = SummaryWriter(log_dir=LOG_DIR)\n\n# Preprocess Data\nif do_preprocess:\n preprocess_data(IMG_SIZE, convert_to_grayscale=False)\n\n# Read Data\nX_train = np.load(\"X_train.npy\")\ny_train = np.load(\"y_train.npy\")\nX_test = np.load(\"X_test.npy\")\ninput_channels = X_train.shape[3]\nIMG_SIZE = X_train.shape[1]\n\n# Convert to tensors\nX_train = torch.Tensor(X_train)\nX_test = torch.Tensor(X_test)\ny_train = torch.Tensor(y_train)\n\n# Scale\nX_train = X_train / 255.0\nX_test = X_test / 255.0\n\n# Model\nif use_vgg:\n model = vgg16(pretrained=True)\n\n # Freeze parameters\n for param in model.parameters():\n param.requires_grad = False\n\n # Add final layer\n num_features = model.classifier._modules['6'].in_features\n model.classifier._modules['6'] = nn.Linear(num_features, 1)\n model.classifier._modules['7'] = nn.Sigmoid()\n model = model.to(device)\nelse:\n model = pytorch_model(input_channels=input_channels).to(device)\nprint(model)\n\nlearning_rate = 5e-5\nepochs = 5\nloss_fn = torch.nn.BCELoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# Training\nX_train = X_train.to(device)\ny_train = y_train.to(device)\n\n# Validation split\nVAL_PCT = 0.1\nval_size = int(len(X_train) * VAL_PCT)\nX_val = X_train[-val_size:]\ny_val = y_train[-val_size:]\nX_train = X_train[:-val_size]\ny_train = y_train[:-val_size]\n\n# Data loaders\nbatch_size = 128\ntrain_dataset = [(X_train[i], y_train[i]) for i in range(X_train.size()[0])]\nvalidation_dataset = [(X_val[i], y_val[i]) for i in range(X_val.size()[0])]\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\ntest_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=True)\n\n# Training\nfor epoch in range(epochs):\n print(f\"-----------------Epoch {epoch + 1}-----------------\")\n pytorch_train_loop(train_dataloader, model, loss_fn, optimizer, writer, epoch)\n pytorch_test_loop(test_dataloader, model, loss_fn, writer, epoch)\nprint(\"Training done!\")\n\n# Execution Time\nwriter.close()\nend = time.perf_counter()\nprint(f\"\\nExecution time = {end - start:.2f} second(s)\")\ndebug = True\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.manual_seed",
"pandas.set_option",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.BCELoss",
"torch.Tensor"
]
] |
Alexzsh/chinese_short_text_classification | [
"de16359b4c83cc18c0478c33e211cc3f85b8e36b"
] | [
"data/pre_process.py"
] | [
"# coding: utf-8\n\nimport sys\nfrom collections import Counter\nimport gc\nimport numpy as np\nimport tensorflow.contrib.keras as kr\nimport jieba\nimport pandas as pd\nimport re\nif sys.version_info[0] > 2:\n is_py3 = True\nelse:\n #reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n is_py3 = False\ndef native_word(word, encoding='utf-8'):\n \"\"\"如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码\"\"\"\n if not is_py3:\n return word.encode(encoding)\n else:\n return word\ndef native_content(content):\n if not is_py3:\n return content.decode('utf-8')\n else:\n return content\ndef open_file(filename, mode='r'):\n \"\"\"\n 常用文件操作,可在python2和python3间切换.\n mode: 'r' or 'w' for read or write\n \"\"\"\n if is_py3:\n return open(filename, mode, encoding='utf-8', errors='ignore')\n else:\n return open(filename, mode)\ndef read_file(filename):\n \"\"\"读取文件数据\"\"\"\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n content,label= line.strip().split('__label__')\n if content:\n contents.append((native_content(content)))\n labels.append(native_content(label))\n except:\n pass\n return contents, labels\ndef read_csv(filename):\n df = pd.read_csv(filename,'r',encoding='utf-8',sep=',')\n contents = df['content'].values.tolist()\n labels = (df['label'].values.tolist())\n return contents,labels\ndef build_vocab(train_dir, vocab_dir, vocab_size=5000):\n \"\"\"根据训练集构建词汇表,存储\"\"\"\n data_train, _ = read_file(train_dir)\n # isChinese = re.compile(r'[\\u4e00-\\u9fa5]+$')\n all_data = []\n for content in data_train:\n # s=list(filter(lambda x:isChinese.match(x), list(jieba.cut(content))))\n all_data.extend(content.split(' '))\n print(len(all_data))\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size - 1)\n words, _ = list(zip(*count_pairs))\n # 添加一个 <PAD> 来将所有文本pad为同一长度\n words = ['<PAD>'] + list(words)\n open_file(vocab_dir, mode='w').write('\\n'.join(words) + '\\n')\ndef read_vocab(vocab_dir):\n \"\"\"读取词汇表\"\"\"\n # words = open_file(vocab_dir).read().strip().split('\\n')\n with open_file(vocab_dir) as fp:\n # 如果是py2 则每个值都转化为unicode\n words = [native_content(_.strip()) for _ in fp.readlines()]\n word_to_id = dict(zip(words, range(len(words))))\n return words, word_to_id\ndef read_vec(vocab_dir):\n word2id={}\n with open(vocab_dir,'r',encoding='utf-8') as f:\n # for line in f.readlines():\n # word,vec=line.split(' ')[0],line.split(' ')[1:]\n # word2id[word]=vec\n word2id={line.split(' ')[0]:line.split(' ')[1:] for line in f.readlines()}\n return word2id\ndef loadWord2Vec(filename):\n vocab = []\n embd = []\n cnt = 0\n fr = open(filename,'r')\n line = fr.readline().decode('utf-8').strip()\n #print line\n word_dim = int(line.split(' ')[1])\n vocab.append(\"unk\")\n embd.append([0]*word_dim)\n for line in fr :\n row = line.strip().split(' ')\n vocab.append(row[0])\n embd.append(row[1:])\n print(\"loaded word2vec\")\n fr.close()\n vocab_index={word:index for index,word in enumerate(vocab)}\n return vocab,embd,vocab_index\ndef word2vec(x):\n # with open(\"cnews/newsblogbbs.vec\", 'r',encoding='utf-8') as f:\n pass\ndef read_category():\n \"\"\"读取分类目录,固定\"\"\"\n categories = ['服饰内衣', '图书', '汽车用品', '运动户外', '家装建材', '礼品箱包']\n\n categories = [native_content(x) for x in categories]\n\n cat_to_id = dict(zip(categories, range(len(categories))))\n\n return categories, cat_to_id\ndef to_words(content, words):\n \"\"\"将id表示的内容转换为文字\"\"\"\n return ''.join(words[x] for x in content)\ndef process_file(filename, word_to_id, cat_to_id, max_length=20):\n \"\"\"将文件转换为id表示\"\"\"\n 
contents, labels = read_file(filename)\n\n data_id, label_id = [], []\n for i in range(1,len(contents)):\n data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])\n label_id.append(cat_to_id[labels[i]])\n print(label_id)\n # 使用keras提供的pad_sequences来将文本pad为固定长度\n x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)\n y_pad = kr.utils.to_categorical(label_id) # 将标签转换为one-hot表示\n gc.collect()\n return x_pad, y_pad\ndef batch_iter(x, y, batch_size=64):\n \"\"\"生成批次数据\"\"\"\n data_len = len(x)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n indices = np.random.permutation(np.arange(data_len))\n x_shuffle = x[indices]\n y_shuffle = y[indices]\n\n for i in range(num_batch):\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]\n \nif __name__ =='__main__':\n read_vec('cnews/newsblogbbs.vec')\n # filename='cnews/newsblogbbs.vec'\n # res=['<PAD>']\n # a=True\n # with open(filename,'r',encoding='utf-8') as f:\n # for i in f:\n # if a:\n # a=False\n # continue\n # word=i.split(' ')[0]\n # res.append(word)\n # print(len(res))\n # with open('cnews/data.vocab.txt','w',encoding='utf-8') as fw:\n # for word in res:\n # fw.write(word+\"\\n\")"
] | [
[
"pandas.read_csv",
"tensorflow.contrib.keras.utils.to_categorical",
"numpy.arange",
"tensorflow.contrib.keras.preprocessing.sequence.pad_sequences"
]
] |
mosaic-group/PyLibAPR | [
"4b5af50c26b4770c460460f9491bd840af2537da"
] | [
"demo/apr_iteration_demo.py"
] | [
"import pyapr\nimport numpy as np\nfrom time import time\n\n\ndef main():\n \"\"\"\n This demo implements a piecewise constant reconstruction using the wrapped PyLinearIterator. The Python reconstruction\n is timed and compared to the internal C++ version.\n\n Note: The current Python reconstruction is very slow and needs to be improved. For now, this demo is best used as a\n coding example of the loop structure to access particles and their spatial properties.\n \"\"\"\n\n io_int = pyapr.filegui.InteractiveIO()\n fpath_apr = io_int.get_apr_file_name() # get APR file path from gui\n\n # Instantiate APR and particle objects\n parts = pyapr.ShortParticles()\n apr = pyapr.APR()\n\n # Read from APR file\n pyapr.io.read(fpath_apr, apr, parts)\n\n # Illustrates the usage of the Python-wrapped linear iterator by computing the piecewise constant reconstruction\n start = time()\n org_dims = apr.org_dims() # dimension order (y, x, z)\n py_recon = np.empty((org_dims[2], org_dims[1], org_dims[0]), dtype=np.uint16)\n max_level = apr.level_max()\n\n apr_it = apr.iterator() # PyLinearIterator\n\n # particles at the maximum level coincide with pixels\n level = max_level\n for z in range(apr_it.z_num(level)):\n for x in range(apr_it.x_num(level)):\n for idx in range(apr_it.begin(level, z, x), apr_it.end()):\n py_recon[z, x, apr_it.y(idx)] = parts[idx]\n\n # loop over levels up to level_max-1\n for level in range(apr_it.level_min(), apr_it.level_max()):\n\n step_size = 2 ** (max_level - level) # this is the size (in pixels) of the particle cells at level\n\n for z in range(apr_it.z_num(level)):\n for x in range(apr_it.x_num(level)):\n for idx in range(apr_it.begin(level, z, x), apr_it.end()):\n y = apr_it.y(idx)\n\n y_start = y * step_size\n x_start = x * step_size\n z_start = z * step_size\n\n y_end = min(y_start+step_size, py_recon.shape[2])\n x_end = min(x_start+step_size, py_recon.shape[1])\n z_end = min(z_start+step_size, py_recon.shape[0])\n\n py_recon[z_start:z_end, x_start:x_end, y_start:y_end] = parts[idx]\n\n py_time = time()-start\n print('python reconstruction took {} seconds'.format(py_time))\n\n # Compare to the c++ reconstruction\n start = time()\n tmp = pyapr.numerics.reconstruction.recon_pc(apr, parts)\n cpp_recon = np.array(tmp, copy=False)\n cpp_time = time()-start\n print('c++ reconstruction took {} seconds'.format(cpp_time))\n print('c++ was {} times faster'.format(py_time / cpp_time))\n\n # check that both methods produce the same results (on a subset of the image if it is larger than 128^3 pixels)\n zm = min(org_dims[2], 128)\n xm = min(org_dims[1], 128)\n ym = min(org_dims[0], 128)\n\n success = np.allclose(py_recon[:zm, :xm, :ym], cpp_recon[:zm, :xm, :ym])\n if not success:\n print('Python and C++ reconstructions seem to give different results...')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.allclose",
"numpy.empty"
]
] |
khanfarhan10/gvae | [
"a14fa36c5249373235c7cf339e26328791c6c8c4"
] | [
"ops.py"
] | [
"import math\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.contrib import slim\nfrom tensorflow.contrib import layers as tflayers\n\[email protected]_arg_scope\ndef conv2d_transpose(\n inputs,\n out_shape,\n kernel_size=(5, 5),\n stride=(1, 1),\n activation_fn=tf.nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=tflayers.xavier_initializer(),\n scope=None,\n reuse=None):\n batchsize = tf.shape(inputs)[0]\n in_channels = int(inputs.get_shape()[-1])\n\n output_shape = tf.stack([batchsize, out_shape[0], out_shape[1], out_shape[2]])\n filter_shape = [kernel_size[0], kernel_size[1], out_shape[2], in_channels]\n\n with tf.variable_scope(scope, 'Conv2d_transpose', [inputs], reuse=reuse) as sc:\n w = tf.get_variable('weights', filter_shape,\n initializer=weights_initializer)\n\n outputs = tf.nn.conv2d_transpose(inputs, w, output_shape=output_shape,\n strides=[1, stride[0], stride[1], 1])\n\n if not normalizer_fn:\n biases = tf.get_variable('biases', [out_shape[2]], initializer=tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n return outputs\n\[email protected]_arg_scope\ndef add_linear(\n inputs,\n targets,\n activation_fn=None,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=tflayers.xavier_initializer(),\n scope=None,\n reuse=None):\n with tf.variable_scope(scope, 'AddLinear', [inputs], reuse=reuse) as sc:\n shape_targets = targets.get_shape()\n targets_size = int(np.prod([int(s) for s in shape_targets[1:]]))\n outputs = slim.fully_connected(inputs, targets_size, activation_fn=None, weights_initializer=weights_initializer)\n outputs = tf.reshape(outputs, tf.shape(targets))\n outputs = outputs + targets\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n return outputs\n\[email protected]_arg_scope\ndef add_resnet_conv(\n inputs,\n channels,\n nlayers=1,\n kernel_size=(5, 5),\n activation_fn=tf.nn.elu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=tflayers.xavier_initializer(),\n scope=None,\n reuse=None):\n with tf.variable_scope(scope, 'Resnet_conv', [inputs], reuse=reuse) as sc:\n channels_in = int(inputs.get_shape()[3])\n net = inputs\n with slim.arg_scope([slim.conv2d], kernel_size=kernel_size, stride=(1, 1)):\n for i in range(nlayers):\n net = activation_fn(net)\n res = slim.conv2d(net, channels,\n activation_fn=activation_fn,\n normalizer_fn=normalizer_fn, normalizer_params=normalizer_params,\n scope=\"res_%d_0\" % i)\n res = slim.conv2d(net, channels_in,\n activation_fn=None, scope=\"res_%d_1\" % i)\n net += res\n\n return net\n\[email protected]_arg_scope\ndef masked_linear_layer(\n inputs,\n out_dim,\n mask,\n activation_fn=None,\n weights_initializer=tflayers.xavier_initializer(),\n scope=None,\n reuse=None):\n with tf.variable_scope(scope, 'MADE', [inputs], reuse=reuse) as sc:\n in_dim = int(inputs.get_shape()[1])\n W = tf.get_variable('weights', [in_dim, out_dim],\n initializer=weights_initializer)\n biases = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer(0.0))\n\n out = tf.matmul(inputs, mask*W) + biases\n\n if not activation_fn is None:\n 
out = activation_fn(out)\n\n return out\n\ndef get_pdf_gauss(loc, log_scale, sample):\n scale = tf.exp(log_scale)\n pdf = -tf.reduce_sum(0.5 * tf.square((sample - loc)/scale) + log_scale + 0.5*np.log(2*np.pi), [1])\n return pdf\n\ndef get_pdf_stdgauss(sample):\n pdf = -tf.reduce_sum(0.5 * tf.square(sample) + 0.5*np.log(2*np.pi), [1])\n return pdf\n\ndef custom_initializer(seed=None, dtype=tf.float32, trp=False):\n def _initializer(shape, dtype=dtype, partition_info=None):\n if len(shape) == 2:\n N = float(shape[1])\n elif len(shape) == 4 and not trp:\n N = float(shape[0]) * float(shape[1]) * float(shape[2])\n elif len(shape) == 4 and trp:\n N = float(shape[0]) * float(shape[1]) * float(shape[3])\n else:\n raise ValueError(\"weights need to be either 2 or 4!\")\n stddev = 1./math.sqrt(N)\n return tf.truncated_normal(shape, 0.0, stddev, dtype, seed=seed)\n return _initializer\n\ndef flatten_spatial(x):\n x_shape = x.get_shape().as_list()\n x_dim = np.prod(x_shape[1:])\n x_flat = tf.reshape(x, [-1, x_dim])\n return x_flat\n\ndef norm(x, axes=None, keep_dims=False):\n return tf.sqrt(tf.reduce_sum(x*x, reduction_indices=axes, keep_dims=keep_dims))\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak*x)\n\ndef reduce_geomean(x, axis=None):\n \"Computes log(sum_i exp(x_i) / N).\"\n N = tf.reduce_prod(tf.shape(x)[axis])\n out = tf.reduce_logsumexp(x, axis=axis) - tf.log(tf.to_float(N))\n return out\n\ndef tril_matrix(n, unit_diag=True):\n offset = 0\n nentries = n*(n+1)/2\n if unit_diag:\n offset = -1\n nentries = (n-1)*n/2\n\n indices = list(zip(*np.tril_indices(n, offset)))\n indices = tf.constant([list(i) for i in indices], dtype=tf.int32)\n\n weights = tf.get_variable('weights', [nentries], initializer=tf.constant_initializer(0.0))\n\n matrix = tf.sparse_to_dense(sparse_indices=indices, output_shape=[n, n],\n sparse_values=weights, default_value=0, validate_indices=True)\n\n if unit_diag:\n matrix += tf.constant(np.eye(n, dtype=np.float32))\n\n return matrix\n\n\ndef variable_summaries(name, var):\n \"\"\"Attach a lot of summaries to a Tensor.\"\"\"\n with tf.name_scope(name):\n mean = tf.reduce_mean(var)\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n summaries = tf.summary.merge([\n tf.summary.scalar(\"mean\", mean),\n tf.summary.scalar(\"stddev\", stddev),\n # tf.scalar_summary(\"median/\" + name, tf.reduce_median(var))\n tf.summary.histogram(\"hist\", var),\n ])\n return summaries\n"
] | [
[
"tensorflow.reduce_logsumexp",
"tensorflow.summary.scalar",
"tensorflow.reshape",
"tensorflow.contrib.slim.conv2d",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.name_scope",
"numpy.log",
"tensorflow.reduce_sum",
"tensorflow.summary.histogram",
"numpy.eye",
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.constant_initializer",
"tensorflow.nn.conv2d_transpose",
"tensorflow.sparse_to_dense",
"tensorflow.to_float",
"tensorflow.contrib.slim.arg_scope",
"numpy.prod",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.bias_add",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.truncated_normal",
"tensorflow.reduce_mean",
"numpy.tril_indices",
"tensorflow.exp",
"tensorflow.square",
"tensorflow.get_variable",
"tensorflow.maximum"
]
] |
pranav-dahiya/Twitter | [
"f38d546d2dda69acb03abb2347e316578fc28a38"
] | [
"supervised.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import make_scorer\n\n\ndef cf_matrix_00(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[0,0] / np.sum(cf_matr[0,:])\ndef cf_matrix_01(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[0,1] / np.sum(cf_matr[0,:])\ndef cf_matrix_02(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[0,2] / np.sum(cf_matr[0,:])\ndef cf_matrix_10(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[1,0] / np.sum(cf_matr[1,:])\ndef cf_matrix_11(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[1,1] / np.sum(cf_matr[1,:])\ndef cf_matrix_12(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[1,2] / np.sum(cf_matr[1,:])\ndef cf_matrix_20(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[2,0] / np.sum(cf_matr[2,:])\ndef cf_matrix_21(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[2,1] / np.sum(cf_matr[2,:])\ndef cf_matrix_22(y_true, y_pred):\n cf_matr = confusion_matrix(y_true, y_pred)\n return cf_matr[2,2] / np.sum(cf_matr[2,:])\n\n\nscores = {'cf_matrix_00': make_scorer(cf_matrix_00),\n 'cf_matrix_01': make_scorer(cf_matrix_01),\n # 'cf_matrix_02': make_scorer(cf_matrix_02),\n 'cf_matrix_10': make_scorer(cf_matrix_10),\n 'cf_matrix_11': make_scorer(cf_matrix_11),\n # 'cf_matrix_12': make_scorer(cf_matrix_12),\n # 'cf_matrix_20': make_scorer(cf_matrix_20),\n # 'cf_matrix_21': make_scorer(cf_matrix_21),\n # 'cf_matrix_22': make_scorer(cf_matrix_22),\n 'accuracy': 'balanced_accuracy',\n 'precision': 'precision_weighted',\n 'recall': 'recall_weighted',\n 'f1': 'f1_weighted'}\n\ndata = pd.read_json('data_formspring.json')\n\nvectorizer = TfidfVectorizer(ngram_range=(2,3), stop_words='english')\nX = vectorizer.fit_transform(data['content'])\ny = data['label']\nmodel = SVR()\nmodel.fit(X, y)\ndata['ngram'] = pd.Series(model.predict(X))\n\n# X = data[['profane_words', 'profanity_score', 'num_usertags',\n# 'upper_case_density', 'sentiment', 'length', 'num_pronoun', 'ngram']]\n\nX = data[['num_url', 'num_emoji', 'profane_words', 'profanity_score', 'num_exclamation_question',\n 'num_stops', 'num_dash', 'num_star_dollar', 'num_ampersand', 'num_hashtags', 'num_usertags',\n 'upper_case_density', 'sentiment', 'ngram']]\n\nprint(y.value_counts())\n\nmodel = SVC(class_weight='balanced')\nresults = cross_validate(model, X, y, scoring=scores, cv=10, n_jobs=-1)\ncf_matr = [[np.mean(results['test_cf_matrix_'+str(i)+str(j)]) for j in range(2)] for i in range(2)]\nfor row in cf_matr:\n for val in row:\n print(val, \"&\", end=\" \")\n print()\nprint(\"${:.2f}\".format(np.mean(results['test_accuracy'])), \"\\pm\", \"{:.2f}$\".format(np.std(results['test_accuracy'])),\n \"&\", \"${:.2f}\".format(np.mean(results['test_precision'])), \"\\pm\", \"{:.2f}$\".format(np.std(results['test_precision'])),\n \"&\", \"${:.2f}\".format(np.mean(results['test_f1'])), \"\\pm\", \"{:.2f}$\".format(np.std(results['test_f1'])), \"&\",\n \"${:.2f}\".format(np.mean(results['test_recall'])), \"\\pm\", \"{:.2f}$\".format(np.std(results['test_recall'])))\n\n# class1 = data.loc[data['label'] == 0]\n# class2 = data.loc[data['label'] == 1]\n#\n# for feature in ['num_url', 'num_emoji', 
'profane_words', 'profanity_score', 'num_exclamation_question',\n# 'num_stops', 'num_dash', 'num_star_dollar', 'num_ampersand', 'num_hashtags', 'num_usertags',\n# 'upper_case_density', 'sentiment', ]\n"
] | [
[
"numpy.sum",
"sklearn.svm.SVC",
"sklearn.svm.SVR",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.read_json",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.make_scorer",
"numpy.std",
"sklearn.model_selection.cross_validate",
"numpy.mean"
]
] |
demokratiefabrik/fabrikApi | [
"a56bb57d59a5e7cbbeeb77889c02d82f2a04c682"
] | [
"fabrikApi/plugins/CIR/views/plots/ARCHIV/beeplot.py"
] | [
"from io import StringIO\nimport matplotlib.pyplot as p\nimport numpy as np\nimport pandas as pd\nimport seaborn as s\nimport mplcursors\nimport matplotlib.collections\nfrom mpld3 import plugins\n\n# %matplotlib widget\n\nf = StringIO()\n\n# pip install matplotlib seaborn\n\ndef beeplot():\n\n # dataset\n # iris2 = pd.DataFrame(np.random.randn(777, 1), columns=list('A'))\n beta = 20\n iris = pd.DataFrame(np.random.exponential(beta, 50), columns=['sepal_length',])\n # iris = pd.DataFrame(np.array([[1, 2, 3], [4, 2, 6], [7, 1, 9],[1, 2, 3], [4, 2, 6], [7, 1, 9],[1, 2, 3], [4, 2, 6], [7, 1, 9]]),\n # columns=['sepal_length', 'species', 'c'])\n\n # max(iris['sepal_length'])\n # canvas (seaborn)\n # s.set() \n custom_params = {\n # Borders\n \"font.sans-serif\": 'Roboto',\n # Axis\n \"axes.spines.right\": False, \"axes.spines.top\": False, \"axes.spines.bottom\": False, \"axes.spines.left\": False}\n s.set_theme(style=\"ticks\", rc=custom_params)\n\n # s.set_theme(style=\"whitegrid\")\n # style=\"darkgrid\"\n\n\n # GROUP BY: y='species', \n \n # ax = s.stripplot(x='sepal_length', data=iris)\n # ax = s.violinplot(x=\"sepal_length\", data=iris, inner=None)\n # marker=\"D\", \n DOT_SIZE = 6\n\n fig, ax = p.subplots()\n \n ax = s.swarmplot(x='sepal_length', data=iris, color=\"white\", edgecolor=\"gray\", palette=\"Set2\", size=DOT_SIZE, alpha=.25, ax=ax)\n \n # tooltips\n # annot_x = (p.xlim()[1] + p.xlim()[0])/2\n # annot_y = (p.ylim()[1] + p.ylim()[0])/2\n # txt = ax.text(annot_x, annot_y, \"Chart Ready\", \n # ha='center', fontsize=36, color='#DD4012')\n # def hover(event):\n # txt.set_text(\"\")\n # fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n \n # fig, ax = plt.subplots()\n # sc = ax.scatter(x,y)\n # by default the tooltip is displayed \"onclick\"\n # we can change it by setting hover to True\n # cursor = mplcursors.cursor(fig, hover=True)\n # # by default the annotation displays the xy positions\n # # this is to change it to the countries name\n # @cursor.connect(\"add\")\n # def on_add(sel):\n # sel.annotation.set(text='TESTTEST') # tt[sel.target.index]\n\n # # labels\n # p.title('Graph')\n\n\n scatter_collections = [c for c in ax.collections if isinstance(c, matplotlib.collections.PathCollection)]\n \n # assert len(scatter_collections) == 1\n \n tooltip = plugins.PointLabelTooltip(scatter_collections[0], \n labels=list(iris['sepal_length']))\n \n plugins.connect(fig, tooltip)\n\n\n # OUTPUT\n p.savefig(f, format='svg')\n content = f.getvalue()\n f.truncate(0)\n return content\n "
] | [
[
"numpy.random.exponential",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots"
]
] |
sina-moammar/2021-neuroscience-project | [
"6e7fa52e0496361d20253945633edaab79734e0b"
] | [
"tests/test_cortex_model.py"
] | [
"import numpy as np\nimport networkit as nk\nfrom collections import defaultdict\nfrom typing import Dict\nimport pytest\n\nfrom cortex_model import cortex_model\n\n\ndef default_params() -> Dict[str, any]:\n size = 4\n graph = nk.graph.Graph(size, directed=True)\n graph.addEdge(0, 1)\n graph.addEdge(0, 2)\n graph.addEdge(1, 3)\n graph.addEdge(2, 3)\n return {\n 'graph': graph,\n 'inh_per': 0.2,\n 'v_th': np.float32(15),\n 'v_reset': np.float32(13.5),\n 'v_rev': np.float32(33.5),\n 't_m': np.float32(30),\n 't_ref': np.float32(3),\n 't_delay': np.float32(1),\n 't_stdp': np.float32(5),\n 'theta_stdp': np.float32(0.4),\n 'g_c': np.float32(.15)\n }\n \n\ndef test_constructor():\n params = default_params()\n graph = params['graph']\n g_levels = np.random.randint(2, 100, 1)\n\n model = cortex_model(**params, g_levels=g_levels)\n\n assert model.v_th == params['v_th']\n assert model.v_reset == params['v_reset']\n assert model.v_rev == params['v_rev']\n assert model.t_m == params['t_m']\n assert model.t_ref == params['t_ref']\n assert model.t_delay == params['t_delay']\n assert model.t_stdp == params['t_stdp']\n assert model.theta_stdp == params['theta_stdp']\n assert model.g_c == np.float32(params['g_c'] / g_levels)\n\n assert model.size == graph.numberOfNodes()\n assert np.array_equal(model.g_s, np.array([[0, 1, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 0, 0]]) * g_levels)\n assert np.all(model.v_s == np.zeros(model.size, np.float32))\n assert np.all(model.is_fired == np.zeros(model.size, bool))\n assert np.all(model.fired_neurons == np.array([]))\n assert np.all(model.is_in_ref == np.zeros(model.size, bool))\n assert np.all(model.steps_after_spike == np.zeros(model.size, np.uint16))\n assert model.post_syn_neurons == defaultdict(list)\n assert len(model.is_inh) == model.size\n\n assert model.v_exp_step == np.exp(-model.t_delay / model.t_m)\n assert model.ref_steps == np.uint16(model.t_ref / model.t_delay)\n assert np.all(model.pre_syn_spikes == np.zeros(model.size))\n assert np.all(model.neurons == np.arange(model.size))\n \n \ndef test_post_syn_neurons():\n params = default_params()\n \n model = cortex_model(**params)\n \n assert np.array_equal(model.get_post_syn_neurons(0), [1, 2])\n assert np.array_equal(model.get_post_syn_neurons(1), [3])\n assert np.array_equal(model.get_post_syn_neurons(2), [3])\n assert np.array_equal(model.get_post_syn_neurons(3), [])\n \n \ndef test_neurons_dynamics():\n params = default_params()\n v_exp_step = np.exp(-params['t_delay'] / params['t_m'])\n\n # full mode\n g_levels = np.random.randint(3, 100, 1)\n model = cortex_model(**params, g_levels=g_levels, mode='full')\n \n model.is_inh[:] = [0, 0, 1, 0]\n v_s_0 = np.array([10, 0.98, 0.95, 0.3], np.float32) * params['v_th']\n model.v_s[:] = v_s_0\n model.g_s[0, [1, 2]] -= 1\n \n # step 1\n model.neurons_dynamics()\n v_s_1 = v_s_0 * v_exp_step\n assert np.array_equal(model.v_s, v_s_1)\n assert np.array_equal(model.is_fired, [1, 0, 0, 0])\n assert np.array_equal(model.fired_neurons, [0])\n assert np.array_equal(model.is_in_ref, [1, 0, 0, 0])\n assert np.array_equal(model.steps_after_spike, [0, 0, 0, 0])\n \n # step 2\n model.neurons_dynamics()\n v_s_2 = v_s_1 * v_exp_step\n v_s_2[0] = params['v_reset']\n v_s_2[1:3] += (params['v_rev'] - v_s_2[1:3]) * np.float32(params['g_c'] / g_levels) * model.g_s[0, [1, 2]]\n assert np.array_equal(model.v_s, v_s_2)\n assert np.array_equal(model.is_fired, [0, 1, 1, 0])\n assert np.array_equal(model.fired_neurons, [1, 2])\n assert np.array_equal(model.is_in_ref, [1, 1, 1, 0])\n 
assert np.array_equal(model.steps_after_spike, [1, 0, 0, 0])\n \n # step 3\n model.neurons_dynamics()\n v_s_3 = v_s_2 * v_exp_step\n v_s_3[0:3] = params['v_reset']\n assert np.array_equal(model.v_s, v_s_3)\n assert np.array_equal(model.is_fired, [0, 0, 0, 0])\n assert np.array_equal(model.fired_neurons, [])\n assert np.array_equal(model.is_in_ref, [1, 1, 1, 0])\n assert np.array_equal(model.steps_after_spike, [2, 1, 1, 0])\n \n # step 4\n model.neurons_dynamics()\n assert np.array_equal(model.is_in_ref, [0, 1, 1, 0])\n assert np.array_equal(model.steps_after_spike, [0, 2, 2, 0])\n \n \n # mask mode\n g_levels = 2\n with pytest.raises(AssertionError):\n model = cortex_model(**params, g_levels=g_levels, mode='mask')\n \n model = cortex_model(**params, mode='mask')\n \n model.is_inh[:] = [0, 0, 1, 0]\n v_s_0 = np.array([10, 0.98, 0.95, 0.3], np.float32) * params['v_th']\n model.v_s[:] = v_s_0\n \n # step 1\n model.neurons_dynamics()\n v_s_1 = v_s_0 * v_exp_step\n assert np.array_equal(model.v_s, v_s_1)\n assert np.array_equal(model.is_fired, [1, 0, 0, 0])\n assert np.array_equal(model.fired_neurons, [0])\n assert np.array_equal(model.is_in_ref, [1, 0, 0, 0])\n assert np.array_equal(model.steps_after_spike, [0, 0, 0, 0])\n \n # step 2\n model.neurons_dynamics()\n v_s_2 = v_s_1 * v_exp_step\n v_s_2[0] = params['v_reset']\n v_s_2[1:3] += (params['v_rev'] - v_s_2[1:3]) * params['g_c']\n assert np.array_equal(model.v_s, v_s_2)\n assert np.array_equal(model.is_fired, [0, 1, 1, 0])\n assert np.array_equal(model.fired_neurons, [1, 2])\n assert np.array_equal(model.is_in_ref, [1, 1, 1, 0])\n assert np.array_equal(model.steps_after_spike, [1, 0, 0, 0])\n \n # step 3\n model.neurons_dynamics()\n v_s_3 = v_s_2 * v_exp_step\n v_s_3[0:3] = params['v_reset']\n assert np.array_equal(model.v_s, v_s_3)\n assert np.array_equal(model.is_fired, [0, 0, 0, 0])\n assert np.array_equal(model.fired_neurons, [])\n assert np.array_equal(model.is_in_ref, [1, 1, 1, 0])\n assert np.array_equal(model.steps_after_spike, [2, 1, 1, 0])\n \n # step 4\n model.neurons_dynamics()\n assert np.array_equal(model.is_in_ref, [0, 1, 1, 0])\n assert np.array_equal(model.steps_after_spike, [0, 2, 2, 0])\n\n\ndef test_c_syn():\n params = default_params()\n\n model = cortex_model(**params)\n \n model.is_inh[:] = [0, 0, 1, 0]\n v_s_0 = np.array([10, 0.98, 0.95, 0.3], np.float32) * params['v_th']\n model.v_s[:] = v_s_0\n \n # step 0\n assert model.c_syn() == 0\n \n # step 1\n model.neurons_dynamics()\n assert model.c_syn() == 1 / 16\n \n # step 2\n model.neurons_dynamics()\n assert model.c_syn() == 1 / 4\n \n # step 3\n model.neurons_dynamics()\n assert model.c_syn() == 0\n \n \ndef test_restart():\n params = default_params()\n\n model = cortex_model(**params)\n \n model.is_inh[:] = [0, 0, 1, 0]\n v_s_0 = np.array([10, 0.98, 0.95, 0.3], np.float32) * params['v_th']\n model.v_s[:] = v_s_0\n \n model.neurons_dynamics()\n model.restart()\n \n assert np.all((0 <= model.v_s) & (model.v_s < params['v_th']))\n assert np.array_equal(model.neurons[model.is_fired], model.fired_neurons) \n assert np.array_equal(model.is_in_ref, model.is_fired)\n assert np.array_equiv(model.steps_after_spike, 0)\n \n fired_count = 0\n sample_size = 1000\n abv_th_per = np.random.rand()\n for _ in range(sample_size):\n model.restart(abv_th_per)\n fired_count += len(model.fired_neurons)\n p_fired = fired_count / sample_size / model.size\n # 6 sigma error margin\n error_margin = 6 * np.sqrt(abv_th_per * (1 - abv_th_per) / model.size / sample_size)\n assert (p_fired - 
error_margin) < abv_th_per < (p_fired + error_margin)\n "
] | [
[
"numpy.sqrt",
"numpy.array_equiv",
"numpy.zeros",
"numpy.float32",
"numpy.uint16",
"numpy.exp",
"numpy.arange",
"numpy.all",
"numpy.random.rand",
"numpy.array_equal",
"numpy.array",
"numpy.random.randint"
]
] |
andresgreen-byte/Laboratorio-1--Inversion-de-Capital | [
"8a4707301d19c3826c31026c4077930bcd6a8182"
] | [
"env/Lib/site-packages/pandas/tests/series/methods/test_isin.py"
] | [
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Series,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import PeriodArray\n\n\nclass TestSeriesIsIn:\n def test_isin(self):\n s = Series([\"A\", \"B\", \"C\", \"a\", \"B\", \"B\", \"A\", \"C\"])\n\n result = s.isin([\"A\", \"C\"])\n expected = Series([True, False, True, False, False, False, True, True])\n tm.assert_series_equal(result, expected)\n\n # GH#16012\n # This specific issue has to have a series over 1e6 in len, but the\n # comparison array (in_list) must be large enough so that numpy doesn't\n # do a manual masking trick that will avoid this issue altogether\n s = Series(list(\"abcdefghijk\" * 10 ** 5))\n # If numpy doesn't do the manual comparison/mask, these\n # unorderable mixed types are what cause the exception in numpy\n in_list = [-1, \"a\", \"b\", \"G\", \"Y\", \"Z\", \"E\", \"K\", \"E\", \"S\", \"I\", \"R\", \"R\"] * 6\n\n assert s.isin(in_list).sum() == 200000\n\n def test_isin_with_string_scalar(self):\n # GH#4763\n s = Series([\"A\", \"B\", \"C\", \"a\", \"B\", \"B\", \"A\", \"C\"])\n msg = (\n r\"only list-like objects are allowed to be passed to isin\\(\\), \"\n r\"you passed a \\[str\\]\"\n )\n with pytest.raises(TypeError, match=msg):\n s.isin(\"a\")\n\n s = Series([\"aaa\", \"b\", \"c\"])\n with pytest.raises(TypeError, match=msg):\n s.isin(\"aaa\")\n\n def test_isin_with_i8(self):\n # GH#5021\n\n expected = Series([True, True, False, False, False])\n expected2 = Series([False, True, False, False, False])\n\n # datetime64[ns]\n s = Series(date_range(\"jan-01-2013\", \"jan-05-2013\"))\n\n result = s.isin(s[0:2])\n tm.assert_series_equal(result, expected)\n\n result = s.isin(s[0:2].values)\n tm.assert_series_equal(result, expected)\n\n # fails on dtype conversion in the first place\n result = s.isin(np.asarray(s[0:2].values).astype(\"datetime64[D]\"))\n tm.assert_series_equal(result, expected)\n\n result = s.isin([s[1]])\n tm.assert_series_equal(result, expected2)\n\n result = s.isin([np.datetime64(s[1])])\n tm.assert_series_equal(result, expected2)\n\n result = s.isin(set(s[0:2]))\n tm.assert_series_equal(result, expected)\n\n # timedelta64[ns]\n s = Series(pd.to_timedelta(range(5), unit=\"d\"))\n result = s.isin(s[0:2])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"empty\", [[], Series(dtype=object), np.array([])])\n def test_isin_empty(self, empty):\n # see GH#16991\n s = Series([\"a\", \"b\"])\n expected = Series([False, False])\n\n result = s.isin(empty)\n tm.assert_series_equal(expected, result)\n\n def test_isin_read_only(self):\n # https://github.com/pandas-dev/pandas/issues/37174\n arr = np.array([1, 2, 3])\n arr.setflags(write=False)\n s = Series([1, 2, 3])\n result = s.isin(arr)\n expected = Series([True, True, True])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [object, None])\n def test_isin_dt64_values_vs_ints(self, dtype):\n # GH#36621 dont cast integers to datetimes for isin\n dti = date_range(\"2013-01-01\", \"2013-01-05\")\n ser = Series(dti)\n\n comps = np.asarray([1356998400000000000], dtype=dtype)\n\n res = dti.isin(comps)\n expected = np.array([False] * len(dti), dtype=bool)\n tm.assert_numpy_array_equal(res, expected)\n\n res = ser.isin(comps)\n tm.assert_series_equal(res, Series(expected))\n\n res = pd.core.algorithms.isin(ser, comps)\n tm.assert_numpy_array_equal(res, expected)\n\n def test_isin_tzawareness_mismatch(self):\n dti = date_range(\"2013-01-01\", 
\"2013-01-05\")\n ser = Series(dti)\n\n other = dti.tz_localize(\"UTC\")\n\n res = dti.isin(other)\n expected = np.array([False] * len(dti), dtype=bool)\n tm.assert_numpy_array_equal(res, expected)\n\n res = ser.isin(other)\n tm.assert_series_equal(res, Series(expected))\n\n res = pd.core.algorithms.isin(ser, other)\n tm.assert_numpy_array_equal(res, expected)\n\n def test_isin_period_freq_mismatch(self):\n dti = date_range(\"2013-01-01\", \"2013-01-05\")\n pi = dti.to_period(\"M\")\n ser = Series(pi)\n\n # We construct another PeriodIndex with the same i8 values\n # but different dtype\n dtype = dti.to_period(\"Y\").dtype\n other = PeriodArray._simple_new(pi.asi8, dtype=dtype)\n\n res = pi.isin(other)\n expected = np.array([False] * len(pi), dtype=bool)\n tm.assert_numpy_array_equal(res, expected)\n\n res = ser.isin(other)\n tm.assert_series_equal(res, Series(expected))\n\n res = pd.core.algorithms.isin(ser, other)\n tm.assert_numpy_array_equal(res, expected)\n\n @pytest.mark.parametrize(\"values\", [[-9.0, 0.0], [-9, 0]])\n def test_isin_float_in_int_series(self, values):\n # GH#19356 GH#21804\n ser = Series(values)\n result = ser.isin([-9, -0.5])\n expected = Series([True, False])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [\"boolean\", \"Int64\", \"Float64\"])\n @pytest.mark.parametrize(\n \"data,values,expected\",\n [\n ([0, 1, 0], [1], [False, True, False]),\n ([0, 1, 0], [1, pd.NA], [False, True, False]),\n ([0, pd.NA, 0], [1, 0], [True, False, True]),\n ([0, 1, pd.NA], [1, pd.NA], [False, True, True]),\n ([0, 1, pd.NA], [1, np.nan], [False, True, False]),\n ([0, pd.NA, pd.NA], [np.nan, pd.NaT, None], [False, False, False]),\n ],\n )\n def test_isin_masked_types(self, dtype, data, values, expected):\n # GH#42405\n ser = Series(data, dtype=dtype)\n\n result = ser.isin(values)\n expected = Series(expected, dtype=\"boolean\")\n\n tm.assert_series_equal(result, expected)\n\n\[email protected]\ndef test_isin_large_series_mixed_dtypes_and_nan():\n # https://github.com/pandas-dev/pandas/issues/37094\n # combination of object dtype for the values and > 1_000_000 elements\n ser = Series([1, 2, np.nan] * 1_000_000)\n result = ser.isin({\"foo\", \"bar\"})\n expected = Series([False] * 3 * 1_000_000)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"array,expected\",\n [\n (\n [0, 1j, 1j, 1, 1 + 1j, 1 + 2j, 1 + 1j],\n Series([False, True, True, False, True, True, True], dtype=bool),\n )\n ],\n)\ndef test_isin_complex_numbers(array, expected):\n # GH 17927\n result = Series(array).isin([1j, 1 + 1j, 1 + 2j])\n tm.assert_series_equal(result, expected)\n"
] | [
[
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"pandas.date_range",
"pandas.core.arrays.PeriodArray._simple_new",
"numpy.asarray",
"pandas._testing.assert_series_equal",
"pandas.core.algorithms.isin",
"numpy.datetime64",
"numpy.array"
]
] |
Ynakatsuka/birdclef-2021 | [
"d7cf7b39e3164a75547ee50cc9a29bd5ed4c29bd"
] | [
"src/kvt/models/layers/pooling.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn.parameter import Parameter\n\nAdaptiveAvgPool2d = nn.AdaptiveAvgPool2d\n\n\nclass AdaptiveConcatPool2d(nn.Module):\n def __init__(self, sz=None):\n super().__init__()\n sz = sz or (1, 1)\n self.ap = nn.AdaptiveAvgPool2d(sz)\n self.mp = nn.AdaptiveMaxPool2d(sz)\n\n def forward(self, x):\n return torch.cat([self.mp(x), self.ap(x)], 1)\n\n\ndef gem(x, p=3, eps=1e-6):\n return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(1.0 / p)\n\n\nclass GeM(nn.Module):\n def __init__(self, p=3, inference_p=3, eps=1e-6):\n super(GeM, self).__init__()\n self.p = Parameter(torch.ones(1) * p)\n self.inference_p = inference_p\n self.eps = eps\n\n def forward(self, x):\n if self.training:\n return gem(x, p=self.p, eps=self.eps)\n else:\n return gem(x, p=self.inference_p, eps=self.eps)\n\n def __repr__(self):\n return (\n self.__class__.__name__\n + \"(\"\n + \"p=\"\n + \"{:.4f}\".format(self.p.data.tolist()[0])\n + \", \"\n + \"eps=\"\n + str(self.eps)\n + \")\"\n )\n"
] | [
[
"torch.nn.AdaptiveAvgPool2d",
"torch.ones",
"torch.nn.AdaptiveMaxPool2d"
]
] |
FudanYuan/faultLocalization | [
"5133ee5af48f7d4c8ba05ef9a545139a38f417da"
] | [
"hotspot/code/localization.py"
] | [
"## coding: utf-8\nimport pandas as pd\nimport numpy as np\nimport time\nimport math\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom package.utils import KPIPoint\nfrom package.utils import KPISet\nfrom package.utils import Transformer\nfrom package.HotSpot import HotSpot\n\ndef valid():\n #### 加载数据集\n # kSet_pred = KPISet({}, {})\n # kSet_pred.load('../result/metadata/KPISetValidPredict')\n # kSet_pred.test()\n\n # 使用前一周数据填充第二周数据\n # 读取源数据\n timestamp_start = 1535731200\n timestamp = 1536394500\n timestamp_interval = 5 * 60\n\n # 当前周数据\n file_path = '../2019AIOps_data_valid/'\n kSet = Transformer().transformKPIData2KPISet(file_path, timestamp, timestamp, timestamp_interval)\n # kSet.test()\n\n # 填充预测值\n leaf_true = []\n leaf_pred = []\n for leaf in kSet._KPIPoints[timestamp]._leaf:\n index = (timestamp - timestamp_start) / timestamp_interval\n ts = pd.read_csv('../result/leaves/result_arima/success/%s.csv' % ('&').join(leaf))\n predict = ts['pred'][index]\n kSet._KPIPoints[timestamp]._leaf[leaf][1] = predict\n leaf_true.append(kSet._KPIPoints[timestamp]._leaf[leaf][0])\n leaf_pred.append(predict)\n # print(('&').join(leaf), kSet._KPIPoints[timestamp]._leaf[leaf])\n\n # plt.figure(figsize=(40, 10))\n # plt.plot(leaf_true, label=\"true\", color='green')\n # plt.plot(leaf_pred, label=\"predicted\")\n # plt.legend()\n # plt.show()\n\n #### 读取异常时间戳\n outlier = pd.read_csv('../Anomalytime_data_valid.csv')\n timestamps = outlier['timestamp'].tolist()\n ps_threshold = 0.99\n ep_threshold = 0.01\n max_iter = 10\n\n res = {}\n res['timestamp'] = []\n res['detect_set'] = []\n sTime = time.time()\n for timestamp in tqdm(timestamps):\n ts = timestamp / 1000\n kPoint = kSet._KPIPoints[ts]\n layer_max = len(kPoint._attribute_names)\n hotSpot = HotSpot(kPoint, layer_max, ps_threshold, ep_threshold, max_iter)\n rootCauseSet = hotSpot.find_root_cause_set_revised()\n print(rootCauseSet[0][0])\n res['timestamp'].append(timestamp)\n res['detect_set'].append(list(rootCauseSet[0][0]))\n break\n\n eTime = time.time()\n print('runtime %fs' % (eTime - sTime))\n\n print(res)\n\n # 保存文件\n res = pd.DataFrame(res)\n res = res.sort_values(by='timestamp').reset_index(drop=True)\n res = res.merge(outlier, on='timestamp', how='left')\n res.to_csv('../result/root_cause_set_valid.csv', index=False)\n\n # 评估\n TP = 0\n FN = 0\n FP = 0\n for ts in res['timestamp'].tolist():\n root_cause = res[res['timestamp'] == ts]['real_set']\n root_cause_cal = res[res['timestamp'] == ts]['detect_set']\n tmp = 0\n for s1 in root_cause:\n for s2 in root_cause_cal:\n if len(s1) == len(s2) and len(set(s1).intersection(set(s2))) == len(s1):\n tmp += 1\n break\n TP += tmp\n FN += len(root_cause) - tmp\n FP += len(root_cause_cal) - tmp\n if TP == 0:\n TP += 1\n print(TP, FP, FN)\n Precision = TP / (TP + FP)\n Recall = TP / (TP + FN)\n FScore = (2 * Precision * Recall) / (Precision + Recall)\n print('F-score = %f' % FScore)\n\ndef valid2():\n #### 加载数据集\n # kSet_pred = KPISet({}, {})\n # kSet_pred.load('../result/metadata/KPISetValidPredict')\n # kSet_pred.test()\n\n # 使用前一周数据填充第二周数据\n # 读取源数据\n timestamp = 1536394500\n timestamp_interval = 5 * 60\n T = timestamp_interval * 288 * 7\n\n # 前一周数据\n file_path = '../2019AIOps_data/'\n kSet_train = Transformer().transformKPIData2KPISet(file_path, timestamp - T, timestamp - T, timestamp_interval)\n # kSet_train.test()\n\n # 当前周数据\n file_path = '../2019AIOps_data_valid/'\n kSet = Transformer().transformKPIData2KPISet(file_path, timestamp, timestamp, timestamp_interval)\n # kSet.test()\n\n 
# 填充预测值\n leaf_true = []\n leaf_pred = []\n for leaf in kSet._KPIPoints[timestamp]._leaf:\n predict = 0\n if leaf in kSet_train._KPIPoints[timestamp-T]._leaf:\n predict = kSet_train._KPIPoints[timestamp-T]._leaf[leaf][0]\n kSet._KPIPoints[timestamp]._leaf[leaf][1] = predict\n leaf_true.append(kSet._KPIPoints[timestamp]._leaf[leaf][0])\n leaf_pred.append(predict)\n # print(('&').join(leaf), kSet._KPIPoints[timestamp]._leaf[leaf])\n\n # plt.figure(figsize=(40, 10))\n # plt.plot(leaf_true, label=\"true\", color='green')\n # plt.plot(leaf_pred, label=\"predicted\")\n # plt.legend()\n # plt.show()\n\n #### 读取异常时间戳\n outlier = pd.read_csv('../Anomalytime_data_valid.csv')\n timestamps = outlier['timestamp'].tolist()\n ps_threshold = 0.99\n ep_threshold = 0.01\n max_iter = 10\n\n res = {}\n res['timestamp'] = []\n res['detect_set'] = []\n sTime = time.time()\n for timestamp in tqdm(timestamps):\n ts = timestamp / 1000\n kPoint = kSet._KPIPoints[ts]\n layer_max = len(kPoint._attribute_names)\n hotSpot = HotSpot(kPoint, layer_max, ps_threshold, ep_threshold, max_iter)\n rootCauseSet = hotSpot.find_root_cause_set_revised()\n print(rootCauseSet[0][0])\n res['timestamp'].append(timestamp)\n res['detect_set'].append(list(rootCauseSet[0][0]))\n break\n\n eTime = time.time()\n print('runtime %fs' % (eTime - sTime))\n\n print(res)\n\n # 保存文件\n res = pd.DataFrame(res)\n res = res.sort_values(by='timestamp').reset_index(drop=True)\n res = res.merge(outlier, on='timestamp', how='left')\n res.to_csv('../result/root_cause_set_valid.csv', index=False)\n\n # 评估\n TP = 0\n FN = 0\n FP = 0\n for ts in res['timestamp'].tolist():\n root_cause = res[res['timestamp'] == ts]['real_set']\n root_cause_cal = res[res['timestamp'] == ts]['detect_set']\n tmp = 0\n for s1 in root_cause:\n for s2 in root_cause_cal:\n if len(s1) == len(s2) and len(set(s1).intersection(set(s2))) == len(s1):\n tmp += 1\n break\n TP += tmp\n FN += len(root_cause) - tmp\n FP += len(root_cause_cal) - tmp\n if TP == 0:\n TP += 1\n print(TP, FP, FN)\n Precision = TP / (TP + FP)\n Recall = TP / (TP + FN)\n FScore = (2 * Precision * Recall) / (Precision + Recall)\n print('F-score = %f' % FScore)\n\ndef test():\n #### 加载数据集\n kSet_pred = KPISet({}, {})\n kSet_pred.load('../result/metadata/KPISetTestPredict2')\n # kSet_pred.test()\n #### 读取异常时间戳\n outlier = pd.read_csv('../Anomalytime_data_test1.csv')\n outlier = outlier['timestamp'].tolist()\n ps_threshold = 0.98\n ep_threshold = 0.01\n max_iter = 10\n\n res = {}\n res['timestamp'] = []\n res['set'] = []\n sTime = time.time()\n for timestamp in tqdm(outlier):\n ts = timestamp / 1000\n kPoint = kSet_pred._KPIPoints[ts]\n layer_max = len(kPoint._attribute_names)\n hotSpot = HotSpot(kPoint, layer_max, ps_threshold, ep_threshold, max_iter)\n rootCauseSet = hotSpot.find_root_cause_set_revised()\n res['timestamp'].append(timestamp)\n sets = []\n for ele in rootCauseSet[0][0]:\n sets.append(\"&\".join(ele))\n res['set'].append(';'.join(sets))\n break\n eTime = time.time()\n print('runtime %fs' % (eTime - sTime))\n res = pd.DataFrame(res)\n res.to_csv('../result/submit%s.csv' % time.strftime(\"%Y%m%d%H%M%S\", time.localtime(eTime)), index=False)\n\nif __name__ == \"__main__\":\n valid()\n # valid2()\n # test()"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
SerafinH/triplet_loss_kws | [
"a203f6063517fd5236c9df9ce28318fec7afb141"
] | [
"loss/triplet.py"
] | [
"import torch\nimport torch.nn.functional as F\nfrom nemo.backends.pytorch.nm import LossNM\nfrom nemo.core.neural_types import *\nfrom nemo.utils.decorators import add_port_docs\n\n\nclass OnlineTripletLoss(LossNM):\n \"\"\"\n Online Triplet loss\n Takes a batch of embeddings and corresponding labels.\n Triplets are generated using triplet_selector object that take embeddings and targets and return indices of\n triplets\n \"\"\"\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n \"embeds\": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),\n \"targets\": NeuralType('B', LabelsType()),\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n loss:\n NeuralType(LossType)\n \"\"\"\n return {\"loss\": NeuralType(elements_type=LossType())}\n\n def __init__(self, margin, triplet_selector):\n super().__init__()\n self.margin = margin\n self.triplet_selector = triplet_selector\n\n def _loss(self, embeddings, target):\n embeddings = torch.flatten(embeddings, start_dim=-2)\n triplets = self.triplet_selector.get_triplets(embeddings, target)\n\n if embeddings.is_cuda:\n triplets = triplets.cuda()\n\n ap_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 1]]).pow(2).sum(1) # .pow(.5)\n an_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 2]]).pow(2).sum(1) # .pow(.5)\n losses = F.relu(ap_distances - an_distances + self.margin)\n\n return losses.mean()\n\n def _loss_function(self, **kwargs):\n return self._loss(*(kwargs.values()))\n"
] | [
[
"torch.flatten",
"torch.nn.functional.relu"
]
] |
jardinier/phlox | [
"f312569ec983b5f27c75846b34debc04fe7bdf98"
] | [
"PhloxAR/dc1394/dc_cam.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\nfrom __future__ import absolute_import, unicode_literals\nfrom ctypes import byref, POINTER, c_char, c_uint32, c_float, c_int\nfrom numpy import fromstring, ndarray\nfrom threading import Thread, Lock, Condition\nfrom Queue import Queue\n\nfrom .core import *\nfrom .mode import mode_map, create_mode\n\n\nclass DCError(Exception):\n \"\"\"\n Base class for exceptions.\n \"\"\"\n func = None\n args = None\n errs = None\n\n def __repr__(self):\n return \"DCError: {}{} -> {}{}\".format(self.func.__name__,\n self.args, self.errs,\n err_val[self.errs])\n\n\nclass DCCameraError(DCError, RuntimeError):\n pass\n\n\nclass DCLibrary(object):\n \"\"\"\n This wraps the dc1394 library object which is a nuisance to have around.\n This is bad design on behave of DC1394. Oh well... This object must stay\n valid until all cameras are closed. But then use it well: it not only\n opens the library, collects a reference to the library and the camera list.\n \"\"\"\n def __init__(self):\n self._dll = dll\n self._handle = dll.dc1394_new()\n\n def __del__(self):\n self.close()\n\n @property\n def handle(self):\n \"\"\"\n The handle to the library context.\n \"\"\"\n return self._handle\n\n @property\n def dll(self):\n return self._dll\n\n def close(self):\n \"\"\"\n Close the library permanently. All camera handles created with it\n are invalidated by this.\n \"\"\"\n if self._handle is not None:\n self._dll.dc1394_free(self._handle)\n\n self._handle = None\n\n def enumerate_cameras(self):\n \"\"\"\n Enumerate the cameras currently attached to the bus.\n :return: a list of dictionaries with the following keys per camera:\n - unit\n - guid\n - vendor\n - model\n \"\"\"\n l = POINTER(camera_list_t)()\n self._dll.dc1394_camera_enumerate(self.handle, byref(l))\n\n # cams -> clist renamed for using cam as a camera\n clist = []\n for i in range(l.contents.num):\n ids = l.contents.ids[i]\n # we can be nice to the users, providing some more\n # than mere GUID and unitIDs\n # also, if this fails, we have a problem.\n cam = self._dll.dc1394_camera_new(self.handle, ids.guid)\n\n # it seems not all cameras have these fields:\n vendor = cam.contents.vendor if cam.contents.vendor else 'unknown'\n model = cam.contents.model if cam.contents.model else 'unknown'\n\n clist.append({\n 'uint': ids.unit,\n 'guid': ids.guid,\n 'vendor': vendor,\n 'model': model\n })\n\n self._dll.dc1394_camera_free(cam)\n\n self._dll.dc1394_camera_free_list(l)\n\n return clist\n\n\nclass DCImage(ndarray):\n \"\"\"\n This class is the image returned by the camera. 
It is basically a\n numpy array with some additional information (like timestamps).\n It is not based on the video_frame structure of the dc1394, but\n rather augments the information from numpy through information\n of the acquisition of this image.\n \"\"\"\n # ROI position\n _position = None\n # Size of a data packet in bytes\n _packet_size = None\n # Number of packets per frame\n _packets_per_frame = None\n # IEEE bus time when the picture was acquired\n _timestamp = None\n # Number of frames in the ring buffer that are yet to be accessed by user\n _frames_behind = None\n # frame position in the ring buffer\n _id = None\n\n @property\n def position(self):\n \"\"\"\n ROI position (offset)\n \"\"\"\n return self._position\n\n @property\n def packet_size(self):\n \"\"\"\n The size of a data packet in bytes.\n \"\"\"\n return self._packet_size\n\n @property\n def packets_per_frame(self):\n \"\"\"\n Number of packets per frame.\n \"\"\"\n return self._packets_per_frame\n\n @property\n def timestamp(self):\n \"\"\"\n The IEEE Bus time when the picture was acquired (microseconds)\n \"\"\"\n return self._timestamp\n\n @property\n def frames_behind(self):\n \"\"\"\n The number of frames in the ring buffer that are yet to be accessed\n by the user\n \"\"\"\n return self._frames_behind\n\n @property\n def id(self):\n \"\"\"\n The frame position in the ring buffer.\n \"\"\"\n return self._id\n\n\nclass _DCCamAcquisitionThread(Thread):\n \"\"\"\n This class is created and launched whenever a camera is start.\n It continuously acquires the pictures from the camera and sets\n a condition to inform other threads of the arrival of a new\n picture.\n \"\"\"\n _cam = None\n _should_abort = None\n _last_frame = None\n _condition = None\n # a Lock object from the threading module\n _abort_lock = None\n\n def __init__(self, cam, condition):\n super(_DCCamAcquisitionThread, self).__init__()\n self._cam = cam\n self._should_abort = False\n self._last_frame = None\n self._condition = condition\n self._abort_lock = Lock()\n self.start()\n\n def abort(self):\n self._abort_lock.acquire()\n self._should_abort = True\n self._abort_lock.release()\n\n def run(self):\n \"\"\"\n Core function which contains the acquisition loop\n \"\"\"\n while True:\n self._abort_lock.acquire()\n sa = self._should_abort\n self._abort_lock.release()\n\n if sa:\n break\n\n if self._last_frame:\n self._cam.dll.dc1394_capture_enqueue(self._cam.cam,\n self._last_frame)\n\n frame = POINTER(video_frame_t)()\n self._cam.dll.dc1394_capture_dequeue(\n self._cam.cam, capture_policies['CAPTURE_POLICY_WAIT'],\n byref(frame))\n\n dtype = c_char * frame.contents.image_bytes\n buf = dtype.from_address(frame.contents.image)\n\n self._last_frame = frame\n self._condition.acquire()\n\n # generate an Image class from the buffer\n img = fromstring(buf, dtype=self._cam.mode.dtype).reshape(\n self._cam.mode.shape\n ).view(DCImage)\n\n img._position = frame.contents.position\n img._packet_size = frame.contents.packet_size,\n img._packets_per_frame = frame.contents.packets_per_frame\n img._timestamp = frame.contents.timestamp\n img._frames_behind = frame.contents.frames_behind\n img._id = frame.contents.id\n self._cam._current_img = img\n\n # is the camera streaming to a queue?\n if self._cam.queue:\n # will throw an exception if you're to slow while processing\n self._cam.queue.put_nowait(img)\n\n self._condition.notifyAll()\n self._condition.release()\n\n # return the last frame\n if self._last_frame:\n self._cam.dll.dc1394_capture_enqueue(self._cam.cam,\n 
self._last_frame)\n self._last_frame = None\n\n\nclass DCCameraProperty(object):\n \"\"\"\n This class implements a simple Property of the camera.\n \"\"\"\n def __init__(self, cam, name, cid, absolute_capable):\n self._id = cid\n self._name = name\n self._absolute_capable = absolute_capable\n self._dll = cam.dll\n self._cam = cam\n\n @property\n def val(self):\n \"\"\"\n The current value of this property\n \"\"\"\n if self._name == \"white_balance\":\n # white has its own call since it returns 2 values\n blue = c_uint32()\n red = c_uint32()\n self._dll.dc1394_feature_whitebalance_get_value(\n self._cam.cam, byref(blue), byref(red)\n )\n return blue.value, red.value\n\n if self._absolute_capable:\n val = c_float()\n self._dll.dc1394_feature_get_absolute_value(\n self._cam.cam, self._id, byref(val)\n )\n\n if self._name == \"shutter\":\n # We want shutter in ms -> if it is absolute capable.\n val.value *= 1000.\n else:\n val = c_uint32()\n self._dll.dc1394_feature_get_value(\n self._cam.cam, self._id, byref(val)\n )\n return val.value\n\n @val.setter\n def val(self, value):\n if self._name == \"white_balance\":\n # white has its own call since it returns 2 values\n blue, red = value\n self._dll.dc1394_feature_whitebalance_set_value(\n self._cam.cam, blue, red\n )\n else:\n if self._absolute_capable:\n val = float(value)\n # We want shutter in ms\n if self._name == \"shutter\":\n val /= 1000.\n self._dll.dc1394_feature_set_absolute_value(\n self._cam.cam, self._id, val\n )\n else:\n val = int(value)\n self._dll.dc1394_feature_set_value(\n self._cam.cam, self._id, val\n )\n\n @property\n def range(self):\n \"\"\"\n The RO foo property.\n \"\"\"\n if self._absolute_capable:\n minval, maxval = c_float(), c_float()\n self._dll.dc1394_feature_get_absolute_boundaries(self._cam.cam,\n self._id,\n byref(minval),\n byref(maxval))\n # We want shutter in ms\n if self._name == \"shutter\":\n minval.value *= 1000\n maxval.value *= 1000\n else:\n minval, maxval = c_uint32(), c_uint32()\n self._dll.dc1394_feature_get_boundaries(\n self._cam.cam, self._id, byref(minval), byref(maxval)\n )\n return minval.value, maxval.value\n\n @property\n def can_be_disabled(self):\n \"\"\"\n Can this property be disabled\n \"\"\"\n k = bool_t()\n self._dll.dc1394_feature_is_switchable(self._cam.cam, self._id,\n byref(k))\n return bool(k.value)\n\n @property\n def on(self):\n \"\"\"\n Toggle this features on and off;\n For the trigger this means the external trigger ON/OFF\n \"\"\"\n\n k = bool_t()\n if self._name.lower() == \"trigger\":\n self._dll.dc1394_external_trigger_get_power(\n self._cam.cam, byref(k)\n )\n else:\n self._dll.dc1394_feature_get_power(self._cam.cam, self._id,\n byref(k))\n return bool(k.value)\n\n @on.setter\n def on(self, value):\n k = bool(value)\n if self._name.lower() == \"trigger\":\n self._dll.dc1394_external_trigger_set_power(self._cam.cam, k)\n else:\n self._dll.dc1394_feature_set_power(self._cam.cam, self._id, k)\n\n @property\n def pos_modes(self):\n \"\"\"\n The possible control modes for this features (auto,manual,...)\n \"\"\"\n if self._name.lower() == \"trigger\":\n # we need a trick:\n finfo = feature_info_t()\n finfo.id = self._id\n self._dll.dc1394_feature_get(self._cam.cam, byref(finfo))\n modes = finfo.trigger_modes\n\n return [trigger_modes[modes.modes[i]] for i in range(modes.num)]\n modes = feature_modes_t()\n self._dll.dc1394_feature_get_modes(self._cam.cam, self._id,\n byref(modes))\n return [feature_modes[modes.modes[i]] for i in range(modes.num)]\n\n @property\n def 
mode(self):\n \"\"\"The current control mode this features is running in.\n For the trigger it shows the trigger modes (from the dc1394\n website):\n mode 0: Exposure starts with a falling edge and stops when\n the the exposure specified by the SHUTTER features\n is elapsed.\n mode 1: Exposure starts with a falling edge and stops with\n the next rising edge.\n mode 2: The camera starts the exposure at the first falling\n edge and stops the integration at the nth falling\n edge. The parameter n is a parameter of the trigger\n that can be set with camera.trigger.val parameter.\n mode 3: This is an internal trigger mode. The trigger is\n generated every n*(period of fastest framerate).\n Once again, the parameter n can be set with\n camera.trigger.val.\n mode 4: A multiple exposure mode. N exposures are performed\n each time a falling edge is observed on the trigger\n signal. Each exposure is as long as defined by the\n SHUTTER (camera.shutter) features.\n mode 5: Another multiple exposure mode. Same as Mode 4\n except that the exposure is is defined by the\n length of the trigger pulse instead of the SHUTTER\n features.\n mode 14 and 15: vendor specified trigger mode.\n \"\"\"\n if self._name.lower() == \"trigger\":\n mode = trigger_mode_t()\n self._dll.dc1394_external_trigger_get_mode(self._cam.cam,\n byref(mode))\n return trigger_modes[mode.value]\n\n mode = feature_mode_t()\n self._dll.dc1394_feature_get_mode(self._cam.cam, self._id, byref(mode))\n return feature_modes[mode.value]\n\n @mode.setter\n def mode(self, value):\n if value in self.pos_modes:\n if self._name.lower() == \"trigger\":\n key = trigger_modes[value]\n self._dll.dc1394_external_trigger_set_mode(self._cam.cam, key)\n else:\n key = feature_modes[value]\n self._dll.dc1394_feature_set_mode(self._cam.cam, self._id, key)\n else:\n print(\"Invalid %s mode: %s\" % (self._name, value))\n\n def polarity_capable(self):\n \"\"\"\n Is this features polarity capable? This is valid for the trigger\n only.\n \"\"\"\n finfo = feature_info_t()\n finfo.id = self._id\n self._dll.dc1394_feature_get(self._cam.cam, byref(finfo))\n # polarity_capable is an bool_t = int field:\n return bool(finfo.polarity_capable)\n\n @property\n def polarity(self):\n \"\"\"\n The polarity of the external trigger. If the trigger\n has polarity (camera.trigger.polarity_capable == True),\n then it has two possible values. 
These are returned by:\n camera.trigger.pos_polarities.\n \"\"\"\n pol = trigger_polarity_t()\n self._dll.dc1394_external_trigger_get_polarity(self._cam.cam,\n byref(pol))\n if pol.value in trigger_polarities:\n return trigger_polarities[pol.value]\n else:\n return pol.value\n\n @polarity.setter\n def polarity(self, pol):\n if self.polarity_capable:\n if pol in trigger_polarities:\n key = trigger_polarities[pol]\n self._dll.dc1394_external_trigger_set_polarity(\n self._cam.cam, key\n )\n else:\n print(\"Invalid external trigger polarity: %s\" % pol)\n\n def pos_polarities(self):\n return trigger_polarities.keys()\n\n @property\n def source(self):\n \"\"\"\n Actual source of the external trigger\n \"\"\"\n source = trigger_source_t()\n self._dll.dc1394_external_trigger_get_source(self._cam.cam,\n byref(source))\n return trigger_sources[source.value]\n\n @source.setter\n def source(self, src):\n if src in trigger_sources:\n key = trigger_sources[src]\n self._dll.dc1394_external_trigger_set_source(self._cam.cam, key)\n else:\n print(\"Invalid external trigger source: %s\" % src)\n\n def pos_sources(self):\n \"\"\"\n List the possible external trigger sources of the camera\n \"\"\"\n src = trigger_sources_t()\n self._dll.dc1394_external_trigger_get_supported_sources(self._cam.cam,\n byref(src))\n return [trigger_sources[src.sources[i]] for i in range(src.num)]\n\n @property\n def software_trigger(self):\n \"\"\"\n Set and get the software trigger (active or not).\n \"\"\"\n res = switch_t()\n self._dll.dc1394_software_trigger_get_power(self._cam.cam, byref(res))\n return bool(res.value)\n\n @software_trigger.setter\n def software_trigger(self, value):\n k = bool(value)\n self._dll.dc1394_software_trigger_set_power(self._cam.cam, k)\n\n\nclass DCCamera(object):\n \"\"\"\n This class represents a IEEE1394 Camera on the BUS. It currently\n supports all features of the cameras except white balancing.\n You can pass all features the camera supports as additional arguments\n to this classes constructor. For example: shutter = 7.4, gain = 8\n The cameras pictures can be accessed in two ways. Either way, use\n start() to begin the capture. If you are always interested in the\n latest picture use the new_image Condition, wait for it, then use\n cam.current_image for your processing. This mode is called interactive\n because it is used in live displays. An alternative way is to use\n shot() which guarantees to deliver all pictures the camera acquires\n in the correct order. Note though that you have to process these\n pictures with a certain speed, otherwise the caching queue will\n overrun. This mode is called serial. Note that you can theoretically\n also use the first acquisition mode here, but this seldom makes\n sense since you need a processing of the pictures anyway.\n :arg lib: the library to open the camera for\n :type lib: :class:`~DCLibrary`\n :arg guid: GUID of this camera. Can be a hexstring or the integer\n value\n :arg mode: acquisition mode, e.g. (640, 480, \"Y8\"). If you pass None,\n the current mode is kept. 
One can also use a string, such\n as 'FORMAT7_0'\n :type mode: :class:`tuple`, :class:`string` or :const:`None`\n :arg framerate: desired framerate, if you pass None, the current camera\n setting is kept\n :type framerate: :class:`float` or :const:`None`\n :arg isospeed: desired isospeed, you might want to use 800 if your bus\n supports it\n :type isospeed: :class:`int`\n \"\"\"\n _lib = None\n _guid = None\n _cam = None\n _dll = None\n _handle = None\n _mode = None\n _running = None\n _running_lock = None\n _new_image = None\n _current_img = None\n _worker = None\n _queue = None\n _features = None\n _operation_mode = None\n _all_modes = None\n _all_features = None\n _last_frame = None\n _frames_behind = None\n _abort_lock = None\n _absolute_capable = None\n _framerate = None\n\n def __init__(self, lib, guid, mode=None, framerate=None,\n isospeed=400, **feat):\n self._lib = lib\n\n if isinstance(guid, str):\n guid = int(guid, 16)\n\n self._guid = guid\n self._cam = None\n # it is the same as _dll anyway...\n self._dll = lib.dll\n\n self._running = False\n self._running_lock = Lock()\n\n # For image acquisition\n self._new_image = Condition()\n self._current_img = None\n self._worker = None\n\n self._framerate = framerate\n\n self.open()\n\n try:\n # Gather some information about this camera\n # Will also set the properties accordingly\n self._all_features = self._get_all_features()\n self._all_modes = self._get_supported_modes()\n\n # Set all features to manual (if possible)\n for f in self._all_features:\n if 'manual' in self.__getattribute__(f).pos_modes:\n self.__getattribute__(f).mode = 'manual'\n\n # Set acquisition mode and framerate, if no mode is requested,\n # we set a standard mode.\n self.mode = tuple(mode) if mode is not None else self.modes[0]\n\n # set the framerate:\n self.fps = self._mode.framerates[-1]\n\n try:\n self._framerate.mode = \"auto\"\n except AttributeError:\n pass # Feature not around, so what?\n\n # Set isospeed\n if isospeed:\n # If the speed is >= 800, set other operation mode\n # this is done automatically by the isospeed setting\n # self._operation_mode = \"legacy\" if isospeed < 800 else \"1394b\"\n self.isospeed = isospeed\n\n # Set other parameters\n for n, v in feat.items():\n if v is None:\n continue\n self.__getattribute__(n).val = v\n except DCCameraError:\n self.close()\n raise\n\n def __del__(self):\n self.close()\n\n def start(self, bufsize=4, interactive=False):\n \"\"\"\n Start the camera in free running acquisition\n bufsize - how many DMA buffers should be used? 
If this value is\n high, the lag between your currently processed picture\n and reality might be higher but your risk to miss a frame\n is also much lower.\n interactive - If this is true, shot() is not supported and no queue\n overrun can occur\n \"\"\"\n if self.running:\n return\n\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n self._dll.dc1394_capture_setup(self._cam, bufsize,\n capture_flags[\"CAPTURE_FLAGS_DEFAULT\"])\n\n # Start the acquisition\n self._dll.dc1394_video_set_transmission(self._cam, 1)\n\n self._queue = None if interactive else Queue(1000)\n\n # Now, start the Worker thread\n self._worker = _DCCamAcquisitionThread(self, self._new_image)\n\n self._running_lock.acquire()\n self._running = True\n self._running_lock.release()\n\n def stop(self):\n \"\"\"Stop the camera and return all frames to the driver\"\"\"\n\n if not self.running:\n return\n\n assert self._cam # Otherwise it couldn't be running\n self._worker.abort()\n self._worker.join()\n\n self._queue = None\n\n # stop the camera:\n self._dll.dc1394_capture_stop(self._cam)\n self._dll.dc1394_video_set_transmission(self._cam, 0)\n\n # stop the thread:\n self._running_lock.acquire()\n self._running = False\n self._running_lock.release()\n\n def reset_bus(self):\n \"\"\"\n This function resets the bus the camera is attached to. Note that\n this means that all cameras have to re-enumerate and will drop frames.\n So only use this if you know what you are doing.\n Note that the camera the camera is closed after this and it is not\n guaranteed that you can reopen it with :method:`open` again. To be sure,\n you have to recreate a new Camera object.\n \"\"\"\n if self.running:\n self.stop()\n\n self._dll.dc1394_reset_bus(self._cam)\n self.close()\n\n # This is needed so the generation is updated on Linux\n self._lib.enumerate_cameras()\n\n def shot(self):\n \"\"\"\n If the camera is running, this will acquire one frame from it and\n return it as a Image (numpy array + some informations).The memory\n is not copied, therefore you should not write on the array.\n Note that acquisition is always running in the background. This\n function alone is guaranteed to return all frames in running order.\n Use this function for your image processing, use cam.current_image\n for visualisation.\n \"\"\"\n if not self.running:\n raise DCCameraError(\"Camera is not running!\")\n if not self._queue:\n raise DCCameraError(\"Camera is running in interactive mode!\")\n\n return self._queue.get()\n\n def open(self):\n \"\"\"\n Open the camera\n \"\"\"\n self._cam = self._dll.dc1394_camera_new(self._lib.handle, self._guid)\n if not self._cam:\n raise DCCameraError(\"Couldn't access camera!\")\n\n def close(self):\n \"\"\"Close the camera. Stops it, if it was running\"\"\"\n if self.running:\n self.stop()\n\n if self._cam:\n self._dll.dc1394_camera_free(self._cam)\n self._cam = None\n\n # information gathering functions\n def _get_supported_modes(self):\n \"\"\"\n Get all the supported video modes of the camera. This calls the\n builtin dc1394 function and converts the returned codes to a\n readable list. 
Any element of this list can be used to set a video\n mode of the camera.\n Parameters: None\n Returns: list of available video modes\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n modes = video_modes_t()\n\n self._dll.dc1394_video_get_supported_modes(self._cam, byref(modes))\n return [mode_map[i](self._cam, i) for i in modes.modes[:modes.num]]\n\n def _get_all_features(self):\n \"\"\"\n Use a built in dc1394 function to read out all available features\n of the given camera.\n All features, which are capable of absolute values, are set to\n absolute value mode.\n Parameters: None Return value: fills up and returns the self._features\n list\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n fs = featureset_t()\n self._dll.dc1394_feature_get_all(self._cam, byref(fs))\n self._features = []\n\n # We set all features that are capable of it to absolute values\n for i in range(FEATURE_NUM):\n s = fs.feature[i]\n if s.available:\n if s.absolute_capable:\n self._dll.dc1394_feature_set_absolute_control(self._cam,\n s.id, 1)\n name = features[s.id]\n self._features.append(name)\n self.__dict__[name] = DCCameraProperty(self, name, s.id,\n s.absolute_capable)\n\n return self._features\n\n def get_register(self, offset):\n \"\"\"\n Get the control register value of the camera a the given offset\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n val = c_uint32()\n self._dll.dc1394_get_control_registers(self._cam, offset, byref(val), 1)\n return val.value\n\n def set_register(self, offset, value):\n \"\"\"\n Set the control register value of the camera at the given offset to\n the given value\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n val = c_uint32(value)\n self._dll.dc1394_set_control_registers(self._cam, offset, byref(val), 1)\n\n @property\n def cam(self):\n \"\"\"\n Return dc1394 new camera\n \"\"\"\n return self._cam\n\n @property\n def dll(self):\n return self._dll\n\n @property\n def queue(self):\n return self._queue\n\n @property\n def broadcast(self):\n \"\"\"\n This sets if the camera tries to synchronize with other cameras on\n the bus.\n Note: that behaviour might be strange if one camera tries to\n broadcast and another not.\n Note 2: that this features is currently only supported under linux\n and I have not seen it working yet though I tried it with\n cameras that should support it. 
So use on your own risk!\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n k = bool_t()\n self._dll.dc1394_camera_get_broadcast(self._cam, byref(k))\n if k.value == 1:\n return True\n else:\n return False\n\n @broadcast.setter\n def broadcast(self, value):\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n use = 1 if value else 0\n self._dll.dc1394_camera_set_broadcast(self._cam, use)\n\n @property\n def current_image(self):\n \"\"\"\n Thread safe access to the current image of the camera\n \"\"\"\n # We do proper locking\n self._new_image.acquire()\n self._new_image.wait()\n i = self._current_img\n self._new_image.release()\n return i\n\n @property\n def new_image(self):\n \"\"\"\n The Condition to wait for when you want a new Image\n \"\"\"\n return self._new_image\n\n @property\n def running(self):\n \"\"\"\n This is a thread safe propertie which can check\n if the camera is (still) running\n \"\"\"\n self._running_lock.acquire()\n rv = self._running\n self._running_lock.release()\n return rv\n\n @property\n def model(self):\n \"\"\"\n The name of this camera (string)\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n return self._cam.contents.model\n\n @property\n def guid(self):\n \"\"\"\n The Guid of this camera as string\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n return hex(self._cam.contents.guid)[2:-1]\n\n @property\n def vendor(self):\n \"\"\"\n The vendor of this camera (string)\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n return self._cam.contents.vendor\n\n @property\n def mode(self):\n \"\"\"\n The current video mode of the camera.\n The video modes are what let you choose the image size and _color\n format. Two special format classes exist: the :class:`Exif`\n mode (which is actually not supported by any known camera)\n and :class:`Format7` which is the scalable image format.\n Format7 allows you to change the image size, framerate, _color\n coding and crop region.\n Important note: your camera will not support all the video modes\n but will only supports a more or less limited subset of them.\n Use :attr:`modes` to obtain a list of valid modes for this camera.\n This property can be written as either a string describing a simple\n mode: \"640x480_Y8\", as a tuple (640, 480, \"Y8\") or as a Mode class.\n If you want to use Format7 use the Format7 class.\n \"\"\"\n # vmod = video_mode_t()\n # self._dll.dc1394_video_get_mode(self._cam, byref(vmod))\n return self._mode\n\n @mode.setter\n def mode(self, mode):\n if isinstance(mode, (tuple, str)):\n try:\n mode = create_mode(self._cam, mode)\n except KeyError:\n raise DCCameraError(\"Invalid mode for this camera!\")\n self._mode = mode\n self._dll.dc1394_video_set_mode(self._cam, mode.mode_id)\n\n @property\n def fps(self):\n \"\"\"\n The framerate belonging to the current camera mode.\n For non-scalable video formats (not :class:`Format7`) there is a\n set of standard frame rates one can choose from. A list\n of all the framerates supported by your camera for a specific\n video mode can be obtained from :attr:`Mode.rates`.\n .. note::\n You may also be able to set the framerate with the\n :attr:`framerate` features if present.\n .. note::\n Framerates are used with fixed-size image formats (Format_0\n to Format_2). In :class:`Format7` modes the camera can tell\n an actual value, but one can not set it. 
Unfortunately the\n returned framerate may have no sense at all. If you use\n Format_7 you should set the framerate by adjusting the number\n of bytes per packet (:attr:`Format7.packet_size`) and/or the\n shutter time.\n \"\"\"\n ft = framerate_t()\n self._dll.dc1394_video_get_framerate(self._cam, byref(ft))\n return framerates[ft.value]\n\n @fps.setter\n def fps(self, framerate):\n wanted_frate = framerates[framerate]\n self._dll.dc1394_video_set_framerate(self._cam, wanted_frate)\n\n @property\n def isospeed(self):\n \"\"\"\n The isospeed of the camera.\n If queried, returns the actual isospeed value.\n If set, it tries setting the speed.\n One can get the actual set value of the camera or set from:\n 100, 200, 400, 800, 1600, 3200 if the camera supports them.\n Above 400 the 1394b high speed mode has to be available\n (the function tries to set it).\n \"\"\"\n sp = iso_speed_t()\n self._dll.dc1394_video_get_iso_speed(self._cam, byref(sp))\n return iso_speeds[sp.value]\n\n @isospeed.setter\n def isospeed(self, speed):\n if speed in iso_speeds:\n try:\n self._operation_mode = 'legacy' if speed < 800 else '1394b'\n except RuntimeError:\n raise DCCameraError(\n \"1394b mode is not supported by hardware, but needed!\"\n )\n else:\n sp = iso_speeds[speed]\n self._dll.dc1394_video_set_iso_speed(self._cam, sp)\n else:\n raise DCCameraError(\"Invalid isospeed: %s\" % speed)\n\n @property\n def operation_mode(self):\n \"\"\"\n This can toggle the camera mode into B mode (high speeds). This is\n a private property because you definitively do not want to change\n this as a user.\n \"\"\"\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n k = c_int()\n self._dll.dc1394_video_get_operation_mode(self._cam, byref(k))\n if k.value == 480:\n return \"legacy\"\n else:\n return \"1394b\"\n\n @operation_mode.setter\n def operation_mode(self, value):\n if not self._cam:\n raise DCCameraError(\"The camera is not opened!\")\n\n use = 480 if value == \"legacy\" else 481\n self._dll.dc1394_video_set_operation_mode(self._cam, use)\n\n @property\n def modes(self):\n \"\"\"\n Return all supported modes for this camera\n \"\"\"\n return self._all_modes\n\n @property\n def features(self):\n \"\"\"\n Return all features of this camera. You can use __getattr__ to\n directly access them then.\n \"\"\"\n return self._all_features\n\n\nclass SynchronizedCams(object):\n \"\"\"\n This class synchronizes two (not more!) cameras by droping frames\n from one until the timestamps of the acquired pictures are in sync.\n Make sure that the cameras are in the same mode (framerate, shutter)\n \"\"\"\n _cam0 = None\n _cam1 = None\n\n def __init__(self, cam0, cam1):\n \"\"\"\n Assumes point gray cameras which can do auto sync\n \"\"\"\n self._cam0 = cam0\n self._cam1 = cam1\n\n def close(self):\n \"\"\"\n Close both cameras.\n \"\"\"\n self._cam0.close()\n self._cam1.close()\n\n @property\n def cams(self):\n return self._cam0, self._cam1\n\n @property\n def cam0(self):\n return self._cam0\n\n @property\n def cam1(self):\n return self._cam1\n\n def start(self, buffers=4):\n self._cam0.start(buffers)\n self._cam1.start(buffers)\n self.sync()\n\n def stop(self):\n self._cam0.stop()\n self._cam1.stop()\n\n def shot(self):\n \"\"\"\n This function acquires two synchronized pictures from\n the cameras. Use this if you need pictures which were\n acquired around the same time. Do not use the cams individual shot\n functions. If you need a current image you can use cam.current_image\n at all times. 
You can also wait for the Condition cam.new_image\n and then use cam.current_image.\n note that the user has to check for themselves if the cameras\n are out of sync and must make sure they get back in sync.\n \"\"\"\n i1 = self._cam0.shot()\n i2 = self._cam1.shot()\n\n return i1, i2\n\n def sync(self):\n \"\"\"\n Try to sync the two cameras to each other. This will only work if both\n cameras synchronize on the bus time.\n \"\"\"\n ldiff = 100000000\n while True:\n t1 = self._cam0.shot().timestamp\n t2 = self._cam1.shot().timestamp\n\n diff = t1 - t2\n\n if abs(diff) < 500:\n break\n\n if diff < 0:\n self._cam0.shot()\n else:\n self._cam1.shot()\n\n ldiff = diff\n"
] | [
[
"numpy.fromstring"
]
] |
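The DCImage class stored in the row above wraps a raw dc1394 frame buffer as a numpy array view and attaches acquisition metadata (timestamp, frame id, packet sizes). A minimal sketch of that wrap-a-buffer pattern, assuming a hypothetical `FrameView` subclass and a fake byte buffer; it uses `np.frombuffer` rather than the deprecated `np.fromstring` called in the stored code.

```python
import numpy as np

class FrameView(np.ndarray):
    """Hypothetical stand-in for DCImage: an ndarray view that carries
    acquisition metadata alongside the pixel data."""
    timestamp = None
    frame_id = None

# Pretend this bytes object is a camera DMA buffer (8-bit mono, 4x6 pixels).
raw = bytes(range(24))

# frombuffer wraps the buffer without copying, much like reading the
# dc1394 frame in place before re-enqueueing it.
img = np.frombuffer(raw, dtype=np.uint8).reshape(4, 6).view(FrameView)

frame = img.copy()            # copy before the driver would reuse the buffer
frame.timestamp = 123456789   # e.g. IEEE bus time in microseconds (made up)
frame.frame_id = 0
print(frame.shape, frame.timestamp, frame.frame_id)
```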
cdrhim/KoELECTRA | [
"19fd5f2b698b297a8cf1e2c0c3995c93509298f3"
] | [
"pretrain/build_pretraining_dataset.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Writes out text data as tfrecords that ELECTRA can be pre-trained on.\"\"\"\n\nimport argparse\nimport multiprocessing\nimport os\nimport random\nimport time\nimport tensorflow.compat.v1 as tf\n\nfrom model import tokenization\nfrom util import utils\n\n\ndef create_int_feature(values):\n feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n\nclass ExampleBuilder(object):\n \"\"\"Given a stream of input text, creates pretraining examples.\"\"\"\n\n def __init__(self, tokenizer, max_length):\n self._tokenizer = tokenizer\n self._current_sentences = []\n self._current_length = 0\n self._max_length = max_length\n self._target_length = max_length\n\n def add_line(self, line):\n \"\"\"Adds a line of text to the current example being built.\"\"\"\n line = line.strip().replace(\"\\n\", \" \")\n if (not line) and self._current_length != 0: # empty lines separate docs\n return self._create_example()\n bert_tokens = self._tokenizer.tokenize(line)\n bert_tokids = self._tokenizer.convert_tokens_to_ids(bert_tokens)\n self._current_sentences.append(bert_tokids)\n self._current_length += len(bert_tokids)\n if self._current_length >= self._target_length: # If cur_len is longer than max_len\n return self._create_example()\n return None\n\n def _create_example(self):\n \"\"\"Creates a pre-training example from the current list of sentences.\"\"\"\n # small chance to only have one segment as in classification tasks\n if random.random() < 0.1:\n first_segment_target_length = 100000\n else:\n # -3 due to not yet having [CLS]/[SEP] tokens in the input text\n first_segment_target_length = (self._target_length - 3) // 2\n\n first_segment = []\n second_segment = []\n for sentence in self._current_sentences:\n # the sentence goes to the first segment if (1) the first segment is\n # empty, (2) the sentence doesn't put the first segment over length or\n # (3) 50% of the time when it does put the first segment over length\n if (len(first_segment) == 0 or\n len(first_segment) + len(sentence) < first_segment_target_length or\n (len(second_segment) == 0 and\n len(first_segment) < first_segment_target_length and\n random.random() < 0.5)):\n first_segment += sentence\n else:\n second_segment += sentence\n\n # trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens\n first_segment = first_segment[:self._max_length - 2]\n second_segment = second_segment[:max(0, self._max_length -\n len(first_segment) - 3)]\n\n # prepare to start building the next example\n self._current_sentences = []\n self._current_length = 0\n # small chance for random-length instead of max_length-length example\n if random.random() < 0.05:\n self._target_length = random.randint(5, self._max_length)\n else:\n self._target_length = self._max_length\n\n return self._make_tf_example(first_segment, second_segment)\n\n def _make_tf_example(self, first_segment, second_segment):\n 
\"\"\"Converts two \"segments\" of text into a tf.train.Example.\"\"\"\n vocab = self._tokenizer.vocab\n input_ids = [vocab[\"[CLS]\"]] + first_segment + [vocab[\"[SEP]\"]]\n segment_ids = [0] * len(input_ids)\n if second_segment:\n input_ids += second_segment + [vocab[\"[SEP]\"]]\n segment_ids += [1] * (len(second_segment) + 1)\n input_mask = [1] * len(input_ids)\n input_ids += [0] * (self._max_length - len(input_ids))\n input_mask += [0] * (self._max_length - len(input_mask))\n segment_ids += [0] * (self._max_length - len(segment_ids))\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n \"input_ids\": create_int_feature(input_ids),\n \"input_mask\": create_int_feature(input_mask),\n \"segment_ids\": create_int_feature(segment_ids)\n }))\n return tf_example\n\n\nclass ExampleWriter(object):\n \"\"\"Writes pre-training examples to disk.\"\"\"\n\n def __init__(self, job_id, vocab_file, output_dir, max_seq_length,\n num_jobs, blanks_separate_docs, do_lower_case,\n num_out_files=1000):\n self._blanks_separate_docs = blanks_separate_docs\n tokenizer = tokenization.FullTokenizer(\n vocab_file=vocab_file,\n do_lower_case=do_lower_case)\n self._example_builder = ExampleBuilder(tokenizer, max_seq_length)\n self._writers = []\n for i in range(num_out_files):\n if i % num_jobs == job_id:\n output_fname = os.path.join(\n output_dir, \"pretrain_data.tfrecord-{:}-of-{:}\".format(\n i, num_out_files))\n self._writers.append(tf.io.TFRecordWriter(output_fname))\n self.n_written = 0\n\n def write_examples(self, input_file):\n \"\"\"Writes out examples from the provided input file.\"\"\"\n with tf.io.gfile.GFile(input_file) as f:\n for line in f:\n line = line.strip()\n if line or self._blanks_separate_docs:\n example = self._example_builder.add_line(line)\n if example:\n self._writers[self.n_written % len(self._writers)].write(\n example.SerializeToString())\n self.n_written += 1\n example = self._example_builder.add_line(\"\")\n if example:\n self._writers[self.n_written % len(self._writers)].write(\n example.SerializeToString())\n self.n_written += 1\n\n def finish(self):\n for writer in self._writers:\n writer.close()\n\n\ndef write_examples(job_id, args):\n \"\"\"A single process creating and writing out pre-processed examples.\"\"\"\n\n def log(*args):\n msg = \" \".join(map(str, args))\n print(\"Job {}:\".format(job_id), msg)\n\n log(\"Creating example writer\")\n example_writer = ExampleWriter(\n job_id=job_id,\n vocab_file=args.vocab_file,\n output_dir=args.output_dir,\n max_seq_length=args.max_seq_length,\n num_jobs=args.num_processes,\n blanks_separate_docs=args.blanks_separate_docs,\n do_lower_case=args.do_lower_case\n )\n log(\"Writing tf examples\")\n fnames = sorted(tf.io.gfile.listdir(args.corpus_dir))\n fnames = [f for (i, f) in enumerate(fnames)\n if i % args.num_processes == job_id]\n random.shuffle(fnames)\n start_time = time.time()\n for file_no, fname in enumerate(fnames):\n if file_no > 0:\n elapsed = time.time() - start_time\n log(\"processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, \"\n \"{:} examples written\".format(\n file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),\n int((len(fnames) - file_no) / (file_no / elapsed)),\n example_writer.n_written))\n example_writer.write_examples(os.path.join(args.corpus_dir, fname))\n example_writer.finish()\n log(\"Done!\")\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"--corpus-dir\", required=True,\n help=\"Location of pre-training text files.\")\n 
parser.add_argument(\"--vocab-file\", required=True,\n help=\"Location of vocabulary file.\")\n parser.add_argument(\"--output-dir\", required=True,\n help=\"Where to write out the tfrecords.\")\n parser.add_argument(\"--max-seq-length\", default=512, type=int,\n help=\"Number of tokens per example.\")\n parser.add_argument(\"--num-processes\", default=4, type=int,\n help=\"Parallelize across multiple processes.\")\n parser.add_argument(\"--blanks-separate-docs\", default=False, type=bool,\n help=\"Whether blank lines indicate document boundaries.\")\n parser.add_argument(\"--do-lower-case\", dest='do_lower_case',\n action='store_true', help=\"Lower case input text.\")\n parser.add_argument(\"--no-lower-case\", dest='do_lower_case',\n action='store_false', help=\"Don't lower case input text.\")\n parser.set_defaults(do_lower_case=False)\n args = parser.parse_args()\n\n print(args)\n\n utils.rmkdir(args.output_dir)\n if args.num_processes == 1:\n write_examples(0, args)\n else:\n jobs = []\n for i in range(args.num_processes):\n job = multiprocessing.Process(target=write_examples, args=(i, args))\n jobs.append(job)\n job.start()\n for job in jobs:\n job.join()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.compat.v1.io.TFRecordWriter",
"tensorflow.compat.v1.io.gfile.listdir",
"tensorflow.compat.v1.io.gfile.GFile"
]
] |
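The `ExampleBuilder._make_tf_example` method in the row above packs two token segments into fixed-length `input_ids` / `input_mask` / `segment_ids` lists before serializing a `tf.train.Example`. A framework-free sketch of just that packing step, assuming made-up `[CLS]`/`[SEP]` vocabulary ids (2 and 3) and toy token ids:

```python
def pack_segments(first_segment, second_segment, max_length, cls_id=2, sep_id=3):
    """Mimics the [CLS]/[SEP] packing and zero padding used above.
    cls_id/sep_id are placeholder vocabulary ids, not ELECTRA's real ones."""
    input_ids = [cls_id] + list(first_segment) + [sep_id]
    segment_ids = [0] * len(input_ids)
    if second_segment:
        input_ids += list(second_segment) + [sep_id]
        segment_ids += [1] * (len(second_segment) + 1)
    input_mask = [1] * len(input_ids)
    pad = max_length - len(input_ids)
    input_ids += [0] * pad
    input_mask += [0] * pad
    segment_ids += [0] * pad
    return input_ids, input_mask, segment_ids

ids, mask, segs = pack_segments([11, 12, 13], [21, 22], max_length=10)
print(ids)   # [2, 11, 12, 13, 3, 21, 22, 3, 0, 0]
print(mask)  # [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]
print(segs)  # [0, 0, 0, 0, 0, 1, 1, 1, 0, 0]
```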
crisdeodates/AI-depthai-experiments | [
"dd8a24db648338b8e4d7c9ec6c860985f9aeb56d"
] | [
"gen2-blur-faces/main.py"
] | [
"import blobconverter\nimport cv2\nimport depthai as dai\nimport numpy as np\n\nclass HostSync:\n def __init__(self):\n self.arrays = {}\n def add_msg(self, name, msg):\n if not name in self.arrays:\n self.arrays[name] = []\n self.arrays[name].append(msg)\n def get_msgs(self, seq):\n ret = {}\n for name, arr in self.arrays.items():\n for i, msg in enumerate(arr):\n if msg.getSequenceNum() == seq:\n ret[name] = msg\n self.arrays[name] = arr[i:]\n break\n return ret\n\ndef create_pipeline():\n print(\"Creating pipeline...\")\n pipeline = dai.Pipeline()\n\n # ColorCamera\n print(\"Creating Color Camera...\")\n cam = pipeline.create(dai.node.ColorCamera)\n cam.setPreviewSize(300, 300)\n cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n cam.setVideoSize(1080,1080)\n cam.setInterleaved(False)\n\n cam_xout = pipeline.create(dai.node.XLinkOut)\n cam_xout.setStreamName(\"frame\")\n cam.video.link(cam_xout.input)\n\n # NeuralNetwork\n print(\"Creating Face Detection Neural Network...\")\n face_det_nn = pipeline.create(dai.node.MobileNetDetectionNetwork)\n face_det_nn.setConfidenceThreshold(0.5)\n face_det_nn.setBlobPath(blobconverter.from_zoo(\n name=\"face-detection-retail-0004\",\n shaves=6,\n ))\n # Link Face ImageManip -> Face detection NN node\n cam.preview.link(face_det_nn.input)\n\n objectTracker = pipeline.create(dai.node.ObjectTracker)\n objectTracker.setDetectionLabelsToTrack([1]) # track only person\n # possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF\n objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)\n # take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID\n objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.SMALLEST_ID)\n\n # Linking\n face_det_nn.passthrough.link(objectTracker.inputDetectionFrame)\n face_det_nn.passthrough.link(objectTracker.inputTrackerFrame)\n face_det_nn.out.link(objectTracker.inputDetections)\n # Send face detections to the host (for bounding boxes)\n\n pass_xout = pipeline.create(dai.node.XLinkOut)\n pass_xout.setStreamName(\"pass_out\")\n objectTracker.passthroughTrackerFrame.link(pass_xout.input)\n\n tracklets_xout = pipeline.create(dai.node.XLinkOut)\n tracklets_xout.setStreamName(\"tracklets\")\n objectTracker.out.link(tracklets_xout.input)\n print(\"Pipeline created.\")\n return pipeline\n\nwith dai.Device(create_pipeline()) as device:\n frame_q = device.getOutputQueue(\"frame\")\n tracklets_q = device.getOutputQueue(\"tracklets\")\n pass_q = device.getOutputQueue(\"pass_out\")\n sync=HostSync()\n while True:\n sync.add_msg(\"color\", frame_q.get())\n\n # Using tracklets instead of ImgDetections in case NN inaccuratelly detected face, so blur\n # will still happen on all tracklets (even LOST ones)\n nn_in = tracklets_q.tryGet()\n if nn_in is not None:\n seq = pass_q.get().getSequenceNum()\n msgs = sync.get_msgs(seq)\n\n if not 'color' in msgs: continue\n frame = msgs[\"color\"].getCvFrame()\n\n for t in nn_in.tracklets:\n # Expand the bounding box a bit so it fits the face nicely (also convering hair/chin/beard)\n t.roi.x -= t.roi.width / 10\n t.roi.width = t.roi.width * 1.2\n t.roi.y -= t.roi.height / 7\n t.roi.height = t.roi.height * 1.2\n\n roi = t.roi.denormalize(frame.shape[1], frame.shape[0])\n bbox = [int(roi.topLeft().x), int(roi.topLeft().y), int(roi.bottomRight().x), int(roi.bottomRight().y)]\n\n face = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]\n fh, fw, fc = face.shape\n frame_h, 
frame_w, frame_c = frame.shape\n\n # Create blur mask around the face\n mask = np.zeros((frame_h, frame_w), np.uint8)\n polygon = cv2.ellipse2Poly((bbox[0] + int(fw /2), bbox[1] + int(fh/2)), (int(fw /2), int(fh/2)), 0,0,360,delta=1)\n cv2.fillConvexPoly(mask, polygon, 255)\n\n frame_copy = frame.copy()\n frame_copy = cv2.blur(frame_copy, (80, 80))\n face_extracted = cv2.bitwise_and(frame_copy, frame_copy, mask=mask)\n background_mask = cv2.bitwise_not(mask)\n background = cv2.bitwise_and(frame, frame, mask=background_mask)\n # Blur the face\n frame = cv2.add(background, face_extracted)\n\n cv2.imshow(\"Frame\", cv2.resize(frame, (900,900)))\n\n if cv2.waitKey(1) == ord('q'):\n break\n\n"
] | [
[
"numpy.zeros"
]
] |
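The tracklet loop in the row above blurs each detected face by filling an elliptical mask, blurring a copy of the frame, and compositing the blurred pixels back over the original. A self-contained sketch of that mask-and-composite idea on a synthetic frame (no camera or DepthAI device involved; the region coordinates and kernel size are arbitrary):

```python
import cv2
import numpy as np

# Synthetic 200x200 BGR frame with a bright square standing in for a face.
frame = np.zeros((200, 200, 3), np.uint8)
cv2.rectangle(frame, (70, 70), (130, 130), (0, 255, 255), -1)

# Elliptical mask around the "face" region (center and axes are illustrative).
mask = np.zeros(frame.shape[:2], np.uint8)
polygon = cv2.ellipse2Poly((100, 100), (40, 50), 0, 0, 360, delta=1)
cv2.fillConvexPoly(mask, polygon, 255)

# Blur the whole frame, then keep the blurred pixels only inside the mask.
blurred = cv2.blur(frame, (25, 25))
face_region = cv2.bitwise_and(blurred, blurred, mask=mask)
background = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(mask))
result = cv2.add(background, face_region)

cv2.imwrite("blurred_face_demo.png", result)
```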
vbgupta/DS440-Transfer-Learning-Address-Sustainability-Issues | [
"b6ef51fa1e4253e92b8179b1953c43e6b983b944"
] | [
"models/src/modeling/main.py"
] | [
"#-----Main Model File----#\n\nclass Model:\n\n def __init__(self, data):\n\n self.data = data\n\n def preprocess(self):\n\n self.data['License_Class'] = self.data['License_Class'].astype('category').cat.codes\n train = self.data[self.data['year'] != 2021]\n test = self.data[self.data['year'] == 2021]\n print(f\"Train Data Years: {train.year.unique()}\")\n print(f\"Test Data Years: {test.year.unique()}\")\n\n X_train = train.drop(['pm25','aqi'], axis = 1)\n y_train = train[['aqi']]\n X_test = test.drop(['pm25','aqi'], axis = 1)\n y_test = test[['aqi']]\n\n print(f\"X_train Shape: {X_train.shape}\")\n print(f\"X_test Shape: {X_test.shape}\")\n return X_train, y_train, y_test, X_test, train\n\n def pca(self, train):\n\n pca = PCA(n_components=5)\n pca.fit(train)\n print(f\"explained_variance_ratio_: {pca.explained_variance_ratio_}\")\n print(f\"singular_values_:{pca.singular_values_}\")\n print(f\"Components: {pca.components_}\")\n\n return pca\n\n def build_rfr(self, X_train, y_train, y_test, X_test):\n\n rfr = RandomForestRegressor(n_estimators=200, criterion=\"mse\", \n min_samples_leaf=3, min_samples_split=3, \n max_depth=10).fit(X_train, y_train)\n\n print(f\"Random Forest Regressor Score: {rfr.score(X_test, y_test) * 100}\")\n\n return rfr\n\n def build_NN(self, X_train, y_train, X_test, y_test):\n\n tf.random.set_seed(42) #first we set random seed\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(32,activation=\"relu\",input_shape=(32,)),\n tf.keras.layers.Dense(1)])\n model.compile( loss = tf.keras.losses.mae, #mae stands for mean absolute error\n optimizer = tf.keras.optimizers.SGD(), #stochastic GD\n metrics = ['mae'])\n model.fit(X_train.values.reshape(32, 936), y_train.values.reshape(-1 , 936), epochs = 10)\n predictons = model.evaluate(X_test.values.reshape(32, 318), y_test.values.reshape(-1 ,318))\n\n print(model.predict([[4, 300, 500]]))\n\n return model, predictions\n\n def plot_preds(self, X_train, y_train, X_test, y_test, model, predictions):\n \n plt.figure(figsize=(12,6))\n plt.scatter(X_train, y_train, c=\"b\", label=\"Training data\")\n plt.scatter(X_test, y_test, c=\"g\", label=\"Testing data\")\n plt.scatter(X_test, predictions, c=\"r\", label=\"Predictions\")\n plt.legend()\n\n return _\n\n\nif __name__ == \"__main__\":\n from database.fetch_data import nyc_v2\n from sklearn.ensemble import RandomForestRegressor\n import pandas as pd\n import warnings\n from sklearn.decomposition import PCA\n import tensorflow as tf\n import pathlib\n from path import Path\n import os\n d = Path(__file__).parent\n\n warnings.filterwarnings(\"ignore\")\n data = nyc_v2()\n print(data.head())\n\n model = Model(data)\n X_train, y_train, y_test, X_test, train = model.preprocess()\n pca_analysis = model.pca(train)\n model_RFR = model.build_rfr( X_train, y_train, y_test, X_test)\n model_NN, predictions = model.build_NN( X_train, y_train, y_test, X_test)\n #plots = model.plot_preds(X_train, y_train, y_test, X_test, model_NN, predictions)\n"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.layers.Dense",
"tensorflow.random.set_seed",
"sklearn.decomposition.PCA"
]
] |
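The `Model` class in the row above splits its data by year (2021 held out) and fits a RandomForestRegressor; note the stored `build_NN` method has a `predictons`/`predictions` typo and passes its arguments in a different order than its signature. A condensed, corrected sketch of the year-based split and forest fit on synthetic data; the feature names and hyperparameters here are illustrative, not the project's:

```python
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "year": rng.choice([2019, 2020, 2021], size=300),
    "traffic": rng.normal(100, 20, size=300),       # made-up feature
    "temperature": rng.normal(15, 8, size=300),     # made-up feature
})
df["aqi"] = 0.3 * df["traffic"] - 0.5 * df["temperature"] + rng.normal(0, 5, 300)

# Hold out the most recent year as the test set, as in preprocess() above.
train = df[df["year"] != 2021]
test = df[df["year"] == 2021]
X_train, y_train = train.drop(columns=["aqi"]), train["aqi"]
X_test, y_test = test.drop(columns=["aqi"]), test["aqi"]

rfr = RandomForestRegressor(n_estimators=200, min_samples_leaf=3,
                            max_depth=10, random_state=0)
rfr.fit(X_train, y_train)
print(f"R^2 on held-out year: {rfr.score(X_test, y_test):.3f}")
```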
notadamking/ta-modin | [
"e0c2e3b36d21b05798f5ffbb4d30d1898573a375"
] | [
"dev/generate_image_bb.py"
] | [
"import numpy as np\nimport platform\n\nif platform.system() == 'Windows':\n import pandas as pd\nelse:\n import modin.pandas as pd\n\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append(\"..\") # Adds higher directory to python modules path.\nfrom ta import *\n\n# Load data\ndf = pd.read_csv('../data/datas.csv', sep=',')\ndf = utils.dropna(df)\n\n# Add all ta features filling nans values\ndf = add_all_ta_features(df, \"Open\", \"High\", \"Low\", \"Close\", \"Volume_BTC\",\n fillna=True)\n\nplt.plot(df[40700:41000].Close)\nplt.plot(df[40700:41000].volatility_bbh, label='High BB')\nplt.plot(df[40700:41000].volatility_bbl, label='Low BB')\nplt.plot(df[40700:41000].volatility_bbm, label='EMA BB')\nplt.title('Bollinger Bands')\nplt.legend()\nplt.savefig(\"bb.png\")\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig"
]
] |
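The script in the row above plots Bollinger Bands produced by the `ta` package. The bands themselves are a rolling mean plus/minus a multiple of the rolling standard deviation, so they are easy to recompute directly with pandas; this sketch uses the conventional 20-period window and 2-sigma width on a synthetic price series (the `ta` implementation may differ in details such as the standard-deviation degrees of freedom):

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
close = pd.Series(100 + rng.normal(0, 1, 500).cumsum(), name="Close")

window, n_std = 20, 2
mid = close.rolling(window).mean()
std = close.rolling(window).std()

bands = pd.DataFrame({
    "bbm": mid,                # middle band: rolling mean
    "bbh": mid + n_std * std,  # upper band
    "bbl": mid - n_std * std,  # lower band
})
print(bands.dropna().head())
```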
RicoFio/5294BIDA6Y_tech_tutorial | [
"a9f966d6022f5d7941100a377d7f9c5face71f00"
] | [
"api/api.py"
] | [
"### IMPORTS ###\nimport os\nimport time\nfrom typing import (\n List,\n Dict,\n Union,\n)\nimport threading\nimport uuid\n\nimport numpy as np\nfrom pydantic import (\n BaseModel,\n validator,\n Field,\n)\n\nfrom fastapi import (\n FastAPI,\n Response,\n status,\n)\nfrom fastapi.middleware.cors import CORSMiddleware\n\nimport diskcache as dc\nfrom stressypy import create_job\n###############\n\n### FastAPI setup ###\nDEBUG = os.environ['DEBUG'] if os.environ['DEBUG'] else True\n\nconfig = {\n \"DEBUG\": DEBUG,\n \"CACHE_TYPE\": \"SimpleCache\",\n \"CACHE_DEFAULT_TIMEOUT\": 3600 # one hour\n}\n\napp = FastAPI(\n debug=DEBUG,\n title='LinReg API',\n description='An amazing API for some OP linear regression',\n version='0.0.1',\n docs_url='/',\n)\n\norigins = [\n \"http://localhost:8000\",\n \"http://0.0.0.0:8000\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\ncache = dc.FanoutCache('tmp')\n#####################\n\nWORKERS = int(os.getenv(\"WORKERS\", 1))\n\n\nclass LinRegLocks:\n \"\"\"\n A commodity class taking care to limit the number of concurrent\n operations to the number of workers available to the server\n \"\"\"\n locks = {f'worker_{i}': threading.Lock() for i in range(WORKERS)}\n\n def __enter__(self):\n self.lock = self._get_lock()\n self.lock.acquire()\n\n def __exit__(self, *args, **kwargs):\n try:\n self.lock.release()\n except:\n pass\n\n def _get_lock(self):\n while self._all_locked():\n time.sleep(1)\n\n for lock in self.locks.values():\n if not lock.locked():\n return lock\n\n def _all_locked(self):\n return all([lock.locked() for lock in self.locks.values()])\n\n\nlinreg_lock = LinRegLocks()\n\n\nclass DataToFit(BaseModel):\n \"\"\"\n Pydantic definition of the data users can\n input to generate a fit together with\n the required validation\n \"\"\"\n xs: List[float] = Field(example=[1, 2, 3])\n ys: List[float] = Field(example=[1, 2, 3])\n\n @validator('xs')\n def points_must_be_of_same_size(cls, v, values, **kwargs):\n if 'xs' in values and len(v) != len(values['ys']):\n raise ValueError('xs and ys have to be of same size')\n return v\n\n @validator('xs')\n def points_must_be_at_least_two(cls, v, values, **kwargs):\n if 'xs' in values and len(v) < 2:\n raise ValueError('xs and ys have to be at least 2')\n return v\n\n\nclass DataFittedModel(BaseModel):\n \"\"\"Pydantic definition of the fitted model\"\"\"\n model_id: int\n model: Dict\n\n\nclass DataToPredict(BaseModel):\n \"\"\"\n Pydantic definition of the data users can provide for inference\n \"\"\"\n xs: List[float]\n\n\ndef linreg(x: np.array, y: np.array) -> Dict[str, float]:\n \"\"\"\n The actual workhorse\n :returns\n dict with fitted slope and intercept\n \"\"\"\n A = np.vstack([x, np.ones(len(x))]).T\n slope, intercept = np.linalg.lstsq(A, y, rcond=None)[0]\n return {'slope': slope, 'intercept': intercept}\n\n\[email protected](\"/fit\", status_code=status.HTTP_201_CREATED)\ndef linear_fit(points_to_fit: DataToFit,\n response: Response) -> Union[Dict[str, Union[str, Dict[str, float]]], Response]:\n \"\"\"\n The endpoint to fit a line to a set of datapoints\n :param points_to_fit:\n :param response:\n :return:\n \"\"\"\n # First check if all locks are already used up\n # If that's the case return 429\n if linreg_lock._all_locked():\n response.status_code = status.HTTP_429_TOO_MANY_REQUESTS\n return response\n\n # Now we can build the model\n # We use a thread lock to simulate a single threaded execution\n 
with linreg_lock:\n model = linreg(points_to_fit.xs, points_to_fit.ys)\n # Simulate that this takes A LOT of CPU for 20 seconds\n job = create_job(1, 20)\n job.run()\n\n # Create a pseudo-random ID for it\n model_id = str(uuid.uuid4())\n # Store it temporarily\n cache.set(model_id, model)\n\n # Return the model id and its parameters\n output = {\n 'model_id': model_id,\n 'model': model,\n }\n\n return output\n\n\[email protected](\"/predict/{model_id}\", status_code=status.HTTP_200_OK)\ndef predict(points_to_predict: DataToPredict, model_id: str):\n \"\"\"\n The endpoint to predict the ys for the given xs given the\n previously fitted model\n :param points_to_predict:\n :param model_id:\n :return:\n \"\"\"\n # Check if model has been fitted before\n if not (model := cache.get(model_id)):\n return {'error': f'model_id {model_id} not found in cache. please fit your model first'}, 404\n else:\n # Make predictions\n predictions = model['intercept'] + model['slope'] * np.array(points_to_predict.xs)\n response = {'ys': list(predictions)}\n\n return response\n"
] | [
[
"numpy.array",
"numpy.linalg.lstsq"
]
] |
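The `/fit` endpoint in the row above delegates to a small `linreg` helper built on `np.linalg.lstsq`. The same least-squares setup, isolated and runnable on its own (the sample points are made up):

```python
import numpy as np

def linreg(x, y):
    """Fit y = slope * x + intercept by ordinary least squares."""
    A = np.vstack([x, np.ones(len(x))]).T
    slope, intercept = np.linalg.lstsq(A, y, rcond=None)[0]
    return {"slope": slope, "intercept": intercept}

model = linreg(np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.1, 3.9, 6.2, 8.1]))
print(model)  # slope close to 2, intercept close to 0

# Inference mirrors the /predict endpoint: y = intercept + slope * x.
predictions = model["intercept"] + model["slope"] * np.array([5.0, 6.0])
print(predictions)
```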
Fortuz/MARG_project | [
"a83aa84ac001ff7cad93934a3ea8bbd05901d992"
] | [
"modules/UR_module/src/scheduler.py"
] | [
"#encoding: utf-8\r\nimport numpy as np\r\nfrom RTDEhandler import RTDEhandler\r\nfrom URconnect import URCom\r\nimport socket\r\nfrom threading import Thread\r\nimport time\r\nfrom queue import Queue\r\n\r\nclass scheduler:\r\n def __init__(self, robotip):\r\n self.robot=URCom(robotip,30002)\r\n self.servsock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.IP=socket.gethostbyname(socket.gethostname())\r\n self.rtde=RTDEhandler(robotip)\r\n self.rtde.send_recipe('actual_TCP_pose')\r\n self.rtde.start_comm()\r\n self.rtde.get_data()\r\n self.posinfo=[]\r\n self.posqueue=[True, Queue(maxsize=1000)]\r\n\r\n def saveposinfo(self,filename):\r\n np.savetxt(filename,np.round(np.array(self.posinfo).reshape(-1,7),decimals=6),delimiter=\",\")\r\n\r\n def resetposinfo(self):\r\n self.posinfo=[]\r\n\r\n def monitorpos(self,sleeptime):\r\n while True:\r\n self.posqueue[1].put(np.array([time.time()]+[x for x in self.rtde.get_data()]))\r\n #self.posinfo.append(np.array([time.time()]+[x for x in self.rtde.get_data()]))\r\n time.sleep(sleeptime)\r\n\r\n def startmonitor(self,sleeptime):\r\n Thread(target=self.monitorpos, args=[sleeptime]).start()\r\n\r\n def load_testdata(self, testplan, points):\r\n with open(testplan, 'r') as f:\r\n pathlines = f.readlines()\r\n\r\n with open(points, 'r') as f:\r\n pointlines = f.readlines()\r\n\r\n self.pointdict = {}\r\n for point in pointlines:\r\n params = point.split(',')\r\n self.pointdict[params[0]] = np.array([float(x) for x in params[1:]])\r\n\r\n self.times = []\r\n self.pathlists = []\r\n self.speeds = []\r\n self.accs = []\r\n for path in pathlines:\r\n params = path.split('\\t')\r\n self.pathlists.append(params[0].split(','))\r\n self.times.append(float(params[1]))\r\n self.speeds.append(params[2])\r\n self.accs.append(params[3])\r\n\r\n def blockit(self):\r\n self.server.recv(400)\r\n\r\n def sendgoto(self, goal, v, a):\r\n preproc=np.round(goal,decimals=4)\r\n cmd=\",\".join(['gotol']+[str(x) for x in preproc]+[str(v),str(a)])+'\\n'\r\n print(cmd)\r\n self.server.send(cmd.encode('ASCII'))\r\n\r\n def sendwait(self,time):\r\n cmd = \",\".join(['wait',str(time)])+'\\n'\r\n print(cmd)\r\n self.server.send(cmd.encode('ASCII'))\r\n\r\n def connect(self):\r\n self.robot.conn()\r\n\r\n robotprog = open('robotprog.txt', 'r')\r\n robotprog = robotprog.read()\r\n robotprog = robotprog.replace('//GEPIP//', self.IP)\r\n self.robot.send(robotprog)\r\n print(robotprog)\r\n\r\n self.servsock.bind((self.IP, 12345))\r\n print(\"Bind\")\r\n self.servsock.listen(1)\r\n print(\"listen\")\r\n self.server, self.client_address = self.servsock.accept()\r\n print(\"Accept\")\r\n\r\n self.sendgoto(self.pointdict[\"home\"],300,300)\r\n self.blockit()\r\n\r\n def protocol(self, startcycle, endcycle):\r\n for i in range(len(self.pathlists)):\r\n input(\"Press Enter to start observation\")\r\n startcycle()\r\n stime=time.time()\r\n first = True\r\n while time.time()-stime<self.times[i]:\r\n for pos in self.pathlists[i]:\r\n self.step(self.pointdict[pos],self.speeds[i],self.accs[i],skip=first)\r\n if first:\r\n first = False\r\n\r\n self.sendgoto(self.pointdict[\"home\"], 300, 300)\r\n self.blockit()\r\n endcycle()\r\n\r\n input(\"Press Enter to start measurement\")\r\n startcycle()\r\n stime = time.time()\r\n first = True\r\n while time.time() - stime < self.times[i]:\r\n for pos in self.pathlists[i]:\r\n self.step(self.pointdict[pos], self.speeds[i], self.accs[i], skip=first)\r\n if first:\r\n first = False\r\n\r\n self.sendgoto(self.pointdict[\"home\"], 300, 300)\r\n 
self.blockit()\r\n endcycle()\r\n\r\n\r\n def start(self, startcylce, endcycle):\r\n Thread(target=self.protocol, args=[startcylce, endcycle]).start()\r\n\r\n def step(self, nextpos, speed, acc, skip=False):\r\n currpos = np.array(self.rtde.get_data())\r\n\r\n currtop = currpos\r\n currtop[2] += 0.15\r\n nexttop = nextpos.copy()\r\n nexttop[2] += 0.15\r\n\r\n if not skip:\r\n self.sendgoto(currtop, speed, acc)\r\n self.blockit()\r\n self.sendgoto(nexttop, speed, acc)\r\n self.blockit()\r\n self.sendgoto(nextpos, speed, acc)\r\n self.blockit()\r\n\r\n"
] | [
[
"numpy.round",
"numpy.array"
]
] |
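The scheduler in the row above runs `monitorpos` on a background Thread, pushing timestamped TCP poses into a Queue while the protocol thread drives the robot. A minimal producer/consumer sketch of that monitoring pattern, assuming a `fake_pose` stand-in for `RTDEhandler.get_data` (no robot, socket, or RTDE connection involved):

```python
import time
from queue import Queue
from threading import Thread

import numpy as np

def fake_pose():
    """Stand-in for RTDEhandler.get_data(): returns a 6-DOF TCP pose."""
    return np.random.uniform(-1, 1, size=6)

def monitor(pose_queue, period, n_samples):
    # Producer: timestamp each pose reading, as scheduler.monitorpos does.
    for _ in range(n_samples):
        pose_queue.put(np.concatenate(([time.time()], fake_pose())))
        time.sleep(period)

pose_queue = Queue(maxsize=1000)
worker = Thread(target=monitor, args=(pose_queue, 0.01, 20))
worker.start()
worker.join()

samples = [pose_queue.get() for _ in range(pose_queue.qsize())]
np.savetxt("poses.csv", np.round(np.array(samples), 6), delimiter=",")
print(f"captured {len(samples)} pose samples")
```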
oslokommune/dataplatform-batch-jobs | [
"c6f2a31d86676dae62bbdcd8822c6f9320c9aec2"
] | [
"tests/s3_access_log_aggregator/test_aggregate_to_db.py"
] | [
"from io import BytesIO\nfrom unittest.mock import Mock\n\nimport pandas as pd\nfrom fastparquet import write as pq_write\n\nfrom batch.models import DatasetRetrievals\nfrom batch.s3_access_log_aggregator.aggregate_to_db import (\n count_get_requests,\n aggregate_to_db,\n read_parquet,\n)\n\n\ndef test_read_parquet():\n input_df = pd.read_csv(\"tests/s3_access_log_aggregator/data/processed-1.csv\")\n input_data = BytesIO()\n # Don't permit actually closing the IO stream, since that will discard the\n # buffer before we get a chance to read it.\n input_data.close = Mock()\n pq_write(input_data, input_df, open_with=lambda io, mode: io)\n input_data.seek(0)\n input_source = Mock(path=input_data)\n\n df = read_parquet(input_source)\n\n assert len(df) == 4\n assert \"operation\" in df\n assert \"dataset_id\" in df\n\n\ndef test_aggregate_to_db(test_db_session):\n input_df_1 = pd.read_csv(\"tests/s3_access_log_aggregator/data/processed-1.csv\")\n input_df_2 = pd.read_csv(\"tests/s3_access_log_aggregator/data/processed-2.csv\")\n input_data_1 = BytesIO()\n input_data_2 = BytesIO()\n input_data_1.close = Mock()\n input_data_2.close = Mock()\n pq_write(input_data_1, input_df_1, open_with=lambda io, mode: io)\n pq_write(input_data_2, input_df_2, open_with=lambda io, mode: io)\n input_data_1.seek(0)\n input_data_2.seek(0)\n input_source_1 = Mock(path=input_data_1)\n input_source_2 = Mock(path=input_data_2)\n\n result = BytesIO()\n result.close = Mock()\n output_target = Mock(open=Mock(return_value=result))\n\n aggregate_to_db([input_source_1, input_source_2], output_target, \"2020-03-03\")\n df = pd.read_parquet(result)\n\n assert len(df) == 2\n assert df.loc[df[\"dataset_id\"] == \"renovasjonsbiler-status\"][\"count\"].squeeze() == 3\n assert (\n df.loc[df[\"dataset_id\"] == \"renovasjonsbiler-status-2\"][\"count\"].squeeze() == 2\n )\n\n assert test_db_session.query(DatasetRetrievals).count() == 2\n assert (\n test_db_session.query(DatasetRetrievals.count)\n .filter_by(dataset_id=\"renovasjonsbiler-status\")\n .scalar()\n == 3\n )\n assert (\n test_db_session.query(DatasetRetrievals.count)\n .filter_by(dataset_id=\"renovasjonsbiler-status-2\")\n .scalar()\n == 2\n )\n\n\ndef test_count_get_requests():\n d = {\n \"dataset_id\": [\"1234\", \"5678\", \"094563\", \"1234\", \"5678\", \"1234\"],\n \"operation\": [\n \"REST.PUT.PART\",\n \"REST.GET.OBJECT\",\n \"REST.GET.OBJECT\",\n \"REST.GET.OBJECT\",\n \"REST.GET.OBJECT\",\n \"REST.GET.OBJECT\",\n ],\n }\n df = pd.DataFrame(data=d)\n result = count_get_requests(df)\n # Rows\n assert len(result) == 3\n # Count\n assert result.loc[result[\"dataset_id\"] == \"1234\"][\"count\"].values[0] == 2\n assert result.loc[result[\"dataset_id\"] == \"5678\"][\"count\"].values[0] == 2\n assert result.loc[result[\"dataset_id\"] == \"094563\"][\"count\"].values[0] == 1\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.read_parquet"
]
] |
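`test_count_get_requests` in the row above expects `count_get_requests` to tally `REST.GET.OBJECT` rows per `dataset_id`. An illustrative pandas implementation consistent with those assertions (the real function in `batch.s3_access_log_aggregator` may be written differently):

```python
import pandas as pd

def count_get_requests(df):
    """Count REST.GET.OBJECT rows per dataset_id (illustrative version)."""
    gets = df[df["operation"] == "REST.GET.OBJECT"]
    return gets.groupby("dataset_id").size().reset_index(name="count")

df = pd.DataFrame({
    "dataset_id": ["1234", "5678", "094563", "1234", "5678", "1234"],
    "operation": ["REST.PUT.PART"] + ["REST.GET.OBJECT"] * 5,
})
result = count_get_requests(df)
print(result)
# 1234 -> 2, 5678 -> 2, 094563 -> 1, matching the test assertions above
```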
tkon3/pytorch-optimizer | [
"e5578453b79143331c30fd76b08721b45dce86d3"
] | [
"torch_optimizer/adamod.py"
] | [
"import math\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\nfrom .types import Betas2, OptFloat, OptLossClosure, Params\n\n\n__all__ = ('AdaMod',)\n\n\nclass AdaMod(Optimizer):\n r\"\"\"Implements AccSGD algorithm.\n\n It has been proposed in `Adaptive and Momental Bounds for Adaptive\n Learning Rate Methods`__.\n\n Arguments:\n params: iterable of parameters to optimize or dicts defining\n parameter groups\n lr: learning rate (default: 1e-3)\n betas: coefficients used for computing running averages of gradient\n and its square (default: (0.9, 0.999))\n beta3: smoothing coefficient for adaptive learning rates\n (default: 0.9999)\n eps: term added to the denominator to improve numerical stability\n (default: 1e-8)\n weight_decay: weight decay (L2 penalty) (default: 0)\n\n Example:\n >>> import torch_optimizer as optim\n >>> optimizer = optim.AdaMod(model.parameters(), lr=0.1)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ https://arxiv.org/abs/1910.12249\n \"\"\"\n\n def __init__(\n self,\n params: Params,\n lr: float = 1e-3,\n betas: Betas2 = (0.9, 0.999),\n beta3: float = 0.999,\n eps: float = 1e-8,\n weight_decay: float = 0,\n ) -> None:\n if not 0.0 <= lr:\n raise ValueError(f'Invalid learning rate: {lr}')\n if not 0.0 <= eps:\n raise ValueError(f'Invalid epsilon value: {eps}')\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')\n if not 0.0 <= beta3 < 1.0:\n raise ValueError(f'Invalid beta3 parameter: {beta3}')\n if not 0.0 <= weight_decay:\n raise ValueError(f'Invalid weight_decay value: {weight_decay}')\n defaults = dict(\n lr=lr, betas=betas, beta3=beta3, eps=eps, weight_decay=weight_decay\n )\n super(AdaMod, self).__init__(params, defaults)\n\n def step(self, closure: OptLossClosure = None) -> OptFloat:\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure: A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n msg = 'AdaMod does not support sparse gradients'\n raise RuntimeError(msg)\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p)\n # Exponential moving average of actual learning rates\n state['exp_avg_lr'] = torch.zeros_like(p)\n\n exp_avg, exp_avg_sq, exp_avg_lr = (\n state['exp_avg'],\n state['exp_avg_sq'],\n state['exp_avg_lr'],\n )\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = (\n group['lr']\n * math.sqrt(bias_correction2)\n / bias_correction1\n )\n\n if group['weight_decay'] != 0:\n p.data.add_(-group['weight_decay'] * group['lr'], p.data)\n\n # Applies momental bounds on actual learning rates\n step_size = torch.full_like(denom, step_size)\n 
step_size.div_(denom)\n exp_avg_lr.mul_(group['beta3']).add_(\n 1 - group['beta3'], step_size\n )\n step_size = torch.min(step_size, exp_avg_lr)\n step_size.mul_(exp_avg)\n\n p.data.add_(-step_size)\n\n return loss\n"
] | [
[
"torch.min",
"torch.full_like",
"torch.zeros_like"
]
] |
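AdaMod's `step` method in the row above computes an Adam-style per-element step size, smooths it with a third exponential moving average (`beta3`), and uses the element-wise minimum of the two as the "momental bound". A numpy transcription of one such update for a single parameter array, with weight decay omitted and the default hyperparameters from the class signature:

```python
import numpy as np

def adamod_step(p, grad, state, lr=1e-3, betas=(0.9, 0.999), beta3=0.999, eps=1e-8):
    """One AdaMod-style update, mirroring the PyTorch loop above (no weight decay)."""
    beta1, beta2 = betas
    state["step"] += 1
    # First and second moment EMAs, as in Adam.
    state["exp_avg"] = beta1 * state["exp_avg"] + (1 - beta1) * grad
    state["exp_avg_sq"] = beta2 * state["exp_avg_sq"] + (1 - beta2) * grad * grad

    denom = np.sqrt(state["exp_avg_sq"]) + eps
    bias_c1 = 1 - beta1 ** state["step"]
    bias_c2 = 1 - beta2 ** state["step"]
    step_size = lr * np.sqrt(bias_c2) / bias_c1 / denom

    # Momental bound: clip the step size by its own exponential moving average.
    state["exp_avg_lr"] = beta3 * state["exp_avg_lr"] + (1 - beta3) * step_size
    step_size = np.minimum(step_size, state["exp_avg_lr"])
    return p - step_size * state["exp_avg"]

p = np.array([1.0, -2.0])
state = {"step": 0, "exp_avg": np.zeros_like(p),
         "exp_avg_sq": np.zeros_like(p), "exp_avg_lr": np.zeros_like(p)}
p = adamod_step(p, grad=np.array([0.1, -0.3]), state=state)
print(p)
```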
alienkrieg/peal | [
"66b48c0f3973c7f3bceece64dc6e896c9802e29a"
] | [
"peal/operators/reproduction.py"
] | [
"\"\"\"Module that provides operators that reproduce individuals.\"\"\"\n\nimport numpy as np\n\nfrom peal.community import Community\nfrom peal.operators.iteration import (\n SingleIteration,\n RandomStraightIteration,\n)\nfrom peal.operators.operator import Operator\nfrom peal.population import Population\n\n\nclass Copy(Operator):\n \"\"\"Simple reproduction operator that copies single individuals or\n populations.\n \"\"\"\n\n def __init__(self):\n super().__init__(iter_type=SingleIteration())\n\n def _process_population(\n self,\n container: Population,\n ) -> Population:\n return container.deepcopy()\n\n def _process_community(\n self,\n container: Community,\n ) -> Community:\n return container.deepcopy()\n\n\nclass Crossover(Operator):\n \"\"\"Crossover reproduction operator.\n\n Args:\n npoints (int, optional): The number of points to use for the\n gene split in the crossover operation. Defaults to 2.\n probability (float, optional): The probability of performing the\n crossover operation. Defaults to 0.5.\n \"\"\"\n\n def __init__(self, npoints: int = 2, probability: float = 0.5):\n super().__init__(\n RandomStraightIteration(batch_size=2, probability=probability)\n )\n self._npoints = npoints\n\n def _process_population(\n self,\n container: Population,\n ) -> Population:\n ind1, ind2 = container\n points = np.insert(\n np.sort(\n np.random.randint(\n 1,\n len(ind1.genes),\n size=self._npoints\n )\n ),\n [0, -1],\n [0, len(ind1.genes)]\n )\n start = self._npoints % 2\n off1, off2 = ind1.copy(), ind2.copy()\n for i in range(start, self._npoints+(1-start), 2):\n off1.genes[points[i]:points[i+1]] = ind2.genes[\n points[i]:points[i+1]\n ]\n off2.genes[points[i]:points[i+1]] = ind1.genes[\n points[i]:points[i+1]\n ]\n return Population((off1, off2))\n\n\nclass DiscreteRecombination(Operator):\n \"\"\"Reproduction operator that mixes the genes of multiple\n individuals to create a new individual. Each individual gives the\n same proportion of their genes.\n This only works as intuitively explained if the number of input\n individuals doesn't exceed the number of genes an individual has and\n if each individual has the same number of genes.\n\n Args:\n in_size (int, optional): The number of input individuals to mix\n for the one output individual. Defaults to 2.\n probability (float, optional): The probability of performing the\n crossover operation. Defaults to 0.5.\n \"\"\"\n\n def __init__(self, in_size: int = 2, probability: float = 0.5):\n super().__init__(\n RandomStraightIteration(\n batch_size=in_size,\n probability=probability,\n ),\n )\n\n def _process_population(\n self,\n container: Population,\n ) -> Population:\n if container.size == 1:\n return container.deepcopy()\n\n parts = [\n container[0].genes.shape[0] // container.size\n for _ in range(container.size)\n ]\n missing = container[0].genes.shape[0] % container.size\n for i in range(missing):\n parts[i] += 1\n parts.insert(0, 0)\n\n genes = np.zeros_like(container[0].genes)\n shuffled_indices = np.arange(container[0].genes.shape[0])\n np.random.shuffle(shuffled_indices)\n for i in range(len(parts)-1):\n genes[shuffled_indices[parts[i]:parts[i]+parts[i+1]]] = (\n container[i].genes[\n shuffled_indices[parts[i]:parts[i]+parts[i+1]]\n ]\n )\n new_ind = container[0].copy()\n new_ind.genes = genes\n return Population(new_ind)\n"
] | [
[
"numpy.arange",
"numpy.zeros_like",
"numpy.random.shuffle"
]
] |
ChisenZhang/Curve-Text-Detector | [
"2126a660d3baf3e84a76e77352597576519c35a2"
] | [
"lib/fast_rcnn/config.py"
] | [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Fast R-CNN config system.\n\nThis file specifies default config options for Fast R-CNN. You should not\nchange values in this file. Instead, you should write a config file (in yaml)\nand use cfg_from_file(yaml_file) to load it and override the default options.\n\nMost tools in $ROOT/tools take a --cfg option to specify an override file.\n - See tools/{train,test}_net.py for example code that uses cfg_from_file()\n - See experiments/cfgs/*.yml for example YAML config override files\n\"\"\"\n\nimport os\nimport os.path as osp\nimport numpy as np\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by:\n# from fast_rcnn_config import cfg\ncfg = __C\n\n#\n# Training options\n#\n\n__C.TRAIN = edict()\n\n# Scales to use during training (can list multiple scales)\n# Each scale is the pixel size of an image's shortest side\n__C.TRAIN.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1000\n\n# Images to use per minibatch\n__C.TRAIN.IMS_PER_BATCH = 2\n\n# Minibatch size (number of regions of interest [ROIs])\n__C.TRAIN.BATCH_SIZE = 128\n\n# Fraction of minibatch that is labeled foreground (i.e. class > 0)\n__C.TRAIN.FG_FRACTION = 0.25\n\n# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)\n__C.TRAIN.FG_THRESH = 0.5\n\n# Overlap threshold for a ROI to be considered background (class = 0 if\n# overlap in [LO, HI))\n__C.TRAIN.BG_THRESH_HI = 0.5\n__C.TRAIN.BG_THRESH_LO = 0.1\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n# Train bounding-box regressors\n__C.TRAIN.BBOX_REG = True\n\n# Overlap required between a ROI and ground-truth box in order for that ROI to\n# be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 10000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_INFIX = ''\n\n# Use a prefetch thread in roi_data_layer.layer\n# So far I haven't found this useful; likely more engineering work is required\n__C.TRAIN.USE_PREFETCH = False\n\n# Normalize the targets (subtract empirical mean, divide by empirical stddev)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS = True\n# Deprecated (inside weights)\n__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n\n# curve\n__C.TRAIN.INFO_INSIDE_WEIGHTS = (0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0) # 32\n\n\n# Normalize the targets using \"precomputed\" (or made up) means and stdevs\n# (BBOX_NORMALIZE_TARGETS must also be True)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False\n__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)\n__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)\n\n__C.TRAIN.INFO_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) # 32\n__C.TRAIN.INFO_NORMALIZE_STDS = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1) # 32\n\n__C.TRAIN.RPN_NORMALIZE_TARGETS = False\n__C.TRAIN.RPN_NORMALIZE_MEANS = None\n__C.TRAIN.RPN_NORMALIZE_STDS = None\n\n# Train using these proposals\n__C.TRAIN.PROPOSAL_METHOD = 'selective_search'\n\n# Make minibatches from images that have similar aspect ratios (i.e. both\n# tall and thin or both short and wide) in order to avoid wasting computation\n# on zero-padding.\n__C.TRAIN.ASPECT_GROUPING = True\n\n# Use RPN to detect objects\n__C.TRAIN.HAS_RPN = False\n# IOU >= thresh: positive example\n__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\n# IOU < thresh: negative example\n__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\n# If an anchor statisfied by positive and negative conditions set to negative\n__C.TRAIN.RPN_CLOBBER_POSITIVES = False\n# Max number of foreground examples\n__C.TRAIN.RPN_FG_FRACTION = 0.5\n# Total number of examples\n__C.TRAIN.RPN_BATCHSIZE = 256\n# NMS threshold used on RPN proposals\n__C.TRAIN.RPN_NMS_THRESH = 0.5\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TRAIN.RPN_POST_NMS_TOP_N = 2000\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n__C.TRAIN.RPN_MIN_SIZE = 16\n# Deprecated (outside weights)\n__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n# Give the positive RPN examples weight of p * 1 / {num positives}\n# and give negatives a weight of (1 - p)\n# Set to -1.0 to use uniform example weighting\n__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n\n# whether use class aware box or not\n__C.TRAIN.AGNOSTIC = False\n\n#\n# Testing options\n#\n\n__C.TEST = edict()\n\n# Scales to use during testing (can list multiple scales)\n# Each scale is the pixel size of an image's shortest side\n__C.TEST.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1000\n\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n__C.TEST.PNMS = 0.3\n__C.TEST.USE_PNMS = True\n\n# Experimental: treat the (K+1) units in the cls_score layer as linear\n# predictors (trained, eg, with one-vs-rest SVMs).\n__C.TEST.SVM = False\n\n# Test using bounding-box regressors\n__C.TEST.BBOX_REG = True\n\n# Propose boxes\n__C.TEST.HAS_RPN = False\n\n# Test using these proposals\n__C.TEST.PROPOSAL_METHOD = 'selective_search'\n\n## NMS threshold used on RPN proposals\n__C.TEST.RPN_NMS_THRESH = 0.7\n## Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TEST.RPN_PRE_NMS_TOP_N = 6000\n## Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TEST.RPN_POST_NMS_TOP_N = 300\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n__C.TEST.RPN_MIN_SIZE = 16\n\n# whether use class aware box or not\n__C.TEST.AGNOSTIC = False\n\n\n#\n# MISC\n#\n\n# The mapping from image coordinates to feature map coordinates might cause\n# some boxes that are distinct in image space to become identical in feature\n# coordinates. 
If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor\n# for identifying duplicate boxes.\n# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16\n__C.DEDUP_BOXES = 1./16.\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n# For reproducibility\n__C.RNG_SEED = 3\n\n# A small number that's used many times\n__C.EPS = 1e-14\n\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\n\n# Data directory\n__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))\n\n# Model directory\n__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))\n\n# Name (or path to) the matlab executable\n__C.MATLAB = 'matlab'\n\n# Place outputs under an experiments directory\n__C.EXP_DIR = 'default'\n\n# Use GPU implementation of non-maximum suppression\n__C.USE_GPU_NMS = True\n\n# Default GPU device id\n__C.GPU_ID = 0\n\n\ndef get_output_dir(imdb, net=None):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if net is not None:\n outdir = osp.join(outdir, net.name)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\ndef _merge_a_into_b(a, b):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n if type(a) is not edict:\n return\n\n for k, v in a.iteritems():\n # a must specify keys that are in b\n if not b.has_key(k):\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. {}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print('Error under config key: {}'.format(k))\n raise\n else:\n b[k] = v\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file and merge it into the default options.\"\"\"\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)\n\ndef cfg_from_list(cfg_list):\n \"\"\"Set config keys via list (e.g., from command line).\"\"\"\n from ast import literal_eval\n assert len(cfg_list) % 2 == 0\n for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n key_list = k.split('.')\n d = __C\n for subkey in key_list[:-1]:\n assert d.has_key(subkey)\n d = d[subkey]\n subkey = key_list[-1]\n assert d.has_key(subkey)\n try:\n value = literal_eval(v)\n except:\n # handle the case when v is a string literal\n value = v\n assert type(value) == type(d[subkey]), \\\n 'type {} does not match original type {}'.format(\n type(value), type(d[subkey]))\n d[subkey] = value\n"
] | [
[
"numpy.array"
]
] |
TayaV60/SimpleHTR | [
"5c9e3488e7a581c3f953270802d8a25d6c5493b9"
] | [
"src/model.py"
] | [
"import os\nimport sys\nfrom typing import List, Tuple\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom dataloader_iam import Batch\n\n# Disable eager mode\ntf.compat.v1.disable_eager_execution()\n\n\nclass DecoderType:\n \"\"\"CTC decoder types.\"\"\"\n BestPath = 0\n BeamSearch = 1\n WordBeamSearch = 2\n\n\nclass Model:\n \"\"\"Minimalistic TF model for HTR.\"\"\"\n\n def __init__(self,\n char_list: List[str],\n decoder_type: str = DecoderType.BestPath,\n must_restore: bool = False,\n dump: bool = False) -> None:\n \"\"\"Init model: add CNN, RNN and CTC and initialize TF.\"\"\"\n self.dump = dump\n self.char_list = char_list\n self.decoder_type = decoder_type\n self.must_restore = must_restore\n self.snap_ID = 0\n\n # Whether to use normalization over a batch or a population\n self.is_train = tf.compat.v1.placeholder(tf.bool, name='is_train')\n\n # input image batch\n self.input_imgs = tf.compat.v1.placeholder(tf.float32, shape=(None, None, None))\n\n # setup CNN, RNN and CTC\n self.setup_cnn()\n self.setup_rnn()\n self.setup_ctc()\n\n # setup optimizer to train NN\n self.batches_trained = 0\n self.batches_validated = 0\n self.update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(self.update_ops):\n self.optimizer = tf.compat.v1.train.AdamOptimizer().minimize(self.loss)\n\n # initialize TF\n self.sess, self.saver = self.setup_tf()\n\n def setup_cnn(self) -> None:\n \"\"\"Create CNN layers.\"\"\"\n cnn_in4d = tf.expand_dims(input=self.input_imgs, axis=3)\n\n # list of parameters for the layers\n kernel_vals = [5, 5, 3, 3, 3]\n feature_vals = [1, 32, 64, 128, 128, 256]\n stride_vals = pool_vals = [(2, 2), (2, 2), (1, 2), (1, 2), (1, 2)]\n num_layers = len(stride_vals)\n\n # create layers\n pool = cnn_in4d # input to first CNN layer\n for i in range(num_layers):\n kernel = tf.Variable(\n tf.random.truncated_normal([kernel_vals[i], kernel_vals[i], feature_vals[i], feature_vals[i + 1]],\n stddev=0.1))\n conv = tf.nn.conv2d(input=pool, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))\n conv_norm = tf.compat.v1.layers.batch_normalization(conv, training=self.is_train)\n relu = tf.nn.relu(conv_norm)\n pool = tf.nn.max_pool2d(input=relu, ksize=(1, pool_vals[i][0], pool_vals[i][1], 1),\n strides=(1, stride_vals[i][0], stride_vals[i][1], 1), padding='VALID')\n\n self.cnn_out_4d = pool\n\n def setup_rnn(self) -> None:\n \"\"\"Create RNN layers.\"\"\"\n rnn_in3d = tf.squeeze(self.cnn_out_4d, axis=[2])\n\n # basic cells which is used to build RNN\n num_hidden = 256\n cells = [tf.compat.v1.nn.rnn_cell.LSTMCell(num_units=num_hidden, state_is_tuple=True) for _ in\n range(2)] # 2 layers\n\n # stack basic cells\n stacked = tf.compat.v1.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)\n\n # bidirectional RNN\n # BxTxF -> BxTx2H\n (fw, bw), _ = tf.compat.v1.nn.bidirectional_dynamic_rnn(cell_fw=stacked, cell_bw=stacked, inputs=rnn_in3d,\n dtype=rnn_in3d.dtype)\n\n # BxTxH + BxTxH -> BxTx2H -> BxTx1X2H\n concat = tf.expand_dims(tf.concat([fw, bw], 2), 2)\n\n # project output to chars (including blank): BxTx1x2H -> BxTx1xC -> BxTxC\n kernel = tf.Variable(tf.random.truncated_normal([1, 1, num_hidden * 2, len(self.char_list) + 1], stddev=0.1))\n self.rnn_out_3d = tf.squeeze(tf.nn.atrous_conv2d(value=concat, filters=kernel, rate=1, padding='SAME'),\n axis=[2])\n\n def setup_ctc(self) -> None:\n \"\"\"Create CTC loss and decoder.\"\"\"\n # BxTxC -> TxBxC\n self.ctc_in_3d_tbc = tf.transpose(a=self.rnn_out_3d, perm=[1, 0, 2])\n # ground truth text as 
sparse tensor\n self.gt_texts = tf.SparseTensor(tf.compat.v1.placeholder(tf.int64, shape=[None, 2]),\n tf.compat.v1.placeholder(tf.int32, [None]),\n tf.compat.v1.placeholder(tf.int64, [2]))\n\n # calc loss for batch\n self.seq_len = tf.compat.v1.placeholder(tf.int32, [None])\n self.loss = tf.reduce_mean(\n input_tensor=tf.compat.v1.nn.ctc_loss(labels=self.gt_texts, inputs=self.ctc_in_3d_tbc,\n sequence_length=self.seq_len,\n ctc_merge_repeated=True))\n\n # calc loss for each element to compute label probability\n self.saved_ctc_input = tf.compat.v1.placeholder(tf.float32,\n shape=[None, None, len(self.char_list) + 1])\n self.loss_per_element = tf.compat.v1.nn.ctc_loss(labels=self.gt_texts, inputs=self.saved_ctc_input,\n sequence_length=self.seq_len, ctc_merge_repeated=True)\n\n # best path decoding or beam search decoding\n if self.decoder_type == DecoderType.BestPath:\n self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctc_in_3d_tbc, sequence_length=self.seq_len)\n elif self.decoder_type == DecoderType.BeamSearch:\n self.decoder = tf.nn.ctc_beam_search_decoder(inputs=self.ctc_in_3d_tbc, sequence_length=self.seq_len,\n beam_width=50)\n # word beam search decoding (see https://github.com/githubharald/CTCWordBeamSearch)\n elif self.decoder_type == DecoderType.WordBeamSearch:\n # prepare information about language (dictionary, characters in dataset, characters forming words)\n chars = ''.join(self.char_list)\n word_chars = open('../model/wordCharList.txt').read().splitlines()[0]\n corpus = open('../data/corpus.txt').read()\n\n # decode using the \"Words\" mode of word beam search\n from word_beam_search import WordBeamSearch\n self.decoder = WordBeamSearch(50, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'),\n word_chars.encode('utf8'))\n\n # the input to the decoder must have softmax already applied\n self.wbs_input = tf.nn.softmax(self.ctc_in_3d_tbc, axis=2)\n\n def setup_tf(self) -> Tuple[tf.compat.v1.Session, tf.compat.v1.train.Saver]:\n \"\"\"Initialize TF.\"\"\"\n print('Python: ' + sys.version)\n print('Tensorflow: ' + tf.__version__)\n\n sess = tf.compat.v1.Session() # TF session\n\n saver = tf.compat.v1.train.Saver(max_to_keep=1) # saver saves model to file\n model_dir = '../model/'\n latest_snapshot = tf.train.latest_checkpoint(model_dir) # is there a saved model?\n\n # if model must be restored (for inference), there must be a snapshot\n if self.must_restore and not latest_snapshot:\n raise Exception('No saved model found in: ' + model_dir)\n\n # load saved model if available\n if latest_snapshot:\n print('Init with stored values from ' + latest_snapshot)\n saver.restore(sess, latest_snapshot)\n else:\n print('Init with new values')\n sess.run(tf.compat.v1.global_variables_initializer())\n\n return sess, saver\n\n def to_sparse(self, texts: List[str]) -> Tuple[List[List[int]], List[int], List[int]]:\n \"\"\"Put ground truth texts into sparse tensor for ctc_loss.\"\"\"\n indices = []\n values = []\n shape = [len(texts), 0] # last entry must be max(labelList[i])\n\n # go over all texts\n for batchElement, text in enumerate(texts):\n # convert to string of label (i.e. class-ids)\n label_str = [self.char_list.index(c) for c in text]\n # sparse tensor must have size of max. 
label-string\n if len(label_str) > shape[1]:\n shape[1] = len(label_str)\n # put each label into sparse tensor\n for i, label in enumerate(label_str):\n indices.append([batchElement, i])\n values.append(label)\n\n return indices, values, shape\n\n def decoder_output_to_text(self, ctc_output: tuple, batch_size: int) -> List[str]:\n \"\"\"Extract texts from output of CTC decoder.\"\"\"\n\n # word beam search: already contains label strings\n if self.decoder_type == DecoderType.WordBeamSearch:\n label_strs = ctc_output\n\n # TF decoders: label strings are contained in sparse tensor\n else:\n # ctc returns tuple, first element is SparseTensor\n decoded = ctc_output[0][0]\n\n # contains string of labels for each batch element\n label_strs = [[] for _ in range(batch_size)]\n\n # go over all indices and save mapping: batch -> values\n for (idx, idx2d) in enumerate(decoded.indices):\n label = decoded.values[idx]\n batch_element = idx2d[0] # index according to [b,t]\n label_strs[batch_element].append(label)\n\n # map labels to chars for all batch elements\n return [''.join([self.char_list[c] for c in labelStr]) for labelStr in label_strs]\n\n def train_batch(self, batch: Batch) -> float:\n \"\"\"Feed a batch into the NN to train it.\"\"\"\n num_batch_elements = len(batch.imgs)\n max_text_len = batch.imgs[0].shape[0] // 4\n sparse = self.to_sparse(batch.gt_texts)\n eval_list = [self.optimizer, self.loss]\n feed_dict = {self.input_imgs: batch.imgs, self.gt_texts: sparse,\n self.seq_len: [max_text_len] * num_batch_elements, self.is_train: True}\n _, loss_val = self.sess.run(eval_list, feed_dict)\n self.batches_trained += 1\n return loss_val\n\n def validate_batch(self, batch: Batch) -> float:\n \"\"\"Feed a batch into the NN to validate it.\"\"\"\n num_batch_elements = len(batch.imgs)\n max_text_len = batch.imgs[0].shape[0] // 4\n sparse = self.to_sparse(batch.gt_texts)\n eval_list = [self.loss]\n feed_dict = {self.input_imgs: batch.imgs, self.gt_texts: sparse,\n self.seq_len: [max_text_len] * num_batch_elements, self.is_train: False}\n loss_vals = self.sess.run(eval_list, feed_dict)\n self.batches_validated += 1\n return loss_vals\n\n\n @staticmethod\n def dump_nn_output(rnn_output: np.ndarray) -> None:\n \"\"\"Dump the output of the NN to CSV file(s).\"\"\"\n dump_dir = '../dump/'\n if not os.path.isdir(dump_dir):\n os.mkdir(dump_dir)\n\n # iterate over all batch elements and create a CSV file for each one\n max_t, max_b, max_c = rnn_output.shape\n for b in range(max_b):\n csv = ''\n for t in range(max_t):\n for c in range(max_c):\n csv += str(rnn_output[t, b, c]) + ';'\n csv += '\\n'\n fn = dump_dir + 'rnnOutput_' + str(b) + '.csv'\n print('Write dump of NN to file: ' + fn)\n with open(fn, 'w') as f:\n f.write(csv)\n\n def infer_batch(self, batch: Batch, calc_probability: bool = False, probability_of_gt: bool = False):\n \"\"\"Feed a batch into the NN to recognize the texts.\"\"\"\n\n # decode, optionally save RNN output\n num_batch_elements = len(batch.imgs)\n\n # put tensors to be evaluated into list\n eval_list = []\n\n if self.decoder_type == DecoderType.WordBeamSearch:\n eval_list.append(self.wbs_input)\n else:\n eval_list.append(self.decoder)\n\n if self.dump or calc_probability:\n eval_list.append(self.ctc_in_3d_tbc)\n\n # sequence length depends on input image size (model downsizes width by 4)\n max_text_len = batch.imgs[0].shape[0] // 4\n\n # dict containing all tensor fed into the model\n feed_dict = {self.input_imgs: batch.imgs, self.seq_len: [max_text_len] * num_batch_elements,\n 
self.is_train: False}\n\n # evaluate model\n eval_res = self.sess.run(eval_list, feed_dict)\n\n # TF decoders: decoding already done in TF graph\n if self.decoder_type != DecoderType.WordBeamSearch:\n decoded = eval_res[0]\n # word beam search decoder: decoding is done in C++ function compute()\n else:\n decoded = self.decoder.compute(eval_res[0])\n\n # map labels (numbers) to character string\n texts = self.decoder_output_to_text(decoded, num_batch_elements)\n\n # feed RNN output and recognized text into CTC loss to compute labeling probability\n probs = None\n if calc_probability:\n sparse = self.to_sparse(batch.gt_texts) if probability_of_gt else self.to_sparse(texts)\n ctc_input = eval_res[1]\n eval_list = self.loss_per_element\n feed_dict = {self.saved_ctc_input: ctc_input, self.gt_texts: sparse,\n self.seq_len: [max_text_len] * num_batch_elements, self.is_train: False}\n loss_vals = self.sess.run(eval_list, feed_dict)\n probs = np.exp(-loss_vals)\n\n # dump the output of the NN to CSV file(s)\n if self.dump:\n self.dump_nn_output(eval_res[1])\n\n return texts, probs\n\n def save(self) -> None:\n \"\"\"Save model to file.\"\"\"\n self.snap_ID += 1\n self.saver.save(self.sess, '../model/snapshot', global_step=self.snap_ID)\n"
] | [
[
"tensorflow.random.truncated_normal",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.squeeze",
"tensorflow.compat.v1.nn.rnn_cell.MultiRNNCell",
"tensorflow.concat",
"tensorflow.compat.v1.layers.batch_normalization",
"tensorflow.nn.softmax",
"tensorflow.nn.ctc_greedy_decoder",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.train.Saver",
"tensorflow.nn.atrous_conv2d",
"tensorflow.compat.v1.nn.rnn_cell.LSTMCell",
"tensorflow.transpose",
"tensorflow.compat.v1.Session",
"tensorflow.expand_dims",
"tensorflow.nn.max_pool2d",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.nn.ctc_loss",
"tensorflow.control_dependencies",
"tensorflow.nn.ctc_beam_search_decoder",
"numpy.exp",
"tensorflow.nn.conv2d",
"tensorflow.train.latest_checkpoint",
"tensorflow.nn.relu",
"tensorflow.compat.v1.nn.bidirectional_dynamic_rnn",
"tensorflow.compat.v1.get_collection"
]
] |
zilishen/gala | [
"5415c817a7cc5e1a5086217332466ffc7af16ab3"
] | [
"gala/dynamics/tests/test_actionangle_staeckel.py"
] | [
"# Third-party\nfrom astropy.constants import G\nimport astropy.units as u\nimport numpy as np\nimport pytest\n\n# gala\nfrom gala.dynamics import get_staeckel_fudge_delta, PhaseSpacePosition\nimport gala.potential as gp\nfrom gala.units import galactic\nfrom .helpers import HAS_GALPY\n\n\[email protected](not HAS_GALPY,\n reason=\"requires galpy to run this test\")\ndef test_staeckel_fudge_delta():\n import galpy.potential as galpy_pot\n from galpy.actionAngle import estimateDeltaStaeckel\n\n ro = 8.1 * u.kpc\n vo = 229 * u.km/u.s\n\n paired_potentials = []\n\n # Miyamoto-Nagai\n potential = gp.MiyamotoNagaiPotential(m=6e10*u.Msun, a=3*u.kpc, b=0.3*u.kpc,\n units=galactic)\n amp = (G * potential.parameters['m']).to_value(vo**2 * ro)\n a = potential.parameters['a'].to_value(ro)\n b = potential.parameters['b'].to_value(ro)\n galpy_potential = galpy_pot.MiyamotoNagaiPotential(amp=amp, a=a, b=b,\n ro=ro, vo=vo)\n paired_potentials.append((potential, galpy_potential))\n\n # Hernquist\n potential = gp.HernquistPotential(m=6e10*u.Msun, c=0.3*u.kpc,\n units=galactic)\n amp = (G * potential.parameters['m']).to_value(vo**2 * ro)\n a = potential.parameters['c'].to_value(ro)\n galpy_potential = galpy_pot.HernquistPotential(amp=amp, a=a,\n ro=ro, vo=vo)\n paired_potentials.append((potential, galpy_potential))\n\n # NFW\n potential = gp.NFWPotential(m=6e11*u.Msun, r_s=15.6*u.kpc,\n units=galactic)\n amp = (G * potential.parameters['m']).to_value(vo**2 * ro)\n a = potential.parameters['r_s'].to_value(ro)\n galpy_potential = galpy_pot.NFWPotential(amp=amp, a=a, ro=ro, vo=vo)\n paired_potentials.append((potential, galpy_potential))\n\n # TEST:\n N = 1024\n rnd = np.random.default_rng(42)\n w = PhaseSpacePosition(pos=rnd.uniform(-10, 10, size=(3, N)) * u.kpc,\n vel=rnd.uniform(-100, 100, size=(3, N)) * u.km/u.s)\n\n R = w.cylindrical.rho.to_value(ro)\n z = w.z.to_value(ro)\n\n for p, galpy_p in paired_potentials:\n galpy_deltas = estimateDeltaStaeckel(galpy_p, R, z,\n no_median=True)\n gala_deltas = get_staeckel_fudge_delta(p, w).value\n print(p, np.allclose(gala_deltas, galpy_deltas))\n assert np.allclose(gala_deltas, galpy_deltas, atol=1e-6)\n"
] | [
[
"numpy.allclose",
"numpy.random.default_rng"
]
] |
choltz95/SMPyBandits | [
"04bc2b2bf10f8043afa5cac6589c191745735d9c"
] | [
"SMPyBandits/Experiment/Seznec19a_Fig2/main.py"
] | [
"\"\"\"\nauthor : Julien SEZNEC\nProduce the experiment and record the relevant data to reproduce Figure 2 of [Seznec et al., 2019a]\nReference: [Seznec et al., 2019a]\nRotting bandits are not harder than stochastic ones;\nJulien Seznec, Andrea Locatelli, Alexandra Carpentier, Alessandro Lazaric, Michal Valko ;\nProceedings of Machine Learning Research, PMLR 89:2564-2572, 2019.\nhttp://proceedings.mlr.press/v89/seznec19a.html\nhttps://arxiv.org/abs/1811.11043 (updated version)\n\"\"\"\n\nfrom SMPyBandits.Arms import RestedRottingGaussian\nfrom SMPyBandits.Policies import FEWA, EFF_FEWA, wSWA, GreedyOracle, SWUCB, DiscountedUCB as DUCB\nfrom SMPyBandits.Environment.MAB_rotting import repetedRuns\nimport numpy as np\nimport datetime\nimport os\nimport logging\nimport sys\n\ndate = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n### SET Policies\npolicies = [\n [FEWA, {'alpha': .06, 'delta': 1}],\n [EFF_FEWA, {'alpha' : 0.06, 'delta':1}],\n [wSWA, {'alpha' : 0.002}],\n [wSWA, {'alpha' : 0.02}],\n [wSWA, {'alpha' : 0.2}],\n [DUCB, {'gamma': 0.997}],\n [SWUCB, {'tau': 200}]\n]\npolicy_ind = 2 if len(sys.argv) == 1 else int(sys.argv[1])\npolicy = policies[policy_ind]\npolicy_name = str(policy[0](nbArms=2, **policy[1]))\npolicy_name_nospace = policy_name.replace (' ', '_')\n\n\nos.makedirs('./data/logging/', exist_ok = True)\nlogging.basicConfig(filename=os.path.join('./data/logging', date + '.log'), level=logging.INFO, format='%(asctime)s %(message)s')\nlogging.info(\"Policy : %s$\" % (policy_name))\n\nPARALLEL = False # Set positive int to indicate the number of core, -1 to use all the cores, and False to not parallelize\nREPETITIONS = 1 if len(sys.argv) < 3 else int(sys.argv[2]) # Set the number of repetitions\nHORIZON = 25000 # Horizon T\nsigma = 1 # Gaussian noise std\n\n### SET L/2 in figure 1\nlogging.info(\"CONFIG : CPU %s\" % os.cpu_count())\nlogging.info(\"CONFIG : REPETITIONS %s\" % REPETITIONS)\nlogging.info(\"CONFIG : HORIZON %s\" % HORIZON)\nlogging.info(\"CONFIG : SIGMA %s\" % sigma)\n\n### SET K arms\n\nmus = [0] + [0.001 * np.sqrt(10) ** (i) for i in range(9)]\n\ndef abruptDecayFunction(mui, muf, breakpoint):\n return lambda n : mui if n < breakpoint else muf\n\narms = [\n [RestedRottingGaussian, {'decayingFunction': abruptDecayFunction(mu, -mu, 1000), 'sigma': sigma, }] for mu in mus\n]\n\nrew, noisy_rew, time, pulls, cumul_pulls = repetedRuns(policy, arms, rep=REPETITIONS, T=HORIZON, parallel=PARALLEL)\noracle_rew, noisy_oracle_rew, oracle_time, oracle_pull, oracle_cumul_pulls = repetedRuns([GreedyOracle, {}], arms, rep=1, T=HORIZON, oracle=True)\nregret = oracle_rew - rew\ndiffpulls = np.abs(cumul_pulls - oracle_cumul_pulls)\nlogging.info(\"EVENT : SAVING ... \")\npath_regret = os.path.join('./data/', 'REGRET_' + policy_name_nospace + '_' + date )\npath_diffpull = os.path.join('./data/', 'DIFFPULL_' + policy_name_nospace + '_' + date )\nnp.save(path_regret, regret)\nnp.save(path_diffpull, diffpulls)\nlogging.info(\"EVENT : END ... \")\n\n\n\n\n"
] | [
[
"numpy.sqrt",
"numpy.save",
"numpy.abs"
]
] |
linghtiin/test | [
"4718dfbe33768fa6e623e27933199cbb21d440ae"
] | [
"Python/Courese/Linear Regression/Logistic Regression.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 18 02:43:27 2019\r\n\r\n@author: z\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.stats as ss\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nh = 1\r\nsd = 1\r\nn = 50\r\nn_n = 1000\r\ndef gen_data(n, h, sd1, sd2):\r\n \"\"\" xxx \"\"\"\r\n x1 = ss.norm.rvs(-h, sd1, n)\r\n y1 = ss.norm.rvs(0, sd1, n)\r\n \r\n x2 = ss.norm.rvs(h, sd2, n)\r\n y2 = ss.norm.rvs(0, sd2, n)\r\n return (x1, y1, x2, y2)\r\n\r\ndef plot_data(x1, y1, x2, y2):\r\n plt.figure()\r\n plt.plot(x1, y1, \"o\", ms=2)\r\n plt.plot(x2, y2, \"o\", ms=2)\r\n plt.xlabel(\"$X_1$\")\r\n plt.ylabel(\"$X_2$\")\r\n \r\ndef plot_probs(ax, clf, class_no):\r\n xx1, xx2 = np.meshgrid(np.arange(-5, 5, 0.1), np.arange(-5, 5, 0.1))\r\n probs = clf.predict_proba(np.stack((xx1.ravel(), xx2.ravel()), axis=1))\r\n Z = probs[:,class_no]\r\n Z = Z.reshape(xx1.shape)\r\n CS = ax.contourf(xx1, xx2, Z)\r\n cbar = plt.colorbar(CS)\r\n plt.xlabel(\"$X_1$\")\r\n plt.ylabel(\"$X_2$\")\r\n\r\n\r\n\r\n(x1, y1, x2, y2) = gen_data(n_n, 1.5, 1, 1.5)\r\nX = np.vstack((np.vstack((x1,y1)).T, np.vstack((x2,y2)).T))\r\ny = np.hstack((np.repeat(1, n_n), np.repeat(2, n_n)))\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, random_state=1)\r\n\r\nclf = LogisticRegression()\r\nclf.fit(X_train, y_train)\r\nprint(\"测试数据:\",clf.score(X_test, y_test))\r\nprint(\"单点分类概率:\",clf.predict_proba(np.array([-2, 0]).reshape(1, -1)));\r\nprint(\"单点分类:\",clf.predict(np.array([-2, 0]).reshape(1, -1)));\r\n\r\nplt.figure(figsize=(5,8))\r\nax = plt.subplot(211)\r\nplot_probs(ax, clf, 0)\r\nplt.title(\"Pred. prob for class 1\")\r\nax = plt.subplot(212)\r\nplot_probs(ax, clf, 1)\r\nplt.title(\"Pred. prob for class 2\");\r\nplot_data(x1, y1, x2, y2)\r\n\r\n\r\n"
] | [
[
"numpy.vstack",
"scipy.stats.norm.rvs",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlabel",
"numpy.repeat",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.colorbar",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"matplotlib.pyplot.plot",
"sklearn.model_selection.train_test_split"
]
] |
mxz96102/ray | [
"02768ad707f615a47a4a5f28ae333c25012baaa1"
] | [
"python/ray/tests/test_advanced_3.py"
] | [
"# coding: utf-8\nimport glob\nimport logging\nimport os\nimport json\nimport sys\nimport socket\nimport time\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nimport ray.ray_constants as ray_constants\nimport ray.cluster_utils\nimport ray.test_utils\nfrom ray import resource_spec\nimport setproctitle\n\nfrom ray.test_utils import (check_call_ray, RayTestTimeoutException,\n wait_for_num_actors)\n\nlogger = logging.getLogger(__name__)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets\n # in a roughly equal manner.\n cluster = ray_start_cluster\n num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.node.unique_id\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets in a\n # roughly equal manner even when the tasks have dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.node.unique_id\n\n # This object will be local to one of the raylets. Make sure\n # this doesn't prevent tasks from being scheduled on other raylets.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.objects()) >= num_objects:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\ndef test_global_state_api(shutdown_only):\n\n error_message = (\"The ray global state API cannot be used \"\n \"before ray.init has been called.\")\n\n with pytest.raises(Exception, match=error_message):\n ray.objects()\n\n with pytest.raises(Exception, match=error_message):\n ray.actors()\n\n with pytest.raises(Exception, match=error_message):\n ray.nodes()\n\n with pytest.raises(Exception, match=error_message):\n ray.jobs()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n assert ray.cluster_resources()[\"CPU\"] == 5\n assert ray.cluster_resources()[\"GPU\"] == 3\n assert ray.cluster_resources()[\"CustomResource\"] == 1\n\n # A driver/worker creates a temporary object during startup. Although the\n # temporary object is freed immediately, in a rare case, we can still find\n # the object ref in GCS because Raylet removes the object ref from GCS\n # asynchronously.\n # Because we can't control when workers create the temporary objects, so\n # We can't assert that `ray.objects()` returns an empty dict. 
Here we just\n # make sure `ray.objects()` succeeds.\n assert len(ray.objects()) >= 0\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n\n client_table = ray.nodes()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n class Actor:\n def __init__(self):\n pass\n\n _ = Actor.remote() # noqa: F841\n # Wait for actor to be created\n wait_for_num_actors(1)\n\n actor_table = ray.actors()\n assert len(actor_table) == 1\n\n actor_info, = actor_table.values()\n assert actor_info[\"JobID\"] == job_id.hex()\n assert \"IPAddress\" in actor_info[\"Address\"]\n assert \"IPAddress\" in actor_info[\"OwnerAddress\"]\n assert actor_info[\"Address\"][\"Port\"] != actor_info[\"OwnerAddress\"][\"Port\"]\n\n job_table = ray.jobs()\n\n assert len(job_table) == 1\n assert job_table[0][\"JobID\"] == job_id.hex()\n assert job_table[0][\"DriverIPAddress\"] == node_ip_address\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError:\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n # It's important to make sure that these print statements occur even\n # without calling sys.stdout.flush() and sys.stderr.flush().\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n err_lines = captured[\"err\"]\n for i in range(200):\n assert str(i) in err_lines\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n\n err_lines = captured[\"err\"]\n assert len(err_lines) == 0\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() 
for _ in range(10)]))\n\n\ndef test_specific_job_id():\n dummy_driver_id = ray.JobID.from_int(1)\n ray.init(num_cpus=1, job_id=dummy_driver_id)\n\n # in driver\n assert dummy_driver_id == ray.worker.global_worker.current_job_id\n\n # in worker\n @ray.remote\n def f():\n return ray.worker.global_worker.current_job_id\n\n assert dummy_driver_id == ray.get(f.remote())\n\n ray.shutdown()\n\n\ndef test_object_ref_properties():\n id_bytes = b\"00112233445566778899\"\n object_ref = ray.ObjectRef(id_bytes)\n assert object_ref.binary() == id_bytes\n object_ref = ray.ObjectRef.nil()\n assert object_ref.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectRef(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectRef(b\"0123456789\")\n object_ref = ray.ObjectRef.from_random()\n assert not object_ref.is_nil()\n assert object_ref.binary() != id_bytes\n id_dumps = pickle.dumps(object_ref)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_ref\n\n\[email protected]\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(\n num_cpus=1,\n object_store_memory=int(10**8),\n _internal_config=json.dumps({\n \"object_pinning_enabled\": 0\n }))\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.core_worker.object_exists(x_id)\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_ray_setproctitle(ray_start_2_cpus):\n @ray.remote\n class UniqueName:\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.f()\"\n\n @ray.remote\n def unique_1():\n assert \"unique_1\" in setproctitle.getproctitle()\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\[email protected](\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(ray_start_2_cpus):\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(\n check_call_ray([\"stack\"], capture_stdout=True))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n if sys.platform != \"win32\":\n random_name = ray.ObjectRef.from_random().hex()\n temp_raylet_socket_dir = os.path.join(ray.utils.get_ray_temp_dir(),\n \"tests\", random_name)\n temp_raylet_socket_name = 
os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=2, raylet_socket_name=temp_raylet_socket_name)\n\n @ray.remote\n def foo(x):\n time.sleep(1)\n return 2 * x\n\n ray.get([foo.remote(i) for i in range(2)])\n\n\ndef test_raylet_is_robust_to_random_messages(ray_start_regular):\n node_manager_address = None\n node_manager_port = None\n for client in ray.nodes():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b\"asdf\")\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_non_ascii_comment(ray_start_regular):\n @ray.remote\n def f():\n # 日本語 Japanese comment\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_shutdown_disconnect_global_state():\n ray.init(num_cpus=0)\n ray.shutdown()\n\n with pytest.raises(Exception) as e:\n ray.objects()\n assert str(e.value).endswith(\"ray.init has been called.\")\n\n\[email protected](\n \"ray_start_object_store_memory\", [150 * 1024 * 1024], indirect=True)\ndef test_put_pins_object(ray_start_object_store_memory):\n obj = np.ones(200 * 1024, dtype=np.uint8)\n x_id = ray.put(obj)\n x_binary = x_id.binary()\n assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()\n\n # x cannot be evicted since x_id pins it\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert (ray.get(x_id) == obj).all()\n assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()\n\n # now it can be evicted since x_id pins it but x_binary does not\n del x_id\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert not ray.worker.global_worker.core_worker.object_exists(\n ray.ObjectRef(x_binary))\n\n # weakref put\n y_id = ray.put(obj, weakref=True)\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n with pytest.raises(ray.exceptions.UnreconstructableError):\n ray.get(y_id)\n\n\ndef test_decorated_function(ray_start_regular):\n def function_invocation_decorator(f):\n def new_f(args, kwargs):\n # Reverse the arguments.\n return f(args[::-1], {\"d\": 5}), kwargs\n\n return new_f\n\n def f(a, b, c, d=None):\n return a, b, c, d\n\n f.__ray_invocation_decorator__ = function_invocation_decorator\n f = ray.remote(f)\n\n result_id, kwargs = f.remote(1, 2, 3, d=4)\n assert kwargs == {\"d\": 4}\n assert ray.get(result_id) == (3, 2, 1, 5)\n\n\ndef test_get_postprocess(ray_start_regular):\n def get_postprocessor(object_refs, values):\n return [value for value in values if value > 0]\n\n ray.worker.global_worker._post_get_hooks.append(get_postprocessor)\n\n assert ray.get(\n [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]\n\n\ndef test_export_after_shutdown(ray_start_regular):\n # This test checks that we can use actor and remote function definitions\n # across multiple Ray sessions.\n\n @ray.remote\n def f():\n pass\n\n @ray.remote\n class Actor:\n def method(self):\n pass\n\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray and use the remote function and actor again.\n ray.init(num_cpus=1)\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray again and make sure that these definitions can be exported from\n # workers.\n ray.init(num_cpus=2)\n\n @ray.remote\n def 
export_definitions_from_worker(remote_function, actor_class):\n ray.get(remote_function.remote())\n actor_handle = actor_class.remote()\n ray.get(actor_handle.method.remote())\n\n ray.get(export_definitions_from_worker.remote(f, Actor))\n\n\ndef test_invalid_unicode_in_worker_log(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n # Wait till first worker log file is created.\n while True:\n log_file_paths = glob.glob(\"{}/worker*.out\".format(logs_dir))\n if len(log_file_paths) == 0:\n time.sleep(0.2)\n else:\n break\n\n with open(log_file_paths[0], \"wb\") as f:\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.flush()\n\n # Wait till the log monitor reads the file.\n time.sleep(1.0)\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\[email protected](reason=\"This test is too expensive to run.\")\ndef test_move_log_files_to_old(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n @ray.remote\n class Actor:\n def f(self):\n print(\"function f finished\")\n\n # First create a temporary actor.\n actors = [\n Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)\n ]\n ray.get([a.f.remote() for a in actors])\n\n # Make sure no log files are in the \"old\" directory before the actors\n # are killed.\n assert len(glob.glob(\"{}/old/worker*.out\".format(logs_dir))) == 0\n\n # Now kill the actors so the files get moved to logs/old/.\n [a.__ray_terminate__.remote() for a in actors]\n\n while True:\n log_file_paths = glob.glob(\"{}/old/worker*.out\".format(logs_dir))\n if len(log_file_paths) > 0:\n with open(log_file_paths[0], \"r\") as f:\n assert \"function f finished\\n\" in f.readlines()\n break\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\ndef test_lease_request_leak(shutdown_only):\n ray.init(\n num_cpus=1,\n _internal_config=json.dumps({\n \"initial_reconstruction_timeout_milliseconds\": 200\n }))\n assert len(ray.objects()) == 0\n\n @ray.remote\n def f(x):\n time.sleep(0.1)\n return\n\n # Submit pairs of tasks. 
Tasks in a pair can reuse the same worker leased\n # from the raylet.\n tasks = []\n for _ in range(10):\n obj_ref = ray.put(1)\n for _ in range(2):\n tasks.append(f.remote(obj_ref))\n del obj_ref\n ray.get(tasks)\n\n time.sleep(\n 1) # Sleep for an amount longer than the reconstruction timeout.\n assert len(ray.objects()) == 0, ray.objects()\n\n\[email protected](\n \"ray_start_cluster\", [{\n \"num_cpus\": 0,\n \"num_nodes\": 1,\n \"do_init\": False,\n }],\n indirect=True)\ndef test_ray_address_environment_variable(ray_start_cluster):\n address = ray_start_cluster.address\n # In this test we use zero CPUs to distinguish between starting a local\n # ray cluster and connecting to an existing one.\n\n # Make sure we connect to an existing cluster if\n # RAY_ADDRESS is set.\n os.environ[\"RAY_ADDRESS\"] = address\n ray.init()\n assert \"CPU\" not in ray.state.cluster_resources()\n del os.environ[\"RAY_ADDRESS\"]\n ray.shutdown()\n\n # Make sure we start a new cluster if RAY_ADDRESS is not set.\n ray.init()\n assert \"CPU\" in ray.state.cluster_resources()\n ray.shutdown()\n\n\ndef test_gpu_info_parsing():\n info_string = \"\"\"Model: Tesla V100-SXM2-16GB\nIRQ: 107\nGPU UUID: GPU-8eaaebb8-bb64-8489-fda2-62256e821983\nVideo BIOS: 88.00.4f.00.09\nBus Type: PCIe\nDMA Size: 47 bits\nDMA Mask: 0x7fffffffffff\nBus Location: 0000:00:1e.0\nDevice Minor: 0\nBlacklisted: No\n \"\"\"\n constraints_dict = resource_spec._constraints_from_gpu_info(info_string)\n expected_dict = {\n \"{}V100\".format(ray_constants.RESOURCE_CONSTRAINT_PREFIX): 1\n }\n assert constraints_dict == expected_dict\n\n info_string = \"\"\"Model: Tesla T4\nIRQ: 10\nGPU UUID: GPU-415fe7a8-f784-6e3d-a958-92ecffacafe2\nVideo BIOS: 90.04.84.00.06\nBus Type: PCIe\nDMA Size: 47 bits\nDMA Mask: 0x7fffffffffff\nBus Location: 0000:00:1b.0\nDevice Minor: 0\nBlacklisted: No\n \"\"\"\n constraints_dict = resource_spec._constraints_from_gpu_info(info_string)\n expected_dict = {\n \"{}T4\".format(ray_constants.RESOURCE_CONSTRAINT_PREFIX): 1\n }\n assert constraints_dict == expected_dict\n\n assert resource_spec._constraints_from_gpu_info(None) == {}\n\n\nif __name__ == \"__main__\":\n import pytest\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"numpy.ones",
"numpy.zeros"
]
] |
majdabd/nilearn | [
"55e3f26dbd9fc6e89516e5f37e8aae23ec6086a9"
] | [
"nilearn/tests/test_signal.py"
] | [
"\"\"\"\nTest the signals module\n\"\"\"\n# Author: Gael Varoquaux, Alexandre Abraham\n# License: simplified BSD\n\nimport os.path\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pytest\n\n# Use nisignal here to avoid name collisions (using nilearn.signal is\n# not possible)\nfrom nilearn import signal as nisignal\nfrom nilearn.signal import clean\nfrom pandas import read_csv\nimport scipy.signal\n\n\ndef generate_signals(n_features=17, n_confounds=5, length=41,\n same_variance=True, order=\"C\"):\n \"\"\"Generate test signals.\n\n All returned signals have no trends at all (to machine precision).\n\n Parameters\n ----------\n n_features, n_confounds : int, optional\n respectively number of features to generate, and number of confounds\n to use for generating noise signals.\n\n length : int, optional\n number of samples for every signal.\n\n same_variance : bool, optional\n if True, every column of \"signals\" have a unit variance. Otherwise,\n a random amplitude is applied.\n\n order : \"C\" or \"F\"\n gives the contiguousness of the output arrays.\n\n Returns\n -------\n signals : numpy.ndarray, shape (length, n_features)\n unperturbed signals.\n\n noises : numpy.ndarray, shape (length, n_features)\n confound-based noises. Each column is a signal obtained by linear\n combination of all confounds signals (below). The coefficients in\n the linear combination are also random.\n\n confounds : numpy.ndarray, shape (length, n_confounds)\n random signals used as confounds.\n \"\"\"\n rng = np.random.RandomState(42)\n\n # Generate random confounds\n confounds_shape = (length, n_confounds)\n confounds = np.ndarray(confounds_shape, order=order)\n confounds[...] = rng.standard_normal(size=confounds_shape)\n confounds[...] = scipy.signal.detrend(confounds, axis=0)\n\n # Compute noise based on confounds, with random factors\n factors = rng.standard_normal(size=(n_confounds, n_features))\n noises_shape = (length, n_features)\n noises = np.ndarray(noises_shape, order=order)\n noises[...] = np.dot(confounds, factors)\n noises[...] = scipy.signal.detrend(noises, axis=0)\n\n # Generate random signals with random amplitudes\n signals_shape = noises_shape\n signals = np.ndarray(signals_shape, order=order)\n if same_variance:\n signals[...] = rng.standard_normal(size=signals_shape)\n else:\n signals[...] = (\n 4.0 * abs(rng.standard_normal(size=signals_shape[1])) + 0.5\n ) * rng.standard_normal(size=signals_shape)\n\n signals[...] 
= scipy.signal.detrend(signals, axis=0)\n return signals, noises, confounds\n\n\ndef generate_trends(n_features=17, length=41):\n \"\"\"Generate linearly-varying signals, with zero mean.\n\n Parameters\n ----------\n n_features, length : int\n respectively number of signals and number of samples to generate.\n\n Returns\n -------\n trends : numpy.ndarray, shape (length, n_features)\n output signals, one per column.\n \"\"\"\n rng = np.random.RandomState(42)\n trends = scipy.signal.detrend(np.linspace(0, 1.0, length), type=\"constant\")\n trends = np.repeat(np.atleast_2d(trends).T, n_features, axis=1)\n factors = rng.standard_normal(size=n_features)\n return trends * factors\n\n\ndef generate_signals_plus_trends(n_features=17, n_samples=41):\n\n signals, _, _ = generate_signals(n_features=n_features,\n length=n_samples)\n trends = generate_trends(n_features=n_features,\n length=n_samples)\n return signals + trends\n\n\ndef test_butterworth():\n rng = np.random.RandomState(42)\n n_features = 20000\n n_samples = 100\n\n sampling = 100\n low_pass = 30\n high_pass = 10\n\n # Compare output for different options.\n # single timeseries\n data = rng.standard_normal(size=n_samples)\n data_original = data.copy()\n '''\n May be only on py3.5:\n Bug in scipy 1.1.0 generates an unavoidable FutureWarning.\n (More info: https://github.com/scipy/scipy/issues/9086)\n The number of warnings generated is overwhelming TravisCI's log limit,\n causing it to fail tests.\n This hack prevents that and will be removed in future.\n '''\n buggy_scipy = (LooseVersion(scipy.__version__) < LooseVersion('1.2')\n and LooseVersion(scipy.__version__) > LooseVersion('1.0')\n )\n if buggy_scipy:\n warnings.simplefilter('ignore')\n ''' END HACK '''\n out_single = nisignal.butterworth(data, sampling,\n low_pass=low_pass, high_pass=high_pass,\n copy=True)\n np.testing.assert_almost_equal(data, data_original)\n nisignal.butterworth(data, sampling,\n low_pass=low_pass, high_pass=high_pass,\n copy=False)\n np.testing.assert_almost_equal(out_single, data)\n np.testing.assert_(id(out_single) != id(data))\n\n # multiple timeseries\n data = rng.standard_normal(size=(n_samples, n_features))\n data[:, 0] = data_original # set first timeseries to previous data\n data_original = data.copy()\n\n out1 = nisignal.butterworth(data, sampling,\n low_pass=low_pass, high_pass=high_pass,\n copy=True)\n np.testing.assert_almost_equal(data, data_original)\n np.testing.assert_(id(out1) != id(data_original))\n\n # check that multiple- and single-timeseries filtering do the same thing.\n np.testing.assert_almost_equal(out1[:, 0], out_single)\n nisignal.butterworth(data, sampling,\n low_pass=low_pass, high_pass=high_pass,\n copy=False)\n np.testing.assert_almost_equal(out1, data)\n\n # Test nyquist frequency clipping, issue #482\n out1 = nisignal.butterworth(data, sampling,\n low_pass=50.,\n copy=True)\n out2 = nisignal.butterworth(data, sampling,\n low_pass=80., # Greater than nyq frequency\n copy=True)\n np.testing.assert_almost_equal(out1, out2)\n np.testing.assert_(id(out1) != id(out2))\n\n\ndef test_standardize():\n rng = np.random.RandomState(42)\n n_features = 10\n n_samples = 17\n\n # Create random signals with offsets\n a = rng.random_sample((n_samples, n_features))\n a += np.linspace(0, 2., n_features)\n\n # transpose array to fit _standardize input.\n # Without trend removal\n b = nisignal._standardize(a, standardize='zscore')\n stds = np.std(b)\n np.testing.assert_almost_equal(stds, np.ones(n_features))\n 
np.testing.assert_almost_equal(b.sum(axis=0), np.zeros(n_features))\n\n # With trend removal\n a = np.atleast_2d(np.linspace(0, 2., n_features)).T\n b = nisignal._standardize(a, detrend=True, standardize=False)\n np.testing.assert_almost_equal(b, np.zeros(b.shape))\n\n length_1_signal = np.atleast_2d(np.linspace(0, 2., n_features))\n np.testing.assert_array_equal(length_1_signal,\n nisignal._standardize(length_1_signal,\n standardize='zscore'))\n\n\ndef test_detrend():\n \"\"\"Test custom detrend implementation.\"\"\"\n point_number = 703\n features = 17\n signals, _, _ = generate_signals(n_features=features,\n length=point_number,\n same_variance=True)\n trends = generate_trends(n_features=features, length=point_number)\n x = signals + trends + 1\n original = x.copy()\n\n # Mean removal only (out-of-place)\n detrended = nisignal._detrend(x, inplace=False, type=\"constant\")\n assert (abs(detrended.mean(axis=0)).max()\n < 15. * np.finfo(np.float).eps)\n\n # out-of-place detrending. Use scipy as a reference implementation\n detrended = nisignal._detrend(x, inplace=False)\n detrended_scipy = scipy.signal.detrend(x, axis=0)\n\n # \"x\" must be left untouched\n np.testing.assert_almost_equal(original, x, decimal=14)\n assert abs(detrended.mean(axis=0)).max() < 15. * np.finfo(np.float).eps\n np.testing.assert_almost_equal(detrended_scipy, detrended, decimal=14)\n # for this to work, there must be no trends at all in \"signals\"\n np.testing.assert_almost_equal(detrended, signals, decimal=14)\n\n # inplace detrending\n nisignal._detrend(x, inplace=True)\n assert abs(x.mean(axis=0)).max() < 15. * np.finfo(np.float).eps\n # for this to work, there must be no trends at all in \"signals\"\n np.testing.assert_almost_equal(detrended_scipy, detrended, decimal=14)\n np.testing.assert_almost_equal(x, signals, decimal=14)\n\n length_1_signal = x[0]\n length_1_signal = length_1_signal[np.newaxis, :]\n np.testing.assert_array_equal(length_1_signal,\n nisignal._detrend(length_1_signal))\n\n # Mean removal on integers\n detrended = nisignal._detrend(x.astype(np.int64), inplace=True,\n type=\"constant\")\n assert (abs(detrended.mean(axis=0)).max() <\n 20. 
* np.finfo(np.float).eps)\n\n\ndef test_mean_of_squares():\n \"\"\"Test _mean_of_squares.\"\"\"\n n_samples = 11\n n_features = 501 # Higher than 500 required\n signals, _, _ = generate_signals(n_features=n_features,\n length=n_samples,\n same_variance=True)\n # Reference computation\n var1 = np.copy(signals)\n var1 **= 2\n var1 = var1.mean(axis=0)\n\n var2 = nisignal._mean_of_squares(signals)\n\n np.testing.assert_almost_equal(var1, var2)\n\n\ndef test_row_sum_of_squares():\n \"\"\"Test _row_sum_of_squares.\"\"\"\n n_samples = 11\n n_features = 501 # Higher than 500 required\n signals, _, _ = generate_signals(n_features=n_features,\n length=n_samples,\n same_variance=True)\n # Reference computation\n var1 = signals ** 2\n var1 = var1.sum(axis=0)\n\n var2 = nisignal._row_sum_of_squares(signals)\n\n np.testing.assert_almost_equal(var1, var2)\n\n\n# This test is inspired from Scipy docstring of detrend function\ndef test_clean_detrending():\n n_samples = 21\n n_features = 501 # Must be higher than 500\n signals, _, _ = generate_signals(n_features=n_features,\n length=n_samples)\n trends = generate_trends(n_features=n_features,\n length=n_samples)\n x = signals + trends\n x_orig = x.copy()\n\n # if NANs, data out should be False with ensure_finite=True\n y = signals + trends\n y[20, 150] = np.nan\n y[5, 500] = np.nan\n y[15, 14] = np.inf\n y_orig = y.copy()\n\n y_clean = nisignal.clean(y, ensure_finite=True)\n assert np.any(np.isfinite(y_clean)), True\n # clean should not modify inputs\n # using assert_almost_equal instead of array_equal due to NaNs\n np.testing.assert_almost_equal(y_orig, y, decimal=13)\n\n # test boolean is not given to signal.clean\n pytest.raises(TypeError, nisignal.clean, x, low_pass=False)\n pytest.raises(TypeError, nisignal.clean, x, high_pass=False)\n\n # This should remove trends\n x_detrended = nisignal.clean(x, standardize=False, detrend=True,\n low_pass=None, high_pass=None)\n np.testing.assert_almost_equal(x_detrended, signals, decimal=13)\n # clean should not modify inputs\n assert np.array_equal(x_orig, x)\n\n # This should do nothing\n x_undetrended = nisignal.clean(x, standardize=False, detrend=False,\n low_pass=None, high_pass=None)\n assert not abs(x_undetrended - signals).max() < 0.06\n # clean should not modify inputs\n assert np.array_equal(x_orig, x)\n\n\ndef test_clean_t_r():\n \"\"\"Different TRs must produce different results after filtering\"\"\"\n rng = np.random.RandomState(42)\n n_samples = 34\n # n_features Must be higher than 500\n n_features = 501\n x_orig = generate_signals_plus_trends(n_features=n_features,\n n_samples=n_samples)\n random_tr_list1 = np.round(rng.uniform(size=3) * 10, decimals=2)\n random_tr_list2 = np.round(rng.uniform(size=3) * 10, decimals=2)\n for tr1, tr2 in zip(random_tr_list1, random_tr_list2):\n low_pass_freq_list = tr1 * np.array([1.0 / 100, 1.0 / 110])\n high_pass_freq_list = tr1 * np.array([1.0 / 210, 1.0 / 190])\n for low_cutoff, high_cutoff in zip(low_pass_freq_list,\n high_pass_freq_list):\n det_one_tr = nisignal.clean(x_orig, t_r=tr1, low_pass=low_cutoff,\n high_pass=high_cutoff)\n det_diff_tr = nisignal.clean(x_orig, t_r=tr2, low_pass=low_cutoff,\n high_pass=high_cutoff)\n\n if not np.isclose(tr1, tr2, atol=0.3):\n msg = ('results do not differ for different TRs: {} and {} '\n 'at cutoffs: low_pass={}, high_pass={} '\n 'n_samples={}, n_features={}'.format(\n tr1, tr2, low_cutoff, high_cutoff,\n n_samples, n_features))\n np.testing.assert_(np.any(np.not_equal(det_one_tr,\n det_diff_tr)),\n msg)\n del 
det_one_tr, det_diff_tr\n\n\ndef test_clean_frequencies():\n sx1 = np.sin(np.linspace(0, 100, 2000))\n sx2 = np.sin(np.linspace(0, 100, 2000))\n sx = np.vstack((sx1, sx2)).T\n sx_orig = sx.copy()\n assert clean(sx, standardize=False, high_pass=0.002, low_pass=None,\n t_r=2.5).max() > 0.1\n assert clean(sx, standardize=False, high_pass=0.2, low_pass=None,\n t_r=2.5) .max() < 0.01\n assert clean(sx, standardize=False, low_pass=0.01, t_r=2.5).max() > 0.9\n pytest.raises(ValueError, clean, sx, low_pass=0.4, high_pass=0.5, t_r=2.5)\n\n # clean should not modify inputs\n sx_cleaned = clean(sx, standardize=False, detrend=False, low_pass=0.2, t_r=2.5)\n assert np.array_equal(sx_orig, sx)\n\n\ndef test_clean_sessions():\n n_samples = 21\n n_features = 501 # Must be higher than 500\n signals, _, _ = generate_signals(n_features=n_features,\n length=n_samples)\n trends = generate_trends(n_features=n_features,\n length=n_samples)\n x = signals + trends\n x_orig = x.copy()\n # Create session info\n sessions = np.ones(n_samples)\n sessions[0:n_samples // 2] = 0\n x_detrended = nisignal.clean(x, standardize=False, detrend=True,\n low_pass=None, high_pass=None,\n sessions=sessions)\n # clean should not modify inputs\n assert np.array_equal(x_orig, x)\n\n\ndef test_clean_confounds():\n signals, noises, confounds = generate_signals(n_features=41,\n n_confounds=5, length=45)\n # No signal: output must be zero.\n eps = np.finfo(np.float).eps\n noises1 = noises.copy()\n cleaned_signals = nisignal.clean(noises, confounds=confounds,\n detrend=True, standardize=False)\n assert abs(cleaned_signals).max() < 100. * eps\n # clean should not modify inputs\n assert np.array_equal(noises, noises1)\n\n # With signal: output must be orthogonal to confounds\n cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,\n detrend=False, standardize=True)\n assert abs(np.dot(confounds.T, cleaned_signals)).max() < 1000. * eps\n\n # Same output when a constant confound is added\n confounds1 = np.hstack((np.ones((45, 1)), confounds))\n cleaned_signals1 = nisignal.clean(signals + noises, confounds=confounds1,\n detrend=False, standardize=True)\n np.testing.assert_almost_equal(cleaned_signals1, cleaned_signals)\n\n # Test detrending. No trend should exist in the output.\n # Use confounds with a trend.\n temp = confounds.T\n temp += np.arange(confounds.shape[0])\n\n cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,\n detrend=False, standardize=False)\n coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]),\n cleaned_signals, 1)\n assert (abs(coeffs) > 1e-3).any() # trends remain\n\n cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,\n detrend=True, standardize=False)\n coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]),\n cleaned_signals, 1)\n assert (abs(coeffs) < 200. * eps).all() # trend removed\n\n # Test no-op\n input_signals = 10 * signals\n cleaned_signals = nisignal.clean(input_signals, detrend=False,\n standardize=False)\n np.testing.assert_almost_equal(cleaned_signals, input_signals)\n\n cleaned_signals = nisignal.clean(input_signals, detrend=False,\n standardize=True)\n np.testing.assert_almost_equal(cleaned_signals.var(axis=0),\n np.ones(cleaned_signals.shape[1]))\n\n # Test with confounds read from a file. 
Smoke test only (result has\n # no meaning).\n current_dir = os.path.split(__file__)[0]\n\n signals, _, confounds = generate_signals(n_features=41,\n n_confounds=3, length=20)\n filename1 = os.path.join(current_dir, \"data\", \"spm_confounds.txt\")\n filename2 = os.path.join(current_dir, \"data\",\n \"confounds_with_header.csv\")\n\n nisignal.clean(signals, detrend=False, standardize=False,\n confounds=filename1)\n nisignal.clean(signals, detrend=False, standardize=False,\n confounds=filename2)\n nisignal.clean(signals, detrend=False, standardize=False,\n confounds=confounds[:, 1])\n\n # test with confounds as a pandas DataFrame\n confounds_df = read_csv(filename2, sep='\\t')\n nisignal.clean(signals, detrend=False, standardize=False,\n confounds=confounds_df.values)\n nisignal.clean(signals, detrend=False, standardize=False,\n confounds=confounds_df)\n\n # Use a list containing two filenames, a 2D array and a 1D array\n nisignal.clean(signals, detrend=False, standardize=False,\n confounds=[filename1, confounds[:, 0:2],\n filename2, confounds[:, 2]])\n\n # Test error handling\n pytest.raises(TypeError, nisignal.clean, signals, confounds=1)\n pytest.raises(ValueError, nisignal.clean, signals, confounds=np.zeros(2))\n pytest.raises(ValueError, nisignal.clean, signals,\n confounds=np.zeros((2, 2)))\n pytest.raises(ValueError, nisignal.clean, signals,\n confounds=np.zeros((2, 3, 4)))\n pytest.raises(ValueError, nisignal.clean, signals[:-1, :],\n confounds=filename1)\n pytest.raises(TypeError, nisignal.clean, signals,\n confounds=[None])\n pytest.raises(ValueError, nisignal.clean, signals, t_r=None,\n low_pass=.01)\n\n # Test without standardizing that constant parts of confounds are\n # accounted for\n np.testing.assert_almost_equal(nisignal.clean(np.ones((20, 2)),\n standardize=False,\n confounds=np.ones(20),\n standardize_confounds=False,\n detrend=False,\n ).mean(),\n np.zeros((20, 2)))\n\n\ndef test_clean_frequencies_using_power_spectrum_density():\n\n # Create signal\n sx = np.array([np.sin(np.linspace(0, 100, 100) * 1.5),\n np.sin(np.linspace(0, 100, 100) * 3.),\n np.sin(np.linspace(0, 100, 100) / 8.),\n ]).T\n\n # Create confound\n _, _, confounds = generate_signals(\n n_features=10, n_confounds=10, length=100)\n\n # Apply low- and high-pass filter (separately)\n t_r = 1.0\n low_pass = 0.1\n high_pass = 0.4\n res_low = clean(sx, detrend=False, standardize=False, low_pass=low_pass,\n high_pass=None, t_r=t_r)\n res_high = clean(sx, detrend=False, standardize=False, low_pass=None,\n high_pass=high_pass, t_r=t_r)\n\n # Compute power spectrum density for both test\n f, Pxx_den_low = scipy.signal.welch(np.mean(res_low.T, axis=0), fs=t_r)\n f, Pxx_den_high = scipy.signal.welch(np.mean(res_high.T, axis=0), fs=t_r)\n\n # Verify that the filtered frequencies are removed\n assert np.sum(Pxx_den_low[f >= low_pass * 2.]) <= 1e-4\n assert np.sum(Pxx_den_high[f <= high_pass / 2.]) <= 1e-4\n\n\ndef test_clean_finite_no_inplace_mod():\n \"\"\"\n Test for verifying that the passed in signal array is not modified.\n For PR #2125 . 
This test is failing on master, passing in this PR.\n \"\"\"\n n_samples = 2\n # n_features Must be higher than 500\n n_features = 501\n x_orig, _, _ = generate_signals(n_features=n_features,\n length=n_samples)\n x_orig_inital_copy = x_orig.copy()\n\n x_orig_with_nans = x_orig.copy()\n x_orig_with_nans[0, 0] = np.nan\n x_orig_with_nans_initial_copy = x_orig_with_nans.copy()\n\n cleaned_x_orig = clean(x_orig)\n assert np.array_equal(x_orig, x_orig_inital_copy)\n\n cleaned_x_orig_with_nans = clean(x_orig_with_nans, ensure_finite=True)\n assert np.isnan(x_orig_with_nans_initial_copy[0, 0])\n assert np.isnan(x_orig_with_nans[0, 0])\n\n\ndef test_high_variance_confounds():\n\n # C and F order might take different paths in the function. Check that the\n # result is identical.\n n_features = 1001\n length = 20\n n_confounds = 5\n seriesC, _, _ = generate_signals(n_features=n_features,\n length=length, order=\"C\")\n seriesF, _, _ = generate_signals(n_features=n_features,\n length=length, order=\"F\")\n\n np.testing.assert_almost_equal(seriesC, seriesF, decimal=13)\n outC = nisignal.high_variance_confounds(seriesC, n_confounds=n_confounds,\n detrend=False)\n outF = nisignal.high_variance_confounds(seriesF, n_confounds=n_confounds,\n detrend=False)\n np.testing.assert_almost_equal(outC, outF, decimal=13)\n\n # Result must not be influenced by global scaling\n seriesG = 2 * seriesC\n outG = nisignal.high_variance_confounds(seriesG, n_confounds=n_confounds,\n detrend=False)\n np.testing.assert_almost_equal(outC, outG, decimal=13)\n assert(outG.shape == (length, n_confounds))\n\n # Changing percentile changes the result\n seriesG = seriesC\n outG = nisignal.high_variance_confounds(seriesG, percentile=1.,\n n_confounds=n_confounds,\n detrend=False)\n pytest.raises(AssertionError, np.testing.assert_almost_equal,\n outC, outG, decimal=13)\n assert(outG.shape == (length, n_confounds))\n\n # Check shape of output\n out = nisignal.high_variance_confounds(seriesG, n_confounds=7,\n detrend=False)\n assert(out.shape == (length, 7))\n\n # Adding a trend and detrending should give same results as with no trend.\n seriesG = seriesC\n trends = generate_trends(n_features=n_features, length=length)\n seriesGt = seriesG + trends\n\n outG = nisignal.high_variance_confounds(seriesG, detrend=False,\n n_confounds=n_confounds)\n outGt = nisignal.high_variance_confounds(seriesGt, detrend=True,\n n_confounds=n_confounds)\n # Since sign flips could occur, we look at the absolute values of the\n # covariance, rather than the absolute difference, and compare this to\n # the identity matrix\n np.testing.assert_almost_equal(np.abs(outG.T.dot(outG)),\n np.identity(outG.shape[1]),\n decimal=13)\n # Control for sign flips by taking the min of both possibilities\n np.testing.assert_almost_equal(\n np.min(np.abs(np.dstack([outG - outGt, outG + outGt])), axis=2),\n np.zeros(outG.shape))\n\n # Control robustness to NaNs\n seriesG[:, 0] = 0\n out1 = nisignal.high_variance_confounds(seriesG, n_confounds=n_confounds)\n seriesG[:, 0] = np.nan\n out2 = nisignal.high_variance_confounds(seriesG, n_confounds=n_confounds)\n np.testing.assert_almost_equal(out1, out2, decimal=13)\n\n\ndef test_clean_psc():\n rng = np.random.RandomState(0)\n n_samples = 500\n n_features = 5\n\n signals, _, _ = generate_signals(n_features=n_features,\n length=n_samples)\n means = rng.randn(1, n_features)\n signals += means\n\n cleaned_signals = clean(signals, standardize='psc')\n np.testing.assert_almost_equal(cleaned_signals.mean(0), 0)\n\n 
cleaned_signals.std(axis=0)\n np.testing.assert_almost_equal(cleaned_signals.mean(0), 0)\n np.testing.assert_almost_equal(cleaned_signals,\n signals / signals.mean(0) * 100 - 100)\n\n\ndef test_clean_zscore():\n rng = np.random.RandomState(42)\n n_samples = 500\n n_features = 5\n\n signals, _, _ = generate_signals(n_features=n_features,\n length=n_samples)\n\n signals += rng.standard_normal(size=(1, n_features))\n cleaned_signals = clean(signals, standardize='zscore')\n np.testing.assert_almost_equal(cleaned_signals.mean(0), 0)\n np.testing.assert_almost_equal(cleaned_signals.std(0), 1)\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.isclose",
"numpy.copy",
"numpy.random.RandomState",
"numpy.dstack",
"numpy.isfinite",
"numpy.testing.assert_almost_equal",
"numpy.vstack",
"numpy.ndarray",
"numpy.isnan",
"numpy.identity",
"numpy.linspace",
"numpy.mean",
"numpy.atleast_2d",
"numpy.zeros",
"pandas.read_csv",
"numpy.arange",
"numpy.std",
"numpy.finfo",
"numpy.not_equal",
"numpy.array_equal",
"numpy.array",
"numpy.dot"
]
] |
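A side note on the nilearn signal-testing file recorded above: its test_clean_psc case expects clean(..., standardize='psc') to equal signals / signals.mean(0) * 100 - 100, i.e. percent signal change about the temporal mean, which is zero-mean by construction. A minimal standalone sketch of that identity, using made-up numbers rather than anything from the recorded file:

    import numpy as np

    rng = np.random.RandomState(0)
    signals = rng.rand(500, 5) + 5.0                 # strictly positive temporal mean
    psc = signals / signals.mean(axis=0) * 100 - 100
    print(np.allclose(psc.mean(axis=0), 0))          # True: PSC is centred on the temporal mean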
Nickwasused/DNoiSe | [
"40cb7129ee4a9a5d74cb0f673144d5302bf8ef27"
] | [
"dnoise.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport json\nimport os\nimport random\nimport sqlite3\nimport sys\nimport time\nimport urllib\nfrom importlib import reload\n\nimport dns.resolver\nimport pandas\nimport requests\n\nreload(sys)\n\n#########################################################################################\n#\t\t\t\tBEGINNING OF CONFIG SECTION\t\t\t\t#\n\n# Set your pi-hole auth token - you can copy it from /etc/pihole/setupVars.conf\nauth = \"\"\n\n# Set the Fake Query Multiplier | Default: 1 = 10% Fake Querys\nmultiplier = 1.5\n\n# Set IP of the machine running this script. The script is optimized for running directly on the pi-hole server,\n# or on another un-attended machine. \"127.0.0.1\" is valid only when running directly on the pi-hole.\nclient = \"127.0.0.1\"\n\n# Set IP of your pi-hole instance. \"127.0.0.1\" is valid only when running directly on the pi-hole.\ndns.resolver.nameservers = \"127.0.0.1\"\n\n# Logging to a file. For easier debugging uncomment the second row.\nlog_file = sys.stdout\n\n# Set working directory for the script - the database with top 1M domains will be stored here.\nworking_directory = os.path.dirname(os.path.realpath(__file__))\n\n#\t\t\t\t END OF CONFIG SECTION \t\t\t\t#\n#########################################################################################\n\nif auth == \"\":\n print(\"Please Set your auth token\")\n exit()\n\ndef download_domains():\n # Download the Cisco Umbrella list. More info: https://s3-us-west-1.amazonaws.com/umbrella-static/index.html\n try:\n print(\"Downloading the domain list…\")\n urllib.request.urlretrieve(\"http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip\",\n filename=working_directory + \"domains.zip\")\n except Exception as e:\n print(e)\n print(\"Can't download the domain list. Quitting.\")\n exit()\n\n # Create a SQLite database and import the domain list\n try:\n db = sqlite3.connect(working_directory + \"domains.sqlite\")\n db.execute(\"CREATE TABLE Domains (ID INT PRIMARY KEY, Domain TEXT)\")\n\n # Load the CSV into our database\n print(\"Importing to sqlite…\")\n df = pandas.read_csv(working_directory + \"domains.zip\", compression='zip', names=[\"ID\", \"Domain\"])\n df.to_sql(\"Domains\", db, if_exists=\"append\", index=False)\n\n db.close()\n\n os.remove(working_directory + \"domains.zip\")\n except:\n print(\"Import failed. Quitting.\")\n exit()\n\n # Running this on 1st gen Raspberry Pi can take up to 10 minutes. Be patient.\n print(\"Done.\")\n\n\n# A simple loop that makes sure we have an Internet connection - it can take a while for pi-hole to get up and running after a reboot.\nwhile True:\n try:\n urllib.request.urlopen(\"https://duckduckgo.com\")\n print(\"Got network connection.\")\n break\n except Exception as e:\n print(e)\n print(\"Network not up yet, retrying in 10 seconds.\")\n time.sleep(10)\n\n# Download the top 1M domain list if we don't have it yet.\nexists = os.path.isfile(working_directory + \"domains.sqlite\")\nif not exists:\n download_domains()\n\ndb = sqlite3.connect(working_directory + \"domains.sqlite\")\n\nwhile True:\n # We want the fake queries to blend in with the organic traffic expected at each given time of the day, so instead of having a static delay between individual queries,\n # we'll sample the network activity over the past 5 minutes and base the frequency on that. 
We want to add roughly 10% of additional activity in fake queries.\n time_until = int(time.mktime(datetime.datetime.now().timetuple()))\n time_from = time_until - 300\n\n # This will give us a list of all DNS queries that pi-hole handled in the past 5 minutes.\n while True:\n try:\n all_queries = requests.get(\n \"http://pi.hole/admin/api.php?getAllQueries&from=\" + str(time_from) + \"&until=\" + str(\n time_until) + \"&auth=\" + auth)\n break\n except:\n print(\" API request failed. Retrying in 15 seconds.\")\n time.sleep(15)\n\n parsed_all_queries = json.loads(all_queries.text)\n\n # When determining the rate of DNS queries on the network, we don't want our past fake queries to skew the statistics, therefore we filter out queries made by this machine.\n genuine_queries = []\n try:\n for a in parsed_all_queries[\"data\"]:\n if a[3] != client.replace(\"127.0.0.1\", \"localhost\"):\n genuine_queries.append(a)\n except:\n print(\" Pi-hole API response in wrong format. Investigate.\")\n exit()\n\n # Protection in case the pi-hole logs are empty.\n if len(genuine_queries) == 0:\n genuine_queries.append(\"Let's not devide by 0\")\n\n # We want the types of our fake queries (A/AAA/PTR/…) to proportionally match those of the real traffic.\n query_types = []\n try:\n for a in parsed_all_queries[\"data\"]:\n if a[3] != client.replace(\"127.0.0.1\", \"localhost\"):\n query_types.append(a[1])\n except:\n print(\"Pi-hole API response in wrong format. Investigate.\")\n exit()\n\n # Default to A request if pi-hole logs are empty\n if len(query_types) == 0:\n query_types.append(\"A\")\n\n while True:\n # Pick a random domain from the top 1M list\n rand = str(random.randint(1, 1000000))\n cursor = db.cursor()\n cursor.execute(\"SELECT Domain FROM Domains WHERE ID=\" + rand)\n domain = cursor.fetchone()[0]\n\n # Try to resolve the domain - that's why we're here in the first place, isn't it…\n try:\n dns.resolver.resolve(domain, random.choice(query_types))\n except:\n pass\n\n # We want to re-sample our \"queries per last 5 min\" rate every minute.\n if int(time.mktime(datetime.datetime.now().timetuple())) - time_until > 60:\n break\n\n # Since we want to add only about 10% of extra DNS queries, we multiply the wait time by 10, then add a small random delay.\n sleeper = (300.0 / (len(genuine_queries)) * 10 / multiplier) + random.uniform(0, 2)\n time.sleep(sleeper)\n \ndb.close()\n"
] | [
[
"pandas.read_csv"
]
] |
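For orientation, the pacing rule near the end of dnoise.py above, sleeper = (300.0 / len(genuine_queries) * 10 / multiplier) + random.uniform(0, 2), spaces the fake lookups so they add roughly 10% on top of the genuine rate when multiplier is 1. A worked example with hypothetical figures (600 genuine queries in the last 300 s are assumed here, and the random jitter is omitted):

    genuine_count = 600                      # hypothetical count from the last 5 minutes
    multiplier = 1.0                         # the value the script's comment equates with ~10% fake traffic
    mean_gap = 300.0 / genuine_count         # 0.5 s between genuine queries
    sleeper = mean_gap * 10 / multiplier     # 5.0 s between fake queries
    print(sleeper, 1.0 / sleeper)            # 0.2 fake queries/s against 2.0 genuine/s, i.e. ~10% extra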
ptarau/pypro | [
"9e542ed7d70454f75ce531c918a912aa85a4cd6e"
] | [
"bak/ndb.py"
] | [
"from .db import *\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\n\n#from answerer import tsv2mat\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\n# it might work better for larger databases\ndef_learner=MLPClassifier(\n hidden_layer_sizes=(16,16),\n #random_state=1234,\n verbose=1,\n activation='relu',\n max_iter=10000\n)\n\n#def_learner=RandomForestClassifier(random_state=1234)\n\ndef set2bits(n,xs) :\n return [1 if x in xs else 0 for x in range(n)]\n\ndef bits2set(bs):\n return [i for i,b in enumerate(bs) if b==1]\n\ndef seq2nums(xs) :\n d=dict()\n i=0\n for x in xs :\n if x not in d:\n d[x]=i\n i+=1\n return d\n\ndef num2bits(l,n) : # binary encoding\n #return [n] # no encoding\n blen=len(bin(l)[2:])\n cs=bin(n)[2:]\n r=blen-len(cs)\n bs=r*[0]+[int(c) for c in cs]\n #return bs # binary encoding\n return set2bits(l, [n]) # 1hot encoding\n\n\nclass ndb(db) :\n\n def load(self,fname,learner=def_learner):\n super().load(fname)\n\n db_const_dict = seq2nums(self.index)\n db_const_count=len(db_const_dict)\n bss=[]\n for n in db_const_dict.values() :\n bs=num2bits(db_const_count,n)\n bss.append(bs)\n X=np.array(bss)\n #X=np.eye(len(db_const_dict),dtype=int)\n\n val_count = len(self.css)\n y = np.array([set2bits(val_count, xs) for xs in self.index.values()])\n print('X:',X.shape,'\\n',X)\n print('\\ny:',y.shape,'\\n',y,'\\n')\n learner.fit(X,y)\n\n self.learner,self.db_const_dict,self.y = learner,db_const_dict,y\n\n def ground_match_of(self,query_tuple):\n\n query_consts = const_of(query_tuple)\n query_consts_nums = \\\n [self.db_const_dict[c] for c in query_consts if c in self.db_const_dict]\n db_const_count = len(self.db_const_dict)\n rs = np.array([[1] * self.y.shape[1]])\n '''\n for qn in query_consts_nums:\n qa = np.array([[q for q in num2bits(db_const_count, qn)]])\n r = self.learner.predict(qa)\n #print('PROBS',self.learner.predict_proba(qa))\n rs = np.bitwise_and(rs, r)\n '''\n qas= np.array([set2bits(db_const_count,query_consts_nums)])\n #print('QQQQ',qas)\n rs = self.learner.predict(qas)\n\n matches = list(rs[0])\n matches = bits2set(matches)\n #print('matches:', matches)\n return matches\n\n\n"
] | [
[
"numpy.array",
"sklearn.neural_network.MLPClassifier"
]
] |
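To make the encoding helpers in ndb.py above concrete: set2bits builds a fixed-width multi-hot vector over n slots and bits2set inverts it. A small sketch reusing the same two definitions, with example values chosen here purely for illustration:

    def set2bits(n, xs):
        return [1 if x in xs else 0 for x in range(n)]

    def bits2set(bs):
        return [i for i, b in enumerate(bs) if b == 1]

    print(set2bits(5, [0, 3]))        # [1, 0, 0, 1, 0]
    print(bits2set([1, 0, 0, 1, 0]))  # [0, 3]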
sahithyaravi1493/modAL | [
"39336f21cd872974cf2f34c1c79012ca30a96819"
] | [
"tests/example_tests/shape_learning.py"
] | [
"\"\"\"\nLearning the shape of an object using uncertainty based sampling.\n\nIn this example, we will demonstrate the use of ActiveLearner with\nthe scikit-learn implementation of the kNN classifier algorithm.\n\"\"\"\n\nimport numpy as np\nfrom copy import deepcopy\nfrom sklearn.ensemble import RandomForestClassifier\nfrom modAL.models import ActiveLearner\n\nnp.random.seed(0)\n\n# creating the image\nim_width = 500\nim_height = 500\ndata = np.zeros((im_height, im_width))\ndata[100:im_width-1 - 100, 100:im_height-1 - 100] = 1\n\n# create the pool from the image\nX_full = np.transpose(\n [np.tile(np.asarray(range(data.shape[0])), data.shape[1]),\n np.repeat(np.asarray(range(data.shape[1])), data.shape[0])]\n)\n# map the intensity values against the grid\ny_full = np.asarray([data[P[0], P[1]] for P in X_full])\nX_pool = deepcopy(X_full)\ny_pool = deepcopy(y_full)\n\n# assembling initial training set\ninitial_idx = [0, im_height-1, im_height*(im_height-1), -1, im_width//2 + im_height//2*im_height]\nX_train, y_train = X_pool[initial_idx], y_pool[initial_idx]\n\n# create an ActiveLearner instance\nlearner = ActiveLearner(\n estimator=RandomForestClassifier(n_estimators=10),\n X_training=X_train, y_training=y_train\n)\ninitial_prediction = learner.predict_proba(X_full)[:, 1].reshape(im_height, im_width)\n\nn_queries = 100\nfor round_idx in range(n_queries):\n query_idx, query_inst = learner.query(X_pool)\n learner.teach(X_pool[query_idx].reshape(1, -1), y_pool[query_idx].reshape(-1, ))\n X_pool = np.delete(X_pool, query_idx, axis=0)\n y_pool = np.delete(y_pool, query_idx)\n\nfinal_prediction = learner.predict_proba(X_full)[:, 1].reshape(im_height, im_width)\n\n# learning with randomly selected queries instead of active learning\nrandom_idx = initial_idx + list(np.random.choice(range(len(X_full)), n_queries, replace=False))\nX_train, y_train = X_full[initial_idx], y_full[initial_idx]\nrandom_learner = ActiveLearner(\n estimator=RandomForestClassifier(),\n X_training=X_train, y_training=y_train\n)\n"
] | [
[
"numpy.zeros",
"numpy.random.seed",
"numpy.asarray",
"numpy.delete",
"sklearn.ensemble.RandomForestClassifier"
]
] |
abhishreeshetty/IDL-CrossViz | [
"729baabd146980839544e274387bd5adb4640d03"
] | [
"datasets/prepare_thing_sem_from_lvis.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport functools\nimport multiprocessing as mp\nimport os\nimport time\n\nimport numpy as np\nfrom lvis import LVIS\nfrom pycocotools import mask as maskUtils\n\n\ndef annToRLE(ann, img_size):\n h, w = img_size\n segm = ann['segmentation']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['segmentation']\n return rle\n\n\ndef annToMask(ann, img_size):\n rle = annToRLE(ann, img_size)\n m = maskUtils.decode(rle)\n return m\n\n\ndef _process_instance_to_semantic(anns, output_semantic, img):\n img_size = (img['height'], img['width'])\n output = np.zeros(img_size, dtype=np.uint8)\n for ann in anns:\n mask = annToMask(ann, img_size)\n output[mask == 1] = ann['category_id'] // 5\n # save as compressed npz\n np.savez_compressed(output_semantic, mask=output)\n # Image.fromarray(output).save(output_semantic)\n\n\ndef create_lvis_semantic_from_instance(instance_json, sem_seg_root):\n \"\"\"Create semantic segmentation annotations from panoptic segmentation\n annotations, to be used by PanopticFPN.\n\n It maps all thing categories to contiguous ids starting from 1, and maps all unlabeled pixels to class 0\n\n Args:\n instance_json (str): path to the instance json file, in COCO's format.\n sem_seg_root (str): a directory to output semantic annotation files\n \"\"\"\n os.makedirs(sem_seg_root, exist_ok=True)\n\n lvis_detection = LVIS(instance_json)\n\n def iter_annotations():\n for img_id in lvis_detection.get_img_ids():\n anns_ids = lvis_detection.get_ann_ids([img_id])\n anns = lvis_detection.load_anns(anns_ids)\n img = lvis_detection.load_imgs([img_id])[0]\n file_name = os.path.splitext(img['file_name'])[0]\n output = os.path.join(sem_seg_root, file_name + '.npz')\n yield anns, output, img\n\n # # single process\n # print(\"Start writing to {} ...\".format(sem_seg_root))\n # start = time.time()\n # for anno, oup, img in iter_annotations():\n # _process_instance_to_semantic(\n # anno, oup, img)\n # print(\"Finished. time: {:.2f}s\".format(time.time() - start))\n # return\n\n pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))\n\n print('Start writing to {} ...'.format(sem_seg_root))\n start = time.time()\n pool.starmap(\n functools.partial(_process_instance_to_semantic),\n iter_annotations(),\n chunksize=100,\n )\n print('Finished. time: {:.2f}s'.format(time.time() - start))\n\n\nif __name__ == '__main__':\n dataset_dir = os.path.join(os.path.dirname(__file__), 'lvis')\n for s in ['train']:\n create_lvis_semantic_from_instance(\n os.path.join(dataset_dir, 'lvis_v0.5_{}.json'.format(s)),\n os.path.join(dataset_dir, 'thing_{}'.format(s)),\n )\n"
] | [
[
"numpy.zeros",
"numpy.savez_compressed"
]
] |
BierOne/VQA-AttReg | [
"dc160fcc54b0a18cf321dfcff133761b5a8f5975"
] | [
"visuals/visual.py"
] | [
"import sys\n# from visualize.visual import *\nimport h5py\nimport json\nimport random\nimport cv2\nimport numpy as np\nimport os\nfrom skimage.transform import resize\nfrom skimage.filters import gaussian\nimport matplotlib.pyplot as plt\nfrom scipy import misc\n\n\nfont=cv2.FONT_HERSHEY_SIMPLEX\nfalse_results_dir = \"vqa-mfb/mfb_coatt_glove/false_temp/\"\ntrue_results_dir = \"prior-mfb/mfb_coatt_glove/\"\n# with open('%s/results.json'%false_results_dir,'r') as f:\n\t# false_results = json.load(f)\n# with open('%s/results.json'%true_results_dir,'r') as f:\n\t# true_results = json.load(f)\nwith open('visualize/mfb_epoch10_cross_results.json','r') as f:\n\tcross_results = json.load(f)\nQuestion_dir = \"/home/share/guoyangyang/vqa/data\"\nwith open('%s/OpenEnded_mscoco_val2014_questions.json'%Question_dir,'r') as f:\n\tdata = json.load(f)\n\tqi = {ques['question_id']: ques['image_id'] for ques in data['questions']}\n\tqq = {ques['question_id']: ques['question'] for ques in data['questions']}\n\nfalse_weights = h5py.File('%s/weights.h5'%false_results_dir, 'r')\ntrue_weights = h5py.File('%s/weights.h5'%true_results_dir, 'r')\n\ndef get_cross_results(start,end):\n\tcross_results = {}\n\tfor i in range(start,end):\n\t\t# false_res = false_results[str(i)]\n\t\t# true_res = true_results[str(i)]\n\t\tfalse_res = false_results\n\t\ttrue_res = true_results\n\t\tcross_results[i] = []\n\t\tfor f_idx, f_ques in enumerate(false_res):\n\t\t\tfor t_idx, t_ques in enumerate(true_res):\n\t\t\t\tif f_ques['question_id'] == t_ques['question_id']:\n\t\t\t\t\tcross_results[i].append({'question_id': t_ques['question_id'], 'true_answer':t_ques['answer'], 'false_answer': f_ques['answer'],'true_idx':t_idx, 'false_idx':f_idx })\n\t\tprint(\"The cross_results: epoch:{}, length: {}\".format(i,len(cross_results[i])))\n\treturn cross_results\n\n\ndef get_blend_map(img, att_map, blur=True, overlap=True):\n\tatt_map -= att_map.min()\n\tif att_map.max() > 0:\n\t\tatt_map /= att_map.max()\n\tatt_map = resize(att_map, (img.shape[:2]), order = 3, mode='constant', anti_aliasing=True)\n\tif blur:\n\t\tatt_map = gaussian(att_map, 0.02*max(img.shape[:2]))\n\t\tatt_map -= att_map.min()\n\t\tatt_map /= att_map.max()\n\tcmap = plt.get_cmap('jet')\n\tatt_map_v = cmap(att_map)\n\tatt_map_v = np.delete(att_map_v, 3, 2)\n\tif overlap:\n\t\tatt_map = 1*(1-att_map**0.7).reshape(att_map.shape + (1,))*img + (att_map**0.7).reshape(att_map.shape+(1,)) * att_map_v\n\treturn att_map\n\n\ndef downsample_image(img):\n\timg_h, img_w, img_c = img.shape\n\timg = resize(img, (int(448 * img_h / img_w), 448), mode='constant', anti_aliasing=True)\n\t# 22x22 regions\n\t# img = misc.imresize(img, (300, 300), interp='bicubic')\n\treturn img\n\n\ndef save_attention_visualization(img, att_map, answer, file_name=\"COCO_xxx\", dest_dir = \"visuals/\", type=False):\n\t\"\"\"\n\tVisualize the attention map on the image and save the visualization.\n\t\"\"\"\n\tpath0 = os.path.join(dest_dir, file_name + '.png')\n\timg_h, img_w, img_c = img.shape\n\tatt_h, att_w = att_map.shape\n\tatt_map = att_map.reshape((att_h, att_w))\n\theat_map = get_blend_map(img, att_map)\n\t# if type:\n\t\t# cv2.putText(heat_map, answer, (20,200), font, 0.7, (0,255,0), 2)\n\t# else:\n\t\t# cv2.putText(heat_map, answer, (20,200), font, 0.7, (0,0,255), 2)\n\tcv2.imwrite(path0, heat_map *255.0)\n\treturn\n\n\ndef getImgIds(quesIds=[]):\n\timg_ids = qi[quesIds]\n\treturn img_ids, qq[quesIds]\n\n\ndef visual_attention(sample_nums, results, false_weights, true_weights, dir, 
map_shape=[14,14]):\n\tprint(len(results), len(true_weights), len(true_weights[0]), np.sum(true_weights[0]))\n\tsamples = random.sample(range(0,len(results)-1),sample_nums)\n\tdest_dir='visualize/%s/'%(dir)\n\tif not os.path.isdir(dest_dir+''):\n\t\tos.system('mkdir -p ' + dest_dir)\n\tfor idx in samples:\n\t\tq_id = results[idx]['question_id']\n\t\timg_ids, questions = getImgIds(quesIds=q_id)\n\t\ttrue_answer = results[idx]['true_answer']\n\t\tt_idx = results[idx]['true_idx']\n\t\tfalse_answer = results[idx]['false_answer']\n\t\tf_idx = results[idx]['false_idx']\n\t\t\n\t\tif false_answer=='yes' or false_answer=='no':\n\t\t\tcontinue\n\t\tif q_id != 618361 and q_id != 29850:\n\t\t\tcontinue\n\t\tprint('question:{}, false_answer: {}, true_answer: {} img_id: {} \\n'.format(questions, false_answer, true_answer, img_ids))\n\t\tsource_img_path = '/home/share/guoyangyang/vqa/mscoco/val2014/COCO_val2014_%s.jpg'\n\t\timg_path = source_img_path%(str(img_ids).zfill(12))\n\n\t\timg = downsample_image(cv2.imread(img_path)) # cv2.imread does auto-rotate\n\t\t# cv2.putText(img, questions, (20,20), font, 0.7, (255,0,0), 2)\n\n\t\tfile_name='%s_False_Attention_val2014'%(str(q_id).zfill(12))\n\t\tdemo_map = false_weights[f_idx]\n\t\tsave_attention_visualization(img, demo_map.reshape(map_shape[0],map_shape[1]), false_answer, file_name,dest_dir )\n\n\t\tfile_name='%s_True_Attention_val2014'%(str(q_id).zfill(12))\n\t\tdemo_map = true_weights[t_idx]\n\t\tsave_attention_visualization(img, demo_map.reshape(map_shape[0],map_shape[1]), true_answer, file_name, dest_dir,type=True)\n\n\t\tcv2.imwrite(dest_dir+ '%s_Normal.png'%(str(q_id).zfill(12)), img*255.0)\n\n\tprint(dir,'ok!')\n\treturn\n\ndef main():\n\tstart = 0\n\tend = 1\n\tmodel = 'mfb'\n\t# cross_results = get_cross_results(start,end)\n\t# with open('./visualize/mfb_epoch10_cross_results.json','w') as f:\n\t\t# json.dump(cross_results,f)\n\tfor i in range(start, end):\n\t\tvisual_attention(len(cross_results[str(i)])-1, cross_results[str(i)], false_weights['epoch_%s'%i], true_weights['epoch_%s'%i], dir='%s/results_%s'%(model, i))\n\nif __name__ == '__main__':\n\tmain()\n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.get_cmap",
"numpy.delete"
]
] |
hermannbene/BeamTomography | [
"39eae19c54128f27eb90a2717b1876768d730f29"
] | [
"model2D.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom loadData import gauss, loadAndPrepareInput\nfrom scipy import signal\nfrom generateTestData import loadTestData\nfrom plot import plotProjections2D, plotError2D\nfrom scipy.optimize import curve_fit\n\ndef growthRate(X, x, bins, y, angle, convFunction = None, microCharge = None):\n \"\"\"\n X: particle coordinates [[xi], [yi]]\n x: projection axis\n bins: bin edges corresponding to x\n y: normalized measurement values\n angle: projection angle in deg\n \"\"\"\n\n \"\"\" Rotation \"\"\"\n cos = np.cos(np.pi*angle/180)\n sin = -np.sin(np.pi*angle/180)\n \n Xrot = sin*X[0]+cos*X[1]\n\n \"\"\" Binning \"\"\"\n# hist, b = np.histogram(Xrot, bins=bins, density=False)\n# hist = hist/np.sum(hist)\n \n if microCharge==None:\n hist, b = np.histogram(Xrot, bins=bins)\n hist = hist/np.sum(hist)/(bins[1]-bins[0])\n \n else:\n hist, b = np.histogram(Xrot, bins=bins, density=False)\n hist = np.array(hist, dtype = float)*microCharge/np.average(np.diff(bins))\n if not(convFunction is None):\n if sum(convFunction)>0:\n hist = signal.convolve(hist, convFunction, mode = \"same\")\n # plt.figure()\n # plt.plot(bins[:-1], hist)\n # plt.plot(bins[:-1], histConv)\n # plt.plot(bins[:-1], convFunction)\n # hist=histConv\n \"\"\" growth rate calculation \"\"\"\n hist = np.interp(x, bins[:-1]+0.5*(bins[1]-bins[0]), hist)\n# hist = hist/np.sum(hist)\n birthRate = (y-hist)\n# birthRate/=np.max(hist)\n# birthRate*= np.sum(np.abs(y-hist)*(bins[1]-bins[0]))\n\n birthRateX = np.interp(Xrot, x, birthRate)\n\n return birthRateX\n\ndef grow(X, birthRateX, smoothing):\n randomX = np.random.random(len(X[0]))\n keepIdx = np.argwhere(-birthRateX<randomX).flatten()\n addIdx = np.argwhere(birthRateX>randomX).flatten()\n Xnew = np.hstack([X[:,keepIdx], X[:,addIdx]])\n# Xnew = np.hstack([X.copy()[:,keepIdx], X.copy()[:,addIdx]])\n# selectIdx = np.random.randint(0, len(Xnew[1]), len(X[1]))\n# Xnew = Xnew[:,selectIdx]\n# randomizeAmplitude = np.std(Xnew)/len(Xnew)\n Xnew+=np.random.normal(0, smoothing, Xnew.shape)\n return Xnew\n\nclass model2D:\n def __init__(self, fileName):\n self.fileName = fileName\n self.nPart = 2e5\n self.y, self.x, self.projectionAngles = loadAndPrepareInput(fileName, manualSave=False)\n self.bins = [np.linspace(self.x[i][0]-(self.x[i][-1]-self.x[i][-2])*0.5, self.x[i][-1]+(self.x[i][-1]-self.x[i][-2])*0.5, 6*len(self.x[i])+1) for i in range(len(self.x))]\n self.projectionAngles=self.projectionAngles[:,0]\n self.X = np.random.rand(2,int(self.nPart))*(np.max(self.x)-np.min(self.x))+np.min(self.x)\n \n self.nAngles = len(self.projectionAngles)\n offsets=[]\n \n for i in range(self.nAngles):\n self.nD = len(self.x[i])\n s = self.y[i]\n \n initGuess = (np.max(s)-np.min(s), self.x[i][np.argmax(s)], 0.1*(np.max(self.x[i])-np.min(self.x[i])), np.min(s))\n fit = curve_fit(gauss, self.x[i], s, p0 = initGuess)[0]\n offsets.append(fit[-1])\n \n integrals = []\n \n for i in range(self.nAngles): \n self.y[i] -= np.average(offsets)\n integrals.append(np.sum(self.y[i])*np.average(np.diff(self.x[i])))\n maxSList = []\n\n for i in range(self.nAngles): \n self.y[i] = self.y[i]/np.average(integrals)\n maxSList.append(np.max(self.y[i]))\n \n self.maxS = np.max(maxSList)\n \n# self.y = [self.y[i,:]/np.average(integrals) for i in range(len(self.y))]\n \n self.wireHalfWidth = 0.5\n self.wireHalfWidthBins = int(self.wireHalfWidth/(self.bins[0][1]-self.bins[0][0]))\n convFunction = np.squeeze(np.zeros(len(self.bins[0])-1))\n m = 
int(len(convFunction)*0.5)\n convFunction[m-self.wireHalfWidthBins:m+self.wireHalfWidthBins]=1.\n self.convFunction = convFunction/np.sum(convFunction)\n \n self.i = 0\n self.historyLength=10\n self.history = []\n \n def iterate(self):\n print(self.i)\n self.birthRatesX = []\n for j, angle in enumerate(self.projectionAngles):\n self.birthRatesX.append(growthRate(self.X, self.x[j], self.bins[j], self.y[j], angle, convFunction=self.convFunction, microCharge=1/self.nPart))\n \n birthRateX = np.average(self.birthRatesX, axis=0)/self.maxS\n self.X = grow(self.X, birthRateX, 0.08)\n self.addToHistory()\n self.i+=1\n\n def uncertainty(self):\n# self.birthRatesX = []\n# for j, angle in enumerate(self.projectionAngles):\n# self.birthRatesX.append(growthRate(self.X, self.x[j], self.bins[j], self.y[j], angle, convFunction=self.convFunction))\n self.samples=[]\n for j, angle in enumerate(self.projectionAngles):\n birthRateX = growthRate(self.X, self.x[j], self.bins[j], self.y[j], angle, convFunction=self.convFunction, microCharge=1/self.nPart)\n Xnew = grow(self.X, birthRateX/self.maxS, 0.08)\n for i in range(10):\n birthRateX = growthRate(Xnew, self.x[j], self.bins[j], self.y[j], angle, convFunction=self.convFunction, microCharge=1/self.nPart)\n Xnew = grow(Xnew, birthRateX/self.maxS, 0.08)\n self.samples.append(Xnew)\n# plotProjections2D([self.samples[-1]], rm.projectionAngles,rm.x, rm.y, rm.bins, convFunction=rm.convFunction, fileName=rm.i)\n \n def addToHistory(self):\n self.history.append(self.X)\n self.history = self.history[-min(self.historyLength, len(self.history)):]\n\n def saveDistribution(self):\n saveFileName = '/'.join(self.fileName.split('/')[:-1])+'/reconstructedDistribution.npy'\n np.save(saveFileName, self.X)\n \n \nif __name__ == \"__main__\":\n \n\n path = 'E:/Measurement Data/ATHOS/20210313/Hexapod/'\n fileNames = ['ws_20210313_162151']\n fileNames = [path+f+'/RAWScanData.h5' for f in fileNames]\n\n for fileName in fileNames:\n rm = model2D(fileName) # reconstruction model\n for i in range(150):\n rm.iterate()\n rm.uncertainty()\n plotProjections2D(rm.samples, rm.projectionAngles,rm.x, rm.y, rm.bins, convFunction=rm.convFunction, fileName=rm.i, microCharge=1./rm.nPart)\n# plotError2D(rm.X, rm.samples)\n# rm.saveDistribution()\n# plt.savefig(fileName+\"Tomo.png\", dpi=600)\n \n"
] | [
[
"numpy.save",
"numpy.sum",
"numpy.array",
"numpy.interp",
"numpy.argwhere",
"numpy.histogram",
"numpy.diff",
"scipy.optimize.curve_fit",
"numpy.cos",
"numpy.argmax",
"scipy.signal.convolve",
"numpy.hstack",
"numpy.max",
"numpy.min",
"numpy.random.normal",
"numpy.sin",
"numpy.average"
]
] |
ROUASAAD/PCOS | [
"21bc2698f9893484eb6ee9af5f108134948465c6"
] | [
"run.py"
] | [
"import flask\nimport pandas as pd\nimport io\nfrom flask import request, jsonify, render_template, send_from_directory\nimport warnings\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\n####################################################\n# Flask Config\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n####################################################\n\n####################################################\n# This block is used to initialize and read the training engine to be able to consume it in the api\n# it will be initialized only once on app run, this way we don't have to train the engine on every api request.\ntestData = pd.read_csv(\"./assets/RBF_SVM.csv\")\nX = testData.drop('PCOS', axis=1)\ny = testData['PCOS']\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nsvclassifier = SVC(kernel='rbf')\nsvclassifier.fit(X_train, y_train)\n####################################################\n\n\n####################################################\n# This Block is used to define the routing formula of the frontend\n# Main Login \[email protected]('/', methods=['GET'])\ndef login_index():\n return render_template(\"index.html\")\n \n # To Load index CSS \[email protected](\"/index.css\")\ndef index_style():\n return send_from_directory(\"templates\", \"index.css\")\n\n # Form Page \[email protected]('/prediction-form', methods=['GET'])\ndef prediction_form():\n return render_template(\"form.html\")\n\n # To Load form CSS \[email protected](\"/form.css\")\ndef form_style():\n return send_from_directory(\"templates\", \"form.css\")\n####################################################\n\n####################################################\n\n# This block is used to define the API's that will be needed to calculate the predictions, and recieve data\n\n# A route to return all of the data used for training\[email protected]('/api/ai/getTest', methods=['GET'])\ndef get_training_data():\n return testData.to_json()\n\n# This API is used to compute the prediction according to the parameters sent from the frontend\[email protected]('/api/ai/predict-result', methods=['POST'])\ndef compute_predict():\n try:\n # consume the request and parse it to a data frame object to be predicted\n df = pd.json_normalize(request.get_json())\n print('Model to Predict: ', request.get_json())\n y_pred = svclassifier.predict(df)\n print('Result came back as: ', y_pred)\n if y_pred == [1]: # if result is postive we return a success message for the user\n return 'Positive Result'\n elif y_pred == [0]: # if result is negative we return a negative message for the user\n return 'Negative Result'\n else:\n return 'Result was inconclusive' # if the prediction didn't work, we return inconclusive for the user\n except:\n return 'Result was inconclusive' # in case of any general error, we return inconclusive for the user\n####################################################\n\n# to run the app and define which port \napp.run(debug=True, port=5000)"
] | [
[
"pandas.read_csv",
"sklearn.svm.SVC",
"sklearn.model_selection.train_test_split"
]
] |
Dieblitzen/SAMAR-Project | [
"8b3655a3d3ef17c61eb5e0acf411c8b191bd917e"
] | [
"pixor/smooth_L1.py"
] | [
"import tensorflow as tf\nimport numpy as np\n\nTILE_SIZE = 224\n\"\"\" Implements smooth L1 on each dimension. Erases loss for negative pixel locations. \"\"\"\ndef smooth_L1(box_labels, box_preds, class_labels):\n\tdifference = tf.subtract(box_preds, box_labels)\n\tresult = tf.where(tf.abs(difference) < 1, tf.multiply(0.5, tf.square(difference)), tf.abs(difference) - 0.5)\n\tclass_labels = tf.cast(tf.clip_by_value(class_labels, clip_value_min=0, clip_value_max=1), dtype=tf.float32)\n\t# only compute bbox loss over positive ground truth boxes\n\tprocessed_result = tf.multiply(result, class_labels)\n\n\treturn tf.reduce_mean(processed_result)\n\ndef decode_smooth_L1(box_labels, box_preds, class_labels):\n\tdifference = tf.subtract(box_preds, box_labels)\n\tresult = tf.where(tf.abs(difference) < 1, tf.multiply(0.5, tf.square(difference)), tf.abs(difference) - 0.5)\n\t\n\t# only compute bbox loss over positive ground truth boxes\n\treshaped_result = tf.reshape(result, [-1, TILE_SIZE, TILE_SIZE, 8])\n\tclass_labels = tf.cast(tf.clip_by_value(class_labels, clip_value_min=0, clip_value_max=1), dtype=tf.float32)\n\n\n\tprocessed_result = tf.multiply(reshaped_result, class_labels)\n\treshaped_processed = tf.reshape(processed_result, [-1, TILE_SIZE, TILE_SIZE, 4, 2])\n\n\treturn tf.reduce_mean(reshaped_processed)\n\n\nif __name__ == \"__main__\":\n\n\t# BELOW IS A TEST CASE. ANSWER SHOULD BE ~0.58167\n\n\tsess = tf.InteractiveSession()\n\n\tbox_preds = [[1, 0.5, 0.3]]\n\tbox_labels = [[0, 0.2, 2]]\n\tclass_labels = tf.convert_to_tensor([[1.0]])\n\tbox_preds = tf.convert_to_tensor(box_preds)\n\tbox_labels = tf.convert_to_tensor(box_labels)\n\tresult = smooth_L1(box_labels, box_preds, class_labels)\n\tprint(\"result is: \" + str(result.eval())) \n"
] | [
[
"tensorflow.reshape",
"tensorflow.subtract",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.InteractiveSession",
"tensorflow.abs",
"tensorflow.convert_to_tensor",
"tensorflow.clip_by_value",
"tensorflow.square"
]
] |
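The smooth_L1 above is the usual Huber-style loss, 0.5 * d**2 where |d| < 1 and |d| - 0.5 elsewhere, masked to positive ground-truth locations. A NumPy sketch (an assumed equivalent for illustration, not the recorded TensorFlow code) that reproduces the ~0.58167 value quoted in the file's own test case:

    import numpy as np

    def smooth_l1_reference(preds, labels):
        d = np.abs(np.asarray(preds, dtype=float) - np.asarray(labels, dtype=float))
        return np.where(d < 1.0, 0.5 * d ** 2, d - 0.5)

    losses = smooth_l1_reference([1, 0.5, 0.3], [0, 0.2, 2])  # [0.5, 0.045, 1.2]
    print(losses.mean())                                      # ~0.58167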
Roxbili/kws-demo | [
"7e0674f1407572fc8f148293b23fa20a5164bc5e"
] | [
"zynq/tkinter_kws.py"
] | [
"#-*- encoding: utf-8 -*-\n\nimport time\nimport argparse\nimport numpy as np\nimport tkinter as tk \nfrom tkinter.ttk import Label\nfrom kws_ps_pl import BRAM, PSPLTalk, InputDataToBram\nfrom multiprocessing import Process\n\nclass timeRecorder(object):\n def __init__(self):\n self.total_time = 0.\n self.counter = 0\n \n def start(self):\n self.start_time = time.time()\n\n def end(self):\n self.total_time += time.time() - self.start_time\n self.counter += 1\n\n def get_total_time(self):\n return self.total_time\n\n def get_avg_time(self):\n return self.total_time / self.counter\n\nclass App(PSPLTalk):\n def __init__(self, args):\n super(App, self).__init__()\n self.input_object = InputDataToBram(args.mode)\n self.input_object.reset_flag() # 初始化标记位\n self.input_object.info2bram() # 发送基本的参数\n self.input_data_path = iter(self.input_object.data_path) # 创建输入数据路径的迭代器\n\n self.timer = timeRecorder()\n\n self.root = tk.Tk()\n self.word = Label(self.root)\n self.txt_placeholder = tk.StringVar()\n self._set_text('###')\n \n color = '#1C1C1C'\n self._set_root(color)\n self._set_label(color)\n self.first_scan = True # 第一轮mainloop先把组件显示出来\n \n def mainloop(self):\n self.root.mainloop()\n \n def _set_root(self, color):\n self.root.geometry('200x60')\n self.root.title('Keywords spotting')\n self.root.config(background=color)\n \n def _set_label(self, color):\n self.word.config(\n width = 7,\n font=(\"Times\", 40, 'bold'), \n textvariable=self.txt_placeholder,\n background=color, \n foreground='#FCFAF2'\n )\n # self.txt_placeholder.set('unknown')\n\n # lbl = Label(root, font = ('calibri', 40, 'bold'), \n # background = 'purple', \n # foreground = 'white') \n self.word.pack(anchor='center', ipady=5)\n \n def _set_text(self, txt):\n self.txt_placeholder.set(txt)\n \n def show_result(self):\n # 第一轮mainloop先显示组件\n if self.first_scan:\n self.word.after(1000, self.show_result)\n self.first_scan = False\n return\n\n # 首先拿到数据\n path = next(self.input_data_path) # 遍历数据集\n # path = self.input_object.data_path[0] # 测试用,仅看0_no.npy\n input_data = np.load(path)\n # 接着监测标记位是否改变,是的话发送数据,否则阻塞\n while not self.input_object.sendData(input_data): pass\n\n while True:\n result_flag = self.bram.read_oneByOne(1, start=0x0, map_id=1)\n if result_flag[0] == 1:\n self.timer.start()\n\n # reset result flag\n self.bram.write(b'\\x00\\x00\\x00\\x00', start=0x0, map_id=1)\n # get result\n result = self.bram.read_oneByOne(12, start=0x4, map_id=1)\n # show result\n word = self.words_list[np.argmax(result)]\n self._set_text(word)\n print('path: %s, show word %s' % (path, word))\n\n self.timer.end()\n print('Total time: {}'.format(self.timer.get_total_time()))\n print('Average time: {}'.format(self.timer.get_avg_time()))\n self.word.after(1, self.show_result) # 表示接着运行\n break\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-m',\n '--mode',\n type=str,\n default='sdData',\n )\n args = parser.parse_args()\n\n ##################### init ##################### \n app = App(args)\n\n ##################### run 2 process ##################### \n print('Start listening...')\n app.show_result()\n app.mainloop()"
] | [
[
"numpy.load",
"numpy.argmax"
]
] |
AmaljithCf/RacingRobot | [
"6885ddb37407dff15845d29f641bc7c39279b216"
] | [
"ros_nodes/serial_adapter.py"
] | [
"import threading\nimport time\n\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Int16, Int8\nfrom robust_serial import write_order, Order\nfrom robust_serial.threads import CommandThread, ListenerThread\nfrom robust_serial.utils import open_serial_port, CustomQueue\n\nfrom constants import BAUDRATE, N_MESSAGES_ALLOWED, COMMAND_QUEUE_SIZE, THETA_MIN, THETA_MAX\n\n\ndef servoCallback(data):\n servo_order = data.data\n servo_order = np.clip(servo_order, 0, 180)\n command_queue.put((Order.SERVO, servo_order))\n\n\ndef motorCallback(data):\n speed = data.data\n command_queue.put((Order.MOTOR, speed))\n\n\ndef listener():\n rospy.init_node('serial_adapter', anonymous=True)\n # Declare the Subscriber to motors orders\n rospy.Subscriber(\"arduino/servo\", Int16, servoCallback, queue_size=2)\n rospy.Subscriber(\"arduino/motor\", Int8, motorCallback, queue_size=2)\n rospy.spin()\n\n\ndef forceStop():\n \"\"\"\n Stop The car\n \"\"\"\n command_queue.clear()\n n_received_semaphore.release()\n n_received_semaphore.release()\n command_queue.put((Order.MOTOR, 0))\n command_queue.put((Order.SERVO, int((THETA_MIN + THETA_MAX) / 2)))\n\n\nif __name__ == '__main__':\n serial_file = None\n try:\n # Open serial port (for communication with Arduino)\n serial_file = open_serial_port(baudrate=BAUDRATE)\n except Exception as e:\n raise e\n\n is_connected = False\n # Initialize communication with Arduino\n while not is_connected:\n print(\"Waiting for arduino...\")\n write_order(serial_file, Order.HELLO)\n bytes_array = bytearray(serial_file.read(1))\n if not bytes_array:\n time.sleep(2)\n continue\n byte = bytes_array[0]\n if byte in [Order.HELLO.value, Order.ALREADY_CONNECTED.value]:\n is_connected = True\n\n print(\"Connected to Arduino\")\n\n # Create Command queue for sending orders\n command_queue = CustomQueue(COMMAND_QUEUE_SIZE)\n n_received_semaphore = threading.Semaphore(N_MESSAGES_ALLOWED)\n # Lock for accessing serial file (to avoid reading and writing at the same time)\n serial_lock = threading.Lock()\n # Event to notify threads that they should terminate\n exit_event = threading.Event()\n\n print(\"Starting Communication Threads\")\n # Threads for arduino communication\n threads = [CommandThread(serial_file, command_queue, exit_event, n_received_semaphore, serial_lock),\n ListenerThread(serial_file, exit_event, n_received_semaphore, serial_lock)]\n for thread in threads:\n thread.start()\n\n try:\n listener()\n except rospy.ROSInterruptException:\n pass\n\n # End the threads\n exit_event.set()\n n_received_semaphore.release()\n\n print(\"Exiting...\")\n for thread in threads:\n thread.join()\n"
] | [
[
"numpy.clip"
]
] |
ESOGU-SRLAB/opendr | [
"f2eb5a6d7a070d3534d470987c3abc69eec53905"
] | [
"projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/train.py"
] | [
"import torch.multiprocessing as multiprocessing\nimport sys\nfrom options.train_options import TrainOptions\nimport data\nfrom trainers import create_trainer\nfrom util.iter_counter import IterationCounter\nfrom util.visualizer import Visualizer\nfrom torch.multiprocessing import Queue\nfrom data.data_utils import init_parallel_jobs\nfrom models.networks.render import Render\nmultiprocessing.set_start_method('spawn', force=True)\n\n\nif __name__ == '__main__':\n\n # parse options\n opt = TrainOptions().parse()\n\n # print options to help debugging\n print(' '.join(sys.argv))\n\n # load the dataset\n dataloader = data.create_dataloader_test(opt)\n\n # create tool for counting iterations\n\n if type(dataloader) == list:\n data_loader_size = len(dataloader[0]) * opt.render_thread\n else:\n data_loader_size = len(dataloader)\n iter_counter = IterationCounter(opt, data_loader_size)\n\n ngpus = opt.device_count\n\n training_queue = Queue(10)\n\n # render layers\n\n render_gpu_ids = list(range(ngpus - opt.render_thread, ngpus))\n render_layer_list = []\n for gpu in render_gpu_ids:\n opt.gpu_ids = gpu\n render_layer = Render(opt)\n render_layer_list.append(render_layer)\n\n training_tasks = init_parallel_jobs(training_queue, dataloader, iter_counter, opt, render_layer_list)\n\n opt.gpu_ids = list(range(0, ngpus - opt.render_thread))\n print('Training gpu ', opt.gpu_ids)\n # create trainer for our model\n trainer = create_trainer(opt)\n # create tool for visualization\n visualizer = Visualizer(opt)\n\n for epoch in iter_counter.training_epochs():\n iter_counter.record_epoch_start(epoch)\n for i, data_i in enumerate(range(data_loader_size), start=iter_counter.epoch_iter):\n iter_counter.record_one_iteration()\n\n # data = trainer.get_input(data_i)\n data = training_queue.get(block=True)\n # Training\n # train generator\n if i % opt.D_steps_per_G == 0:\n trainer.run_generator_one_step(data)\n\n # train discriminator\n trainer.run_discriminator_one_step(data)\n\n # Visualizations\n if iter_counter.needs_printing():\n losses = trainer.get_latest_losses()\n visualizer.print_current_errors(epoch, iter_counter.epoch_iter,\n losses, iter_counter.time_per_iter)\n visualizer.plot_current_errors(losses, iter_counter.total_steps_so_far)\n\n if iter_counter.needs_displaying():\n visuals = trainer.get_current_visuals(data)\n visualizer.display_current_results(visuals, epoch, iter_counter.total_steps_so_far)\n\n if iter_counter.needs_saving():\n print('saving the latest model (epoch %d, total_steps %d)' %\n (epoch, iter_counter.total_steps_so_far))\n trainer.save('latest')\n iter_counter.record_current_iter()\n\n trainer.update_learning_rate(epoch)\n iter_counter.record_epoch_end()\n\n if epoch % opt.save_epoch_freq == 0 or \\\n epoch == iter_counter.total_epochs:\n print('saving the model at the end of epoch %d, iters %d' %\n (epoch, iter_counter.total_steps_so_far))\n trainer.save('latest')\n trainer.save(epoch)\n\n for training_task in training_tasks:\n training_task.terminate()\n print('Training was successfully finished.')\n"
] | [
[
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.Queue"
]
] |
umautobots/osp | [
"d055f1c846f907445186b9dea7da2d4dca4790a6"
] | [
"misc/social_forces/fieldofview.py"
] | [
"\"\"\"\nhttps://github.com/svenkreiss/socialforce\nField of view computation.\n\"\"\"\nimport numpy as np\n\n\nclass FieldOfView(object):\n \"\"\"Compute field of view prefactors.\n\n The field of view angle twophi is given in degrees.\n out_of_view_factor is C in the paper.\n \"\"\"\n def __init__(self, twophi=200.0, out_of_view_factor=0.5):\n self.cosphi = np.cos(twophi / 2.0 / 180.0 * np.pi)\n self.out_of_view_factor = out_of_view_factor\n\n def __call__(self, e, f):\n \"\"\"Weighting factor for field of view.\n\n e is rank 2 and normalized in the last index.\n f is a rank 3 tensor.\n \"\"\"\n in_sight = np.einsum('aj,abj->ab', e, f) > np.linalg.norm(f, axis=-1) * self.cosphi\n out = self.out_of_view_factor * np.ones_like(in_sight)\n out[in_sight] = 1.0\n np.fill_diagonal(out, 0.0)\n return out\n"
] | [
[
"numpy.ones_like",
"numpy.cos",
"numpy.fill_diagonal",
"numpy.einsum",
"numpy.linalg.norm"
]
] |
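The einsum in fieldofview.py above computes, for each pedestrian a and neighbour b, the dot product of the desired direction e[a] with the force f[a, b]; b counts as in sight when that exceeds ||f[a, b]|| * cos(phi). A loop-based sketch of just that mask, assuming e has shape (n, 2) and f has shape (n, n, 2) (the prefactor weighting in the recorded class is unchanged):

    import numpy as np

    def in_sight_loop(e, f, cosphi):
        # Equivalent to: np.einsum('aj,abj->ab', e, f) > np.linalg.norm(f, axis=-1) * cosphi
        n = e.shape[0]
        out = np.zeros((n, n), dtype=bool)
        for a in range(n):
            for b in range(n):
                out[a, b] = e[a] @ f[a, b] > np.linalg.norm(f[a, b]) * cosphi
        return out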
yanchunyu71/relational-networks-paddle | [
"40fcd55c00d890136f52504d8b73f76ddef6e159"
] | [
"main.py"
] | [
"from __future__ import print_function\nimport argparse\nimport os\nimport pickle\nimport random\nimport numpy as np\nimport csv\n\n\nimport paddle\nfrom model import RN, CNN_MLP\n\n\n# Training settings\nparser = argparse.ArgumentParser(description='Paddle Relational-Network sort-of-CLVR Example')\nparser.add_argument('--model', type=str, choices=['RN', 'CNN_MLP'], default='RN', \n help='resume from model stored')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=20, metavar='N',\n help='number of epochs to train (default: 20)')\nparser.add_argument('--lr', type=float, default=0.0001, metavar='LR',\n help='learning rate (default: 0.0001)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--resume', type=str,\n help='resume from model stored')\nparser.add_argument('--relation-type', type=str, default='binary',\n help='what kind of relations to learn. options: binary, ternary (default: binary)')\n\nargs = parser.parse_args()\n\npaddle.seed(args.seed)\n\nif args.model=='CNN_MLP': \n model = CNN_MLP(args)\nelse:\n model = RN(args)\n \nmodel_dirs = './model'\nbs = args.batch_size\ninput_img = paddle.empty(shape=[bs, 3, 75, 75])\ninput_qst = paddle.empty(shape=[bs, 18])\nlabel = paddle.empty(shape=[bs],dtype='int64')\n\ndef tensor_data(data, i):\n img = paddle.to_tensor(np.asarray(data[0][bs*i:bs*(i+1)]),dtype='float32')\n qst = paddle.to_tensor(np.asarray(data[1][bs*i:bs*(i+1)]),dtype='float32')\n ans = paddle.to_tensor(np.asarray(data[2][bs*i:bs*(i+1)]))\n\n global input_img\n global input_qst\n global label\n input_img = img\n input_qst = qst\n label = ans\n\ndef cvt_data_axis(data):\n img = [e[0] for e in data]\n qst = [e[1] for e in data]\n ans = [e[2] for e in data]\n return (img,qst,ans)\n\n \ndef train(epoch, ternary, rel, norel):\n model.train()\n\n if not len(rel[0]) == len(norel[0]):\n print('Not equal length for relation dataset and non-relation dataset.')\n return\n \n random.shuffle(ternary)\n random.shuffle(rel)\n random.shuffle(norel)\n\n ternary = cvt_data_axis(ternary)\n rel = cvt_data_axis(rel)\n norel = cvt_data_axis(norel)\n\n acc_ternary = []\n acc_rels = []\n acc_norels = []\n\n l_ternary = []\n l_binary = []\n l_unary = []\n\n for batch_idx in range(len(rel[0]) // bs):\n tensor_data(ternary, batch_idx)\n accuracy_ternary, loss_ternary = model.train_(input_img, input_qst, label)\n acc_ternary.append(accuracy_ternary)\n l_ternary.append(loss_ternary)\n\n tensor_data(rel, batch_idx)\n accuracy_rel, loss_binary = model.train_(input_img, input_qst, label)\n acc_rels.append(accuracy_rel)\n l_binary.append(loss_binary)\n\n tensor_data(norel, batch_idx)\n accuracy_norel, loss_unary = model.train_(input_img, input_qst, label)\n acc_norels.append(accuracy_norel)\n l_unary.append(loss_unary)\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)] '\n 'Ternary accuracy: {:.0f}% | Relations accuracy: {:.0f}% | Non-relations accuracy: {:.0f}%'.format(\n epoch,\n batch_idx * bs * 2,\n len(rel[0]) * 2,\n 100. 
* batch_idx * bs / len(rel[0]),\n accuracy_ternary,\n accuracy_rel,\n accuracy_norel))\n \n avg_acc_ternary = sum(acc_ternary) / len(acc_ternary)\n avg_acc_binary = sum(acc_rels) / len(acc_rels)\n avg_acc_unary = sum(acc_norels) / len(acc_norels)\n\n\n avg_loss_ternary = sum(l_ternary) / len(l_ternary)\n avg_loss_binary = sum(l_binary) / len(l_binary)\n avg_loss_unary = sum(l_unary) / len(l_unary)\n\n\n return avg_acc_ternary, avg_acc_binary, avg_acc_unary\n\ndef test(epoch, ternary, rel, norel):\n model.eval()\n if not len(rel[0]) == len(norel[0]):\n print('Not equal length for relation dataset and non-relation dataset.')\n return\n \n ternary = cvt_data_axis(ternary)\n rel = cvt_data_axis(rel)\n norel = cvt_data_axis(norel)\n\n accuracy_ternary = []\n accuracy_rels = []\n accuracy_norels = []\n\n loss_ternary = []\n loss_binary = []\n loss_unary = []\n\n for batch_idx in range(len(rel[0]) // bs):\n tensor_data(ternary, batch_idx)\n acc_ter, l_ter = model.test_(input_img, input_qst, label)\n accuracy_ternary.append(acc_ter)\n loss_ternary.append(l_ter)\n\n tensor_data(rel, batch_idx)\n acc_bin, l_bin = model.test_(input_img, input_qst, label)\n accuracy_rels.append(acc_bin)\n loss_binary.append(l_bin)\n\n tensor_data(norel, batch_idx)\n acc_un, l_un = model.test_(input_img, input_qst, label)\n accuracy_norels.append(acc_un)\n loss_unary.append(l_un)\n\n accuracy_ternary = sum(accuracy_ternary) / len(accuracy_ternary)\n accuracy_rel = sum(accuracy_rels) / len(accuracy_rels)\n accuracy_norel = sum(accuracy_norels) / len(accuracy_norels)\n print('\\n Test set: Ternary accuracy: {:.0f}% Binary accuracy: {:.0f}% | Unary accuracy: {:.0f}%\\n'.format(\n accuracy_ternary, accuracy_rel, accuracy_norel))\n\n\n loss_ternary = sum(loss_ternary) / len(loss_ternary)\n loss_binary = sum(loss_binary) / len(loss_binary)\n loss_unary = sum(loss_unary) / len(loss_unary)\n\n\n return accuracy_ternary, accuracy_rel, accuracy_norel\n\n \ndef load_data():\n print('loading data...')\n dirs = './data'\n filename = os.path.join(dirs,'sort-of-clevr.pickle')\n with open(filename, 'rb') as f:\n train_datasets, test_datasets = pickle.load(f)\n ternary_train = []\n ternary_test = []\n rel_train = []\n rel_test = []\n norel_train = []\n norel_test = []\n print('processing data...')\n\n for img, ternary, relations, norelations in train_datasets:\n img = np.swapaxes(img, 0, 2)\n for qst, ans in zip(ternary[0], ternary[1]):\n ternary_train.append((img,qst,ans))\n for qst,ans in zip(relations[0], relations[1]):\n rel_train.append((img,qst,ans))\n for qst,ans in zip(norelations[0], norelations[1]):\n norel_train.append((img,qst,ans))\n\n for img, ternary, relations, norelations in test_datasets:\n img = np.swapaxes(img, 0, 2)\n for qst, ans in zip(ternary[0], ternary[1]):\n ternary_test.append((img, qst, ans))\n for qst,ans in zip(relations[0], relations[1]):\n rel_test.append((img,qst,ans))\n for qst,ans in zip(norelations[0], norelations[1]):\n norel_test.append((img,qst,ans))\n \n return (ternary_train, ternary_test, rel_train, rel_test, norel_train, norel_test)\n \n\nternary_train, ternary_test, rel_train, rel_test, norel_train, norel_test = load_data()\n\ntry:\n os.makedirs(model_dirs)\nexcept:\n print('directory {} already exists'.format(model_dirs))\n\nif args.resume:\n filename = os.path.join(model_dirs, args.resume)\n if os.path.isfile(filename):\n print('==> loading checkpoint {}'.format(filename))\n checkpoint = paddle.load(filename)\n model.load_state_dict(checkpoint)\n print('==> loaded checkpoint 
{}'.format(filename))\n\nwith open(f'./{args.model}_{args.seed}_log.csv', 'w') as log_file:\n csv_writer = csv.writer(log_file, delimiter=',')\n csv_writer.writerow(['epoch', 'train_acc_ternary', 'train_acc_rel',\n 'train_acc_norel', 'train_acc_ternary', 'test_acc_rel', 'test_acc_norel'])\n\n print(f\"Training {args.model} {f'({args.relation_type})' if args.model == 'RN' else ''} model...\")\n\n for epoch in range(1, args.epochs + 1):\n train_acc_ternary, train_acc_binary, train_acc_unary = train(\n epoch, ternary_train, rel_train, norel_train)\n test_acc_ternary, test_acc_binary, test_acc_unary = test(\n epoch, ternary_test, rel_test, norel_test)\n\n csv_writer.writerow([epoch, train_acc_ternary, train_acc_binary,\n train_acc_unary, test_acc_ternary, test_acc_binary, test_acc_unary])\n model.save_model(epoch)"
] | [
[
"numpy.swapaxes",
"numpy.asarray"
]
] |
niva83/mocalum | [
"5c387197127a81ecc5cda2e4d79b5a33e9f8e31f"
] | [
"mocalum/persistance.py"
] | [
"\"\"\"This module contains a class which stores data created in the interaction\nwith mocalum.\n\"\"\"\nimport time\nfrom . import metadata\nimport numpy as np\nfrom numpy.linalg import inv as inv\nimport xarray as xr\nfrom tqdm import tqdm\nfrom .utils import sliding_window_slicing, bbox_pts_from_array, bbox_pts_from_cfg\n\nclass Data:\n def __init__(self):\n self.probing = {}\n self.los = {} # should be key-value pairs\n self.ffield = None # should be key-value pairs\n self._ffield = None # should be key-value pairs\n self.rc_wind = None # should be key-value pairs\n self.fmodel_cfg = {}\n self.meas_cfg = {}\n self.bbox_meas_pts = {}\n self.bbox_ffield = {}\n self.ffield_bbox_cfg = {} # should be a dict with lidar_id as key!\n\n\n def _cr8_fmodel_cfg(self, cfg):\n \"\"\"\n Adds configuration of flow model to class\n\n Parameters\n ----------\n cfg : dict\n Dictionary contating key-value paris which parametrize flow model\n \"\"\"\n self.fmodel_cfg = cfg\n\n\n def _cr8_bbox_dict(self, bbox_type, key, CRS,\n x_coord, y_coord, z_coord,t_coord,\n x_offset, y_offset, z_offset,t_offset,\n x_res, y_res, z_res, t_res, **kwargs):\n \"\"\"\n Creates bounding box dictionary corresponding to measurement points\n\n Parameters\n ----------\n bbox_type : str\n Indicates type of bounding box, it can be 'lidar' or 'flow-field'\n key : str\n Key which will be used to add bounding box dict to dict of bboxes\n CRS : dict\n Dict describing coordinate reference system\n x_coord : numpy\n An array of x coordinates of measurement points\n y_coord : numpy\n An array of y coordinates of measurement points\n z_coord : numpy\n An array of z coordinates of measurement points\n t_coord : numpy\n An array of time instance at which measurement points are acquired\n x_offset : float\n Offset of x coordinates\n y_offset : float\n Offset of y coordinates\n z_offset : float\n Offset of z coordinates\n t_offset : float\n Time offset\n x_res : int\n Resolution of x coordinates\n y_res : int\n Resolution of y coordinates\n z_res : int\n Resolution of z coordinates\n t_res : float\n Resolution of time\n \"\"\"\n\n bbox_cfg = {}\n\n # info about coordinate system reference (CRS)\n bbox_cfg.update({'CRS':{'x':CRS['x'],\n 'y':CRS['y'],\n 'z':CRS['z'],\n 'rot_matrix':CRS['rot_matrix']}})\n\n\n bbox_cfg.update({'x':{'min':np.min(x_coord),\n 'max':np.max(x_coord),\n 'offset':x_offset,\n 'res':x_res}})\n\n bbox_cfg.update({'y':{'min':np.min(y_coord),\n 'max':np.max(y_coord),\n 'offset':y_offset,\n 'res':y_res}})\n\n bbox_cfg.update({'z':{'min':np.min(z_coord),\n 'max':np.max(z_coord),\n 'offset':z_offset,\n 'res':z_res}})\n\n bbox_cfg.update({'t':{'min':np.min(t_coord),\n 'max':np.max(t_coord),\n 'offset':t_offset,\n 'res':t_res}})\n\n if bbox_type == 'lidar':\n self.bbox_meas_pts.update({key:bbox_cfg})\n else:\n bbox_cfg.update({'linked_lidars':kwargs['linked_lidars']})\n self.bbox_ffield.update({key:bbox_cfg})\n\n\n def _cr8_3d_tfield_ds(self, id, turb_df):\n \"\"\"\n Creates Mocalum 3D flow field xr.DataSet\n\n Parameters\n ----------\n id : str\n ID of bounding box cfg\n turb_df : pandas\n PyConTurb pandas df containing 3D turbulence (y,z, time)\n \"\"\"\n\n _ , y, z, t = self._get_ffield_coords(id)\n\n turb_np = turb_df.to_numpy().transpose().ravel()\n turb_np = turb_np.reshape(int(len(turb_np)/len(t)), len(t))\n\n\n # -1 to aligned properly axis\n R_tb = -self.bbox_ffield[id]['CRS']['rot_matrix']\n\n # rotate u and v component to be Eastward and Northward wind\n # according to the met conventions\n uv = 
np.array([turb_np[0::3],turb_np[1::3]]).transpose()\n tmp_shape = uv.shape\n uv = uv.reshape(tmp_shape[0]*tmp_shape[1],2).dot(inv(R_tb)).reshape(tmp_shape)\n uv = uv.transpose()\n\n u = uv[0].reshape(len(y), len(z) ,len(t)).transpose(1,0,2)\n v = uv[1].reshape(len(y), len(z) ,len(t)).transpose(1,0,2)\n w = turb_np[2::3].reshape(len(y), len(z) ,len(t)).transpose(1,0,2)\n\n self.ffield = xr.Dataset({'u': (['z', 'y', 'time'], u),\n 'v': (['z', 'y', 'time'], v),\n 'w': (['z', 'y', 'time'], w)},\n coords={'time': t,\n 'y': y,\n 'z': z})\n\n\n # Adding metadata\n self.ffield = self._add_metadata(self.ffield, metadata,\n 'Turbulent flow field dataset')\n self.ffield.attrs['generator'] = 'PyConTurb'\n\n def _cr8_4d_tfield_ds(self, id):\n \"\"\"\n Converts 3D turbulence flow field dataset to 4D dataset\n\n Parameters\n ----------\n id : str\n ID of bounding box cfg\n \"\"\"\n\n self._ffield = self.ffield\n R_tb = self.bbox_ffield[id]['CRS']['rot_matrix']\n y = self.ffield.y.values\n z = self.ffield.z.values\n\n\n ws = self.fmodel_cfg['wind_speed']\n bbox_pts = bbox_pts_from_cfg(self.bbox_ffield[id])\n t_res = self.bbox_ffield[id]['t']['res']\n\n x_start_pos = bbox_pts[:,0].min()\n x_len = abs(bbox_pts[:,0].max() - bbox_pts[:,0].min())\n x_res = ws * t_res\n no_items = int(np.ceil(x_len / x_res)) + 1\n\n u_3d = self.ffield.u.values.transpose()\n v_3d = self.ffield.v.values.transpose()\n w_3d = self.ffield.w.values.transpose()\n\n u_4d = sliding_window_slicing(u_3d, no_items, item_type=1).transpose(0,3,2,1)\n v_4d = sliding_window_slicing(v_3d, no_items, item_type=1).transpose(0,3,2,1)\n w_4d = sliding_window_slicing(w_3d, no_items, item_type=1).transpose(0,3,2,1)\n\n t = np.arange(0, u_4d.shape[0]*t_res, t_res)\n x = np.arange(0, no_items*x_res, x_res) + x_start_pos\n\n\n ew = np.empty((len(x),len(y),2))\n\n\n\n for i in range(0,len(x)):\n for j in range(0,len(y)):\n ew[i,j] = np.array([x[i],y[j]]).dot(inv(R_tb))\n\n\n self.ffield = xr.Dataset({'u': (['time', 'z', 'y', 'x'], u_4d),\n 'v': (['time', 'z', 'y', 'x'], v_4d),\n 'w': (['time', 'z', 'y', 'x'], w_4d)},\n coords={'time': t,\n 'y': y,\n 'z': z,\n 'x': x,\n 'Easting' : (['x','y'], ew[:,:,0]),\n 'Northing' : (['x','y'], ew[:,:,1]),\n 'Height' : (['z'], z)\n })\n\n self.ffield.attrs['generator'] = 'turbulence_box'\n self.ffield = self._add_metadata(self.ffield, metadata,\n 'Turbulent flow field dataset')\n\n def _cr8_plfield_ds(self, bbox_id, u, v, w):\n \"\"\"\n Creates power law flow field 3D dataset\n\n Parameters\n ----------\n bbox_id : str\n ID of bounding box cfg\n u : numpy\n ND array of shape (len(z), len(y), len(x)) of u values\n v : numpy\n ND array of shape (len(z), len(y), len(x)) of v values\n w : numpy\n ND array of shape (len(z), len(y), len(x)) of w values\n \"\"\"\n\n x_coord= np.arange(self.bbox_ffield[bbox_id]['x']['min'],\n self.bbox_ffield[bbox_id]['x']['max'] +\n self.bbox_ffield[bbox_id]['x']['res'],\n self.bbox_ffield[bbox_id]['x']['res'])\n\n y_coord= np.arange(self.bbox_ffield[bbox_id]['y']['min'],\n self.bbox_ffield[bbox_id]['y']['max'] +\n self.bbox_ffield[bbox_id]['y']['res'],\n self.bbox_ffield[bbox_id]['y']['res'])\n\n z_coord= np.arange(self.bbox_ffield[bbox_id]['z']['min'],\n self.bbox_ffield[bbox_id]['z']['max'] +\n self.bbox_ffield[bbox_id]['z']['res'],\n self.bbox_ffield[bbox_id]['z']['res'])\n\n base_array = np.empty((len(z_coord), len(y_coord),len(x_coord)))\n\n u = self._pl_fill_in(base_array, u)\n v = self._pl_fill_in(base_array, v)\n w = self._pl_fill_in(base_array, w)\n\n self.ffield = xr.Dataset({'u': 
(['z', 'y', 'x'], u),\n 'v': (['z', 'y', 'x'], v),\n 'w': (['z', 'y', 'x'], w)},\n coords={\n 'x': x_coord,\n 'y': y_coord,\n 'z': z_coord,\n 'Easting' : (['x'], x_coord),\n 'Northing' : (['y'], y_coord),\n 'Height' : (['z'], z_coord)\n })\n # Adding metadata\n self.ffield = self._add_metadata(self.ffield, metadata,\n 'Flow field dataset')\n self.ffield.attrs['generator'] = bbox_id\n\n\n @staticmethod\n def _pl_fill_in(empty_array, values):\n \"\"\"\n Fills empty array with repeated values\n\n Parameters\n ----------\n empty_array : numpy\n Numpy array of shape ...\n values : numpy\n 1D array of values to be filled in empty array\n\n Returns\n -------\n numpy\n Populated array with values\n \"\"\"\n\n full_array = np.copy(empty_array)\n for i, value in enumerate(values):\n full_array[i, :, :] = value\n return full_array\n\n\n def _upd8_meas_cfg(self, lidar_id, scan_type, az, el, rng, no_los,\n no_scans, scn_speed, sectrsz, scn_tm, rtn_tm, max_speed, max_acc):\n \"\"\"\n Updates measurement config\n\n Parameters\n ----------\n lidar_id : str\n ID of lidar for which measurement config is updates\n scan_type : str\n Scan type\n az : numpy\n Array of all azimuth positions\n el : numpy\n Array of all elevation positions\n rng : numpy\n Array of all range values\n no_los : int\n Number of line of sight\n no_scans : int\n Number of scans\n scn_speed : float\n Averaged scan speed through all measurement points\n sectrsz : float\n Size of scanned area (sector size for PPI)\n scn_tm : float\n Total time for scanning through all measurement points\n rtn_tm : float\n Time to return from the end to the begining of the scan\n max_speed : float\n Maximum permitted angular speed, in deg/s\n max_acc : float\n Maximum permitted angular acceleration, in deg/s^2\n \"\"\"\n\n self.meas_cfg[lidar_id]['config'].update({'scan_type':scan_type})\n self.meas_cfg[lidar_id]['config'].update({'max_scn_speed':max_speed})\n self.meas_cfg[lidar_id]['config'].update({'max_scn_acc':max_acc})\n self.meas_cfg[lidar_id]['config'].update({'scn_speed':scn_speed})\n self.meas_cfg[lidar_id]['config'].update({'no_los':no_los})\n self.meas_cfg[lidar_id]['config'].update({'no_scans':no_scans})\n self.meas_cfg[lidar_id]['config'].update({'sectrsz':sectrsz})\n self.meas_cfg[lidar_id]['config'].update({'scn_tm':scn_tm})\n self.meas_cfg[lidar_id]['config'].update({'rtn_tm':rtn_tm})\n self.meas_cfg[lidar_id]['config'].update({'az':az})\n self.meas_cfg[lidar_id]['config'].update({'el':el})\n self.meas_cfg[lidar_id]['config'].update({'rng':rng})\n\n def _cr8_probing_ds(self, lidar_id, az, el, rng, time):\n \"\"\"\n Creates Mocalum probing xr.DataSet\n\n Parameters\n ----------\n lidar_id : str\n ID of lidar for which probing dataset is created\n az : numpy\n Array of azimuth positions\n el : numpy\n Array of elevation positions\n rng : numpy\n Array of range values\n time : numpy\n Array of time values\n \"\"\"\n # generating empty uncertainty and xyz arrays\n unc = np.full(az.shape, 0.0, dtype=float)\n xyz = np.full(az.shape, np.nan, dtype=float)\n\n # pulling information from measurement config dictionary\n s_sz = (self.meas_cfg[lidar_id]['config']['sectrsz']\n if 'sectrsz' in self.meas_cfg[lidar_id]['config'] else None)\n n_scn = (self.meas_cfg[lidar_id]['config']['no_scans']\n if 'no_scans' in self.meas_cfg[lidar_id]['config'] else None)\n no_los = (self.meas_cfg[lidar_id]['config']['no_los']\n if 'no_los' in self.meas_cfg[lidar_id]['config'] else None)\n s_tm = (self.meas_cfg[lidar_id]['config']['scn_tm']\n if 'scn_tm' in 
self.meas_cfg[lidar_id]['config'] else None)\n r_tm = (self.meas_cfg[lidar_id]['config']['rtn_tm']\n if 'rtn_tm' in self.meas_cfg[lidar_id]['config'] else None)\n lidar_pos = (self.meas_cfg[lidar_id]['position']\n if 'position' in self.meas_cfg[lidar_id] else None)\n\n\n probing_ds = xr.Dataset({'az': (['time'], az),\n 'el': (['time'], el),\n 'rng': (['time'], rng),\n 'x': (['time'], xyz),\n 'y': (['time'], xyz),\n 'z': (['time'], xyz),\n 'unc_az': (['time'], unc),\n 'unc_el': (['time'], unc),\n 'unc_rng': (['time'], unc),\n 'unc_est': (['time'], unc),\n 'sectrsz':(s_sz),\n 'no_scans':(n_scn),\n 'no_los':(no_los),\n 'scan_tm':(s_tm),\n 'return_tm':(r_tm),\n 'lidar_pos_x':(lidar_pos[0]),\n 'lidar_pos_y':(lidar_pos[1]),\n 'lidar_pos_z':(lidar_pos[2]),\n },coords={'time': time})\n\n # adding/updating metadata\n probing_ds = self._add_metadata(probing_ds, metadata,\n 'Lidar atmosphere probing dataset')\n\n self.probing.update({lidar_id:probing_ds})\n\n\n def _add_unc(self, lidar_id, unc_term, samples):\n \"\"\"\n Adds generated uncertainties to probing xr.DataSet\n\n Parameters\n ----------\n lidar_id : str\n Id of lidar from self.meas_cfg lidar dict\n unc_term : str\n ID of uncertainty term\n samples : numpy\n Numpy array containing generated values for given uncertainty term\n \"\"\"\n self.tmp_unc = unc_term\n self.tmp_unc_val = samples\n self.probing[lidar_id][unc_term].values = samples\n\n\n def _add_xyz(self, lidar_id, x, y, z):\n \"\"\"\n Adds Cartesian coordinates to probing dataset\n\n Parameters\n ----------\n lidar_id : str\n Id of lidar for which probing dataset is being updated\n x : numpy\n Array of x values\n y : numpy\n Array of y values\n z : numpy\n Array of z values\n \"\"\"\n self.probing[lidar_id].x.values = x.values\n self.probing[lidar_id].y.values = y.values\n self.probing[lidar_id].z.values = z.values\n\n def _cr8_los_ds(self, lidar_id, los):\n \"\"\"\n Create mocalum los xarray dataset\n\n Parameters\n ----------\n lidar_id : str\n Id of lidar for which LOS dataset is being created\n los : numpy\n Array of los speed\n \"\"\"\n # TODO: detect what type of measurements it is (PPI, RHI, etc.)\n\n los = xr.Dataset({'vrad': (['time'], los),\n 'az': (['time'], self.probing[lidar_id].az.values),\n 'el': (['time'], self.probing[lidar_id].el.values),\n 'rng': (['time'], self.probing[lidar_id].rng.values),\n 'no_scans':(self.probing[lidar_id].no_scans.values),\n 'no_los': (self.probing[lidar_id].no_los.values)\n },coords={'time': self.probing[lidar_id].time.values})\n\n\n # adding/updating metadata\n los = self._add_metadata(los, metadata,'Radial wind speed dataset')\n\n self.los.update({lidar_id:los})\n def _cr8_sonic_ds(self, points_pos, time, u, v, w, ws, wdir):\n \"\"\"\n Create mocalum virtual sonic anemometer xarray dataset\n\n Parameters\n ----------\n points_pos : numpy\n Measurement point position as (n,3) shaped numpy array\n time : numpy\n Numpy array of time instances at which sonic is 'measuring'\n u : numpy\n Array of reconstructed u values\n v : numpy\n Array of reconstructed v values\n ws : numpy\n Array of reconstructed wind speed values\n wdir : numpy\n Array of reconstructed wind direction values\n w : numpy, optional\n Array of reconstructed vertical wind speed, by default None\n \"\"\"\n shape = points_pos.shape\n\n self.sonic_wind = xr.Dataset({'ws': (['time', 'point'], ws),\n 'wdir':(['time','point'], wdir),\n 'u': (['time', 'point'], u),\n 'v': ([ 'time', 'point'], v),\n 'w': (['time', 'point'], w)\n },coords={'time': time,\n 'point' : 
np.arange(1,len(points_pos)+1, 1),\n 'x':(['point'], points_pos[:,0]),\n 'y':(['point'], points_pos[:,1]),\n 'z':(['point'], points_pos[:,2])})\n\n # adding/updating metadata\n self.sonic_wind = self._add_metadata(self.sonic_wind, metadata,\n 'Virtual sonics')\n\n\n def _cr8_rc_wind_ds(self, scan_type, u, v, ws, wdir, w = None):\n \"\"\"\n Create mocalum reconstructed wind xarray dataset\n\n Parameters\n ----------\n scan_type : str\n Indicates scan type used to produce background LOS measurements\n u : numpy\n Array of reconstructed u values\n v : numpy\n Array of reconstructed v values\n ws : numpy\n Array of reconstructed wind speed values\n wdir : numpy\n Array of reconstructed wind direction values\n w : numpy, optional\n Array of reconstructed vertical wind speed, by default None\n \"\"\"\n shape = ws.shape\n if type(w) != type(None):\n self.rc_wind = xr.Dataset({'ws': (['scan', 'point'], ws),\n 'wdir':(['scan', 'point'], wdir),\n 'u': (['scan', 'point'], u),\n 'v': (['scan', 'point'], v),\n 'w': (['scan', 'point'], w)\n },coords={'scan': np.arange(1,shape[0]+1, 1),\n 'point' : np.arange(1,shape[1]+1, 1)})\n else:\n self.rc_wind = xr.Dataset({'ws': (['scan','point'], ws),\n 'wdir':(['scan', 'point'], wdir),\n 'u': (['scan', 'point'], u),\n 'v': (['scan', 'point'], v)\n },coords={'scan': np.arange(1,shape[0]+1, 1),\n 'point' : np.arange(1,shape[1]+1, 1)})\n\n\n # adding/updating metadata\n self.rc_wind = self._add_metadata(self.rc_wind, metadata,\n 'Reconstructed wind')\n self.rc_wind.attrs['scan_type'] = scan_type\n\n def _get_ffield_coords(self, id):\n \"\"\"\n Gets coordinates of flow field points\n\n Parameters\n ----------\n id : str\n BBOX cfg id\n\n Returns\n -------\n list\n list of numpy arrays for x, y, z, and time coordinates\n \"\"\"\n bbox_cfg=self.bbox_ffield[id]\n\n x_coords = np.arange(bbox_cfg['x']['min'] - bbox_cfg['x']['res'],\n bbox_cfg['x']['max'] + 2*bbox_cfg['x']['res'],\n bbox_cfg['x']['res'])\n\n y_coords = np.arange(bbox_cfg['y']['min'] - bbox_cfg['y']['res'],\n bbox_cfg['y']['max'] + 2*bbox_cfg['y']['res'],\n bbox_cfg['y']['res'])\n\n z_coords = np.arange(bbox_cfg['z']['min'] - bbox_cfg['z']['res'],\n bbox_cfg['z']['max'] + 2*bbox_cfg['z']['res'],\n bbox_cfg['z']['res'])\n\n t_coords = np.arange(bbox_cfg['t']['min'],\n bbox_cfg['t']['max'] + bbox_cfg['t']['res'],\n bbox_cfg['t']['res'])\n\n return x_coords, y_coords, z_coords, t_coords\n\n\n @staticmethod\n def _add_metadata(ds, metadata, ds_title=''):\n \"\"\"\n Adds metadata to xr.DataSet\n\n Parameters\n ----------\n ds : xr.DataSet\n Mocalum xarray DataSet\n metadata : module\n Python module containing dictionaries of metadata\n ds_title : str, optional\n Title of DataSet, by default ''\n\n Returns\n -------\n xr.DataSet\n Mocalum xarray DataSet enriched with metadata\n \"\"\"\n for var in ds.data_vars.keys():\n if var in metadata.VARS:\n ds[var].attrs = metadata.VARS[var]\n for coord in ds.coords.keys():\n if coord in metadata.DIMS:\n ds[coord].attrs = metadata.DIMS[coord]\n ds.attrs['title'] = ds_title\n return ds\n\n\ndata = Data()"
] | [
[
"numpy.ceil",
"numpy.linalg.inv",
"numpy.copy",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.full"
]
] |
PolSl-PBL-7/DragonEye | [
"13ba314809e62ec1118756a00453a7e175df4329"
] | [
"pipelines/training_pipeline.py"
] | [
"NAME = \"training_pipeline\"\n\n\ndef training_pipeline(\n pipeline_params: dict,\n compile_params: dict,\n model_params: dict,\n source_params: dict,\n training_params: dict,\n data_processing_pipeline_params:\n dict = None,\n versioner_params: dict = None,\n processor_params: dict = None,\n sink_params: dict = None,\n source_params_dynamic: dict = None,\n processor_params_dynamic: dict = None,\n):\n\n from dnn.training.builder import CompileConfig, model_builder, config_builder\n from data import LocalTFDataSource, SourceConfig\n from pipelines.data_processing_pipeline import data_processing_pipeline\n from dnn.training.losses import losses\n from dnn.training.metrics import metrics\n\n import tensorflow as tf\n\n import wandb\n from wandb.keras import WandbCallback\n\n from utils.callbacks import CallbackName, get_callback_by_name\n from datetime import datetime\n import pickle\n import gc\n\n if data_processing_pipeline_params and versioner_params and processor_params:\n dataset = data_processing_pipeline(\n versioner_params=versioner_params,\n source_params=source_params,\n processor_params=processor_params,\n pipeline_params=data_processing_pipeline_params,\n sink_params=sink_params,\n source_params_dynamic=source_params_dynamic,\n processor_params_dynamic=processor_params_dynamic)\n else:\n source_config = SourceConfig(**source_params)\n source = LocalTFDataSource(source_config)\n dataset = source(pipeline_params['dataset_path'])\n\n # Add labels\n if processor_params_dynamic and source_params_dynamic and pipeline_params['model'] == 'ITAE':\n dataset = dataset.map(lambda x: (x, x['Input_Dynamic']))\n else:\n dataset = tf.data.Dataset.zip((dataset, dataset))\n\n dataset_size = len([0 for _ in dataset])\n train_dataset = dataset.take(int(dataset_size * 0.8))\n val_dataset = dataset.skip(int(dataset_size * 0.8))\n\n gpus = tf.config.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(\n logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\n compile_config = CompileConfig(**compile_params)\n model_config = config_builder[pipeline_params['model']](**model_params)\n model = model_builder[pipeline_params['model']](\n model_config=model_config,\n compile_config=compile_config\n )\n print(\"model created\")\n\n for callback in training_params['callbacks']:\n if callback == CallbackName.wandb_training_loss.value:\n wandb.init(project=pipeline_params['project'],\n entity=pipeline_params['entity'],\n magic=pipeline_params['magic'])\n\n training_params['callbacks'] = [callback if not isinstance(callback, str) else get_callback_by_name(callback) for callback in training_params['callbacks']]\n training_params['callbacks'].append(\n tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n patience=3,\n ))\n\n history = model.fit(train_dataset, **training_params, validation_data=val_dataset, shuffle=True)\n\n model_path = str(pipeline_params['model_path'])\n if pipeline_params['add_date_to_model_path']:\n model_path += f'/{datetime.now().strftime(r\"%m-%d-%Y-%H-%M-%S\")}'\n\n if pipeline_params['model_path']:\n model.save(model_path + '/model')\n with open(model_path + '/history', 'wb') as f:\n pickle.dump(history.history, f)\n\n gc.collect()\n\n return model, history, model_path\n"
] | [
[
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.list_logical_devices",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.data.Dataset.zip",
"tensorflow.config.list_physical_devices"
]
] |
ubiquity6/MVSNet | [
"7dc026acb019d270e79de7be4a5cfcb33863127f"
] | [
"mvsnet/cnn_wrapper/network.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nCopyright 2019, Zixin Luo & Yao Yao, HKUST.\nCNN layer wrapper.\n\nPlease be noted that the center and scale paramter are disabled by default for all BN / GN layers\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\n\nfrom mvsnet.cnn_wrapper.common import Notify\n\nDEFAULT_PADDING = 'SAME'\n\n\ndef layer(op):\n \"\"\"Decorator for composable network layers.\"\"\"\n\n def layer_decorated(self, *args, **kwargs):\n \"\"\"Layer decoration.\"\"\"\n # We allow to construct low-level layers instead of high-level networks.\n if self.inputs is None or (len(args) > 0 and isinstance(args[0], tf.Tensor)):\n layer_output = op(self, *args, **kwargs)\n return layer_output\n # Automatically set a name if not provided.\n name = kwargs.setdefault('name', self.get_unique_name(op.__name__))\n # Figure out the layer inputs.\n if not self.terminals:\n raise RuntimeError('No input variables found for layer %s.' % name)\n elif len(self.terminals) == 1:\n layer_input = self.terminals[0]\n else:\n layer_input = list(self.terminals)\n # Perform the operation and get the output.\n layer_output = op(self, layer_input, *args, **kwargs)\n # Add to layer LUT.\n self.layers[name] = layer_output\n # This output is now the input for the next layer.\n self.feed(layer_output)\n # Return self for chained calls.\n return self\n\n return layer_decorated\n\n\nclass Network(object):\n \"\"\"Class NetWork.\"\"\"\n\n def __init__(self, inputs, trainable, training=True, mode='normal',\n dropout_rate=0.5, seed=None, epsilon=1e-5, reuse=False, fcn=True, regularize=True,\n **kwargs):\n # The input nodes for this network\n self.inputs = inputs\n # If true, the resulting variables are set as trainable\n self.trainable = trainable if isinstance(trainable, bool) else True\n # If true, variables are shared between feature towers\n self.reuse = reuse\n # Should be True for training mode and false for inference\n self.training = training\n # Dropout rate\n self.dropout_rate = dropout_rate\n # Seed for randomness\n self.seed = seed\n # Add regularizer for parameters.\n self.regularizer = tf.contrib.layers.l2_regularizer(\n 1.0) if regularize else None\n # The epsilon paramater in BN layer.\n self.bn_epsilon = epsilon\n self.extra_args = kwargs\n self.base_divisor = 1.0 # for dividing the base_filter size of the network\n if mode == 'semilite':\n self.base_divisor = 4/3\n if mode == 'lite':\n self.base_divisor = 2.0\n if mode == 'ultralite':\n self.base_divisor = 4.0\n if mode == 'fat':\n self.base_divisor = 0.5\n if mode == 'ultrafat':\n self.base_divisor = 0.25\n if inputs is not None:\n # The current list of terminal nodes\n self.terminals = []\n # Mapping from layer names to layers\n self.layers = dict(inputs)\n # If true, dense layers will be omitted in network construction\n self.fcn = fcn\n self.setup()\n\n def setup(self):\n '''Construct the network. 
'''\n raise NotImplementedError('Must be implemented by the subclass.')\n\n def load(self, data_path, session, ignore_missing=False, exclude_var=None):\n '''Load network weights.\n data_path: The path to the numpy-serialized network weights\n session: The current TensorFlow session\n ignore_missing: If true, serialized weights for missing layers are ignored.\n '''\n data_dict = np.load(data_path).item()\n if exclude_var is not None:\n keyword = exclude_var.split(',')\n assign_op = []\n for op_name in data_dict:\n if exclude_var is not None:\n find_keyword = False\n for tmp_keyword in keyword:\n if op_name.find(tmp_keyword) >= 0:\n find_keyword = True\n if find_keyword:\n continue\n\n with tf.variable_scope(op_name, reuse=True):\n for param_name, data in data_dict[op_name].iteritems():\n\n try:\n var = tf.get_variable(param_name)\n assign_op.append(var.assign(data))\n except ValueError:\n if not ignore_missing:\n raise\n else:\n print(Notify.WARNING, ':'.join(\n [op_name, param_name]), \"is omitted.\", Notify.ENDC)\n session.run(assign_op)\n\n def feed(self, *args):\n '''Set the input(s) for the next operation by replacing the terminal nodes.\n The arguments can be either layer names or the actual layers.\n '''\n assert args\n self.terminals = []\n for fed_layer in args:\n if isinstance(fed_layer, basestring):\n try:\n fed_layer = self.layers[fed_layer]\n except KeyError:\n raise KeyError('Unknown layer name fed: %s' % fed_layer)\n self.terminals.append(fed_layer)\n return self\n\n def get_output(self):\n '''Returns the current network output.'''\n return self.terminals[-1]\n\n def get_output_by_name(self, layer_name):\n '''\n Get graph node by layer name\n :param layer_name: layer name string\n :return: tf node\n '''\n return self.layers[layer_name]\n\n def get_unique_name(self, prefix):\n '''Returns an index-suffixed unique name for the given prefix.\n This is used for auto-generating layer names based on the type-prefix.\n '''\n ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1\n return '%s_%d' % (prefix, ident)\n\n def change_inputs(self, input_tensors):\n assert len(input_tensors) == 1\n for key in input_tensors:\n self.layers[key] = input_tensors[key]\n\n @layer\n def conv(self,\n input_tensor,\n kernel_size,\n filters,\n strides,\n name,\n relu=True,\n dilation_rate=1,\n padding=DEFAULT_PADDING,\n biased=True,\n reuse=False,\n separable=False):\n \"\"\"2D/3D convolution.\"\"\"\n kwargs = {'filters': filters,\n 'kernel_size': kernel_size,\n 'strides': strides,\n 'activation': tf.nn.relu if relu else None,\n 'use_bias': biased,\n 'dilation_rate': dilation_rate,\n 'trainable': self.trainable,\n 'reuse': self.reuse or reuse,\n 'bias_regularizer': self.regularizer if biased else None,\n 'name': name,\n 'padding': padding}\n\n if separable:\n kwargs['depthwise_regularizer'] = self.regularizer\n kwargs['pointwise_regularizer'] = self.regularizer\n else:\n kwargs['kernel_regularizer'] = self.regularizer\n\n if len(input_tensor.get_shape()) == 4:\n if not separable:\n return tf.layers.conv2d(input_tensor, **kwargs)\n else:\n return tf.layers.separable_conv2d(input_tensor, **kwargs)\n elif len(input_tensor.get_shape()) == 5:\n if not separable:\n return tf.layers.conv3d(input_tensor, **kwargs)\n else:\n raise NotImplementedError(\n 'No official implementation for separable_conv3d')\n else:\n raise ValueError('Improper input rank for layer: ' + name)\n\n @layer\n def conv_gn(self,\n input_tensor,\n kernel_size,\n filters,\n strides,\n name,\n relu=True,\n center=False,\n 
scale=False,\n dilation_rate=1,\n channel_wise=True,\n group=32,\n group_channel=8,\n padding=DEFAULT_PADDING,\n biased=False,\n separable=False):\n assert len(input_tensor.get_shape()) == 4\n conv = self.conv(input_tensor, kernel_size, filters, strides, name, relu=False,\n dilation_rate=dilation_rate, padding=padding,\n biased=biased, reuse=self.reuse, separable=separable)\n\n # tranpose: [bs, h, w, c] to [bs, c, h, w] following the paper\n x = tf.transpose(conv, [0, 3, 1, 2])\n shape = tf.shape(x)\n N = shape[0]\n C = x.get_shape()[1]\n H = shape[2]\n W = shape[3]\n if channel_wise:\n G = max(1, C / group_channel)\n else:\n G = min(group, C)\n\n # normalization\n x = tf.reshape(x, [N, G, C // G, H, W])\n mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)\n x = (x - mean) / tf.sqrt(var + self.bn_epsilon)\n\n # per channel scale and bias (gamma and beta)\n with tf.variable_scope(name + '/gn', reuse=self.reuse):\n if scale:\n gamma = tf.get_variable(\n 'gamma', [C], dtype=tf.float32, initializer=tf.ones_initializer())\n else:\n gamma = tf.constant(1.0, shape=[C])\n if center:\n beta = tf.get_variable(\n 'beta', [C], dtype=tf.float32, initializer=tf.zeros_initializer())\n else:\n beta = tf.constant(0.0, shape=[C])\n gamma = tf.reshape(gamma, [1, C, 1, 1])\n beta = tf.reshape(beta, [1, C, 1, 1])\n output = tf.reshape(x, [-1, C, H, W]) * gamma + beta\n\n # tranpose: [bs, c, h, w, c] to [bs, h, w, c] following the paper\n output = tf.transpose(output, [0, 2, 3, 1])\n if relu:\n output = self.relu(output, name + '/relu')\n return output\n\n @layer\n def conv_bn(self,\n input_tensor,\n kernel_size,\n filters,\n strides,\n name,\n relu=True,\n center=False,\n scale=False,\n dilation_rate=1,\n padding=DEFAULT_PADDING,\n biased=False,\n separable=False,\n reuse=False):\n conv = self.conv(input_tensor, kernel_size, filters, strides, name, relu=False,\n dilation_rate=dilation_rate, padding=padding,\n biased=biased, reuse=reuse, separable=separable)\n conv_bn = self.batch_normalization(conv, name + '/bn',\n center=center, scale=scale, relu=relu, reuse=reuse)\n return conv_bn\n\n @layer\n def deconv(self,\n input_tensor,\n kernel_size,\n filters,\n strides,\n name,\n relu=True,\n padding=DEFAULT_PADDING,\n biased=True,\n reuse=False):\n \"\"\"2D/3D deconvolution.\"\"\"\n kwargs = {'filters': filters,\n 'kernel_size': kernel_size,\n 'strides': strides,\n 'activation': tf.nn.relu if relu else None,\n 'use_bias': biased,\n 'trainable': self.trainable,\n 'reuse': self.reuse or reuse,\n 'kernel_regularizer': self.regularizer,\n 'bias_regularizer': self.regularizer if biased else None,\n 'name': name,\n 'padding': padding}\n\n if len(input_tensor.get_shape()) == 4:\n return tf.layers.conv2d_transpose(input_tensor, **kwargs)\n elif len(input_tensor.get_shape()) == 5:\n return tf.layers.conv3d_transpose(input_tensor, **kwargs)\n else:\n raise ValueError('Improper input rank for layer: ' + name)\n\n @layer\n def deconv_bn(self,\n input_tensor,\n kernel_size,\n filters,\n strides,\n name,\n relu=True,\n center=False,\n scale=False,\n padding=DEFAULT_PADDING,\n biased=False,\n reuse=False):\n deconv = self.deconv(input_tensor, kernel_size, filters, strides, name,\n relu=False, padding=padding, biased=biased, reuse=reuse)\n deconv_bn = self.batch_normalization(deconv, name + '/bn',\n center=center, scale=scale, relu=relu, reuse=reuse)\n return deconv_bn\n\n @layer\n def deconv_gn(self,\n input_tensor,\n kernel_size,\n filters,\n strides,\n name,\n relu=False,\n center=False,\n scale=False,\n 
channel_wise=True,\n group=32,\n group_channel=8,\n padding=DEFAULT_PADDING,\n biased=False):\n assert len(input_tensor.get_shape()) == 4\n\n # deconvolution\n deconv = self.deconv(input_tensor, kernel_size, filters, strides, name,\n relu=False, padding=padding, biased=biased, reuse=self.reuse)\n\n # group normalization\n x = tf.transpose(deconv, [0, 3, 1, 2])\n shape = tf.shape(x)\n N = shape[0]\n C = x.get_shape()[1]\n H = shape[2]\n W = shape[3]\n if channel_wise:\n G = max(1, C / group_channel)\n else:\n G = min(group, C)\n\n # normalization\n x = tf.reshape(x, [N, G, C // G, H, W])\n mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)\n x = (x - mean) / tf.sqrt(var + self.bn_epsilon)\n\n # per channel scale and bias (gamma and beta)\n with tf.variable_scope(name + '/gn', reuse=self.reuse):\n if scale:\n gamma = tf.get_variable(\n 'gamma', [C], dtype=tf.float32, initializer=tf.ones_initializer())\n else:\n gamma = tf.constant(1.0, shape=[C])\n if center:\n beta = tf.get_variable(\n 'beta', [C], dtype=tf.float32, initializer=tf.zeros_initializer())\n else:\n beta = tf.constant(0.0, shape=[C])\n gamma = tf.reshape(gamma, [1, C, 1, 1])\n beta = tf.reshape(beta, [1, C, 1, 1])\n output = tf.reshape(x, [-1, C, H, W]) * gamma + beta\n\n # tranpose: [bs, c, h, w, c] to [bs, h, w, c] following the paper\n output = tf.transpose(output, [0, 2, 3, 1])\n\n if relu:\n output = self.relu(output, name + '/relu')\n return output\n\n @layer\n def relu(self, input_tensor, name=None):\n \"\"\"ReLu activation.\"\"\"\n return tf.nn.relu(input_tensor, name=name)\n\n @layer\n def max_pool(self, input_tensor, pool_size, strides, name, padding=DEFAULT_PADDING):\n \"\"\"Max pooling.\"\"\"\n return tf.layers.max_pooling2d(input_tensor,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n name=name)\n\n @layer\n def avg_pool(self, input_tensor, pool_size, strides, name, padding=DEFAULT_PADDING):\n \"\"\"\"Average pooling.\"\"\"\n return tf.layers.average_pooling2d(input_tensor,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n name=name)\n\n @layer\n def l2_pool(self, input_tensor, pool_size, strides, name, padding=DEFAULT_PADDING):\n \"\"\"L2 pooling.\"\"\"\n return tf.sqrt(tf.layers.average_pooling2d(\n tf.square(input_tensor),\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n name=name) + 1e-6)\n\n @layer\n def lrn(self, input_tensor, radius, alpha, beta, name, bias=1.0):\n return tf.nn.local_response_normalization(input_tensor,\n depth_radius=radius,\n alpha=alpha,\n beta=beta,\n bias=bias,\n name=name)\n\n @layer\n def concat(self, input_tensors, axis, name):\n return tf.concat(values=input_tensors, axis=axis, name=name)\n\n @layer\n def add(self, input_tensors, name):\n return tf.add_n(input_tensors, name=name)\n\n @layer\n def fc(self, input_tensor, num_out, name, biased=True, relu=True, flatten=True, reuse=False):\n # To behave same to Caffe.\n if flatten:\n flatten_tensor = tf.layers.flatten(input_tensor)\n else:\n flatten_tensor = input_tensor\n return tf.layers.dense(flatten_tensor,\n units=num_out,\n use_bias=biased,\n activation=tf.nn.relu if relu else None,\n trainable=self.trainable,\n reuse=self.reuse or reuse,\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer if biased else None,\n name=name)\n\n @layer\n def fc_bn(self, input_tensor, num_out, name,\n biased=False, relu=True, center=False, scale=False, flatten=True, reuse=False):\n # To behave same to Caffe.\n fc = self.fc(input_tensor, num_out, name, relu=False,\n biased=biased, 
flatten=flatten, reuse=reuse)\n fc_bn = self.batch_normalization(fc, name + '/bn',\n center=center, scale=scale, relu=relu, reuse=reuse)\n return fc_bn\n\n @layer\n def softmax(self, input_tensor, name, dim=-1):\n return tf.nn.softmax(input_tensor, dim=dim, name=name)\n\n @layer\n def batch_normalization(self, input_tensor, name,\n center=False, scale=False, relu=False, reuse=False):\n \"\"\"Batch normalization.\"\"\"\n output = tf.layers.batch_normalization(input_tensor,\n center=center,\n scale=scale,\n fused=True,\n training=self.training,\n trainable=self.trainable,\n reuse=self.reuse or reuse,\n epsilon=self.bn_epsilon,\n gamma_regularizer=None, # self.regularizer if scale else None,\n beta_regularizer=None, # self.regularizer if center else None,\n name=name)\n if relu:\n output = self.relu(output, name + '/relu')\n return output\n\n @layer\n def dropout(self, input_tensor, name):\n return tf.layers.dropout(input_tensor,\n rate=self.dropout_rate,\n training=self.training,\n seed=self.seed,\n name=name)\n\n @layer\n def l2norm(self, input_tensor, name, dim=-1):\n return tf.nn.l2_normalize(input_tensor, dim=dim, name=name)\n\n @layer\n def squeeze(self, input_tensor, axis=None, name=None):\n return tf.squeeze(input_tensor, axis=axis, name=name)\n\n @layer\n def reshape(self, input_tensor, shape, name=None):\n return tf.reshape(input_tensor, shape, name=name)\n\n @layer\n def flatten(self, input_tensor, name=None):\n return tf.layers.flatten(input_tensor, name=name)\n\n @layer\n def tanh(self, input_tensor, name=None):\n return tf.tanh(input_tensor, name=name)\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.concat",
"tensorflow.layers.average_pooling2d",
"tensorflow.nn.softmax",
"tensorflow.layers.conv3d_transpose",
"tensorflow.layers.flatten",
"tensorflow.ones_initializer",
"tensorflow.layers.conv3d",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.layers.conv2d_transpose",
"tensorflow.transpose",
"tensorflow.constant",
"numpy.load",
"tensorflow.shape",
"tensorflow.add_n",
"tensorflow.zeros_initializer",
"tensorflow.tanh",
"tensorflow.layers.batch_normalization",
"tensorflow.layers.max_pooling2d",
"tensorflow.layers.dense",
"tensorflow.layers.dropout",
"tensorflow.nn.l2_normalize",
"tensorflow.nn.local_response_normalization",
"tensorflow.nn.moments",
"tensorflow.layers.separable_conv2d",
"tensorflow.sqrt",
"tensorflow.square",
"tensorflow.nn.relu",
"tensorflow.get_variable"
]
] |
abcxs/polyrnn | [
"92eee689fe62585529deb1c44fbf1c889f414fa2"
] | [
"mmdet/models/utils/vertex_util.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, xavier_init\nfrom mmcv.cnn.bricks import NonLocal2d\nfrom .builder import MODULE_UTIL\n\nclass Bottleneck(nn.Module):\n\n def __init__(self,\n in_channels,\n mid_channels,\n dilation,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU')):\n super(Bottleneck, self).__init__()\n norm_cfg_ = dict(type='GN', num_groups=4)\n self.conv1 = ConvModule(in_channels, mid_channels, kernel_size=1, padding=0, norm_cfg=norm_cfg_, act_cfg=act_cfg)\n norm_cfg_ = dict(type='GN', num_groups=1)\n self.conv2 = ConvModule(mid_channels, mid_channels, kernel_size=3, padding=dilation, dilation=dilation, norm_cfg=norm_cfg_, act_cfg=act_cfg)\n norm_cfg_ = dict(type='GN', num_groups=4)\n self.conv3 = ConvModule(mid_channels, in_channels, kernel_size=1, padding=0, norm_cfg=norm_cfg_, act_cfg=act_cfg)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n identity = x\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n out = out + identity\n return out\n\n@MODULE_UTIL.register_module()\nclass FusionModule(nn.Module):\n def __init__(self, in_channels, refine_level=2, refine_type=None, conv_cfg=None,\n norm_cfg=None, num_convs=0, dilations=None):\n super(FusionModule, self).__init__()\n self.in_channels = in_channels\n self.refine_level = refine_level\n self.refine_type = refine_type\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n if self.refine_type == 'conv':\n self.refine = ConvModule(\n self.in_channels,\n self.in_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg)\n elif self.refine_type == 'non_local':\n self.refine = NonLocal2d(\n self.in_channels,\n reduction=1,\n use_scale=False,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg)\n \n self.convs = nn.ModuleList()\n if dilations is None:\n dilations = [1 for _ in range(num_convs)]\n else:\n assert len(dilations) == num_convs\n for dilation in dilations:\n self.convs.append(Bottleneck(self.in_channels, self.in_channels // 4, dilation=dilation))\n\n def init_weights(self):\n \"\"\"Initialize the weights of FPN module.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n def forward(self, inputs):\n num_levels = len(inputs)\n\n feats = []\n gather_size = inputs[self.refine_level].size()[2:]\n for i in range(num_levels):\n if i < self.refine_level:\n gathered = F.adaptive_max_pool2d(\n inputs[i], output_size=gather_size)\n else:\n gathered = F.interpolate(\n inputs[i], size=gather_size, mode='nearest')\n feats.append(gathered)\n\n bsf = sum(feats) / len(feats)\n\n if self.refine_type is not None:\n bsf = self.refine(bsf)\n \n for conv in self.convs:\n bsf = conv(bsf)\n\n return bsf\n\nclass ConvLstmCell(nn.Module):\n def __init__(self, input_size, hidden_size, kernel_size, bias=True):\n super(ConvLstmCell, self).__init__()\n \n self.input_size = input_size\n self.hidden_size = hidden_size\n self.kernel_size = kernel_size\n self.padding = kernel_size // 2\n\n self.ih = nn.Conv2d(in_channels=self.input_size, \n out_channels=self.hidden_size * 4, \n kernel_size=self.kernel_size, \n padding=self.padding, \n bias=bias)\n self.hh = nn.Conv2d(in_channels=self.hidden_size, \n out_channels=self.hidden_size * 4, \n kernel_size=self.kernel_size, \n padding=self.padding, \n bias=bias)\n\n def forward(self, input, hidden):\n hx, cx = hidden\n gates = self.ih(input) + self.hh(hx)\n \n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = 
torch.sigmoid(ingate)\n forgetgate = torch.sigmoid(forgetgate)\n cellgate = torch.tanh(cellgate)\n outgate = torch.sigmoid(outgate)\n\n cy = forgetgate * cx + ingate * cellgate\n hy = outgate * torch.tanh(cy)\n\n return hy, cy\n \n\nclass ConvLstm(nn.Module):\n def __init__(self, input_size, hidden_size, kernel_size=3, num_layers=1, bias=True, bidirectional=False):\n super(ConvLstm, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.kernel_size = kernel_size\n self.num_layer = num_layers\n self.bias = bias\n self.bidirectional = bidirectional\n num_directions = 2 if bidirectional else 1\n self.num_directions = num_directions\n\n for layer in range(num_layers):\n for direction in range(num_directions):\n layer_input_size = input_size if layer == 0 else hidden_size * num_directions\n cell = ConvLstmCell(layer_input_size, hidden_size, kernel_size, bias)\n setattr(self, f'cell_{layer}_{direction}', cell)\n\n def forward(self, input, hx=None):\n \n def _forward(cell, reverse=False):\n def forward(input, hidden):\n output = []\n steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))\n for i in steps:\n hidden = cell(input[i], hidden)\n output.append(hidden[0])\n\n if reverse:\n output.reverse()\n\n output = torch.cat(output, 0).view(input.size(0), *output[0].size())\n return hidden, output\n return forward\n\n if hx is None:\n max_batch_size = input.size(1)\n hx = input.new_zeros(self.num_layer * self.num_directions, max_batch_size, self.hidden_size, input.size(3), input.size(4), requires_grad=False)\n hx = (hx, hx)\n\n hx = list(zip(*hx))\n\n next_hidden = []\n for i in range(self.num_layer):\n all_output = []\n for j in range(self.num_directions):\n reverse = False\n if j & 1:\n reverse = True\n cell = _forward(getattr(self, f'cell_{i}_{j}'), reverse=reverse)\n\n l = i * self.num_directions + j\n hy, output = cell(input, hx[l])\n\n next_hidden.append(hy)\n all_output.append(output)\n\n input = torch.cat(all_output, 2)\n \n next_h, next_c = zip(*next_hidden)\n total_layers = self.num_layer * self.num_directions\n next_hidden = (\n torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),\n torch.cat(next_c, 0).view(total_layers, *next_c[0].size())\n )\n\n return input, next_hidden\n\ndef test_conv_lstm():\n convlstm = ConvLstm(32, 32, 3, 2, bidirectional=False)\n input = torch.randn(10, 4, 32, 8, 8)\n output, (h, c) = convlstm(input)\n print(output.shape)\n print(h.shape)\n print(c.shape)\n\nif __name__ == '__main__':\n test_conv_lstm()"
] | [
[
"torch.randn",
"torch.nn.functional.adaptive_max_pool2d",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.tanh",
"torch.sigmoid",
"torch.cat",
"torch.nn.functional.interpolate"
]
] |
WdBlink/Teacher-Student-Faster-Rcnn | [
"df8085c61e334abb04bab5e8192de8cb4ce2b2af"
] | [
"ubteacher/data/datasets/builtin.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\nimport contextlib\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom fvcore.common.timer import Timer\nfrom fvcore.common.file_io import PathManager\nimport io\nimport logging\nfrom detectron2.data.datasets.cityscapes import load_cityscapes_instances, load_cityscapes_semantic\nfrom detectron2.data.datasets.cityscapes_panoptic import register_all_cityscapes_panoptic\nfrom detectron2.data.datasets.builtin_meta import _get_builtin_metadata\nfrom detectron2.structures import BoxMode\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom typing import List, Tuple, Union\nfrom pathlib import Path\nimport glob\nimport cv2\n\nlogger = logging.getLogger(__name__)\nIMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes\n\nJSON_ANNOTATIONS_DIR = \"\"\n_SPLITS_COCO_FORMAT = [\n (\"destroy_201005069_unlable\", \"/test_tif/201005069\"),\n (\"destroy_0013_unlable\", \"/test_tif/0013\")\n]\n\n\ndef register_destroy_unlabel(root):\n for key, dataset_name in _SPLITS_COCO_FORMAT:\n meta = {}\n image_root = root + dataset_name\n register_destroy_unlabel_instances(key, meta, image_root)\n\n\ndef register_destroy_unlabel_instances(name, metadata, image_root):\n \"\"\"\n Register a dataset in COCO's json annotation format for\n instance detection, instance segmentation and keypoint detection.\n (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.\n `instances*.json` and `person_keypoints*.json` in the dataset).\n\n This is an example of how to register a new dataset.\n You can do something similar to this function, to register new datasets.\n\n Args:\n name (str): the name that identifies a dataset, e.g. \"coco_2014_train\".\n metadata (dict): extra metadata associated with this dataset. You can\n leave it as an empty dict.\n json_file (str): path to the json instance annotation file.\n image_root (str or path-like): directory which contains all the images.\n \"\"\"\n assert isinstance(name, str), name\n assert isinstance(image_root, (str, os.PathLike)), image_root\n\n # 1. register a function which returns dicts\n DatasetCatalog.register(\n name, lambda: load_destroy_unlabel_file(image_root)\n )\n\n # 2. 
Optionally, add metadata about this dataset,\n # since they might be useful in evaluation, visualization or logging\n MetadataCatalog.get(name).set(\n image_root=image_root, evaluator_type=\"destroy_voc\", **metadata\n )\n\n\ndef load_destroy_unlabel_file(image_root):\n p = str(Path(image_root).absolute())\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n dataset_dicts = []\n for path in images:\n img_id = os.path.splitext(os.path.split(path)[1])[0]\n file_name = os.path.split(path)[1]\n img = cv2.imread(path)\n height, width = img.shape[:2]\n record = {}\n record[\"file_name\"] = path\n record[\"height\"] = height\n record[\"width\"] = width\n record[\"image_id\"] = img_id\n dataset_dicts.append(record)\n\n return dataset_dicts\n\n# ==== Predefined splits for raw cityscapes images ===========\n_RAW_CITYSCAPES_SPLITS = {\n \"cityscapes_foggy_fine_{task}_train\": (\"cityscapes/leftImg8bit_foggy/train/\", \"cityscapes/gtFine/train/\"),\n \"cityscapes_foggy_fine_{task}_val\": (\"cityscapes/leftImg8bit_foggy/val/\", \"cityscapes/gtFine/val/\"),\n \"cityscapes_foggy_fine_{task}_test\": (\"cityscapes/leftImg8bit_foggy/test/\", \"cityscapes/gtFine/test/\"),\n}\n\ndef register_all_cityscapes(root):\n for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():\n meta = _get_builtin_metadata(\"cityscapes\")\n image_dir = os.path.join(root, image_dir)\n gt_dir = os.path.join(root, gt_dir)\n\n inst_key = key.format(task=\"instance_seg\")\n DatasetCatalog.register(\n inst_key,\n lambda x=image_dir, y=gt_dir: load_cityscapes_instances(\n x, y, from_json=True, to_polygons=True\n ),\n )\n MetadataCatalog.get(inst_key).set(\n image_dir=image_dir, gt_dir=gt_dir, evaluator_type=\"cityscapes_instance\", **meta\n )\n\n sem_key = key.format(task=\"sem_seg\")\n DatasetCatalog.register(\n sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)\n )\n MetadataCatalog.get(sem_key).set(\n image_dir=image_dir,\n gt_dir=gt_dir,\n evaluator_type=\"cityscapes_sem_seg\",\n ignore_label=255,\n **meta,\n )\n\n# ==== Predefined splits for PASCAL VOC ===========\\\nCLASS_NAMES = [\"1\"]\n\ndef register_all_destroy_voc(root):\n SPLITS = [\n (\"voc_destroy_trainval\", \"taining_data_2021-08-19\", \"trainval\"),\n (\"voc_destroy_train\", \"taining_data_2021-08-19\", \"train\"),\n (\"voc_destroy_val\", \"taining_data_2021-08-19\", \"val\"),\n (\"voc_destroy_test\", \"taining_data_2021-08-19\", \"test\")\n ]\n for name, dirname, split in SPLITS:\n register_pascal_voc(name, os.path.join(root, dirname), split)\n MetadataCatalog.get(name).evaluator_type = \"destroy_voc\"\n\ndef load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):\n \"\"\"\n Load Pascal VOC detection annotations to Detectron2 format.\n\n Args:\n dirname: Contain \"Annotations\", \"ImageSets\", \"JPEGImages\"\n split (str): one of \"train\", \"test\", \"val\", \"trainval\"\n class_names: list or tuple of class names\n \"\"\"\n with PathManager.open(os.path.join(dirname, \"ImageSets\", \"Main\", split + \".txt\")) as f:\n fileids = np.loadtxt(f, dtype=np.str)\n\n # Needs to read many small annotation files. 
Makes sense at local\n annotation_dirname = PathManager.get_local_path(os.path.join(dirname, \"Annotations/\"))\n dicts = []\n for fileid in fileids:\n anno_file = os.path.join(annotation_dirname, fileid + \".xml\")\n jpeg_file = os.path.join(dirname, \"JPEGImages\", fileid + \".tif\")\n\n with PathManager.open(anno_file) as f:\n tree = ET.parse(f)\n\n r = {\n \"file_name\": jpeg_file,\n \"image_id\": fileid,\n \"height\": int(tree.findall(\"./size/height\")[0].text),\n \"width\": int(tree.findall(\"./size/width\")[0].text),\n }\n instances = []\n\n for obj in tree.findall(\"object\"):\n cls = obj.find(\"name\").text\n # We include \"difficult\" samples in training.\n # Based on limited experiments, they don't hurt accuracy.\n # difficult = int(obj.find(\"difficult\").text)\n # if difficult == 1:\n # continue\n bbox = obj.find(\"bndbox\")\n bbox = [float(bbox.find(x).text) for x in [\"xmin\", \"ymin\", \"xmax\", \"ymax\"]]\n # Original annotations are integers in the range [1, W or H]\n # Assuming they mean 1-based pixel indices (inclusive),\n # a box with annotation (xmin=1, xmax=W) covers the whole image.\n # In coordinate space this is represented by (xmin=0, xmax=W)\n bbox[0] -= 1.0\n bbox[1] -= 1.0\n instances.append(\n {\"category_id\": class_names.index(cls), \"bbox\": bbox, \"bbox_mode\": BoxMode.XYXY_ABS}\n )\n r[\"annotations\"] = instances\n dicts.append(r)\n return dicts\n\n\ndef register_pascal_voc(name, dirname, split, class_names=CLASS_NAMES):\n year = 2007 if \"2007\" in name else 2012\n DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))\n MetadataCatalog.get(name).set(\n thing_classes=list(class_names), dirname=dirname, split=split, year=year\n )\n\n\nos.environ[\"DETECTRON2_DATASETS\"] = \"/home/msi/Documents/Datasets\"\n_root = os.getenv(\"DETECTRON2_DATASETS\", \"datasets\")\nregister_all_cityscapes(_root)\nregister_all_destroy_voc(_root)\nregister_destroy_unlabel(_root)\n\n"
] | [
[
"numpy.loadtxt"
]
] |
mosesnah-shared/adaptive-control | [
"dc9504f1f2531a3ed2d16358f28024c4647a09e4"
] | [
"MuJoCo/run.py"
] | [
"\"\"\"\n\n# ============================================================================= #\n| Project: Adaptive Controller Example\n| Title: Python Controller File for Running the adaptive controller simulation\n| Author: Moses C. Nah\n| Email: [Moses] [email protected]\n| Creation Date: Saturday, Dec 6th, 2020\n# ============================================================================= #\n\n# ============================================================================= #\n| (0A) [DESCRIPTION]\n|\n| - Python Script for running mujoco-py.\n| Simple example for running the adpative controller\n| The corresponding xml model file is under \"model\" directory.\n|\n# ============================================================================= #\n\n# ============================================================================= #\n| (0B) [KEYWORDS DEFINITION]\n| : type the following \"keywords\" for cases as...\n| - [BACKUP] [NAME]: Back-up code in case it's needed for the near future\n| - [TODO]: The task which should be done as soon as possible\n| - [TIP]: The reason why the following code was written.\n| - [SBA]: Acronym for \"Should Be Added\" in the future. This means that the mentioned functionality should be added soon.\n| - [CBC]: Acronym for \"Could Be Changed\" in the future. This means that the following code could be changed (or deprecated) soon.\n# ============================================================================= #\n\n# ============================================================================= #\n| (0C) [PYTHON NAMING CONVENTION]\n| Our project will follow the python naming convention, [REF]: https://stackoverflow.com/a/8423697/13437196\n| ---------------------------------------------------------------------------------------------------------\n| module_name, package_name, ClassName, method_name, ExceptionName, function_name,\n| GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name.\n# ============================================================================= #\n\n# ============================================================================= #\n| (0D) [DOCOPT PARSE]\n| From now on, the written comments are specifically for \"docopt\" function.\n| [REF] http://docopt.org/\n# ============================================================================= #\n\nUsage:\n run.py [options]\n run.py -h | --help\n run.py -d | --debugMode\n\nArguments:\n\nOptions:\n -h --help Showing the usage and options\n --version Show version\n -s --saveData Saving the neccessary data from MuJoCo simulation as a txt file in the current directory\n [default: False]\n -r --recordVideo Record simulation video as a .mp4 file in the current directory\n [default: False]\n --vidRate=RATE The rate of how fast the video runs. 
If 1.0, then normal speed, if 0.5, then 2 times slower.\n [default: 1.0]\n --runTime=TIME The total time of the simulation\n [default: 5.0]\n --startTime=TIME The start time of the movement, or controller\n [default: 0.0]\n --simType=TYPE The Type of the Simulation\n [default: 1]\n 1: Joint space trajectory tracking task\n 2: Cartesian space trajectory tracking task\n --modelName=NAME Setting the xml model file name which will be used for the simulation.\n The starting number of the xml model file indicates the type of simulation, hence the --modelName\n already contains the simulation typep information.\n List of models.\n 1: 2D_model.xml - 1-DOF on shoulder, 1-DOF on elbow\n 2: 3D_model.xml - 3-DOF on shoulder, 1-DOF on elbow\n [default: 2D_model.xml]\n --videoOFF Turning off the video\n This is useful for cases when you want to make the computation of the simulation faster\n [default: False]\n --camPos=STRING Setting the Camera Position of the simulation.\n default is None\n --verbose Print more text\n [default: False]\n\nExamples, try:\n python3 run.py --help\n python3 run.py --version\n python3 run.py --modelName=\"2D_model.xml\" --simType=1 --runTime=30\n python3 run.py --modelName=\"2D_model.xml\" --simType=2\n python3 run.py --modelName=\"3D_model.xml\" --simType=1 --saveData\n python3 run.py --modelName=\"3D_model.xml\" --simType=2 --saveData --recordVideo --vidRate=0.5\n python3 run.py --modelName=\"3D_model.xml\" --simType=2 --saveData --videoOFF\n python3 run.py --modelName=\"3D_model_w_N25.xml\" --simType=2 --videoOFF \n\n\"\"\"\n\n\n\n# ============================================================================= #\n# (0A) [IMPORT MODULES]\n# Importing necessary modules + declaring basic configurations for running the whole mujoco simulator.\n\n# [Built-in modules]\nimport sys\nimport os\nimport re\nimport argparse\nimport datetime\nimport shutil\nimport pickle\n\n# [3rd party modules]\nimport numpy as np\nimport cv2\ntry:\n import mujoco_py as mjPy\nexcept ImportError as e:\n raise error.DependencyNotInstalled( \"{}. (HINT: you need to install mujoco_py, \\\n and also perform the setup instructions here: \\\n https://github.com/openai/mujoco-py/.)\".format( e ) )\n\nfrom docopt import docopt\n\n# [3rd party modules] - For Optimization\nimport matplotlib.pyplot as plt\n# import nevergrad as ng # [BACKUP] Needed for Optimization\n\nimport sympy as sp\nfrom sympy.utilities.lambdify import lambdify, implemented_function\n\n# [Local modules]\nfrom modules.simulation import Simulation\nfrom modules.controllers import AdaptiveController\nfrom modules.utils import ( my_print, my_mkdir, args_cleanup,\n my_rmdir, str2float, camel2snake, snake2camel )\nfrom modules.constants import Constants\n\n# ============================================================================= #\n\n# ============================================================================= #\n# (0B) [SYSTEM SETTINGS]\n\nif sys.version_info[ : 3 ] < ( 3, 0, 0 ): # Simple version check of the python version. 
python3+ is recommended for this file.\n my_print( NOTIFICATION = \" PYTHON3+ is recommended for this script \" )\n\n\n # [Printing Format]\nprec = 4 # Defining the float precision for print/number comparison.\nnp.set_printoptions( linewidth = 8000, suppress = True, precision = prec ) # Setting the numpy print options, useful for printing out data with consistent pattern.\n\nargs = docopt( __doc__, version = Constants.VERSION ) # Parsing the Argument\nargs = args_cleanup( args, '--' ) # Cleaning up the dictionary, discard prefix string '--' for the variables\n\n# [TODO] [Moses]\n# It might be beneficial, if we have some sort of \"parser function\", which gets the input args, and save it as the corresponding specific type.\n# If video needs to be recorded or data should be saved, then append 'saveDir' element to args dictionary\nargs[ 'saveDir' ] = my_mkdir( ) if args[ 'recordVideo' ] or args[ 'saveData' ] else None\n\nmy_print( saveDir = args[ 'saveDir' ] )\n\n# ============================================================================= #\n\n\n# ============================================================================= #\n\ndef main( ):\n # ============================================================================= #\n # (1A) [GENERATE MODEL]\n\n model_name = args[ 'modelName' ]\n my_print( modelName = model_name )\n\n # ============================================================================= #\n\n # ============================================================================= #\n # (1C) [RUN SIMULATION]\n\n VISUALIZE = False if args[ 'videoOFF' ] else True # Turn-off visualization\n # VISUALIZE = True\n\n mySim = Simulation( model_name = model_name,\n is_visualize = VISUALIZE,\n arg_parse = args )\n\n # [Type #1] Joint Space tracking task\n # [Type #2] Cartesian Space tracking task\n type = int( args[ 'simType' ] )\n controller_object = AdaptiveController( mySim.mjModel, mySim.mjData, type, is_robust = True )\n\n if \"2D\" in args[ 'modelName' ]:\n\n if type == 1:\n trajectory = [ 0.3 * sp.sin( 1 * controller_object.t_sym ) + 0.3 * sp.sin( 2 * controller_object.t_sym ) + 0.3 * sp.sin( 3 * controller_object.t_sym ) + \\\n 0.3 * sp.sin( 4 * controller_object.t_sym ) + 0.3 * sp.sin( 5 * controller_object.t_sym ) + 0.3 * sp.sin( 6 * controller_object.t_sym ) ,\n np.pi/2 + 0.3 * sp.sin( 1 * controller_object.t_sym ) + 0.3 * sp.sin( 2 * controller_object.t_sym ) + 0.3 * sp.sin( 3 * controller_object.t_sym ) + \\\n 0.3 * sp.sin( 4 * controller_object.t_sym ) + 0.3 * sp.sin( 5 * controller_object.t_sym ) + 0.3 * sp.sin( 6 * controller_object.t_sym ) ]\n\n elif type == 2:\n r = 1.5;\n trajectory = [ r * sp.cos( 0.5 * controller_object.t_sym ),\n 0,\n r * sp.sin( 0.5 * controller_object.t_sym ) ]\n\n\n\n # BEFORE RUNNING THE SIMULATION\n # SETTING THE INITIAL POSTURE TO PREVENT SINGULARITY!!\n mySim.mjData.qpos[ 0 ] = np.pi/2\n mySim.mjData.qpos[ 1 ] = 0.1\n mySim.mjSim.forward( ) # need to update mujoco with this forward method\n\n\n elif \"3D\" in args[ 'modelName' ]:\n\n if type == 1:\n\n trajectory = [ 1.0 * sp.sin( 1 * controller_object.t_sym ) + 1.0 * sp.sin( 2 * controller_object.t_sym ) + 1.0 * sp.sin( 3 * controller_object.t_sym ) + \\\n 1.0 * sp.sin( 4 * controller_object.t_sym ) + 1.0 * sp.sin( 5 * controller_object.t_sym ) + 1.0 * sp.sin( 6 * controller_object.t_sym ) ,\n 0.3 * sp.sin( 1 * controller_object.t_sym ) + 0.3 * sp.sin( 2 * controller_object.t_sym ) + 0.3 * sp.sin( 3 * controller_object.t_sym ) + \\\n 0.3 * sp.sin( 4 * controller_object.t_sym ) + 0.3 * sp.sin( 5 * 
controller_object.t_sym ) + 0.3 * sp.sin( 6 * controller_object.t_sym ) ,\n 0.5 * sp.sin( 1 * controller_object.t_sym ) + 0.5 * sp.sin( 2 * controller_object.t_sym ) + 0.5 * sp.sin( 3 * controller_object.t_sym ) + \\\n 0.5 * sp.sin( 4 * controller_object.t_sym ) + 0.5 * sp.sin( 5 * controller_object.t_sym ) + 0.5 * sp.sin( 6 * controller_object.t_sym ) ,\n np.pi/2 + 0.15 * sp.sin( 1 * controller_object.t_sym ) + 0.15 * sp.sin( 2 * controller_object.t_sym ) + 0.15 * sp.sin( 3 * controller_object.t_sym ) + \\\n 0.15 * sp.sin( 4 * controller_object.t_sym ) + 0.15 * sp.sin( 5 * controller_object.t_sym ) + 0.15 * sp.sin( 6 * controller_object.t_sym ) ]\n\n elif type == 2:\n\n trajectory = [ ( 0.3 + 0.3 * sp.exp( -0.5 * controller_object.t_sym) ) * sp.cos( 1.5 * np.pi * ( 1 - sp.exp( -0.2 * controller_object.t_sym) ) * controller_object.t_sym ),\n ( 0.3 + 0.3 * sp.exp( -0.5 * controller_object.t_sym) ) * sp.sin( 1.5 * np.pi * ( 1 - sp.exp( -0.2 * controller_object.t_sym) ) * controller_object.t_sym ),\n ( 0.4 - 0.4 * sp.exp( -1.0 * controller_object.t_sym) ) ]\n\n\n # BEFORE RUNNING THE SIMULATION\n # SETTING THE INITIAL POSTURE TO PREVENT SINGULARITY!!\n if type == 1:\n mySim.mjData.qpos[ 0 ] = 0\n mySim.mjData.qpos[ 3 ] = np.pi/2\n\n elif type == 2:\n mySim.mjData.qpos[ 0 ] = np.pi/2\n mySim.mjData.qpos[ 3 ] = 0.1\n\n mySim.mjSim.forward( ) # need to update mujoco with this forward method\n\n\n controller_object.set_trajectory( trajectory )\n controller_object.add_NN( )\n\n mySim.attach_controller( controller_object )\n\n mySim.run( )\n\n\n if args[ 'saveDir' ] is not None:\n mySim.save_simulation_data( args[ 'saveDir' ] )\n shutil.copyfile( Constants.MODEL_DIR + model_name,\n args[ 'saveDir' ] + model_name )\n\n\n mySim.reset( )\n\n # ============================================================================= #\n\nif __name__ == \"__main__\":\n\n try:\n main( )\n\n except KeyboardInterrupt:\n print( \"Ctrl-C was inputted. Halting the program. \", end = ' ' )\n\n if args[ 'saveDir' ] is not None:\n my_rmdir( args[ 'saveDir' ] )\n"
] | [
[
"numpy.set_printoptions"
]
] |
GranScudetto/TensorflowExamples | [
"25e0f0f973febc8997b75eb512c22d2e85b0788a"
] | [
"classification/cifar10/cifar_models.py"
] | [
"\"\"\"\nSeparated File containing all different models implemented\n\nCreation Date: May 2020\nCreator: GranScudetto\n\"\"\"\nfrom tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, Dense\nfrom tensorflow.keras.layers import MaxPool2D, Concatenate, Flatten\nfrom tensorflow.keras import Model\n\n\ndef model_1(input_shape, nb_classes):\n # 32 x 32\n inp = Input(shape=input_shape)\n conv_1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inp)\n pool_1 = MaxPool2D(pool_size=(2, 2))(conv_1)\n # 16 x 16\n conv_2 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(pool_1)\n conv_3 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv_2)\n pool_2 = MaxPool2D(pool_size=(2, 2))(conv_3)\n # 8 x 8\n conv_4 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(pool_2)\n conv_5 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(conv_4)\n pool_3 = MaxPool2D(pool_size=(2, 2))(conv_5)\n # 4 x 4\n conv_6 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(pool_3)\n conv_7 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(conv_6)\n flatten = Flatten()(conv_7)\n dense_1 = Dense(units=512, activation='relu')(flatten)\n out = Dense(units=nb_classes, activation='softmax')(dense_1)\n\n return Model(inputs=inp, outputs=out)\n\n\ndef model_2(input_shape, nb_classes):\n \n # 32, 16, 8, 4, 2\n inp = Input(shape=input_shape) # 32 x 32\n \n conv_3x3_1 = Conv2D(filters=16, kernel_size=(3, 3), padding='same')(inp)\n conv_3x3_1 = BatchNormalization()(conv_3x3_1)\n conv_3x3_1 = Activation(activation='relu')(conv_3x3_1)\n \n conv_5x5_1 = Conv2D(filters=16, kernel_size=(3, 3), padding='same')(inp)\n conv_5x5_1 = BatchNormalization()(conv_5x5_1)\n conv_5x5_1 = Activation(activation='relu')(conv_5x5_1)\n \n network_layer_1 = Concatenate()([conv_3x3_1, conv_5x5_1])\n network_layer_1_pooled = MaxPool2D(pool_size=(2, 2))(network_layer_1) # 16x16\n \n conv_3x3_2 = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(network_layer_1_pooled)\n conv_3x3_2 = BatchNormalization()(conv_3x3_2)\n conv_3x3_2 = Activation(activation='relu')(conv_3x3_2)\n \n conv_5x5_2 = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(network_layer_1_pooled)\n conv_5x5_2 = BatchNormalization()(conv_5x5_2)\n conv_5x5_2 = Activation(activation='relu')(conv_5x5_2)\n \n scaled_input = MaxPool2D(pool_size=(2, 2))(inp)\n conv_3x3_1_3 = Conv2D(filters=16, kernel_size=(3, 3), padding='same')(scaled_input)\n conv_3x3_1_3 = BatchNormalization()(conv_3x3_1_3)\n conv_3x3_1_3 = Activation(activation='relu')(conv_3x3_1_3)\n conv_3x3_2_3 = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(conv_3x3_1_3)\n conv_3x3_2_3 = BatchNormalization()(conv_3x3_2_3)\n conv_3x3_2_3 = Activation(activation='relu')(conv_3x3_2_3)\n \n network_layer_2 = Concatenate()([conv_3x3_2, conv_5x5_2, conv_3x3_2_3])\n network_layer_2_pooled = MaxPool2D(pool_size=(2, 2))(network_layer_2) # 8x8\n \n conv_3x3_3 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(network_layer_2_pooled)\n conv_3x3_3 = BatchNormalization()(conv_3x3_3)\n conv_3x3_3 = Activation(activation='relu')(conv_3x3_3)\n \n conv_3x3_3_3 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(conv_3x3_2_3)\n conv_3x3_3_3 = BatchNormalization()(conv_3x3_3_3)\n conv_3x3_3_3 = Activation(activation='relu')(conv_3x3_3_3)\n \n conv_3x3_3_3 = MaxPool2D(pool_size=(2, 2))(conv_3x3_3_3)\n network_layer_3 = 
Concatenate()([conv_3x3_3, conv_3x3_3_3])\n network_layer_3_pooled = MaxPool2D(pool_size=(2, 2))(network_layer_3)\n \n conv_3x3_4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(network_layer_3_pooled)\n conv_3x3_4 = BatchNormalization()(conv_3x3_4)\n conv_3x3_4 = Activation(activation='relu')(conv_3x3_4)\n \n conv_3x3_5 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(conv_3x3_4)\n conv_3x3_5 = BatchNormalization()(conv_3x3_5)\n conv_3x3_5 = Activation(activation='relu')(conv_3x3_5)\n \n conv_3x3_6 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(conv_3x3_5)\n conv_3x3_6 = BatchNormalization()(conv_3x3_6)\n conv_3x3_6 = Activation(activation='relu')(conv_3x3_6)\n \n flattened = Flatten()(conv_3x3_6)\n flattened = Dense(units=128, activation='relu')(flattened)\n dense_pre_out = Dense(units=nb_classes, activation='relu')(flattened)\n \n out = Dense(units=nb_classes, activation='softmax')(dense_pre_out)\n \n return Model(inputs=inp, outputs=out)\n"
] | [
[
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.Input"
]
] |
pengnanchi/CPC_audio | [
"6900d49d441df90679cdd4ba76b266331d98e7be"
] | [
"cpc/train.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport argparse\nimport json\nimport os\nimport numpy as np\nimport torch\nimport time\nfrom copy import deepcopy\nimport random\nimport psutil\nimport sys\n\nimport criterion as cr\nimport model as model\nimport utils.misc as utils\nimport feature_loader as fl\nfrom cpc_default_config import set_default_cpc_config\nfrom dataset import AudioBatchData, findAllSeqs, filterSeqs, parseSeqLabels\n\n\ndef getCriterion(args, downsampling, nSpeakers, nPhones):\n dimFeatures = args.hiddenGar if not args.onEncoder else args.hiddenEncoder\n if not args.supervised:\n if args.cpc_mode == 'none':\n cpcCriterion = cr.NoneCriterion()\n else:\n sizeInputSeq = (args.sizeWindow // downsampling)\n cpcCriterion = cr.CPCUnsupersivedCriterion(args.nPredicts,\n args.hiddenGar,\n args.hiddenEncoder,\n args.negativeSamplingExt,\n mode=args.cpc_mode,\n rnnMode=args.rnnMode,\n dropout=args.dropout,\n nSpeakers=nSpeakers,\n speakerEmbedding=args.speakerEmbedding,\n sizeInputSeq=sizeInputSeq)\n elif args.pathPhone is not None:\n if not args.CTC:\n cpcCriterion = cr.PhoneCriterion(dimFeatures,\n nPhones, args.onEncoder,\n nLayers=args.nLevelsPhone)\n else:\n cpcCriterion = cr.CTCPhoneCriterion(dimFeatures,\n nPhones, args.onEncoder)\n else:\n cpcCriterion = cr.SpeakerCriterion(dimFeatures, nSpeakers)\n return cpcCriterion\n\n\ndef loadCriterion(pathCheckpoint, downsampling, nSpeakers, nPhones):\n _, _, locArgs = fl.getCheckpointData(os.path.dirname(pathCheckpoint))\n criterion = getCriterion(locArgs, downsampling, nSpeakers, nPhones)\n\n state_dict = torch.load(pathCheckpoint, 'cpu')\n\n criterion.load_state_dict(state_dict[\"cpcCriterion\"])\n return criterion\n\n\ndef trainStep(dataLoader,\n cpcModel,\n cpcCriterion,\n optimizer,\n scheduler,\n loggingStep):\n\n cpcModel.train()\n cpcCriterion.train()\n\n start_time = time.perf_counter()\n n_examples = 0\n logs, lastlogs = {}, None\n iter = 0\n for step, fulldata in enumerate(dataLoader):\n batchData, label = fulldata\n n_examples += batchData.size(0)\n batchData = batchData.cuda(non_blocking=True)\n label = label.cuda(non_blocking=True)\n c_feature, encoded_data, label = cpcModel(batchData, label)\n allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)\n totLoss = allLosses.sum()\n\n totLoss.backward()\n\n # Show grads ?\n optimizer.step()\n optimizer.zero_grad()\n\n if \"locLoss_train\" not in logs:\n logs[\"locLoss_train\"] = np.zeros(allLosses.size(1))\n logs[\"locAcc_train\"] = np.zeros(allLosses.size(1))\n\n iter += 1\n logs[\"locLoss_train\"] += (allLosses.mean(dim=0)).detach().cpu().numpy()\n logs[\"locAcc_train\"] += (allAcc.mean(dim=0)).cpu().numpy()\n\n if (step + 1) % loggingStep == 0:\n new_time = time.perf_counter()\n elapsed = new_time - start_time\n print(f\"Update {step + 1}\")\n print(f\"elapsed: {elapsed:.1f} s\")\n print(\n f\"{1000.0 * elapsed / loggingStep:.1f} ms per batch, {1000.0 * elapsed / n_examples:.1f} ms / example\")\n locLogs = utils.update_logs(logs, loggingStep, lastlogs)\n lastlogs = deepcopy(logs)\n utils.show_logs(\"Training loss\", locLogs)\n start_time, n_examples = new_time, 0\n\n if scheduler is not None:\n scheduler.step()\n\n logs = utils.update_logs(logs, iter)\n logs[\"iter\"] = iter\n utils.show_logs(\"Average training loss on epoch\", logs)\n return logs\n\n\ndef valStep(dataLoader,\n cpcModel,\n cpcCriterion):\n\n 
cpcCriterion.eval()\n cpcModel.eval()\n logs = {}\n cpcCriterion.eval()\n cpcModel.eval()\n iter = 0\n\n for step, fulldata in enumerate(dataLoader):\n\n batchData, label = fulldata\n\n batchData = batchData.cuda(non_blocking=True)\n label = label.cuda(non_blocking=True)\n\n with torch.no_grad():\n c_feature, encoded_data, label = cpcModel(batchData, label)\n allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)\n\n if \"locLoss_val\" not in logs:\n logs[\"locLoss_val\"] = np.zeros(allLosses.size(1))\n logs[\"locAcc_val\"] = np.zeros(allLosses.size(1))\n\n iter += 1\n logs[\"locLoss_val\"] += allLosses.mean(dim=0).cpu().numpy()\n logs[\"locAcc_val\"] += allAcc.mean(dim=0).cpu().numpy()\n\n logs = utils.update_logs(logs, iter)\n logs[\"iter\"] = iter\n utils.show_logs(\"Validation loss:\", logs)\n return logs\n\n\ndef run(trainDataset,\n valDataset,\n batchSize,\n samplingMode,\n cpcModel,\n cpcCriterion,\n nEpoch,\n pathCheckpoint,\n optimizer,\n scheduler,\n logs):\n\n print(f\"Running {nEpoch} epochs\")\n startEpoch = len(logs[\"epoch\"])\n bestAcc = 0\n bestStateDict = None\n start_time = time.time()\n\n for epoch in range(startEpoch, nEpoch):\n\n print(f\"Starting epoch {epoch}\")\n utils.cpu_stats()\n\n trainLoader = trainDataset.getDataLoader(batchSize, samplingMode,\n True, numWorkers=0)\n\n valLoader = valDataset.getDataLoader(batchSize, 'sequential', False,\n numWorkers=0)\n\n print(\"Training dataset %d batches, Validation dataset %d batches, batch size %d\" %\n (len(trainLoader), len(valLoader), batchSize))\n\n locLogsTrain = trainStep(trainLoader, cpcModel, cpcCriterion,\n optimizer, scheduler, logs[\"logging_step\"])\n\n locLogsVal = valStep(valLoader, cpcModel, cpcCriterion)\n\n print(f'Ran {epoch + 1} epochs '\n f'in {time.time() - start_time:.2f} seconds')\n\n torch.cuda.empty_cache()\n\n currentAccuracy = float(locLogsVal[\"locAcc_val\"].mean())\n if currentAccuracy > bestAcc:\n bestStateDict = fl.get_module(cpcModel).state_dict()\n\n for key, value in dict(locLogsTrain, **locLogsVal).items():\n if key not in logs:\n logs[key] = [None for x in range(epoch)]\n if isinstance(value, np.ndarray):\n value = value.tolist()\n logs[key].append(value)\n\n logs[\"epoch\"].append(epoch)\n\n if pathCheckpoint is not None \\\n and (epoch % logs[\"saveStep\"] == 0 or epoch == nEpoch-1):\n\n modelStateDict = fl.get_module(cpcModel).state_dict()\n criterionStateDict = fl.get_module(cpcCriterion).state_dict()\n\n fl.save_checkpoint(modelStateDict, criterionStateDict,\n optimizer.state_dict(), bestStateDict,\n f\"{pathCheckpoint}_{epoch}.pt\")\n utils.save_logs(logs, pathCheckpoint + \"_logs.json\")\n\n\ndef main(args):\n args = parseArgs(args)\n\n utils.set_seed(args.random_seed)\n logs = {\"epoch\": [], \"iter\": [], \"saveStep\": args.save_step}\n loadOptimizer = False\n if args.pathCheckpoint is not None and not args.restart:\n cdata = fl.getCheckpointData(args.pathCheckpoint)\n if cdata is not None:\n data, logs, locArgs = cdata\n print(f\"Checkpoint detected at {data}\")\n fl.loadArgs(args, locArgs,\n forbiddenAttr={\"nGPU\", \"pathCheckpoint\",\n \"debug\", \"restart\", \"world_size\",\n \"n_nodes\", \"node_id\", \"n_gpu_per_node\",\n \"max_size_loaded\"})\n args.load, loadOptimizer = [data], True\n args.loadCriterion = True\n\n logs[\"logging_step\"] = args.logging_step\n\n print(f'CONFIG:\\n{json.dumps(vars(args), indent=4, sort_keys=True)}')\n print('-' * 50)\n\n seqNames, speakers = findAllSeqs(args.pathDB,\n extension=args.file_extension,\n loadCache=not 
args.ignore_cache)\n\n print(f'Found files: {len(seqNames)} seqs, {len(speakers)} speakers')\n # Datasets\n if args.pathTrain is not None:\n seqTrain = filterSeqs(args.pathTrain, seqNames)\n else:\n seqTrain = seqNames\n\n if args.pathVal is None:\n random.shuffle(seqTrain)\n sizeTrain = int(0.99 * len(seqTrain))\n seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]\n print(f'Found files: {len(seqTrain)} train, {len(seqVal)} val')\n else:\n seqVal = filterSeqs(args.pathVal, seqNames)\n\n if args.debug:\n seqTrain = seqTrain[-1000:]\n seqVal = seqVal[-100:]\n\n phoneLabels, nPhones = None, None\n if args.supervised and args.pathPhone is not None:\n print(\"Loading the phone labels at \" + args.pathPhone)\n phoneLabels, nPhones = parseSeqLabels(args.pathPhone)\n print(f\"{nPhones} phones found\")\n\n print(\"\")\n print(f'Loading audio data at {args.pathDB}')\n print(\"Loading the training dataset\")\n trainDataset = AudioBatchData(args.pathDB,\n args.sizeWindow,\n seqTrain,\n phoneLabels,\n len(speakers),\n nProcessLoader=args.n_process_loader,\n MAX_SIZE_LOADED=args.max_size_loaded)\n print(\"Training dataset loaded\")\n print(\"\")\n\n print(\"Loading the validation dataset\")\n valDataset = AudioBatchData(args.pathDB,\n args.sizeWindow,\n seqVal,\n phoneLabels,\n len(speakers),\n nProcessLoader=args.n_process_loader)\n print(\"Validation dataset loaded\")\n print(\"\")\n\n if args.load is not None:\n cpcModel, args.hiddenGar, args.hiddenEncoder = \\\n fl.loadModel(args.load)\n\n else:\n # Encoder network\n encoderNet = fl.getEncoder(args)\n # AR Network\n arNet = fl.getAR(args)\n\n cpcModel = model.CPCModel(encoderNet, arNet)\n\n batchSize = args.nGPU * args.batchSizeGPU\n cpcModel.supervised = args.supervised\n\n # Training criterion\n if args.load is not None and args.loadCriterion:\n cpcCriterion = loadCriterion(args.load[0], cpcModel.gEncoder.DOWNSAMPLING,\n len(speakers), nPhones)\n else:\n cpcCriterion = getCriterion(args, cpcModel.gEncoder.DOWNSAMPLING,\n len(speakers), nPhones)\n\n if loadOptimizer:\n state_dict = torch.load(args.load[0], 'cpu')\n cpcCriterion.load_state_dict(state_dict[\"cpcCriterion\"])\n\n cpcCriterion.cuda()\n cpcModel.cuda()\n\n # Optimizer\n g_params = list(cpcCriterion.parameters()) + list(cpcModel.parameters())\n\n lr = args.learningRate\n optimizer = torch.optim.Adam(g_params, lr=lr,\n betas=(args.beta1, args.beta2),\n eps=args.epsilon)\n\n if loadOptimizer:\n print(\"Loading optimizer \" + args.load[0])\n state_dict = torch.load(args.load[0], 'cpu')\n if \"optimizer\" in state_dict:\n optimizer.load_state_dict(state_dict[\"optimizer\"])\n\n # Checkpoint\n if args.pathCheckpoint is not None:\n if not os.path.isdir(args.pathCheckpoint):\n os.mkdir(args.pathCheckpoint)\n args.pathCheckpoint = os.path.join(args.pathCheckpoint, \"checkpoint\")\n\n scheduler = None\n if args.schedulerStep > 0:\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer,\n args.schedulerStep,\n gamma=0.5)\n if args.schedulerRamp is not None:\n n_epoch = args.schedulerRamp\n print(f\"Ramp activated. 
n_e = {n_epoch}\")\n scheduler_ramp = torch.optim.lr_scheduler.LambdaLR(optimizer,\n lr_lambda=lambda epoch: utils.ramp_scheduling_function(\n n_epoch, epoch),\n last_epoch=-1)\n if scheduler is None:\n scheduler = scheduler_ramp\n else:\n scheduler = utils.SchedulerCombiner([scheduler_ramp, scheduler],\n [0, args.schedulerRamp])\n if scheduler is not None:\n for i in range(len(logs[\"epoch\"])):\n scheduler.step()\n\n cpcModel = torch.nn.DataParallel(cpcModel,\n device_ids=range(args.nGPU)).cuda()\n cpcCriterion = torch.nn.DataParallel(cpcCriterion,\n device_ids=range(args.nGPU)).cuda()\n\n run(trainDataset,\n valDataset,\n batchSize,\n args.samplingType,\n cpcModel,\n cpcCriterion,\n args.nEpoch,\n args.pathCheckpoint,\n optimizer,\n scheduler,\n logs)\n\n\ndef parseArgs(argv):\n # Run parameters\n parser = argparse.ArgumentParser(description='Trainer')\n\n # Default arguments:\n parser = set_default_cpc_config(parser)\n\n group_db = parser.add_argument_group('Dataset')\n group_db.add_argument('--pathDB', type=str, default=None,\n help='Path to the directory containing the '\n 'data.')\n group_db.add_argument('--file_extension', type=str, default=\".flac\",\n help=\"Extension of the audio files in the dataset.\")\n group_db.add_argument('--pathTrain', type=str, default=None,\n help='Path to a .txt file containing the list of the '\n 'training sequences.')\n group_db.add_argument('--pathVal', type=str, default=None,\n help='Path to a .txt file containing the list of the '\n 'validation sequences.')\n group_db.add_argument('--n_process_loader', type=int, default=8,\n help='Number of processes to call to load the '\n 'dataset')\n group_db.add_argument('--ignore_cache', action='store_true',\n help='Activate if the dataset has been modified '\n 'since the last training session.')\n group_db.add_argument('--max_size_loaded', type=int, default=4000000000,\n help='Maximal amount of data (in byte) a dataset '\n 'can hold in memory at any given time')\n group_supervised = parser.add_argument_group(\n 'Supervised mode (depreciated)')\n group_supervised.add_argument('--supervised', action='store_true',\n help='(Depreciated) Disable the CPC loss and activate '\n 'the supervised mode. By default, the supervised '\n 'training method is the speaker classification.')\n group_supervised.add_argument('--pathPhone', type=str, default=None,\n help='(Supervised mode only) Path to a .txt '\n 'containing the phone labels of the dataset. If given '\n 'and --supervised, will train the model using a '\n 'phone classification task.')\n group_supervised.add_argument('--CTC', action='store_true')\n\n group_save = parser.add_argument_group('Save')\n group_save.add_argument('--pathCheckpoint', type=str, default=None,\n help=\"Path of the output directory.\")\n group_save.add_argument('--logging_step', type=int, default=1000)\n group_save.add_argument('--save_step', type=int, default=5,\n help=\"Frequency (in epochs) at which a checkpoint \"\n \"should be saved\")\n\n group_load = parser.add_argument_group('Load')\n group_load.add_argument('--load', type=str, default=None, nargs='*',\n help=\"Load an exsiting checkpoint. Should give a path \"\n \"to a .pt file. 
The directory containing the file to \"\n \"load should also have a 'checkpoint.logs' and a \"\n \"'checkpoint.args'\")\n group_load.add_argument('--loadCriterion', action='store_true',\n help=\"If --load is activated, load the state of the \"\n \"training criterion as well as the state of the \"\n \"feature network (encoder + AR)\")\n group_load.add_argument('--restart', action='store_true',\n help=\"If any checkpoint is found, ignore it and \"\n \"restart the training from scratch.\")\n\n group_gpu = parser.add_argument_group('GPUs')\n group_gpu.add_argument('--nGPU', type=int, default=-1,\n help=\"Number of GPU to use (default: use all \"\n \"available GPUs)\")\n group_gpu.add_argument('--batchSizeGPU', type=int, default=8,\n help='Number of batches per GPU.')\n parser.add_argument('--debug', action='store_true',\n help=\"Load only a very small amount of files for \"\n \"debugging purposes.\")\n args = parser.parse_args(argv)\n\n if args.pathDB is None and (args.pathCheckpoint is None or args.restart):\n parser.print_help()\n print(\"Either provides an input dataset or a checkpoint to load\")\n sys.exit()\n\n if args.pathCheckpoint is not None:\n args.pathCheckpoint = os.path.abspath(args.pathCheckpoint)\n\n if args.load is not None:\n args.load = [os.path.abspath(x) for x in args.load]\n\n # set it up if needed, so that it is dumped along with other args\n if args.random_seed is None:\n args.random_seed = random.randint(0, 2**31)\n\n if args.nGPU < 0:\n args.nGPU = torch.cuda.device_count()\n assert args.nGPU <= torch.cuda.device_count(),\\\n f\"number of GPU asked: {args.nGPU},\" \\\n f\"number GPU detected: {torch.cuda.device_count()}\"\n print(f\"Let's use {args.nGPU} GPUs!\")\n\n if args.arMode == 'no_ar':\n args.hiddenGar = args.hiddenEncoder\n return args\n\n\nif __name__ == \"__main__\":\n torch.multiprocessing.set_start_method('spawn')\n args = sys.argv[1:]\n main(args)\n"
] | [
[
"torch.cuda.empty_cache",
"torch.load",
"torch.no_grad",
"torch.cuda.device_count",
"torch.optim.Adam",
"torch.multiprocessing.set_start_method",
"torch.optim.lr_scheduler.StepLR"
]
] |
mfkasim1/pyscf | [
"7be5e015b2b40181755c71d888449db936604660"
] | [
"pyscf/agf2/ragf2_slow.py"
] | [
"# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Oliver J. Backhouse <[email protected]>\n# George H. Booth <[email protected]>\n#\n\n'''\nAuxiliary second-order Green's function perturbation theory for \narbitrary moment consistency\n'''\n\nimport time\nimport numpy as np\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf import __config__\nfrom pyscf.agf2 import aux, ragf2\n\n\ndef build_se_part(agf2, eri, gf_occ, gf_vir, os_factor=1.0, ss_factor=1.0):\n ''' Builds either the auxiliaries of the occupied self-energy,\n or virtual if :attr:`gf_occ` and :attr:`gf_vir` are swapped.\n\n Args:\n eri : _ChemistsERIs\n Electronic repulsion integrals\n gf_occ : GreensFunction\n Occupied Green's function\n gf_vir : GreensFunction \n Virtual Green's function\n\n Kwargs:\n os_factor : float\n Opposite-spin factor for spin-component-scaled (SCS)\n calculations. Default 1.0\n ss_factor : float\n Same-spin factor for spin-component-scaled (SCS)\n calculations. Default 1.0\n\n Returns:\n :class:`SelfEnergy`\n '''\n\n cput0 = (time.clock(), time.time())\n log = logger.Logger(agf2.stdout, agf2.verbose)\n\n assert type(gf_occ) is aux.GreensFunction\n assert type(gf_vir) is aux.GreensFunction\n\n nmo = agf2.nmo\n nocc = gf_occ.naux\n nvir = gf_vir.naux\n naux = nocc * nocc * nvir\n tol = agf2.weight_tol\n\n if not (agf2.frozen is None or agf2.frozen == 0):\n mask = ragf2.get_frozen_mask(agf2)\n nmo -= np.sum(~mask)\n\n e = np.zeros((naux))\n v = np.zeros((nmo, naux))\n\n fpos = np.sqrt(0.5 * os_factor)\n fneg = np.sqrt(0.5 * os_factor + ss_factor)\n fdia = np.sqrt(os_factor)\n\n eja = lib.direct_sum('j,a->ja', gf_occ.energy, -gf_vir.energy)\n \n coeffs = (gf_occ.coupling, gf_occ.coupling, gf_vir.coupling)\n qeri = _make_qmo_eris_incore(agf2, eri, coeffs)\n\n p1 = 0\n for i in range(nocc):\n xija = qeri[:,i,:i].reshape(nmo, -1)\n xjia = qeri[:,:i,i].reshape(nmo, -1)\n xiia = qeri[:,i,i].reshape(nmo, -1)\n eija = gf_occ.energy[i] + eja[:i+1]\n\n p0, p1 = p1, p1 + i*nvir\n e[p0:p1] = eija[:i].ravel()\n v[:,p0:p1] = fneg * (xija - xjia)\n\n p0, p1 = p1, p1 + i*nvir\n e[p0:p1] = eija[:i].ravel()\n v[:,p0:p1] = fpos * (xija + xjia)\n\n p0, p1 = p1, p1 + nvir\n e[p0:p1] = eija[i].ravel()\n v[:,p0:p1] = fdia * xiia\n\n se = aux.SelfEnergy(e, v, chempot=gf_occ.chempot)\n se.remove_uncoupled(tol=tol)\n\n if not (agf2.frozen is None or agf2.frozen == 0):\n coupling = np.zeros((agf2.nmo, se.naux))\n coupling[mask] = se.coupling\n se = aux.SelfEnergy(se.energy, coupling, chempot=se.chempot)\n\n log.timer('se part', *cput0)\n \n return se\n\n\nclass RAGF2(ragf2.RAGF2):\n ''' Restricted AGF2 with canonical HF reference for arbitrary\n moment consistency\n\n Attributes:\n nmom : tuple of int\n Compression level of the Green's function and\n self-energy, respectively\n verbose : int\n Print level. Default value equals to :class:`Mole.verbose`\n max_memory : float or int\n Allowed memory in MB. 
Default value equals to :class:`Mole.max_memory`\n conv_tol : float\n Convergence threshold for AGF2 energy. Default value is 1e-7\n conv_tol_rdm1 : float\n Convergence threshold for first-order reduced density matrix.\n Default value is 1e-8.\n conv_tol_nelec : float\n Convergence threshold for the number of electrons. Default \n value is 1e-6.\n max_cycle : int\n Maximum number of AGF2 iterations. Default value is 50.\n max_cycle_outer : int\n Maximum number of outer Fock loop iterations. Default \n value is 20.\n max_cycle_inner : int\n Maximum number of inner Fock loop iterations. Default\n value is 50.\n weight_tol : float\n Threshold in spectral weight of auxiliaries to be considered\n zero. Default 1e-11.\n fock_diis_space : int\n DIIS space size for Fock loop iterations. Default value is 6.\n fock_diis_min_space : \n Minimum space of DIIS. Default value is 1.\n os_factor : float\n Opposite-spin factor for spin-component-scaled (SCS)\n calculations. Default 1.0\n ss_factor : float\n Same-spin factor for spin-component-scaled (SCS)\n calculations. Default 1.0\n damping : float\n Damping factor for the self-energy. Default value is 0.0\n\n Saved results\n\n e_corr : float\n AGF2 correlation energy\n e_tot : float\n Total energy (HF + correlation)\n e_1b : float\n One-body part of :attr:`e_tot`\n e_2b : float\n Two-body part of :attr:`e_tot`\n e_init : float\n Initial correlation energy (truncated MP2)\n converged : bool\n Whether convergence was successful\n se : SelfEnergy\n Auxiliaries of the self-energy\n gf : GreensFunction\n Auxiliaries of the Green's function\n '''\n\n def __init__(self, mf, nmom=(None,0), frozen=None, mo_energy=None, mo_coeff=None, mo_occ=None):\n\n ragf2.RAGF2.__init__(self, mf, frozen=frozen, mo_energy=mo_energy,\n mo_coeff=mo_coeff, mo_occ=mo_occ)\n\n self.nmom = nmom\n\n self._keys.update(['nmom'])\n\n build_se_part = build_se_part\n\n def build_se(self, eri=None, gf=None, os_factor=None, ss_factor=None, se_prev=None):\n ''' Builds the auxiliaries of the self-energy.\n\n Args:\n eri : _ChemistsERIs\n Electronic repulsion integrals\n gf : GreensFunction\n Auxiliaries of the Green's function\n\n Kwargs:\n os_factor : float\n Opposite-spin factor for spin-component-scaled (SCS)\n calculations. Default 1.0\n ss_factor : float\n Same-spin factor for spin-component-scaled (SCS)\n calculations. Default 1.0\n se_prev : SelfEnergy\n Previous self-energy for damping. 
Default value is None\n\n Returns:\n :class:`SelfEnergy`\n '''\n\n if eri is None: eri = self.ao2mo()\n if gf is None: gf = self.gf\n if gf is None: gf = self.init_gf()\n\n fock = None\n if self.nmom[0] != None:\n fock = self.get_fock(eri=eri, gf=gf)\n\n if os_factor is None: os_factor = self.os_factor\n if ss_factor is None: ss_factor = self.ss_factor\n\n facs = dict(os_factor=os_factor, ss_factor=ss_factor)\n gf_occ = gf.get_occupied()\n gf_vir = gf.get_virtual()\n\n se_occ = self.build_se_part(eri, gf_occ, gf_vir, **facs)\n se_occ = se_occ.compress(n=(None, self.nmom[1]))\n\n se_vir = self.build_se_part(eri, gf_vir, gf_occ, **facs)\n se_vir = se_vir.compress(n=(None, self.nmom[1]))\n\n se = aux.combine(se_vir, se_occ)\n se = se.compress(phys=fock, n=(self.nmom[0], None))\n\n if se_prev is not None and self.damping != 0.0:\n se.coupling *= np.sqrt(1.0-self.damping)\n se_prev.coupling *= np.sqrt(self.damping)\n se = aux.combine(se, se_prev)\n se = se.compress(n=self.nmom)\n\n return se\n\n def dump_flags(self, verbose=None):\n ragf2.RAGF2.dump_flags(self, verbose=verbose)\n logger.info(self, 'nmom = %s', repr(self.nmom))\n return self\n\n def run_diis(self, se, diis=None):\n return se\n\n\nclass _ChemistsERIs(ragf2._ChemistsERIs):\n pass\n\n_make_qmo_eris_incore = ragf2._make_qmo_eris_incore\n\n\n\nif __name__ == '__main__':\n from pyscf import gto, scf, mp\n\n mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz', verbose=3)\n rhf = scf.RHF(mol)\n rhf.conv_tol = 1e-11\n rhf.run()\n\n agf2 = RAGF2(rhf, nmom=(None,0))\n agf2.run()\n \n agf2 = ragf2.RAGF2(rhf)\n agf2.run()\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.zeros"
]
] |