repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
ethanabrooks/pytorch-dnc |
[
"bf7a039e3062742654364fb80b1ab5d44e5746f8",
"bf7a039e3062742654364fb80b1ab5d44e5746f8"
] |
[
"core/circuit.py",
"core/agent.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass Circuit(\n nn.Module\n): # NOTE: basically this whole module is treated as a custom rnn cell\n def __init__(self, args):\n super(Circuit, self).__init__()\n # logging\n self.logger = args.logger\n # params\n self.use_cuda = args.use_cuda\n self.dtype = args.dtype\n # params\n self.batch_size = args.batch_size\n self.input_dim = args.input_dim\n self.output_dim = args.output_dim\n self.hidden_dim = args.hidden_dim\n self.num_write_heads = args.num_write_heads\n self.num_read_heads = args.num_read_heads\n self.mem_hei = args.mem_hei\n self.mem_wid = args.mem_wid\n self.clip_value = args.clip_value\n\n # functional components\n self.controller_params = args.controller_params\n self.accessor_params = args.accessor_params\n\n # now we fill in the missing values for each module\n self.read_vec_dim = self.num_read_heads * self.mem_wid\n # controller\n self.controller_params.batch_size = self.batch_size\n self.controller_params.input_dim = self.input_dim\n self.controller_params.read_vec_dim = self.read_vec_dim\n self.controller_params.output_dim = self.output_dim\n self.controller_params.hidden_dim = self.hidden_dim\n self.controller_params.mem_hei = self.mem_hei\n self.controller_params.mem_wid = self.mem_wid\n self.controller_params.clip_value = self.clip_value\n # accessor: {write_heads, read_heads, memory}\n self.accessor_params.batch_size = self.batch_size\n self.accessor_params.hidden_dim = self.hidden_dim\n self.accessor_params.num_write_heads = self.num_write_heads\n self.accessor_params.num_read_heads = self.num_read_heads\n self.accessor_params.mem_hei = self.mem_hei\n self.accessor_params.mem_wid = self.mem_wid\n self.accessor_params.clip_value = self.clip_value\n\n self.logger.warning(\n \"<-----------------------------======> Circuit: {Controller, Accessor}\"\n )\n\n def _init_weights(self):\n raise NotImplementedError(\"not implemented in base calss\")\n\n def print_model(self):\n self.logger.warning(\n \"<-----------------------------======> Circuit: {Overall Architecture}\"\n )\n self.logger.warning(self)\n\n def _reset_states(\n self\n ): # should be called at the beginning of forwarding a new input sequence\n # we first reset the previous read vector\n self.read_vec_vb = Variable(self.read_vec_ts).type(self.dtype)\n # we then reset the controller's hidden state\n self.controller._reset_states()\n # we then reset the write/read weights of heads\n self.accessor._reset_states()\n\n def _reset(self):\n self._init_weights()\n self.type(self.dtype)\n self.print_model()\n # reset internal states\n self.read_vec_ts = torch.zeros(self.batch_size, self.read_vec_dim).fill_(1e-6)\n self._reset_states()\n\n def forward(self, input_vb):\n # NOTE: the operation order must be the following: control, access{write, read}, output\n\n # 1. first feed {input, read_vec_{t-1}} to controller\n hidden_vb = self.controller.forward(input_vb, self.read_vec_vb)\n # 2. then we write to memory_{t-1} to get memory_{t}; then read from memory_{t} to get read_vec_{t}\n self.read_vec_vb = self.accessor.forward(hidden_vb)\n # 3. 
finally we concat the output from the controller and the current read_vec_{t} to get the final output\n output_vb = self.hid_to_out(\n torch.cat(\n (\n hidden_vb.view(-1, self.hidden_dim),\n self.read_vec_vb.view(-1, self.read_vec_dim),\n ),\n 1,\n )\n )\n\n # we clip the output values here\n return F.sigmoid(\n torch.clamp(output_vb, min=-self.clip_value, max=self.clip_value)\n ).view(1, self.batch_size, self.output_dim)\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport torch\nimport torch.optim as optim\n\nfrom utils.helpers import Experience\n\n\nclass Agent(object):\n def __init__(self, args, env_prototype, circuit_prototype):\n # logging\n self.mode = args.mode # NOTE: when mode==2 we visualize accessor states\n self.logger = args.logger\n\n # prototypes for env & model & memory\n self.env_prototype = env_prototype # NOTE: instantiated in inherited Agents\n self.env_params = args.env_params\n self.circuit_prototype = (\n circuit_prototype\n ) # NOTE: instantiated in inherited Agents\n self.circuit_params = args.circuit_params\n\n # TODO: let's decide what to save later\n # params\n self.model_name = (\n args.model_name\n ) # NOTE: will save the current model to model_name\n self.model_file = (\n args.model_file\n ) # NOTE: will load pretrained model_file if not None\n\n self.render = args.render\n self.visualize = args.visualize\n if self.visualize:\n self.vis = args.vis\n self.refs = args.refs\n\n self.save_best = args.save_best\n if self.save_best:\n self.best_step = None # NOTE: achieves best_reward at this step\n self.best_reward = (\n None\n ) # NOTE: only save a new model if achieves higher reward\n\n self.use_cuda = args.use_cuda\n self.dtype = args.dtype\n\n # agent_params\n # criteria and optimizer\n self.criteria = args.criteria\n self.optim = args.optim\n # hyperparameters\n self.steps = args.steps\n self.batch_size = args.batch_size\n self.early_stop = args.early_stop\n self.clip_grad = args.clip_grad\n # self.clip_value = args.clip_value\n self.lr = args.lr\n self.optim_eps = args.optim_eps\n self.optim_alpha = args.optim_alpha\n self.eval_freq = args.eval_freq\n self.eval_steps = args.eval_steps\n self.prog_freq = args.prog_freq\n self.test_nepisodes = args.test_nepisodes\n\n def _reset_experience(self):\n self.experience = Experience(\n state0=None, action=None, reward=None, state1=None, terminal1=False\n )\n\n def _load_model(self, model_file):\n if model_file:\n self.logger.warning(\"Loading Model: \" + self.model_file + \" ...\")\n self.circuit.load_state_dict(torch.load(model_file))\n self.logger.warning(\"Loaded Model: \" + self.model_file + \" ...\")\n else:\n self.logger.warning(\"No Pretrained Model. Will Train From Scratch.\")\n\n def _save_model(self, step, curr_reward=0.0):\n self.logger.warning(\n \"Saving Model @ Step: \" + str(step) + \": \" + self.model_name + \" ...\"\n )\n if self.save_best:\n if self.best_step is None:\n self.best_step = step\n self.best_reward = curr_reward\n if curr_reward >= self.best_reward:\n self.best_step = step\n self.best_reward = curr_reward\n torch.save(self.circuit.state_dict(), self.model_name)\n self.logger.warning(\n \"Saved Model @ Step: \"\n + str(step)\n + \": \"\n + self.model_name\n + \". 
{Best Step: \"\n + str(self.best_step)\n + \" | Best Reward: \"\n + str(self.best_reward)\n + \"}\"\n )\n else:\n torch.save(self.circuit.state_dict(), self.model_name)\n self.logger.warning(\n \"Saved Model @ Step: \" + str(step) + \": \" + self.model_name + \".\"\n )\n\n def _forward(self, observation):\n raise NotImplementedError(\"not implemented in base calss\")\n\n def _backward(self, reward, terminal):\n raise NotImplementedError(\"not implemented in base calss\")\n\n def fit_model(self): # training\n raise NotImplementedError(\"not implemented in base calss\")\n\n def _eval_model(self): # evaluation during training\n raise NotImplementedError(\"not implemented in base calss\")\n\n def test_model(self): # testing pre-trained models\n raise NotImplementedError(\"not implemented in base calss\")\n"
] |
[
[
"torch.clamp",
"torch.zeros",
"torch.autograd.Variable"
],
[
"torch.load"
]
] |
luomou97/BERT-pytorch |
[
"61bb990d75a23dc39b5a1ec27787c4a596ba5352"
] |
[
"bert_pytorch/model/language_model.py"
] |
[
"import torch.nn as nn\n\nfrom .bert import BERT\n\n\nclass BERTLM(nn.Module):\n \"\"\"\n BERT Language Model\n Next Sentence Prediction Model + Masked Language Model\n \"\"\"\n\n def __init__(self, bert: BERT, vocab_size):\n \"\"\"\n :param bert: BERT model which should be trained\n :param vocab_size: total vocab size for masked_lm\n \"\"\"\n\n super().__init__()\n self.bert = bert\n self.next_sentence = NextSentencePrediction(self.bert.hidden) # next sentence prediction task\n self.mask_lm = MaskedLanguageModel(self.bert.hidden, vocab_size) # next sentence prediction task\n\n def forward(self, x, segment_label):\n x = self.bert(x, segment_label)\n return self.next_sentence(x), self.mask_lm(x)\n\n\nclass NextSentencePrediction(nn.Module):\n \"\"\"\n 2-class classification model : is_next, is_not_next\n \"\"\"\n\n def __init__(self, hidden):\n \"\"\"\n :param hidden: BERT model output size\n \"\"\"\n super().__init__()\n self.linear = nn.Linear(hidden, 2)\n self.softmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, x):\n return self.softmax(self.linear(x[:, 0])) # \\ref page 4, using the first token for classification task, <batch_size, 2>\n\n\nclass MaskedLanguageModel(nn.Module):\n \"\"\"\n predicting origin token from masked input sequence\n n-class classification problem, n-class = vocab_size\n \"\"\"\n\n def __init__(self, hidden, vocab_size):\n \"\"\"\n :param hidden: output size of BERT model\n :param vocab_size: total vocab size\n \"\"\"\n super().__init__()\n self.linear = nn.Linear(hidden, vocab_size)\n self.softmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, x):\n return self.softmax(self.linear(x))\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.LogSoftmax"
]
] |
yakutovicha/aiida-core |
[
"35b5c341e24df22b9b920c094348cef4f1a72846"
] |
[
"aiida/backends/sqlalchemy/migrations/versions/ce56d84bcc35_delete_trajectory_symbols_array.py"
] |
[
"# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n# pylint: disable=invalid-name,no-member\n\"\"\"Delete trajectory symbols array from the repository and the reference in the attributes\n\nRevision ID: ce56d84bcc35\nRevises: 12536798d4d3\nCreate Date: 2019-01-21 15:35:07.280805\n\n\"\"\"\n# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed\n# pylint: disable=no-member,no-name-in-module,import-error\n\nimport numpy\n\nfrom alembic import op\nfrom sqlalchemy.sql import table, column, select, func, text\nfrom sqlalchemy import String, Integer, cast\nfrom sqlalchemy.dialects.postgresql import UUID, JSONB\n\nfrom aiida.backends.general.migrations import utils\n\n# revision identifiers, used by Alembic.\nrevision = 'ce56d84bcc35'\ndown_revision = '12536798d4d3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n \"\"\"Migrations for the upgrade.\"\"\"\n # yapf:disable\n connection = op.get_bind()\n\n DbNode = table('db_dbnode', column('id', Integer), column('uuid', UUID), column('type', String),\n column('attributes', JSONB))\n\n nodes = connection.execute(\n select(DbNode.c.id, DbNode.c.uuid).where(\n DbNode.c.type == op.inline_literal('node.data.array.trajectory.TrajectoryData.'))).fetchall()\n\n for pk, uuid in nodes:\n connection.execute(\n text(f\"\"\"UPDATE db_dbnode SET attributes = attributes #- '{{array|symbols}}' WHERE id = {pk}\"\"\"))\n utils.delete_numpy_array_from_repository(uuid, 'symbols')\n\n\ndef downgrade():\n \"\"\"Migrations for the downgrade.\"\"\"\n # yapf:disable\n connection = op.get_bind()\n\n DbNode = table('db_dbnode', column('id', Integer), column('uuid', UUID), column('type', String),\n column('attributes', JSONB))\n\n nodes = connection.execute(\n select(DbNode.c.id, DbNode.c.uuid).where(\n DbNode.c.type == op.inline_literal('node.data.array.trajectory.TrajectoryData.'))).fetchall()\n\n for pk, uuid in nodes:\n attributes = connection.execute(select(DbNode.c.attributes).where(DbNode.c.id == pk)).fetchone()\n symbols = numpy.array(attributes['symbols'])\n utils.store_numpy_array_in_repository(uuid, 'symbols', symbols)\n key = op.inline_literal('{\"array|symbols\"}')\n connection.execute(DbNode.update().where(DbNode.c.id == pk).values(\n attributes=func.jsonb_set(DbNode.c.attributes, key, cast(list(symbols.shape), JSONB))))\n"
] |
[
[
"numpy.array"
]
] |
yuyq96/asv-subtools |
[
"a678b8f3327de0e99c445a79a9e91e5e0e006b11"
] |
[
"pytorch/libs/training/optim.py"
] |
[
"# -*- coding:utf-8 -*-\n\n# Copyright xmuspeech (Author: Snowdar 2019-08-01)\n\nimport logging\nimport types\nimport math\nimport itertools as it\nfrom torch._six import inf\nfrom functools import partial, wraps\nimport warnings\nfrom bisect import bisect_right\n\nimport torch\nimport torch.optim as optim\nfrom torch.optim.optimizer import Optimizer\n\nimport libs.support.utils as utils\n\n# Logger\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n## Wrapper ✿\ndef get_optimizer(model, params:dict={}):\n # Suggested weight_decay: 1e-4 for l2 regularization (sgd, adam) and \n # 1e-1 for decouped weight decay (sgdw, adamw, radam, ralamb, adamod etc.)\n default_params = {\n \"name\":\"adamW\",\n \"learn_rate\":0.001,\n \"beta1\":0.9,\n \"beta2\":0.999,\n \"beta3\":0.999,\n \"weight_decay\":1e-4,\n \"lookahead.k\":5,\n \"lookahead.alpha\":0.,\n \"gc\":False\n }\n\n used_params = utils.assign_params_dict(default_params, params)\n\n # Base params\n name = used_params[\"name\"]\n learn_rate = used_params[\"learn_rate\"]\n beta1 = used_params[\"beta1\"]\n beta2 = used_params[\"beta2\"]\n beta3 = used_params[\"beta3\"]\n weight_decay = used_params[\"weight_decay\"]\n gc = used_params[\"gc\"]\n\n extra_params = {}\n\n # Gradient centralization: \n # Yong, H., Huang, J., Hua, X., & Zhang, L. (2020). Gradient Centralization: \n # A New Optimization Technique for Deep Neural Networks. arXiv e-prints, arXiv:2004.01461. \n # Retrieved from https://ui.adsabs.harvard.edu/abs/2020arXiv200401461Y\n # Github: https://github.com/Yonghongwei/Gradient-Centralization\n if gc:\n # Specify this list by developer.\n default_support_gc_list = [\"adamW\", \"ralamb\"]\n\n if name not in default_support_gc_list:\n raise TypeError(\"Optimizer {} does not support gradient centralization (GC) now.\".format(name))\n\n extra_params[\"gc\"] = True\n\n # Select optimizer\n if name == \"sgd\":\n base_optimizer = optim.SGD(model.parameters(), lr=learn_rate, momentum=beta1, weight_decay=weight_decay)\n elif name == \"sgdW\":\n base_optimizer = SGDW(model.parameters(), lr=learn_rate, momentum=beta1, weight_decay=weight_decay)\n elif name == \"adam\":\n base_optimizer = optim.Adam(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)\n elif name == \"adamW\":\n base_optimizer = AdamW(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay, **extra_params)\n elif name == \"radam\":\n base_optimizer = RAdam(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)\n elif name == \"ralamb\":\n base_optimizer = Ralamb(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay, **extra_params)\n elif name == \"adamod\":\n base_optimizer = AdaMod(model.parameters(), lr=learn_rate, betas=(beta1, beta2), beta3=beta3, weight_decay=weight_decay)\n elif name == \"novograd\":\n base_optimizer = Novograd(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)\n else:\n raise ValueError(\"Do not support {0} optimizer now.\".format(name))\n\n # Using alpha to decide whether to use lookahead\n if used_params[\"lookahead.alpha\"] > 0:\n logger.info(\"Use lookahead optimizer with alpha={} and k={}\".format(used_params[\"lookahead.alpha\"], used_params[\"lookahead.k\"]))\n optimizer = Lookahead(base_optimizer, k=used_params[\"lookahead.k\"], alpha=used_params[\"lookahead.alpha\"])\n else:\n optimizer = base_optimizer\n\n return optimizer\n\n\n## Optim-wrapper ✿\nclass 
Lookahead(Optimizer):\n \"\"\"https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py\n \"\"\"\n def __init__(self, base_optimizer, k=5, alpha=0.5):\n if not 0.0 <= alpha <= 1.0:\n raise ValueError(f'Invalid slow update rate: {alpha}')\n if not 1 <= k:\n raise ValueError(f'Invalid lookahead steps: {k}')\n self.optimizer = base_optimizer\n self.param_groups = self.optimizer.param_groups\n self.alpha = alpha\n self.k = k\n self.is_back_step = False\n self.init_weights = False\n\n for group in self.param_groups:\n group[\"step_counter\"] = 0\n\n def step(self, closure=None):\n self.is_back_step = False\n # Init weights after model in a certrain device and keep the device of weights same to model. [Snowdar 2018-09-01]\n if not self.init_weights and self.alpha > 0:\n self.slow_weights = [[p.clone().detach() for p in group['params']]\n for group in self.param_groups]\n\n for w in it.chain(*self.slow_weights):\n w.requires_grad = False\n \n self.init_weights = True\n\n loss = None\n if closure is not None:\n loss = closure()\n loss = self.optimizer.step()\n if self.alpha > 0:\n for group,slow_weights in zip(self.param_groups,self.slow_weights):\n group['step_counter'] += 1\n if group['step_counter'] % self.k != 0:\n continue\n else:\n self.is_back_step = True\n\n for p,q in zip(group['params'],slow_weights):\n if p.grad is None:\n continue\n q.data.add_(self.alpha * (p.data - q.data))\n p.data.copy_(q.data)\n return loss\n\n\n## Optimizer ✿\nclass SGDW(Optimizer):\n r\"\"\"Implements stochastic gradient descent (optionally with momentum) with decouped weight decay.\n\n Nesterov momentum is based on the formula from\n `On the importance of initialization and momentum in deep learning`__.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\n\n .. note::\n The implementation of SGD with Momentum/Nesterov subtly differs from\n Sutskever et. al. and implementations in some other frameworks.\n\n Considering the specific case of Momentum, the update can be written as\n\n .. math::\n v_{t+1} = \\mu * v_{t} + g_{t+1} \\\\\n p_{t+1} = p_{t} - lr * v_{t+1}\n\n where p, g, v and :math:`\\mu` denote the parameters, gradient,\n velocity, and momentum respectively.\n\n This is in contrast to Sutskever et. al. and\n other frameworks which employ an update of the form\n\n .. 
math::\n v_{t+1} = \\mu * v_{t} + lr * g_{t+1} \\\\\n p_{t+1} = p_{t} - v_{t+1}\n\n The Nesterov version is analogously modified.\n \"\"\"\n\n def __init__(self, params, lr=0.1, momentum=0, dampening=0,\n weight_decay=0, nesterov=False):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGDW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGDW, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_((1 - dampening) * d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n if group['weight_decay'] != 0:\n p.data.add_(-group['weight_decay'] * group['lr'] * p.data)\n\n p.data.add_(-group['lr'] * d_p)\n\n return loss\n\n\nclass AdamW(Optimizer):\n r\"\"\"Implements AdamW algorithm.\n\n The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.\n The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay coefficient (default: 1e-2)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _Decoupled Weight Decay Regularization:\n https://arxiv.org/abs/1711.05101\n .. 
_On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=1e-2, amsgrad=False, gc=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n self.gc = gc\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, amsgrad=amsgrad)\n super(AdamW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdamW, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n\n # Perform stepweight decay\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\n\n # Perform optimization step\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data) #, memory_format=torch.preserve_format)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data) #, memory_format=torch.preserve_format)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros_like(p.data) #, memory_format=torch.preserve_format)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n if self.gc:\n # For linear layer Y=WX+b, the tensor shape of weight is (outplanes, inplanes),\n # but for CNN layer(1d and 2d etc.), the tensor shape of weight is (outplanes, inplanes, [cnn-core]).\n # And here the gc is used in both linear and CNN layer.\n # It is not influenced by weight decay for weight decay directly changes the p.data rather than p.grad.\n # But when using gc in adam, the order question should be considered for L2 regularization changes \n # the p.grad.\n if len(list(grad.size()))>=2:\n grad.add_(-grad.mean(dim = tuple(range(1,len(list(grad.size())))), keepdim = True))\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_((1 - beta1) * grad)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. 
of gradient\n denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n else:\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n\n step_size = group['lr'] / bias_correction1\n\n p.data.addcdiv_(exp_avg, denom, value=-step_size)\n\n return loss\n\n\nclass RAdam(Optimizer):\n '''https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py\n \n a PyTorch implementation of the RAdam Optimizer from th paper\n On the Variance of the Adaptive Learning Rate and Beyond.\n https://arxiv.org/abs/1908.03265\n Example:\n >>> from optimizer import RAdam\n >>> optimizer = RAdam(model.parameters(), lr=0.001)\n Note, here the weight decay is not L2 regularization.\n '''\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), N_sma_threshhold=4, eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.N_sma_threshhold = N_sma_threshhold\n self.buffer = [[None, None, None] for ind in range(10)]\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n exp_avg.mul_(beta1).add_((1 - beta1) * grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n if N_sma > self.N_sma_threshhold:\n step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'] * p_data_fp32)\n\n if N_sma > self.N_sma_threshhold:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)\n else:\n p_data_fp32.add_(-step_size * exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass Ralamb(Optimizer):\n '''https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py\n Ralamb optimizer [RAdam + Layer-wise Adaptive Rate Scaling (LARS) trick]\n '''\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), N_sma_threshhold=4, eps=1e-8, weight_decay=0, gc=False):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.N_sma_threshhold = N_sma_threshhold\n self.buffer = [[None, None, None] for ind in range(10)]\n self.gc = gc\n super(Ralamb, self).__init__(params, defaults)\n\n def __setstate__(self, 
state):\n super(Ralamb, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Ralamb does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n if self.gc:\n # For linear layer Y=WX+b, the tensor shape of weight is (outplanes, inplanes),\n # but for CNN layer(1d and 2d etc.), the tensor shape of weight is (outplanes, inplanes, [cnn-core]).\n # And here the gc is used in both linear and CNN layer.\n if len(list(grad.size()))>=2:\n grad.add_(-grad.mean(dim = tuple(range(1,len(list(grad.size())))), keepdim = True))\n\n # Decay the first and second moment running average coefficient\n # m_t\n exp_avg.mul_(beta1).add_((1 - beta1) * grad)\n # v_t\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n\n if state['step'] == buffered[0]:\n N_sma, radam_step = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma > self.N_sma_threshhold:\n radam_step = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n radam_step = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = radam_step\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'] * p_data_fp32)\n\n weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)\n radam_norm = p_data_fp32.pow(2).sum().sqrt()\n if weight_norm == 0 or radam_norm == 0:\n trust_ratio = 1\n else:\n trust_ratio = weight_norm / radam_norm\n\n state['weight_norm'] = weight_norm\n state['adam_norm'] = radam_norm\n state['trust_ratio'] = trust_ratio\n\n # more conservative since it's an approximated value\n if N_sma > self.N_sma_threshhold:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(exp_avg, denom, value=-radam_step * trust_ratio)\n else:\n p_data_fp32.add_(exp_avg, alpha=-radam_step * trust_ratio)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass AdaMod(Optimizer):\n \"\"\"Implements AdaMod algorithm with Decoupled Weight Decay (arxiv.org/abs/1711.05101)\n It has been proposed in `Adaptive and Momental Bounds for Adaptive Learning Rate Methods`_.\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n beta3 (float, optional): smoothing coefficient for adaptive learning rates (default: 0.9999)\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n 
weight_decay (float, optional): weight decay rather than L2 penalty (default: 0)\n\n Reference: https://github.com/lancopku/AdaMod.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), beta3=0.999,\n eps=1e-8, weight_decay=0):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n if not 0.0 <= beta3 < 1.0:\n raise ValueError(\"Invalid beta3 parameter: {}\".format(beta3))\n defaults = dict(lr=lr, betas=betas, beta3=beta3, eps=eps,\n weight_decay=weight_decay)\n super(AdaMod, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdaMod, self).__setstate__(state)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'AdaMod does not support sparse gradients')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n # Exponential moving average of actual learning rates\n state['exp_avg_lr'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq, exp_avg_lr = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_lr']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_((1 - beta1) * grad)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n if group['weight_decay'] != 0:\n p.data.add_(-group['weight_decay'] * group['lr'] * p.data)\n\n # Applies momental bounds on actual learning rates\n step_size = torch.full_like(denom, step_size)\n step_size.div_(denom)\n exp_avg_lr.mul_(group['beta3']).add_((1 - group['beta3']) * step_size)\n step_size = torch.min(step_size, exp_avg_lr)\n step_size.mul_(exp_avg)\n\n p.data.add_(-step_size)\n\n return loss\n\n\nclass Novograd(Optimizer):\n \"\"\"\n Implements Novograd algorithm.\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.95, 0))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (not L2 penalty) (default: 0)\n grad_averaging: gradient averaging\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n \n Reference: 
https://github.com/NVIDIA/DeepLearningExamples/\n blob/22f122183da1d46052a114bfcc1727921829e705/PyTorch/SpeechRecognition/\n Jasper/optimizers.py\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.95, 0.25), eps=1e-8,\n weight_decay=0, grad_averaging=False, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay,\n grad_averaging=grad_averaging,\n amsgrad=amsgrad)\n\n super(Novograd, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(Novograd, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Sparse gradients are not supported.')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n\n if group['weight_decay'] != 0:\n p.data.add_(-group['weight_decay'] * group['lr'] * p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n norm = torch.sum(torch.pow(grad, 2))\n\n if exp_avg_sq == 0:\n exp_avg_sq.copy_(norm)\n else:\n exp_avg_sq.mul_(beta2).add_((1 - beta2) * norm)\n\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n grad.div_(denom)\n # if group['weight_decay'] != 0:\n # grad.add_(group['weight_decay'], p.data)\n if group['grad_averaging']:\n grad.mul_(1 - beta1)\n exp_avg.mul_(beta1).add_(grad)\n\n p.data.add_(-group['lr'] * exp_avg)\n \n return loss\n"
] |
[
[
"torch.max",
"torch.zeros",
"torch.min",
"torch.clone",
"torch.zeros_like",
"torch.full_like",
"torch.pow"
]
] |
davidhalladay/DSP-Auto-drawer-Generating-and-Modifying-Images-Continually-using-Knowledge-graph |
[
"1610bbd567a5caba0478d8f7026f98766e6e39f8"
] |
[
"scripts/run_model.py"
] |
[
"#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse, json, os\n\nfrom imageio import imwrite\nimport torch\n\nfrom sg2im.model import Sg2ImModel\nfrom sg2im.data.utils import imagenet_deprocess_batch\nimport sg2im.vis as vis\n\nimport pickle\nimport pprint\nfrom sklearn_crfsuite import CRF \nfrom sklearn_crfsuite import metrics\n\nimport gensim\nfrom gensim.models import Word2Vec\n\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.tag.util import untag\nfrom nltk.tree import ParentedTree, Tree\nfrom nltk.corpus import brown, movie_reviews, treebank\nfrom sg2im.drawer import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--checkpoint', default='sg2im-models/vg128.pt')\nparser.add_argument('--scene_graphs_json', default='scene_graphs/figure_6_sheep.json')\nparser.add_argument('--crf_model_pretrained', default='./simple_crf/crf_model.pkl')\nparser.add_argument('--coco_cls_txt', default='./datasets/coco_cls.txt')\nparser.add_argument('--word2vec_sample', default='models/word2vec_sample/pruned.word2vec.txt')\nparser.add_argument('--output_dir', default='outputs')\nparser.add_argument('--draw_scene_graphs', type=int, default=0)\nparser.add_argument('--device', default='gpu', choices=['cpu', 'gpu'])\n\n\ndef main(args):\n if not os.path.isfile(args.checkpoint):\n print('ERROR: Checkpoint file \"%s\" not found' % args.checkpoint)\n print('Maybe you forgot to download pretraind models? 
Try running:')\n print('bash scripts/download_models.sh')\n return\n\n if not os.path.isdir(args.output_dir):\n print('Output directory \"%s\" does not exist; creating it' % args.output_dir)\n os.makedirs(args.output_dir)\n\n if args.device == 'cpu':\n device = torch.device('cpu')\n elif args.device == 'gpu':\n device = torch.device('cuda:0')\n if not torch.cuda.is_available():\n print('WARNING: CUDA not available; falling back to CPU')\n device = torch.device('cpu')\n\n # Load the model, with a bit of care in case there are no GPUs\n map_location = 'cpu' if device == torch.device('cpu') else None\n checkpoint = torch.load(args.checkpoint, map_location=map_location)\n model = Sg2ImModel(**checkpoint['model_kwargs'])\n model.load_state_dict(checkpoint['model_state'])\n model.eval()\n model.to(device)\n\n # Load the scene graphs\n# with open(args.scene_graphs_json, 'r') as f:\n# scene_graphs = json.load(f)\n crf_model_path = args.crf_model_pretrained\n crf_model = pickle.load(open(crf_model_path, 'rb'))\n cate_list = load_cate(args.coco_cls_txt)\n pos_lists, feat_x, feat_y, pca, clf, wn_model = construct_pos_list(args.word2vec_sample)\n print(\"Start drawing something!\")\n count = 0\n sg_list = [{'objects': [], 'relationships': []}]\n while 1:\n sentence = input(\"Please input a sentence: \")\n # Run the model forward\n # scene_graphs only with one graph\n token_sentence = word_tokenize(sentence)\n t = pos_tag(token_sentence, crf_model)\n print(t)\n so_list, p_list = spo_extractor(t, cate_list)\n if len(so_list) != 2: \n print(\"please make sure that input sentence contain 2 objects in coco_list.\")\n print(\"Only find \",so_list)\n continue\n so_list = so_extractor(so_list, cate_list)\n p_list = p_extractor(p_list, pos_lists, feat_x, feat_y, pca, clf, wn_model)\n scene_graphs = sg_constructor(so_list, p_list, sg_list)\n print(sg_list)\n with torch.no_grad():\n imgs, boxes_pred, masks_pred, _ = model.forward_json(scene_graphs)\n imgs = imagenet_deprocess_batch(imgs)\n\n # Save the generated images\n for i in range(imgs.shape[0]):\n img_np = imgs[i].numpy().transpose(1, 2, 0)\n img_path = os.path.join(args.output_dir, 'img%06d.png' % count)\n imwrite(img_path, img_np)\n\n # Draw the scene graphs\n if args.draw_scene_graphs == 1:\n for i, sg in enumerate(scene_graphs):\n sg_img = vis.draw_scene_graph(sg['objects'], sg['relationships'])\n sg_img_path = os.path.join(args.output_dir, 'sg%06d.png' % count)\n imwrite(sg_img_path, sg_img)\n count += 1\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n\n"
] |
[
[
"torch.device",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
]
] |
ditastaszewski/C17705971-DT228-FYP |
[
"5e4be0aad9b6d86180c01a3291952e3de0bec156"
] |
[
"site/mysite/facerecognition/tools.py"
] |
[
"import cv2 \nimport matplotlib.pyplot as plot\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image as image\nimport easygui\nimport numpy as np\nimport glob\nimport tensorflow as tf\nimport keras,os\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPool2D , Flatten\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.resnet50 import preprocess_input\nfrom keras import optimizers\nfrom tensorflow.python.keras.applications.resnet import ResNet50\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Dense\nimport PIL\nfrom PIL import ImageFont, ImageDraw, Image \n\nfrom django.conf import settings\n\nstaticPath = settings.STATICFILES_DIRS[0]\nfaceLabels = ['face', 'non-face']\ncharacterLabels = ['Flandre', 'Marisa', 'Reimu', 'Remilia', 'Sakuya']\n\ndef loadModel(modelPath, classes):\n #ResNet50 model for the face classification\n model = Sequential()\n\n # 1st layer as the lumpsum weights from resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5\n # NOTE that this layer will be set below as NOT TRAINABLE, i.e., use it as is\n #model.add(ResNet50(include_top = False, pooling = RESNET50_POOLING_AVERAGE, weights = weightsPath))\n model.add(ResNet50(include_top = False, pooling = 'avg'))\n\n # 2nd layer as Dense for 2-class classification\n model.add(Dense(classes, activation = 'softmax'))\n\n sgd = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)\n model.compile(optimizer = sgd, loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\n model.load_weights(modelPath)\n \n return model\n \ndef getFacePrediction(img, labels):\n \n img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)\n img = img.reshape((1,) + img.shape)\n\n predictions = faceModel.predict(img, steps=1)\n #print(predictions)\n #print(verbosePredictions(predictions, labels))\n \n if predictions[0][0] > predictions[0][1]:\n return 1\n else:\n return 0\n\ndef getCharacterPrediction(img, labels):\n img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)\n img = img.reshape((1,) + img.shape)\n\n predictions = touhouModel.predict(img, steps=1)\n \n #print(predictions)\n #print(verbosePredictions(predictions, labels))\n \n highestPrediction = np.amax(predictions[0])\n predictionPercentage = highestPrediction * 100\n predictionIndex = np.argmax(predictions[0])\n character = labels[predictionIndex]\n \n return character, predictionPercentage\n \ndef verbosePredictions(predictions, labels):\n predictString = \"\"\n for i in range(0, len(predictions[0])):\n predictString += \"%s-%.2f%% \" % (labels[i], predictions[0][i] * 100)\n \n return predictString\n\ndef getFaces(inpath, outpath, classifyCharacters):\n #Get the width and height\n img = cv2.imread(inpath)\n markedImg = img.copy()\n height = img.shape[0]\n width = img.shape[1]\n potentialFaces = 0\n actualFaces = 0\n #rectangle width\n rW = 2\n \n faceCascade = cv2.CascadeClassifier(staticPath + \"/code/lbpcascade_animeface.xml\")\n \n faces = faceCascade.detectMultiScale(img,\n # detector options\n scaleFactor = 1.01,\n minNeighbors = 3,\n minSize = (32, 32))\n\n charactersFound = dict.fromkeys(characterLabels, 0)\n \n for (x, y, w, h) in faces:\n potentialFaces += 1\n #cv2.rectangle(markedImg, (x,y), (x + w, y + h), (0,0,255), rW)\n\n prediction = 0\n\n #print(potentialFaces)\n prediction = getFacePrediction(img[y:y+h, x:x+w], faceLabels) \n #cv2.rectangle(markedImg, (lx,ly), (rx, ry), (255,0,0), rW)\n \n if prediction 
== 1:\n #print(\"detected\")\n outputImg = img.copy()\n actualFaces += 1\n \n #See which charcter it is if we are going to classify the characters\n if classifyCharacters:\n character, characterPrediction = getCharacterPrediction(outputImg[y:y+h, x:x+w], characterLabels)\n resultString = \"%s-%.2f%%\" % (character, characterPrediction)\n \n #Increment the counter for how many times the character was found in the image\n charactersFound[character] += 1\n \n fontSize = 40\n font = ImageFont.truetype(\"arial.ttf\", fontSize)\n \n while font.getsize(resultString)[0] > w:\n fontSize -= 1\n font = ImageFont.truetype(\"arial.ttf\", fontSize) \n \n fW, fH = font.getsize(resultString)[0], font.getsize(resultString)[1] \n \n markedImgHSV = cv2.cvtColor(markedImg, cv2.COLOR_BGR2HSV)\n \n markedImgHSV[y+h-fH:y+h,x:x+w,2] = markedImgHSV[y+h-fH:y+h,x:x+w,2] * 0.5\n markedImg = cv2.cvtColor(markedImgHSV, cv2.COLOR_HSV2BGR)\n \n cv2.rectangle(markedImg, (x,y), (x+w, y+h), (255,255,255), rW, lineType=cv2.LINE_AA)\n cv2.rectangle(markedImg, (x,y), (x+w, y+h), (0,0,0), rW - 1, lineType=cv2.LINE_AA)\n \n tempImg = Image.fromarray(markedImg)\n draw = ImageDraw.Draw(tempImg)\n draw.text((x+rW, y+h-rW), resultString, font=font, anchor='lb') \n #draw.text((x+rW+1, y+rW+1), str(potentialFaces), font=font, anchor='lt') \n markedImg = np.asarray(tempImg)\n else:\n cv2.rectangle(markedImg, (x,y), (x+w, y+h), (0,255,0), rW * 2, lineType=cv2.LINE_AA)\n \n #cv2.putText(markedImg, str(potentialFaces), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)\n cv2.imwrite(outpath, markedImg) \n\n\n return actualFaces, charactersFound\n \nfaceModel = loadModel(staticPath + \"/code/faceModel.hdf5\", 2)\ntouhouModel = loadModel(staticPath + \"/code/touhouModel.hdf5\", 5)"
] |
[
[
"numpy.amax",
"tensorflow.python.keras.layers.Dense",
"numpy.asarray",
"tensorflow.python.keras.models.Sequential",
"numpy.argmax",
"tensorflow.python.keras.applications.resnet.ResNet50"
]
] |
CFM-MSG/SDN |
[
"f309602dc2bb73117355003f3744f8e5450dbccc"
] |
[
"extension/backbones/simple_backbone.py"
] |
[
"import torch\n\nclass simple_backbone(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.conv = torch.nn.Sequential(torch.nn.Conv2d(1, 64, 3, 1, 1),\n torch.nn.ReLU(),\n torch.nn.Conv2d(64, 128, 3, 1, 1),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(2, 2)) \n def forward(self, images):\n output = self.conv(images)\n return output\n"
] |
[
[
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
pfornia/tsfresh |
[
"9550f84b8a920cfe53d9b6ca47eedeca619725cf"
] |
[
"tests/units/transformers/test_relevant_feature_augmenter.py"
] |
[
"# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import model_selection\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom tests.fixtures import DataTestCase\nimport mock\n\nfrom tsfresh.feature_extraction import MinimalFCParameters\nfrom tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter\nfrom tests.fixtures import warning_free\n\n\nclass RelevantFeatureAugmenterTestCase(DataTestCase):\n def setUp(self):\n self.test_df = self.create_test_data_sample()\n fc_parameters = {\"length\": None}\n self.kind_to_fc_parameters = {\"a\": fc_parameters.copy(),\n \"b\": fc_parameters.copy()}\n\n def test_not_fitted(self):\n augmenter = RelevantFeatureAugmenter()\n\n X = pd.DataFrame()\n\n self.assertRaises(RuntimeError, augmenter.transform, X)\n\n def test_no_timeseries(self):\n augmenter = RelevantFeatureAugmenter()\n\n X = pd.DataFrame()\n y = pd.Series(dtype=\"float64\")\n\n self.assertRaises(RuntimeError, augmenter.fit, X, y)\n self.assertRaises(RuntimeError, augmenter.fit_transform, X, y)\n\n def test_nothing_relevant(self):\n augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,\n column_value=\"val\", column_id=\"id\", column_sort=\"sort\",\n column_kind=\"kind\")\n\n y = pd.Series({10: 1, 500: 0})\n X = pd.DataFrame(index=[10, 500])\n\n augmenter.set_timeseries_container(self.test_df)\n augmenter.fit(X, y)\n transformed_X = augmenter.transform(X.copy())\n\n fit_transformed_X = augmenter.fit_transform(X, y)\n\n self.assertEqual(list(transformed_X.columns), [])\n self.assertEqual(list(transformed_X.index), list(X.index))\n self.assertEqual(list(fit_transformed_X.columns), [])\n self.assertEqual(list(fit_transformed_X.index), list(X.index))\n\n def test_filter_only_tsfresh_features_true(self):\n \"\"\"\n The boolean flag `filter_only_tsfresh_features` makes sure that only the time series based features are\n filtered. This unit tests checks that\n \"\"\"\n\n augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,\n filter_only_tsfresh_features=True,\n column_value=\"val\", column_id=\"id\", column_sort=\"sort\", column_kind=\"kind\")\n\n y = pd.Series({10: 1, 500: 0})\n X = pd.DataFrame(index=[10, 500])\n X[\"pre_feature\"] = 0\n\n augmenter.set_timeseries_container(self.test_df)\n augmenter.fit(X, y)\n transformed_X = augmenter.transform(X.copy())\n\n fit_transformed_X = augmenter.fit_transform(X, y)\n\n self.assertEqual(sum([\"pre_feature\" == column for column in transformed_X.columns]), 1)\n self.assertEqual(sum([\"pre_feature\" == column for column in fit_transformed_X.columns]), 1)\n\n def test_filter_only_tsfresh_features_false(self):\n \"\"\"\n The boolean flag `filter_only_tsfresh_features` makes sure that only the time series based features are\n filtered. 
This unit tests checks that\n \"\"\"\n\n augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,\n filter_only_tsfresh_features=False,\n column_value=\"val\", column_id=\"id\", column_sort=\"sort\", column_kind=\"kind\")\n\n df, y = self.create_test_data_sample_with_target()\n X = pd.DataFrame(index=np.unique(df.id))\n X[\"pre_drop\"] = 0\n X[\"pre_keep\"] = y\n\n augmenter.set_timeseries_container(df)\n augmenter.fit(X, y)\n transformed_X = augmenter.transform(X.copy())\n\n fit_transformed_X = augmenter.fit_transform(X, y)\n\n self.assertEqual(sum([\"pre_keep\" == column for column in transformed_X.columns]), 1)\n self.assertEqual(sum([\"pre_drop\" == column for column in transformed_X.columns]), 0)\n self.assertEqual(sum([\"pre_keep\" == column for column in fit_transformed_X.columns]), 1)\n self.assertEqual(sum([\"pre_drop\" == column for column in fit_transformed_X.columns]), 0)\n\n @mock.patch('tsfresh.transformers.feature_selector.calculate_relevance_table')\n def test_does_impute(self, calculate_relevance_table_mock):\n df = pd.DataFrame([[1, 1, 1], [2, 1, 1]], columns=['id', 'time', 'value'])\n X = pd.DataFrame(index=[1])\n y = pd.Series([0, 1])\n fc_parameters = {\"autocorrelation\": [{'lag': 2}]}\n\n calculate_relevance_table_mock.return_value = pd.DataFrame(columns=['feature', 'p_value', 'relevant'])\n augmenter = RelevantFeatureAugmenter(column_id='id', column_sort='time', default_fc_parameters=fc_parameters)\n augmenter.set_timeseries_container(df)\n with warning_free():\n augmenter.fit(X, y)\n\n assert calculate_relevance_table_mock.call_count == 1\n assert not calculate_relevance_table_mock.call_args[0][0].isnull().any().any()\n\n def test_no_ids_present(self):\n augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,\n filter_only_tsfresh_features=False,\n column_value=\"val\", column_id=\"id\", column_sort=\"sort\", column_kind=\"kind\")\n\n df, y = self.create_test_data_sample_with_target()\n X_with_wrong_ids = pd.DataFrame(index=[-999])\n\n augmenter.set_timeseries_container(df)\n\n self.assertRaisesRegex(AttributeError, r\"The ids of the time series container\",\n augmenter.fit, X_with_wrong_ids, y)\n self.assertRaisesRegex(AttributeError, r\"The ids of the time series container\",\n augmenter.fit_transform, X_with_wrong_ids, y)\n\n def test_multiclass_selection(self):\n augmenter = RelevantFeatureAugmenter(\n column_value=\"val\",\n column_id=\"id\",\n column_sort=\"sort\",\n column_kind=\"kind\",\n multiclass=True,\n n_significant=3,\n )\n\n df, y = self.create_test_data_sample_with_multiclass_target()\n X = pd.DataFrame(index=np.unique(df.id))\n\n augmenter.set_timeseries_container(df)\n fit_transformed_X = augmenter.fit_transform(X, y)\n\n self.assertEqual(len(fit_transformed_X.columns), 4)\n\n\ndef test_relevant_augmentor_cross_validated():\n \"\"\"\n Validates that the RelevantFeatureAugmenter can be cloned in pipelines, see issue 537\n \"\"\"\n n = 16 # number of samples, needs to be divisable by 4\n index = range(n)\n df_ts = pd.DataFrame({\"time\": [10, 11] * n, \"id\": np.repeat(index, 2),\n \"value\": [0, 1] * (n // 4) + [1, 2] * (n // 4) + # class 0\n [10, 11] * (n // 4) + [12, 14] * (n // 4)})\n y = pd.Series(data=[0] * (n // 2) + [1] * (n // 2), index=index)\n X = pd.DataFrame(index=index)\n augmenter = RelevantFeatureAugmenter(column_id='id', column_sort='time', timeseries_container=df_ts,\n default_fc_parameters=MinimalFCParameters(),\n disable_progressbar=True, show_warnings=False, 
fdr_level=0.90)\n pipeline = Pipeline([('augmenter', augmenter),\n ('classifier', RandomForestClassifier(random_state=1))])\n\n scores = model_selection.cross_val_score(pipeline, X, y, cv=2)\n assert (scores == np.array([1, 1])).all()\n"
] |
[
[
"sklearn.model_selection.cross_val_score",
"pandas.Series",
"sklearn.ensemble.RandomForestClassifier",
"numpy.unique",
"pandas.DataFrame",
"numpy.repeat",
"numpy.array"
]
] |
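For reference, the cross-validated test in the row above reduces to the following minimal usage pattern for RelevantFeatureAugmenter inside an sklearn pipeline. This is a sketch reusing the same toy data shape as the test; tsfresh and scikit-learn are assumed to be installed.

# Minimal sketch of the pipeline pattern exercised by the test above (illustrative only).
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from tsfresh.feature_extraction import MinimalFCParameters
from tsfresh.transformers import RelevantFeatureAugmenter

n = 16  # number of time series, divisible by 4
index = range(n)
df_ts = pd.DataFrame({"time": [10, 11] * n, "id": np.repeat(index, 2),
                      "value": [0, 1] * (n // 4) + [1, 2] * (n // 4) +      # class 0
                               [10, 11] * (n // 4) + [12, 14] * (n // 4)})  # class 1
y = pd.Series(data=[0] * (n // 2) + [1] * (n // 2), index=index)
X = pd.DataFrame(index=index)  # one (empty) row per time series id

augmenter = RelevantFeatureAugmenter(column_id="id", column_sort="time",
                                     timeseries_container=df_ts,
                                     default_fc_parameters=MinimalFCParameters(),
                                     fdr_level=0.90)
pipeline = Pipeline([("augmenter", augmenter),
                     ("classifier", RandomForestClassifier(random_state=1))])
print(cross_val_score(pipeline, X, y, cv=2))  # features are extracted and selected per CV fold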
szha/dgl
|
[
"60d2e7d3c928d43bbb18e7ab17c066451c49f649"
] |
[
"apps/life_sci/python/dgllife/utils/splitters.py"
] |
[
"\"\"\"Various methods for splitting chemical datasets.\n\nWe mostly adapt them from deepchem\n(https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py).\n\"\"\"\n# pylint: disable= no-member, arguments-differ, invalid-name\n# pylint: disable=E0611\nfrom collections import defaultdict\nfrom functools import partial\nfrom itertools import accumulate, chain\nfrom rdkit import Chem\nfrom rdkit.Chem import rdMolDescriptors\nfrom rdkit.Chem.rdmolops import FastFindRings\nfrom rdkit.Chem.Scaffolds import MurckoScaffold\n\nimport dgl.backend as F\nimport numpy as np\nfrom dgl.data.utils import split_dataset, Subset\n\n__all__ = ['ConsecutiveSplitter',\n 'RandomSplitter',\n 'MolecularWeightSplitter',\n 'ScaffoldSplitter',\n 'SingleTaskStratifiedSplitter']\n\ndef base_k_fold_split(split_method, dataset, k, log):\n \"\"\"Split dataset for k-fold cross validation.\n\n Parameters\n ----------\n split_method : callable\n Arbitrary method for splitting the dataset\n into training, validation and test subsets.\n dataset\n We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``\n gives the ith datapoint.\n k : int\n Number of folds to use and should be no smaller than 2.\n log : bool\n Whether to print a message at the start of preparing each fold.\n\n Returns\n -------\n all_folds : list of 2-tuples\n Each element of the list represents a fold and is a 2-tuple (train_set, val_set).\n \"\"\"\n assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(k)\n all_folds = []\n frac_per_part = 1. / k\n for i in range(k):\n if log:\n print('Processing fold {:d}/{:d}'.format(i + 1, k))\n # We are reusing the code for train-validation-test split.\n train_set1, val_set, train_set2 = split_method(dataset,\n frac_train=i * frac_per_part,\n frac_val=frac_per_part,\n frac_test=1. 
- (i + 1) * frac_per_part)\n # For cross validation, each fold consists of only a train subset and\n # a validation subset.\n train_set = Subset(dataset, np.concatenate(\n [train_set1.indices, train_set2.indices]).astype(np.int64))\n all_folds.append((train_set, val_set))\n return all_folds\n\ndef train_val_test_sanity_check(frac_train, frac_val, frac_test):\n \"\"\"Sanity check for train-val-test split\n\n Ensure that the fractions of the dataset to use for training,\n validation and test add up to 1.\n\n Parameters\n ----------\n frac_train : float\n Fraction of the dataset to use for training.\n frac_val : float\n Fraction of the dataset to use for validation.\n frac_test : float\n Fraction of the dataset to use for test.\n \"\"\"\n total_fraction = frac_train + frac_val + frac_test\n assert np.allclose(total_fraction, 1.), \\\n 'Expect the sum of fractions for training, validation and ' \\\n 'test to be 1, got {:.4f}'.format(total_fraction)\n\ndef indices_split(dataset, frac_train, frac_val, frac_test, indices):\n \"\"\"Reorder datapoints based on the specified indices and then take consecutive\n chunks as subsets.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``\n gives the ith datapoint.\n frac_train : float\n Fraction of data to use for training.\n frac_val : float\n Fraction of data to use for validation.\n frac_test : float\n Fraction of data to use for test.\n indices : list or ndarray\n Indices specifying the order of datapoints.\n\n Returns\n -------\n list of length 3\n Subsets for training, validation and test, which are all :class:`Subset` instances.\n \"\"\"\n frac_list = np.array([frac_train, frac_val, frac_test])\n assert np.allclose(np.sum(frac_list), 1.), \\\n 'Expect frac_list sum to 1, got {:.4f}'.format(np.sum(frac_list))\n num_data = len(dataset)\n lengths = (num_data * frac_list).astype(int)\n lengths[-1] = num_data - np.sum(lengths[:-1])\n\n return [Subset(dataset, list(indices[offset - length:offset]))\n for offset, length in zip(accumulate(lengths), lengths)]\n\ndef count_and_log(message, i, total, log_every_n):\n \"\"\"Print a message to reflect the progress of processing once a while.\n\n Parameters\n ----------\n message : str\n Message to print.\n i : int\n Current index.\n total : int\n Total count.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed.\n \"\"\"\n if (log_every_n is not None) and ((i + 1) % log_every_n == 0):\n print('{} {:d}/{:d}'.format(message, i + 1, total))\n\ndef prepare_mols(dataset, mols, sanitize, log_every_n=1000):\n \"\"\"Prepare RDKit molecule instances.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``\n gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the\n ith datapoint.\n mols : None or list of rdkit.Chem.rdchem.Mol\n None or pre-computed RDKit molecule instances. If not None, we expect a\n one-on-one correspondence between ``dataset.smiles`` and ``mols``, i.e.\n ``mols[i]`` corresponds to ``dataset.smiles[i]``.\n sanitize : bool\n This argument only comes into effect when ``mols`` is None and decides whether\n sanitization is performed in initializing RDKit molecule instances. 
See\n https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed. Default to 1000.\n\n Returns\n -------\n mols : list of rdkit.Chem.rdchem.Mol\n RDkit molecule instances where there is a one-on-one correspondence between\n ``dataset.smiles`` and ``mols``, i.e. ``mols[i]`` corresponds to ``dataset.smiles[i]``.\n \"\"\"\n if mols is not None:\n # Sanity check\n assert len(mols) == len(dataset), \\\n 'Expect mols to be of the same size as that of the dataset, ' \\\n 'got {:d} and {:d}'.format(len(mols), len(dataset))\n else:\n if log_every_n is not None:\n print('Start initializing RDKit molecule instances...')\n mols = []\n for i, s in enumerate(dataset.smiles):\n count_and_log('Creating RDKit molecule instance',\n i, len(dataset.smiles), log_every_n)\n mols.append(Chem.MolFromSmiles(s, sanitize=sanitize))\n\n return mols\n\nclass ConsecutiveSplitter(object):\n \"\"\"Split datasets with the input order.\n\n The dataset is split without permutation, so the splitting is deterministic.\n \"\"\"\n\n @staticmethod\n def train_val_test_split(dataset, frac_train=0.8, frac_val=0.1, frac_test=0.1):\n \"\"\"Split the dataset into three consecutive chunks for training, validation and test.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``\n gives the ith datapoint.\n frac_train : float\n Fraction of data to use for training. By default, we set this to be 0.8, i.e.\n 80% of the dataset is used for training.\n frac_val : float\n Fraction of data to use for validation. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for validation.\n frac_test : float\n Fraction of data to use for test. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for test.\n\n Returns\n -------\n list of length 3\n Subsets for training, validation and test, which are all :class:`Subset` instances.\n \"\"\"\n return split_dataset(dataset, frac_list=[frac_train, frac_val, frac_test], shuffle=False)\n\n @staticmethod\n def k_fold_split(dataset, k=5, log=True):\n \"\"\"Split the dataset for k-fold cross validation by taking consecutive chunks.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``\n gives the ith datapoint.\n k : int\n Number of folds to use and should be no smaller than 2. 
Default to be 5.\n log : bool\n Whether to print a message at the start of preparing each fold.\n\n Returns\n -------\n list of 2-tuples\n Each element of the list represents a fold and is a 2-tuple (train_set, val_set).\n \"\"\"\n return base_k_fold_split(ConsecutiveSplitter.train_val_test_split, dataset, k, log)\n\nclass RandomSplitter(object):\n \"\"\"Randomly reorder datasets and then split them.\n\n The dataset is split with permutation and the splitting is hence random.\n \"\"\"\n\n @staticmethod\n def train_val_test_split(dataset, frac_train=0.8, frac_val=0.1,\n frac_test=0.1, random_state=None):\n \"\"\"Randomly permute the dataset and then split it into\n three consecutive chunks for training, validation and test.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``\n gives the ith datapoint.\n frac_train : float\n Fraction of data to use for training. By default, we set this to be 0.8, i.e.\n 80% of the dataset is used for training.\n frac_val : float\n Fraction of data to use for validation. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for validation.\n frac_test : float\n Fraction of data to use for test. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for test.\n random_state : None, int or array_like, optional\n Random seed used to initialize the pseudo-random number generator.\n Can be any integer between 0 and 2**32 - 1 inclusive, an array\n (or other sequence) of such integers, or None (the default).\n If seed is None, then RandomState will try to read data from /dev/urandom\n (or the Windows analogue) if available or seed from the clock otherwise.\n\n Returns\n -------\n list of length 3\n Subsets for training, validation and test.\n \"\"\"\n return split_dataset(dataset, frac_list=[frac_train, frac_val, frac_test],\n shuffle=True, random_state=random_state)\n\n @staticmethod\n def k_fold_split(dataset, k=5, random_state=None, log=True):\n \"\"\"Randomly permute the dataset and then split it\n for k-fold cross validation by taking consecutive chunks.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``\n gives the ith datapoint.\n k : int\n Number of folds to use and should be no smaller than 2. Default to be 5.\n random_state : None, int or array_like, optional\n Random seed used to initialize the pseudo-random number generator.\n Can be any integer between 0 and 2**32 - 1 inclusive, an array\n (or other sequence) of such integers, or None (the default).\n If seed is None, then RandomState will try to read data from /dev/urandom\n (or the Windows analogue) if available or seed from the clock otherwise.\n log : bool\n Whether to print a message at the start of preparing each fold. 
Default to True.\n\n Returns\n -------\n list of 2-tuples\n Each element of the list represents a fold and is a 2-tuple (train_set, val_set).\n \"\"\"\n # Permute the dataset only once so that each datapoint\n # will appear once in exactly one fold.\n indices = np.random.RandomState(seed=random_state).permutation(len(dataset))\n\n return base_k_fold_split(partial(indices_split, indices=indices), dataset, k, log)\n\n# pylint: disable=I1101\nclass MolecularWeightSplitter(object):\n \"\"\"Sort molecules based on their weights and then split them.\"\"\"\n\n @staticmethod\n def molecular_weight_indices(molecules, log_every_n):\n \"\"\"Reorder molecules based on molecular weights.\n\n Parameters\n ----------\n molecules : list of rdkit.Chem.rdchem.Mol\n Pre-computed RDKit molecule instances. We expect a one-on-one\n correspondence between ``dataset.smiles`` and ``mols``, i.e.\n ``mols[i]`` corresponds to ``dataset.smiles[i]``.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed.\n\n Returns\n -------\n indices : list or ndarray\n Indices specifying the order of datapoints, which are basically\n argsort of the molecular weights.\n \"\"\"\n if log_every_n is not None:\n print('Start computing molecular weights.')\n mws = []\n for i, mol in enumerate(molecules):\n count_and_log('Computing molecular weight for compound',\n i, len(molecules), log_every_n)\n mws.append(rdMolDescriptors.CalcExactMolWt(mol))\n\n return np.argsort(mws)\n\n @staticmethod\n def train_val_test_split(dataset, mols=None, sanitize=True, frac_train=0.8,\n frac_val=0.1, frac_test=0.1, log_every_n=1000):\n \"\"\"Sort molecules based on their weights and then split them into\n three consecutive chunks for training, validation and test.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``\n gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the\n ith datapoint.\n mols : None or list of rdkit.Chem.rdchem.Mol\n None or pre-computed RDKit molecule instances. If not None, we expect a\n one-on-one correspondence between ``dataset.smiles`` and ``mols``, i.e.\n ``mols[i]`` corresponds to ``dataset.smiles[i]``. Default to None.\n sanitize : bool\n This argument only comes into effect when ``mols`` is None and decides whether\n sanitization is performed in initializing RDKit molecule instances. See\n https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.\n Default to be True.\n frac_train : float\n Fraction of data to use for training. By default, we set this to be 0.8, i.e.\n 80% of the dataset is used for training.\n frac_val : float\n Fraction of data to use for validation. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for validation.\n frac_test : float\n Fraction of data to use for test. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for test.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed. 
Default to 1000.\n\n Returns\n -------\n list of length 3\n Subsets for training, validation and test, which are all :class:`Subset` instances.\n \"\"\"\n # Perform sanity check first as molecule instance initialization and descriptor\n # computation can take a long time.\n train_val_test_sanity_check(frac_train, frac_val, frac_test)\n molecules = prepare_mols(dataset, mols, sanitize, log_every_n)\n sorted_indices = MolecularWeightSplitter.molecular_weight_indices(molecules, log_every_n)\n\n return indices_split(dataset, frac_train, frac_val, frac_test, sorted_indices)\n\n @staticmethod\n def k_fold_split(dataset, mols=None, sanitize=True, k=5, log_every_n=1000):\n \"\"\"Sort molecules based on their weights and then split them\n for k-fold cross validation by taking consecutive chunks.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``\n gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the\n ith datapoint.\n mols : None or list of rdkit.Chem.rdchem.Mol\n None or pre-computed RDKit molecule instances. If not None, we expect a\n one-on-one correspondence between ``dataset.smiles`` and ``mols``, i.e.\n ``mols[i]`` corresponds to ``dataset.smiles[i]``. Default to None.\n sanitize : bool\n This argument only comes into effect when ``mols`` is None and decides whether\n sanitization is performed in initializing RDKit molecule instances. See\n https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.\n Default to be True.\n k : int\n Number of folds to use and should be no smaller than 2. Default to be 5.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed. Default to 1000.\n\n Returns\n -------\n list of 2-tuples\n Each element of the list represents a fold and is a 2-tuple (train_set, val_set).\n \"\"\"\n molecules = prepare_mols(dataset, mols, sanitize, log_every_n)\n sorted_indices = MolecularWeightSplitter.molecular_weight_indices(molecules, log_every_n)\n\n return base_k_fold_split(partial(indices_split, indices=sorted_indices), dataset, k,\n log=(log_every_n is not None))\n\n# pylint: disable=W0702\nclass ScaffoldSplitter(object):\n \"\"\"Group molecules based on their Bemis-Murcko scaffolds and then split the groups.\n\n Group molecules so that all molecules in a group have a same scaffold (see reference).\n The dataset is then split at the level of groups.\n\n References\n ----------\n Bemis, G. W.; Murcko, M. A. “The Properties of Known Drugs.\n 1. Molecular Frameworks.” J. Med. Chem. 39:2887-93 (1996).\n \"\"\"\n\n @staticmethod\n def get_ordered_scaffold_sets(molecules, include_chirality, log_every_n):\n \"\"\"Group molecules based on their Bemis-Murcko scaffolds and\n order these groups based on their sizes.\n\n The order is decided by comparing the size of groups, where groups with a larger size\n are placed before the ones with a smaller size.\n\n Parameters\n ----------\n molecules : list of rdkit.Chem.rdchem.Mol\n Pre-computed RDKit molecule instances. 
We expect a one-on-one\n correspondence between ``dataset.smiles`` and ``mols``, i.e.\n ``mols[i]`` corresponds to ``dataset.smiles[i]``.\n include_chirality : bool\n Whether to consider chirality in computing scaffolds.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed.\n\n Returns\n -------\n scaffold_sets : list\n Each element of the list is a list of int,\n representing the indices of compounds with a same scaffold.\n \"\"\"\n if log_every_n is not None:\n print('Start computing Bemis-Murcko scaffolds.')\n scaffolds = defaultdict(list)\n for i, mol in enumerate(molecules):\n count_and_log('Computing Bemis-Murcko for compound',\n i, len(molecules), log_every_n)\n # For mols that have not been sanitized, we need to compute their ring information\n try:\n FastFindRings(mol)\n mol_scaffold = MurckoScaffold.MurckoScaffoldSmiles(\n mol=mol, includeChirality=include_chirality)\n # Group molecules that have the same scaffold\n scaffolds[mol_scaffold].append(i)\n except:\n print('Failed to compute the scaffold for molecule {:d} '\n 'and it will be excluded.'.format(i + 1))\n\n # Order groups of molecules by first comparing the size of groups\n # and then the index of the first compound in the group.\n scaffold_sets = [\n scaffold_set for (scaffold, scaffold_set) in sorted(\n scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)\n ]\n\n return scaffold_sets\n\n @staticmethod\n def train_val_test_split(dataset, mols=None, sanitize=True, include_chirality=False,\n frac_train=0.8, frac_val=0.1, frac_test=0.1, log_every_n=1000):\n \"\"\"Split the dataset into training, validation and test set based on molecular scaffolds.\n\n This spliting method ensures that molecules with a same scaffold will be collectively\n in only one of the training, validation or test set. As a result, the fraction\n of dataset to use for training and validation tend to be smaller than ``frac_train``\n and ``frac_val``, while the fraction of dataset to use for test tends to be larger\n than ``frac_test``.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``\n gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the\n ith datapoint.\n mols : None or list of rdkit.Chem.rdchem.Mol\n None or pre-computed RDKit molecule instances. If not None, we expect a\n one-on-one correspondence between ``dataset.smiles`` and ``mols``, i.e.\n ``mols[i]`` corresponds to ``dataset.smiles[i]``. Default to None.\n sanitize : bool\n This argument only comes into effect when ``mols`` is None and decides whether\n sanitization is performed in initializing RDKit molecule instances. See\n https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.\n Default to True.\n include_chirality : bool\n Whether to consider chirality in computing scaffolds. Default to False.\n frac_train : float\n Fraction of data to use for training. By default, we set this to be 0.8, i.e.\n 80% of the dataset is used for training.\n frac_val : float\n Fraction of data to use for validation. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for validation.\n frac_test : float\n Fraction of data to use for test. 
By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for test.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed. Default to 1000.\n\n Returns\n -------\n list of length 3\n Subsets for training, validation and test, which are all :class:`Subset` instances.\n \"\"\"\n # Perform sanity check first as molecule related computation can take a long time.\n train_val_test_sanity_check(frac_train, frac_val, frac_test)\n molecules = prepare_mols(dataset, mols, sanitize)\n scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(\n molecules, include_chirality, log_every_n)\n\n train_indices, val_indices, test_indices = [], [], []\n train_cutoff = int(frac_train * len(molecules))\n val_cutoff = int((frac_train + frac_val) * len(molecules))\n for group_indices in scaffold_sets:\n if len(train_indices) + len(group_indices) > train_cutoff:\n if len(train_indices) + len(val_indices) + len(group_indices) > val_cutoff:\n test_indices.extend(group_indices)\n else:\n val_indices.extend(group_indices)\n else:\n train_indices.extend(group_indices)\n\n return [Subset(dataset, train_indices),\n Subset(dataset, val_indices),\n Subset(dataset, test_indices)]\n\n @staticmethod\n def k_fold_split(dataset, mols=None, sanitize=True,\n include_chirality=False, k=5, log_every_n=1000):\n \"\"\"Group molecules based on their scaffolds and sort groups based on their sizes.\n The groups are then split for k-fold cross validation.\n\n Same as usual k-fold splitting methods, each molecule will appear only once\n in the validation set among all folds. In addition, this method ensures that\n molecules with a same scaffold will be collectively in either the training\n set or the validation set for each fold.\n\n Note that the folds can be highly imbalanced depending on the\n scaffold distribution in the dataset.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``\n gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the\n ith datapoint.\n mols : None or list of rdkit.Chem.rdchem.Mol\n None or pre-computed RDKit molecule instances. If not None, we expect a\n one-on-one correspondence between ``dataset.smiles`` and ``mols``, i.e.\n ``mols[i]`` corresponds to ``dataset.smiles[i]``. Default to None.\n sanitize : bool\n This argument only comes into effect when ``mols`` is None and decides whether\n sanitization is performed in initializing RDKit molecule instances. See\n https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.\n Default to True.\n include_chirality : bool\n Whether to consider chirality in computing scaffolds. Default to False.\n k : int\n Number of folds to use and should be no smaller than 2. Default to be 5.\n log_every_n : None or int\n Molecule related computation can take a long time for a large dataset and we want\n to learn the progress of processing. This can be done by printing a message whenever\n a batch of ``log_every_n`` molecules have been processed. If None, no messages will\n be printed. 
Default to 1000.\n\n Returns\n -------\n list of 2-tuples\n Each element of the list represents a fold and is a 2-tuple (train_set, val_set).\n \"\"\"\n assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(k)\n\n molecules = prepare_mols(dataset, mols, sanitize)\n scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(\n molecules, include_chirality, log_every_n)\n\n # k buckets that form a relatively balanced partition of the dataset\n index_buckets = [[] for _ in range(k)]\n for group_indices in scaffold_sets:\n bucket_chosen = int(np.argmin([len(bucket) for bucket in index_buckets]))\n index_buckets[bucket_chosen].extend(group_indices)\n\n all_folds = []\n for i in range(k):\n if log_every_n is not None:\n print('Processing fold {:d}/{:d}'.format(i + 1, k))\n train_indices = list(chain.from_iterable(index_buckets[:i] + index_buckets[i + 1:]))\n val_indices = index_buckets[i]\n all_folds.append((Subset(dataset, train_indices), Subset(dataset, val_indices)))\n\n return all_folds\n\nclass SingleTaskStratifiedSplitter(object):\n \"\"\"Splits the dataset by stratification on a single task.\n\n We sort the molecules based on their label values for a task and then repeatedly\n take buckets of datapoints to augment the training, validation and test subsets.\n \"\"\"\n\n @staticmethod\n def train_val_test_split(dataset, labels, task_id, frac_train=0.8, frac_val=0.1,\n frac_test=0.1, bucket_size=10, random_state=None):\n \"\"\"Split the dataset into training, validation and test subsets as stated above.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``\n gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the\n ith datapoint.\n labels : tensor of shape (N, T)\n Dataset labels all tasks. N for the number of datapoints and T for the number\n of tasks.\n task_id : int\n Index for the task.\n frac_train : float\n Fraction of data to use for training. By default, we set this to be 0.8, i.e.\n 80% of the dataset is used for training.\n frac_val : float\n Fraction of data to use for validation. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for validation.\n frac_test : float\n Fraction of data to use for test. By default, we set this to be 0.1, i.e.\n 10% of the dataset is used for test.\n bucket_size : int\n Size of bucket of datapoints. 
Default to 10.\n random_state : None, int or array_like, optional\n Random seed used to initialize the pseudo-random number generator.\n Can be any integer between 0 and 2**32 - 1 inclusive, an array\n (or other sequence) of such integers, or None (the default).\n If seed is None, then RandomState will try to read data from /dev/urandom\n (or the Windows analogue) if available or seed from the clock otherwise.\n\n Returns\n -------\n list of length 3\n Subsets for training, validation and test, which are all :class:`Subset` instances.\n \"\"\"\n train_val_test_sanity_check(frac_train, frac_val, frac_test)\n\n if random_state is not None:\n np.random.seed(random_state)\n\n if not isinstance(labels, np.ndarray):\n labels = F.asnumpy(labels)\n task_labels = labels[:, task_id]\n sorted_indices = np.argsort(task_labels)\n\n train_bucket_cutoff = int(np.round(frac_train * bucket_size))\n val_bucket_cutoff = int(np.round(frac_val * bucket_size)) + train_bucket_cutoff\n\n train_indices, val_indices, test_indices = [], [], []\n\n while sorted_indices.shape[0] >= bucket_size:\n current_batch, sorted_indices = np.split(sorted_indices, [bucket_size])\n shuffled = np.random.permutation(range(bucket_size))\n train_indices.extend(\n current_batch[shuffled[:train_bucket_cutoff]].tolist())\n val_indices.extend(\n current_batch[shuffled[train_bucket_cutoff:val_bucket_cutoff]].tolist())\n test_indices.extend(\n current_batch[shuffled[val_bucket_cutoff:]].tolist())\n\n # Place rest samples in the training set.\n train_indices.extend(sorted_indices.tolist())\n\n return [Subset(dataset, train_indices),\n Subset(dataset, val_indices),\n Subset(dataset, test_indices)]\n\n @staticmethod\n def k_fold_split(dataset, labels, task_id, k=5, log=True):\n \"\"\"Sort molecules based on their label values for a task and then split them\n for k-fold cross validation by taking consecutive chunks.\n\n Parameters\n ----------\n dataset\n We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``\n gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the\n ith datapoint.\n labels : tensor of shape (N, T)\n Dataset labels all tasks. N for the number of datapoints and T for the number\n of tasks.\n task_id : int\n Index for the task.\n k : int\n Number of folds to use and should be no smaller than 2. Default to be 5.\n log : bool\n Whether to print a message at the start of preparing each fold.\n\n Returns\n -------\n list of 2-tuples\n Each element of the list represents a fold and is a 2-tuple (train_set, val_set).\n \"\"\"\n if not isinstance(labels, np.ndarray):\n labels = F.asnumpy(labels)\n task_labels = labels[:, task_id]\n sorted_indices = np.argsort(task_labels).tolist()\n\n return base_k_fold_split(partial(indices_split, indices=sorted_indices), dataset, k, log)\n"
] |
[
[
"numpy.split",
"numpy.allclose",
"numpy.random.seed",
"numpy.round",
"numpy.concatenate",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.random.RandomState"
]
] |
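A hedged usage sketch for the ScaffoldSplitter defined in the row above. The import path dgllife.utils and an RDKit/DGL installation are assumptions; the toy dataset only needs __len__, __getitem__ and a .smiles attribute, as the docstrings state.

# Illustrative sketch only: exercising ScaffoldSplitter.train_val_test_split on a toy dataset.
from dgllife.utils import ScaffoldSplitter  # assumed import path for the module above

class ToySmilesDataset(object):
    """Minimal dataset exposing len(), indexing and a .smiles list, as the splitters expect."""
    def __init__(self, smiles):
        self.smiles = smiles
    def __getitem__(self, item):
        return self.smiles[item]
    def __len__(self):
        return len(self.smiles)

dataset = ToySmilesDataset(['CCO', 'CCN', 'CC(=O)O', 'CCCC', 'CCOCC', 'CC(C)O', 'CCCCO',
                            'c1ccccc1', 'c1ccccc1O', 'c1ccncc1'])
train_set, val_set, test_set = ScaffoldSplitter.train_val_test_split(
    dataset, frac_train=0.8, frac_val=0.1, frac_test=0.1, log_every_n=None)
# Molecules sharing a Bemis-Murcko scaffold land in the same subset, so the realized
# fractions can deviate from 0.8/0.1/0.1.
print(len(train_set), len(val_set), len(test_set))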
peferso/pegaso-training
|
[
"e1c99be63b58053d0de7f6a6e392bf08c42c7337"
] |
[
"src/train-random-forest.py"
] |
[
"import os\nimport pymysql\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport time\nimport logging\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nimport joblib\nfrom multiprocessing import Pool\nimport pickle\nfrom random import randint\n\n\ndef fetch_database_data():\n time_start = time.time()\n print('Start')\n connection = pymysql.connect(host=os.environ['DBHOST'],\n user=os.environ['DBUSER'],\n passwd=os.environ['DBPASS'],\n db=\"pegaso_db\",\n charset='utf8')\n sql_query = pd.read_sql_query(\"\"\"SELECT \n brand, LTRIM(model), price_c, kilometers, power, \n doors, professional_vendor, automatic_gearbox, year, batch_ts \n FROM \n raw_data\n WHERE\n brand IN (SELECT nb.brand FROM brands_count nb WHERE nb.num_cars>1000)\n ;\"\"\", connection)\n dfrd = pd.DataFrame(sql_query,\n columns=['brand', # One-hot\n 'model', # One-hot\n 'price_c',\n 'kilometers',\n 'power',\n 'doors',\n 'professional_vendor', # One-hot\n 'automatic_gearbox', # One-hot\n 'year',\n 'batch_ts'\n ])\n time_end = time.time()\n print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')\n return dfrd\n\n\ndef build_features(df):\n time_start = time.time()\n print('Start')\n\n print('Compute new variable \\'years\\'...')\n l_years = []\n for index, row in df.iterrows():\n years = row['batch_ts'].year - int(row['year'])\n l_years.append(years)\n\n df['years'] = l_years\n print('Compute new variable \\'years\\'. Done.')\n\n print('Dropping useless columns...')\n df = df.drop('batch_ts', axis=1)\n df = df.drop('year', axis=1)\n df = df.drop('professional_vendor', axis=1)\n df = df.drop('automatic_gearbox', axis=1)\n df = df.drop('model', axis=1)\n print('Dropping useless columns. Done.')\n\n print('Dropping rows with \\'nans\\'...')\n df = df.dropna()\n print('Dropping rows with \\'nans\\'. Done.')\n\n l_avprice = []\n\n print('Getting average price of each car based on brand...')\n t1 = time.time()\n irow = 0\n for index, row in df.iterrows():\n t2 = time.time()\n brand = row['brand']\n avprice = 1 # np.mean(df[df['brand'] == brand]['price_c'])\n l_avprice.append(avprice)\n if t2 - t1 > 10:\n print(' ' + str(index) + ' rows processed. ' +\n str(round(t2 - time_start, 2)) + ' seconds elapsed - ' +\n str(round((df.shape[0] - irow) / (index - irow) * (t2 - t1), 2)) +\n ' seconds to finish...')\n t1 = time.time()\n print('Getting average price of each car based on brand. Done.')\n\n df_baseline = pd.DataFrame(data={'av_price': l_avprice, 'price_c': df['price_c']})\n\n # Shuffle rows and keep apart a set to finally evaluate accuracy\n df.sample(frac=1).reset_index(drop=True)\n\n # One-hot encoding TO TEST https://towardsdatascience.com/random-forest-in-python-24d0893d51c0\n features = pd.get_dummies(df)\n time_end = time.time()\n print('End. 
Elapsed time: ' + str(time_end - time_start) + ' seconds.')\n return features, df_baseline\n\n\ndef convert_to_arrays(features):\n # Labels are the values we want to predict\n labels = np.array(features['price_c']) # Remove the labels from the features\n # axis 1 refers to the columns\n features = features.drop('price_c', axis=1) # Saving feature names for later use\n feature_list = list(features.columns) # Convert to numpy array\n features = np.array(features)\n return feature_list, features, labels\n\n\ndef initial_checks(data_folder):\n time_start = time.time()\n print('Start')\n if not os.path.exists(data_folder):\n logging.warning('Folder ' + data_folder + 'does not exist: creating...')\n os.makedirs(data_folder)\n else:\n print('Folder \\'' + data_folder + '\\' exists: not creating.')\n print('Folder \\'' + data_folder + '\\' contains the following files:')\n ic = 0\n for i in os.listdir(data_folder):\n ic += 1\n print('File ' + str(ic) + ': \\'' + str(i) + '\\'')\n time_end = time.time()\n print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')\n\n\ndef write_report(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms, report_file):\n dfreport = pd.DataFrame(list(zip(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms)),\n columns=['estimators', 'max_features', 'average_accuracy', 'average_mape', 'folds',\n 'train_time'])\n dfreport.to_csv(report_file, index=None, header=True)\n\n\nclass Model:\n\n def __init__(self, features, labels, id):\n logging.basicConfig(\n format=\"%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s\",\n level=logging.INFO)\n # Hyperparameters\n self.n_estimators = 5\n self.random_state = None\n self.max_features = 0.75\n self.criterion = 'squared_error'\n # Input data\n self.features = features\n self.labels = labels\n # train set data\n self.train_features = None\n self.train_labels = None\n self.train_indx = None\n # test set data\n self.test_features = None\n self.test_labels = None\n self.test_indx = None\n\n self.id = id\n\n def split_train_and_test_sets(self):\n random_state = randint(0, 42)\n self.train_features, self.test_features, self.train_labels, self.test_labels, self.train_indx, self.test_indx = \\\n train_test_split(self.features,\n self.labels,\n np.arange(self.features.shape[0]),\n test_size=0.20,\n random_state=random_state)\n\n def set_hyperparameters(self, n_estimators, random_state, max_features, criterion):\n self.n_estimators = n_estimators\n self.random_state = random_state\n self.max_features = max_features\n self.criterion = criterion\n\n def train_a_random_forest(self):\n self.split_train_and_test_sets()\n rf = RandomForestRegressor(n_estimators=self.n_estimators,\n criterion=self.criterion,\n max_features=self.max_features,\n random_state=self.random_state)\n rf.fit(self.train_features, self.train_labels)\n predictions = rf.predict(self.test_features)\n rf = None\n mape = round(np.mean(abs((predictions - self.test_labels) / self.test_labels * 100.0)), 2)\n return mape\n\ndef cross_validation_training(model):\n return model.train_a_random_forest()\n\n# Variables\nTHIS_SCRIPT_PATH = os.environ['PEGASO_TRAIN_DIR']\nexecution_timestamp = datetime.datetime.now()\nmodel_folder = 'models'\nmodel_file = model_folder + '/rf_' + str(execution_timestamp).replace(':', '-').replace('.', '').replace(' ', '_')\nreport_file = model_file + '.csv'\ninitial_checks(model_folder)\n\ndf = fetch_database_data()\n\nfeatures, df_baseline = build_features(df)\n\nfeature_list, features, labels = 
convert_to_arrays(features)\n\nf = open(model_file + '-feature_list.txt', 'w')\ns1 = '\\n'.join(feature_list)\nf.write(s1)\nf.close()\n\nf = open(model_file + '-feature_list.list', 'wb')\npickle.dump(feature_list, f)\nf.close()\n\n# Split the data into features + evaluation features. The latter will not be used in training nor hyperparameters tuning\nfeatures, eval_features, labels, eval_labels, indx, eval_indx = train_test_split(features, labels,\n np.arange(\n features.shape[\n 0]),\n test_size=0.30,\n random_state=42)\n\nprint(' * Size of features set: ' + str(features.shape[0]))\nprint(' * Size of evaluation set: ' + str(eval_features.shape[0]))\n\ntime_start = time.time()\nprint('Computing baseline mean absolute error (MAE) and mean absolute percentage error (MAPE) on evaluation set...')\nit = 0\nbaseline_mae = 0.0\nbaseline_mape = 0.0\nfor i in eval_indx:\n baseline_mae += abs(df_baseline.iloc[i, 0] - df_baseline.iloc[i, 1]) / df_baseline.shape[0]\n baseline_mape += abs(df_baseline.iloc[i, 0] / df_baseline.iloc[i, 1] - 1.0) * 100.0 / df_baseline.shape[0]\n it += 1\nprint('Computing baseline mean absolute error (MAE) and mean absolute percentage error (MAPE) on evaluation set. Done.')\nprint(' * baseline Mean Absolute Error (MAE): ' + str(round(baseline_mae, 2)) + ' Euros.')\nprint(' * baseline Mean Absolute Percentage Error (MAPE): ' + str(round(baseline_mape, 2)) + ' %.')\nprint(' * baseline Accuracy: ' + str(round(100 - baseline_mape, 2)) + ' %.')\n\nn_est_list = range(1, 501, 1)\nmax_features_list = [x / 100.0 for x in range(10, 105, 5)]\ncriterion = 'squared_error'\nrandom_state = None\nmape_min = 100.0\ntimes, rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms = ([] for i in range(7))\ninterations_remaining = len(n_est_list) * len(max_features_list)\n\nfolds = 16\ncpu_cores = 4\nmodels = []\nfor i in range(1, folds + 1):\n model = Model(features, labels, i)\n models.append(model)\n\nprint('Computing grid of parameters:')\nf = open(model_file + '-grid_cross_val_data.csv', 'w')\nprint('n_estimators', 'max_features', *['mape_fold' + str(i) for i in range(1, folds + 1)]\n , 'average_mape', 'stderr_mape', sep=',', end='\\n', file=f)\nf.close()\nfor n_estimators in n_est_list:\n for max_features in max_features_list:\n\n #print('\\tn_estimators:' + str(n_estimators))\n #print('\\tmax_features:' + str(max_features))\n #print('\\tCross validating over ' + str(folds) + '-folds...')\n #print('\\tFold computations parallelized over ' + str(cpu_cores) + ' cores...')\n\n #print('\\t\\tMultiprocessing begins...')\n ti = time.time()\n for model in models:\n model.set_hyperparameters(n_estimators, random_state, max_features, criterion)\n p = Pool(processes=cpu_cores)\n result = p.map(cross_validation_training, models)\n p.close()\n p.join()\n tf = time.time()\n #print('\\t\\tMultiprocessing ends.')\n #print('\\t\\t', result)\n #print('\\tCross validation finished.')\n #print('\\tElapsed:' + str(tf - ti))\n\n mape = np.average(np.array(result))\n mape_var = np.std(np.array(result))\n\n f = open(model_file + '-grid_cross_val_data.csv', 'a')\n print(n_estimators, max_features, *result, mape, mape_var, sep=',', end='\\n', file=f)\n f.close()\n\n if mape < mape_min:\n mape_min = mape\n n_estimators_min = n_estimators\n max_features_min = max_features\n\n rep_est.append(n_estimators)\n rep_mft.append(max_features)\n mape_list.append(round(mape, 4))\n prec_list.append(round(100.0 - mape, 4))\n folds_list.append(folds)\n rep_tms.append(tf - ti)\n\n print('\\tacc.', round(100.0 - mape, 2), 
n_estimators, max_features,\n              ' - mape ', mape, mape_var,\n              ' --- max acc.', round(100.0 - mape_min, 2), n_estimators_min, max_features_min)\n        #print('\tMinimum average mape across folds found: ' + str(mape_min))\n        #print('\t       max. accuracy: ' + str(100.0 - mape_min))\n        #print('\t       n_estimators: ' + str(n_estimators_min))\n        #print('\t       max_features: ' + str(max_features_min))\n#\n        write_report(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms, report_file)\n\nprint('\nTraining the best model.\n')\nrf = RandomForestRegressor(n_estimators=n_estimators_min, criterion=criterion, max_features=max_features_min, random_state=random_state)\nprint('Training begins...')\ntime_start = time.time()\n# Fit the best hyperparameters on the full (non-evaluation) training pool.\nrf.fit(features, labels)\ntime_end = time.time()\nprint('Training ends. Elapsed time: ' + str(time_end - time_start) + ' seconds.')\n\nprint('Predicting evaluation data...')\npredictions = rf.predict(eval_features)\nprint('Predicting evaluation data. Done.')\nprint('Computing MAE and MAPE on evaluation set...')\nmae = round(np.mean(abs(predictions - eval_labels)), 2)\nmape = round(np.mean(abs((predictions - eval_labels) / eval_labels * 100.0)), 2)\nprint('Computing MAE and MAPE on evaluation set. Done.')\nprint(' * Mean Absolute Error (MAE): ' + str(round(mae, 2)) + ' Euros.')\nprint(' * Mean Absolute Percentage Error (MAPE): ' + str(round(mape, 2)) + ' %.')\nprint(' * Accuracy: ' + str(round(100 - mape, 2)) + ' %.')\n\nprint('Export the model...')\njoblib.dump(rf, model_file + \".joblib\", compress=0)\nprint('Export the model. Done.')\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"pandas.read_sql_query",
"numpy.arange",
"pandas.DataFrame",
"numpy.array",
"pandas.get_dummies"
]
] |
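The hand-rolled grid search above (nested loops plus a multiprocessing pool over folds) can be expressed more compactly with scikit-learn's GridSearchCV. The sketch below is an editorial alternative on synthetic stand-in data, not the repository's script; it assumes scikit-learn >= 1.0 for the 'squared_error' criterion and the MAPE scorer string.

# Editorial sketch: the same n_estimators/max_features grid search with cross validation,
# using GridSearchCV and a MAPE-based scorer on synthetic stand-in data.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV, train_test_split

rng = np.random.RandomState(42)
X = rng.rand(200, 4)                                                       # stand-in features
y = 20000.0 * X[:, 0] + 5000.0 * X[:, 1] + 500.0 * rng.rand(200) + 1000.0  # stand-in for price_c

X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.30, random_state=42)
grid = GridSearchCV(RandomForestRegressor(criterion='squared_error'),
                    param_grid={'n_estimators': [5, 50, 100],
                                'max_features': [0.25, 0.50, 0.75]},
                    scoring='neg_mean_absolute_percentage_error',
                    cv=4, n_jobs=4)
grid.fit(X_train, y_train)

best_rf = grid.best_estimator_
predictions = best_rf.predict(X_eval)
mape = np.mean(np.abs((predictions - y_eval) / y_eval)) * 100.0
print(grid.best_params_, 'accuracy: ' + str(round(100.0 - mape, 2)) + ' %')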
AlphaPlusTT/nerf-w
|
[
"c56589df46b80077eb9e0bfb29b023490b0a7fa1"
] |
[
"temp.py"
] |
[
"import torch\nfrom torch.utils.data import Dataset\nimport glob\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nfrom PIL import Image\nfrom torchvision import transforms as T\n\nfrom datasets.ray_utils import *\nfrom datasets.colmap_utils import \\\n read_cameras_binary, read_images_binary, read_points3d_binary\n\n\nclass PhototourismDataset(Dataset):\n def __init__(self, root_dir, split='train', img_downscale=1, val_num=1, use_cache=False):\n \"\"\"\n img_downscale: how much scale to downsample the training images.\n The original image sizes are around 500~100, so value of 1 or 2\n are recommended.\n ATTENTION! Value of 1 will consume large CPU memory,\n about 40G for brandenburg gate.\n val_num: number of val images (used for multigpu, validate same image for all gpus)\n use_cache: during data preparation, use precomputed rays (useful to accelerate\n data loading, especially for multigpu!)\n \"\"\"\n self.root_dir = root_dir\n self.split = split\n assert img_downscale >= 1, 'image can only be downsampled, please set img_downscale>=1!'\n self.img_downscale = img_downscale\n if split == 'val': # image downscale=1 will cause OOM in val mode\n self.img_downscale = max(2, self.img_downscale)\n self.val_num = max(1, val_num) # at least 1\n self.use_cache = use_cache\n self.define_transforms()\n\n self.read_meta()\n self.white_back = False\n\n def read_meta(self):\n # read all files in the tsv first (split to train and test later)\n tsv = glob.glob(os.path.join(self.root_dir, '*.tsv'))[0]\n self.scene_name = os.path.basename(tsv)[:-4]\n self.files = pd.read_csv(tsv, sep='\\t')\n self.files = self.files[~self.files['id'].isnull()] # remove data without id\n self.files.reset_index(inplace=True, drop=True)\n\n # Step 1. load image paths\n # Attention! 
The 'id' column in the tsv is BROKEN, don't use it!!!!\n # Instead, read the id from images.bin using image file name!\n if self.use_cache:\n with open(os.path.join(self.root_dir, f'cache/img_ids.pkl'), 'rb') as f:\n self.img_ids = pickle.load(f)\n with open(os.path.join(self.root_dir, f'cache/image_paths.pkl'), 'rb') as f:\n self.image_paths = pickle.load(f)\n else:\n imdata = read_images_binary(os.path.join(self.root_dir, 'dense/sparse/images.bin'))\n img_path_to_id = {}\n for v in imdata.values():\n img_path_to_id[v.name] = v.id\n self.img_ids = []\n self.image_paths = {} # {id: filename}\n for filename in list(self.files['filename']):\n id_ = img_path_to_id[filename]\n self.image_paths[id_] = filename\n self.img_ids += [id_]\n pass\n\n # Step 2: read and rescale camera intrinsics\n if self.use_cache:\n with open(os.path.join(self.root_dir, f'cache/Ks{self.img_downscale}.pkl'), 'rb') as f:\n self.Ks = pickle.load(f)\n else:\n self.Ks = {} # {id: K}\n camdata = read_cameras_binary(os.path.join(self.root_dir, 'dense/sparse/cameras.bin'))\n for id_ in self.img_ids:\n K = np.zeros((3, 3), dtype=np.float32)\n cam = camdata[id_]\n img_w, img_h = int(cam.params[2] * 2), int(cam.params[3] * 2)\n img_w_, img_h_ = img_w // self.img_downscale, img_h // self.img_downscale\n K[0, 0] = cam.params[0] * img_w_ / img_w # fx\n K[1, 1] = cam.params[1] * img_h_ / img_h # fy\n K[0, 2] = cam.params[2] * img_w_ / img_w # cx\n K[1, 2] = cam.params[3] * img_h_ / img_h # cy\n K[2, 2] = 1\n # print(K)\n pass\n self.Ks[id_] = K\n\n # Step 3: read c2w poses (of the images in tsv file only) and correct the order\n if self.use_cache:\n self.poses = np.load(os.path.join(self.root_dir, 'cache/poses.npy'))\n else:\n w2c_mats = []\n bottom = np.array([0, 0, 0, 1.]).reshape(1, 4)\n for id_ in self.img_ids:\n im = imdata[id_]\n R = im.qvec2rotmat()\n t = im.tvec.reshape(3, 1)\n w2c_mats += [np.concatenate([np.concatenate([R, t], 1), bottom], 0)]\n w2c_mats = np.stack(w2c_mats, 0) # (N_images, 4, 4)\n self.poses = np.linalg.inv(w2c_mats)[:, :3] # (N_images, 3, 4)\n # Original poses has rotation in form \"right down front\", change to \"right up back\"\n self.poses[..., 1:3] *= -1\n\n # Step 4: correct scale\n if self.use_cache:\n self.xyz_world = np.load(os.path.join(self.root_dir, 'cache/xyz_world.npy'))\n with open(os.path.join(self.root_dir, f'cache/nears.pkl'), 'rb') as f:\n self.nears = pickle.load(f)\n with open(os.path.join(self.root_dir, f'cache/fars.pkl'), 'rb') as f:\n self.fars = pickle.load(f)\n else:\n pts3d = read_points3d_binary(os.path.join(self.root_dir, 'dense/sparse/points3D.bin'))\n self.xyz_world = np.array([pts3d[p_id].xyz for p_id in pts3d])\n xyz_world_h = np.concatenate([self.xyz_world, np.ones((len(self.xyz_world), 1))], -1)\n # Compute near and far bounds for each image individually\n self.nears, self.fars = {}, {} # {id_: distance}\n for i, id_ in enumerate(self.img_ids):\n xyz_cam_i = (xyz_world_h @ w2c_mats[i].T)[:, :3] # xyz in the ith cam coordinate\n xyz_cam_i = xyz_cam_i[xyz_cam_i[:, 2] > 0] # filter out points that lie behind the cam\n self.nears[id_] = np.percentile(xyz_cam_i[:, 2], 0.1)\n self.fars[id_] = np.percentile(xyz_cam_i[:, 2], 99.9)\n\n max_far = np.fromiter(self.fars.values(), np.float32).max()\n scale_factor = max_far / 5 # so that the max far is scaled to 5\n self.poses[..., 3] /= scale_factor\n for k in self.nears:\n self.nears[k] /= scale_factor\n for k in self.fars:\n self.fars[k] /= scale_factor\n self.xyz_world /= scale_factor\n self.poses_dict = {id_: self.poses[i] for 
i, id_ in enumerate(self.img_ids)}\n\n # Step 5. split the img_ids (the number of images is verfied to match that in the paper)\n self.img_ids_train = [id_ for i, id_ in enumerate(self.img_ids)\n if self.files.loc[i, 'split'] == 'train']\n self.img_ids_test = [id_ for i, id_ in enumerate(self.img_ids)\n if self.files.loc[i, 'split'] == 'test']\n self.N_images_train = len(self.img_ids_train)\n self.N_images_test = len(self.img_ids_test)\n\n if self.split == 'train': # create buffer of all rays and rgb data\n if self.use_cache:\n all_rays = np.load(os.path.join(self.root_dir,\n f'cache/rays{self.img_downscale}.npy'))\n self.all_rays = torch.from_numpy(all_rays)\n all_rgbs = np.load(os.path.join(self.root_dir,\n f'cache/rgbs{self.img_downscale}.npy'))\n self.all_rgbs = torch.from_numpy(all_rgbs)\n else:\n self.all_rays = []\n self.all_rgbs = []\n for id_ in self.img_ids_train:\n c2w = torch.FloatTensor(self.poses_dict[id_])\n\n img = Image.open(os.path.join(self.root_dir, 'dense/images',\n self.image_paths[id_])).convert('RGB')\n img_w, img_h = img.size\n if self.img_downscale > 1:\n img_w = img_w // self.img_downscale\n img_h = img_h // self.img_downscale\n img = img.resize((img_w, img_h), Image.LANCZOS)\n img = self.transform(img) # (3, h, w)\n img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB\n self.all_rgbs += [img]\n\n directions = get_ray_directions(img_h, img_w, self.Ks[id_])\n rays_o, rays_d = get_rays(directions, c2w)\n rays_t = id_ * torch.ones(len(rays_o), 1)\n\n self.all_rays += [torch.cat([rays_o, rays_d,\n self.nears[id_] * torch.ones_like(rays_o[:, :1]),\n self.fars[id_] * torch.ones_like(rays_o[:, :1]),\n rays_t],\n 1)] # (h*w, 8)\n\n self.all_rays = torch.cat(self.all_rays, 0) # ((N_images-1)*h*w, 8)\n self.all_rgbs = torch.cat(self.all_rgbs, 0) # ((N_images-1)*h*w, 3)\n\n elif self.split in ['val', 'test_train']: # use the first image as val image (also in train)\n self.val_id = self.img_ids_train[0]\n\n else: # for testing, create a parametric rendering path\n # test poses and appearance index are defined in eval.py\n pass\n\n def define_transforms(self):\n self.transform = T.ToTensor()\n\n def __len__(self):\n if self.split == 'train':\n return len(self.all_rays)\n if self.split == 'test_train':\n return self.N_images_train\n if self.split == 'val':\n return self.val_num\n return len(self.poses_test)\n\n def __getitem__(self, idx):\n if self.split == 'train': # use data in the buffers\n sample = {'rays': self.all_rays[idx, :8],\n 'ts': self.all_rays[idx, 8].long(),\n 'rgbs': self.all_rgbs[idx]}\n\n elif self.split in ['val', 'test_train']:\n sample = {}\n if self.split == 'val':\n id_ = self.val_id\n else:\n id_ = self.img_ids_train[idx]\n sample['c2w'] = c2w = torch.FloatTensor(self.poses_dict[id_])\n\n img = Image.open(os.path.join(self.root_dir, 'dense/images',\n self.image_paths[id_])).convert('RGB')\n img_w, img_h = img.size\n if self.img_downscale > 1:\n img_w = img_w // self.img_downscale\n img_h = img_h // self.img_downscale\n img = img.resize((img_w, img_h), Image.LANCZOS)\n img = self.transform(img) # (3, h, w)\n img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB\n sample['rgbs'] = img\n\n directions = get_ray_directions(img_h, img_w, self.Ks[id_])\n rays_o, rays_d = get_rays(directions, c2w)\n rays = torch.cat([rays_o, rays_d,\n self.nears[id_] * torch.ones_like(rays_o[:, :1]),\n self.fars[id_] * torch.ones_like(rays_o[:, :1])],\n 1) # (h*w, 8)\n sample['rays'] = rays\n sample['ts'] = id_ * torch.ones(len(rays), dtype=torch.long)\n sample['img_wh'] = 
torch.LongTensor([img_w, img_h])\n\n else:\n sample = {}\n sample['c2w'] = c2w = torch.FloatTensor(self.poses_test[idx])\n directions = get_ray_directions(self.test_img_h, self.test_img_w, self.test_K)\n rays_o, rays_d = get_rays(directions, c2w)\n near, far = 0, 5\n rays = torch.cat([rays_o, rays_d,\n near * torch.ones_like(rays_o[:, :1]),\n far * torch.ones_like(rays_o[:, :1])],\n 1)\n sample['rays'] = rays\n sample['ts'] = self.test_appearance_idx * torch.ones(len(rays), dtype=torch.long)\n sample['img_wh'] = torch.LongTensor([self.test_img_w, self.test_img_h])\n\n return sample\n\n\nif __name__ == '__main__':\n data = PhototourismDataset('/home/zed/data/nerf/brandenburg_gate')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"torch.LongTensor",
"pandas.read_csv",
"torch.cat",
"numpy.linalg.inv",
"torch.from_numpy",
"numpy.stack",
"numpy.percentile",
"numpy.concatenate",
"torch.FloatTensor",
"numpy.array",
"numpy.zeros",
"torch.ones_like"
]
] |
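To make Step 3 of read_meta() in the row above concrete, here is a small self-contained numpy sketch of how a world-to-camera matrix built from (R, t) is inverted into the camera-to-world pose that gets stored, including the axis-convention flip. The values are toy placeholders, not COLMAP output.

# Toy illustration of the pose handling in Step 3 above (values are made up).
import numpy as np

R = np.eye(3)                             # stand-in for im.qvec2rotmat()
t = np.array([[1.0], [2.0], [3.0]])       # stand-in for im.tvec.reshape(3, 1)
bottom = np.array([0, 0, 0, 1.]).reshape(1, 4)

w2c = np.concatenate([np.concatenate([R, t], 1), bottom], 0)   # (4, 4) world-to-camera
c2w = np.linalg.inv(w2c)[:3]                                   # (3, 4) camera-to-world, as in self.poses
c2w[..., 1:3] *= -1   # rotation columns from "right down front" to "right up back"
print(c2w)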
artiste-qb-net/Quantum_Edward
|
[
"89d3a7d40177065eaa34fabd4b4c255b8ef51881"
] |
[
"Fitter.py"
] |
[
"import numpy as np\nimport numpy.random as npr\nimport scipy.stats as ss\nimport utilities as ut\nfrom TimeStep import *\nfrom Plotter import *\n\n\nclass Fitter:\n \"\"\"\n Read docstrings for Model class first.\n\n The goal of this class is to implement the BBVI(see ref below) for a \n Model object 'model' to estimate those values for the hidden variables\n list1_angs which best fit the training data y_nsam_nb, x_nsam_na.\n \n In BBVI, one maximizes ELBO with respect to a parameter lambda. In this\n case, lambda = list1_conc0, list1_conc1 and z = list1_z =\n list1_angs/dpi. The angles in list1_angs are in the interval [0, dpi] so\n the entries list1_z are in the interval [0, 1].\n \n References\n ---------- \n R. Ranganath, S. Gerrish, D. M. Blei, \"Black Box Variational\n Inference\", https://arxiv.org/abs/1401.0118\n\n \"\"\"\n\n def __init__(self, model, y_nsam_nb, x_nsam_na, nsamgrad,\n nt, eta, t_step_meth):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n model : Model\n y_nsam_nb : np.array \n An array of zeros and ones with shape=(nsam, nb) containing nsam \n samples of y output. \n x_nsam_na : np.array\n An array of zeros and ones with shape=(nsam, na) containing nsam \n samples of x input. \n nsamgrad : int\n Number of samples used during averaging of the gradient of ELBO\n nt : int\n Number of time steps (aka iterations). Value of ELBO changes (\n increases or stays the same) with each iteration.\n eta : float\n positive scaling parameter (proportionality factor) for delta\n lambda. Passed to TimeStep class\n t_step_meth : str\n str labelling the method used to calculate delta lambda. This\n str is passed to TimeStep class.\n\n Returns\n -------\n None\n\n \"\"\"\n self.mod = model\n self.y_nsam_nb = y_nsam_nb\n self.x_nsam_na = x_nsam_na\n self.nsamgrad = nsamgrad\n self.nt = nt\n self.eta = eta\n self.t_step_meth = t_step_meth\n\n assert self.mod.na == x_nsam_na.shape[1]\n assert self.mod.nb == y_nsam_nb.shape[1]\n assert self.y_nsam_nb.shape[0] == self.x_nsam_na.shape[0]\n\n # the following will be filled by do_fit()\n self.fin_t = None\n self.fin_list1_conc0 = None\n self.fin_list1_conc1 = None\n\n len1 = self.mod.len1\n self.conc_nt_2_len1 = np.zeros((nt, 2, len1), dtype=float)\n self.delta_conc_nt_2_len1 = np.zeros((nt, 2, len1), dtype=float)\n self.elbo_nt_len1 = np.zeros((nt, len1), dtype=float)\n\n def get_delbo_and_grad_delbo(self, list1_z, list1_conc0, list1_conc1):\n \"\"\"\n delbo = density of elbo. grad = gradient. This is a private \n auxiliary function used by do_fit(). 
Inside the method do_fit(), \n we calculate elbo from delbo by taking expected value of delbo over \n z~ q(z | lambda) \n\n\n Parameters\n ----------\n list1_z : list[np.array]\n list1_conc0 : list[np.array]\n list1_conc1 : list[np.array]\n\n Returns\n -------\n tuple[list[np.array], list[np.array], list[np.array]]\n\n \"\"\"\n nsam = self.y_nsam_nb.shape[0]\n len1 = self.mod.len1\n\n # grad0,1 log q(z| lambda=conc0, conc1)\n xx = [ut.grad_log_beta_prob(list1_z[k],\n list1_conc0[k],\n list1_conc1[k])\n for k in range(len1)]\n # zip doesn't work\n # list1_g0, list1_g1 = zip(xx)\n\n def my_zip(a):\n return [[a[j][k] for j in range(len(a))]\n for k in range(len(a[0]))]\n\n # print('---------xx')\n # for j in range(2):\n # print(j, xx[j])\n # print('---------zip(zz)')\n # for j in range(2):\n # tempo = list(zip(xx))\n # print(j, tempo[j])\n # print('---------my_zip(zz)')\n # for j in range(2):\n # tempo = my_zip(xx)\n # print(j, tempo[j])\n\n list1_g0, list1_g1 = my_zip(xx)\n\n # sum_sam (log p(y| x, z = angs/dpi))\n x_nsam = ut.bin_vec_to_dec(self.x_nsam_na, nsam=nsam)\n y_nsam = ut.bin_vec_to_dec(self.y_nsam_nb, nsam=nsam)\n list1_angs = [list1_z[k]*ut.dpi for k in range(len1)]\n # log_py is a constant with shape 1\n log_py = np.sum(np.log(1e-8 + np.array(\n [self.mod.prob_y_given_x_and_angs_prior(y_nsam[sam],\n x_nsam[sam], list1_angs) for sam in range(nsam)]\n )))\n\n # log_px is a constant with shape 1\n log_px = np.sum(np.log(1e-8 + np.array(\n [self.mod.prob_x(x_nsam[sam], list1_angs) for sam in range(nsam)]\n )))\n\n # log p(z)\n list1_log_pz = [ut.log_beta_prob(list1_z[k],\n self.mod.list1_conc0_prior[k],\n self.mod.list1_conc1_prior[k])\n for k in range(len1)]\n\n # log q(z| lambda)\n list1_log_qz = [ut.log_beta_prob(list1_z[k],\n list1_conc0[k],\n list1_conc1[k])\n for k in range(len1)]\n\n # log p(y, x, z) - log q(z | lambda)\n list1_delbo = [log_py + log_px + list1_log_pz[k] - list1_log_qz[k]\n for k in range(len1)]\n # print(\"//\", len1, \"log_py=\", log_py, list1_delbo)\n\n list1_grad0_delbo = [np.multiply(list1_g0[k], list1_delbo[k])\n for k in range(len1)]\n\n list1_grad1_delbo = [np.multiply(list1_g1[k], list1_delbo[k])\n for k in range(len1)]\n\n return list1_delbo, list1_grad0_delbo, list1_grad1_delbo\n\n def do_fit(self):\n \"\"\"\n This function attempts to maximize ELBO over lambda. Does at most nt\n iterations (i.e., lambda changes, time steps). But may reach a\n convergence condition before doing nt iterations. 
Final iteration\n time is stored in self.fin_t.\n \n This function stores final values for time and lambda (lambda =\n concentrations 0, 1)\n \n self.fin_t\n self.fin_list1_conc0\n self.fin_list1_conc1\n\n It also stores traces (time series) for lambda (lambda =\n concentrations 0, 1), delta lambda between consecutive steps,\n and the ELBO value:\n \n self.conc_nt_2_len1\n self.delta_conc_nt_2_len1\n self.elbo_nt_len1\n\n Returns\n -------\n None\n\n \"\"\"\n len1 = self.mod.len1\n\n # starting values\n shapes = self.mod.shapes1\n list1_conc0 = ut.new_uniform_array_list(1., shapes)\n list1_conc1 = ut.new_uniform_array_list(1., shapes)\n step = TimeStep(self.t_step_meth, self.eta, self.mod.len1)\n\n for t in range(self.nt):\n\n list1_elbo = ut.new_uniform_array_list(0., shapes)\n list1_grad0_elbo = ut.new_uniform_array_list(0., shapes)\n list1_grad1_elbo = ut.new_uniform_array_list(0., shapes)\n \n for s in range(self.nsamgrad):\n list1_z = [ss.beta.rvs(list1_conc0[k], list1_conc1[k])\n for k in range(len1)]\n x0, x1, x2 =\\\n self.get_delbo_and_grad_delbo(list1_z,\n list1_conc0,\n list1_conc1)\n for k in range(len1):\n list1_elbo[k] += x0[k]/self.nsamgrad\n list1_grad0_elbo[k] += x1[k]/self.nsamgrad\n list1_grad1_elbo[k] += x2[k]/self.nsamgrad\n\n g0 = list1_grad0_elbo\n g1 = list1_grad1_elbo\n for k in range(len1):\n delta_conc = step.get_delta_conc(g0[k], g1[k], t, k)\n\n old_conc0 = np.copy(list1_conc0[k])\n list1_conc0[k] += delta_conc[0]\n list1_conc0[k] = np.clip(list1_conc0[k], 1e-5, 15)\n true_delta_conc0 = list1_conc0[k] - old_conc0\n\n old_conc1 = np.copy(list1_conc1[k])\n list1_conc1[k] += delta_conc[1]\n list1_conc1[k] = np.clip(list1_conc1[k], 1e-5, 15)\n true_delta_conc1 = list1_conc1[k] - old_conc1\n\n self.conc_nt_2_len1[t, 0, k] = np.sum(list1_conc0[k])\n self.conc_nt_2_len1[t, 1, k] = np.sum(list1_conc1[k])\n self.delta_conc_nt_2_len1[t, 0, k] = np.sum(true_delta_conc0)\n self.delta_conc_nt_2_len1[t, 1, k] = np.sum(true_delta_conc1)\n\n self.elbo_nt_len1[t, :] = \\\n ut.av_each_elem_in_array_list(list1_elbo)\n if np.all(self.delta_conc_nt_2_len1[t, :, :] < 0.001):\n break\n\n self.fin_t = t\n self.fin_list1_conc0 = list1_conc0\n self.fin_list1_conc1 = list1_conc1\n \n def print_fit_values_at_fin_t(self):\n \"\"\"\n Prints to screen summary of values at final time fin_t of do_fit()\n run.\n\n Recall z = ang/dpi with ang in interval [0, dpi] so z in interval [\n 0, 1].This function calculates for each z, its estimate, the std of\n that estimate, and the fractional error (z_estimate -\n z_prior)/z_prior. z_prior = angs_prior/dpi.\n\n angs_prior are the prior angles assumed for the model. If we use\n training data generated by Model:get_toy_data(), angs_prior are true\n values, the ones used to generate the synthetic data.\n\n Returns\n -------\n None\n\n \"\"\"\n len1 = self.mod.len1\n list1_conc0 = self.fin_list1_conc0\n list1_conc1 = self.fin_list1_conc1\n\n list1_zpred = [ss.beta.mean(list1_conc0[k], list1_conc1[k])\n for k in range(len1)]\n list1_std_zpred = [ss.beta.std(list1_conc0[k], list1_conc1[k])\n for k in range(len1)]\n\n print('fin_t=', self.fin_t, \"\\n\")\n for k in range(len1):\n print(\"list1_z[\" + str(k) + \"]:\")\n print(\"estimate:\\n\" + str(list1_zpred[k]))\n print(\"st.dev.:\\n\" + str(list1_std_zpred[k]))\n zprior = self.mod.list1_angs_prior[k]/ut.dpi\n print(\"frac. 
error = (est-prior)/prior:\\n\" +\n str((list1_zpred[k] - zprior)/zprior) + \"\\n\")\n\n def plot_fit_traces(self):\n \"\"\"\n Calls Plotter to plot traces (time series) collected during do_fit() \n run. Plots time series of lambda, delta lambda and ELBO. \n\n Returns\n -------\n None\n\n \"\"\"\n Plotter.plot_conc_traces(self.fin_t,\n self.conc_nt_2_len1,\n self.delta_conc_nt_2_len1)\n Plotter.plot_elbo_traces(self.fin_t,\n self.elbo_nt_len1)\n\n\nif __name__ == \"__main__\":\n from NbTrolsModel import *\n from NoNbTrolsModel import *\n\n def main():\n\n # Ridiculously small numbers,\n # just to make sure it runs without crashing\n npr.seed(1234)\n na = 2 # number of alpha qubits\n nb = 2 # number of beta qubits\n mod = NbTrolsModel(nb, na)\n # mod = NoNbTrolsModel(nb, na)\n\n nsam = 20 # number of samples\n y_nsam_nb, x_nsam_na = mod.gen_toy_data(nsam)\n\n nsamgrad = 10 # number of samples for grad estimate\n nt = 20 # number of interations\n\n # t_step_type, eta = naive', .0003 # very sensitive to eta\n # t_step_type, eta = 'naive_t', .0003 # very sensitive to eta\n # t_step_type, eta = 'mag1_grad', .2\n t_step_meth, eta = 'ada_grad', .1\n\n ff = Fitter(mod, y_nsam_nb, x_nsam_na,\n nsamgrad, nt, eta, t_step_meth)\n ff.do_fit()\n ff.print_fit_values_at_fin_t()\n ff.plot_fit_traces()\n\n main()\n"
] |
[
[
"numpy.multiply",
"numpy.random.seed",
"numpy.clip",
"scipy.stats.beta.rvs",
"scipy.stats.beta.std",
"scipy.stats.beta.mean",
"numpy.all",
"numpy.copy",
"numpy.zeros",
"numpy.sum"
]
] |
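The Fitter code in the record above estimates the ELBO gradient with the score-function (REINFORCE) trick: each Monte-Carlo sample of delbo is multiplied by the gradient of log q(z | conc0, conc1) for a Beta variational family. Below is a minimal standalone sketch of that estimator; grad_log_beta_prob is an illustrative re-implementation (not the repo's ut.grad_log_beta_prob), and the Beta(4, 2) target is a toy stand-in for log p(y, x, z).

import numpy as np
from scipy import stats as ss
from scipy.special import digamma

def grad_log_beta_prob(z, conc0, conc1):
    # gradients of log Beta(z; conc0, conc1) with respect to the two concentrations
    g0 = np.log(z) - digamma(conc0) + digamma(conc0 + conc1)
    g1 = np.log(1.0 - z) - digamma(conc1) + digamma(conc0 + conc1)
    return g0, g1

def elbo_and_grad(log_joint, conc0, conc1, nsamgrad=500, seed=0):
    # score-function estimate: E_q[(log p - log q) * grad log q] is an unbiased ELBO gradient
    rng = np.random.default_rng(seed)
    z = rng.beta(conc0, conc1, size=nsamgrad)
    delbo = log_joint(z) - ss.beta.logpdf(z, conc0, conc1)
    g0, g1 = grad_log_beta_prob(z, conc0, conc1)
    return delbo.mean(), (g0 * delbo).mean(), (g1 * delbo).mean()

# toy target p(z) = Beta(z; 4, 2): ascending these gradients moves (conc0, conc1) toward (4, 2)
target = lambda z: ss.beta.logpdf(z, 4.0, 2.0)
print(elbo_and_grad(target, 1.0, 1.0))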
jiunting/PhaseLink
|
[
"994e85ff7162869dd2a54c26eeb307e72ed1972e"
] |
[
"phaselink_plot.py"
] |
[
"#!/home/zross/bin/python \n\nimport numpy as np\nimport sys\nimport glob\nimport obspy\nimport pylab as plt\nimport json\nimport random\n\nclass Arrival():\n def __init__(self, net=None, sta=None, time=None, phase=None,\n dist=None, resid=None):\n self.net = net\n self.sta = sta\n self.time = time\n self.phase = phase\n self.dist = dist\n self.resid = resid\n\nclass Event():\n def __init__(self, arrivals = None):\n if arrivals is not None:\n self.arrivals = arrivals\n else:\n self.arrivals = []\n\ndef get_unassociated_trigs(origin_time, triggers, trig_meta):\n t_start = origin_time - obspy.UTCDateTime(0) - 60.0\n t_stop = t_start + 120.\n idx = np.where((triggers >= t_start) & (triggers < t_stop))[0]\n trigs = {}\n for x in idx:\n if trig_meta[x][1] not in trigs:\n trigs[trig_meta[x][1]] = []\n trigs[trig_meta[x][1]].append((trig_meta[x][3], trig_meta[x][4]))\n return trigs\n\ndef plot_seismicity(catalog, params):\n import pandas as pd\n\n print('Reading fault file in GMT format, please wait...')\n\n # list to store fault segments\n faults = []\n\n # preallocate to track fault pts within segment\n maxpts = 1600000 # based on number of lines in file\n flats = np.zeros(maxpts)\n flons = np.zeros(maxpts)\n fsegs = np.zeros(maxpts,dtype='int')\n nn = -1\n nseg=-1\n\n # loop over lines\n with open(params['fault_file']) as f:\n for line in f:\n\n # header line that gives number of points in segment\n if line.startswith('Pline'):\n nseg+=1\n\n # fault point line\n elif line.startswith('-1'):\n nn+=1\n lineS = line.split()\n flons[nn]=float(lineS[0])\n flats[nn]=float(lineS[1])\n fsegs[nn]=nseg\n\n # covert to dataframe\n fault_df = pd.DataFrame()\n fault_df['flon']=flons[:nn+1]\n fault_df['flat']=flats[:nn+1]\n fault_df['fseg']=fsegs[:nn+1]\n print('Done, {:} faults read'.format(nseg+1))\n\n from mpl_toolkits.basemap import Basemap, shiftgrid, cm\n fig = plt.figure(figsize=(10,10))\n ax = plt.gca()\n lat0, lat1 = params['lat_min'], params['lat_max']\n clat = (lat0+lat1)/2.\n lon0, lon1 = params['lon_min'], params['lon_max']\n clon = (lon0+lon1)/2.\n\n proj = 'merc'\n epsg = 4269\n m = Basemap(llcrnrlon=lon0,llcrnrlat=lat0,urcrnrlon=lon1,urcrnrlat=lat1,\n resolution='h',projection=proj,lat_0=clat,lon_0=clon, ax=ax,\n epsg=epsg)\n m.drawcoastlines()\n m.fillcontinents(color='white', lake_color='paleturquoise')\n m.drawparallels(np.arange(32, 38, 1.), labels=[1,0,0,1])\n m.drawmeridians(np.arange(-120, -114, 1.), labels=[1,0,0,1])\n m.drawmapboundary(fill_color='paleturquoise')\n\n\n xpixels = 5000\n service = 'World_Shaded_Relief'\n #m.arcgisimage(service=service, xpixels = xpixels, verbose= False)\n\n # plot faults\n ifaults = (fault_df.flat >= lat0)&(fault_df.flat <= lat1) & (\n fault_df.flon >= lon0)&(fault_df.flon <= lon1)\n for g, v in fault_df[ifaults].groupby('fseg'):\n m.plot(v.flon.values,v.flat.values,'-k',lw=1.0,latlon=True)\n\n lon = []\n lat = []\n for event in cat:\n lon.append(event.origins[0].longitude)\n lat.append(event.origins[0].latitude)\n #with open(\"datasets/cahuilla_sum.nll\", 'r') as f:\n # for line in f:\n # temp = line.split()\n # lon.append(float(temp[11]))\n # lat.append(float(temp[9]))\n\n m.scatter(lon, lat, 0.5, marker='o', color='r', latlon=True, zorder=10)\n stla = []\n stlo = []\n with open(params[\"station_file\"], 'r') as f:\n for line in f:\n temp = line.split()\n stla.append(float(temp[2]))\n stlo.append(float(temp[3]))\n m.scatter(stlo, stla, 50, marker='^', color='blue', latlon=True, zorder=10)\n plt.tight_layout()\n plt.savefig(\"detection_map.png\", 
dpi=320)\n plt.show()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"phaselink_plot.py control_file.json\")\n sys.exit()\n\n\n with open(sys.argv[1], \"r\") as f:\n params = json.load(f)\n\n triggers = []\n trig_meta = []\n if params['plot_unassociated']:\n print(\"Reading unassociated triggers...\")\n with open(params['gpd_file'], 'r') as f:\n for line in f:\n net, sta, phase, time, prob, dur = line.split()\n if float(prob) < params['pr_min'] or \\\n float(dur) < params['trig_dur_min']:\n continue\n trig_type = 0\n else:\n trig_type = 1\n triggers.append(obspy.UTCDateTime(time) - obspy.UTCDateTime(0))\n trig_meta.append((net, sta, phase, obspy.UTCDateTime(time),\n trig_type))\n idx = np.argsort(triggers)\n triggers = np.array([triggers[x] for x in idx])\n trig_meta = [trig_meta[x] for x in idx]\n\n print(\"Now building catalog\")\n\n #nll_summary_file = \"%s/%s\" % \\\n # (params['nlloc_loc_path'], params['nlloc_sum_file'])\n #cat = obspy.io.nlloc.core.read_nlloc_hyp(nll_summary_file)\n nll_files = glob.glob(\"%s/*.*.*.*.*.hyp\" % params['nlloc_loc_path'])\n cat = obspy.Catalog()\n for fname in nll_files:\n try:\n cat += obspy.read_events(fname)\n except:\n continue\n random.shuffle(nll_files)\n\n for event in cat:\n print(event.preferred_origin().time)\n print(cat)\n print()\n\n if params['plot_seismicity']:\n plot_seismicity(cat, params)\n\n\n for fname in nll_files:\n cat = obspy.read_events(fname)\n event = cat[0]\n origin = event.preferred_origin()\n origin_time = origin.time\n print(event)\n print(origin)\n\n if params['plot_unassociated']:\n trigs = get_unassociated_trigs(origin_time, triggers, trig_meta)\n\n # Build id_map for join between arrivals and picks\n picks = {}\n sta_order = []\n dist_count = 0\n for arrival in origin.arrivals:\n pick = arrival.pick_id.get_referred_object()\n sta = pick.waveform_id.station_code\n phase = arrival.phase\n time = pick.time\n #if arrival.distance <= params['dist_cutoff_radius']:\n # dist_count += 1\n if abs(arrival.time_residual) > params['max_t_resid']:\n flag = 1\n else:\n flag = 0\n if sta not in picks:\n picks[sta] = [(time, phase, flag)]\n sta_order.append(sta)\n else:\n picks[sta].append((time, phase, flag))\n\n #if dist_count < params['dist_cutoff_n_min']:\n # print(\"Skipping event, only %d phases within radius %.2f\" % \\\n # (dist_count, params['dist_cutoff_radius']))\n # continue\n\n # Plot results\n fig, ax = plt.subplots(1,1,figsize=(30,30))\n colors = {0: 'lime', 1: 'yellow'}\n count = 0\n for sta in sta_order:\n\n st = obspy.read(\"%s/%04d/%03d/*.%s.*\" % \\\n (params['wf_path'], origin_time.year, origin_time.julday, sta),\n starttime=origin_time-60, endtime=origin_time+60)\n st.detrend()\n st.filter(type='bandpass', freqmin=3.0, freqmax=20)\n for tr in st:\n ax.plot(np.arange(tr.data.size)*tr.stats.delta, \\\n tr.data/np.max(tr.data) + count, c='k', lw=1)\n ax.text(125, count, sta)\n if params['plot_unassociated']:\n if sta in trigs:\n for pick, t_type in trigs[sta]:\n #tr_slice = tr.slice(starttime=pick,\n # endtime=pick+1.0)\n #ax.plot(np.arange(tr_slice.data.size) \\\n # * tr.stats.delta + (pick - origin_time) + 60.,\n # tr_slice.data/np.max(tr.data) + count,\n # c=colors[t_type], lw=1)\n ax.plot(pick-tr.stats.starttime, 0,\n marker=\"|\", c=colors[t_type])\n\n for pick, phase, flag in picks[sta]:\n if phase == 'P':\n color = 'r'\n else:\n color = 'b'\n #if flag:\n # color = 'limegreen'\n ax.plot([pick-tr.stats.starttime, pick-tr.stats.starttime], [count-0.75, count+0.75], c=color)\n count += 1\n 
plt.show()\n print()\n"
] |
[
[
"numpy.arange",
"pandas.DataFrame",
"numpy.max",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
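The get_unassociated_trigs helper in phaselink_plot.py reduces to a vectorized time-window query over sorted trigger times, grouped by station. A standalone sketch of that pattern with synthetic triggers in place of the GPD file (the station codes and time offsets below are made up):

import numpy as np
import obspy

origin_time = obspy.UTCDateTime("2020-01-01T00:10:00")
# trigger times as seconds since epoch, plus (net, sta, phase) metadata
offsets = [-90.0, -30.0, 5.0, 45.0, 70.0]
triggers = np.array([origin_time - obspy.UTCDateTime(0) + dt for dt in offsets])
trig_meta = [("CI", "STA1", "P"), ("CI", "STA2", "P"),
             ("CI", "STA2", "S"), ("CI", "STA3", "P"), ("CI", "STA1", "S")]

# keep triggers within +/-60 s of the origin time and group them by station
t_start = origin_time - obspy.UTCDateTime(0) - 60.0
idx = np.where((triggers >= t_start) & (triggers < t_start + 120.0))[0]
trigs = {}
for i in idx:
    trigs.setdefault(trig_meta[i][1], []).append(triggers[i])
print(sorted(trigs))  # ['STA2', 'STA3']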
cds-mipt/yolact
|
[
"d226620495f16314ff8f5dda57bca18de54e004b"
] |
[
"train.py"
] |
[
"from data import *\nfrom utils.augmentations import SSDAugmentation, BaseTransform\nfrom utils.functions import MovingAverage, SavePath\nfrom utils.logger import Log\nfrom utils import timer\nfrom layers.modules import MultiBoxLoss\nfrom yolact import Yolact\nimport os\nimport sys\nimport time\nimport math, random\nfrom pathlib import Path\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\nimport datetime\n\n# Oof\nimport eval as eval_script\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Yolact Training Script')\nparser.add_argument('--batch_size', default=8, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from. If this is \"interrupt\"'\\\n ', the model will resume training from the interrupt file.')\nparser.add_argument('--start_iter', default=-1, type=int,\n help='Resume training at this iter. If this is -1, the iteration will be'\\\n 'determined from the file name.')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning_rate', default=None, type=float,\n help='Initial learning rate. Leave as None to read this from the config.')\nparser.add_argument('--momentum', default=None, type=float,\n help='Momentum for SGD. Leave as None to read this from the config.')\nparser.add_argument('--decay', '--weight_decay', default=None, type=float,\n help='Weight decay for SGD. Leave as None to read this from the config.')\nparser.add_argument('--gamma', default=None, type=float,\n help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models.')\nparser.add_argument('--log_folder', default='logs/',\n help='Directory for saving logs.')\nparser.add_argument('--config', default=None,\n help='The config object to use.')\nparser.add_argument('--save_interval', default=50,type=int,\n help='The number of iterations between saving the model.')\nparser.add_argument('--validation_size', default=10000,type=int,\n help='The number of images to use for validation.')\nparser.add_argument('--validation_epochs', default=4, type=int,\n help='Output validation information every n iterations. If -1, do no validation.')\nparser.add_argument('--keep_latest', dest='keep_latest', action='store_true',\n help='Only keep the latest checkpoint instead of each one.')\nparser.add_argument('--keep_latest_interval', default=100000, type=int,\n help='When --keep_latest is on, don\\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')\nparser.add_argument('--dataset', default=None, type=str,\n help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')\nparser.add_argument('--no_log', dest='log', action='store_false',\n help='Don\\'t log per iteration information into log_folder.')\nparser.add_argument('--log_gpu', dest='log_gpu', action='store_true',\n help='Include GPU information in the logs. 
Nvidia-smi tends to be slow, so set this with caution.')\nparser.add_argument('--no_interrupt', dest='interrupt', action='store_false',\n help='Don\\'t save an interrupt when KeyboardInterrupt is caught.')\nparser.add_argument('--batch_alloc', default=None, type=str,\n help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')\nparser.add_argument('--no_autoscale', dest='autoscale', action='store_false',\n help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')\n\nparser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)\nargs = parser.parse_args()\n\nif args.config is not None:\n set_cfg(args.config)\n\nif args.dataset is not None:\n set_dataset(args.dataset)\n\nif args.autoscale and args.batch_size != 8:\n factor = args.batch_size / 8\n print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))\n\n cfg.lr *= factor\n cfg.max_iter //= factor\n cfg.lr_steps = [x // factor for x in cfg.lr_steps]\n\n# Update training parameters from the config if necessary\ndef replace(name):\n if getattr(args, name) == None: setattr(args, name, getattr(cfg, name))\nreplace('lr')\nreplace('decay')\nreplace('gamma')\nreplace('momentum')\n\n# This is managed by set_lr\ncur_lr = args.lr\n\nif torch.cuda.device_count() == 0:\n print('No GPUs detected. Exiting...')\n exit(-1)\n\nif args.batch_size // torch.cuda.device_count() < 6:\n print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')\n cfg.freeze_bn = True\n\nloss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S']\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nclass NetLoss(nn.Module):\n \"\"\"\n A wrapper for running the network and computing the loss\n This is so we can more efficiently use DataParallel.\n \"\"\"\n \n def __init__(self, net:Yolact, criterion:MultiBoxLoss):\n super().__init__()\n\n self.net = net\n self.criterion = criterion\n \n def forward(self, images, targets, masks, num_crowds):\n preds = self.net(images)\n return self.criterion(preds, targets, masks, num_crowds)\n\nclass CustomDataParallel(nn.DataParallel):\n \"\"\"\n This is a custom version of DataParallel that works better with our training data.\n It should also be faster than the general case.\n \"\"\"\n\n def scatter(self, inputs, kwargs, device_ids):\n # More like scatter and data prep at the same time. 
The point is we prep the data in such a way\n # that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.\n devices = ['cuda:' + str(x) for x in device_ids]\n splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)\n\n return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \\\n [kwargs] * len(devices)\n\n def gather(self, outputs, output_device):\n out = {}\n\n for k in outputs[0]:\n out[k] = torch.stack([output[k].to(output_device) for output in outputs])\n \n return out\n\ndef train():\n if not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n dataset = COCODetection(image_path=cfg.dataset.train_images,\n info_file=cfg.dataset.train_info,\n transform=SSDAugmentation(MEANS))\n \n if args.validation_epochs > 0:\n setup_eval()\n val_dataset = COCODetection(image_path=cfg.dataset.valid_images,\n info_file=cfg.dataset.valid_info,\n transform=BaseTransform(MEANS))\n\n filename_log = args.log_folder+cfg.name+'.log' \n AP_best = 0\n # Parallel wraps the underlying module, but when saving and loading we don't want that\n yolact_net = Yolact()\n net = yolact_net\n net.train()\n if args.log:\n log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),\n overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)\n\n # I don't use the timer during training (I use a different timing method).\n # Apparently there's a race condition with multiple GPUs, so disable it just to be safe.\n timer.disable_all()\n\n # Both of these can set args.resume to None, so do them before the check \n if args.resume == 'interrupt':\n args.resume = SavePath.get_interrupt(args.save_folder)\n elif args.resume == 'latest':\n args.resume = SavePath.get_latest(args.save_folder, cfg.name)\n\n if args.resume is not None:\n print('Resuming training, loading {}...'.format(args.resume))\n yolact_net.load_weights(args.resume)\n\n if args.start_iter == -1:\n args.start_iter = SavePath.from_str(args.resume).iteration\n else:\n print('Initializing weights...')\n yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.decay)\n criterion = MultiBoxLoss(num_classes=cfg.num_classes,\n pos_threshold=cfg.positive_iou_threshold,\n neg_threshold=cfg.negative_iou_threshold,\n negpos_ratio=cfg.ohem_negpos_ratio)\n\n if args.batch_alloc is not None:\n args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]\n if sum(args.batch_alloc) != args.batch_size:\n print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))\n exit(-1)\n\n net = CustomDataParallel(NetLoss(net, criterion))\n if args.cuda:\n net = net.cuda()\n \n # Initialize everything\n if not cfg.freeze_bn: yolact_net.freeze_bn() # Freeze bn so we don't kill our means\n yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())\n if not cfg.freeze_bn: yolact_net.freeze_bn(True)\n\n # loss counters\n loc_loss = 0\n conf_loss = 0\n iteration = max(args.start_iter, 0)\n last_time = time.time()\n\n epoch_size = len(dataset) // args.batch_size\n num_epochs = math.ceil(cfg.max_iter / epoch_size)\n \n best_AP = 0\n \n # Which learning rate adjustment step are we on? 
lr' = lr * gamma ^ step_index\n step_index = 0\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n \n \n save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)\n time_avg = MovingAverage()\n\n global loss_types # Forms the print order\n loss_avgs = { k: MovingAverage(100) for k in loss_types }\n\n print('Begin training!')\n print()\n # try-except so you can use ctrl+c to save early and stop training\n try:\n for epoch in range(num_epochs):\n # Resume from start_iter\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, None)\n if (epoch+1)*epoch_size < iteration:\n continue\n \n for datum in data_loader:\n # Stop if we've reached an epoch if we're resuming from start_iter\n if iteration == (epoch+1)*epoch_size:\n break\n\n # Stop at the configured number of iterations even if mid-epoch\n if iteration == cfg.max_iter:\n break\n\n # Change a config setting if we've reached the specified iteration\n changed = False\n for change in cfg.delayed_settings:\n if iteration >= change[0]:\n changed = True\n cfg.replace(change[1])\n\n # Reset the loss averages because things might have changed\n for avg in loss_avgs:\n avg.reset()\n \n # If a config setting was changed, remove it from the list so we don't keep checking\n if changed:\n cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]\n\n # Warm up by linearly interpolating the learning rate from some smaller value\n if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:\n set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)\n\n # Adjust the learning rate at the given iterations, but also if we resume from past that iteration\n while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:\n step_index += 1\n set_lr(optimizer, args.lr * (args.gamma ** step_index))\n \n # Zero the grad to get ready to compute gradients\n optimizer.zero_grad()\n\n # Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)\n losses = net(datum)\n losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel\n loss = sum([losses[k] for k in losses])\n # no_inf_mean removes some components from the loss, so make sure to backward through all of it\n # all_loss = sum([v.mean() for v in losses.values()])\n\n # Backprop\n loss.backward() # Do this to free up vram even if loss is not finite\n if torch.isfinite(loss).item():\n optimizer.step()\n \n # Add the loss to the moving average for bookkeeping\n for k in losses:\n loss_avgs[k].add(losses[k].item())\n\n cur_time = time.time()\n elapsed = cur_time - last_time\n last_time = cur_time\n\n # Exclude graph setup from the timing information\n if iteration != args.start_iter:\n time_avg.add(elapsed)\n\n if iteration % 10 == 0:\n eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]\n \n total = sum([loss_avgs[k].get_avg() for k in losses])\n loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])\n \n print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')\n % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)\n\n if args.log:\n precision = 5\n loss_info = {k: round(losses[k].item(), precision) for k in losses}\n loss_info['T'] = sum([round(losses[k].item(), 
precision) for k in losses])\n\n if args.log_gpu:\n log.log_gpu_stats = (iteration % 10 == 0) # nvidia-smi is sloooow\n \n log.log('train', loss=loss_info, epoch=epoch, iter=iteration,\n lr=round(cur_lr, 10), elapsed=elapsed)\n\n log.log_gpu_stats = args.log_gpu\n \n iteration += 1\n# if iteration % args.save_interval == 0 and iteration != args.start_iter:\n# if args.keep_latest:\n# latest = SavePath.get_latest(args.save_folder, cfg.name)\n\n# print('Saving state, iter:', iteration)\n# yolact_net.save_weights(save_path(epoch, iteration))\n\n# if args.keep_latest and latest is not None:\n# if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:\n# print('Deleting old save...')\n# os.remove(latest)\n # This is done per epoch\n if args.validation_epochs > 0:\n if epoch % args.validation_epochs == 0 and epoch > 0:\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n with open(filename_log) as f:\n f = f.readlines()\n record = f[-1]\n record = record.replace('true','True')\n record = record.replace('null','None')\n record = record.replace('NaN','None')\n record = record.replace('false','False')\n record = record.replace('Infinity', 'np.inf')\n record_dict = eval(record)\n AP = record_dict['data']['box']['50']\n if AP_best < AP:\n AP_best = AP\n if args.keep_latest:\n latest = SavePath.get_latest(args.save_folder, cfg.name)\n print('Saving state, iter:', iteration)\n yolact_net.save_weights(save_path(epoch, iteration))\n if args.keep_latest and latest is not None:\n if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:\n print('Deleting old save...')\n os.remove(latest)\n \n # Compute validation mAP after training is finished\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n except KeyboardInterrupt:\n if args.interrupt:\n print('Stopping early. 
Saving network...')\n \n # Delete previous copy of the interrupted network so we don't spam the weights folder\n SavePath.remove_interrupt(args.save_folder)\n \n yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))\n exit()\n\n yolact_net.save_weights(save_path(epoch, iteration))\n\n\ndef set_lr(optimizer, new_lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n \n global cur_lr\n cur_lr = new_lr\n\ndef gradinator(x):\n x.requires_grad = False\n return x\n\ndef prepare_data(datum, devices:list=None, allocation:list=None):\n with torch.no_grad():\n if devices is None:\n devices = ['cuda:0'] if args.cuda else ['cpu']\n if allocation is None:\n allocation = [args.batch_size // len(devices)] * (len(devices) - 1)\n allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less\n \n images, (targets, masks, num_crowds) = datum\n\n cur_idx = 0\n for device, alloc in zip(devices, allocation):\n for _ in range(alloc):\n images[cur_idx] = gradinator(images[cur_idx].to(device))\n targets[cur_idx] = gradinator(targets[cur_idx].to(device))\n masks[cur_idx] = gradinator(masks[cur_idx].to(device))\n cur_idx += 1\n\n if cfg.preserve_aspect_ratio:\n # Choose a random size from the batch\n _, h, w = images[random.randint(0, len(images)-1)].size()\n\n for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):\n images[idx], targets[idx], masks[idx], num_crowds[idx] \\\n = enforce_size(image, target, mask, num_crowd, w, h)\n \n cur_idx = 0\n split_images, split_targets, split_masks, split_numcrowds \\\n = [[None for alloc in allocation] for _ in range(4)]\n\n for device_idx, alloc in enumerate(allocation):\n split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)\n split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]\n split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]\n split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]\n\n cur_idx += alloc\n\n return split_images, split_targets, split_masks, split_numcrowds\n\ndef no_inf_mean(x:torch.Tensor):\n \"\"\"\n Computes the mean of a vector, throwing out all inf values.\n If there are no non-inf values, this will return inf (i.e., just the normal mean).\n \"\"\"\n\n no_inf = [a for a in x if torch.isfinite(a)]\n\n if len(no_inf) > 0:\n return sum(no_inf) / len(no_inf)\n else:\n return x.mean()\n\ndef compute_validation_loss(net, data_loader, criterion):\n global loss_types\n\n with torch.no_grad():\n losses = {}\n \n # Don't switch to eval mode because we want to get losses\n iterations = 0\n for datum in data_loader:\n images, targets, masks, num_crowds = prepare_data(datum)\n out = net(images)\n\n wrapper = ScatterWrapper(targets, masks, num_crowds)\n _losses = criterion(out, wrapper, wrapper.make_mask())\n \n for k, v in _losses.items():\n v = v.mean().item()\n if k in losses:\n losses[k] += v\n else:\n losses[k] = v\n\n iterations += 1\n if args.validation_size <= iterations * args.batch_size:\n break\n \n for k in losses:\n losses[k] /= iterations\n \n \n loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])\n print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)\n\ndef compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):\n with torch.no_grad():\n yolact_net.eval()\n \n start = time.time()\n print()\n print(\"Computing validation mAP (this may take a while)...\", flush=True)\n val_info = eval_script.evaluate(yolact_net, 
dataset, train_mode=True)\n end = time.time()\n\n if log is not None:\n log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)\n \n yolact_net.train()\n return 1\n\n \ndef setup_eval():\n eval_script.parse_args(['--no_bar', '--max_images='+str(args.validation_size)])\n\nif __name__ == '__main__':\n train()\n"
] |
[
[
"torch.set_default_tensor_type",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.isfinite",
"torch.no_grad",
"torch.cuda.is_available",
"torch.stack",
"torch.cuda.device_count"
]
] |
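The training loop in train.py adjusts the learning rate in two stages: a linear warmup up to cfg.lr_warmup_until, then multiplicative decay by gamma at each entry of cfg.lr_steps, applied through set_lr(). A self-contained sketch of just that schedule; the linear model and the numeric constants are placeholders standing in for Yolact and the cfg values.

import torch

net = torch.nn.Linear(4, 2)                     # stand-in for the Yolact network
optimizer = torch.optim.SGD(net.parameters(), lr=1e-3, momentum=0.9)
base_lr, gamma = 1e-3, 0.1
warmup_init, warmup_until = 1e-4, 500
lr_steps = [2000, 4000]

def set_lr(optimizer, new_lr):
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr

step_index = 0
schedule = []
for iteration in range(6000):
    if warmup_until > 0 and iteration <= warmup_until:
        # linear interpolation from warmup_init to base_lr
        set_lr(optimizer, (base_lr - warmup_init) * (iteration / warmup_until) + warmup_init)
    while step_index < len(lr_steps) and iteration >= lr_steps[step_index]:
        step_index += 1
        set_lr(optimizer, base_lr * (gamma ** step_index))
    schedule.append(optimizer.param_groups[0]['lr'])

print(schedule[0], schedule[500], schedule[2000], schedule[4000])  # 1e-4, 1e-3, 1e-4, 1e-5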
ecdedios/ddfloww-site
|
[
"bd582bc34a6248338abac8c2fe9c22bfbf69d79f"
] |
[
"predict2.py"
] |
[
"import pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom collections import OrderedDict\nfrom sklearn.metrics import accuracy_score\n\ndf = pd.read_csv('phase2_df.csv')\n\nx_columns = ['slap',\n 'threat_object',\n 'beaten',\n 'limit_family_contact',\n 'kick_punch',\n 'threat_hit',\n 'push_shove',\n 'jealous',\n 'life_danger'\n ]\n\nX = df[x_columns]\ny = df[['reassault']]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .15, random_state = 0, stratify=y)\n\nclf = LogisticRegressionCV(cv=5,\n random_state=0,\n solver='liblinear'\n ).fit(X_train, y_train)\n\n\nprint('Accuracy of Logistic Regression classifier on training set: {:.2f}'\n .format(clf.score(X_train, y_train)))\n\nprint('Accuracy of Logistic Regression classifier on test set: {:.2f}'\n .format(clf.score(X_test, y_test)))\n\ndef predictorizer(feature1, feature2, feature3, feature4, feature5, feature6, feature7, feature8, feature9):\n\n new_data = OrderedDict([ \n ('slap', feature1),\n ('threat_object', feature2),\n ('beaten', feature3),\n ('limit_family_contact', feature4),\n ('kick_punch', feature5),\n ('threat_hit', feature6),\n ('push_shove', feature7),\n ('jealous', feature8),\n ('life_danger', feature9,)\n ])\n # .values.reshape(1, -1) because it must be 2-dim, because we passed only one new observation\n new_data = pd.Series(new_data).values.reshape(1,-1) \n # Use the model to make a prediction\n prediction = str(clf.predict_proba(new_data)[[0],[1]])\n prediction = prediction.replace('[','')\n prediction = prediction.replace(']','')\n prediction = \"{:.1%}\".format(float(prediction))\n return prediction\n\n\nprint(predictorizer('1','1','1','1','1','1','1','1','1'))\n\n"
] |
[
[
"sklearn.linear_model.LogisticRegressionCV",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pandas.Series"
]
] |
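predict2.py scores a single new observation by reshaping it to shape (1, n_features) before calling predict_proba. A minimal sketch of the same pattern with a synthetic stand-in for phase2_df.csv (the feature names and data below are invented), passing the new observation as numeric values in a DataFrame rather than as strings:

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import train_test_split

# synthetic stand-in for phase2_df.csv: nine binary risk factors, one binary outcome
rng = np.random.default_rng(0)
X = pd.DataFrame(rng.integers(0, 2, size=(200, 9)),
                 columns=[f"feature{i}" for i in range(9)])
y = (X.sum(axis=1) + rng.integers(0, 3, size=200) > 5).astype(int)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.15, random_state=0, stratify=y)

clf = LogisticRegressionCV(cv=5, random_state=0, solver='liblinear').fit(X_train, y_train)
print("test accuracy: {:.2f}".format(clf.score(X_test, y_test)))

# one new observation, numeric and 2-D (1 row x 9 features)
new_obs = pd.DataFrame([[1, 1, 1, 1, 1, 1, 1, 1, 1]], columns=X.columns)
print("{:.1%}".format(clf.predict_proba(new_obs)[0, 1]))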
GrowthJeff/lifelines
|
[
"4415be1bfeb7d15203109842926c1f6e50facaa6"
] |
[
"lifelines/fitters/mixins.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom typing import List, Optional, Dict, Any, Iterable\nfrom textwrap import dedent, fill\nfrom autograd import numpy as anp\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom lifelines.statistics import proportional_hazard_test, TimeTransformers\nfrom lifelines.utils import format_p_value\nfrom lifelines.utils.lowess import lowess\n\n\nclass SplineFitterMixin:\n @staticmethod\n def relu(x: np.array):\n return anp.maximum(0, x)\n\n def basis(self, x: np.array, knot: float, min_knot: float, max_knot: float):\n lambda_ = (max_knot - knot) / (max_knot - min_knot)\n return self.relu(x - knot) ** 3 - (lambda_ * self.relu(x - min_knot) ** 3 + (1 - lambda_) * self.relu(x - max_knot) ** 3)\n\n\nclass ProportionalHazardMixin:\n def check_assumptions(\n self,\n training_df: DataFrame,\n advice: bool = True,\n show_plots: bool = False,\n p_value_threshold: float = 0.01,\n plot_n_bootstraps: int = 10,\n columns: Optional[List[str]] = None,\n ) -> None:\n \"\"\"\n Use this function to test the proportional hazards assumption. See usage example at\n https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html\n\n\n Parameters\n -----------\n\n training_df: DataFrame\n the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.\n advice: bool, optional\n display advice as output to the user's screen\n show_plots: bool, optional\n display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.\n This will slow down the function significantly.\n p_value_threshold: float, optional\n the threshold to use to alert the user of violations. See note below.\n plot_n_bootstraps:\n in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down\n the function significantly.\n columns: list, optional\n specify a subset of columns to test.\n\n\n Examples\n ----------\n\n .. code:: python\n\n from lifelines.datasets import load_rossi\n from lifelines import CoxPHFitter\n\n rossi = load_rossi()\n cph = CoxPHFitter().fit(rossi, 'week', 'arrest')\n\n cph.check_assumptions(rossi)\n\n\n Notes\n -------\n The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates\n will be below the threshold (i.e. by chance). This is compounded when there are many covariates.\n\n Similarly, when there are lots of observations, even minor deviances from the proportional hazard\n assumption will be flagged.\n\n With that in mind, it's best to use a combination of statistical tests and eyeball tests to\n determine the most serious violations.\n\n\n References\n -----------\n section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,\n http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,\n http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf\n \"\"\"\n\n if not training_df.index.is_unique:\n raise IndexError(\n \"`training_df` index should be unique for this exercise. 
Please make it unique or use `.reset_index(drop=True)` to force a unique index\"\n )\n\n residuals = self.compute_residuals(training_df, kind=\"scaled_schoenfeld\")\n test_results = proportional_hazard_test(self, training_df, time_transform=[\"rank\", \"km\"], precomputed_residuals=residuals)\n\n residuals_and_duration = residuals.join(training_df[self.duration_col])\n\n counter = 0\n n = residuals_and_duration.shape[0]\n\n for variable in self.params_.index.intersection(columns or self.params_.index):\n minumum_observed_p_value = test_results.summary.loc[variable, \"p\"].min()\n if np.round(minumum_observed_p_value, 2) > p_value_threshold:\n continue\n\n counter += 1\n\n if counter == 1:\n if advice:\n print(\n fill(\n \"\"\"The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged.\"\"\"\n % p_value_threshold,\n width=100,\n )\n )\n print()\n print(\n fill(\n \"\"\"With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.\"\"\",\n width=100,\n )\n )\n print()\n test_results.print_summary()\n print()\n\n print()\n print(\n \"%d. Variable '%s' failed the non-proportional test: p-value is %s.\"\n % (counter, variable, format_p_value(4)(minumum_observed_p_value)),\n end=\"\\n\\n\",\n )\n\n if advice:\n values = training_df[variable]\n value_counts = values.value_counts()\n n_uniques = value_counts.shape[0]\n\n # Arbitrary chosen 10 and 4 to check for ability to use strata col.\n # This should capture dichotomous / low cardinality values.\n if n_uniques <= 10 and value_counts.min() >= 5:\n print(\n fill(\n \" Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.\".format(\n n_uniques, variable\n ),\n width=100,\n )\n )\n else:\n print(\n fill(\n \"\"\" Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.\"\"\".format(\n var=variable\n ),\n width=100,\n ),\n end=\"\\n\\n\",\n )\n print(\n fill(\n \"\"\" Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.\"\"\".format(\n var=variable\n ),\n width=100,\n ),\n end=\"\\n\\n\",\n )\n print(\n fill(\n \"\"\" Advice 3: try adding an interaction term with your time variable. 
See documentation in link [C] below.\"\"\",\n width=100,\n ),\n end=\"\\n\\n\",\n )\n\n if show_plots:\n\n from matplotlib import pyplot as plt\n\n fig = plt.figure()\n\n # plot variable against all time transformations.\n for i, (transform_name, transformer) in enumerate(TimeTransformers().iter([\"rank\", \"km\"]), start=1):\n p_value = test_results.summary.loc[(variable, transform_name), \"p\"]\n\n ax = fig.add_subplot(1, 2, i)\n\n y = residuals_and_duration[variable]\n tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]\n\n ax.scatter(tt, y, alpha=0.75)\n\n y_lowess = lowess(tt.values, y.values)\n ax.plot(tt, y_lowess, color=\"k\", alpha=1.0, linewidth=2)\n\n # bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals\n for _ in range(plot_n_bootstraps):\n ix = sorted(np.random.choice(n, n))\n tt_ = tt.values[ix]\n y_lowess = lowess(tt_, y.values[ix])\n ax.plot(tt_, y_lowess, color=\"k\", alpha=0.30)\n\n best_xlim = ax.get_xlim()\n ax.hlines(0, 0, tt.max(), linestyles=\"dashed\", linewidths=1)\n ax.set_xlim(best_xlim)\n\n ax.set_xlabel(\"%s-transformed time\\n(p=%.4f)\" % (transform_name, p_value), fontsize=10)\n\n fig.suptitle(\"Scaled Schoenfeld residuals of '%s'\" % variable, fontsize=14)\n plt.tight_layout()\n plt.subplots_adjust(top=0.90)\n\n if advice and counter > 0:\n print(\n dedent(\n r\"\"\"\n ---\n [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html\n [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it\n [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates\n [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form\n [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification\n \"\"\"\n )\n )\n\n if counter == 0:\n print(\"Proportional hazard assumption looks okay.\")\n\n @property\n def hazard_ratios_(self):\n return Series(np.exp(self.params_), index=self.params_.index, name=\"exp(coef)\")\n\n def compute_followup_hazard_ratios(self, training_df: DataFrame, followup_times: Iterable) -> DataFrame:\n \"\"\"\n Recompute the hazard ratio at different follow-up times (lifelines handles accounting for updated censoring and updated durations).\n This is useful because we need to remember that the hazard ratio is actually a weighted-average of period-specific hazard ratios.\n\n Parameters\n ----------\n\n training_df: pd.DataFrame\n The same dataframe used to train the model\n followup_times: Iterable\n a list/array of follow-up times to recompute the hazard ratio at.\n\n\n \"\"\"\n results = {}\n for t in sorted(followup_times):\n assert t <= training_df[self.duration_col].max(), \"all follow-up times must be less than max observed duration\"\n df = training_df.copy()\n # if we \"rollback\" the df to time t, who is dead and who is censored\n df[self.event_col] = (df[self.duration_col] <= t) & df[self.event_col]\n df[self.duration_col] = np.minimum(df[self.duration_col], t)\n\n model = self.__class__(penalizer=self.penalizer, l1_ratio=self.l1_ratio).fit(\n df, self.duration_col, self.event_col, weights_col=self.weights_col, entry_col=self.entry_col\n )\n results[t] = model.hazard_ratios_\n return DataFrame(results).T\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"numpy.minimum",
"numpy.random.choice",
"pandas.DataFrame",
"numpy.round",
"matplotlib.pyplot.subplots_adjust",
"numpy.exp",
"matplotlib.pyplot.figure"
]
] |
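The ProportionalHazardMixin above is what CoxPHFitter users reach through check_assumptions(), hazard_ratios_ and compute_followup_hazard_ratios(). The usage sketch below follows the docstring's own load_rossi example; the choice of follow-up times is arbitrary.

from lifelines.datasets import load_rossi
from lifelines import CoxPHFitter

rossi = load_rossi()
cph = CoxPHFitter().fit(rossi, 'week', 'arrest')

# exp(coef) per covariate, exposed by the hazard_ratios_ property
print(cph.hazard_ratios_)

# statistical (and optionally visual) test of the proportional hazards assumption
cph.check_assumptions(rossi, p_value_threshold=0.01, show_plots=False)

# hazard ratios recomputed as if follow-up had stopped at 25 and 50 weeks
print(cph.compute_followup_hazard_ratios(rossi, [25, 50]))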
lofar-astron/lofarimaging
|
[
"9672b52bb9be8f3405e6e3f85701175bdc4bf211"
] |
[
"lofarimaging/lofarimaging.py"
] |
[
"\"\"\"Functions for working with LOFAR single station data\"\"\"\n\nfrom typing import Dict, List\nimport numpy as np\nfrom numpy.linalg import norm, lstsq\nimport numexpr as ne\nimport numba\nfrom astropy.coordinates import SkyCoord, SkyOffsetFrame, CartesianRepresentation\n\n\n__all__ = [\"nearfield_imager\", \"sky_imager\", \"ground_imager\", \"skycoord_to_lmn\", \"calibrate\", \"simulate_sky_source\",\n \"subtract_sources\"]\n\n__version__ = \"1.5.0\"\nSPEED_OF_LIGHT = 299792458.0\n\n\ndef skycoord_to_lmn(pos: SkyCoord, phasecentre: SkyCoord):\n \"\"\"\n Convert astropy sky coordinates into the l,m,n coordinate system\n relative to a phase centre.\n\n The l,m,n is a RHS coordinate system with\n * its origin on the sky sphere\n * m,n and the celestial north on the same plane\n * l,m a tangential plane of the sky sphere\n\n Note that this means that l increases east-wards\n\n This function was taken from https://github.com/SKA-ScienceDataProcessor/algorithm-reference-library\n \"\"\"\n\n # Determine relative sky position\n todc = pos.transform_to(SkyOffsetFrame(origin=phasecentre))\n dc = todc.represent_as(CartesianRepresentation)\n dc /= dc.norm()\n\n # Do coordinate transformation - astropy's relative coordinates do\n # not quite follow imaging conventions\n return dc.y.value, dc.z.value, dc.x.value - 1\n\n\[email protected](parallel=True)\ndef sky_imager(visibilities, baselines, freq, npix_l, npix_m):\n \"\"\"\n Sky imager\n\n Args:\n visibilities: Numpy array with visibilities, shape [num_antennas x num_antennas]\n baselines: Numpy array with distances between antennas, shape [num_antennas, num_antennas, 3]\n freq: frequency\n npix_l: Number of pixels in l-direction\n npix_m: Number of pixels in m-direction\n\n Returns:\n np.array(float): Real valued array of shape [npix_l, npix_m]\n \"\"\"\n img = np.zeros((npix_m, npix_l), dtype=np.complex128)\n\n for m_ix in range(npix_m):\n m = -1 + m_ix * 2 / npix_m\n for l_ix in range(npix_l):\n l = 1 - l_ix * 2 / npix_l\n img[m_ix, l_ix] = np.mean(visibilities * np.exp(-2j * np.pi * freq *\n (baselines[:, :, 0] * l + baselines[:, :, 1] * m) /\n SPEED_OF_LIGHT))\n return np.real(img)\n\n\ndef ground_imager(visibilities, freq, npix_p, npix_q, dims, station_pqr, height=1.5):\n \"\"\"Do a Fourier transform for ground imaging\"\"\"\n img = np.zeros([npix_q, npix_p], dtype=np.complex128)\n\n for q_ix, q in enumerate(np.linspace(dims[2], dims[3], npix_q)):\n for p_ix, p in enumerate(np.linspace(dims[0], dims[1], npix_p)):\n r = height\n pqr = np.array([p, q, r], dtype=np.float32)\n antdist = np.linalg.norm(station_pqr - pqr[np.newaxis, :], axis=1)\n groundbase = antdist[:, np.newaxis] - antdist[np.newaxis, :]\n img[q_ix, p_ix] = np.mean(visibilities * np.exp(-2j * np.pi * freq * (-groundbase) / SPEED_OF_LIGHT))\n\n return img\n\n\ndef nearfield_imager(visibilities, baseline_indices, freqs, npix_p, npix_q, extent, station_pqr, height=1.5,\n max_memory_mb=200):\n \"\"\"\n Nearfield imager\n\n Args:\n visibilities: Numpy array with visibilities, shape [num_visibilities x num_frequencies]\n baseline_indices: List with tuples of antenna numbers in visibilities, shape [2 x num_visibilities]\n freqs: List of frequencies\n npix_p: Number of pixels in p-direction\n npix_q: Number of pixels in q-direction\n extent: Extent (in m) that the image should span\n station_pqr: PQR coordinates of stations\n height: Height of image in metre\n max_memory_mb: Maximum amount of memory to use for the biggest array. 
Higher may improve performance.\n\n Returns:\n np.array(complex): Complex valued array of shape [npix_p, npix_q]\n \"\"\"\n z = height\n x = np.linspace(extent[0], extent[1], npix_p)\n y = np.linspace(extent[2], extent[3], npix_q)\n\n posx, posy = np.meshgrid(x, y)\n posxyz = np.transpose(np.array([posx, posy, z * np.ones_like(posx)]), [1, 2, 0])\n\n diff_vectors = (station_pqr[:, None, None, :] - posxyz[None, :, :, :])\n distances = np.linalg.norm(diff_vectors, axis=3)\n\n vis_chunksize = max_memory_mb * 1024 * 1024 // (8 * npix_p * npix_q)\n\n bl_diff = np.zeros((vis_chunksize, npix_q, npix_p), dtype=np.float64)\n img = np.zeros((npix_q, npix_p), dtype=np.complex128)\n for vis_chunkstart in range(0, len(baseline_indices), vis_chunksize):\n vis_chunkend = min(vis_chunkstart + vis_chunksize, baseline_indices.shape[0])\n # For the last chunk, bl_diff_chunk is a bit smaller than bl_diff\n bl_diff_chunk = bl_diff[:vis_chunkend - vis_chunkstart, :]\n np.add(distances[baseline_indices[vis_chunkstart:vis_chunkend, 0]],\n -distances[baseline_indices[vis_chunkstart:vis_chunkend, 1]], out=bl_diff_chunk)\n\n j2pi = 1j * 2 * np.pi\n for ifreq, freq in enumerate(freqs):\n v = visibilities[vis_chunkstart:vis_chunkend, ifreq][:, None, None]\n lamb = SPEED_OF_LIGHT / freq\n\n # v[:,np.newaxis,np.newaxis]*np.exp(-2j*np.pi*freq/c*groundbase_pixels[:,:,:]/c)\n # groundbase_pixels=nvis x npix x npix\n np.add(img, np.sum(ne.evaluate(\"v * exp(j2pi * bl_diff_chunk / lamb)\"), axis=0), out=img)\n img /= len(freqs) * len(baseline_indices)\n\n return img\n\n\ndef calibrate(vis, modelvis, maxiter=30, amplitudeonly=True):\n \"\"\"\n Calibrate and subtract some sources\n\n Args:\n vis: visibility matrix, shape [n_st, n_st]\n modelvis: model visibility matrices, shape [n_dir, n_st, n_st]\n maxiter: max iterations (default 30)\n amplitudeonly: fit only amplitudes (default True)\n\n Returns:\n residual: visibilities with calibrated directions subtracted, shape [n_st, n_st]\n gains: gains, shape [n_dir, n_st]\n \"\"\"\n nst = vis.shape[1]\n ndir = np.array(modelvis).shape[0]\n gains = np.ones([ndir, nst], dtype=np.complex)\n\n if ndir == 0:\n return vis, gains\n else:\n gains *= np.sqrt(norm(vis) / norm(modelvis))\n\n iteration = 0\n while iteration < maxiter:\n iteration += 1\n gains_prev = gains.copy()\n for k in range(nst):\n z = np.conj(gains_prev) * np.array(modelvis)[:, :, k]\n gains[:, k] = lstsq(z.T, vis[:, k], rcond=None)[0]\n if amplitudeonly:\n gains = np.abs(gains).astype(np.complex)\n if iteration % 2 == 0 and iteration > 0:\n dg = norm(gains - gains_prev)\n residual = vis.copy()\n for d in range(ndir):\n residual -= np.diag(np.conj(gains[d])) @ modelvis[d] @ np.diag(gains[d])\n gains = 0.5 * gains + 0.5 * gains_prev\n return residual, gains\n\n\ndef simulate_sky_source(lmn_coord: np.array, baselines: np.array, freq: float):\n \"\"\"\n Simulate visibilities for a sky source\n\n Args:\n lmn_coord (np.array): l, m, n coordinate\n baselines (np.array): baseline distances in metres, shape (n_ant, n_ant)\n freq (float): Frequency in Hz\n \"\"\"\n return np.exp(2j * np.pi * freq * baselines.dot(np.array(lmn_coord)) / SPEED_OF_LIGHT)\n\n\ndef subtract_sources(vis: np.array, baselines: np.array, freq: float, lmn_dict: Dict[str, np.array],\n sources=[\"Cas A\", \"Cyg A\", \"Sun\"]):\n \"\"\"\n Subtract sky sources from visibilities\n\n Args:\n vis (np.array): visibility matrix, shape [n_ant, n_ant]\n lmn_dict (Dict[str, np.array]): dictionary with lmn coordinates\n baselines (np.array): baseline distances in metres, 
shape (n_ant, n_ant)\n freq (float): Frequency in Hz\n sources (List[str]): list with source names to subtract (should all be in lmn_dict).\n Default [\"Cas A\", \"Sun\"]\n\n Returns:\n vis (np.array): visibility matrix with sources subtracted\n \"\"\"\n modelvis = [simulate_sky_source(lmn_dict[srcname], baselines, freq) for srcname in lmn_dict\n if srcname in sources]\n\n residual, _ = calibrate(vis, modelvis)\n\n return residual\n"
] |
[
[
"numpy.diag",
"numpy.ones_like",
"numpy.conj",
"numpy.linspace",
"numpy.abs",
"numpy.linalg.norm",
"numpy.ones",
"numpy.linalg.lstsq",
"numpy.real",
"numpy.exp",
"numpy.add",
"numpy.array",
"numpy.meshgrid",
"numpy.zeros"
]
] |
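skycoord_to_lmn and simulate_sky_source above combine into a small forward model: direction cosines of a source relative to the phase centre, then model visibilities on a toy array. A sketch under the assumption that the two functions are importable from the lofarimaging package (the exact import path may differ); the coordinates and antenna positions are made up.

import numpy as np
from astropy.coordinates import SkyCoord
from lofarimaging import skycoord_to_lmn, simulate_sky_source  # assumed import path

phasecentre = SkyCoord(ra="23h23m24s", dec="+58d48m54s", frame="icrs")
source = SkyCoord(ra="23h20m00s", dec="+58d30m00s", frame="icrs")
l, m, n = skycoord_to_lmn(source, phasecentre)

# toy 3-element array; baselines[i, j] is the 3-vector from antenna j to antenna i (metres)
ants = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [0.0, 15.0, 0.0]])
baselines = ants[:, None, :] - ants[None, :, :]

# model visibilities of a unit-flux point source at 50 MHz, shape (3, 3)
vis_model = simulate_sky_source(np.array([l, m, n]), baselines, 50e6)
print(np.round(vis_model, 3))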
AI21Labs/sense-bert
|
[
"32773c4da8ba23674978170598498fe0239ddb1a"
] |
[
"sensebert.py"
] |
[
"import os\nfrom collections import namedtuple\nimport tensorflow as tf\n\nfrom tokenization import FullTokenizer\n\n_SenseBertGraph = namedtuple(\n 'SenseBertGraph',\n ('input_ids', 'input_mask', 'contextualized_embeddings', 'mlm_logits', 'supersense_losits')\n)\n\n_MODEL_PATHS = {\n 'sensebert-base-uncased': 'gs://ai21-public-models/sensebert-base-uncased',\n 'sensebert-large-uncased': 'gs://ai21-public-models/sensebert-large-uncased'\n}\n_CONTEXTUALIZED_EMBEDDINGS_TENSOR_NAME = \"bert/encoder/Reshape_13:0\"\n\n\ndef _get_model_path(name_or_path, is_tokenizer=False):\n if name_or_path in _MODEL_PATHS:\n print(f\"Loading the known {'tokenizer' if is_tokenizer else 'model'} '{name_or_path}'\")\n model_path = _MODEL_PATHS[name_or_path]\n else:\n print(f\"This is not a known {'tokenizer' if is_tokenizer else 'model'}. \"\n f\"Assuming {name_or_path} is a path or a url...\")\n model_path = name_or_path\n return model_path\n\n\ndef load_tokenizer(name_or_path):\n model_path = _get_model_path(name_or_path, is_tokenizer=True)\n vocab_file = os.path.join(model_path, \"vocab.txt\")\n supersense_vocab_file = os.path.join(model_path, \"supersense_vocab.txt\")\n return FullTokenizer(vocab_file=vocab_file, senses_file=supersense_vocab_file)\n\n\ndef _load_model(name_or_path, session=None):\n if session is None:\n session = tf.get_default_session()\n\n model = tf.saved_model.load(export_dir=_get_model_path(name_or_path), sess=session, tags=[tf.saved_model.SERVING])\n serve_def = model.signature_def[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n\n inputs, outputs = ({key: session.graph.get_tensor_by_name(info.name) for key, info in puts.items()}\n for puts in (serve_def.inputs, serve_def.outputs))\n\n return _SenseBertGraph(\n input_ids=inputs['input_ids'],\n input_mask=inputs['input_mask'],\n contextualized_embeddings=session.graph.get_tensor_by_name(_CONTEXTUALIZED_EMBEDDINGS_TENSOR_NAME),\n supersense_losits=outputs['ss'],\n mlm_logits=outputs['masked_lm']\n )\n\n\nclass SenseBert:\n def __init__(self, name_or_path, max_seq_length=512, session=None):\n self.max_seq_length = max_seq_length\n self.session = session if session else tf.get_default_session()\n self.model = _load_model(name_or_path, session=self.session)\n self.tokenizer = load_tokenizer(name_or_path)\n\n def tokenize(self, inputs):\n \"\"\"\n Gets a string or a list of strings, and returns a tuple (input_ids, input_mask) to use as inputs for SenseBERT.\n Both share the same shape: [batch_size, sequence_length] where sequence_length is the maximal sequence length.\n \"\"\"\n if isinstance(inputs, str):\n inputs = [inputs]\n\n # tokenizing all inputs\n all_token_ids = []\n for inp in inputs:\n tokens = [self.tokenizer.start_sym] + self.tokenizer.tokenize(inp)[0] + [self.tokenizer.end_sym]\n assert len(tokens) <= self.max_seq_length\n all_token_ids.append(self.tokenizer.convert_tokens_to_ids(tokens))\n\n # decide the maximum sequence length and pad accordingly\n max_len = max([len(token_ids) for token_ids in all_token_ids])\n input_ids, input_mask = [], []\n pad_sym_id = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_sym])\n for token_ids in all_token_ids:\n to_pad = max_len - len(token_ids)\n input_ids.append(token_ids + pad_sym_id * to_pad)\n input_mask.append([1] * len(token_ids) + [0] * to_pad)\n\n return input_ids, input_mask\n\n def run(self, input_ids, input_mask):\n return self.session.run(\n [self.model.contextualized_embeddings, self.model.mlm_logits, self.model.supersense_losits],\n feed_dict={self.model.input_ids: 
input_ids, self.model.input_mask: input_mask}\n )\n"
] |
[
[
"tensorflow.get_default_session"
]
] |
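sensebert.py exposes a thin session-based wrapper around the saved model. A usage sketch assuming a TF1.x-compatible environment, since the module relies on tf.get_default_session and the TF1 saved_model loader; the input sentence is arbitrary.

import tensorflow as tf
from sensebert import SenseBert

with tf.Session() as session:   # tf.compat.v1.Session() under TF2
    model = SenseBert("sensebert-base-uncased", session=session)  # weights fetched from GCS
    input_ids, input_mask = model.tokenize(["I went to the store to buy some groceries."])
    embeddings, mlm_logits, supersense_logits = model.run(input_ids, input_mask)

# session.run returns plain numpy arrays, so they remain usable after the session closes
print(embeddings.shape)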
NREL/pySMARTS
|
[
"83e702ed508eedcd8f6a6e11f2e640557f649dcd"
] |
[
"docs/tutorials/1 - Beginer - Plot ALBEDOs from Smarts.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# # 1 - Beginner - Plot Spectra and Albedos from SMARTS\n# \n# ##### Generate & Plot Spectra and Albedos from SMARTS\n# ######      * 1. DNI and DHI for a particular time and location\n# ######      * 2. Ground Albedo for various materials at AM 1.5\n# ######      * 3. Ground Albedo for complete AOD and PWD Weather Data\n# \n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport pvlib\nimport datetime\nimport pprint\nimport os\n\n\n# In[2]:\n\n\nplt.rcParams['timezone'] = 'Etc/GMT+7'\nfont = {'family' : 'DejaVu Sans',\n'weight' : 'normal',\n'size' : 18}\nplt.rc('font', **font)\nplt.rcParams['figure.figsize'] = (12, 5)\n\n\n# In[3]:\n\n\nimport pySMARTS\n\n\n# In[4]:\n\n\npySMARTS.__version__\n\n\n# #### Real Input data from SRRL for OCTOBER 21st, 12:45 PM\n\n# # 1. Plot a DNI and DHI for a particular time and location\n# \n\n# In[22]:\n\n\nIOUT = '2 3' # DNI and DHI\n\n\n# In[21]:\n\n\nYEAR = '2021'\nMONTH = '06'\nDAY = '21'\nHOUR = '12'\nLATIT = '33'\nLONGIT = '110'\nALTIT = '0.9' # km above sea level\nZONE = '-7' # Timezone\n\n\n# In[ ]:\n\n\npySMARTS.SMARTSTimeLocation(IOUT,YEAR,MONTH,DAY,HOUR, LATIT, LONGIT, ALTIT, ZONE)\n\n\n# # 2. Plot Albedos from SMARTS\n\n# In[16]:\n\n\nIOUT = '30' # Albedo\n\n\n# #### Plot Ground Albedo AM 1.0\n\n# In[ ]:\n\n\nmaterials = ['Concrete', 'LiteLoam', 'RConcrte', 'Gravel']\n\nalb_db = pd.DataFrame()\n\nfor i in range (0, len(materials)):\n\n alb = pySMARTS.SMARTSAirMass(IOUT=IOUT, AMASS='1.5', material=materials[i])\n\n alb_db[materials[i]] = alb[alb.keys()[1]]\n \nalb_db.index = alb.Wvlgth\n\nalb_db_10 = alb_db\n\nfor col in alb_db:\n alb_db[col].plot(legend=True)\n \nplt.xlabel('Wavelength [nm]')\nplt.xlim([300, 2500])\nplt.axhline(y=0.084, color='r')\nplt.axhline(y=0.10, color='r')\n\n#UV albedo: 295 to 385\n#Total albedo: 300 to 3000\n#10.4 and 8.4 $ Measured\n#References\n\nplt.ylim([0,1])\nplt.ylabel('Reflectance')\nplt.legend(bbox_to_anchor=(1.04,0.75), loc=\"upper left\")\nplt.title('Ground albedos AM 1')\nplt.show()\n\nvis=alb_db.iloc[40:1801].mean()\nuv=alb_db.iloc[30:210].mean()\n\nprint(vis)\nprint(uv)\n\n\n# ## Extra: Averaging Albedos for Visible and UV\n# \n\n# In[ ]:\n\n\nvis=alb_db.iloc[40:1801].mean()\nuv=alb_db.iloc[30:210].mean()\nprint(\"Albedo on Visible Range:\\n\", vis)\nprint(\"Albedo on UV Range:\\n\", uv)\n\n\n# <div class=\"alert alert-block alert-info\"><b>Tip: </b> If you want full spectrum averages, we recommend interpolating as the default granularity of SMARTS at higher wavelengths is not the same than at lower wavelengths, thus the 'step' is not the same. </div>\n# \n\n# In[68]:\n\n\nr = pd.RangeIndex(2800,40000, 5)\nr = r/10\nalb2 = alb_db.reindex(r, method='ffill')\nprint(\"Albedo for all wavelengths:\", alb2.mean())\n\n\n# In[74]:\n\n\n# FYI: Wavelengths corresponding to the albedo before and after interpolating\n\"\"\"\n# Visible\nalb_db.iloc[40] # 300\nalb_db.iloc[1801] # 3000\n\n# UV\nalb_db.iloc[30] # 295\nalb_db.iloc[210] # 385 \n\n# Visible\nalb2.iloc[40] # 300\nalb2.iloc[5440] # 3000\n\n# UV\nalb2.iloc[30] # 295\nalb2.iloc[210] # 385 \n\"\"\"\n\n\n# # 3. ADVANCED: Plot Ground Albedo for More Complete Weather Data\n# \n# #### This asumes you know a lot more parameters about your weather data souch as: Broadband Turbidity, Aeorsol Opticla Density parameters, and Precipitable Water. 
\n# \n\n# ### Real Input data from SRRL for OCTOBER 21st, 12:45 PM\n\n# In[7]:\n\n\nalb = 0.2205\nYEAR='2020'; MONTH='10'; DAY='21'; HOUR = '12.75' \nLATIT='39.74'; LONGIT='-105.17'; ALTIT='1.0'; ZONE='-7'\nTILT='33.0'; WAZIM='180.0'; HEIGHT='0' \nmaterial='DryGrass'\nmin_wvl='280'; Max_wvl='4000'\n\nTAIR = '20.3'\nRH = '2.138'\nSEASON = 'WINTER'\nTDAY = '12.78'\nSPR = '810.406'\nRHOG = '0.2205'\n\nWAZIMtracker = '270'\nTILTtracker = '23.37'\ntracker_tetha_bifrad = '-23.37'\n\nTAU5='0.18422' # SRRL-GRAL \"Broadband Turbidity\"\nTAU5 = '0.037' # SRRL-AOD [500nm]\nGG = '0.7417' # SSRL-AOD Asymmetry [500nm]\nBETA = '0.0309' # SRRL-AOD Beta\nALPHA = '0.1949' # SRRL-AOD Alpha [Angstrom exp]\nOMEGL = '0.9802' # SRRL-AOD SSA [500nm]\nW = str(7.9/10) # SRRL-PWD Precipitable Water [mm]\n\n\n# In[8]:\n\n\nmaterial = 'DryGrass'\n\nalb_db = pd.DataFrame()\n\nalb = pySMARTS.SMARTSSRRL(\n IOUT=IOUT, YEAR=YEAR, MONTH=MONTH,DAY=DAY, HOUR='12.45', LATIT=LATIT, \n LONGIT=LONGIT, ALTIT=ALTIT, \n ZONE=ZONE, W=W, RH=RH, TAIR=TAIR, \n SEASON=SEASON, TDAY=TDAY, TAU5=None, SPR=SPR, \n TILT=TILT, WAZIM=WAZIM,\n ALPHA1 = ALPHA, ALPHA2 = 0, OMEGL = OMEGL,\n GG = GG, BETA = BETA,\n RHOG=RHOG, HEIGHT=HEIGHT, material=material, POA = True)\n\nalb_db[material] = alb[alb.keys()[1]] \nalb_db.index = alb.Wvlgth\n\n\n# In[ ]:\n\n\nalb_db[material].plot(legend=True, color='y')\nplt.xlabel('Wavelength [nm]')\nplt.xlim([300, 2500])\nplt.ylim([0,1])\nplt.ylabel('Reflectance')\nplt.legend(bbox_to_anchor=(1.04,0.75), loc=\"upper left\")\nplt.title('Albedo @ 12.45 Oct 21, 2020 for SRRL Weather Data ')\nplt.show()\n\n\n# ### A plotly plot to explore the results\n\n# In[24]:\n\n\nimport plotly.express as px\n\n\n# In[ ]:\n\n\nfig = px.line(alb_db[material], title='Albedo @ 12.45 Oct 21, 2020 for SRRL Weather Data')\n\nfig.update_layout(xaxis_title='Wavelength [nm]',\n yaxis_title='Reflectance')\nfig.show()\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.title",
"pandas.RangeIndex",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
PerttuHamalainen/DRMM
|
[
"1d7d52df95adee344516322700209f3a9f8147fb"
] |
[
"PrecisionRecallTest.py"
] |
[
"\nimport numpy as np\nimport random\nimport os\nimport matplotlib.pyplot as pp\n#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"-1\" #disable Tensorflow GPU usage, these simple graphs run faster on CPU\nimport tensorflow as tf\nimport DRMM as DRMM\nfrom skimage.util import view_as_blocks\nfrom precision_recall import knn_precision_recall_features\nimport MocapUtils as mocap\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--datasetIdx', type=int, default=1)\nparser.add_argument('--modelIdx', type=int, default=0)\nparser.add_argument('--nIter', type=int, default=50000)\nparser.add_argument('--nEval', type=int, default=20000)\n\nargs = parser.parse_args()\ndatasetIdx=args.datasetIdx\nmodelIdx=args.modelIdx\nnIter=args.nIter\n\nnBatch=64\ninitialLearningRate=0.002\n#datasets=[\"IK (arm)\"] \ndatasets=[\"IK (fullbody)\",\"Motion Capture\"]\nnTargetEvalSamples=args.nEval\n\n#Returns squared distance matrix D with elements d_ij = | a_i - b_j|^2, where a_i = A[i,:] and b_j=B[j,:]\ndef pairwiseSqDistances(A,B):\n #d_ij=(a_i-b_j)'(a_i-b_j) = a_i'a_i - 2 a_i'b_j + b_j'b_j\n #D = [a_0'a_0, a_1'a_1, ...] - 2 AB' + [b_0'b_0, b_1'b_1, ...]', assuming broadcasting\n #D = A_d - 2 AB' + B_d\n A_d=np.sum(A * A,axis=1,keepdims=True)\n B_d=np.reshape(np.sum(B * B,axis=1),[1,B.shape[0]])\n return np.clip(A_d - 2 * np.matmul(A,np.transpose(B)) + B_d,0,np.inf) #relu to ensure no negative results due to computational inaccuracy\n\ndef modifiedHausdorffDistance(A,B):\n sqDist=pairwiseSqDistances(A,B)\n return np.sqrt(np.sum(np.min(sqDist,axis=0))+np.sum(np.min(sqDist,axis=1)))\n\ndef numDrmmParameters(dataDim,nLayers,nComponentsPerLayer):\n nParameters=0\n layerInputVars=dataDim\n for layerIdx in range(nLayers):\n nParameters+=1 #scalar variance parameter\n nParameters+=layerInputVars*nComponentsPerLayer #Gaussian means or class prototypes\n nParameters+=nComponentsPerLayer #marginal probabilities\n layerInputVars+=nComponentsPerLayer\n return nParameters\n\n\nplotIdx=0\ndataset=datasets[datasetIdx]\n#Load or create data\nif dataset==\"Swissroll 3D\":\n print(\"Creating 3D swissroll data\")\n x=[]\n noiseSd=0.0\n for angle in np.arange(0,4.0*np.pi,0.001):\n #swiss roll\n x.append(np.reshape(0.5*angle*np.array([np.sin(angle),np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))\n #circle\n #x.append(np.reshape(np.array([np.sin(angle),np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))\n #sine wave\n #x.append(np.reshape(np.array([angle,np.cos(angle)])+np.random.normal(0,noiseSd,size=[2]),[1,2]))\n data=np.concatenate(x)\n data=np.concatenate([data,np.random.uniform(-2,2,size=[data.shape[0],1])],axis=1)\nelif dataset==\"Sierpinski 2D\":\n x=[]\n\n def sierpinski(x0,x1,x2,data,depth=8):\n if depth==0:\n data.append(x0)\n data.append(x1)\n data.append(x2)\n else:\n depth-=1\n sierpinski(x0,0.5*(x0+x1),0.5*(x0+x2),data,depth)\n sierpinski(x1,0.5*(x1+x0),0.5*(x1+x2),data,depth)\n sierpinski(x2,0.5*(x2+x0),0.5*(x2+x1),data,depth)\n\n def pointOnUnitCircle(angle):\n return np.array([np.sin(angle),np.cos(angle)])\n sierpinski(pointOnUnitCircle(0),pointOnUnitCircle(1.0/3.0*2.0*np.pi),pointOnUnitCircle(2.0/3.0*2.0*np.pi),x)\n data=np.array(x)\nelif dataset==\"IK (arm)\":\n print(\"Loading data\")\n dataFile=\"./IKTest/arm_data.npy\"\n data=np.load(dataFile)\nelif dataset==\"IK (fullbody)\":\n print(\"Loading data\")\n dataFile=\"./IKTest/fullbody_data.npy\"\n data=np.load(dataFile)\nelif dataset == \"Motion Capture\":\n print(\"Loading Motion Capture Data\")\n mocapData = 
mocap.MocapDataset(\"mocapdata/laforge_locomotion_nosliding.zip\",\n sequenceLength=30,\n optimizeForSpeed=True)\n data=mocapData.allSequences.reshape([mocapData.allSequences.shape[0],-1])\nelse:\n raise Exception(\"Invalid dataset\")\ndataDim=data.shape[1]\nprint(\"Dataset has {} vectors of {} variables\".format(data.shape[0],data.shape[1]))\n#if data.shape[0]>maxData:\n# data=data[:maxData]\n#A helper function for extracting a random data batch\ndef getDataBatch(nBatch):\n return data[np.random.randint(data.shape[0], size=nBatch),:]\n\nlayerAmounts=[1,2,3,4]\n\n\n\nnLayers=layerAmounts[modelIdx]\n\n\n#We test GMM with 64,128... components, and DRMM:s with the same (approximately) number of parameters\nfor modelSize in [64,128,256,512,1024]:\n if nLayers==1:\n nComponentsPerLayer=modelSize\n else:\n targetNumParams=numDrmmParameters(dataDim,1,modelSize)\n nComponentsPerLayer=4\n while (numDrmmParameters(dataDim,nLayers,nComponentsPerLayer)<targetNumParams):\n nComponentsPerLayer+=1\n nParameters=numDrmmParameters(dataDim,nLayers,nComponentsPerLayer)\n\n #Init tf\n tf.reset_default_graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n sess = tf.Session(config=config)\n\n #create model\n if nParameters<2000: # or (datasetIdx==2 and nLayers==1): #run small models on CPU (faster), and also run the datasetIdx==2 and nLayers==1 cases on CPU because they need a lot of memory and the dev. laptop GPU is only 6GB\n with tf.device('/cpu:0'): \n model=DRMM.DRMM(sess=sess,\n nLayers=nLayers,\n nComponentsPerLayer=nComponentsPerLayer,\n initialLearningRate=initialLearningRate,\n inputs=DRMM.dataStream(\"continuous\",shape=[None,dataDim]))\n else:\n model = DRMM.DRMM(sess=sess,\n nLayers=nLayers,\n nComponentsPerLayer=nComponentsPerLayer,\n initialLearningRate=initialLearningRate,\n inputs=DRMM.dataStream(\"continuous\", shape=[None, dataDim]))\n assert(nParameters==model.nParameters) #check that our parameter amount estimation was correct\n\n\n\n #Initialize\n tf.global_variables_initializer().run(session=sess)\n model.init(data[:min([2000,data.shape[0]])])\n\n #Optimize\n for i in range(nIter):\n info = model.train(i / nIter, getDataBatch(nBatch))\n # Print progress\n if i % 100 == 0 or i == nIter - 1:\n logp = np.mean(\n model.getLogP(inputs=DRMM.DataIn(data=getDataBatch(1024),mask=np.ones([1024,dataDim])))) # evaluate log-likelihood of a large data batch\n print(\n \"\\rIteration {}/{}, phase {:.3f} Loss {:.3f}, logp {:.3f} learning rate {:.6f}, precision {:.3f}\".format(\n i, nIter, i / nIter, info[\"loss\"], logp, info[\"lr\"], info[\"rho\"]), end=\"\")\n\n #Evaluate\n nEvalSamples=min([data.shape[0],nTargetEvalSamples])\n print(\"\\nGenerating {} samples\".format(nEvalSamples))\n sampled_fetch=np.zeros([nEvalSamples,dataDim])\n nSampled=0\n while (nSampled<nEvalSamples):\n batchSize=min([10000,nEvalSamples-nSampled])\n sampled_fetch[nSampled:nSampled+batchSize]=model.sample(nSamples=batchSize)\n nSampled+=batchSize\n #print(sampled_fetch)\n print(\"Evaluating\")\n if nEvalSamples<data.shape[0]:\n evalData=getDataBatch(nEvalSamples)\n else:\n evalData=data\n #evalData=data[:min([nEvalSamples, data.shape[0]])]\n logp = np.mean(model.getLogP(inputs=DRMM.DataIn(data=evalData,mask=np.ones_like(evalData))))\n with sess.as_default():\n #Precision and recall code from: https://github.com/kynkaat/improved-precision-and-recall-metric\n precrecall=knn_precision_recall_features(evalData,sampled_fetch,row_batch_size=10000)\n precision=precrecall['precision'][0]\n 
recall=precrecall['recall'][0]\n f1=2.0*(recall * precision) / (recall + precision + 1e-8)\n print(\"F1 {}, logp {}\".format(f1,logp))\n logFileName=\"Results/benchmark_precrecall.csv\"\n if not os.path.isfile(logFileName):\n logFile=open(logFileName,\"w\")\n logFile.write(\"dataset,datasetIdx,nLayers,nComponentsPerLayer,nParameters,precision,recall,f1,logp\\n\")\n else:\n logFile=open(logFileName,\"a\")\n #logFile.write(\"dataset,datasetIdx,nLayers,nComponentsPerLayer,sampleQuality\")\n logFile.write(\"{},{},{},{},{},{},{},{},{}\\n\".format(dataset,datasetIdx,nLayers,nComponentsPerLayer,model.nParameters,precision,recall,f1,logp))\n logFile.close()\n #pp.close()\n\n\n\n"
] |
[
[
"tensorflow.device",
"numpy.concatenate",
"numpy.random.randint",
"numpy.ones_like",
"numpy.arange",
"numpy.sin",
"tensorflow.ConfigProto",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"numpy.load",
"numpy.zeros",
"numpy.min",
"tensorflow.global_variables_initializer",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.cos",
"numpy.ones",
"numpy.random.normal",
"numpy.random.uniform"
]
] |
Shiao-Computing-Volumes/project-based-learning-in-python
|
[
"52e0b02cf085de97c3b5d9aa44bf8786d8a9ad19"
] |
[
"05_normal_distribution_simulator/studio/charting/histograms.py"
] |
[
"import numpy as np\nimport scipy.stats\nimport matplotlib.pyplot as plt\n\nfrom studio.settings.frames import STYLE, THEME_COLOR, AIDED_COLOR\nfrom studio.settings.frames import FIGSIZE, DPI\nfrom studio.frames.camera import Camera\nfrom studio.charting.components.legends import captioning\n\nplt.style.use(STYLE)\n\n\ndef hist_density(datasets, suptitle, title, captions1, caption2):\n fig = plt.figure(figsize=FIGSIZE, dpi=DPI)\n fig.suptitle(suptitle)\n\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n ax1.spines['right'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n ax1.spines['left'].set_color(AIDED_COLOR)\n ax1.spines['bottom'].set_color(AIDED_COLOR)\n ax2.spines['right'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax2.spines['left'].set_color(AIDED_COLOR)\n ax2.spines['bottom'].set_color(AIDED_COLOR)\n\n camera = Camera(fig)\n\n for index in range(len(datasets)):\n n = len(datasets[index])\n if n < 101: step = int(n/5)\n else: step = int(n/10)\n for i in range(0, len(datasets[index]), step):\n single_histogram(ax1, datasets[index], i+step, title, captions1[index])\n single_density(ax2, datasets[index], i+step, title, caption2)\n camera.snap()\n\n return camera.animate()\n\n\ndef single_histogram(ax, data, i, title, caption):\n max_value = max(data)\n min_value = min(data)\n bin_width = (max_value - min_value) / float(len(data) - 1)\n n_bins = np.arange(min_value, max_value + bin_width, bin_width)\n\n ax.hist(data[:i], n_bins,\n linewidth=1.2,\n edgecolor=THEME_COLOR,\n color=THEME_COLOR,\n alpha=0.8)\n\n # captioning\n ax, legend = captioning(ax, caption)\n\n ax.set_title(title.format(\"Histogram\"), fontsize=10, loc=\"left\")\n # ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Frequency\")\n ax.tick_params(axis='x', colors=AIDED_COLOR)\n ax.tick_params(axis='y', colors=AIDED_COLOR)\n return ax\n\n\ndef single_density(ax, data, i, title, caption):\n density = scipy.stats.gaussian_kde(data[:i])\n x = np.linspace(min(data), max(data), 500)\n ax.plot(x, density(x), color=THEME_COLOR)\n ax.fill_between(x, density(x), 0, facecolor=THEME_COLOR, alpha=0.5)\n\n # captioning\n ax, legend = captioning(ax, caption)\n\n ax.set_title(title.format(\"Density\"), fontsize=10, loc=\"left\")\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Density\")\n ax.tick_params(axis='x', colors=AIDED_COLOR)\n ax.tick_params(axis='y', colors=AIDED_COLOR)\n return ax\n"
] |
[
[
"numpy.arange",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
brillouinzone/atmosense-abcgan
|
[
"30b77fd082a55869e58839a4cfbab61a7eab2b13"
] |
[
"src/abcgan/transforms.py"
] |
[
"\"\"\"\nTransforms to and from z-scaled variables.\n\nUses numpy only (no pytorch)\n\"\"\"\nimport numpy as np # noqa\nimport abcgan.constants as const\n\n\ndef encode(data, name):\n \"\"\"\n Encode variables, or just add extra dimension\n\n Parameters\n ----------\n data : np.ndarray\n array of variable values.\n name : str\n name of the variable.\n\n Returns\n -------\n enc : np.ndarray\n array of encoded variables (with an extra dimension in all cases)\n \"\"\"\n if name in const.cyclic_driver:\n wrap_val = const.cyclic_driver[name]\n enc = data % wrap_val\n enc = (enc / wrap_val) * 2.0 * np.pi\n enc = np.stack((np.cos(enc), np.sin(enc)), axis=-1)\n else:\n enc = data[..., None] # add trailing singleton dimension\n\n return enc\n\n\ndef decode(data):\n \"\"\"\n Encode variables, or just add extra dimension\n\n Parameters\n ----------\n data : np.ndarray\n array of feature values.\n\n Returns\n -------\n enc : np.ndarray\n array of encoded variables\n \"\"\"\n curr = 0\n decs = []\n for name in const.driver_names:\n if name in const.cyclic_driver:\n wrap_val = const.cyclic_driver[name]\n cval = data[:, curr]\n sval = data[:, curr + 1]\n theta = np.arctan2(sval, cval)\n dec = theta * wrap_val / 2.0 / np.pi\n dec = dec % wrap_val\n decs.append(dec)\n curr += 2\n else:\n decs.append(data[:, curr])\n curr += 1\n return np.stack(decs, axis=-1)\n\n\ndef compute_valid(bvs):\n # valid only if value within thresholds and if at least one non-zero value\n valid_mask = np.zeros((bvs.shape[0], const.n_bv_feat))\n for i in range(const.n_bv_feat):\n valid_mask[:, i] = ~(((const.bv_thresholds[i][0] > bvs[:, :, i]) |\n (bvs[:, :, i] > const.bv_thresholds[i][1])).any(-1) |\n ((bvs[:, :, i] == 0).all(-1)))\n # valid only if every altitude is valid (skip first for now)\n valid_mask = valid_mask.all(-1)\n return valid_mask\n\n\ndef scale_driver(drivers):\n \"\"\"\n Return a scaled version of the drivers.\n\n Parameters\n --------------\n drivers: np.ndarray\n n_samples x n_driver\n\n Returns\n --------------\n driver_feat: np.ndarray\n n_samples x n_driver_feat\n \"\"\"\n drivers = np.hstack([\n encode(drivers[:, i], n)\n for i, n in enumerate(const.driver_names)\n ])\n drivers = np.log(1 + drivers)\n driver_feat = (drivers - const.driver_mu) / const.driver_sigma\n return driver_feat\n\n\ndef scale_bv(bvs):\n \"\"\"\n Return a scaled version of the drivers.\n\n Parameters\n --------------\n bvs: np.ndarray\n n_samples x n_bv\n\n Returns\n --------------\n bv_feat: np.ndarray\n n_samples x n_bv_feat\n \"\"\"\n valid_mask = compute_valid(bvs)\n bvs = np.log(1 + bvs)\n bv_feat = (bvs - const.bv_mu) / const.bv_sigma\n # pad bvs to max_alt if needed\n if bv_feat.shape[1] < const.max_alt:\n pad_alt = const.max_alt - bv_feat.shape[1]\n bv_feat = np.pad(bv_feat,\n ((0, 0), (0, pad_alt), (0, 0)),\n constant_values=np.nan)\n elif bv_feat.shape[1] > const.max_alt:\n bv_feat = bv_feat[:, :const.max_alt, :]\n return bv_feat, valid_mask\n\n\ndef get_driver(driver_feat):\n \"\"\"\n Invert featurization to recover driving parameters.\n\n Parameters\n --------------\n drivers: np.ndarray\n n_samples x n_driver\n\n Returns\n --------------\n scaled_feat: np.ndarray\n n_samples x n_driver_feat\n \"\"\"\n drivers = const.driver_sigma * driver_feat + const.driver_mu\n drivers = np.exp(drivers) - 1.0\n drivers = decode(drivers)\n return drivers\n\n\ndef get_bv(bv_feat):\n \"\"\"\n Invert featurization to recover bvs.\n\n Parameters\n --------------\n bvs: np.ndarray\n n_samples x n_bv\n\n Returns\n --------------\n scaled_feat: 
np.ndarray\n n_samples x n_bv_feat\n \"\"\"\n bvs = const.bv_sigma * bv_feat + const.bv_mu\n bvs = np.exp(bvs) - 1.0\n return bvs\n"
] |
[
[
"numpy.log",
"numpy.pad",
"numpy.cos",
"numpy.stack",
"numpy.sin",
"numpy.arctan2",
"numpy.exp",
"numpy.zeros"
]
] |
pavelzw/pyWATTS
|
[
"423f5eba7a54b4ced0876454e2f24a1840210076"
] |
[
"tests/unit/modules/test_root_mean_squared_error.py"
] |
[
"import unittest\n\nimport pytest\nimport xarray as xr\nimport pandas as pd\nfrom pywatts.core.exceptions.input_not_available import InputNotAvailable\n\nfrom pywatts.modules.root_mean_squared_error import RmseCalculator\nimport numpy as np\n\n\nclass TestRMSECalculator(unittest.TestCase):\n\n def setUp(self) -> None:\n self.rmse_calculator = RmseCalculator()\n\n def tearDown(self) -> None:\n self.rmse_calculator = None\n\n def test_get_params(self):\n self.assertEqual(self.rmse_calculator.get_params(),\n {\"offset\" : 0})\n\n\n def test_set_params(self):\n self.rmse_calculator.set_params(offset=24)\n self.assertEqual(self.rmse_calculator.get_params(),\n {\"offset\": 24})\n\n def test_transform(self):\n self.rmse_calculator.set_params()\n\n time = pd.to_datetime(['2015-06-03 00:00:00', '2015-06-03 01:00:00',\n '2015-06-03 02:00:00', '2015-06-03 03:00:00',\n '2015-06-03 04:00:00'])\n\n result_time = pd.to_datetime(['2015-06-03 04:00:00'])\n\n test_data = xr.Dataset({\"testCol\": (\"time\", xr.DataArray([-2, -1, 0, 1, 2])),\n \"predictCol1\": (\"time\", xr.DataArray([2, -3, 3, 1, -2])),\n \"predictCol2\": (\"time\", xr.DataArray([4, 4, 3, -2, 1])), \"time\": time})\n\n test_result = self.rmse_calculator.transform(y=test_data['testCol'], gt=test_data['testCol'],\n pred1=test_data['predictCol1'],\n pred2=test_data['predictCol2'])\n\n expected_result = xr.DataArray(np.array([[0.0, 3.0, 4.0]]),\n coords={\"time\": result_time, \"predictions\": [\"gt\", \"pred1\", \"pred2\"]},\n dims=[\"time\", \"predictions\"])\n\n xr.testing.assert_equal(test_result, expected_result)\n\n def test_transform_without_predictions(self):\n self.rmse_calculator.set_params()\n\n time = pd.to_datetime(['2015-06-03 00:00:00', '2015-06-03 01:00:00',\n '2015-06-03 02:00:00', '2015-06-03 03:00:00',\n '2015-06-03 04:00:00'])\n\n test_data = xr.Dataset({\"testCol\": (\"time\", xr.DataArray([-2, -1, 0, 1, 2])),\n \"predictCol1\": (\"time\", xr.DataArray([2, -3, 3, 1, -2])),\n \"predictCol2\": (\"time\", xr.DataArray([4, 4, 3, -2, 1])), \"time\": time})\n\n with pytest.raises(InputNotAvailable) as e_info:\n self.rmse_calculator.transform(y=test_data['testCol'])\n\n self.assertEqual(e_info.value.message,\n \"No predictions are provided as input for the RMSE Calculator. You should add the predictions \"\n \"by a seperate key word arguments if you add the RMSECalculator to the pipeline.\")\n"
] |
[
[
"numpy.array",
"pandas.to_datetime"
]
] |
shibaji7/Tdiff_Validation
|
[
"0e143a53763ea4eb965760c83239b5232326d91e"
] |
[
"py/get_fit_data.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"get_fit_data.py: utility module to fetch fitacf<v> level data.\"\"\"\n\n__author__ = \"Chakraborty, S.\"\n__copyright__ = \"Copyright 2020, SuperDARN@VT\"\n__credits__ = []\n__license__ = \"MIT\"\n__version__ = \"1.0.\"\n__maintainer__ = \"Chakraborty, S.\"\n__email__ = \"[email protected]\"\n__status__ = \"Research\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport glob\nimport bz2\nimport pydarnio as pydarn\nfrom loguru import logger\n\nimport copy\n\nclass Gate(object):\n \"\"\"Class object to hold each range cell value\"\"\"\n\n def __init__(self, bm, i, params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"v_e\"], gflg_type=-1):\n \"\"\"\n initialize the parameters which will be stored\n bm: beam object\n i: index to store\n params: parameters to store\n \"\"\"\n for p in params:\n if len(getattr(bm, p)) > i : setattr(self, p, getattr(bm, p)[i])\n else: setattr(self, p, np.nan)\n if gflg_type >= 0 and len(getattr(bm, \"gsflg\")[gflg_type]) > 0: setattr(self, \"gflg\", getattr(bm, \"gsflg\")[gflg_type][i])\n return\n\nclass Beam(object):\n \"\"\"Class to hold one beam object\"\"\"\n\n def __init__(self):\n \"\"\" initialize the instance \"\"\"\n return\n\n def set(self, time, d, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\"], k=None):\n \"\"\"\n Set all parameters\n time: datetime of beam\n d: data dict for other parameters\n s_param: other scalar params\n v_params: other list params\n \"\"\"\n for p in s_params:\n if p in d.keys():\n if p == \"scan\" and d[p] != 0: setattr(self, p, 1)\n else: setattr(self, p, d[p]) if k is None else setattr(self, p, d[p][k])\n else: setattr(self, p, None)\n for p in v_params:\n if p in d.keys(): setattr(self, p, d[p])\n else: setattr(self, p, [])\n self.time = time\n return\n \n def set_nc(self, time, d, i, s_params, v_params):\n \"\"\"\n Set all parameters\n time: datetime of beam\n d: data dict for other parameters\n s_param: other scalar params\n v_params: other list params\n \"\"\"\n for p in s_params:\n if p in d.keys(): setattr(self, p, d[p][i])\n else: setattr(self, p, None)\n for p in v_params:\n if p in d.keys(): \n setattr(self, p, np.array(d[p])[i,:])\n if \"slist\" not in v_params and p==\"v\": setattr(self, \"slist\", np.argwhere(~np.isnan(getattr(self, \"v\"))))\n setattr(self, p, getattr(self, p)[~np.isnan(getattr(self, p))])\n else: setattr(self, p, [])\n self.time = time\n return\n \n def copy(self, bm):\n \"\"\" Copy all parameters \"\"\"\n for p in bm.__dict__.keys(): setattr(self, p, getattr(bm, p))\n return\n\n def gs_estimation(self):\n \"\"\"\n Estimate GS flag using different criterion\n Cases -\n 0. Sundeen et al. |v| + w/3 < 30 m/s\n 1. Blanchard et al. |v| + 0.4w < 60 m/s\n 2. Blanchard et al. [2009] |v| - 0.139w + 0.00113w^2 < 33.1 m/s\n \"\"\"\n self.gsflg = {}\n if len(self.v) > 0 and len(self.w_l) > 0: self.gsflg[0] = ((np.abs(self.v) + self.w_l/3.) < 30.).astype(int)\n if len(self.v) > 0 and len(self.w_l) > 0: self.gsflg[1] = ((np.abs(self.v) + self.w_l*0.4) < 60.).astype(int)\n if len(self.v) > 0 and len(self.w_l) > 0: self.gsflg[2] = ((np.abs(self.v) - 0.139*self.w_l + 0.00113*self.w_l**2) < 33.1).astype(int)\n # Modified defination by S. 
Chakraborty: {W-[50-(0.7*(V+5)**2)]} < 0\n self.gsflg[3] = ((np.array(self.w_l)-(50-(0.7*(np.array(self.v)+5)**2))<0)).astype(int)\n return\n \nclass Scan(object):\n \"\"\"Class to hold one scan (multiple beams)\"\"\"\n\n def __init__(self, stime=None, etime=None, s_mode=\"normal\"):\n \"\"\"\n initialize the parameters which will be stored\n stime: start time of scan\n etime: end time of scan\n s_mode: scan type\n \"\"\"\n self.stime = stime\n self.etime = etime\n self.s_mode = s_mode\n self.beams = []\n return\n\n def update_time(self):\n \"\"\"\n Update stime and etime of the scan.\n up: Update average parameters if True\n \"\"\"\n self.stime = min([b.time for b in self.beams])\n self.etime = max([b.time for b in self.beams])\n self._populate_avg_params()\n return\n\n def _populate_avg_params(self):\n \"\"\"\n Polulate average parameetrs\n \"\"\"\n f, nsky = [], []\n for b in self.beams:\n f.append(getattr(b, \"tfreq\"))\n nsky.append(getattr(b, \"noise.sky\"))\n self.f, self.nsky = np.mean(f), np.mean(nsky)\n return\n \nclass FetchData(object):\n \"\"\"Class to fetch data from fitacf files for one radar for atleast a day\"\"\"\n\n def __init__(self, rad, date_range, ftype=\"fitacf\", files=None, verbose=True):\n \"\"\"\n initialize the vars\n rad = radar code\n date_range = [ start_date, end_date ]\n files = List of files to load the data from\n e.x : rad = \"sas\"\n date_range = [\n datetime.datetime(2017,3,17),\n datetime.datetime(2017,3,18),\n ]\n \"\"\"\n self.rad = rad\n self.date_range = date_range\n self.files = files\n self.verbose = verbose\n self.regex = \"/sd-data/{year}/{ftype}/{rad}/{date}.*{ftype}*.bz2\"\n self.ftype = ftype\n if (rad is not None) and (date_range is not None) and (len(date_range) == 2):\n self._create_files()\n return\n \n def _create_files(self):\n \"\"\"\n Create file names from date and radar code\n \"\"\"\n if self.files is None: self.files = []\n reg_ex = self.regex\n days = (self.date_range[1] - self.date_range[0]).days + 2\n for d in range(-1,days):\n e = self.date_range[0] + dt.timedelta(days=d)\n fnames = glob.glob(reg_ex.format(year=e.year, rad=self.rad, ftype=self.ftype, date=e.strftime(\"%Y%m%d\")))\n fnames.sort()\n for fname in fnames:\n tm = fname.split(\".\")[1]\n sc = fname.split(\".\")[2]\n d0 = dt.datetime.strptime(fname.split(\".\")[0].split(\"/\")[-1] + tm + sc, \"%Y%m%d%H%M%S\")\n d1 = d0 + dt.timedelta(hours=2)\n if (self.date_range[0] <= d0) and (d0 <= self.date_range[1]): self.files.append(fname)\n elif (d0 <= self.date_range[0] <=d1): self.files.append(fname)\n self.files = list(set(self.files))\n self.files.sort()\n return\n \n def _parse_data(self, data, s_params, v_params, by, scan_prop):\n \"\"\"\n Parse data by data type\n data: list of data dict\n params: parameter list to fetch\n by: sort data by beam or scan\n scan_prop: provide scan properties if by='scan'\n {\"s_mode\": type of scan, \"s_time\": duration in min}\n \"\"\"\n _b, _s = [], []\n if self.verbose: logger.info(\"Started converting to beam data %02d.\"%len(data))\n for d in data:\n time = dt.datetime(d[\"time.yr\"], d[\"time.mo\"], d[\"time.dy\"], d[\"time.hr\"], d[\"time.mt\"], d[\"time.sc\"], d[\"time.us\"])\n if time >= self.date_range[0] and time <= self.date_range[1]:\n bm = Beam()\n bm.set(time, d, s_params, v_params)\n _b.append(bm)\n if self.verbose: logger.info(\"Converted to beam data.\")\n if by == \"scan\":\n if self.verbose: logger.info(\"Started converting to scan data.\")\n scan, sc = 0, Scan(None, None, scan_prop[\"s_mode\"])\n 
sc.beams.append(_b[0])\n for _ix, d in enumerate(_b[1:]):\n if d.scan == 1 and d.time != _b[_ix].time:\n sc.update_time()\n _s.append(sc)\n sc = Scan(None, None, scan_prop[\"s_mode\"])\n sc.beams.append(d)\n else: sc.beams.append(d)\n _s.append(sc)\n if self.verbose: logger.info(\"Converted to scan data.\")\n return _b, _s\n \n def convert_to_pandas(self, beams, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \n \"time\", \"rsep\", \"frang\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"]):\n \"\"\"\n Convert the beam data into dataframe\n \"\"\"\n _o = dict(zip(s_params+v_params, ([] for _ in s_params+v_params)))\n for b in beams:\n l = len(getattr(b, \"slist\"))\n for p in v_params:\n _o[p].extend(getattr(b, p))\n for p in s_params:\n _o[p].extend([getattr(b, p)]*l)\n L = len(_o[\"slist\"])\n for p in s_params+v_params:\n if len(_o[p]) < L:\n l = len(_o[p])\n _o[p].extend([np.nan]*(L-l))\n return pd.DataFrame.from_records(_o)\n \n def scans_to_pandas(self, scans, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"time\", \"channel\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"], start_scnum=0):\n \"\"\"\n Convert the scan data into dataframe\n \"\"\"\n new_cols = [\"scnum\",\"sbnum\"]\n _o = dict(zip(s_params+v_params+new_cols, ([] for _ in s_params+v_params+new_cols)))\n for idn, s in enumerate(scans):\n for idh, b in enumerate(s.beams):\n l = len(getattr(b, \"slist\"))\n for p in v_params:\n _o[p].extend(getattr(b, p))\n for p in s_params:\n _o[p].extend([getattr(b, p)]*l)\n _o[\"scnum\"].extend([idn + start_scnum]*l)\n _o[\"sbnum\"].extend([idh]*l)\n L = len(_o[\"slist\"])\n for p in s_params+v_params+new_cols:\n if len(_o[p]) < L:\n l = len(_o[p])\n _o[p].extend([np.nan]*(L-l))\n return pd.DataFrame.from_records(_o)\n \n def pandas_to_beams(self, df, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"time\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"]):\n \"\"\"\n Convert the dataframe to beam\n \"\"\"\n beams = []\n for bm in np.unique(df.bmnum):\n o = df[df.bmnum==bm]\n d = o.to_dict(orient=\"list\")\n for p in s_params:\n d[p] = d[p][0]\n b = Beam()\n b.set(o.time.tolist()[0], d, s_params, v_params)\n beams.append(b)\n return beams\n \n def pandas_to_scans(self, df, smode, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"time\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"]):\n \"\"\"\n Convert the dataframe to scans\n \"\"\"\n bmax = 0\n scans = []\n for sn in np.unique(df.scnum):\n o = df[df.scnum==sn]\n beams = []\n for bn in np.unique(o.sbnum):\n ox = o[o.sbnum==bn]\n b = self.pandas_to_beams(ox, s_params, v_params)\n beams.extend(b)\n bmax = len(beams) if bmax < len(beams) else bmax\n sc = Scan(None, None, smode)\n sc.beams.extend(beams)\n sc.update_time()\n scans.append(sc)\n mscans = []\n if len(scans[0].beams) + len(scans[1].beams) == len(scans[2].beams):\n sc = Scan(None, None, scans[0].s_mode)\n sc.beams.extend(scans[0].beams)\n sc.beams.extend(scans[1].beams)\n sc.update_time()\n mscans.append(sc)\n for i in range(2,len(scans)):\n mscans.append(scans[i])\n scans = copy.copy(mscans) if len(mscans) > 0 else scans\n return scans, bmax\n \n def fetch_data(self, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"intt.sc\", \"intt.us\",\\\n \"mppul\", \"nrang\", \"rsep\", \"cp\", \"frang\", \"smsep\", \"lagfr\", 
\"channel\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"],\n by=\"beam\", scan_prop={\"s_time\": 1, \"s_mode\": \"normal\"}):\n \"\"\"\n Fetch data from file list and return the dataset\n params: parameter list to fetch\n by: sort data by beam or scan\n scan_prop: provide scan properties if by='scan'\n {\"s_mode\": type of scan, \"s_time\": duration in min}\n \"\"\"\n data = []\n for f in self.files:\n with bz2.open(f) as fp:\n fs = fp.read()\n if self.verbose: logger.info(f\"Read file - {f}\")\n reader = pydarn.SDarnRead(fs, True)\n records = reader.read_fitacf()\n data += records\n if by is not None: data = self._parse_data(data, s_params, v_params, by, scan_prop)\n return data\n \nif __name__ == \"__main__\":\n fdata = FetchData( \"sas\", [dt.datetime(2015,3,17,3),\n dt.datetime(2015,3,17,3,20)] )\n fdata.fetch_data()\n fdata.fetch_data(by=\"scan\", scan_prop={\"s_time\": 2, \"s_mode\": \"themis\"})"
] |
[
[
"numpy.abs",
"numpy.unique",
"numpy.mean",
"pandas.DataFrame.from_records",
"numpy.array"
]
] |
vishwasourab/quadcopter_project
|
[
"67b65e41a151fc2c24dd3905f33b73209157b52e"
] |
[
"task.py"
] |
[
"import numpy as np\nfrom physics_sim import PhysicsSim\n\nclass Task():\n \"\"\"Task (environment) that defines the goal and provides feedback to the agent.\"\"\"\n def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n \"\"\"Initialize a Task object.\n Params\n ======\n init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles\n init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions\n init_angle_velocities: initial radians/second for each of the three Euler angles\n runtime: time limit for each episode\n target_pos: target/goal (x,y,z) position for the agent\n \"\"\"\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 0\n self.action_high = 900\n self.action_size = 4\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.]) \n\n def get_reward(self):\n \"\"\"Uses current pose of sim to return reward.\"\"\"\n reward = 1.-.17*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n if reward>1:\n reward = 1\n if reward<-1:\n reward = -1\n return reward\n\n def step(self, rotor_speeds):\n \"\"\"Uses action to obtain next state, reward, done.\"\"\"\n reward = 0\n pose_all = []\n for _ in range(self.action_repeat):\n done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities\n reward += self.get_reward() \n pose_all.append(self.sim.pose)\n next_state = np.concatenate(pose_all)\n return next_state, reward, done\n\n def reset(self):\n \"\"\"Reset the sim to start a new episode.\"\"\"\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state"
] |
[
[
"numpy.concatenate",
"numpy.array"
]
] |
csquigley/dcgan
|
[
"985280b5c6875062f728afe37cfb76d93a58dfb1"
] |
[
"dcgan.py"
] |
[
"from __future__ import print_function\n#%matplotlib inline\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom IPython.display import HTML\n\n# Set random seed for reproducibility\n\nmanualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\n\n# Root directory for dataset\ndataroot = \"data/celeba\"\n\n# Number of workers for dataloader\nworkers = 2\n\n# Batch size during training\nbatch_size = 128\n\n# Spatial size of training images. All images will be resized to this\n# size using a transformer.\nimage_size = 128\n\n# Number of channels in the training images. For color images this is 3\nnc = 3\n\n# Size of z latent vector (i.e. size of generator input)\nnz = 100\n\n# Size of feature maps in generator\nngf = 64\n\n# Size of feature maps in discriminator\nndf = 64\n\n# Number of training epochs\nnum_epochs = 5\n\n# Learning rate for optimizers\nlr = 0.0002\n\n# Beta1 hyperparam for Adam optimizers\nbeta1 = 0.5\n\n# Number of GPUs available. Use 0 for CPU mode.\nngpu = 2\n\n# We can use an image folder dataset the way we have it setup.\n# Create the dataset\ndataset = dset.ImageFolder(root=dataroot,\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n# Create the dataloader\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=True, num_workers=workers)\n\n# Decide which device we want to run on\ndevice = torch.device(\"cuda:0\" if (torch.cuda.is_available() and ngpu > 0) else \"cpu\")\n\n# Plot some training images\nreal_batch = next(iter(dataloader))\nplt.figure(figsize=(8,8))\nplt.axis(\"off\")\nplt.title(\"Training Images\")\nplt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(),(1,2,0)))\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n# Generator Code\n\nclass Generator(nn.Module):\n def __init__(self, ngpu):\n super(Generator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d( ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. 
(nc) x 64 x 64\n )\n\n def forward(self, input):\n return self.main(input)\n# Create the generator\nnetG = Generator(ngpu).to(device)\n\n# Handle multi-gpu if desired\nif (device.type == 'cuda') and (ngpu > 1):\n netG = nn.DataParallel(netG, list(range(ngpu)))\n\n# Apply the weights_init function to randomly initialize all weights\n# to mean=0, stdev=0.02.\nnetG.apply(weights_init)\n\n# Print the model\nprint(netG)\nclass Discriminator(nn.Module):\n def __init__(self, ngpu):\n super(Discriminator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n return self.main(input)\n\n# Create the Discriminator\nnetD = Discriminator(ngpu).to(device)\n\n# Handle multi-gpu if desired\nif (device.type == 'cuda') and (ngpu > 1):\n netD = nn.DataParallel(netD, list(range(ngpu)))\n\n# Apply the weights_init function to randomly initialize all weights\n# to mean=0, stdev=0.2.\nnetD.apply(weights_init)\n\n# Print the model\nprint(netD)\n# Initialize BCELoss function\ncriterion = nn.BCELoss()\n\n# Create batch of latent vectors that we will use to visualize\n# the progression of the generator\nfixed_noise = torch.randn(64, nz, 1, 1, device=device)\n\n# Establish convention for real and fake labels during training\nreal_label = 1.\nfake_label = 0.\n\n# Setup Adam optimizers for both G and D\noptimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))\n\n# Training Loop\n\n# Lists to keep track of progress\nimg_list = []\nG_losses = []\nD_losses = []\niters = 0\n\nprint(\"Starting Training Loop...\")\n# For each epoch\nfor epoch in range(num_epochs):\n # For each batch in the dataloader\n for i, data in enumerate(dataloader, 0):\n\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n ## Train with all-real batch\n netD.zero_grad()\n # Format batch\n real_cpu = data[0].to(device)\n b_size = real_cpu.size(0)\n label = torch.full((b_size,), real_label, dtype=torch.float, device=device)\n # Forward pass real batch through D\n output = netD(real_cpu).view(-1)\n # Calculate loss on all-real batch\n errD_real = criterion(output, label)\n # Calculate gradients for D in backward pass\n errD_real.backward()\n D_x = output.mean().item()\n\n ## Train with all-fake batch\n # Generate batch of latent vectors\n noise = torch.randn(b_size, nz, 1, 1, device=device)\n # Generate fake image batch with G\n fake = netG(noise)\n label.fill_(fake_label)\n # Classify all fake batch with D\n output = netD(fake.detach()).view(-1)\n # Calculate D's loss on the all-fake batch\n errD_fake = criterion(output, label)\n # Calculate the gradients for this batch, accumulated (summed) with previous gradients\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n # Compute error of D as sum over the fake and the real 
batches\n errD = errD_real + errD_fake\n # Update D\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n # Since we just updated D, perform another forward pass of all-fake batch through D\n output = netD(fake).view(-1)\n # Calculate G's loss based on this output\n errG = criterion(output, label)\n # Calculate gradients for G\n errG.backward()\n D_G_z2 = output.mean().item()\n # Update G\n optimizerG.step()\n\n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (epoch, num_epochs, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n\n # Save Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n # Check how the generator is doing by saving G's output on fixed_noise\n if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):\n with torch.no_grad():\n fake = netG(fixed_noise).detach().cpu()\n img_list.append(vutils.make_grid(fake, padding=2, normalize=True))\n\n iters += 1\n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.randn",
"torch.nn.Sigmoid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"torch.full",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.BCELoss",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.manual_seed",
"torch.nn.Tanh",
"torch.nn.ReLU"
]
] |
Saran-nns/cunumeric
|
[
"3472109aa3fd6a9d42409586efd39dcb5924e0b5"
] |
[
"tests/test_tools/generators.py"
] |
[
"# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom itertools import permutations, product\n\nimport numpy as np\n\nfrom legate.core import LEGATE_MAX_DIM\n\n\ndef scalar_gen(lib, val):\n \"\"\"\n Generates different kinds of scalar-like arrays that contain the given\n value.\n \"\"\"\n # pure scalar values\n yield lib.array(val)\n # ()-shape arrays\n yield lib.full((), val)\n for ndim in range(1, LEGATE_MAX_DIM + 1):\n # singleton arrays\n yield lib.full(ndim * (1,), val)\n # singleton slices of larger arrays\n yield lib.full(ndim * (5,), val)[ndim * (slice(1, 2),)]\n\n\ndef mk_0to1_array(lib, shape):\n \"\"\"\n Constructs an array of the required shape, containing (in C order)\n sequential real values uniformly spaced in the range (0,1].\n \"\"\"\n size = np.prod(shape)\n if size == 1:\n # Avoid zeros, since those are more likely to cause arithmetic issues\n # or produce degenerate outputs.\n return lib.full(shape, 0.5)\n return mk_seq_array(lib, shape) / size\n\n\ndef mk_seq_array(lib, shape):\n \"\"\"\n Constructs an array of the required shape, containing (in C order)\n sequential integer values starting from 1.\n \"\"\"\n arr = lib.zeros(shape, dtype=int)\n size = np.prod(shape)\n # Don't return the reshaped array directly, instead use it to update\n # the contents of an existing array of the same shape, thus producing a\n # Store without transformations, that has been tiled in the natural way\n arr[:] = lib.arange(1, size + 1).reshape(shape)\n return arr\n\n\ndef broadcasts_to(lib, tgt_shape, mk_array=mk_0to1_array):\n \"\"\"\n Generates a collection of arrays that will broadcast to the given shape.\n \"\"\"\n past_first = False\n for mask in product([True, False], repeat=len(tgt_shape)):\n if not past_first:\n past_first = True\n continue\n src_shape = tuple(\n d if keep else 1 for (d, keep) in zip(tgt_shape, mask)\n )\n yield mk_array(lib, src_shape)\n\n\ndef permutes_to(lib, tgt_shape, mk_array=mk_0to1_array):\n \"\"\"\n Generates all the possible ways that an array can be transposed to meet a\n given shape.\n\n All arrays returned will have the same shape, `tgt_shape`, but are the\n result of tranposing a base array of a different shape.\n \"\"\"\n past_first = False\n for axes in permutations(range(len(tgt_shape))):\n if not past_first:\n past_first = True\n continue\n src_shape = [-1] * len(tgt_shape)\n for (i, j) in enumerate(axes):\n src_shape[j] = tgt_shape[i]\n src_shape = tuple(src_shape)\n yield mk_array(lib, src_shape).transpose(axes)\n"
] |
[
[
"numpy.prod"
]
] |
keithnull/EE101
|
[
"298f2c1dc3c1d6437815525632c0f56b13602f3d"
] |
[
"Lab4/Code/DataPreprocess/preprocess.py"
] |
[
"# coding:utf8\n\nfrom load_data import load_data, timer\nfrom feature import FeatureExtracter\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nimport pymysql\nfrom itertools import combinations\n\n\n@timer\ndef train_SVM(X_train, y_train):\n model = SVC(kernel=\"linear\", C=1, gamma=0.125)\n #model = LogisticRegression()\n print(\"Start to train the SVM model.\")\n model.fit(X_train, y_train)\n print(\"Finished.\")\n return model\n\n\ndef score_SVM(model, X_test, y_test):\n return model.score(X_test, y_test)\n\n\ndef connect_to_db(user, password, db, host=\"localhost\", port=3306, charset=\"utf8\"):\n connection = pymysql.connect(host=host, user=user, password=password, db=db, port=port, charset=charset)\n cursor = connection.cursor()\n return connection, cursor\n\n\ndef process_one_paper(paper_id, feature_extracter, db_cursor, model):\n query_for_authors = \"\"\"SELECT authorid FROM paper_author_affiliation WHERE paperid=\"{0}\";\"\"\".format(paper_id)\n try:\n db_cursor.execute(query_for_authors)\n authors = [row[0] for row in db_cursor.fetchall()]\n authors.sort() # notice\n # print(authors)\n # input()\n for pair in combinations(authors, 2):\n author1, author2 = pair\n query_for_existence = \"\"\"SELECT * FROM author_relationship WHERE authorid1=\"{0}\" AND authorid2=\"{1}\";\"\"\".format(author1, author2)\n existence = db_cursor.execute(query_for_existence)\n # print(existence)\n if not existence:\n feature = feature_extracter.extract_feature(author1, author2)\n # print(feature)\n relation = model.predict([feature])[0]\n # print(relation)\n if(relation == 0): # predict whether author2 is the instructor of author1\n feature = feature_extracter.extract_feature(author2, author1)\n relation = -model.predict([feature])[0]\n query_to_insert = \"\"\"INSERT INTO author_relationship VALUES(\"{0}\",\"{1}\",{2},{3})\"\"\".format(author1, author2, 1, relation)\n db_cursor.execute(query_to_insert)\n else:\n # print(\"HERE!!!!!!\")\n times = db_cursor.fetchone()[2]+1\n # print(times)\n query_to_update = \"\"\"\n \t\t\tUPDATE author_relationship SET cooperationtimes={0} WHERE authorid1=\"{1}\" AND authorid2=\"{2}\";\"\"\".format(times, author1, author2)\n db_cursor.execute(query_to_update)\n\n except Exception as e:\n print(e)\n\n\ndef get_all_papers(db_cursor):\n query = \"\"\"SELECT paperid FROM papers;\"\"\"\n try:\n result_num = db_cursor.execute(query)\n print(\"Result num:{0}\".format(result_num))\n for i in range(result_num):\n yield db_cursor.fetchone()[0]\n except:\n print(\"ERROR when trying to get all papers.\")\n\n\n@timer\ndef main():\n X_train, y_train, X_test, y_test = load_data(\"\")\n model = train_SVM(X_train, y_train)\n feature_extracter = FeatureExtracter()\n feature_extracter.connect(\"root\", \"\", \"academicdb\")\n db_connection1, db_cursor1 = connect_to_db(\"root\", \"\", \"academicdb\")\n db_connection2, db_cursor2 = connect_to_db(\"root\", \"\", \"academicdb\")\n cnt = 0\n for paper in get_all_papers(db_cursor1):\n try:\n # print(paper)\n cnt += 1\n print(\"\\r{0}\".format(cnt), end=\"\")\n process_one_paper(paper, feature_extracter, db_cursor2, model)\n if(cnt % 100 == 0):\n db_connection2.commit()\n\n except:\n print(\"ERROR\")\n\n\nif __name__ == '__main__':\n main()\n\n\n'''\nStart to load training data from feature file.\nRuntime:0.239s\nStart to load testinging data from feature file.\nRuntime:0.147s\nStart to train the SVM model.\nFinished.\nRuntime:4.348s\nResult num:98215\n98215Runtime:2473.511s\n'''\n"
] |
[
[
"sklearn.svm.SVC"
]
] |
k-ivey/TextAttack
|
[
"47d15acea90bf92e6a7f19200a59da29e74731e6"
] |
[
"textattack/constraints/grammaticality/language_models/learning_to_write/rnn_model.py"
] |
[
"from torch import nn as nn\nfrom torch.autograd import Variable\n\nfrom .adaptive_softmax import AdaptiveSoftmax\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\n\n Based on official pytorch examples\n \"\"\"\n\n def __init__(\n self,\n rnn_type,\n ntoken,\n ninp,\n nhid,\n nlayers,\n cutoffs,\n proj=False,\n dropout=0.5,\n tie_weights=False,\n lm1b=False,\n ):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n\n self.lm1b = lm1b\n\n if rnn_type == \"GRU\":\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)\n else:\n try:\n nonlinearity = {\"RNN_TANH\": \"tanh\", \"RNN_RELU\": \"relu\"}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(\n ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout\n )\n\n self.proj = proj\n\n if ninp != nhid and proj:\n self.proj_layer = nn.Linear(nhid, ninp)\n\n # if tie_weights:\n # if nhid != ninp and not proj:\n # raise ValueError('When using the tied flag, nhid must be equal to emsize')\n # self.decoder = nn.Linear(ninp, ntoken)\n # self.decoder.weight = self.encoder.weight\n # else:\n # if nhid != ninp and not proj:\n # if not lm1b:\n # self.decoder = nn.Linear(nhid, ntoken)\n # else:\n # self.decoder = adapt_loss\n # else:\n # self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n if proj:\n self.softmax = AdaptiveSoftmax(ninp, cutoffs)\n else:\n self.softmax = AdaptiveSoftmax(nhid, cutoffs)\n\n self.full = False\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n # self.decoder.bias.data.fill_(0)\n # self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n\n if \"proj\" in vars(self):\n if self.proj:\n output = self.proj_layer(output)\n\n output = output.view(output.size(0) * output.size(1), output.size(2))\n\n if self.full:\n decode = self.softmax.log_prob(output)\n else:\n decode = self.softmax(output)\n\n return decode, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.RNN",
"torch.nn.Embedding"
]
] |
sinotec2/Focus-on-Air-Quality
|
[
"eac84651eaf6300a16f25a4d76b97a7f53454035"
] |
[
"utilities/CGI-pythons/terrainXYINC.py"
] |
[
"\nimport twd97\nLatitude_Pole, Longitude_Pole = 23.61000, 120.990\nXcent, Ycent = twd97.fromwgs84(Latitude_Pole, Longitude_Pole)\n\ndef twdIJ1(xv,yv):\n return (int((xv-Xcent)/1000)+int(83*3/2))*1000+int((yv-Ycent)/1000)+int(137*3/2)\n\ndef terrainXYINC(pth,STR):\n from pandas import read_csv, DataFrame\n import os\n\n WEB='/Library/WebServer/Documents/'\n CGI='/Library/WebServer/CGI-Executables/isc/'\n OUT='>> '+pth+'isc.out'\n geninp='/opt/local/bin/gen_inp.py'\n WAITM='/opt/local/bin/wait_map.cs'\n CSV=WEB+'terr_results/TWN_1X1REC.csv'\n reg='GRIDCART'\n \n\n inam=STR.index(reg)\n inp=STR[(inam+len(reg)):].split()\n snamo=inp[0]\n fname=pth+snamo+'.zip'\n\n #read the others\n inc='XYINC'\n iinc=STR.index(inc)\n inp=STR[(iinc+len(inc)):].lstrip()\n x0,nx,dx,y0,ny,dy=(int(float(inp.split()[i])) for i in range(6))\n inp0='%s %s %s %s %s %s' %(x0,nx,dx,y0,ny,dy)\n inp=inp0.replace(' ','_')\n df=read_csv(CSV)\n #df['inp']=['%s %s %s %s %s %s' %(i,j,k,l,m,n) for i,j,k,l,m,n in zip(df.x0,df.nx,df.dx,df.y0,df.ny,df.dy)]\n if inp not in list(df.inp) or not os.path.isfile(WEB+'terr_results/'+inp+'/'+snamo+'.REC'):\n x0,nx,dx,y0,ny,dy=(int(float(inp.split('_')[i])) for i in range(6))\n centIJ=str(twdIJ1(x0+nx*dx/2,y0+ny*dy/2))\n pathIJ=centIJ\n path=snamo\n DD={}\n for s in 'pathIJ,centIJ,path,x0,y0,nx,ny,dx,dy,inp'.split(','):\n eval('DD.update({\"'+s+'\":['+s+']})',locals())\n df=df.append(DataFrame(DD),ignore_index=True,sort=True)\n df.drop_duplicates(inplace=True) \n\n cmd ='cd '+pth+';'\n cmd+='sed s/test/'+snamo+'/g '+WEB+'trj_results/aermap.inp_template>aermap.inp;'\n os.system(cmd)\n cmd ='cd '+pth+';'\n cmd+= geninp+' '+pth+snamo+' '+inp0+' >>geninp.out & disown'\n rst=os.system(cmd)\n\n n=90\n while rst==0 and n<100:\n cmd='sleep 5s'\n os.system(cmd)\n if os.path.isfile(fname):break\n n+=1\n\n cmd ='cd '+pth+';'\n cmd+= WAITM+' '+pth+' '+inp+' & disown'\n rst=os.system(cmd)\n df.set_index('pathIJ').to_csv(CSV)\n else:\n terr_path=list(df.loc[df.inp==inp,'path'])[0]\n path=WEB+'terr_results/'+inp+'/'+terr_path\n cmd ='cd '+pth+';'\n cmd+='for i in $(ls '+path+'*);do j=$(echo $i|cut -d\"/\" -f7);ln -f ../../terr_results/'+inp+'/$j .;done'+OUT\n os.system(cmd)\n snamo=terr_path.split('/')[-1].replace('.REC','')\n return snamo\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
mnishida/pymwm
|
[
"820d0a9056982fd37972b0e10f5dad9d1697ed2f"
] |
[
"src/pymwm/cylinder/__init__.py"
] |
[
"from __future__ import annotations\n\nimport cmath\n\nimport numpy as np\nimport psutil\nimport ray\nimport scipy.special as ssp\n\nfrom pymwm.utils import cylinder_utils\nfrom pymwm.waveguide import Database, Sampling, Waveguide\n\nfrom .samples import Samples, SamplesForRay, SamplesLowLoss, SamplesLowLossForRay\n\n\nclass Cylinder(Waveguide):\n \"\"\"A class defining a cylindrical waveguide.\"\"\"\n\n def __init__(self, params):\n \"\"\"Init Cylinder class.\n\n Args:\n params: A dict whose keys and values are as follows:\n 'core': A dict of the setting parameters of the core:\n 'shape': A string indicating the shape of the core.\n 'size': A float indicating the radius of the circular cross\n section [um].\n 'fill': A dict of the parameters of the core Material.\n 'clad': A dict of the parameters of the clad Material.\n 'bounds': A dict indicating the bounds of database.interpolation\n and its keys and values are as follows:\n 'wl_max': A float indicating the maximum wavelength [um]\n 'wl_min': A float indicating the minimum wavelength [um]\n 'wl_imag': A float indicating the maximum value of\n abs(c / f_imag) [um] where f_imag is the imaginary part\n of the frequency.\n 'modes': A dict of the settings for calculating modes:\n 'wl_max': A float indicating the maximum wavelength [um]\n (default: 5.0)\n 'wl_min': A float indicating the minimum wavelength [um]\n (default: 0.4)\n 'wl_imag': A float indicating the maximum value of\n abs(c / f_imag) [um] where f_imag is the imaginary part\n of the frequency. (default: 5.0)\n 'dw': A float indicating frequency interval\n [rad c / 1um]=[2.99792458e14 rad / s]\n (default: 1 / 64).\n 'num_n': An integer indicating the number of orders of\n modes.\n 'num_m': An integer indicating the number of modes in each\n order and polarization.\n 'ls': A list of characters chosen from \"h\" (horizontal\n polarization) and \"v\" (vertical polarization).\n \"\"\"\n super().__init__(params)\n self.u_pec, self.jnu_pec, self.jnpu_pec = self.u_jnu_jnpu_pec(\n self.num_n, self.num_m\n )\n\n def get_alphas(self, alpha_list: list[tuple[str, int, int]]) -> dict:\n alphas: dict = {\"h\": [], \"v\": []}\n for alpha in [(\"E\", 0, m) for m in range(1, self.num_m + 1)]:\n if alpha in alpha_list:\n alphas[\"v\"].append(alpha)\n for alpha in [\n (\"E\", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)\n ]:\n if alpha in alpha_list:\n alphas[\"h\"].append(alpha)\n alphas[\"v\"].append(alpha)\n for alpha in [(\"M\", 0, m) for m in range(1, self.num_m + 1)]:\n if alpha in alpha_list:\n alphas[\"h\"].append(alpha)\n for alpha in [\n (\"M\", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)\n ]:\n if alpha in alpha_list:\n alphas[\"h\"].append(alpha)\n alphas[\"v\"].append(alpha)\n return alphas\n\n def betas_convs_samples(self, params: dict) -> tuple[dict, dict, Samples]:\n im_factor = self.clad.im_factor\n self.clad.im_factor = 1.0\n self.clad_params[\"im_factor\"] = 1.0\n p_modes = params[\"modes\"].copy()\n num_n_0 = p_modes[\"num_n\"]\n num_m_0 = p_modes[\"num_m\"]\n betas: dict = {}\n convs: dict = {}\n success = False\n catalog = Database().load_catalog()\n num_n_max = catalog[\"num_n\"].max()\n num_m_max = catalog[\"num_m\"].max()\n if not np.isnan(num_n_max):\n for num_n, num_m in [\n (n, m)\n for n in range(num_n_0, num_n_max + 1)\n for m in range(num_m_0, num_m_max + 1)\n ]:\n p_modes[\"num_n\"] = num_n\n p_modes[\"num_m\"] = num_m\n smp = Samples(self.r, self.fill_params, self.clad_params, p_modes)\n try:\n betas, convs = 
smp.database.load()\n success = True\n break\n except IndexError:\n continue\n if not success:\n p_modes[\"num_n\"] = num_n_0\n p_modes[\"num_m\"] = num_m_0\n betas, convs, smp = self.do_sampling(p_modes)\n if im_factor != 1.0:\n self.clad.im_factor = im_factor\n self.clad_params[\"im_factor\"] = im_factor\n betas, convs, smp = self.do_sampling_for_im_factor(betas, convs, p_modes)\n return betas, convs, smp\n\n def do_sampling(self, p_modes: dict) -> tuple[dict, dict, Samples]:\n num_n_0 = p_modes[\"num_n\"]\n num_m_0 = p_modes[\"num_m\"]\n smp = Samples(self.r, self.fill_params, self.clad_params, p_modes)\n ray.shutdown()\n try:\n ray.init()\n p_modes_id = ray.put(p_modes)\n pool = ray.util.ActorPool(\n SamplesForRay.remote(\n self.r, self.fill_params, self.clad_params, p_modes_id\n )\n for _ in range(psutil.cpu_count())\n )\n xs_success_wr_list: list[tuple[np.ndarray, np.ndarray]] = list(\n pool.map(lambda a, arg: a.wr_sampling.remote(arg), range(num_n_0))\n )\n num_wr = xs_success_wr_list[0][0].shape[0]\n args = []\n for n in range(num_n_0):\n xs_array, _ = xs_success_wr_list[n]\n for iwr in range(num_wr):\n args.append((n, iwr, xs_array[iwr]))\n xs_success_wi_list: list[tuple[np.ndarray, np.ndarray]] = list(\n pool.map(lambda a, arg: a.wi_sampling.remote(arg), args)\n )\n num_wi = xs_success_wi_list[0][0].shape[0]\n xs_success_list: list[tuple[np.ndarray, np.ndarray]] = []\n for n in range(num_n_0):\n xs_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=complex)\n success_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=bool)\n for iwr in range(num_wr):\n i = num_wr * n + iwr\n xs_i, success_i = xs_success_wi_list[i]\n xs_array[iwr] = xs_i\n success_array[iwr] = success_i\n xs_success_list.append((xs_array, success_array))\n finally:\n ray.shutdown()\n betas, convs = smp.betas_convs(xs_success_list)\n smp.database.save(betas, convs)\n return betas, convs, smp\n\n def do_sampling_for_im_factor(\n self, betas: dict, convs: dict, p_modes: dict\n ) -> tuple[dict, dict, SamplesLowLoss]:\n smp = SamplesLowLoss(self.r, self.fill_params, self.clad_params, p_modes)\n try:\n betas, convs = smp.database.load()\n except IndexError:\n num_n = p_modes[\"num_n\"]\n num_m = p_modes[\"num_m\"]\n args = []\n for iwr in range(len(smp.ws)):\n for iwi in range(len(smp.wis)):\n xis_list = []\n for n in range(num_n):\n xis = []\n for i in range(num_m + 1):\n xis.append(betas[(\"M\", n, i + 1)][iwr, iwi] ** 2)\n for i in range(num_m):\n xis.append(betas[(\"E\", n, i + 1)][iwr, iwi] ** 2)\n xis_list.append(xis)\n args.append((iwr, iwi, xis_list))\n try:\n ray.init()\n p_modes_id = ray.put(p_modes)\n pool = ray.util.ActorPool(\n SamplesLowLossForRay.remote(\n self.r, self.fill_params, self.clad_params, p_modes_id\n )\n for _ in range(psutil.cpu_count())\n )\n xs_success_list = list(\n pool.map(lambda a, arg: a.task.remote(arg), args)\n )\n finally:\n ray.shutdown()\n betas, convs = smp.betas_convs(xs_success_list)\n smp.database.save(betas, convs)\n return betas, convs, smp\n\n def beta(self, w: complex, alpha: tuple[str, int, int]) -> complex:\n \"\"\"Return phase constant\n\n Args:\n w: A complex indicating the angular frequency\n alpha: (pol, n, m)\n pol: 'M' (TM-like mode) or 'E' (TE-like mode)\n n: The order of the mode\n m: The sub order of the mode.\n Returns:\n h: The phase constant.\n \"\"\"\n if self.clad.label == \"PEC\":\n return self.beta_pec(w, alpha)\n wr = w.real\n wi = w.imag\n hr: float = self.beta_funcs[(alpha, \"real\")](wr, wi)[0, 0]\n hi: float = self.beta_funcs[(alpha, 
\"imag\")](wr, wi)[0, 0]\n # if hr < 0:\n # hr = 1e-16\n # if hi < 0:\n # hi = 1e-16\n return hr + 1j * hi\n\n def beta_pec(self, w: complex, alpha: tuple[str, int, int]) -> complex:\n \"\"\"Return phase constant of PEC waveguide\n\n Args:\n w: A complex indicating the angular frequency\n alpha: A tuple (pol, n, m) where pol is 'M' for TM mode or\n 'E' for TE mode, n is the order of the mode, and m is the\n number of modes in the order and the polarization.\n Returns:\n h: A complex indicating the phase constant.\n \"\"\"\n w_comp = w.real + 1j * w.imag\n pol, n, m = alpha\n if pol == \"E\":\n chi = ssp.jnp_zeros(n, m)[-1]\n elif pol == \"M\":\n chi = ssp.jn_zeros(n, m)[-1]\n else:\n raise ValueError(\"pol must be 'E' or 'M\")\n val = cmath.sqrt(self.fill(w_comp) * w_comp ** 2 - chi ** 2 / self.r ** 2)\n if abs(val.real) > abs(val.imag):\n if val.real < 0:\n val *= -1\n else:\n if val.imag < 0:\n val *= -1\n return val\n\n def coef(self, h, w, alpha):\n \"\"\"Return the coefficients of TE- and TM- components which compose\n the hybrid mode.\n\n Args:\n h: A complex indicating the phase constant.\n w: A complex indicating the angular frequency\n alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or\n 'E' for TE-like mode, n is the order of the mode, and m is\n the number of modes in the order and the polarization.\n Returns:\n a: A complex indicating the coefficient of TE-component\n b: A complex indicating the coefficient of TM-component\n \"\"\"\n e1 = self.fill(w)\n e2 = self.clad(w)\n pol, n, m = alpha\n w = w.real + 1j * w.imag\n h = h.real + 1j * h.imag\n if e2.real < -1e6:\n if pol == \"E\":\n norm = self.norm(w, h, alpha, 1.0 + 0.0j, 0.0j)\n ai, bi = 1.0 / norm, 0.0\n else:\n norm = self.norm(w, h, alpha, 0.0j, 1.0 + 0.0j)\n ai, bi = 0.0, 1.0 / norm\n else:\n u = self.samples.u(h ** 2, w, e1)\n v = self.samples.v(h ** 2, w, e2)\n knv = ssp.kv(n, v)\n knpv = ssp.kvp(n, v)\n jnu = ssp.jv(n, u)\n jnpu = ssp.jvp(n, u)\n ci = -n * (u ** 2 + v ** 2) * jnu * knv / (u * v)\n if pol == \"E\":\n ci *= (h / w) ** 2\n ci /= e1 * jnpu * v * knv + e2 * knpv * u * jnu\n norm = self.norm(w, h, alpha, 1.0 + 0.0j, ci)\n ai = 1.0 / norm\n bi = ci / norm\n else:\n ci /= jnpu * v * knv + knpv * u * jnu\n norm = self.norm(w, h, alpha, ci, 1.0 + 0.0j)\n bi = 1.0 / norm\n ai = ci / norm\n return ai, bi\n\n def norm(self, w, h, alpha, a, b):\n pol, n, m = alpha\n en = 1 if n == 0 else 2\n if self.clad(w).real < -1e6:\n radius = self.r\n if pol == \"E\":\n u = ssp.jnp_zeros(n, m)[-1]\n jnu = ssp.jv(n, u)\n jnpu = 0.0\n else:\n u = ssp.jn_zeros(n, m)[-1]\n jnu = 0.0\n jnpu = ssp.jvp(n, u)\n return cmath.sqrt(\n a ** 2 * np.pi * radius ** 2 / en * (1 - n ** 2 / u ** 2) * jnu ** 2\n + b ** 2 * np.pi * radius ** 2 / en * jnpu ** 2\n )\n u = self.samples.u(h ** 2, w, self.fill(w))\n jnu = ssp.jv(n, u)\n jnpu = ssp.jvp(n, u)\n v = self.samples.v(h ** 2, w, self.clad(w))\n knv = ssp.kv(n, v)\n knpv = ssp.kvp(n, v)\n val_u = 2 * np.pi * self.r ** 2 / en\n val_v = val_u * ((u * jnu) / (v * knv)) ** 2\n upart_diag = self.upart_diag(n, u, jnu, jnpu)\n vpart_diag = self.vpart_diag(n, v, knv, knpv)\n upart_off = self.upart_off(n, u, jnu)\n vpart_off = self.vpart_off(n, v, knv)\n return cmath.sqrt(\n val_u\n * (\n a * (a * upart_diag + b * upart_off)\n + b * (b * upart_diag + a * upart_off)\n )\n - val_v\n * (\n a * (a * vpart_diag + b * vpart_off)\n + b * (b * vpart_diag + a * vpart_off)\n )\n )\n\n @staticmethod\n def upart_diag(n, u, jnu, jnpu):\n return jnu * jnpu / u + (jnpu ** 2 + (1 - n ** 2 / u ** 2) * 
jnu ** 2) / 2\n\n @staticmethod\n def upart_off(n, u, jnu):\n return n * (jnu / u) ** 2\n\n @staticmethod\n def vpart_diag(n, v, knv, knpv):\n return knv * knpv / v + (knpv ** 2 - (1 + n ** 2 / v ** 2) * knv ** 2) / 2\n\n @staticmethod\n def vpart_off(n, v, knv):\n return n * (knv / v) ** 2\n\n def Y(\n self,\n w: complex,\n h: complex,\n alpha: tuple[str, int, int],\n a: complex,\n b: complex,\n ) -> complex:\n \"\"\"Return the effective admittance of the waveguide mode\n\n Args:\n w: A complex indicating the angular frequency\n h: A complex indicating the phase constant.\n alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or\n 'E' for TE-like mode, n is the order of the mode, and m is\n the number of modes in the order and the polarization.\n a: A complex indicating the coefficient of TE-component\n b: A complex indicating the coefficient of TM-component\n Returns:\n y: A complex indicating the effective admittance\n \"\"\"\n pol, n, m = alpha\n e1 = self.fill(w)\n e2 = self.clad(w)\n en = 1 if n == 0 else 2\n if e2.real < -1e6:\n if pol == \"E\":\n val = h / w\n else:\n val = e1 * w / h\n else:\n u = self.samples.u(h ** 2, w, e1)\n jnu = ssp.jv(n, u)\n jnpu = ssp.jvp(n, u)\n v = self.samples.v(h ** 2, w, e2)\n knv = ssp.kv(n, v)\n knpv = ssp.kvp(n, v)\n val_u = 2 * np.pi * self.r ** 2 / en\n val_v = val_u * ((u * jnu) / (v * knv)) ** 2\n upart_diag = self.upart_diag(n, u, jnu, jnpu)\n vpart_diag = self.vpart_diag(n, v, knv, knpv)\n upart_off = self.upart_off(n, u, jnu)\n vpart_off = self.vpart_off(n, v, knv)\n val = val_u * (\n h / w * a * (a * upart_diag + b * upart_off)\n + e1 * w / h * b * (b * upart_diag + a * upart_off)\n ) - val_v * (\n h / w * a * (a * vpart_diag + b * vpart_off)\n + e2 * w / h * b * (b * vpart_diag + a * vpart_off)\n )\n return val\n\n @staticmethod\n def y_te(w, h):\n return h / w\n\n def y_tm_inner(self, w, h):\n e = self.fill(w)\n return e * w / h\n\n def y_tm_outer(self, w, h):\n e = self.clad(w)\n return e * w / h\n\n def fields(self, x, y, w, dir, alpha, h, coef):\n \"\"\"Return the electromagnetic field vectors for the specified mode and\n point\n\n Args:\n x: A float indicating the x coordinate [um]\n y: A float indicating the y coordinate [um]\n w: A complex indicating the angular frequency\n dir: \"h\" (horizontal polarization) or \"v\" (vertical polarization)\n alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or\n 'E' for TE-like mode, n is the order of the mode, and m is\n the number of modes in the order and the polarization.\n h: A complex indicating the phase constant.\n coef: The coefficients of TE- and TM- components\n Returns:\n f_vec: An array of complexes [ex, ey, ez, hx, hy, hz].\n \"\"\"\n pol, n, m = alpha\n a, b = coef\n r = np.hypot(x, y)\n p = np.arctan2(y, x)\n u = self.samples.u(h ** 2, w, self.fill(w))\n v = self.samples.v(h ** 2, w, self.clad(w))\n ur = u * r / self.r\n vr = v * r / self.r\n if dir == \"h\":\n fr = np.cos(n * p)\n fp = -np.sin(n * p)\n else:\n fr = np.sin(n * p)\n fp = np.cos(n * p)\n y_te = Cylinder.y_te(w, h)\n if r <= self.r:\n y_tm = self.y_tm_inner(w, h)\n er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr\n er_tm = ssp.jvp(n, ur) * fr\n er = a * er_te + b * er_tm\n ep_te = ssp.jvp(n, ur) * fp\n ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp\n ep = a * ep_te + b * ep_tm\n ez = u / (1j * h * self.r) * b * ssp.jv(n, ur) * fr\n hr = -y_te * a * ep_te - y_tm * b * ep_tm\n hp = y_te * a * er_te + y_tm * b * er_tm\n hz = -u / (1j * h * self.r) * y_te * a * ssp.jv(n, ur) * fp\n 
else:\n y_tm = self.y_tm_outer(w, h)\n val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v))\n er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val\n er_tm = ssp.kvp(n, vr) * fr * val\n er = a * er_te + b * er_tm\n ep_te = ssp.kvp(n, vr) * fp * val\n ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val\n ep = a * ep_te + b * ep_tm\n ez = -v / (1j * h * self.r) * b * ssp.kv(n, vr) * fr * val\n hr = -y_te * a * ep_te - y_tm * b * ep_tm\n hp = y_te * a * er_te + y_tm * b * er_tm\n hz = v / (1j * h * self.r) * y_te * a * ssp.kv(n, vr) * fp * val\n ex = er * np.cos(p) - ep * np.sin(p)\n ey = er * np.sin(p) + ep * np.cos(p)\n hx = hr * np.cos(p) - hp * np.sin(p)\n hy = hr * np.sin(p) + hp * np.cos(p)\n return np.array([ex, ey, ez, hx, hy, hz])\n\n def e_field(self, x, y, w, dir, alpha, h, coef):\n \"\"\"Return the electric field vector for the specified mode and\n point\n\n Args:\n x: A float indicating the x coordinate [um]\n y: A float indicating the y coordinate [um]\n w: A complex indicating the angular frequency\n dir: \"h\" (horizontal polarization) or \"v\" (vertical polarization)\n alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or\n 'E' for TE-like mode, n is the order of the mode, and m is\n the number of modes in the order and the polarization.\n h: A complex indicating the phase constant.\n coef: The coefficients of TE- and TM- components\n Returns:\n e_vec: An array of complexes [ex, ey, ez].\n \"\"\"\n pol, n, m = alpha\n a, b = coef\n r = np.hypot(x, y)\n p = np.arctan2(y, x)\n u = self.samples.u(h ** 2, w, self.fill(w))\n v = self.samples.v(h ** 2, w, self.clad(w))\n ur = u * r / self.r\n vr = v * r / self.r\n if dir == \"h\":\n fr = np.cos(n * p)\n fp = -np.sin(n * p)\n else:\n fr = np.sin(n * p)\n fp = np.cos(n * p)\n if r <= self.r:\n er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr\n er_tm = ssp.jvp(n, ur) * fr\n er = a * er_te + b * er_tm\n ep_te = ssp.jvp(n, ur) * fp\n ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp\n ep = a * ep_te + b * ep_tm\n ez = u / (1j * h * self.r) * b * ssp.jv(n, ur) * fr\n else:\n val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v))\n er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val\n er_tm = ssp.kvp(n, vr) * fr * val\n er = a * er_te + b * er_tm\n ep_te = ssp.kvp(n, vr) * fp * val\n ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val\n ep = a * ep_te + b * ep_tm\n ez = -v / (1j * h * self.r) * b * ssp.kv(n, vr) * fr * val\n ex = er * np.cos(p) - ep * np.sin(p)\n ey = er * np.sin(p) + ep * np.cos(p)\n return np.array([ex, ey, ez])\n\n def h_field(self, x, y, w, dir, alpha, h, coef):\n \"\"\"Return the magnetic field vectors for the specified mode and\n point\n\n Args:\n x: A float indicating the x coordinate [um]\n y: A float indicating the y coordinate [um]\n w: A complex indicating the angular frequency\n dir: \"h\" (horizontal polarization) or \"v\" (vertical polarization)\n alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or\n 'E' for TE-like mode, n is the order of the mode, and m is\n the number of modes in the order and the polarization.\n h: A complex indicating the phase constant.\n coef: The coefficients of TE- and TM- components\n Returns:\n h_vec: An array of complexes [hx, hy, hz].\n \"\"\"\n pol, n, m = alpha\n a, b = coef\n r = np.hypot(x, y)\n p = np.arctan2(y, x)\n u = self.samples.u(h ** 2, w, self.fill(w))\n v = self.samples.v(h ** 2, w, self.clad(w))\n ur = u * r / self.r\n vr = v * r / self.r\n if dir == \"h\":\n fr = np.cos(n * p)\n fp = -np.sin(n * 
p)\n else:\n fr = np.sin(n * p)\n fp = np.cos(n * p)\n y_te = Cylinder.y_te(w, h)\n if r <= self.r:\n y_tm = self.y_tm_inner(w, h)\n er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr\n er_tm = ssp.jvp(n, ur) * fr\n ep_te = ssp.jvp(n, ur) * fp\n ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp\n hr = -y_te * a * ep_te - y_tm * b * ep_tm\n hp = y_te * a * er_te + y_tm * b * er_tm\n hz = -u / (1j * h * self.r) * y_te * a * ssp.jv(n, ur) * fp\n else:\n y_tm = self.y_tm_outer(w, h)\n val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v))\n er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val\n er_tm = ssp.kvp(n, vr) * fr * val\n ep_te = ssp.kvp(n, vr) * fp * val\n ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val\n hr = -y_te * a * ep_te - y_tm * b * ep_tm\n hp = y_te * a * er_te + y_tm * b * er_tm\n hz = v / (1j * h * self.r) * y_te * a * ssp.kv(n, vr) * fp * val\n hx = hr * np.cos(p) - hp * np.sin(p)\n hy = hr * np.sin(p) + hp * np.cos(p)\n return np.array([hx, hy, hz])\n\n @staticmethod\n def u_jnu_jnpu_pec(num_n, num_m):\n us = np.empty((2, num_n, num_m))\n jnus = np.empty((2, num_n, num_m))\n jnpus = np.empty((2, num_n, num_m))\n for n in range(num_n):\n us[0, n] = ssp.jnp_zeros(n, num_m)\n us[1, n] = ssp.jn_zeros(n, num_m)\n jnus[0, n] = ssp.jv(n, us[0, n])\n jnus[1, n] = np.zeros(num_m)\n jnpus[0, n] = np.zeros(num_m)\n jnpus[1, n] = ssp.jvp(n, us[1, n])\n return us, jnus, jnpus\n\n def coefs(self, hs, w):\n As = []\n Bs = []\n for h, s, n, m in zip(hs, self.s_all, self.n_all, self.m_all):\n pol = \"E\" if s == 0 else \"M\"\n ai, bi = self.coef(h, w, (pol, n, m))\n As.append(ai)\n Bs.append(bi)\n return np.ascontiguousarray(As), np.ascontiguousarray(Bs)\n\n def Ys(self, w, hs, As, Bs):\n vals = []\n for h, s, n, a, b in zip(hs, self.s_all, self.n_all, As, Bs):\n pol = \"E\" if s == 0 else \"M\"\n vals.append(self.Y(w, h, (pol, n, 1), a, b))\n return np.array(vals)\n\n def props_numpy(self, w):\n e1 = self.fill(w)\n e2 = self.clad(w)\n hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all])\n As, Bs = self.coefs(hs, w)\n Ys = self.Ys(w, hs, As, Bs)\n if e2.real < -1e6:\n us = np.zeros_like(hs, dtype=complex)\n jus = np.zeros_like(hs, dtype=complex)\n jpus = np.zeros_like(hs, dtype=complex)\n for i, (h, s, n, m) in enumerate(\n zip(hs, self.s_all, self.n_all, self.m_all)\n ):\n us[i] = self.u_pec[s, n, m - 1]\n jus[i] = self.jnu_pec[s, n, m - 1]\n jpus[i] = self.jnpu_pec[s, n, m - 1]\n vs = (1 - 1j) * np.sqrt(0.5j * (-e2 * w ** 2 + hs ** 2)) * self.r\n kvs = np.zeros_like(vs)\n kpvs = np.zeros_like(vs)\n else:\n us = self.samples.u(hs ** 2, w, e1)\n vs = self.samples.v(hs ** 2, w, e2)\n jus = ssp.jv(self.n_all, us)\n jpus = ssp.jvp(self.n_all, us)\n kvs = ssp.kv(self.n_all, vs)\n kpvs = ssp.kvp(self.n_all, vs)\n return hs, us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys\n\n def props(self, w):\n e1 = self.fill(w)\n e2 = self.clad(w)\n hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all])\n us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys = cylinder_utils.props_cython(\n w,\n self.r,\n self.s_all,\n self.n_all,\n self.m_all,\n hs,\n e1,\n e2,\n self.u_pec,\n self.jnu_pec,\n self.jnpu_pec,\n )\n return hs, us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys\n"
] |
[
[
"scipy.special.jn_zeros",
"numpy.sqrt",
"scipy.special.jv",
"numpy.isnan",
"numpy.ascontiguousarray",
"numpy.cos",
"numpy.empty",
"numpy.sin",
"numpy.arctan2",
"numpy.zeros_like",
"scipy.special.jvp",
"scipy.special.kvp",
"scipy.special.kv",
"numpy.array",
"numpy.zeros",
"scipy.special.jnp_zeros",
"numpy.hypot"
]
] |
ikathuria/SignatureVerification
|
[
"4d0f26eb2652ecdf8cd5a679c6d3468046ab0d88"
] |
[
"quadruplet_utils.py"
] |
[
"\"\"\"Quadruplet loss utility functions.\n\nembedding_net\nbuild_metric_network\nQuadrupletLossLayer\nbuild_quadruplet_model\n\ncompute_l2_dist\ncompute_probs\ncompute_metrics\nfind_nearest\ndraw_roc\ndraw_eval_quadruplets\n\"\"\"\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport math\nimport numpy as np\nfrom tqdm import tqdm\n\nimport keras.backend as K\n\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Input, concatenate, Layer\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Lambda, Flatten, Dense, Concatenate\nfrom tensorflow.keras.initializers import glorot_uniform\nfrom tensorflow.keras.regularizers import l2\n\nfrom sklearn.metrics import roc_curve, roc_auc_score\n\nimport matplotlib.pyplot as plt\n\n\n# Model ##################################################################################\ndef embedding_net(embeddingsize, input_shape=(224, 224, 1)):\n \"\"\"Embedding network.\n\n Args:\n embeddingsize -- int : embedding size.\n input_shape -- tuple : input shape of (224, 224, 1).\n\n Returns:\n embedding -- keras.models.Sequential : embedding sequential network.\n \"\"\"\n\n # Convolutional Neural Network\n network = Sequential(name=\"sequential_network\")\n\n # 1 Conv2D\n network.add(Conv2D(128, (7, 7), activation='relu',\n padding='same',\n input_shape=input_shape,\n kernel_initializer='he_uniform',\n kernel_regularizer=l2(2e-4)))\n network.add(MaxPooling2D())\n\n # 2 Conv2D\n network.add(Conv2D(128, (5, 5), activation='relu',\n padding='same',\n kernel_initializer='he_uniform',\n kernel_regularizer=l2(2e-4)))\n network.add(MaxPooling2D())\n\n # 3 Conv2D\n network.add(Conv2D(64, (5, 5), activation='relu',\n padding='same',\n kernel_initializer='he_uniform',\n kernel_regularizer=l2(2e-4)))\n\n # flatten the output to 1D\n network.add(Flatten())\n\n # 1 Dense\n network.add(Dense(2048, activation='relu',\n kernel_regularizer=l2(1e-3),\n kernel_initializer='he_uniform'))\n\n # 2 Dense\n network.add(Dense(embeddingsize, activation=None,\n kernel_regularizer=l2(1e-3),\n kernel_initializer='he_uniform'))\n\n # Force the encoding to live on the d-dimentional hypershpere\n network.add(Lambda(lambda x: K.l2_normalize(x, axis=-1)))\n\n return network\n\n\ndef build_metric_network(single_embedding_shape):\n '''\n Define the neural network to learn the metric\n Input : \n single_embedding_shape : shape of input embeddings or feature map. 
Must be an array\n\n '''\n # compute shape for input\n input_shape = single_embedding_shape\n # the two input embeddings will be concatenated\n input_shape[0] = input_shape[0]*2\n\n # Neural Network\n network = Sequential(name=\"learned_metric\")\n network.add(Dense(10, activation='relu',\n input_shape=input_shape,\n kernel_regularizer=l2(1e-3),\n kernel_initializer='he_uniform'))\n network.add(Dense(10, activation='relu',\n kernel_regularizer=l2(1e-3),\n kernel_initializer='he_uniform'))\n network.add(Dense(10, activation='relu',\n kernel_regularizer=l2(1e-3),\n kernel_initializer='he_uniform'))\n\n # Last layer : binary softmax\n network.add(Dense(2, activation='softmax'))\n\n # Select only one output value from the softmax\n network.add(Lambda(lambda x: x[:, 0]))\n\n return network\n\n\nclass QuadrupletLossLayer(Layer):\n def __init__(self, alpha=1, beta=0.5, **kwargs):\n self.alpha = alpha\n self.beta = beta\n self.debugeric = 1\n\n super(QuadrupletLossLayer, self).__init__(**kwargs)\n\n def quadruplet_loss(self, inputs):\n ap_dist, an_dist, nn_dist = inputs\n\n # square\n ap_dist2 = K.square(ap_dist)\n an_dist2 = K.square(an_dist)\n nn_dist2 = K.square(nn_dist)\n\n return K.sum(K.maximum(ap_dist2 - an_dist2 + self.alpha, 0), axis=0) + K.sum(K.maximum(ap_dist2 - nn_dist2 + self.beta, 0), axis=0)\n\n def call(self, inputs):\n loss = self.quadruplet_loss(inputs)\n self.add_loss(loss)\n return loss\n\n\ndef build_quadruplet_model(input_shape, network, metricnetwork, margin=1, margin2=0.5):\n '''\n Define the Keras Model for training \n Input : \n input_shape : shape of input images\n network : Neural network to train outputing embeddings\n metricnetwork : Neural network to train the learned metric\n margin : minimal distance between Anchor-Positive and Anchor-Negative for the lossfunction (alpha1)\n margin2 : minimal distance between Anchor-Positive and Negative-Negative2 for the lossfunction (alpha2)\n\n '''\n # Define the tensors for the four input images\n anchor_input = Input(input_shape, name=\"anchor_input\")\n positive_input = Input(input_shape, name=\"positive_input\")\n negative_input = Input(input_shape, name=\"negative_input\")\n negative2_input = Input(input_shape, name=\"negative2_input\")\n\n # Generate the encodings (feature vectors) for the four images\n encoded_a = network(anchor_input)\n encoded_p = network(positive_input)\n encoded_n = network(negative_input)\n encoded_n2 = network(negative2_input)\n\n # compute the concatenated pairs\n encoded_ap = Concatenate(\n axis=-1, name=\"Anchor-Positive\")([encoded_a, encoded_p])\n encoded_an = Concatenate(\n axis=-1, name=\"Anchor-Negative\")([encoded_a, encoded_n])\n encoded_nn = Concatenate(\n axis=-1, name=\"Negative-Negative2\")([encoded_n, encoded_n2])\n\n # compute the distances AP, AN, NN\n ap_dist = metricnetwork(encoded_ap)\n an_dist = metricnetwork(encoded_an)\n nn_dist = metricnetwork(encoded_nn)\n\n # QuadrupletLoss Layer\n loss_layer = QuadrupletLossLayer(alpha=margin, beta=margin2, name='4xLoss')([\n ap_dist, an_dist, nn_dist])\n\n # Connect the inputs with the outputs\n network_train = Model(\n inputs=[anchor_input, positive_input, negative_input, negative2_input],\n outputs=loss_layer)\n\n # return the model\n return network_train\n\n\n# EVALUATION ##################################################################################\ndef compute_l2_dist(a, b):\n return np.sum(np.square(a-b))\n\n\ndef compute_probs(network, X):\n '''\n Input\n network : current NN to compute embeddings.\n X : tensor of shape (m, w, 
h, 1) containing pics to evaluate.\n Y : tensor of shape (m,) containing true class.\n\n Returns\n probs : array of shape (m, m) containing distances.\n\n '''\n left = X[0]\n right = X[1]\n\n m = left.shape[0]\n probs = np.zeros((m))\n\n for i in tqdm(range(m), desc='QUADRUPLETS PROBS'):\n emb_left = network.predict(left[m].reshape(1, 224, 224, 1))\n emb_right = network.predict(right[m].reshape(1, 224, 224, 1))\n probs[i] = -compute_l2_dist(emb_left, emb_right)\n\n return probs\n\n\ndef compute_metrics(yprobs, probs):\n '''\n Returns\n fpr : Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]\n tpr : Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i].\n thresholds : Decreasing thresholds on the decision function used to compute fpr and tpr. thresholds[0] represents no instances being predicted and is arbitrarily set to max(y_score) + 1\n auc : Area Under the ROC Curve metric\n '''\n # calculate AUC\n auc = roc_auc_score(yprobs, probs)\n\n # calculate roc curve\n fpr, tpr, thresholds = roc_curve(yprobs, probs)\n\n return fpr, tpr, thresholds, auc\n\n\ndef find_nearest(array, value):\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return array[idx-1], idx-1\n else:\n return array[idx], idx\n\n\ndef draw_roc(fpr, tpr, thresholds, auc, n_iteration):\n # find threshold\n targetfpr = 1e-3\n _, idx = find_nearest(fpr, targetfpr)\n threshold = thresholds[idx]\n recall = tpr[idx]\n\n # plot no skill\n plt.plot([0, 1], [0, 1], linestyle='--')\n # plot the roc curve for the model\n plt.plot(fpr, tpr, marker='.')\n plt.title('AUC: {0:.3f} @ {4} iterations\\nSensitivity : {2:.1%} @FPR={1:.0e}\\nThreshold={3})'.format(\n auc, targetfpr, recall, abs(threshold), n_iteration\n ))\n # show the plot\n plt.show()\n\n\ndef draw_eval_quadruplets(network, n_iteration, X, Y):\n yprobs = Y\n probs = compute_probs(network, X)\n fpr, tpr, thresholds, auc = compute_metrics(yprobs, probs)\n draw_roc(fpr, tpr, thresholds, auc, n_iteration)\n"
] |
[
[
"tensorflow.keras.layers.Concatenate",
"sklearn.metrics.roc_auc_score",
"numpy.square",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.regularizers.l2",
"sklearn.metrics.roc_curve",
"tensorflow.keras.layers.MaxPooling2D",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Flatten",
"numpy.searchsorted",
"matplotlib.pyplot.show",
"tensorflow.keras.models.Sequential",
"numpy.zeros",
"tensorflow.keras.layers.Input"
]
] |
FumiyukiKato/PrivBTS
|
[
"d2fae18588b24281d0968b8af09dc9295e73cdda"
] |
[
"hdmm/hdmm/templates.py"
] |
[
"import numpy as np\nfrom scipy import sparse, optimize\nfrom scipy.sparse.linalg import spsolve_triangular, aslinearoperator\nfrom hdmm import workload, approximation, implicit\nimport time\nfrom functools import reduce\n\n\nclass TemplateStrategy:\n \"\"\"\n A template strategy is a space of strategies parameterized by some vector of weights.\n This weight vector can be optimized for a particular workload.\n \"\"\"\n def __init__(self, theta0=None):\n self.set_params(theta0)\n \n def get_params(self):\n return self.theta\n\n def set_params(self, theta):\n self.theta = np.maximum(theta, 0)\n\n def run_mechanism(self, x, eps):\n \"\"\" Run the matrix mechanism with the current strategy on the given data vector \"\"\"\n A = self.strategy()\n A1 = self.inverse()\n delta = self.sensitivity()\n y = A.dot(x) + np.random.laplace(0, delta/eps, size=A.shape[0])\n return A1.dot(y)\n\n @property\n def A(self):\n return self.strategy(form='matrix')\n \n def _AtA1(self):\n return np.linalg.pinv(self.A.T.dot(self.A))\n\n def sparse_matrix(self):\n return sparse.csr_matrix(self.A)\n\n def strategy(self, form='linop'):\n \"\"\" Return a linear operator for the strategy \"\"\"\n assert form in ['matrix', 'linop']\n A = self._strategy()\n if form == 'matrix':\n I = np.eye(A.shape[1])\n return A.dot(I)\n return A\n\n def _strategy(self):\n return aslinearoperator(self.A)\n\n def inverse(self, form='linop'):\n \"\"\" Return a linear operator for the pseudo inverse of the strategy \"\"\"\n assert form in ['matrix', 'linop']\n A1 = self._inverse()\n if form == 'matrix':\n I = np.eye(A1.shape[1])\n return A1.dot(I)\n return A1\n\n def _inverse(self):\n return aslinearoperator(np.linalg.pinv(self.A))\n\n def _loss_and_grad(self):\n pass\n\n def sensitivity(self):\n return np.abs(self.A).sum(axis=0).max()\n\n def set_workload(self, W):\n self.workload = W\n\n def optimize(self, W):\n \"\"\"\n Optimize strategy for given workload \n\n :param W: the workload, may be a n x n numpy array for WtW or a workload object\n \"\"\"\n t0 = time.time()\n self.set_workload(W)\n init = self.get_params()\n bnds = [(0,None)] * init.size\n log = []\n \n def obj(theta):\n self.set_params(theta)\n ans = self._loss_and_grad()\n log.append(ans[0])\n return ans\n\n opts = { 'ftol' : 1e-4 }\n res = optimize.minimize(obj, init, jac=True, method='L-BFGS-B', bounds=bnds, options=opts)\n t1 = time.time()\n params = self.get_params()\n ans = { 'log' : log, 'time' : t1 - t0, 'loss' : res.fun, 'res' : res, 'params' : params }\n return ans\n\n def restart_optimize(self, W, restarts):\n best = self.optimize(W)\n for i in range(restarts-1):\n ans = self.optimize(W)\n if ans['loss'] < best['loss']:\n best = ans\n self.set_params(best['params'])\n return best \n\nclass Default(TemplateStrategy):\n \"\"\"\n \"\"\"\n def __init__(self, m, n):\n theta0 = np.random.rand(m*n)\n self.m = m\n self.n = n\n TemplateStrategy.__init__(theta0) \n\n def _strategy(self):\n return self.get_params().reshape(self.m, self.n)\n\n def _loss_and_grad(self):\n WtW = self.workload.WtW\n A = self.get_params().reshape(self.m, self.n)\n sums = np.sum(np.abs(A), axis=0)\n col = np.argmax(sums)\n F = sums[col]**2\n # note: F is not differentiable, but we can take subgradients\n dF = np.zeros_like(A)\n dF[:,col] = np.sign(A[:,col])*2*sums[col]\n AtA = A.T.dot(A)\n AtA1 = np.linalg.pinv(AtA)\n M = WtW.dot(AtA1)\n G = np.trace(M)\n dX = -AtA1.dot(M)\n dG = 2*A.dot(dX)\n dA = dF*G + F*dG\n return F*G, dA.flatten()\n\nclass PIdentity(TemplateStrategy):\n \"\"\"\n A PIdentity 
strategy is a strategy of the form (I + B) D where D is a diagonal scaling matrix\n that depends on B and ensures uniform column norm. B is a p x n matrix of free parameters.\n \"\"\"\n def __init__(self, p, n):\n \"\"\"\n Initialize a PIdentity strategy\n :param p: the number of non-identity queries\n :param n: the domain size\n \"\"\"\n theta0 = np.random.rand(p*n)\n self.p = p\n self.n = n\n TemplateStrategy.__init__(self, theta0)\n \n def sparse_matrix(self):\n I = sparse.identity(self.n, format='csr')\n B = self.get_params().reshape(self.p, self.n)\n D = 1 + B.sum(axis=0)\n A = sparse.vstack([I,B], format='csr')\n return A * sparse.diags(1.0 / D)\n \n def _strategy(self):\n I = np.eye(self.n)\n B = self.get_params().reshape(self.p, self.n)\n A = np.vstack([I, B])\n A = A / A.sum(axis=0)\n return aslinearoperator(sparse.csr_matrix(A))\n\n def _AtA1(self):\n B = self.get_params().reshape(self.p, self.n)\n R = np.linalg.inv(np.eye(self.p) + B.dot(B.T))\n D = 1.0 + B.sum(axis=0)\n return (np.eye(self.n) - B.T.dot(R).dot(B))*D*D[:,None]\n\n def _inverse(self):\n B = self.get_params().reshape(self.p, self.n)\n return implicit.inverse(B)\n \n def _loss_and_grad(self):\n WtW = self.workload.WtW\n p, n = self.p, self.n\n\n B = np.reshape(self.get_params(), (p,n))\n scale = 1.0 + np.sum(B, axis=0)\n R = np.linalg.inv(np.eye(p) + B.dot(B.T)) # O(k^3)\n C = WtW * scale * scale[:,None] # O(n^2)\n\n M1 = R.dot(B) # O(n k^2)\n M2 = M1.dot(C) # O(n^2 k)\n M3 = B.T.dot(M2) # O(n^2 k)\n M4 = B.T.dot(M2.dot(M1.T)).dot(B) # O(n^2 k)\n\n Z = -(C - M3 - M3.T + M4) * scale * scale[:,None] # O(n^2)\n\n Y1 = 2*np.diag(Z) / scale # O(n)\n Y2 = 2*(B/scale).dot(Z) # O(n^2 k)\n g = Y1 + (B*Y2).sum(axis=0) # O(n k)\n\n loss = np.trace(C) - np.trace(M3)\n grad = (Y2*scale - g) / scale**2\n return loss, grad.flatten()\n\nclass AugmentedIdentity(TemplateStrategy):\n \"\"\"\n An AugmentedIdentity strategy is like a PIdentity strategy with additional structure imposed.\n The template is defiend by a p x n matrix of non-negative integers P. Each unique nonzero entry\n of this matrix P refers to a free parameter that can be optimized. An entry that is 0 in P is\n a structural zero in the strategy. 
\n\n Example 1:\n A PIdentity strategy can be represented as an AugmentedIdentity strategy with \n P = np.arange(1, p*n+1).reshape(p, n)\n \n Example 2:\n A strategy of the form w*T + I can be represented as an AugmentedIdentity strategy with\n P = np.ones((1, n), dtype=int)\n \"\"\"\n def __init__(self, imatrix):\n \"\"\" \n Create an AugmentedIdentity strategy with the given P matrix\n \"\"\"\n self.imatrix = imatrix\n p, n = imatrix.shape\n num = imatrix.max()\n theta0 = np.random.rand(num)\n self._pid = PIdentity(p, n)\n TemplateStrategy.__init__(self, p+n, n, theta0)\n # should call set_params\n \n def _strategy(self):\n return self._pid._strategy() \n \n def _inverse(self):\n return self._pid.inverse()\n\n def set_params(theta):\n self.theta = theta\n params = np.append(0, theta)\n B = params[self.imatrix]\n self._pid.set_params(B.flatten())\n\n def _AtA1(self):\n return self._pid._AtA1()\n\n def set_workload(self, W):\n self.workload = W\n self._pid.set_workload(W)\n \n def _loss_and_grad(self):\n #params = np.append(0, self.get_params())\n #B = params[self.imatrix]\n #self._pid.set_params(B.flatten())\n obj, grad = self._pid._loss_and_grad() \n grad2 = np.bincount(self.imatrix.flatten(), grad)[1:]\n return obj, grad2\n\nclass Static(TemplateStrategy):\n def __init__(self, strategy):\n self.A = strategy\n TemplateStrategy.__init__(self, np.array([]))\n\n def optimize(self, W):\n pass\n\nclass Kronecker(TemplateStrategy):\n \"\"\" A Kronecker template strategy is of the form A1 x ... x Ad, where each Ai is some 1D \n template strategy\"\"\"\n def __init__(self, strategies):\n \"\"\"\n :param strategies: a list of templates for each dimension of template\n \"\"\"\n self.strategies = strategies\n\n def sparse_matrix(self):\n return reduce(sparse.kron, [A.sparse_matrix() for A in self.strategies])\n\n def set_params(self, params):\n for strategy, param in zip(self.strategies, params):\n strategy.set_params(param)\n\n def get_params(self):\n return [strategy.get_params() for strategy in self.strategies]\n\n def _strategy(self):\n return implicit.krons(*[S._strategy() for S in self.strategies])\n\n def _inverse(self):\n return implicit.krons(*[S._inverse() for S in self.strategies])\n\n def sensitivity(self):\n return np.prod([S.sensitivity() for S in self.strategies])\n\n def optimize(self, W):\n self.set_workload(W)\n t0 = time.time()\n if isinstance(W, workload.Kron):\n loss = 0\n for subA, subW in zip(self.strategies, W.workloads):\n ans = subA.optimize(subW)\n loss += ans['loss']\n params = self.get_params()\n return { 'time' : time.time() - t0, 'loss' : loss, 'params' : params }\n assert isinstance(W, workload.Concat) and isinstance(W.workloads[0], workload.Kron)\n \n workloads = [K.workloads for K in W.workloads] # a k x d table of workloads\n strategies = self.strategies\n \n k = len(workloads)\n d = len(workloads[0])\n\n log = []\n\n C = np.ones((d, k))\n for i in range(d):\n AtA1 = strategies[i]._AtA1()\n for j in range(k):\n C[i,j] = np.sum(workloads[j][i].WtW * AtA1)\n for r in range(10):\n err = C.prod(axis=0).sum()\n for i in range(d):\n cs = np.sqrt(C.prod(axis=0) / C[i])\n What = workload.Concat([c*Ws[i] for c, Ws in zip(cs, workloads)])\n res = strategies[i].optimize(What)\n AtA1 = strategies[i]._AtA1()\n for j in range(k):\n C[i,j] = np.sum(workloads[j][i].WtW * AtA1)\n log.append(err)\n\n t1 = time.time()\n params = self.get_params()\n ans = { 'log' : log, 'loss' : err, 'time' : t1 - t0, 'params' : params }\n return ans\n\nclass Marginals(TemplateStrategy):\n \"\"\"\n A 
marginals template is parameterized by 2^d weights where d is the number of dimensions. \n The strategy is of the form w_1 (T x ... x T) + ... + w_{2^d} (I x ... I) - every marginal\n with nonzero weight is queried with weight w_i\n \"\"\"\n def __init__(self, domain):\n self.domain = domain\n theta = np.random.rand(2**len(domain))\n\n d = len(domain)\n mult = np.ones(2**d)\n for i in range(2**d):\n for k in range(d):\n if not (i & (2**k)):\n mult[i] *= domain[k]\n self.mult = mult\n\n TemplateStrategy.__init__(self, theta)\n\n def _strategy(self):\n return implicit.marginals_linop(self.domain, self.get_params())\n\n def _inverse(self):\n theta = self.get_params()\n Y, _ = self._Xmatrix(theta**2)\n tmp = Y.dot(theta**2)\n X, _ = self._Xmatrix(tmp)\n invtheta = spsolve_triangular(X, theta**2, lower=False)\n return implicit.marginals_inverse(self.domain, theta, invtheta)\n\n def sensitivity(self):\n return np.sum(np.abs(self.get_params()))\n\n def _Xmatrix(self,vect):\n # the matrix X such that M(u) M(v) = M(X(u) v)\n d = len(self.domain)\n A = np.arange(2**d)\n mult = self.mult\n\n values = np.zeros(3**d)\n rows = np.zeros(3**d, dtype=int)\n cols = np.zeros(3**d, dtype=int)\n start = 0\n for b in range(2**d):\n #uniq, rev = np.unique(a&B, return_inverse=True) # most of time being spent here\n mask = np.zeros(2**d, dtype=int)\n mask[A&b] = 1\n uniq = np.nonzero(mask)[0]\n step = uniq.size\n mask[uniq] = np.arange(step)\n rev = mask[A&b]\n values[start:start+step] = np.bincount(rev, vect*mult[A|b], step)\n if values[start+step-1] == 0:\n values[start+step-1] = 1.0\n cols[start:start+step] = b\n rows[start:start+step] = uniq\n start += step\n X = sparse.csr_matrix((values, (rows, cols)), (2**d, 2**d))\n XT = sparse.csr_matrix((values, (cols, rows)), (2**d, 2**d))\n return X, XT\n\n def set_workload(self, W):\n marg = approximation.marginals_approx(W)\n self.workload = marg\n d = len(self.domain)\n A = np.arange(2**d)\n weights = marg.weight_vector()\n self.dphi = np.array([np.dot(weights**2, self.mult[A|b]) for b in range(2**d)]) \n\n def _loss_and_grad(self):\n d = len(self.domain)\n A = np.arange(2**d)\n mult = self.mult\n dphi = self.dphi\n theta = self.get_params()\n\n delta = np.sum(theta)**2\n ddelta = 2*np.sum(theta)\n theta2 = theta**2\n Y, YT = self._Xmatrix(theta2)\n params = Y.dot(theta2)\n X, XT = self._Xmatrix(params)\n phi = spsolve_triangular(X, theta2, lower=False)\n # Note: we should be multiplying by domain size here if we want total squared error\n ans = np.dot(phi, dphi)\n dXvect = -spsolve_triangular(XT, dphi, lower=True)\n # dX = outer(dXvect, phi)\n dparams = np.array([np.dot(dXvect[A&b]*phi, mult[A|b]) for b in range(2**d)])\n dtheta2 = YT.dot(dparams)\n dtheta = 2*theta*dtheta2\n return delta*ans, delta*dtheta + ddelta*ans\n\n# (df / dtheta_k) = sum_ij (df / d_Aij) (dA_ij / theta_k)\n \ndef KronPIdentity(ns, ps):\n \"\"\"\n Builds a template strategy of the form A1 x ... 
x Ad where each Ai is a PIdentity template\n :param ns: the domain size of each dimension\n :param ps: the number of p queries in each dimension\n \"\"\"\n return Kronecker([PIdentity(p, n) for p,n in zip(ps, ns)])\n \ndef RangeTemplate(n, start=32, branch=4, shared=False):\n \"\"\"\n Builds a template strategy for range queries with queries that have structural zeros \n everywhere except at indices at [i, i+w) where w is the width of the query and ranges from\n start to n in powers of branch and i is a multiple of w/2.\n\n :param n: the domain size\n :param start: the width of the smallest query\n :param branch: the width multiplying factor for larger queries\n :param shared: flag to determine if parameters should be shared for queries of the same width\n\n Example:\n RangeTemplate(16, start=8, branch=2) builds a strategy template with four augmented queries that have structural zeros everywhere except in the intervals indicated below:\n 1. [0,8)\n 2. [4,12)\n 3. [8,16)\n 4. [0,16)\n \"\"\"\n rows = []\n width = start\n idx = 1\n while width <= n:\n for i in range(0, n-width//2, width//2):\n row = np.zeros(n, dtype=int)\n row[i:i+width] = np.arange(width) + idx\n if not shared: idx += width\n rows.append(row)\n if shared: idx += width\n width *= branch\n return AugmentedIdentity(np.vstack(rows))\n\ndef IdTotal(n):\n \"\"\" Build a single-parameter template strategy of the form w*Total + Identity \"\"\"\n P = np.ones((1,n), dtype=int)\n return AugmentedIdentity(P)\n\ndef Identity(n):\n \"\"\" Builds a template strategy that is always Identity \"\"\"\n return Static(np.eye(n))\n\ndef Total(n):\n \"\"\" Builds a template strategy that is always Total \"\"\"\n return Static(np.ones((1,n)))\n\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.zeros_like",
"scipy.sparse.vstack",
"numpy.random.laplace",
"numpy.trace",
"numpy.arange",
"numpy.eye",
"scipy.sparse.diags",
"scipy.sparse.linalg.aslinearoperator",
"numpy.argmax",
"scipy.sparse.linalg.spsolve_triangular",
"numpy.zeros",
"numpy.nonzero",
"scipy.sparse.csr_matrix",
"numpy.append",
"scipy.optimize.minimize",
"numpy.random.rand",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.ones",
"numpy.sign",
"numpy.linalg.pinv",
"scipy.sparse.identity",
"numpy.bincount",
"numpy.vstack"
]
] |
arupkpatel/HandGestureDetection
|
[
"2be7224a53a100c37b71e7a6333ed69f5729032a"
] |
[
"cnn.py"
] |
[
"import tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.3\nset_session(tf.Session(config=config))\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout\n\nclassifier = Sequential()\n\nclassifier.add(Conv2D(32, (3, 3), input_shape=(48, 48, 1), activation='relu'))\nclassifier.add(Conv2D(32, (3, 3), activation='relu'))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(.25))\n\n\n# classifier.add(MaxPooling2D(pool_size=(2, 2)))\n# classifier.add(Dropout(.20))\n\nclassifier.add(Conv2D(32, (3, 3), activation='relu'))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(.25))\n\nclassifier.add(Flatten())\nclassifier.add(Dense(256, activation='relu'))\nclassifier.add(Dropout(.5))\nclassifier.add(Dense(1, activation='sigmoid'))\n\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntrain_set = train_datagen.flow_from_directory(\n 'dataset/training',\n target_size=(48, 48),\n batch_size=16,\n color_mode='grayscale',\n class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory(\n 'dataset/testing',\n target_size=(48, 48),\n batch_size=16,\n class_mode='binary',\n color_mode='grayscale')\n\nclassifier.fit_generator(\n train_set,\n steps_per_epoch=8575,\n epochs=5,\n validation_data=test_set,\n validation_steps=2000)\nprint('done!!')\n\nclassifier.save('cdi.h5')\nclassifier.save_weights('cdiw.h5')\njstring = classifier.to_json()\njfile = open('classifier.json','w')\njfile.write(jstring)\njfile.close()\n\n# from keras.models import load_model\n\n# classifier = load_model('cdi.h5')\n\n# import numpy as np\n# from keras.preprocessing import image\n# import cv2\n# import os\n\n# img = cv2.imread('spred\\\\9.jpg',0)\n# img2 =cv2.imread('spred\\\\8.jpg')\n# imgbw = cv2.resize(img, (48, 48))\n# imgbw = image.img_to_array(imgbw)\n# imgbw = np.expand_dims(imgbw, axis=0)\n# result = classifier.predict(imgbw)\n# # print(result)\n\n# if result[0][0] == 0:\n# print('1')\n# os.system('notepad')\n# cv2.putText(img2, 'Gesture Recognised: Notepad', (50, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n# color=(255, 0, 0),\n# thickness=3)\n# else:\n# print('2')\n# os.system('calc')\n# cv2.putText(img2, 'Gesture Recognised: Calculator', (50, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n# color=(255, 0, 0), thickness=3)\n\n\n# cv2.imshow('Output',img2)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\n\n"
] |
[
[
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] |
dfm/rvhmc
|
[
"03c9aa6a28722989cf3a6da8963a54d1e1faa0cf"
] |
[
"paper/scripts/emcee-comp/compare.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\nimport os\nimport sys\n\nif len(sys.argv) > 1:\n n_planets = int(sys.argv[1])\nelse:\n n_planets = 1\ndirname = \"{0:02d}\".format(n_planets)\nif len(sys.argv) > 2:\n version = int(sys.argv[2])\n dirname = os.path.join(dirname, \"{0:04d}\".format(version))\nelse:\n version = 0\nos.makedirs(dirname, exist_ok=True)\n\nos.environ[\"THEANO_FLAGS\"] = \\\n \"compiledir=./{0}/cache\".format(dirname)\n\nimport time\nimport string\n\nimport emcee\nimport corner\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport pymc3 as pm\n\nimport theano\nimport theano.tensor as tt\n\nfrom rvhmc import RVDataset, PolynomialTrend, RVModel, RVPlanet\n\n\ndef build_model(peaks, t, y=None, yerr=None, model=None):\n model = pm.modelcontext(model)\n\n n_planets = len(peaks)\n\n if yerr is None:\n yerr = np.random.uniform(0.01, 0.1, len(t))\n if y is None:\n y = yerr*np.random.randn(len(t))\n\n trend = PolynomialTrend(\"trend\", order=3)\n logs = pm.Normal(\"logs\", mu=-5.0, sd=5.0, testval=-5.0)\n meanrv = pm.Normal(\"meanrv\", mu=0.0, sd=10.0, testval=0.0)\n dataset = RVDataset(\"data\", t, y, yerr, logs=logs, trend=trend,\n meanrv=meanrv)\n\n logamps = pm.Uniform(\"logamps\",\n lower=np.log(min_amp),\n upper=np.log(max_amp),\n shape=n_planets,\n testval=np.log([np.clip(peak[\"amp\"],\n min_amp+1e-2,\n max_amp-1e-2)\n for peak in peaks]))\n\n planets = []\n for i, (peak, name) in enumerate(zip(peaks, string.ascii_lowercase[1:])):\n logP = pm.Uniform(name + \":logP\",\n lower=np.log(min_period),\n upper=np.log(max_period),\n testval=np.log(peak[\"period\"]))\n logK = pm.Deterministic(name + \":logK\", logamps[i])\n\n eccen = pm.Beta(name + \":eccen\",\n alpha=0.867,\n beta=3.03,\n testval=peak[\"eccen\"])\n omegabase = pm.Uniform(name + \":omegabase\", -2*np.pi, 2*np.pi,\n testval=peak[\"omega\"])\n omegavec = pm.Deterministic(name + \":omegavec\",\n tt.stack([tt.cos(omegabase),\n tt.sin(omegabase)]))\n\n phibase = pm.Uniform(name + \":phibase\", -2*np.pi, 2*np.pi,\n testval=peak[\"phase\"])\n phivec = pm.Deterministic(name + \":phivec\",\n tt.stack([tt.cos(phibase), tt.sin(phibase)]))\n planets.append(\n RVPlanet(name, logP, logK, phivec=phivec, eccen=eccen,\n omegavec=omegavec))\n\n rvmodel = RVModel(\"rv\", dataset, planets)\n pm.Deterministic(\"logp\", model.logpt)\n\n return rvmodel\n\n\n# Simulate a random dataset\n\nnp.random.seed(42 + version)\n\nt = np.sort(np.random.uniform(0.0, 4*365.0, 50))\nyerr = np.random.uniform(0.01, 0.1, len(t))\ny = yerr * np.random.randn(len(t))\n\nmin_period = 5\nmax_period = 100\nmin_amp = 0.2\nmax_amp = 0.8\ntarget_n_eff = 500\n\npeaks = []\nfor i in range(n_planets):\n peaks.append(dict(\n period=np.exp(np.random.uniform(np.log(min_period),\n np.log(max_period))),\n amp=np.exp(np.random.uniform(np.log(min_amp), np.log(max_amp))),\n phase=np.random.uniform(0, 2*np.pi),\n omega=np.random.uniform(0, 2*np.pi),\n eccen=np.random.uniform(0.01, 0.3),\n ))\npeaks = sorted(peaks, key=lambda x: x[\"amp\"])\n\nwith pm.Model() as sim_model:\n sim_rvmodel = build_model(peaks, t, y, yerr)\n f = theano.function(sim_model.vars, sim_rvmodel.get_rvmodels(t),\n on_unused_input=\"ignore\")\n coords = sim_model.test_point\n y += np.sum(f(*(coords[k.name] for k in sim_model.vars)), axis=1)\n\n# Plot the data\nfig = plt.figure()\nplt.errorbar(t % peaks[-1][\"period\"], y, yerr=yerr, fmt=\".k\")\nfig.savefig(os.path.join(dirname, \"data.png\"), 
bbox_inches=\"tight\")\nplt.close(fig)\n\n# Work out the key variables\nwith pm.Model() as model:\n rvmodel = build_model(peaks, t, y, yerr)\n\n key_vars = [v.name for v in rvmodel.datasets[0].vars]\n key_vars += [p.name + k for p in rvmodel.planets\n for k in (\":logP\", \":logK\", \":phi\", \":eccen\", \":omega\")]\n\n# Fit using emcee\nwith model:\n f = theano.function(model.vars,\n [model.logpt] + model.vars + model.deterministics)\n\n def log_prob_func(params):\n dct = model.bijection.rmap(params)\n args = (dct[k.name] for k in model.vars)\n results = f(*args)\n return tuple(results)\n\n # First we work out the shapes of all of the deterministic variables\n res = model.test_point\n vec = model.bijection.map(res)\n initial_blobs = log_prob_func(vec)[1:]\n dtype = [(var.name, float, np.shape(b)) for var, b in\n zip(model.vars + model.deterministics, initial_blobs)]\n\n # Then sample as usual\n coords = vec + 1e-5 * np.random.randn(3*len(vec), len(vec))\n nwalkers, ndim = coords.shape\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob_func,\n blobs_dtype=dtype)\n thin_by = 100\n tottime = 0\n for i in range(1000):\n strt = time.time()\n sampler.run_mcmc(coords, 50, thin_by=thin_by, progress=True)\n tottime += time.time() - strt\n\n samples = sampler.get_blobs()\n tau = np.array([float(emcee.autocorr.integrated_time(samples[k],\n tol=0))\n for k in key_vars])\n\n print(sampler.iteration * nwalkers / tau)\n converged = np.all(tau * target_n_eff / thin_by\n < sampler.iteration * nwalkers)\n converged &= np.all(sampler.iteration > 50 * tau)\n if converged:\n break\n\n samples = sampler.get_blobs(discard=int(tau.max()))\n tau_emcee = np.array([float(emcee.autocorr.integrated_time(samples[k],\n tol=0))\n for k in key_vars])\n time_emcee = tottime\n time_per_emcee = time_emcee / (sampler.iteration * nwalkers)\n time_ind_emcee = time_per_emcee * tau_emcee\n\n\n# Sample using pymc\nwith model:\n start = model.test_point\n\n ntune = 2000\n samples = sampler.get_chain(discard=int(tau_emcee.max()), flat=True)\n potential = pm.step_methods.hmc.quadpotential.QuadPotentialFull(\n np.cov(samples, rowvar=0))\n step = pm.NUTS(potential=potential)\n\n# ntune = 5000\n# _, step = pm.init_nuts(init=\"adapt_diag\", target_accept=0.8)\n\n print(\"Running burn-in...\")\n burnin = pm.sample(start=start, tune=ntune, draws=1, step=step, chains=1,\n compute_convergence_checks=False)\n\n trace = None\n next_start = burnin.point(-1)\n draws = 2000\n chains = 2\n ntotal = 0\n tottime = 0\n for i in range(100):\n strt = time.time()\n trace = pm.sample(start=next_start, trace=trace, tune=0, draws=draws,\n step=step, chains=chains,\n compute_convergence_checks=False, cores=1)\n tottime += time.time() - strt\n ntotal += draws * chains\n next_start = [trace.point(-1, c) for c in trace.chains]\n\n tau = np.array([\n float(emcee.autocorr.integrated_time(np.array(\n trace.get_values(v, combine=False)).T,\n tol=0))\n for v in key_vars])\n print(tau)\n print(ntotal / tau)\n print(pm.summary(trace, varnames=key_vars).n_eff)\n\n if (ntotal / tau).min() > target_n_eff and ntotal > tau.max() * 50:\n break\n tau_pymc = np.copy(tau)\n time_pymc = tottime\n time_per_pymc = time_pymc / (len(trace) * chains)\n time_ind_pymc = time_per_pymc * tau_pymc\n\nprint(\"time per ind. sample, emcee: {0}\".format(time_ind_emcee))\nprint(\"time per ind. sample, pymc: {0}\".format(time_ind_pymc))\nprint(\"time per ind. 
sample, ratio: {0}\"\n .format(time_ind_emcee / time_ind_pymc))\ndf = pd.DataFrame(dict(zip(key_vars, zip(time_ind_emcee, time_ind_pymc))))\ndf[\"method\"] = [\"emcee\", \"pymc\"]\ndf.to_csv(os.path.join(dirname, \"results.csv\"), index=False)\n\ntau = tau_emcee.max()\nsamples = sampler.get_blobs(flat=True, discard=int(2*tau), thin=int(tau))\ndf_emcee = pd.DataFrame.from_records(samples[key_vars])\n\nranges = [(np.min(df_emcee[k]), np.max(df_emcee[k])) for k in df_emcee.columns]\n\ndf_pymc = pm.trace_to_dataframe(trace, varnames=key_vars)\n\nw_pymc = len(df_emcee) / len(df_pymc) + np.zeros(len(df_pymc))\n\nv = key_vars[:15]\nfig = corner.corner(df_emcee[v], color=\"C0\",\n range=ranges[:len(v)])\ncorner.corner(df_pymc[v], weights=w_pymc, color=\"C1\", fig=fig,\n range=ranges[:len(v)])\nfig.savefig(os.path.join(dirname, \"corner.png\"), bbox_inches=\"tight\")\nplt.close(fig)\n"
] |
[
[
"numpy.log",
"numpy.random.seed",
"numpy.min",
"numpy.clip",
"numpy.all",
"numpy.max",
"numpy.copy",
"numpy.cov",
"numpy.shape",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.close",
"pandas.DataFrame.from_records",
"numpy.random.uniform",
"matplotlib.pyplot.figure"
]
] |
BorthakurAyon/snnpytorch
|
[
"212eea2d7cd80d9ebc709cc334ca28d165fc2861"
] |
[
"snnpytorch/test/test_spike_raster.py"
] |
[
"\"\"\"\n\nTest and Plot Spike Raster\n===========================\n\"\"\"\n\nfrom snnpytorch.network.spiking_neural_network import SNN\nfrom snnpytorch.dataset.spike_raster import SpikeRaster\nfrom time import time as t\nfrom torch.utils import data\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport torch\nimport os\n\n\nclass Pipeline(object):\n \"\"\"\n\n Class for testing the network and plotting spike raster\n \"\"\"\n\n def __init__(self):\n self.num_steps = None\n self.num_input_neurons = None\n self.num_output_neurons = None\n self.conn_prob = None\n self.device = None\n\n def parse_cmd(self) -> None:\n \"\"\"\n\n Parse command line inputs and update the model\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--num_steps\", type=int, default=200)\n parser.add_argument(\"--num_input_neurons\", type=int, default=10)\n parser.add_argument(\"--num_output_neurons\", type=int, default=100)\n parser.add_argument(\"--conn_prob\", type=float, default=0.5)\n args = parser.parse_args()\n\n self.num_steps = args.num_steps\n self.num_input_neurons = args.num_input_neurons\n self.num_output_neurons = args.num_output_neurons\n self.conn_prob = args.conn_prob\n use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n def gen_input_spike_raster(self) -> None:\n \"\"\"\n\n Generate and save input spike raster.\n \"\"\"\n\n input_data = np.zeros((self.num_steps, self.num_input_neurons))\n expectation = 5\n num_spikes_per_neuron = int(self.num_steps / expectation)\n\n # Make the spike data\n for input_neuron in range(self.num_input_neurons):\n isi = np.random.poisson(expectation, num_spikes_per_neuron)\n spike_times = np.cumsum(isi)\n input_data[\n spike_times[spike_times < self.num_steps], input_neuron] = 1\n\n # Save the data for later use by dataloader\n file_name = Path.cwd() / 'snnpytorch' / 'data' / 'input_data.pt'\n torch.save(input_data, file_name)\n\n def load_dataset(self) -> torch.Tensor:\n \"\"\"\n\n Load spike data for time points using torch dataloader\\n\n :return: Input spike data for a single time point\n \"\"\"\n\n # Parameters\n params = {'shuffle': False,\n 'batch_size': 1\n }\n\n file_name = Path.cwd() / 'snnpytorch' / 'data' / 'input_data.pt'\n\n # Generators\n input_data = SpikeRaster(fname=file_name)\n input_data_generator = data.DataLoader(input_data, **params)\n return input_data_generator\n\n def run(self) -> (torch.Tensor, torch.Tensor):\n \"\"\"\n\n Run the simulation for the defined number of steps.\\n\n :return: input spike raster, output spike raster\n \"\"\"\n\n output_spikes = []\n input_spikes = []\n\n # Update parameters as specified from command line\n self.parse_cmd()\n\n # Generate input spike raster ( self.num_steps, self.num_input_neurons )\n self.gen_input_spike_raster()\n\n # Load dataset\n input_data_generator = self.load_dataset()\n\n # Create spiking neuron model\n model = SNN(num_input_neurons=self.num_input_neurons,\n num_output_neurons=self.num_output_neurons,\n conn_prob=self.conn_prob)\n model.to(self.device)\n\n start = t()\n print(\"\\nProgress: \"\"(%.4f seconds)\" % (t() - start))\n\n progress = tqdm(total=len(input_data_generator))\n\n for local_data in input_data_generator:\n # Transfer data to GPU / CPU\n local_data = local_data.to(self.device).float()\n\n with torch.no_grad(): # No learning in the model\n input_spikes.append(local_data.tolist()[0]) # batch size=1\n\n # Update model with the data 
and store the spike outputs\n output_spikes.append(model.forward(local_data).tolist()[0])\n progress.update(1)\n\n progress.close()\n return torch.tensor(input_spikes), torch.tensor(output_spikes)\n\n def plot_simulation_results(self, input_spike_data: torch.Tensor,\n output_spike_data: torch.Tensor) -> None:\n \"\"\"\n\n Plot input and output spike raster.\\n\n :param input_spike_data: Input spike raster\n :param output_spike_data: Output spike raster\n \"\"\"\n\n fig, ax = plt.subplots(1, 2)\n\n # Input spike raster plot\n ax[0].scatter(*torch.where(input_spike_data), color='k')\n ax[0].set_xlim([0, self.num_steps])\n ax[0].set_ylabel(\" Number of input channels (M) \")\n ax[0].set_xlabel(\" Number of time points (T) \")\n ax[0].set_title(\" Spike raster plot \")\n\n # Output spike raster plot\n ax[1].scatter(*torch.where(output_spike_data), color='r')\n ax[1].set_xlim([0, self.num_steps])\n ax[1].set_xlabel(\" Number of time points (T) \")\n ax[1].set_ylabel(\" Number of output neurons (N) \")\n ax[1].set_title(\" Spike raster plot \")\n\n # plot if display is available\n have_display = bool(os.environ.get('DISPLAY', None))\n if have_display:\n plt.show()\n filename = Path.cwd() / 'snnpytorch' / 'data' / \\\n 'input_output_spike_raster.png'\n plt.savefig(filename)\n else:\n filename = Path.cwd() / 'snnpytorch' / 'data' / \\\n 'input_output_spike_raster.png'\n plt.savefig(filename)\n\n\nif __name__ == \"__main__\":\n pipeline = Pipeline()\n input_spikes, output_spikes = pipeline.run()\n\n # Plot raster if device display is available\n pipeline.plot_simulation_results(input_spikes, output_spikes)\n print(\"End of simulation\")\n"
] |
[
[
"torch.utils.data.DataLoader",
"matplotlib.pyplot.subplots",
"numpy.cumsum",
"torch.tensor",
"matplotlib.pyplot.savefig",
"numpy.random.poisson",
"torch.no_grad",
"torch.cuda.is_available",
"torch.where",
"torch.device",
"matplotlib.pyplot.show",
"numpy.zeros",
"torch.save"
]
] |
nilp0inter/LinakDeskApp
|
[
"0cf287ee96002f5c270c087ba73b72c548baa8c5"
] |
[
"src/linakdeskapp/gui/mpl/position_chart.py"
] |
[
"# MIT License\n#\n# Copyright (c) 2017 Arkadiusz Netczuk <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\nimport logging\n\ntry:\n import pandas\nexcept ImportError:\n ### No module named <name>\n logging.exception(\"Exception while importing\")\n exit(1)\n\nfrom .mpl_canvas import matplotlib, DynamicMplCanvas\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass PositionChart(DynamicMplCanvas):\n\n def __init__(self, parentWidget=None):\n super().__init__(parentWidget, 10, 10, 80)\n\n self.xdata = list()\n self.ydata = list()\n\n linesList = self.plot.plot_date( self.xdata, self.ydata, 'r',\n linewidth=3, antialiased=True)\n self.line = linesList[0]\n\n# self.fig.suptitle('Desk position', y=0.95, fontsize=18)\n self.plot.set_xlabel('Time', fontsize=14)\n self.plot.set_ylabel('Height', fontsize=14)\n\n formatter = matplotlib.dates.DateFormatter('%H:%M:%S')\n self.plot.xaxis.set_major_formatter( formatter )\n\n self.plot.margins( y=0.2 )\n self.plot.set_xmargin(0.0) ## prevents empty space between first tick and y axis\n\n # rotates and right aligns the x labels, and moves the bottom of the\n # axes up to make room for them\n self.fig.autofmt_xdate()\n\n self._set_plot_data()\n\n def addData(self, deskHeight):\n currTime = self.getCurrTime()\n self.xdata.append(currTime)\n self.ydata.append(deskHeight)\n\n self._set_plot_data()\n\n def clearData(self):\n self.xdata.clear()\n self.ydata.clear()\n self._set_plot_data()\n\n def updateData(self):\n yLen = len(self.ydata)\n if yLen < 1:\n ## no data - nothing to do\n return False\n last = self.ydata[-1]\n if yLen < 2:\n ## only one value\n self.addData( last )\n return True\n ## two or more values\n last2 = self.ydata[-2]\n if last != last2:\n self.addData( last )\n return True\n self.xdata[-1] = self.getCurrTime()\n self._set_plot_data()\n return True\n\n def getCurrTime(self):\n currTime = pandas.Timestamp.now()\n return currTime\n\n def _set_plot_data(self):\n if len(self.xdata) < 2:\n return\n\n self.line.set_xdata( self.xdata )\n self.line.set_ydata( self.ydata )\n\n ticks = self._generate_ticks(12)\n self.plot.set_xticks( ticks )\n\n ### hide first and last major tick (next to plot edges)\n xticks = self.plot.xaxis.get_major_ticks()\n xticks[0].label1.set_visible(False)\n ##xticks[-1].label1.set_visible(False)\n\n self.plot.relim(True)\n self.plot.autoscale_view()\n self.fig.tight_layout() ## make space for labels of axis\n# self.fig.subplots_adjust(top=0.82) ## make space for suptitle\n\n def 
_generate_ticks(self, number):\n if number < 1:\n return list()\n start = self.xdata[0].timestamp()\n tzoffset = start - pandas.Timestamp( start, unit=\"s\" ).timestamp()\n if number < 2:\n middle = (start + self.xdata[-1].timestamp()) / 2 + tzoffset\n ts = pandas.Timestamp( middle, unit=\"s\" )\n ticks = [ts]\n return ticks\n delta = (self.xdata[-1].timestamp() - start) / (number - 1)\n ticks = list()\n ticks.append( self.xdata[0] )\n currTs = start + tzoffset\n for _ in range(1, number):\n currTs += delta\n ts = pandas.Timestamp( currTs, unit=\"s\" )\n ticks.append( ts )\n return ticks\n"
] |
[
[
"pandas.Timestamp",
"pandas.Timestamp.now"
]
] |
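A minimal, standalone sketch of the pandas.Timestamp tick-generation pattern used in position_chart.py above; the sample xdata values are hypothetical and the single-tick branch is simplified relative to the original.

import pandas

def generate_ticks(xdata, number):
    # Evenly spaced Timestamps between the first and last sample,
    # mirroring the intent of PositionChart._generate_ticks (illustration only).
    if number < 2 or len(xdata) < 2:
        return list(xdata[:1])
    start = xdata[0].timestamp()
    # offset between the naive local Timestamp and its epoch-based reconstruction
    tzoffset = start - pandas.Timestamp(start, unit="s").timestamp()
    delta = (xdata[-1].timestamp() - start) / (number - 1)
    ticks = [xdata[0]]
    curr = start + tzoffset
    for _ in range(1, number):
        curr += delta
        ticks.append(pandas.Timestamp(curr, unit="s"))
    return ticks

now = pandas.Timestamp.now()
xdata = [now + pandas.Timedelta(seconds=10 * i) for i in range(7)]
print(generate_ticks(xdata, 4))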
panda-tech/poseidon-airflow
|
[
"bce5bc02b55f15330635a436056d99acb93488ef"
] |
[
"poseidon/dags/water_tests/indicator_bacteria_jobs.py"
] |
[
"import cx_Oracle\nimport pandas as pd\nimport os\nimport string\nimport logging\nimport re\n\nfrom datetime import datetime, timedelta\n\nfrom trident.util import general\n\nconf = general.config\n\n\n\ndef get_indicator_bacteria_tests(date_start='01-JAN-2014', date_end='15-JUN-2017', **kwargs):\n \n # For test mode\n if kwargs['test_mode'] == True:\n logging.warning(\"RUNNING IN TEST MODE, PULLING LAST YEAR ONLY!!!!\")\n date_start = (kwargs['execution_date'] - timedelta(days=365)).strftime('%d-%b-%Y')\n \n db = cx_Oracle.connect(conf['oracle_wpl'])\n\n logging.info(\"Starting Indicator Bac Tests: \" + date_start + \" to \" + date_end)\n\n\n jzn_1_q = string.Template(general.file_to_string('./sql/jzn1.sql', __file__))\\\n .substitute(ds=date_start, de=date_end)\n jzn_2_q = string.Template(general.file_to_string('./sql/jzn2.sql', __file__))\\\n .substitute(ds=date_start, de=date_end)\n jzn_3_q = string.Template(general.file_to_string('./sql/jzn3.sql', __file__))\\\n .substitute(ds=date_start, de=date_end)\n jzn_4_q = string.Template(general.file_to_string('./sql/jzn4.sql', __file__))\\\n .substitute(ds=date_start, de=date_end)\n\n logging.info(\"Reading JZN1\")\n jzn_1 = pd.read_sql_query(jzn_1_q, db, coerce_float=True, index_col='F_FIELD_RECORD')\n jzn_1.F_VALUE = pd.to_numeric(jzn_1.F_VALUE, errors='coerce')\n jzn_1 = jzn_1[jzn_1.F_VALUE.notnull()]\n\n logging.info(\"Reading JZN2\")\n jzn_2 = pd.read_sql_query(jzn_2_q, db, coerce_float=True, index_col='F_FIELD_RECORD')\n jzn_2.F_VALUE = pd.to_numeric(jzn_2.F_VALUE, errors='coerce')\n jzn_2 = jzn_2[jzn_2.F_VALUE.notnull()]\n\n logging.info(\"Reading JZN3\")\n jzn_3 = pd.read_sql_query(jzn_3_q, db, coerce_float=True, index_col='F_FIELD_RECORD')\n jzn_3.F_VALUE = pd.to_numeric(jzn_3.F_VALUE, errors='coerce')\n jzn_3 = jzn_3[jzn_3.F_VALUE.notnull()]\n\n logging.info(\"Reading JZN4\")\n jzn_4 = pd.read_sql_query(jzn_4_q, db, coerce_float=True, index_col='F_FIELD_RECORD')\n jzn_4.F_VALUE = pd.to_numeric(jzn_4.F_VALUE, errors='coerce')\n jzn_4 = jzn_4[jzn_4.F_VALUE.notnull()]\n\n jn_1 = jzn_1.rename(columns={\n 'SOURCE':'V5_SOURCE',\n 'SAMPLE_DATE':'V5_SAMPLE_DATE',\n 'SAMPLE_ID':'V5_SAMPLE_ID',\n 'F_VALUE':'V5_CL2_TOTAL',\n 'L_VALUE':'V5_T_COLIFORM'\n }).filter(like='V5',axis=1)\n\n jn_2 = jzn_2.rename(columns={\n 'L_VALUE':'V5_E_COLI'\n }).filter(like='V5',axis=1)\n\n jn_3 = jzn_3.rename(columns={\n 'F_QUAL':'V5_TEMP_PART1',\n 'F_VALUE':'V5_TEMP_PART2'\n }).filter(like='V5',axis=1)\n\n jn_4 = jzn_4.rename(columns={\n 'F_QUAL':'V5_PH_PART1',\n 'F_VALUE':'V5_PH_PART2'\n }).filter(like='V5',axis=1)\n\n df = jn_1.join([jn_2, jn_3, jn_4], how='inner')\n\n df = df.rename(columns={\n 'V5_PH_PART2':'V5_PH',\n 'V5_TEMP_PART2':'V5_TEMPERATURE',\n })\n\n del df['V5_PH_PART1']\n del df['V5_TEMP_PART1']\n\n df.columns = [re.sub('V5\\_','',x) for x in df.columns]\n df.columns = [x.lower() for x in df.columns]\n df = df.rename(columns={'sample_date':'date_sampled'})\n df.index.rename(name='FR_NUM', inplace=True)\n\n new_file_path = conf['prod_data_dir'] + '/indicator_bacteria_tests_datasd_v1.csv'\n logging.info(\"Writing to \" + new_file_path)\n df.to_csv(new_file_path,\n index=True, \n encoding='utf-8', \n doublequote=True, \n date_format=conf['date_format_ymd'])\n \n return \"Indicator bacteria tests written to \" + new_file_path\n\n\ndef get_latest_bac_tests():\n full_bacs_path = conf['prod_data_dir'] + \"/indicator_bacteria_tests_datasd_v1.csv\"\n bac_tests = pd.read_csv(full_bacs_path)\n bac_tests.date_sampled = pd.to_datetime(bac_tests.date_sampled, 
infer_datetime_format=True)\n\n df = bac_tests[bac_tests.date_sampled == max(bac_tests.date_sampled)]\n\n new_file_path = conf['prod_data_dir'] + '/latest_indicator_bac_tests_datasd_v1.csv'\n\n df.to_csv(new_file_path,\n index=False, \n encoding='utf-8', \n doublequote=True, \n date_format=conf['date_format_ymd'])\n\n return \"Latest indicator bacteria tests written to \" + new_file_path\n"
] |
[
[
"pandas.read_sql_query",
"pandas.read_csv",
"pandas.to_numeric",
"pandas.to_datetime"
]
] |
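A self-contained sketch of the numeric-coercion and date-parsing pattern the DAG above applies to each query result; the toy DataFrame is a hypothetical stand-in for the cx_Oracle result that the real job loads with pd.read_sql_query.

import pandas as pd

# Hypothetical stand-in for one jzn query result; the real job reads it with
# pd.read_sql_query(query, db, coerce_float=True, index_col='F_FIELD_RECORD').
raw = pd.DataFrame({
    "F_FIELD_RECORD": [1, 2, 3],
    "F_VALUE": ["0.2", "n/a", "1.5"],
    "SAMPLE_DATE": ["2017-06-01", "2017-06-02", "2017-06-03"],
}).set_index("F_FIELD_RECORD")

# Coerce measurements to numbers and drop rows that failed to parse,
# the same pattern applied to jzn_1 .. jzn_4.
raw["F_VALUE"] = pd.to_numeric(raw["F_VALUE"], errors="coerce")
clean = raw[raw["F_VALUE"].notnull()].copy()

# Dates are parsed the way get_latest_bac_tests() does before taking the max.
clean["SAMPLE_DATE"] = pd.to_datetime(clean["SAMPLE_DATE"])
print(clean[clean["SAMPLE_DATE"] == clean["SAMPLE_DATE"].max()])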
Aleksa14/DeepRecommender
|
[
"39716087ab18cfa7d42a042451b0b9bad7701359"
] |
[
"infer.py"
] |
[
"# Copyright (c) 2017 NVIDIA Corporation\nimport torch\nimport argparse\nimport copy\nfrom reco_encoder.data import input_layer\nfrom reco_encoder.model import model\nfrom torch.autograd import Variable\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser(description='RecoEncoder')\n\nparser.add_argument('--drop_prob', type=float, default=0.0, metavar='N',\n help='dropout drop probability')\nparser.add_argument('--constrained', action='store_true',\n help='constrained autoencoder')\nparser.add_argument('--skip_last_layer_nl', action='store_true',\n help='if present, decoder\\'s last layer will not apply non-linearity function')\nparser.add_argument('--hidden_layers', type=str, default=\"1024,512,512,128\", metavar='N',\n help='hidden layer sizes, comma-separated')\nparser.add_argument('--path_to_train_data', type=str, default=\"\", metavar='N',\n help='Path to training data')\nparser.add_argument('--path_to_eval_data', type=str, default=\"\", metavar='N',\n help='Path to evaluation data')\nparser.add_argument('--non_linearity_type', type=str, default=\"selu\", metavar='N',\n help='type of the non-linearity used in activations')\nparser.add_argument('--save_path', type=str, default=\"autorec.pt\", metavar='N',\n help='where to save model')\nparser.add_argument('--predictions_path', type=str, default=\"out.txt\", metavar='N',\n help='where to save predictions')\n\nargs = parser.parse_args()\nprint(args)\n\ndef main():\n params = dict()\n params['batch_size'] = 1\n params['data_dir'] = args.path_to_train_data\n params['major'] = 'users'\n params['itemIdInd'] = 1\n params['userIdInd'] = 0\n print(\"Loading training data\")\n data_layer = input_layer.UserItemRecDataProvider(params=params)\n print(\"Data loaded\")\n print(\"Total items found: {}\".format(len(data_layer.data.keys())))\n print(\"Vector dim: {}\".format(data_layer.vector_dim))\n\n print(\"Loading eval data\")\n eval_params = copy.deepcopy(params)\n # must set eval batch size to 1 to make sure no examples are missed\n eval_params['batch_size'] = 1\n eval_params['data_dir'] = args.path_to_eval_data\n eval_data_layer = input_layer.UserItemRecDataProvider(params=eval_params,\n user_id_map=data_layer.userIdMap,\n item_id_map=data_layer.itemIdMap)\n\n rencoder = model.AutoEncoder(layer_sizes=[data_layer.vector_dim] + [int(l) for l in args.hidden_layers.split(',')],\n nl_type=args.non_linearity_type,\n is_constrained=args.constrained,\n dp_drop_prob=args.drop_prob,\n last_layer_activations=not args.skip_last_layer_nl)\n\n path_to_model = Path(args.save_path)\n if path_to_model.is_file():\n print(\"Loading model from: {}\".format(path_to_model))\n rencoder.load_state_dict(torch.load(args.save_path))\n\n print('######################################################')\n print('######################################################')\n print('############# AutoEncoder Model: #####################')\n print(rencoder)\n print('######################################################')\n print('######################################################')\n rencoder.eval()\n rencoder = rencoder.cuda()\n inv_userIdMap = {v: k for k, v in data_layer.userIdMap.items()}\n inv_itemIdMap = {v: k for k, v in data_layer.itemIdMap.items()}\n\n eval_data_layer.src_data = data_layer.data\n with open(args.predictions_path, 'w') as outf:\n for i, ((out, src), majorInd) in enumerate(eval_data_layer.iterate_one_epoch_eval(for_inf=True)):\n inputs = Variable(src.cuda().to_dense())\n targets_np = out.to_dense().numpy()[0, :]\n outputs = 
rencoder(inputs).cpu().data.numpy()[0, :]\n non_zeros = targets_np.nonzero()[0].tolist()\n major_key = inv_userIdMap [majorInd]\n for ind in non_zeros:\n outf.write(\"{}\\t{}\\t{}\\t{}\\n\".format(major_key, inv_itemIdMap[ind], outputs[ind], targets_np[ind]))\n if i % 10000 == 0:\n print(\"Done: {}\".format(i))\n\nif __name__ == '__main__':\n main()\n\n\n"
] |
[
[
"torch.load"
]
] |
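A minimal sketch of the checkpoint round trip that infer.py depends on; nn.Linear is a hypothetical stand-in for the reco_encoder AutoEncoder.

import torch
import torch.nn as nn

model = nn.Linear(8, 4)                      # stand-in for model.AutoEncoder(...)
torch.save(model.state_dict(), "autorec.pt")

restored = nn.Linear(8, 4)
restored.load_state_dict(torch.load("autorec.pt"))  # the torch.load call the row records
restored.eval()                              # inference mode, as in main()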
DenXX/fvcore
|
[
"4b91cf092f4f5d379b2c93398780a3b5755e7179"
] |
[
"tests/test_param_count.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\nimport unittest\nfrom torch import nn\n\nfrom fvcore.nn.parameter_count import parameter_count, parameter_count_table\n\n\nclass NetWithReuse(nn.Module):\n def __init__(self, reuse: bool = False) -> None:\n super().__init__()\n self.conv1 = nn.Conv2d(100, 100, 3)\n self.conv2 = nn.Conv2d(100, 100, 3)\n if reuse:\n self.conv2.weight = self.conv1.weight # pyre-ignore\n\n\nclass NetWithDupPrefix(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.conv1 = nn.Conv2d(100, 100, 3)\n self.conv111 = nn.Conv2d(100, 100, 3)\n\n\nclass TestParamCount(unittest.TestCase):\n def test_param(self) -> None:\n net = NetWithReuse()\n count = parameter_count(net)\n self.assertTrue(count[\"\"], 180200)\n self.assertTrue(count[\"conv2\"], 90100)\n\n def test_param_with_reuse(self) -> None:\n net = NetWithReuse(reuse=True)\n count = parameter_count(net)\n self.assertTrue(count[\"\"], 90200)\n self.assertTrue(count[\"conv2\"], 100)\n\n def test_param_with_same_prefix(self) -> None:\n net = NetWithDupPrefix()\n table = parameter_count_table(net)\n c = [\"conv111.weight\" in line for line in table.split(\"\\n\")]\n self.assertEqual(\n sum(c), 1\n ) # it only appears once, despite being a prefix of conv1\n"
] |
[
[
"torch.nn.Conv2d"
]
] |
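Where the expected numbers in TestParamCount come from: nn.Conv2d(100, 100, 3) holds 100*100*3*3 weights plus 100 biases, i.e. 90,100 parameters, so two independent convs give 180,200 and sharing conv1's weight leaves 90,200 (one weight tensor plus two biases). A quick check:

from torch import nn

conv = nn.Conv2d(100, 100, 3)
print(sum(p.numel() for p in conv.parameters()))  # 90100 = 100*100*3*3 + 100

Note that assertTrue(count[""], 180200) passes for any truthy count; assertEqual would make the comparison strict.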
fredericgo/rl_morph_pytorch
|
[
"743cd82d82c16c8d52e5265b6cc5cdf490cb8945"
] |
[
"style_transfer/train.py"
] |
[
"import argparse\nimport datetime\nimport gym\nimport numpy as np\nimport itertools\n\nimport sys\nsys.path.insert(0, '..')\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom style_transfer.replay_memory_dataset import ReplayMemoryDataset\nfrom style_transfer.skeleton_template_dataset import SkeletonTemplateDataset\nfrom style_transfer.skeleton_encoder import SkeletonEncoder\nfrom style_transfer.motion_encoder import MotionEncoder\nfrom style_transfer.motion_decoder import MotionDecoder\nfrom style_transfer.ae import AE\nimport envs\n\nparser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')\nparser.add_argument('--env1-name', default=\"ant\",\n help='Mujoco Gym environment (default: HalfCheetah-v2)')\nparser.add_argument('--env2-name', default=\"ant3\",\n help='Mujoco Gym environment (default: HalfCheetah-v2)')\nparser.add_argument('--agent_memory1', default='data/ant.memory',\n help='Path for saved replay memory')\nparser.add_argument('--agent_memory2', default='data/ant3.memory',\n help='Path for saved replay memory')\nparser.add_argument('--hidden_dim', type=int, default=256,\n help='MLP hidden dimension')\nparser.add_argument('--latent_dim', type=int, default=64,\n help='Encoder latent dimension')\nparser.add_argument('--seed', type=int, default=123456, metavar='N',\n help='random seed (default: 123456)')\nparser.add_argument('--lr', type=float, default=5e-4, metavar='N',\n help='random seed (default: 123456)')\nparser.add_argument('--epochs', type=int, default=2000, metavar='N',\n help='random seed (default: 123456)')\nparser.add_argument('--batch_size', type=int, default=128, metavar='N',\n help='random seed (default: 123456)')\nparser.add_argument('--checkpoint_interval', type=int, default=10, \n help='checkpoint training model every # steps')\nparser.add_argument('--cuda', action=\"store_true\",\n help='run on CUDA (default: False)')\nargs = parser.parse_args()\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\nenv = envs.load(args.env1_name)\nenv.seed(args.seed)\n\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\ndataset1 = ReplayMemoryDataset(args.agent_memory1)\ndataset2 = ReplayMemoryDataset(args.agent_memory2)\ncombined_dataset = ConcatDataset([dataset1, dataset2])\n\ns1 = dataset1[0][0].size(0)\ns2 = dataset2[0][0].size(0)\n\nskeleton_dataset = SkeletonTemplateDataset([s1, s2])\n\nMAX_LEN = 27\n\ndef collate_and_pad(batch):\n B = len(batch)\n out_dims = (B, MAX_LEN)\n out_x = batch[0][0].new_full(out_dims, 0.)\n for i, (state, _, _, _, _) in enumerate(batch):\n length = state.size(0)\n out_x[i, :length, ...] 
= state\n out_x = out_x.to(device=device)\n return out_x\n\nstate_size = env.observation_space.shape[0]\nmodel = AE(state_size, state_size, args.hidden_dim, args.latent_dim).to(device=device)\n\n\n\n#Tesnorboard\ndatetime_st = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\nlog_dir = f'runs/{datetime_st}_StyleAE'\nwriter = SummaryWriter(log_dir)\n\ndataloader = DataLoader(combined_dataset, batch_size=args.batch_size,\n collate_fn=collate_and_pad, drop_last=True,\n shuffle=True, num_workers=2)\nskeleton_loader = DataLoader(skeleton_dataset, batch_size=args.batch_size, num_workers=0)\nskeleton_iter = iter(itertools.cycle(skeleton_loader))\n\ndef style_trasfer_loss(f, x, s, x_hat):\n dt = f(x_hat, s) - f(x, s)\n content_loss = torch.sum(torch.norm(dt, p=2, dim=-1))\n ds = f.skeleton_encoder(x_hat) - f.skeleton_encoder(s)\n style_loss = torch.sum(torch.norm(ds, p=2, dim=-1))\n return content_loss + style_loss\n\noptimizer = Adam(model.parameters(), lr=args.lr)\nprint(\"Start training StyleAE...\")\nmodel.train()\n\nepoch = 0\n\nfor epoch in range(args.epochs):\n overall_loss = 0\n\n for batch_idx, x, in enumerate(dataloader):\n s = next(skeleton_iter)\n \n optimizer.zero_grad()\n x_hat = model(x, s)\n \n \n loss = style_trasfer_loss(model.f,\n x, s, x_hat)\n overall_loss += loss.item()\n \n loss.backward()\n optimizer.step()\n avg_loss = overall_loss / (batch_idx * args.batch_size)\n\n writer.add_scalar('loss', avg_loss, epoch)\n\n print(f\"\\tEpoch {epoch + 1} completed!\\t Average Loss: {avg_loss}\")\n\n if epoch % args.checkpoint_interval == 0:\n model.save_model(log_dir)\n print(\"----------------------------------------\")\n print(f\"Save Model: {epoch} epoch.\")\n print(\"----------------------------------------\")"
] |
[
[
"torch.norm",
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.utils.data.ConcatDataset",
"torch.utils.tensorboard.SummaryWriter",
"torch.device"
]
] |
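A reduced, runnable skeleton of the training setup recorded above (seeding, dataset concatenation, data loading, TensorBoard logging, and the torch.norm-based loss). The toy TensorDatasets and log directory are hypothetical stand-ins for the replay-memory datasets; running it requires the tensorboard package.

import numpy as np
import torch
from torch.utils.data import ConcatDataset, DataLoader, TensorDataset
from torch.utils.tensorboard import SummaryWriter

torch.manual_seed(123456)                     # reproducibility, as in train.py
np.random.seed(123456)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

ds1 = TensorDataset(torch.randn(64, 27))      # stand-ins for the two replay memories
ds2 = TensorDataset(torch.randn(64, 27))
loader = DataLoader(ConcatDataset([ds1, ds2]), batch_size=16,
                    shuffle=True, drop_last=True)

writer = SummaryWriter("runs/demo_style_ae")
for step, (x,) in enumerate(loader):
    x = x.to(device)
    # L2 norm over the feature axis, the primitive used by style_trasfer_loss
    loss = torch.sum(torch.norm(x, p=2, dim=-1))
    writer.add_scalar("loss", loss.item(), step)
writer.close()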
BboyTian/gupview
|
[
"6ef6693f8b58d224a89e2963bcd4d44312e957de"
] |
[
"gupview/Secondary_Scripts/Flouroscence.py"
] |
[
"#########\n#Imports#\n#########\n\n# Python Basics\nfrom decimal import Decimal\n\n# Graph Plotting\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.figure import Figure\n\n# Image process\nimport numpy as np\nimport PIL\nfrom .Masks import rectMask_func\n\n# Parameters\nimport Parameters as para\n\n\n###########\n#Operation#\n###########\n\nclass Flouro:\n def __init__(self, plotsize, cropsize):\n\n figsize = int(plotsize/80)\n self.halfcropsize = int(cropsize/2)\n\n # image to be processed\n self.img = None\n\n self.count = 0\n\n #intialising figure\n self.fig = Figure(figsize=(figsize, figsize), dpi=100)\n self.ax = self.fig.add_subplot(111)\n self.ax.set_ylim(para.ylim)\n self.ax.set_xlim([self.count-para.xlim,self.count])\n\n self.count_ara = np.array([])\n self.flour_ara = np.array([])\n self.flour_plot = self.ax.plot(self.count_ara, self.flour_ara)\n\n\n def get_plot(self, image, cropLoc, cropdimension, xlim, flourSum_res):\n halfcropsize_x, halfcropsize_y = int(cropdimension[0] / 2), int(cropdimension[1] / 2)\n\n # Obtaining crop image\n cropImage = image[cropLoc[1]-halfcropsize_y : cropLoc[1]+halfcropsize_y,\n cropLoc[0]-halfcropsize_x : cropLoc[0]+halfcropsize_x]\n flour = np.sum(cropImage)\n\n # appending new values\n self.count_ara = np.append(self.count_ara, self.count)\n self.count += 1\n self.flour_ara = np.append(self.flour_ara, flour)\n\n # deleting beyond the limit\n if len(self.count_ara) > xlim:\n self.count_ara = self.count_ara[-xlim:]\n self.flour_ara = self.flour_ara[-xlim:]\n self.flour_plot[0].remove()\n\n # updating plot\n self.flour_plot = self.ax.plot(self.count_ara, self.flour_ara, 'o', color='C0')\n self.ax.set_xlim([self.count-xlim,self.count])\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n # updating display number\n flourSum_res.configure(text='%.5E' % Decimal(str(flour)))\n\n def get_feed(self, array, cropLoc, cropdimension, width, height):\n\n image = PIL.Image.fromarray(array)\n # Obtaining Feed Image\n feed_image = rectMask_func(image, cropLoc, cropdimension)\n feed_image = feed_image.resize((width, height), PIL.Image.NEAREST)\n\n return feed_image\n"
] |
[
[
"matplotlib.figure.Figure",
"matplotlib.use",
"numpy.append",
"numpy.array",
"numpy.sum"
]
] |
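A headless sketch of the rolling fluorescence-sum plot maintained by Flouro: sum a cropped region, append it to numpy buffers, trim to the window, and redraw. The random image and crop coordinates are placeholders.

import matplotlib
matplotlib.use("Agg")                         # headless backend for this sketch; the app uses TkAgg
from matplotlib.figure import Figure
import numpy as np

fig = Figure(figsize=(4, 4), dpi=100)
ax = fig.add_subplot(111)

count_ara = np.array([])
flour_ara = np.array([])
xlim = 50

image = np.random.rand(480, 640)              # placeholder frame
flour = np.sum(image[100:200, 100:200])       # summed intensity of the crop

count_ara = np.append(count_ara, len(count_ara))
flour_ara = np.append(flour_ara, flour)
count_ara, flour_ara = count_ara[-xlim:], flour_ara[-xlim:]   # rolling window

ax.plot(count_ara, flour_ara, "o", color="C0")
fig.savefig("flouro_demo.png")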
endrol/Anomaly_Clustering
|
[
"670546751543f1d919c4a788e96bcf4405e3423c"
] |
[
"datasets/mvtec.py"
] |
[
"import os\nimport sys\nfrom pathlib import Path\nfrom typing import List, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch import Tensor\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms as T\nimport config as c\n\n\n__all__ = (\"MVTecDataset\")\n\n# URL = 'ftp://guest:[email protected]/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz'\nMVTEC_CLASS_NAMES = [\n \"bottle\",\n \"cable\",\n \"capsule\",\n \"carpet\",\n \"grid\",\n \"hazelnut\",\n \"leather\",\n \"metal_nut\",\n \"pill\",\n \"screw\",\n \"tile\",\n \"toothbrush\",\n \"transistor\",\n \"wood\",\n \"zipper\",\n]\n\n\nclass MVTecDataset(Dataset):\n def __init__(self, is_train=True):\n assert c.class_name in MVTEC_CLASS_NAMES, \"class_name: {}, should be in {}\".format(\n c.class_name, MVTEC_CLASS_NAMES\n )\n self.dataset_path = c.mvtec_data_path\n self.class_name = c.class_name\n self.is_train = is_train\n self.cropsize = c.crp_size\n # load dataset\n self.x, self.y, self.mask = self.load_dataset_folder()\n # set transforms\n if is_train:\n self.transform_x = T.Compose(\n [\n T.Resize(c.img_size, Image.ANTIALIAS),\n T.CenterCrop(c.crp_size),\n T.ToTensor(),\n ]\n )\n # test:\n else:\n self.transform_x = T.Compose(\n [T.Resize(c.img_size, Image.ANTIALIAS), T.CenterCrop(c.crp_size), T.ToTensor()]\n )\n # mask\n self.transform_mask = T.Compose(\n [T.Resize(c.img_size, Image.NEAREST), T.CenterCrop(c.crp_size), T.ToTensor()]\n )\n\n self.normalize = T.Compose([T.Normalize(c.norm_mean, c.norm_std)])\n\n def __getitem__(self, idx):\n x, y, mask = self.x[idx], self.y[idx], self.mask[idx]\n # x = Image.open(x).convert('RGB')\n x = Image.open(x)\n if self.class_name in [\"zipper\", \"screw\", \"grid\"]: # handle greyscale classes\n x = np.expand_dims(np.array(x), axis=2)\n x = np.concatenate([x, x, x], axis=2)\n\n x = Image.fromarray(x.astype(\"uint8\")).convert(\"RGB\")\n #\n x = self.normalize(self.transform_x(x))\n #\n if y == 0:\n mask = torch.zeros([1, self.cropsize[0], self.cropsize[1]])\n else:\n mask = Image.open(mask)\n mask = self.transform_mask(mask)\n\n return x, y, mask\n\n def __len__(self):\n return len(self.x)\n\n def load_dataset_folder(self):\n phase = \"train\" if self.is_train else \"test\"\n x, y, mask = [], [], []\n\n img_dir = os.path.join(self.dataset_path, self.class_name, phase)\n gt_dir = os.path.join(self.dataset_path, self.class_name, \"ground_truth\")\n\n img_types = sorted(os.listdir(img_dir))\n for img_type in img_types:\n\n # load images\n img_type_dir = os.path.join(img_dir, img_type)\n if not os.path.isdir(img_type_dir):\n continue\n img_fpath_list = sorted(\n [\n os.path.join(img_type_dir, f)\n for f in os.listdir(img_type_dir)\n if f.endswith(\".png\")\n ]\n )\n x.extend(img_fpath_list)\n\n # load gt labels\n if img_type == \"good\":\n y.extend([0] * len(img_fpath_list))\n mask.extend([None] * len(img_fpath_list))\n else:\n y.extend([1] * len(img_fpath_list))\n gt_type_dir = os.path.join(gt_dir, img_type)\n img_fname_list = [os.path.splitext(os.path.basename(f))[0] for f in img_fpath_list]\n gt_fpath_list = [\n os.path.join(gt_type_dir, img_fname + \"_mask.png\")\n for img_fname in img_fname_list\n ]\n mask.extend(gt_fpath_list)\n\n assert len(x) == len(y), \"number of x and y should be same\"\n\n return list(x), list(y), list(mask)\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"torch.zeros"
]
] |
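A short sketch of the two dataset-specific details in MVTecDataset.__getitem__: greyscale classes are stacked to three channels before the torchvision transforms, and defect-free samples get an all-zero mask. The toy image and crop size are placeholders.

import numpy as np
import torch
from PIL import Image

grey = Image.new("L", (32, 32), color=128)    # placeholder for a greyscale MVTec image
x = np.expand_dims(np.array(grey), axis=2)
x = np.concatenate([x, x, x], axis=2)         # (H, W, 1) -> (H, W, 3)
rgb = Image.fromarray(x.astype("uint8")).convert("RGB")

crop_size = (224, 224)                        # placeholder for c.crp_size
mask = torch.zeros([1, crop_size[0], crop_size[1]])   # "good" samples have no defect mask
print(rgb.size, mask.shape)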
bajcmartinez/Finding-Car-Lanes-Without-Deep-Learning
|
[
"2d660ce1f6f3ed5c57ddd919a13b65853dee0758"
] |
[
"lib/lane.py"
] |
[
"import numpy as np\nimport cv2\n\nclass Lane():\n \"\"\"\n Define a class to receive the characteristics of each line detection\n \"\"\"\n\n def __init__(self, xm_per_pix, ym_per_pix):\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_x_fitted = []\n # average x values of the fitted line over the last n iterations\n self.best_x = None\n # polynomial coefficients averaged over the last n iterations\n self.best_fit = None\n # polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n # polynomial coefficients for the recent fits\n self.history_fit = []\n # max count for elements in the history, 1 second approx\n self.max_history = 30\n # weights used to calculate the history average\n self.history_weights = [x//2+1 for x in range(self.max_history)]\n # radius of curvature of the line in some units\n self.radius_of_curvature = None\n # sanity check lane\n self._insanity = 0.0\n # distance in meters of vehicle center from the line\n self.line_base_pos = None\n # difference in fit coefficients between last and new fits\n self.diffs = np.array([0, 0, 0], dtype='float')\n\n # x values for detected line pixels\n self.all_x = None\n # y values for detected line pixels\n self.all_y = None\n\n # meters per pixel in dimension\n self._xm_per_pix = xm_per_pix\n self._ym_per_pix = ym_per_pix\n\n def sanity_check_lane(self, R):\n \"\"\"\n Checks the radius of curvature `R` against the radius stored in the object.\n \"\"\"\n # Return true if there is no prior data\n if self.radius_of_curvature is None:\n return True\n\n R0 = self.radius_of_curvature\n self._insanity = abs(R - R0) / R0\n return self._insanity <= 0.5\n\n def calculate_curvature(self):\n fit_cr = np.polyfit(self.all_y * self._ym_per_pix, self.all_x * self._xm_per_pix, 2)\n plot_y = np.linspace(0, 720 - 1, 720)\n y_eval = np.max(plot_y)\n\n curve = ((1 + (2 * fit_cr[0] * y_eval * self._ym_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])\n\n return curve\n\n def add_fit(self, fit, points_x, points_y):\n \"\"\"\n Adds a fit to the current lane\n\n :param fit: Second order polynomial that represents the lane\n \"\"\"\n if fit is not None:\n if self.best_fit is not None:\n # if we have a best fit, see how this new fit compares\n self.diffs = abs(fit - self.best_fit)\n\n self.detected = True\n\n # update points\n self.all_x = points_x\n self.all_y = points_y\n _radius_of_curvature = self.calculate_curvature()\n self.detected = self.sanity_check_lane(_radius_of_curvature)\n if self.detected:\n self.radius_of_curvature = _radius_of_curvature\n\n # if we detected a good fit then we store in current_fit\n self.current_fit = fit\n self.history_fit.append(fit)\n # keep only last N items\n self.history_fit = self.history_fit[-self.max_history:]\n\n # calculate the average\n self.best_fit = np.average(self.history_fit, axis=0, weights=self.history_weights[:len(self.history_fit)])\n else:\n # we fail the sanity check\n self.detected = False\n self.current_fit = [np.array([False])]\n\n else:\n self.detected = False\n self.current_fit = [np.array([False])]\n"
] |
[
[
"numpy.polyfit",
"numpy.absolute",
"numpy.linspace",
"numpy.max",
"numpy.array"
]
] |
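The curvature computation in Lane.calculate_curvature, reproduced standalone with synthetic lane pixels and typical metres-per-pixel assumptions:

import numpy as np

xm_per_pix, ym_per_pix = 3.7 / 700, 30 / 720  # assumed pixel-to-metre scaling
all_y = np.linspace(0, 719, 720)
all_x = 0.0002 * (all_y - 360) ** 2 + 300     # synthetic second-order lane pixels

fit_cr = np.polyfit(all_y * ym_per_pix, all_x * xm_per_pix, 2)
plot_y = np.linspace(0, 720 - 1, 720)
y_eval = np.max(plot_y)
curve = ((1 + (2 * fit_cr[0] * y_eval * ym_per_pix + fit_cr[1]) ** 2) ** 1.5) \
    / np.absolute(2 * fit_cr[0])
print(curve)                                   # radius of curvature in metres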
fredmontet/tars
|
[
"922786e8c6456fc0cc1a9db07714f11dd78219d9"
] |
[
"src/tars/utils/runner.py"
] |
[
"import logging\nfrom typing import Callable, NoReturn\nfrom time import sleep\n\nfrom pandas import Timestamp, Timedelta\n\n\nclass Runner:\n \"\"\"\n A Runner represent an object able to execute a function through time.\n\n The function can be executed with a chosen frequency e.g. every 10 seconds\n and for a optional duration e.g. 2 hours.\n\n :ivar is_running : Boolean describing if the Runner is running or not.\n \"\"\"\n \n def __init__(self):\n self.is_running = False\n\n def start(self, func: Callable, frequency: str, duration: str = None) \\\n -> NoReturn:\n \"\"\" Start the Runner\n\n :param func: The function to be executed\n :param frequency: String representing a frequency in the same form than a Pandas' Timedelta (https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html)\n :param duration: String representing a frequency in the same form than a Pandas' Timedelta (https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html)\n \"\"\"\n self.is_running = True\n\n if duration is not None:\n end_time = Timestamp.now() + Timedelta(duration)\n \n while self.is_running:\n if duration is not None:\n if Timestamp.now() >= end_time:\n break\n func()\n sleep(Timedelta(frequency).total_seconds())\n\n logging.debug(f'Runner started with frequency of {frequency} and '\n f'duration of {duration}')\n\n def stop(self) -> NoReturn:\n \"\"\" Stop the Runner \"\"\"\n self.is_running = False\n logging.debug(f'Runner stopped')\n"
] |
[
[
"pandas.Timestamp.now",
"pandas.Timedelta"
]
] |
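The Runner's timing loop reduces to the pattern below: run until Timestamp.now() passes start + duration, sleeping the frequency (converted to seconds) between calls. The 1s/3s values are illustrative.

from time import sleep
from pandas import Timestamp, Timedelta

frequency, duration = "1s", "3s"              # illustrative values
end_time = Timestamp.now() + Timedelta(duration)
while Timestamp.now() < end_time:
    print("tick", Timestamp.now())            # stands in for func()
    sleep(Timedelta(frequency).total_seconds())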
Arsh0023/stockstats
|
[
"3b13bc74b2106d1a5ebbb6f456344abc3a06ed0e"
] |
[
"stockstats.py"
] |
[
"# coding=utf-8\r\n# Copyright (c) 2016, Cedric Zhuang\r\n# All rights reserved.\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n#\r\n# * Redistributions of source code must retain the above copyright\r\n# notice, this list of conditions and the following disclaimer.\r\n# * Redistributions in binary form must reproduce the above copyright\r\n# notice, this list of conditions and the following disclaimer in the\r\n# documentation and/or other materials provided with the distribution.\r\n# * Neither the name of disclaimer nor the names of its contributors may\r\n# be used to endorse or promote products derived from this software\r\n# without specific prior written permission.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS \"AS IS\" AND ANY\r\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY\r\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n\r\nfrom __future__ import unicode_literals\r\n\r\nimport itertools\r\nimport logging\r\nimport operator\r\nimport random\r\nimport re\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom int_date import get_date_from_diff\r\n\r\n__author__ = 'Cedric Zhuang'\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\nclass StockDataFrame(pd.DataFrame):\r\n OPERATORS = ['le', 'ge', 'lt', 'gt', 'eq', 'ne']\r\n\r\n # Start of options.\r\n KDJ_PARAM = (2.0 / 3.0, 1.0 / 3.0)\r\n KDJ_WINDOW = 9\r\n\r\n BOLL_PERIOD = 20\r\n BOLL_STD_TIMES = 2\r\n\r\n MACD_EMA_SHORT = 12\r\n MACD_EMA_LONG = 26\r\n MACD_EMA_SIGNAL = 9\r\n\r\n PDI_SMMA = 14\r\n MDI_SMMA = 14\r\n DX_SMMA = 14\r\n ADX_EMA = 6\r\n ADXR_EMA = 6\r\n\r\n CR_MA1 = 5\r\n CR_MA2 = 10\r\n CR_MA3 = 20\r\n\r\n TRIX_EMA_WINDOW = 12\r\n\r\n TEMA_EMA_WINDOW = 5\r\n\r\n ATR_SMMA = 14\r\n\r\n # End of options\r\n\r\n @staticmethod\r\n def _get_change(df):\r\n \"\"\" Get the percentage change column\r\n\r\n :param df: DataFrame object\r\n :return: result series\r\n \"\"\"\r\n df['change'] = df['close'].pct_change() * 100\r\n return df['change']\r\n\r\n @staticmethod\r\n def _get_p(df, column, shifts):\r\n \"\"\" get the permutation of specified range\r\n\r\n example:\r\n index x x_-2,-1_p\r\n 0 1 NaN\r\n 1 -1 NaN\r\n 2 3 2 (0.x > 0, and assigned to weight 2)\r\n 3 5 1 (2.x > 0, and assigned to weight 1)\r\n 4 1 3\r\n\r\n :param df: data frame\r\n :param column: the column to calculate p from\r\n :param shifts: the range to consider\r\n :return:\r\n \"\"\"\r\n column_name = '{}_{}_p'.format(column, shifts)\r\n # initialize the column if not\r\n df.get(column)\r\n shifts = StockDataFrame.to_ints(shifts)[::-1]\r\n indices = None\r\n count = 0\r\n for shift in shifts:\r\n shifted = df.shift(-shift)\r\n index = (shifted[column] > 0) * (2 ** count)\r\n if indices is None:\r\n indices = index\r\n else:\r\n indices += index\r\n count += 1\r\n if indices is not None:\r\n cp = indices.copy()\r\n 
StockDataFrame.set_nan(cp, shifts)\r\n df[column_name] = cp\r\n\r\n @classmethod\r\n def to_ints(cls, shifts):\r\n items = map(cls._process_shifts_segment,\r\n shifts.split(','))\r\n return sorted(list(set(itertools.chain(*items))))\r\n\r\n @classmethod\r\n def to_int(cls, shifts):\r\n numbers = cls.to_ints(shifts)\r\n if len(numbers) != 1:\r\n raise IndexError(\"only accept 1 number.\")\r\n return numbers[0]\r\n\r\n @staticmethod\r\n def to_floats(shifts):\r\n floats = map(float, shifts.split(','))\r\n return sorted(list(set(floats)))\r\n\r\n @classmethod\r\n def to_float(cls, shifts):\r\n floats = cls.to_floats(shifts)\r\n if len(floats) != 1:\r\n raise IndexError('only accept 1 float.')\r\n return floats[0]\r\n\r\n @staticmethod\r\n def _process_shifts_segment(shift_segment):\r\n if '~' in shift_segment:\r\n start, end = shift_segment.split('~')\r\n shifts = range(int(start), int(end) + 1)\r\n else:\r\n shifts = [int(shift_segment)]\r\n return shifts\r\n\r\n @staticmethod\r\n def set_nan(pd_obj, shift):\r\n try:\r\n iter(shift)\r\n max_shift = max(shift)\r\n min_shift = min(shift)\r\n StockDataFrame._set_nan_of_single_shift(pd_obj, max_shift)\r\n StockDataFrame._set_nan_of_single_shift(pd_obj, min_shift)\r\n except TypeError:\r\n # shift is not iterable\r\n StockDataFrame._set_nan_of_single_shift(pd_obj, shift)\r\n\r\n @staticmethod\r\n def _set_nan_of_single_shift(pd_obj, shift):\r\n val = np.nan\r\n if shift > 0:\r\n pd_obj.iloc[-shift:] = val\r\n elif shift < 0:\r\n pd_obj.iloc[:-shift] = val\r\n\r\n @classmethod\r\n def _get_r(cls, df, column, shifts):\r\n \"\"\" Get rate of change of column\r\n\r\n :param df: DataFrame object\r\n :param column: column name of the rate to calculate\r\n :param shifts: days to shift, accept one shift only\r\n :return: None\r\n \"\"\"\r\n shift = cls.to_int(shifts)\r\n rate_key = '{}_{}_r'.format(column, shift)\r\n df[rate_key] = df[column].pct_change(periods=-shift) * 100\r\n\r\n @classmethod\r\n def _get_s(cls, df, column, shifts):\r\n \"\"\" Get the column shifted by days\r\n\r\n :param df: DataFrame object\r\n :param column: name of the column to shift\r\n :param shifts: days to shift, accept one shift only\r\n :return: None\r\n \"\"\"\r\n shift = cls.to_int(shifts)\r\n shifted_key = \"{}_{}_s\".format(column, shift)\r\n df[shifted_key] = df[column].shift(-shift)\r\n cp = df[shifted_key].copy()\r\n StockDataFrame.set_nan(cp, shift)\r\n df[shifted_key] = cp\r\n\r\n @classmethod\r\n def _get_log_ret(cls, df):\r\n df['log-ret'] = np.log(df['close'] / df['close_-1_s'])\r\n\r\n @classmethod\r\n def _get_c(cls, df, column, shifts):\r\n \"\"\" get the count of column in range (shifts)\r\n\r\n example: kdjj_0_le_20_c\r\n :param df: stock data\r\n :param column: column name\r\n :param shifts: range to count, only to previous\r\n :return: result series\r\n \"\"\"\r\n column_name = '{}_{}_c'.format(column, shifts)\r\n shifts = cls.get_only_one_positive_int(shifts)\r\n df[column_name] = df[column].rolling(\r\n center=False,\r\n window=shifts,\r\n min_periods=0).apply(np.count_nonzero)\r\n return df[column_name]\r\n\r\n @classmethod\r\n def _get_fc(cls, df, column, shifts):\r\n \"\"\" get the count of column in range of future (shifts)\r\n\r\n example: kdjj_0_le_20_fc\r\n :param df: stock data\r\n :param column: column name\r\n :param shifts: range to count, only to future\r\n :return: result series\r\n \"\"\"\r\n column_name = '{}_{}_fc'.format(column, shifts)\r\n shift = cls.get_only_one_positive_int(shifts)\r\n reversed_series = df[column][::-1]\r\n 
reversed_counts = reversed_series.rolling(\r\n center=False,\r\n window=shift,\r\n min_periods=0).apply(np.count_nonzero)\r\n counts = reversed_counts[::-1]\r\n df[column_name] = counts\r\n return counts\r\n\r\n @classmethod\r\n def _get_op(cls, df, column, threshold, op):\r\n column_name = '{}_{}_{}'.format(column, threshold, op)\r\n threshold = cls.to_float(threshold)\r\n f = getattr(operator, op)\r\n df[column_name] = f(df[column], threshold)\r\n\r\n @staticmethod\r\n def get_diff_convolve_array(shift):\r\n if shift == 0:\r\n ret = [1]\r\n else:\r\n ret = np.zeros(abs(shift) + 1)\r\n if shift < 0:\r\n ret[[0, -1]] = 1, -1\r\n else:\r\n ret[[0, -1]] = -1, 1\r\n return ret\r\n\r\n @classmethod\r\n def _init_shifted_columns(cls, column, df, shifts):\r\n # initialize the column if not\r\n df.get(column)\r\n shifts = cls.to_ints(shifts)\r\n shift_column_names = ['{}_{}_s'.format(column, shift) for shift in\r\n shifts]\r\n [df.get(name) for name in shift_column_names]\r\n return shift_column_names\r\n\r\n @classmethod\r\n def _get_max(cls, df, column, shifts):\r\n column_name = '{}_{}_max'.format(column, shifts)\r\n shift_column_names = cls._init_shifted_columns(column, df, shifts)\r\n df[column_name] = np.max(df[shift_column_names], axis=1)\r\n\r\n @classmethod\r\n def _get_min(cls, df, column, shifts):\r\n column_name = '{}_{}_min'.format(column, shifts)\r\n shift_column_names = cls._init_shifted_columns(column, df, shifts)\r\n df[column_name] = np.min(df[shift_column_names], axis=1)\r\n\r\n @staticmethod\r\n def _get_rsv(df, n_days):\r\n \"\"\" Calculate the RSV (Raw Stochastic Value) within N days\r\n\r\n This value is essential for calculating KDJs\r\n Current day is included in N\r\n :param df: data\r\n :param n_days: N days\r\n :return: None\r\n \"\"\"\r\n n_days = int(n_days)\r\n column_name = 'rsv_{}'.format(n_days)\r\n low_min = df['low'].rolling(\r\n min_periods=1, window=n_days, center=False).min()\r\n high_max = df['high'].rolling(\r\n min_periods=1, window=n_days, center=False).max()\r\n\r\n cv = (df['close'] - low_min) / (high_max - low_min)\r\n df[column_name] = cv.fillna(0).astype('float64') * 100\r\n\r\n @staticmethod\r\n def _positive_sum(data):\r\n data = [i if i > 0 else 0 for i in data]\r\n ret = data[0]\r\n for i in data[1:]:\r\n ret = (ret * (len(data) - 1) + i) / len(data)\r\n return ret\r\n\r\n @staticmethod\r\n def _negative_sum(data):\r\n data = [-i if i < 0 else 0 for i in data]\r\n ret = data[0]\r\n for i in data[1:]:\r\n ret = (ret * (len(data) - 1) + i) / len(data)\r\n return ret\r\n\r\n # noinspection PyUnresolvedReferences\r\n @classmethod\r\n def _get_rsi(cls, df, n_days):\r\n \"\"\" Calculate the RSI (Relative Strength Index) within N days\r\n\r\n calculated based on the formula at:\r\n https://en.wikipedia.org/wiki/Relative_strength_index\r\n\r\n :param df: data\r\n :param n_days: N days\r\n :return: None\r\n \"\"\"\r\n n_days = int(n_days)\r\n d = df['close_-1_d']\r\n\r\n df['closepm'] = (d + d.abs()) / 2\r\n df['closenm'] = (-d + d.abs()) / 2\r\n closepm_smma_column = 'closepm_{}_smma'.format(n_days)\r\n closenm_smma_column = 'closenm_{}_smma'.format(n_days)\r\n p_ema = df[closepm_smma_column]\r\n n_ema = df[closenm_smma_column]\r\n\r\n rs_column_name = 'rs_{}'.format(n_days)\r\n rsi_column_name = 'rsi_{}'.format(n_days)\r\n df[rs_column_name] = rs = p_ema / n_ema\r\n df[rsi_column_name] = 100 - 100 / (1.0 + rs)\r\n\r\n columns_to_remove = ['closepm',\r\n 'closenm',\r\n closepm_smma_column,\r\n closenm_smma_column]\r\n cls._drop_columns(df, 
columns_to_remove)\r\n\r\n @staticmethod\r\n def _drop_columns(df, columns):\r\n df.drop(columns, inplace=True, axis=1)\r\n\r\n def _ensure_type(self, obj):\r\n \"\"\" override the method in pandas, omit the check\r\n\r\n This patch is not the perfect way but could make the lib work.\r\n \"\"\"\r\n return obj\r\n\r\n @classmethod\r\n def _get_smma(cls, df, column, windows):\r\n \"\"\" get smoothed moving average.\r\n\r\n :param df: data\r\n :param windows: range\r\n :return: result series\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = '{}_{}_smma'.format(column, window)\r\n smma = df[column].ewm(\r\n ignore_na=False, alpha=1.0 / window,\r\n min_periods=0, adjust=True).mean()\r\n df[column_name] = smma\r\n return smma\r\n\r\n @classmethod\r\n def _get_trix(cls, df, column=None, windows=None):\r\n if column is None and windows is None:\r\n column_name = 'trix'\r\n else:\r\n column_name = '{}_{}_trix'.format(column, windows)\r\n\r\n if column is None:\r\n column = 'close'\r\n if windows is None:\r\n windows = cls.TRIX_EMA_WINDOW\r\n window = cls.get_only_one_positive_int(windows)\r\n\r\n single = '{c}_{w}_ema'.format(c=column, w=window)\r\n double = '{c}_{w}_ema_{w}_ema'.format(c=column, w=window)\r\n triple = '{c}_{w}_ema_{w}_ema_{w}_ema'.format(c=column, w=window)\r\n prev_triple = '{}_-1_s'.format(triple)\r\n df[column_name] = ((df[triple] - df[prev_triple]) * 100\r\n / df[prev_triple])\r\n\r\n columns_to_drop = [single, double, triple, prev_triple]\r\n cls._drop_columns(df, columns_to_drop)\r\n\r\n @classmethod\r\n def _get_tema(cls, df, column=None, windows=None):\r\n \"\"\" Another implementation for triple ema\r\n\r\n Check the algorithm described below:\r\n https://www.forextraders.com/forex-education/forex-technical-analysis/triple-exponential-moving-average-the-tema-indicator/\r\n :param df: data frame\r\n :param column: column to calculate ema\r\n :param windows: window of the calculation\r\n :return: result series\r\n \"\"\"\r\n if column is None and windows is None:\r\n column_name = 'tema'\r\n else:\r\n column_name = '{}_{}_tema'.format(column, windows)\r\n\r\n if column is None:\r\n column = 'close'\r\n if windows is None:\r\n windows = cls.TEMA_EMA_WINDOW\r\n window = cls.get_only_one_positive_int(windows)\r\n\r\n single = '{c}_{w}_ema'.format(c=column, w=window)\r\n double = '{c}_{w}_ema_{w}_ema'.format(c=column, w=window)\r\n triple = '{c}_{w}_ema_{w}_ema_{w}_ema'.format(c=column, w=window)\r\n df[column_name] = 3 * df[single] - 3 * df[double] + df[triple]\r\n\r\n cls._drop_columns(df, [single, double, triple])\r\n return df[column_name]\r\n\r\n @classmethod\r\n def _get_wr(cls, df, n_days):\r\n \"\"\" Williams Overbought/Oversold Index\r\n\r\n WMS=[(Hn—Ct)/(Hn—Ln)] ×100\r\n Ct - the close price\r\n Hn - N days high\r\n Ln - N days low\r\n\r\n :param df: data\r\n :param n_days: N days\r\n :return: None\r\n \"\"\"\r\n n_days = int(n_days)\r\n ln = df['low'].rolling(min_periods=1, window=n_days,\r\n center=False).min()\r\n\r\n hn = df['high'].rolling(min_periods=1, window=n_days,\r\n center=False).max()\r\n column_name = 'wr_{}'.format(n_days)\r\n df[column_name] = (hn - df['close']) / (hn - ln) * 100\r\n\r\n @classmethod\r\n def _get_cci(cls, df, n_days=None):\r\n \"\"\" Commodity Channel Index\r\n\r\n CCI = (Typical Price - 20-period SMA of TP) / (.015 x Mean Deviation)\r\n Typical Price (TP) = (High + Low + Close)/3\r\n TP is also implemented as 'middle'.\r\n\r\n :param df: data\r\n :param n_days: N days window\r\n :return: None\r\n 
\"\"\"\r\n if n_days is None:\r\n n_days = 14\r\n column_name = 'cci'\r\n else:\r\n n_days = int(n_days)\r\n column_name = 'cci_{}'.format(n_days)\r\n\r\n tp = df['middle']\r\n tp_sma = df['middle_{}_sma'.format(n_days)]\r\n md = df['middle'].rolling(\r\n min_periods=1, center=False, window=n_days).apply(\r\n lambda x: np.fabs(x - x.mean()).mean())\r\n\r\n df[column_name] = (tp - tp_sma) / (.015 * md)\r\n\r\n @classmethod\r\n def _get_tr(cls, df):\r\n \"\"\" True Range of the trading\r\n\r\n tr = max[(high - low), abs(high - close_prev), abs(low - close_prev)]\r\n :param df: data\r\n :return: None\r\n \"\"\"\r\n prev_close = df['close_-1_s']\r\n high = df['high']\r\n low = df['low']\r\n c1 = high - low\r\n c2 = np.abs(high - prev_close)\r\n c3 = np.abs(low - prev_close)\r\n df['tr'] = np.max((c1, c2, c3), axis=0)\r\n\r\n @classmethod\r\n def _get_atr(cls, df, window=None):\r\n \"\"\" Average True Range\r\n\r\n The average true range is an N-day smoothed moving average (SMMA) of\r\n the true range values. Default to 14 days.\r\n https://en.wikipedia.org/wiki/Average_true_range\r\n\r\n :param df: data\r\n :return: None\r\n \"\"\"\r\n if window is None:\r\n window = cls.ATR_SMMA\r\n column_name = 'atr'\r\n else:\r\n window = int(window)\r\n column_name = 'atr_{}'.format(window)\r\n tr_smma_column = 'tr_{}_smma'.format(window)\r\n\r\n df[column_name] = df[tr_smma_column]\r\n cls._drop_columns(df, [tr_smma_column])\r\n\r\n @classmethod\r\n def _get_dma(cls, df):\r\n \"\"\" Different of Moving Average\r\n\r\n default to 10 and 50.\r\n :param df: data\r\n :return: None\r\n \"\"\"\r\n df['dma'] = df['close_10_sma'] - df['close_50_sma']\r\n\r\n @classmethod\r\n def _get_dmi(cls, df):\r\n \"\"\" get the default setting for DMI\r\n\r\n including:\r\n +DI: 14 days SMMA of +DM,\r\n -DI: 14 days SMMA of -DM,\r\n DX: based on +DI and -DI\r\n ADX: 6 days SMMA of DX\r\n :param df: data\r\n :return:\r\n \"\"\"\r\n df['pdi'] = cls._get_pdi(df, cls.PDI_SMMA)\r\n df['mdi'] = cls._get_mdi(df, cls.MDI_SMMA)\r\n df['dx'] = cls._get_dx(df, cls.DX_SMMA)\r\n df['adx'] = df['dx_{}_ema'.format(cls.ADX_EMA)]\r\n df['adxr'] = df['adx_{}_ema'.format(cls.ADXR_EMA)]\r\n\r\n @classmethod\r\n def _get_um_dm(cls, df):\r\n \"\"\" Up move and down move\r\n\r\n initialize up move and down move\r\n :param df: data\r\n \"\"\"\r\n hd = df['high_delta']\r\n df['um'] = (hd + hd.abs()) / 2\r\n ld = -df['low_delta']\r\n df['dm'] = (ld + ld.abs()) / 2\r\n\r\n @classmethod\r\n def _get_pdm(cls, df, windows):\r\n \"\"\" +DM, positive directional moving\r\n\r\n If window is not 1, calculate the SMMA of +DM\r\n :param df: data\r\n :param windows: range\r\n :return:\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = 'pdm_{}'.format(window)\r\n um, dm = df['um'], df['dm']\r\n df['pdm'] = np.where(um > dm, um, 0)\r\n if window > 1:\r\n pdm = df['pdm_{}_ema'.format(window)]\r\n else:\r\n pdm = df['pdm']\r\n df[column_name] = pdm\r\n\r\n @classmethod\r\n def _get_vr(cls, df, windows=None):\r\n if windows is None:\r\n window = 26\r\n column_name = 'vr'\r\n else:\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = 'vr_{}'.format(window)\r\n\r\n df['av'] = np.where(df['change'] > 0, df['volume'], 0)\r\n avs = df['av'].rolling(\r\n min_periods=1, window=window, center=False).sum()\r\n\r\n df['bv'] = np.where(df['change'] < 0, df['volume'], 0)\r\n bvs = df['bv'].rolling(\r\n min_periods=1, window=window, center=False).sum()\r\n\r\n df['cv'] = np.where(df['change'] == 0, df['volume'], 0)\r\n cvs = 
df['cv'].rolling(\r\n min_periods=1, window=window, center=False).sum()\r\n\r\n df[column_name] = (avs + cvs / 2) / (bvs + cvs / 2) * 100\r\n cls._drop_columns(df, ['av', 'bv', 'cv'])\r\n\r\n @classmethod\r\n def _get_mdm(cls, df, windows):\r\n \"\"\" -DM, negative directional moving accumulation\r\n\r\n If window is not 1, return the SMA of -DM.\r\n :param df: data\r\n :param windows: range\r\n :return:\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = 'mdm_{}'.format(window)\r\n um, dm = df['um'], df['dm']\r\n df['mdm'] = np.where(dm > um, dm, 0)\r\n if window > 1:\r\n mdm = df['mdm_{}_ema'.format(window)]\r\n else:\r\n mdm = df['mdm']\r\n df[column_name] = mdm\r\n\r\n @classmethod\r\n def _get_pdi(cls, df, windows):\r\n \"\"\" +DI, positive directional moving index\r\n\r\n :param df: data\r\n :param windows: range\r\n :return:\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n pdm_column = 'pdm_{}'.format(window)\r\n tr_column = 'atr_{}'.format(window)\r\n pdi_column = 'pdi_{}'.format(window)\r\n df[pdi_column] = df[pdm_column] / df[tr_column] * 100\r\n return df[pdi_column]\r\n\r\n @classmethod\r\n def _get_mdi(cls, df, windows):\r\n window = cls.get_only_one_positive_int(windows)\r\n mdm_column = 'mdm_{}'.format(window)\r\n tr_column = 'atr_{}'.format(window)\r\n mdi_column = 'mdi_{}'.format(window)\r\n df[mdi_column] = df[mdm_column] / df[tr_column] * 100\r\n return df[mdi_column]\r\n\r\n @classmethod\r\n def _get_dx(cls, df, windows):\r\n window = cls.get_only_one_positive_int(windows)\r\n dx_column = 'dx_{}'.format(window)\r\n mdi_column = 'mdi_{}'.format(window)\r\n pdi_column = 'pdi_{}'.format(window)\r\n mdi, pdi = df[mdi_column], df[pdi_column]\r\n df[dx_column] = abs(pdi - mdi) / (pdi + mdi) * 100\r\n return df[dx_column]\r\n\r\n @classmethod\r\n def _get_kdj_default(cls, df):\r\n \"\"\" default KDJ, 9 days\r\n\r\n :param df: k line data frame\r\n :return: None\r\n \"\"\"\r\n df['kdjk'] = df['kdjk_{}'.format(cls.KDJ_WINDOW)]\r\n df['kdjd'] = df['kdjd_{}'.format(cls.KDJ_WINDOW)]\r\n df['kdjj'] = df['kdjj_{}'.format(cls.KDJ_WINDOW)]\r\n\r\n @classmethod\r\n def _get_cr(cls, df, window=26):\r\n ym = df['middle_-1_s']\r\n h = df['high']\r\n p1_m = df.loc[:, ['middle_-1_s', 'high']].min(axis=1)\r\n p2_m = df.loc[:, ['middle_-1_s', 'low']].min(axis=1)\r\n p1 = (h - p1_m).rolling(\r\n min_periods=1, window=window, center=False).sum()\r\n p2 = (ym - p2_m).rolling(\r\n min_periods=1, window=window, center=False).sum()\r\n df['cr'] = p1 / p2 * 100\r\n del df['middle_-1_s']\r\n df['cr-ma1'] = cls._shifted_cr_sma(df, cls.CR_MA1)\r\n df['cr-ma2'] = cls._shifted_cr_sma(df, cls.CR_MA2)\r\n df['cr-ma3'] = cls._shifted_cr_sma(df, cls.CR_MA3)\r\n\r\n @classmethod\r\n def _shifted_cr_sma(cls, df, window):\r\n name = cls._temp_name()\r\n df[name] = df['cr'].rolling(min_periods=1, window=window,\r\n center=False).mean()\r\n to_shift = '{}_-{}_s'.format(name, int(window / 2.5 + 1))\r\n ret = df[to_shift]\r\n del df[name], df[to_shift]\r\n return ret\r\n\r\n @classmethod\r\n def _temp_name(cls):\r\n return 'sdf{}'.format(random.randint(0, 10e8))\r\n\r\n @classmethod\r\n def _get_middle(cls, df):\r\n df['middle'] = (df['close'] + df['high'] + df['low']) / 3.0\r\n\r\n @classmethod\r\n def _calc_kd(cls, column):\r\n param0, param1 = cls.KDJ_PARAM\r\n k = 50.0\r\n # noinspection PyTypeChecker\r\n for i in param1 * column:\r\n k = param0 * k + i\r\n yield k\r\n\r\n @classmethod\r\n def _get_kdjk(cls, df, n_days):\r\n \"\"\" Get the K of KDJ\r\n\r\n K = 2/3 × 
(prev. K) +1/3 × (curr. RSV)\r\n 2/3 and 1/3 are the smooth parameters.\r\n :param df: data\r\n :param n_days: calculation range\r\n :return: None\r\n \"\"\"\r\n rsv_column = 'rsv_{}'.format(n_days)\r\n k_column = 'kdjk_{}'.format(n_days)\r\n df[k_column] = list(cls._calc_kd(df.get(rsv_column)))\r\n\r\n @classmethod\r\n def _get_kdjd(cls, df, n_days):\r\n \"\"\" Get the D of KDJ\r\n\r\n D = 2/3 × (prev. D) +1/3 × (curr. K)\r\n 2/3 and 1/3 are the smooth parameters.\r\n :param df: data\r\n :param n_days: calculation range\r\n :return: None\r\n \"\"\"\r\n k_column = 'kdjk_{}'.format(n_days)\r\n d_column = 'kdjd_{}'.format(n_days)\r\n df[d_column] = list(cls._calc_kd(df.get(k_column)))\r\n\r\n @staticmethod\r\n def _get_kdjj(df, n_days):\r\n \"\"\" Get the J of KDJ\r\n\r\n J = 3K-2D\r\n :param df: data\r\n :param n_days: calculation range\r\n :return: None\r\n \"\"\"\r\n k_column = 'kdjk_{}'.format(n_days)\r\n d_column = 'kdjd_{}'.format(n_days)\r\n j_column = 'kdjj_{}'.format(n_days)\r\n df[j_column] = 3 * df[k_column] - 2 * df[d_column]\r\n\r\n @staticmethod\r\n def remove_random_nan(pd_obj):\r\n return pd_obj.where((pd.notnull(pd_obj)), None)\r\n\r\n @staticmethod\r\n def _get_d(df, column, shifts):\r\n shift = StockDataFrame.to_int(shifts)\r\n shift_column = '{}_{}_s'.format(column, shift)\r\n column_name = '{}_{}_d'.format(column, shift)\r\n df[column_name] = df[column] - df[shift_column]\r\n cp = df[column_name].copy()\r\n StockDataFrame.set_nan(cp, shift)\r\n df[column_name] = cp\r\n\r\n @classmethod\r\n def _get_sma(cls, df, column, windows):\r\n \"\"\" get simple moving average\r\n\r\n :param df: data\r\n :param column: column to calculate\r\n :param windows: collection of window of simple moving average\r\n :return: None\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = '{}_{}_sma'.format(column, window)\r\n df[column_name] = df[column].rolling(min_periods=1, window=window,\r\n center=False).mean()\r\n\r\n @classmethod\r\n def _get_ema(cls, df, column, windows):\r\n \"\"\" get exponential moving average\r\n\r\n :param df: data\r\n :param column: column to calculate\r\n :param windows: collection of window of exponential moving average\r\n :return: None\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = '{}_{}_ema'.format(column, window)\r\n if len(df[column]) > 0:\r\n df[column_name] = df[column].ewm(\r\n ignore_na=False, span=window,\r\n min_periods=0, adjust=True).mean()\r\n else:\r\n df[column_name] = []\r\n\r\n @classmethod\r\n def _get_boll(cls, df):\r\n \"\"\" Get Bollinger bands.\r\n\r\n boll_ub means the upper band of the Bollinger bands\r\n boll_lb means the lower band of the Bollinger bands\r\n boll_ub = MA + Kσ\r\n boll_lb = MA − Kσ\r\n M = BOLL_PERIOD\r\n K = BOLL_STD_TIMES\r\n :param df: data\r\n :return: None\r\n \"\"\"\r\n moving_avg = df['close_{}_sma'.format(cls.BOLL_PERIOD)]\r\n moving_std = df['close_{}_mstd'.format(cls.BOLL_PERIOD)]\r\n df['boll'] = moving_avg\r\n moving_avg = list(map(np.float64, moving_avg))\r\n moving_std = list(map(np.float64, moving_std))\r\n # noinspection PyTypeChecker\r\n df['boll_ub'] = np.add(moving_avg,\r\n np.multiply(cls.BOLL_STD_TIMES, moving_std))\r\n # noinspection PyTypeChecker\r\n df['boll_lb'] = np.subtract(moving_avg,\r\n np.multiply(cls.BOLL_STD_TIMES,\r\n moving_std))\r\n\r\n @classmethod\r\n def _get_macd(cls, df):\r\n \"\"\" Moving Average Convergence Divergence\r\n\r\n This function will initialize all following columns.\r\n\r\n MACD Line (macd): (12-day EMA - 
26-day EMA)\r\n Signal Line (macds): 9-day EMA of MACD Line\r\n MACD Histogram (macdh): MACD Line - Signal Line\r\n :param df: data\r\n :return: None\r\n \"\"\"\r\n ema_short = 'close_{}_ema'.format(cls.MACD_EMA_SHORT)\r\n ema_long = 'close_{}_ema'.format(cls.MACD_EMA_LONG)\r\n ema_signal = 'macd_{}_ema'.format(cls.MACD_EMA_SIGNAL)\r\n fast = df[ema_short]\r\n slow = df[ema_long]\r\n df['macd'] = fast - slow\r\n df['macds'] = df[ema_signal]\r\n df['macdh'] = (df['macd'] - df['macds'])\r\n cls._drop_columns(df, [ema_short, ema_long, ema_signal])\r\n\r\n @classmethod\r\n def _get_vwap(cls,df):\r\n df['avg_price'] = (df['high']+df['close']+df['low'])/3\r\n df['cumilative_volume'] = df['volume'].cumsum()\r\n df['pv'] = df['avg_price']*df['volume']\r\n df['cumilative_pv'] = df['pv'].cumsum()\r\n df['vwap'] = df['cumilative_pv']/df['cumilative_volume']\r\n cls._drop_columns(df, ['avg_price', 'cumilative_volume', 'pv', 'cumilative_pv'])\r\n\r\n @classmethod\r\n def get_only_one_positive_int(cls, windows):\r\n if isinstance(windows, int):\r\n window = windows\r\n else:\r\n window = cls.to_int(windows)\r\n if window <= 0:\r\n raise IndexError(\"window must be greater than 0\")\r\n return window\r\n\r\n @classmethod\r\n def _get_mstd(cls, df, column, windows):\r\n \"\"\" get moving standard deviation\r\n\r\n :param df: data\r\n :param column: column to calculate\r\n :param windows: collection of window of moving standard deviation\r\n :return: None\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = '{}_{}_mstd'.format(column, window)\r\n df[column_name] = df[column].rolling(min_periods=1, window=window,\r\n center=False).std()\r\n\r\n @classmethod\r\n def _get_mvar(cls, df, column, windows):\r\n \"\"\" get moving variance\r\n\r\n :param df: data\r\n :param column: column to calculate\r\n :param windows: collection of window of moving variance\r\n :return: None\r\n \"\"\"\r\n window = cls.get_only_one_positive_int(windows)\r\n column_name = '{}_{}_mvar'.format(column, window)\r\n df[column_name] = df[column].rolling(\r\n min_periods=1, window=window, center=False).var()\r\n\r\n @staticmethod\r\n def parse_column_name(name):\r\n m = re.match(r'(.*)_([\\d\\-+~,.]+)_(\\w+)', name)\r\n ret = [None, None, None]\r\n if m is None:\r\n m = re.match(r'(.*)_([\\d\\-+~,]+)', name)\r\n if m is not None:\r\n ret = m.group(1, 2)\r\n ret = ret + (None,)\r\n else:\r\n ret = m.group(1, 2, 3)\r\n return ret\r\n\r\n CROSS_COLUMN_MATCH_STR = '(.+)_(x|xu|xd)_(.+)'\r\n\r\n @classmethod\r\n def is_cross_columns(cls, name):\r\n return re.match(cls.CROSS_COLUMN_MATCH_STR, name) is not None\r\n\r\n @classmethod\r\n def parse_cross_column(cls, name):\r\n m = re.match(cls.CROSS_COLUMN_MATCH_STR, name)\r\n ret = [None, None, None]\r\n if m is not None:\r\n ret = m.group(1, 2, 3)\r\n return ret\r\n\r\n @staticmethod\r\n def _get_rate(df):\r\n \"\"\" same as percent\r\n\r\n :param df: data frame\r\n :return: None\r\n \"\"\"\r\n df['rate'] = df['close'].pct_change() * 100\r\n\r\n @staticmethod\r\n def _get_delta(df, key):\r\n key_to_delta = key.replace('_delta', '')\r\n df[key] = df[key_to_delta].diff()\r\n return df[key]\r\n\r\n @staticmethod\r\n def _get_cross(df, key):\r\n left, op, right = StockDataFrame.parse_cross_column(key)\r\n lt_series = df[left] > df[right]\r\n # noinspection PyTypeChecker\r\n different = np.zeros_like(lt_series)\r\n if len(different) > 1:\r\n # noinspection PyTypeChecker\r\n different[1:] = np.diff(lt_series)\r\n different[0] = False\r\n if op == 'x':\r\n df[key] = 
different\r\n elif op == 'xu':\r\n df[key] = different & lt_series\r\n elif op == 'xd':\r\n df[key] = different & ~lt_series\r\n return df[key]\r\n\r\n @staticmethod\r\n def init_columns(obj, columns):\r\n if isinstance(columns, list):\r\n for column in columns:\r\n StockDataFrame.__init_column(obj, column)\r\n else:\r\n StockDataFrame.__init_column(obj, columns)\r\n\r\n @classmethod\r\n def __init_not_exist_column(cls, df, key):\r\n if key == 'change':\r\n cls._get_change(df)\r\n elif key == 'rate':\r\n cls._get_rate(df)\r\n elif key == 'middle':\r\n cls._get_middle(df)\r\n elif key in ['boll', 'boll_ub', 'boll_lb']:\r\n cls._get_boll(df)\r\n elif key in ['macd', 'macds', 'macdh']:\r\n cls._get_macd(df)\r\n elif key in ['kdjk', 'kdjd', 'kdjj']:\r\n cls._get_kdj_default(df)\r\n elif key in ['cr', 'cr-ma1', 'cr-ma2', 'cr-ma3']:\r\n cls._get_cr(df)\r\n elif key in ['cci']:\r\n cls._get_cci(df)\r\n elif key in ['tr']:\r\n cls._get_tr(df)\r\n elif key in ['atr']:\r\n cls._get_atr(df)\r\n elif key in ['um', 'dm']:\r\n cls._get_um_dm(df)\r\n elif key in ['pdi', 'mdi', 'dx', 'adx', 'adxr']:\r\n cls._get_dmi(df)\r\n elif key in ['trix']:\r\n cls._get_trix(df)\r\n elif key in ['tema']:\r\n cls._get_tema(df)\r\n elif key in ['vr']:\r\n cls._get_vr(df)\r\n elif key in ['dma']:\r\n cls._get_dma(df)\r\n elif key == 'log-ret':\r\n cls._get_log_ret(df)\r\n elif key in ['vwap']:\r\n cls._get_vwap(df)\r\n elif key.endswith('_delta'):\r\n cls._get_delta(df, key)\r\n elif cls.is_cross_columns(key):\r\n cls._get_cross(df, key)\r\n else:\r\n c, r, t = cls.parse_column_name(key)\r\n if t is not None:\r\n if t in cls.OPERATORS:\r\n # support all kinds of compare operators\r\n cls._get_op(df, c, r, t)\r\n else:\r\n func_name = '_get_{}'.format(t)\r\n getattr(cls, func_name)(df, c, r)\r\n else:\r\n func_name = '_get_{}'.format(c)\r\n getattr(cls, func_name)(df, r)\r\n\r\n @staticmethod\r\n def __init_column(df, key):\r\n if key not in df:\r\n if len(df) == 0:\r\n df[key] = []\r\n else:\r\n StockDataFrame.__init_not_exist_column(df, key)\r\n\r\n def __getitem__(self, item):\r\n try:\r\n result = self.retype(\r\n super(StockDataFrame, self).__getitem__(item))\r\n except KeyError:\r\n try:\r\n self.init_columns(self, item)\r\n except AttributeError:\r\n log.exception('{} not found.'.format(item))\r\n result = self.retype(\r\n super(StockDataFrame, self).__getitem__(item))\r\n return result\r\n\r\n def in_date_delta(self, delta_day, anchor=None):\r\n if anchor is None:\r\n anchor = self.get_today()\r\n other_day = get_date_from_diff(anchor, delta_day)\r\n if delta_day > 0:\r\n start, end = anchor, other_day\r\n else:\r\n start, end = other_day, anchor\r\n return self.retype(self.loc[start:end])\r\n\r\n def till(self, end_date):\r\n return self[self.index <= end_date]\r\n\r\n def start_from(self, start_date):\r\n return self[self.index >= start_date]\r\n\r\n def within(self, start_date, end_date):\r\n return self.start_from(start_date).till(end_date)\r\n\r\n def copy(self, deep=True):\r\n return self.retype(super(StockDataFrame, self).copy(deep))\r\n\r\n @staticmethod\r\n def retype(value, index_column=None):\r\n \"\"\" if the input is a `DataFrame`, convert it to this class.\r\n\r\n :param index_column: the column that will be used as index,\r\n default to `date`\r\n :param value: value to convert\r\n :return: this extended class\r\n \"\"\"\r\n if index_column is None:\r\n index_column = 'date'\r\n\r\n if isinstance(value, pd.DataFrame):\r\n # use all lower case for column name\r\n value.columns = map(lambda c: 
c.lower(), value.columns)\r\n\r\n if index_column in value.columns:\r\n value.set_index(index_column, inplace=True)\r\n value = StockDataFrame(value)\r\n return value\r\n"
] |
[
[
"numpy.log",
"pandas.notnull",
"numpy.abs",
"numpy.multiply",
"numpy.min",
"numpy.max",
"numpy.zeros_like",
"numpy.diff",
"numpy.where"
]
] |
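Illustrative note (not part of the record): the `_get_cross` helper in the source above is what turns `(left)_(x|xu|xd)_(right)` column names into cross-over signals via `numpy.zeros_like` and `numpy.diff`. A minimal standalone restatement with plain numpy/pandas follows; the `fast`/`slow` column names and values are made up for illustration.

import numpy as np
import pandas as pd

df = pd.DataFrame({'fast': [1, 2, 3, 2, 1], 'slow': [2, 2, 2, 2, 2]})
lt_series = df['fast'] > df['slow']           # True where fast is above slow
different = np.zeros_like(lt_series)          # marks bars where the relation flips
different[1:] = np.diff(lt_series)            # boolean diff == XOR of consecutive bars
different[0] = False
df['fast_xu_slow'] = different & lt_series    # upward cross: flipped and now above
df['fast_xd_slow'] = different & ~lt_series   # downward cross: flipped and now below
print(df[['fast_xu_slow', 'fast_xd_slow']])   # cross up at row 2, cross down at row 3
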
MattMorgis/tfx
|
[
"f11cc054f079c998a52002e14b6ba74063fed986"
] |
[
"tfx/examples/chicago_taxi/preprocess.py"
] |
[
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Preprocessor applying tf.transform to the chicago_taxi data.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nimport apache_beam as beam\nimport tensorflow as tf\n\nimport tensorflow_transform as transform\nimport tensorflow_transform.beam as tft_beam\n\nfrom tensorflow_transform.coders import example_proto_coder\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import dataset_schema\nfrom tfx.examples.chicago_taxi.trainer import taxi\n\n\ndef _fill_in_missing(x):\n \"\"\"Replace missing values in a SparseTensor.\n\n Fills in missing values of `x` with '' or 0, and converts to a dense tensor.\n\n Args:\n x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1\n in the second dimension.\n\n Returns:\n A rank 1 tensor where missing values of `x` have been filled in.\n \"\"\"\n default_value = '' if x.dtype == tf.string else 0\n return tf.squeeze(\n tf.sparse_to_dense(x.indices, [x.dense_shape[0], 1], x.values,\n default_value),\n axis=1)\n\n\n# TODO(b/114126687): Make schema as a required argument and remove the\n# hard-coded feature spec in trainer/taxi.py.\ndef transform_data(input_handle,\n outfile_prefix,\n working_dir,\n schema_file,\n transform_dir=None,\n max_rows=None,\n pipeline_args=None):\n \"\"\"The main tf.transform method which analyzes and transforms data.\n\n Args:\n input_handle: BigQuery table name to process specified as DATASET.TABLE or\n path to csv file with input data.\n outfile_prefix: Filename prefix for emitted transformed examples\n working_dir: Directory in which transformed examples and transform function\n will be emitted.\n schema_file: An file path that contains a text-serialized TensorFlow\n metadata schema of the input data.\n transform_dir: Directory in which the transform output is located. If\n provided, this will load the transform_fn from disk instead of computing\n it over the data. 
Hint: this is useful for transforming eval data.\n max_rows: Number of rows to query from BigQuery\n pipeline_args: additional DataflowRunner or DirectRunner args passed to the\n beam pipeline.\n \"\"\"\n\n def preprocessing_fn(inputs):\n \"\"\"tf.transform's callback function for preprocessing inputs.\n\n Args:\n inputs: map from feature keys to raw not-yet-transformed features.\n\n Returns:\n Map from string feature key to transformed feature operations.\n \"\"\"\n outputs = {}\n for key in taxi.DENSE_FLOAT_FEATURE_KEYS:\n # Preserve this feature as a dense float, setting nan's to the mean.\n outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(\n _fill_in_missing(inputs[key]))\n\n for key in taxi.VOCAB_FEATURE_KEYS:\n # Build a vocabulary for this feature.\n outputs[\n taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(\n _fill_in_missing(inputs[key]),\n top_k=taxi.VOCAB_SIZE,\n num_oov_buckets=taxi.OOV_SIZE)\n\n for key in taxi.BUCKET_FEATURE_KEYS:\n outputs[taxi.transformed_name(key)] = transform.bucketize(\n _fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)\n\n for key in taxi.CATEGORICAL_FEATURE_KEYS:\n outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])\n\n # Was this passenger a big tipper?\n taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])\n tips = _fill_in_missing(inputs[taxi.LABEL_KEY])\n outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(\n tf.is_nan(taxi_fare),\n tf.cast(tf.zeros_like(taxi_fare), tf.int64),\n # Test if the tip was > 20% of the fare.\n tf.cast(\n tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),\n tf.int64))\n\n return outputs\n\n schema = taxi.read_schema(schema_file)\n raw_feature_spec = taxi.get_raw_feature_spec(schema)\n raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)\n raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)\n\n with beam.Pipeline(argv=pipeline_args) as pipeline:\n with tft_beam.Context(temp_dir=working_dir):\n if input_handle.lower().endswith('csv'):\n csv_coder = taxi.make_csv_coder(schema)\n raw_data = (\n pipeline\n | 'ReadFromText' >> beam.io.ReadFromText(\n input_handle, skip_header_lines=1))\n decode_transform = beam.Map(csv_coder.decode)\n else:\n query = taxi.make_sql(input_handle, max_rows, for_eval=False)\n raw_data = (\n pipeline\n | 'ReadBigQuery' >> beam.io.Read(\n beam.io.BigQuerySource(query=query, use_standard_sql=True)))\n decode_transform = beam.Map(\n taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)\n\n if transform_dir is None:\n decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform\n transform_fn = (\n (decoded_data, raw_data_metadata) |\n ('Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)))\n\n _ = (\n transform_fn\n | ('WriteTransformFn' >>\n tft_beam.WriteTransformFn(working_dir)))\n else:\n transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)\n\n # Shuffling the data before materialization will improve Training\n # effectiveness downstream. 
Here we shuffle the raw_data (as opposed to\n # decoded data) since it has a compact representation.\n shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()\n\n decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform\n (transformed_data, transformed_metadata) = (\n ((decoded_data, raw_data_metadata), transform_fn)\n | 'Transform' >> tft_beam.TransformDataset())\n\n coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)\n _ = (\n transformed_data\n | 'SerializeExamples' >> beam.Map(coder.encode)\n | 'WriteExamples' >> beam.io.WriteToTFRecord(\n os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz')\n )\n\n\ndef main():\n tf.logging.set_verbosity(tf.logging.INFO)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--input',\n help=('Input BigQuery table to process specified as: '\n 'DATASET.TABLE or path to csv file with input data.'))\n\n parser.add_argument(\n '--schema_file', help='File holding the schema for the input data')\n\n parser.add_argument(\n '--output_dir',\n help=('Directory in which transformed examples and function '\n 'will be emitted.'))\n\n parser.add_argument(\n '--outfile_prefix',\n help='Filename prefix for emitted transformed examples')\n\n parser.add_argument(\n '--transform_dir',\n required=False,\n default=None,\n help='Directory in which the transform output is located')\n\n parser.add_argument(\n '--max_rows',\n help='Number of rows to query from BigQuery',\n default=None,\n type=int)\n\n known_args, pipeline_args = parser.parse_known_args()\n transform_data(\n input_handle=known_args.input,\n outfile_prefix=known_args.outfile_prefix,\n working_dir=known_args.output_dir,\n schema_file=known_args.schema_file,\n transform_dir=known_args.transform_dir,\n max_rows=known_args.max_rows,\n pipeline_args=pipeline_args)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.is_nan",
"tensorflow.sparse_to_dense",
"tensorflow.constant",
"tensorflow.zeros_like",
"tensorflow.logging.set_verbosity"
]
] |
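Illustrative note (not part of the record): the label construction inside `preprocessing_fn` above marks a trip as a "big tipper" when the tip exceeds 20% of the fare, with NaN fares mapped to label 0. A sketch of the same rule outside the Beam/TF-Transform pipeline, using plain numpy on made-up values.

import numpy as np

taxi_fare = np.array([10.0, np.nan, 40.0])
tips = np.array([3.0, 1.0, 5.0])

# NaN fares get label 0; otherwise the label is 1 when the tip exceeds 20% of the fare.
# (A comparison against NaN evaluates to False, so the np.where branch decides that row.)
big_tipper = np.where(np.isnan(taxi_fare),
                      np.zeros_like(taxi_fare, dtype=np.int64),
                      (tips > 0.2 * taxi_fare).astype(np.int64))
print(big_tipper)  # [1 0 0]
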
samuelbroscheit/open_knowledge_graph_embeddings
|
[
"1ce37a4261a37e357a0f4dac3ee130ff11cbea4e"
] |
[
"utils/misc.py"
] |
[
"import random\n\nimport numpy\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\n\ndef onehot(indexes, N=None, ignore_index=None):\n \"\"\"\n Creates a one-representation of indexes with N possible entries\n if N is not specified, it will suit the maximum index appearing.\n indexes is a long-tensor of indexes\n ignore_index will be zero in onehot representation\n \"\"\"\n return_variable = False\n if isinstance(indexes, Variable):\n return_variable = True\n indexes = indexes.data\n if N is None:\n N = indexes.max() + 1\n sz = list(indexes.size())\n output = indexes.new().byte().resize_(*sz, N).zero_()\n output.scatter_(-1, indexes.unsqueeze(-1), 1)\n if ignore_index is not None and ignore_index >= 0:\n output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)\n if return_variable:\n output = Variable(output, requires_grad=False)\n\n return output\n\n\ndef set_global_seeds(i):\n try:\n import torch\n except ImportError:\n pass\n else:\n torch.manual_seed(i)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(i)\n np.random.seed(i)\n random.seed(i)\n\n\ndef prettyformat_dict_string(d, indent=''):\n result = list()\n for k, v in d.items():\n if isinstance(v, dict):\n result.append('{}{}:\\t\\n{}'.format(indent, k, prettyformat_dict_string(v, indent + ' ')))\n else:\n result.append('{}{}:\\t{}\\n'.format(indent, k, v))\n return ''.join(result)\n\n\ndef pack_list_of_lists(lol):\n offsets = list()\n ent_list = list()\n offsets.append(0)\n for l in lol:\n if isinstance(l, list) or isinstance(l, tuple):\n ent_list.extend(l)\n offsets.append(len(ent_list))\n else:\n ent_list.append(l)\n offsets.append(len(ent_list))\n offsets.append(-len(offsets)-1)\n out = (numpy.array(offsets)+len(offsets)).tolist()\n return out + ent_list\n\ndef unpack_list_of_lists(ents):\n ent_list = list()\n end = -1\n all_begin = -1\n all_end = -1\n for off in ents:\n if all_begin == -1:\n all_begin = off\n if off == 0:\n break\n if end == -1:\n end = off\n continue\n else:\n begin = end\n end = off\n all_end = off\n ent_list.append(ents[begin:end].tolist())\n return ent_list, ents[all_begin:all_end].tolist()\n\n\ndef argparse_bool_type(v):\n \"Type for argparse that correctly treats Boolean values\"\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"numpy.array",
"torch.autograd.Variable"
]
] |
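Illustrative note (not part of the record): a usage sketch for `set_global_seeds` and `argparse_bool_type` from `utils/misc.py` above. Note that the module itself never imports `argparse`, so its `argparse.ArgumentTypeError` branch would raise a `NameError` on an invalid value; the import below is added so the sketch is self-contained, and the flag names are made up.

import argparse

from utils.misc import argparse_bool_type, set_global_seeds  # module path as listed in this record

parser = argparse.ArgumentParser()
parser.add_argument("--use-cuda", type=argparse_bool_type, default=False)
parser.add_argument("--seed", type=int, default=0)
args = parser.parse_args(["--use-cuda", "yes", "--seed", "42"])

set_global_seeds(args.seed)  # seeds torch, numpy and Python's random (and CUDA, if available)
print(args.use_cuda)         # True
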
ys10/GCIClassify
|
[
"a66b1a257ac26b10732a68228721023b99f67a8e"
] |
[
"data_process/ops.py"
] |
[
"# coding=utf-8\nimport os\nimport numpy as np\nfrom scipy.signal import argrelextrema\nfrom scipy.io import wavfile\n\n\ndef find_local_minimum(data, threshold=None):\n \"\"\"\n Find local minimum in data.\n :param data: input data.\n :param threshold: (optional) local minimum whose value is not less than threshold won't be selected.\n :return: a 1-D array.\n \"\"\"\n local_min_idx = argrelextrema(data, np.less)\n local_min_idx = local_min_idx[0]\n if threshold:\n local_min_idx = [idx for idx in local_min_idx if data[idx] < threshold]\n return local_min_idx\n\n\ndef file_names(file_dir):\n \"\"\"\n List all file names(without extension) in target directory.\n :param file_dir:\n target directory.\n :return:\n a list containing file names.\n \"\"\"\n file_names_list = list()\n for _, _, files in os.walk(file_dir):\n for file in files:\n file_names_list.append(file.split(\".\")[0])\n return file_names_list\n\n\ndef read_wav_data(path):\n \"\"\"\n Read wav file.\n :param path:\n wav file path.\n :return:\n sampling rate, waveform data.\n \"\"\"\n rate, data = wavfile.read(path)\n return rate, data[:]\n\n\ndef read_marks_data(path, rate, wave_length):\n \"\"\"\n Read marks file.\n :param path:\n marks file path(containing time of gci).\n :param rate:\n sampling rate.\n :param wave_length:\n wave length.\n :return:\n an list containing the index(time * rate) of gci.\n \"\"\"\n marks = list()\n with open(path) as mark_file:\n while 1:\n lines = mark_file.readlines(10000)\n if not lines:\n break\n marks.extend(map(lambda l: round(float(l) * rate), lines))\n if marks[-1] >= wave_length:\n return marks[:-2]\n return marks\n\n\ndef label_peaks(peaks, marks, threshold):\n \"\"\"\n Label peaks with marks.\n Give a distance threshold, for all peaks within distance from mark no more than threshold.\n Pick up target peak follow these priorities\n 1. nearest right peak;\n 2. nearest left peak;\n 3. 
missed.\n :param peaks: peak indices.\n :param marks: marks indices.\n :param threshold: distance threshold between a couple of (peak, mark).\n :return: a tuple(labels, errors, pos_cnt) where:\n labels: peak labels.\n errors: distance between peaks and marks(zero for negative sample)\n miss: missed marks\n pos_cnt: positive sample count.\n \"\"\"\n labels = [0] * len(peaks)\n errors = [0] * len(peaks)\n miss = list() # missed marks\n pos_cnt = 0 # positive labeled marks count\n for mark in marks:\n left_peaks = list()\n right_peaks = list()\n \"\"\"calculate a search range based on mark & threshold\"\"\"\n search_range = calculate_search_range(mark, threshold)\n \"\"\"record target peaks in search range\"\"\"\n for j in range(0, len(peaks)):\n peak = peaks[j]\n if peak < search_range[\"left\"]:\n continue\n elif peak > search_range[\"right\"]:\n continue\n elif search_range[\"left\"] <= peak < mark: # in left half search range\n left_peaks.append(j)\n elif mark <= peak <= search_range[\"right\"]: # in right half search range\n right_peaks.append(j)\n else:\n print(\"mark: {}, peak: {}, threshold: {}\".format(mark, peak, threshold))\n print(\"left_border: {}, right_border: {}\".format(search_range[\"left\"], search_range[\"right\"]))\n raise KeyError\n \"\"\"pick up the optimum peak\"\"\"\n left_peaks.sort()\n right_peaks.sort()\n if len(right_peaks) > 0: # nearest right peak exists.\n right_peaks.sort()\n peak_idx = right_peaks[0]\n elif len(left_peaks) > 0: # nearest right peak does not exist, but nearest left peak exists.\n left_peaks.sort()\n peak_idx = left_peaks[len(left_peaks) - 1]\n else: # neither nearest right or left peak exists, finally miss this mark & record it.\n miss.append(mark)\n continue\n labels[peak_idx] = 1\n peak = peaks[peak_idx]\n error = abs(peak - mark)\n errors[peak_idx] = error\n pos_cnt += 1\n assert len(peaks) == len(labels) == len(errors)\n return labels, errors, miss, pos_cnt\n\n\ndef calculate_search_range(mark, threshold):\n search_range = {\"left\": mark-threshold/2, \"right\": mark+threshold}\n return search_range\n\n\n# def label_peaks(peaks, marks, threshold):\n# \"\"\"\n# Label peaks with marks.\n# Give a distance threshold, for all peaks within distance from mark no more than threshold.\n# Pick up target peak follow these priorities\n# 1. nearest right peak;\n# 2. nearest left peak;\n# 3. 
missed.\n# :param peaks: peak indices.\n# :param marks: marks indices.\n# :param threshold: distance threshold between a couple of (peak, mark).\n# :return: a tuple(labels, errors, pos_cnt) where:\n# labels: peak labels.\n# errors: distance between peaks and marks(zero for negative sample)\n# miss: missed marks\n# pos_cnt: positive sample count.\n# \"\"\"\n# marks.sort()\n# peaks.sort()\n# labels = [0] * len(peaks)\n# errors = [0] * len(peaks)\n# miss = list() # missed marks\n# pos_cnt = 0 # positive labeled marks count\n# current_peak = 0 # peak index\n# for i in range(len(marks)):\n# mark = marks[i]\n# if current_peak >= len(peaks) - 1: # finally miss this mark & record it.\n# miss.append(mark)\n# continue\n# left_peaks = []\n# right_peaks = []\n# for j in range(current_peak, len(peaks)):\n# peak = peaks[j]\n# error = abs(peak-mark)\n# if peak < mark & error <= threshold:\n# left_peaks.append(j)\n# elif peak >= mark & error <= threshold:\n# right_peaks.append(j)\n# elif peak > mark: # Key step\n# break\n# left_peaks.sort()\n# right_peaks.sort()\n# if len(right_peaks) > 0: # nearest right peak exists.\n# right_peaks.sort()\n# peak_idx = right_peaks[0]\n# elif len(left_peaks) > 0: # nearest right peak does not exist, but nearest left peak exists.\n# left_peaks.sort()\n# peak_idx = left_peaks[len(left_peaks) - 1]\n# else: # neither nearest right or left peak exists, finally miss this mark & record it.\n# miss.append(mark)\n# # rate = 16000\n# # print(\"\\tmissed mark: \" + str(mark / rate))\n# # print(\"\\tcurrent peak: \" + str(peaks[current_peak] / rate))\n# continue\n# labels[peak_idx] = 1\n# peak = peaks[peak_idx]\n# error = abs(peak - mark)\n# errors[peak_idx] = error\n# pos_cnt += 1\n# current_peak = peak_idx + 1\n# assert len(peaks) == len(labels) == len(errors)\n# return labels, errors, miss, pos_cnt\n#\n#\n# def old_label_peaks(peaks, marks, threshold):\n# \"\"\"\n# Label peaks with marks.\n# Give a distance threshold, for all peaks within distance from mark no more than threshold.\n# Pick up target peak follow these priorities\n# 1. nearest right peak;\n# 2. 
missed.\n# :param peaks: peak indices.\n# :param marks: marks indices.\n# :param threshold: distance threshold between a couple of (peak, mark).\n# :return: a tuple(labels, errors, pos_cnt) where:\n# labels: peak labels.\n# errors: distance between peaks and marks(zero for negative sample)\n# miss: missed marks\n# pos_cnt: positive sample count.\n# \"\"\"\n# labels = [0] * len(peaks)\n# errors = [0] * len(peaks)\n# miss = list()\n# pos_cnt = 0\n# current_peak = 0\n# for i in range(len(marks)):\n# mark = marks[i]\n# if current_peak == len(peaks): # finally miss this mark & record it.\n# miss.append(mark)\n# continue\n# for j in range(current_peak, len(peaks)):\n# peak = peaks[j]\n# error = peak-mark\n# if peak >= mark & error <= threshold: # label this peak & jump out of the loop.\n# labels[j] = 1\n# errors[j] = error\n# pos_cnt += 1\n# current_peak = j+1\n# break\n# if j == len(peaks)-1: # finally miss this mark & record it.\n# miss.append(mark)\n# assert len(peaks) == len(labels) == len(errors)\n# return labels, errors, miss, pos_cnt\n\n\ndef crop_wav(wav, center, radius):\n \"\"\"\n Crop wav on [center - radius, center + radius + 1], and pad 0 for out of range indices.\n :param wav: wav\n :param center: crop center\n :param radius: crop radius\n :return: a slice whose length is radius*2 +1.\n \"\"\"\n left_border = center - radius\n right_border = center + radius + 1\n if left_border < 0:\n zeros = np.zeros(-left_border)\n cropped_wav = np.concatenate([zeros, wav[0: right_border]])\n elif right_border > len(wav):\n zeros = np.zeros(right_border - len(wav))\n cropped_wav = np.concatenate([wav[left_border: len(wav)], zeros])\n else:\n cropped_wav = wav[left_border: right_border]\n assert len(cropped_wav) == radius * 2 + 1\n return cropped_wav\n"
] |
[
[
"numpy.concatenate",
"scipy.io.wavfile.read",
"numpy.zeros",
"scipy.signal.argrelextrema"
]
] |
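Illustrative note (not part of the record): a small synthetic-data sketch of `find_local_minimum` and `crop_wav` from `data_process/ops.py` above; no wav file is needed, and the sample values, threshold and crop parameters are made up.

import numpy as np
from scipy.signal import argrelextrema

signal = np.array([3.0, 1.0, 2.0, 0.5, 2.5, 0.8, 3.0])

# find_local_minimum: strict local minima, optionally filtered by a threshold.
local_min_idx = argrelextrema(signal, np.less)[0]
local_min_idx = [int(i) for i in local_min_idx if signal[i] < 1.5]
print(local_min_idx)  # [1, 3, 5]

# crop_wav-style slice of radius 2 around index 1, zero-padded at the left edge.
center, radius = 1, 2
left, right = center - radius, center + radius + 1
cropped = np.concatenate([np.zeros(-left), signal[:right]]) if left < 0 else signal[left:right]
print(len(cropped))  # radius * 2 + 1 == 5
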
CarlOwOs/VH_and_PE_codes
|
[
"700726332489ed87270ec52d9efe46fcb835c598"
] |
[
"Data_extraction/CS4.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport Auxiliary.auxiliary_functions as aux_fun\n#--------------------------------------------------\ndef read_and_extract_target():\n '''\n This function reads the processed \"events\" df and computes which\n of the observations correspond to an IC phenomena. After that computation,\n only relevant columns are kept.\n '''\n events_label = pd.read_csv(\"./Temp/events_CS2.csv\")\n # Deleting the previous temporary files\n aux_fun.delete_csvs([\"events_CS2\"],\"./Temp/\")\n events_label[\"target\"] = 0\n for i,row in events_label.iterrows():\n if row.tipus_event in [\"Urgències per Insuficiència Cardíaca\", \"Ingrés per Insuficiència Cardíaca\"]:\n events_label.at[i,\"target\"] = 1\n elif row.tipus_event == \"Exitus\" and row.causa_exitus == \"Cardiovascular\" and row.causa_exitus_cv==\"Insuficiència cardíaca\":\n events_label.at[i,\"target\"] = 1\n elif events_label.loc[i,\"tipus_event\"] in [\"Ingrés per altra causa cardiològica\"]:\n events_label.at[i,\"target\"] = 2\n\n events_label.drop(columns=['fecha_exitus_event', 'causa_exitus', 'causa_exitus_cv', 'origen_ingres_ic', 'tipus_event'], inplace= True)\n return events_label\n#--------------------------------------------------\ndef execute_script():\n events_label = read_and_extract_target()\n # Change this value to modify the file name.\n names = [\"events_label_CS4\"]\n # Change this variable to modify the saving path.\n saving_path = './Temp/'\n aux_fun.write_csvs([events_label],saving_path,names)\n#--------------------------------------------------\n"
] |
[
[
"pandas.read_csv"
]
] |
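Illustrative note (not part of the record): the row-wise labelling loop in `read_and_extract_target` above can be restated with vectorized pandas. The toy frame below (made-up rows) yields the same targets as the original `iterrows` logic: 1 for heart-failure admissions/ER visits and cardiovascular deaths from heart failure, 2 for other cardiology admissions, 0 otherwise.

import numpy as np
import pandas as pd

events = pd.DataFrame({
    "tipus_event": ["Urgències per Insuficiència Cardíaca", "Exitus",
                    "Ingrés per altra causa cardiològica", "Altres"],
    "causa_exitus": [None, "Cardiovascular", None, None],
    "causa_exitus_cv": [None, "Insuficiència cardíaca", None, None],
})

hf_event = events["tipus_event"].isin(
    ["Urgències per Insuficiència Cardíaca", "Ingrés per Insuficiència Cardíaca"])
hf_death = ((events["tipus_event"] == "Exitus")
            & (events["causa_exitus"] == "Cardiovascular")
            & (events["causa_exitus_cv"] == "Insuficiència cardíaca"))
other_cardio = events["tipus_event"] == "Ingrés per altra causa cardiològica"

events["target"] = np.select([hf_event | hf_death, other_cardio], [1, 2], default=0)
print(events["target"].tolist())  # [1, 1, 2, 0]
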
SCiarella/jax
|
[
"a7c9b6d11fa833c748d72b3ccc11baeed9c0248c"
] |
[
"tests/api_test.py"
] |
[
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections\nimport collections.abc\nfrom contextlib import contextmanager\nimport copy\nimport enum\nfrom functools import partial\nimport operator\nimport re\nimport subprocess\nimport sys\nimport types\nimport unittest\nimport warnings\nimport weakref\nimport functools\nimport itertools as it\nimport operator as op\n\nfrom absl import logging\nfrom absl.testing import absltest, parameterized\nimport numpy as np\n\nimport concurrent.futures\n\nimport jax\nimport jax.numpy as jnp\nfrom jax import float0, jit, grad, device_put, jacfwd, jacrev, hessian\nfrom jax import core, dtypes, lax\nfrom jax._src import api\nfrom jax.core import Primitive\nfrom jax.errors import UnexpectedTracerError\nfrom jax.interpreters import ad\nfrom jax.interpreters import xla\nfrom jax.interpreters import pxla\nfrom jax.interpreters.sharded_jit import PartitionSpec as P\nimport jax._src.lib\nfrom jax._src.lib import xla_client\nfrom jax._src import test_util as jtu\nfrom jax import tree_util\nfrom jax import linear_util as lu\nimport jax._src.util\nfrom jax._src.ad_checkpoint import saved_residuals\nfrom jax.ad_checkpoint import checkpoint as new_checkpoint, checkpoint_name\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\n\npython_version = (sys.version_info[0], sys.version_info[1])\nnumpy_version = tuple(map(int, np.__version__.split('.')[:3]))\n\n\nclass CPPJitTest(jtu.BufferDonationTestCase):\n \"\"\"Shared tests between the Python and the C++ jax,jit implementations.\n\n Because the Python implementation supports more features, we need to have the\n Python tests that extend the C++ tests (and not the other way around).\n \"\"\"\n\n @property\n def jit(self):\n # Right now, the CPP tests also test the Python code-path when jaxlib is\n # too old.\n # TODO(jblespiau,phawkins): Remove this when jaxlib has been released.\n # This is in the future, because we are making a breaking change to\n # Tensorflow.\n return api._cpp_jit\n\n @unittest.skipIf(jax._src.lib._xla_extension_version < 40,\n \"Test requires jaxlib 0.1.73\")\n def test_jit_repr(self):\n def my_function():\n return\n jitted = jit(my_function)\n self.assertEqual(repr(jitted), f\"<CompiledFunction of {repr(my_function)}>\")\n\n @unittest.skipIf(jax._src.lib._xla_extension_version < 40,\n \"Test requires jaxlib 0.1.73\")\n def test_jit_repr_errors(self):\n class Callable:\n def __call__(self): pass\n def __repr__(self):\n raise ValueError(\"invalid repr\")\n\n # repr succeeds when underlying function repr fails.\n jitted = jit(Callable())\n self.assertEqual(repr(jitted), \"<CompiledFunction>\")\n\n # repr succeeds when object is malformed.\n del jitted.__wrapped__\n self.assertEqual(repr(jitted), \"<CompiledFunction>\")\n\n def test_jit_of_noncallable(self):\n self.assertRaisesRegex(TypeError, \"Expected a callable value.*\",\n lambda: self.jit(3))\n\n def test_jit_of_generator(self):\n\n def gen(x):\n yield x\n\n 
self.assertRaisesRegex(TypeError,\n \"Expected a function, got a generator function.*\",\n lambda: self.jit(gen))\n\n @parameterized.parameters([\n # Integer support\n (1, 2, 3, 4, 5),\n # Numpy array support\n (\n np.asarray(1, np.int32),\n np.asarray(2, np.int32),\n np.asarray(3, np.int32),\n np.asarray(4, np.int32),\n np.asarray(5, np.int32),\n ),\n ])\n def test_jit_static_args(self, one, two, three, four, five):\n side = []\n\n def f(x, y, z, flag=False, flag2=False):\n del flag2 # unused\n assert flag\n side.append(None)\n return 100 * x + 10 * y + z\n\n f1 = self.jit(f, static_argnums=(3, 4))\n assert f1(one, two, three, True, False) == 123\n assert len(side) == 1\n assert f1(one, two, three, True, False) == 123\n assert len(side) == 1 # Obvious cache hit.\n assert f1(two, one, three, True, False) == 213\n assert len(side) == 1 # Should cache hit because same signature.\n assert f1(two, one, three, True, True) == 213\n assert len(side) == 2\n\n side[:] = []\n f2 = self.jit(f, static_argnums=(0, 2, 3, 4))\n assert f2(1, 2, 3, True, False) == 123\n assert len(side) == 1\n assert f2(1, 3, 3, True, False) == 133\n assert len(side) == 1\n assert f2(2, 2, 3, True, False) == 223\n assert len(side) == 2\n assert f2(2, 4, 3, True, False) == 243\n assert len(side) == 2\n assert f2(2, 4, 3, True, True) == 243\n assert len(side) == 3\n assert f2(2, 5, 3, True, True) == 253\n assert len(side) == 3\n\n def test_static_args_equality(self):\n class A():\n\n def __hash__(self):\n return 1\n\n def __eq__(self, other):\n return isinstance(other, A)\n\n side = []\n def f(x, static_arg):\n del static_arg\n side.append(None)\n return x * 100\n\n f1 = self.jit(f, static_argnums=(1,))\n\n self.assertEqual(f1(1, A()), 100)\n self.assertLen(side, 1)\n self.assertEqual(f1(1, A()), 100)\n self.assertLen(side, 1)\n if self.jit == api._cpp_jit:\n f1_cpp = getattr(f1, \"_cpp_jitted_f\", f1)\n self.assertEqual(f1_cpp._cache_size(), 1)\n\n @parameterized.parameters([\n (1, 2, 3),\n (\n np.asarray(1, np.int32),\n np.asarray(2, np.int32),\n np.asarray(3, np.int32),\n ),\n ])\n def test_jit_kwargs(self, one, two, three):\n side = []\n # For the CPP jit, we need to clear the cache to prevent cache hits between\n # parameterized tests.\n if hasattr(self.jit, \"cache_clear\"):\n self.jit.cache_clear()\n\n def f(x, y, z):\n side.append(None)\n return 100 * x + 10 * y + z\n\n f = self.jit(f)\n assert f(one, two, three) == 123\n assert len(side) == 1\n assert f(one, two, three) == 123\n assert len(side) == 1\n\n assert f(one, two, z=three) == 123\n assert len(side) == 2 # actually recompiles from kwarg\n assert f(one, two, z=three) == 123\n assert len(side) == 2 # but should still cache\n\n f(one, two, z=np.zeros(3)) # doesn't crash\n if config.x64_enabled:\n # In the above call, three is of a new type (int64), thus it should\n # trigger a new compilation.\n assert len(side) == 3\n\n def test_jit_device(self):\n device = jax.devices()[-1]\n x = self.jit(lambda x: x, device=device)(3.)\n self.assertIsInstance(x, xla.DeviceArray)\n self.assertEqual(x.device_buffer.device(), device)\n\n def test_complex_support(self):\n self.assertEqual(self.jit(lambda x: x + 1)(1 + 1j), 2 + 1j)\n\n def test_jit_with_many_args_works(self):\n\n @self.jit\n def f(args_list):\n return sum(args_list)\n\n self.assertEqual(f(list(range(500))), sum(range(500)))\n\n # Jit and Donate arguments\n\n def test_jit_donate_argnums_warning_raised(self):\n x = jnp.array([1.0, 2.0], jnp.float32)\n y = jnp.array([1, 2], jnp.int32)\n f = self.jit(lambda x, y: 
x.sum() + y.sum(), donate_argnums=(0, 1))\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n f(x, y)\n\n self.assertLen(w, 1)\n self.assertTrue(issubclass(w[-1].category, UserWarning))\n self.assertIn(\n \"Some donated buffers were not usable: f32[2]{0}, s32[2]{0}\",\n str(w[-1].message))\n\n @jtu.skip_on_devices(\"cpu\") # In/out aliasing not supported on CPU.\n def test_jit_donate_argnums_invalidates_input(self):\n # We can't just use `lambda x: x` because JAX simplifies this away to an\n # empty XLA computation.\n move = self.jit(lambda x: x + x - x, donate_argnums=0)\n x = jnp.ones([])\n y = move(x)\n self.assertDeleted(x)\n self.assertEqual(y, 1.)\n\n @jtu.skip_on_devices(\"cpu\") # In/out aliasing not supported on CPU.\n def test_jit_donate_argnums_static_argnums(self):\n jit_fun = self.jit(\n lambda a, b, c, d: ((a + b + c), (a + b + d)),\n static_argnums=(0, 1),\n donate_argnums=(2, 3))\n\n c = jax.device_put(jnp.array([1., 1.]))\n d = jax.device_put(jnp.array([1., 1., 1.]))\n e, f = jit_fun(1, 2, c, d)\n np.testing.assert_allclose(e, jnp.array([4., 4.]))\n np.testing.assert_allclose(f, jnp.array([4., 4., 4.]))\n self.assertDeleted(c)\n self.assertDeleted(d)\n\n @jtu.skip_on_devices(\"cpu\") # In/out aliasing not supported on CPU.\n def test_jnp_array_copy(self):\n # https://github.com/google/jax/issues/3412\n\n @partial(self.jit, donate_argnums=(0,))\n def _test(array):\n return array.at[0].set(77)\n\n x = jnp.asarray([0, 1])\n x_copy = jnp.array(x, copy=True)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n _test(x) # donation\n\n # Gives: RuntimeError: Invalid argument: CopyToHostAsync() called on invalid buffer.\n print(x_copy) # doesn't crash\n\n def test_jit_global_cache(self):\n def f(x):\n assert python_should_be_executing\n return x\n\n python_should_be_executing = True\n self.jit(f)(2)\n python_should_be_executing = False\n self.jit(f)(3)\n\n def test_jit_shallow_copy(self):\n def f(x):\n return copy.copy(x)\n self.jit(f)(1)\n\n def test_jit_deep_copy(self):\n def f(x):\n return copy.deepcopy(x)\n self.jit(f)(1)\n\n def test_disable_jit(self):\n effects = []\n\n @self.jit\n def f(x):\n effects.append(1)\n return x\n\n with api.disable_jit():\n f(2)\n f(2)\n assert len(effects) == 2\n\n f(2)\n f(2)\n assert len(effects) == 3\n\n def test_static_argnum_on_method(self):\n\n class A:\n\n @functools.partial(self.jit, static_argnums=(0,))\n def my_func_jit(self, x):\n return x+2\n\n A().my_func_jit(3)\n\n def test_static_argnum_on_static_method_is_not_supported(self):\n with self.assertRaisesRegex(TypeError, \"Expected a callable value\"):\n\n class A:\n\n @functools.partial(self.jit, static_argnums=(0,))\n @classmethod\n def my_classmethod_jit(cls, x):\n return x+2\n\n def test_staticmethod_is_not_supported(self):\n with self.assertRaisesRegex(TypeError,\n \"staticmethod arguments are not supported\"):\n\n class A:\n\n @functools.partial(self.jit)\n @staticmethod\n def my_staticmethod_jit(x):\n return x + 2\n\n def test_concurrent_jit(self):\n @self.jit\n def f(x):\n return x + x - 3.\n\n xs = [np.random.randn(i) for i in range(10)]\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = [executor.submit(partial(f, x)) for x in xs]\n ys = [f.result() for f in futures]\n for x, y in zip(xs, ys):\n self.assertAllClose(x * 2 - 3., y)\n\n def test_trivial_computations(self):\n x = jnp.array([1, 2, 3])\n y = self.jit(lambda x: x)(x)\n self.assertIs(x, y)\n\n z1, z2 = self.jit(lambda x: (x, x))(x)\n 
self.assertIs(z1, z2)\n\n x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])\n z1, z2, z3 = self.jit(lambda x, y: (y, 1, x))(x1, x2)\n self.assertIs(z1, x2)\n self.assertIs(z3, x1)\n self.assertEqual(z2, 1)\n\n def test_trivial_computations_with_tokens(self):\n @self.jit\n def noop(arr, token):\n return arr, token\n\n arr = jax.numpy.ones(10)\n token = jax.lax.create_token()\n\n self.assertEqual(token, noop(arr, token)[1])\n\n def test_jit_bad_input(self):\n def f(x):\n return x\n\n self.assertRaisesRegex(\n TypeError, \".* 'foo' of type <.*'str'> is not a valid JAX type\",\n lambda: self.jit(f)(\"foo\"))\n\n def test_jit_on_all_devices(self):\n # Verifies we can run the same computation on every device present, even\n # if they are, for example, different models of GPU.\n data = np.random.rand(1000).astype(np.float32)\n f = self.jit(jnp.negative)\n for device in jax.local_devices():\n x = device_put(data, device=device)\n np.testing.assert_array_equal(-data, f(x))\n\n def test_jit_nested_donate_ignored(self):\n jit_fun = self.jit(lambda x: self.jit(lambda y: y**2, donate_argnums=0)(x))\n a = jax.device_put(jnp.array(1))\n\n # NOTE(mattjj): stopped raising error here and instead just ignored\n # with self.assertRaisesRegex(ValueError, \"nested.*not supported\"):\n # jit_fun(a)\n\n jit_fun(a) # doesn't crash\n\n def test_jit_reference_dropping(self):\n x = jnp.ones(10)\n f = (lambda x: lambda: x)(x) # reference to x in f's closure\n g = self.jit(f)\n x = weakref.ref(x) # no more strong ref to x in this scope\n assert x() is not None # x is still around\n f() # f runs\n g() # g runs\n g() # g runs a second time\n del f # delete the raw callable\n assert x() is not None # x is still around\n g() # g still runs\n del g # no more references to x\n assert x() is None # x is gone\n\n def test_jit_raises_on_first_invocation_on_non_hashable_static_argnum(self):\n if self.jit != api._python_jit:\n raise unittest.SkipTest(\"this test only applies to _python_jit\")\n f = lambda x, y: x + 3\n jitted_f = self.jit(f, static_argnums=(1,))\n\n msg = (\"Non-hashable static arguments are not supported, as this can lead \"\n \"to unexpected cache-misses. Static argument (index 1) of type \"\n \"<class 'numpy.ndarray'> for function <lambda> is non-hashable.\")\n with self.assertRaisesRegex(ValueError, re.escape(msg)):\n jitted_f(1, np.asarray(1))\n\n def test_cpp_jit_raises_on_non_hashable_static_argnum(self):\n if self.jit != api._cpp_jit:\n raise unittest.SkipTest(\"this test only applies to _cpp_jit\")\n\n f = lambda x, y: x + 3\n jitted_f = api._cpp_jit(f, static_argnums=[1])\n\n jitted_f(1, 1)\n\n msg = (\"Non-hashable static arguments are not supported. An error occured \"\n \".*while trying to hash an object of type \"\n \"<class 'numpy\\\\.ndarray'>, 1. 
The error was:\\nTypeError: \"\n \"unhashable type: 'numpy\\\\.ndarray'\")\n\n with self.assertRaisesRegex(ValueError, msg):\n jitted_f(1, np.asarray(1))\n\n class HashableWithoutEq:\n\n def __hash__(self):\n return 1\n\n def __eq__(self, other):\n raise NotImplementedError(\n \"A Python error is as is, without stack trace\")\n\n with self.assertRaisesRegex(\n ValueError,\n re.escape(\"static arguments should be comparable using __eq__\")):\n jitted_f(1, HashableWithoutEq())\n\n def test_cpp_jitted_function_returns_PyBuffer(self):\n if self.jit != api._cpp_jit:\n raise unittest.SkipTest(\"this test only applies to _cpp_jit\")\n\n jitted_f = self.jit(lambda a: a + 1)\n jitted_f(1)\n self.assertIsInstance(jitted_f(2), xla._CppDeviceArray)\n\n @jtu.skip_on_devices(\"cpu\")\n def test_explicit_backend(self):\n f = lambda x: x + 1\n jitted_f = jit(f, backend=jtu.device_under_test())\n jitted_f_cpu = jit(f, backend=\"cpu\")\n\n result = jitted_f(1.)\n result_cpu = jitted_f_cpu(1.)\n self.assertEqual(result.device_buffer.platform(), jtu.device_under_test())\n self.assertEqual(result_cpu.device_buffer.platform(), \"cpu\")\n\n @jtu.skip_on_devices(\"cpu\")\n def test_device_to_device_copy_between_backends(self):\n # b/186624243\n f = lambda x: x + 1\n jitted_f = jit(f, backend=jtu.device_under_test())\n jitted_f_cpu = jit(f, backend=\"cpu\")\n\n x = np.arange(30).reshape(1, 10, 3)\n result = jitted_f(x)\n result_cpu = jitted_f_cpu(result)\n result_2 = jitted_f(result_cpu)\n result_cpu_2 = jitted_f_cpu(result_2)\n self.assertAllClose(result_2, x + 3)\n self.assertAllClose(result_cpu_2, x + 4)\n\n @jtu.skip_on_devices(\"cpu\")\n def test_mismatched_nested_backends(self):\n @partial(jit, backend=jtu.device_under_test())\n def f(x):\n return jit(lambda x: x + 1, backend=\"cpu\")(x)\n\n with self.assertRaisesRegex(\n ValueError,\n f\"Outer-jit backend specification {jtu.device_under_test()} must match \"\n f\"explicit inner-jit backend specification cpu.\"):\n f(1.)\n\n def test_omnistaging(self):\n # See https://github.com/google/jax/issues/5206\n\n # TODO(frostig): remove once we always enable_custom_prng\n def _prng_key_as_array(key):\n return key.unsafe_raw_array() if config.jax_enable_custom_prng else key\n\n # TODO(frostig): remove once we always enable_custom_prng\n def _array_as_prng_key(arr):\n arr = np.array(arr, dtype=np.uint32)\n if config.jax_enable_custom_prng:\n return jax._src.prng.PRNGKeyArray(\n jax._src.prng.threefry_prng_impl, arr)\n else:\n return arr\n\n key_list = [None]\n\n def init():\n key, subkey = jax.random.split(key_list[0])\n key_list[0] = key\n return jax.random.normal(subkey, ())\n\n key_list[0] = _array_as_prng_key([2384771982, 3928867769])\n init()\n self.jit(init)()\n self.assertIsInstance(_prng_key_as_array(key_list[0]), core.Tracer)\n\n def test_jit_wrapped_attributes(self):\n def f(x: int) -> int:\n \"\"\"docstring of f.\"\"\"\n return x + 1\n f.some_value = 4\n jf = self.jit(f)\n for attr in [\"doc\", \"name\", \"module\", \"qualname\", \"annotations\"]:\n self.assertEqual(\n {attr: getattr(f, f\"__{attr}__\")},\n {attr: getattr(jf, f\"__{attr}__\")})\n self.assertEqual(f.some_value, jf.some_value)\n\n def test_jit_python_builtin(self):\n x = jnp.array([1, 2])\n expected = x + 1\n jit_add = self.jit(operator.add, static_argnums=(1,))\n actual = jit_add(x, 1)\n self.assertArraysEqual(expected, actual)\n\n def test__infer_argnums_and_argnames(self):\n def f(x, y=1):\n pass\n\n argnums, argnames = api._infer_argnums_and_argnames(\n f, argnums=None, argnames=None)\n 
assert argnums == ()\n assert argnames == ()\n\n argnums, argnames = api._infer_argnums_and_argnames(\n f, argnums=0, argnames=None)\n assert argnums == (0,)\n assert argnames == ('x',)\n\n argnums, argnames = api._infer_argnums_and_argnames(\n f, argnums=None, argnames='y')\n assert argnums == (1,)\n assert argnames == ('y',)\n\n argnums, argnames = api._infer_argnums_and_argnames(\n f, argnums=0, argnames='y') # no validation\n assert argnums == (0,)\n assert argnames == ('y',)\n\n def g(x, y, *args):\n pass\n\n argnums, argnames = api._infer_argnums_and_argnames(\n g, argnums=(1, 2), argnames=None)\n assert argnums == (1, 2)\n assert argnames == ('y',)\n\n def h(x, y, **kwargs):\n pass\n\n argnums, argnames = api._infer_argnums_and_argnames(\n h, argnums=None, argnames=('foo', 'bar'))\n assert argnums == ()\n assert argnames == ('foo', 'bar')\n\n def test_jit_with_static_argnames(self):\n\n def f(x):\n assert x == 'foo'\n return 1\n\n f_nums = self.jit(f, static_argnums=0)\n assert f_nums('foo') == 1\n assert f_nums(x='foo') == 1\n\n f_names = self.jit(f, static_argnames='x')\n assert f_names('foo') == 1\n assert f_names(x='foo') == 1\n\n def test_new_static_argnum_on_keyword_arguments(self):\n f = self.jit(lambda x: x, static_argnums=0)\n y = f(x=4)\n assert y == 4\n\n def test_new_static_argnum_with_default_arguments(self):\n f = self.jit(lambda x=4: x, static_argnums=0)\n y = f()\n assert y == 4\n\n def test_jit_with_mismatched_static_argnames(self):\n x_is_tracer, y_is_tracer = False, False\n def f(x, y):\n assert isinstance(x, core.Tracer) == x_is_tracer\n assert isinstance(y, core.Tracer) == y_is_tracer\n return 1\n\n # If both static_argnums and static_argnames are provided, they are allowed\n # to disagree and `jit` will respect the user's choices.\n f_nums = self.jit(f, static_argnums=1, static_argnames=())\n x_is_tracer, y_is_tracer = True, False\n assert f_nums(2, 'foo') == 1\n x_is_tracer, y_is_tracer = True, True\n assert f_nums(1, y=2) == 1\n\n f_names = self.jit(f, static_argnums=(), static_argnames='y')\n x_is_tracer, y_is_tracer = True, True\n assert f_names(2, 3) == 1\n x_is_tracer, y_is_tracer = True, False\n assert f_names(1, y='foo') == 1\n\n f_mixed = self.jit(f, static_argnums=(1,), static_argnames='x')\n x_is_tracer, y_is_tracer = True, False\n assert f_mixed(2, 'foo') == 1\n x_is_tracer, y_is_tracer = True, True\n assert f_mixed(1, y=3) == 1\n x_is_tracer, y_is_tracer = False, True\n assert f_mixed(x='foo', y=3) == 1\n\n # TODO(zhangqiaorjc): Test pruning constants after DCE pass prunes primitive\n # applications.\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_num_args={}\".format(num_args),\n \"num_args\": num_args}\n for num_args in [2, 3, 4]))\n def test_jit_with_pruned_args(self, num_args):\n def f(*args):\n used = np.array(2)\n return args[1] + used\n f_pruned = self.jit(f)\n args = range(num_args)\n with jtu.count_device_put() as count:\n np.testing.assert_allclose(f_pruned(*args), 3)\n self.assertEqual(count[0], 1)\n\n @unittest.skipIf(jax._src.lib._xla_extension_version <= 36,\n \"Test requires jaxlib 0.1.71\")\n def testBuffersAreFreedPromptly(self):\n # Regression test for a bug where garbage collection was delayed too long\n # for NumPy buffers that are aliased zero-copy by the runtime.\n @self.jit\n def f(x):\n return x + 1\n\n refs = []\n x = np.ones((10000,), np.float32)\n for step in range(1000):\n x = f(x)\n refs.append(weakref.ref(x))\n x = np.asarray(x)\n\n # We expect most of the input buffers to have been 
garbage\n # collected in parallel with the execution. We can't call\n # block_until_ready() here because it would force a garbage collection.\n live_refs = len([ref for ref in refs if ref() is not None])\n self.assertLessEqual(live_refs, 100)\n\n def test_jit_lower_compile(self):\n def f(x):\n return jnp.sqrt(x ** 2) + 1.\n\n f_jit = self.jit(f)\n f_low = f_jit.lower(1.)\n f_exe = f_low.compile()\n self.assertAllClose(f_exe(1.), 2.)\n\n def test_jit_lower_compile_in_tree_mismatch(self):\n def f(x):\n return jnp.sqrt(x ** 2) + 1.\n\n f_jit = self.jit(f)\n f_low = f_jit.lower(1.)\n f_exe = f_low.compile()\n self.assertRaisesRegex(\n TypeError, \"function compiled for .*, called with .*\",\n lambda: f_exe([1.]))\n\n def test_jit_lower_compile_trivial(self):\n def f(x): return x\n out = self.jit(f).lower(1.).compile()(4.)\n self.assertAllClose(out, 4.)\n\n def test_jit_lower_compile_trivial_in_tree_mismatch(self):\n def f(x): return x\n f_exe = self.jit(f).lower(1.).compile()\n self.assertRaisesRegex(\n TypeError, \"function compiled for .*, called with .*\",\n lambda: f_exe([4.]))\n\n def test_jit_lower_compile_arg_type_mismatch(self):\n def f(x):\n return jnp.sqrt(x ** 2) + 1.\n\n x = jnp.array(1, dtype=int)\n x_f32 = x.astype(jnp.float32)\n x_i32 = x.astype(jnp.int32)\n f_exe = self.jit(f).lower(x_f32).compile()\n self.assertRaisesRegex(\n TypeError,\n \"Computation compiled for input types:\\n.*float32.*\\n\"\n \"called with:\\n.*int32.*\",\n lambda: f_exe(x_i32))\n\n def test_jit_lower_compile_multi_arg(self):\n def f(*args):\n x, *_ = args\n return jnp.sqrt(x ** 2) + 1.\n f_exe = self.jit(f).lower(1., 1.).compile()\n self.assertAllClose(f_exe(1., 1.), 2.)\n\n def test_jit_lower_compile_trivial_multi_arg(self):\n def f(*args):\n x, *_ = args\n return x\n f_exe = self.jit(f).lower(1., 1.).compile()\n self.assertAllClose(f_exe(1., 1.), 1.)\n\n\nclass PythonJitTest(CPPJitTest):\n\n @property\n def jit(self):\n return api._python_jit\n\n\nclass APITest(jtu.JaxTestCase):\n\n def test_grad_bad_input(self):\n def f(x):\n return x\n\n self.assertRaisesRegex(\n TypeError, \".* 'foo' of type <.*'str'> is not a valid JAX type\",\n lambda: grad(f)(\"foo\"))\n\n def test_grad_argnums(self):\n def f(x, y, z, flag=False):\n assert flag\n return 1.0 * x + 2.0 * y + 3.0 * z\n\n assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0\n assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0\n assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)\n\n def test_value_and_grad_argnums(self):\n def f(x, y, z, flag=False):\n assert flag\n return 1.0 * x + 2.0 * y + 3.0 * z\n\n y = f(1.0, 1.0, 1.0, flag=True)\n assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)\n assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)\n assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))\n\n def test_grad_of_jit(self):\n side = []\n\n @jit\n def f(x):\n side.append(None)\n return x * x\n\n assert grad(f)(1.0) == 2.0\n assert len(side) == 1\n assert grad(f)(2.0) == 4.0\n assert len(side) == 1\n\n def test_jit_of_grad(self):\n side = []\n\n @jit\n def f(x):\n side.append(None)\n return x * x\n\n g = jit(grad(f))\n assert g(1.0) == 2.0\n assert len(side) == 1\n assert g(2.0) == 4.0\n assert len(side) == 1\n\n def test_bad_input(self):\n def f(x):\n return x\n\n self.assertRaisesRegex(\n TypeError, \".* 'foo' of type <.*'str'> is not a valid JAX type\",\n lambda: grad(f)(\"foo\"))\n\n self.assertRaisesRegex(\n TypeError, \".* 'foo' of type 
<.*'str'> is not a valid JAX type\",\n lambda: jit(f)(\"foo\"))\n\n def test_grad_tuple_output(self):\n jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,\n \"Gradient only defined for scalar-output functions. \")\n\n def test_grad_unit_output(self):\n jtu.check_raises(lambda: grad(lambda x: ())(np.zeros(3)), TypeError,\n \"Gradient only defined for scalar-output functions. \")\n\n def test_grad_nonscalar_output(self):\n jtu.check_raises(lambda: grad(lambda x: x)(np.zeros(3)), TypeError,\n \"Gradient only defined for scalar-output functions. \")\n\n def test_unwrapped_numpy(self):\n def f(x):\n return np.exp(x)\n\n with self.assertRaisesRegex(Exception, \"The numpy.ndarray conversion .*\"):\n grad(f)(np.zeros(3))\n\n def test_binop_mismatch(self):\n def f(x, y):\n return x + y\n\n jtu.check_raises(\n lambda: f(jnp.zeros(3), jnp.zeros(4)),\n TypeError,\n \"add got incompatible shapes for broadcasting: (3,), (4,).\")\n\n jtu.check_raises(\n lambda: grad(f)(np.zeros(3), np.zeros(4)),\n TypeError,\n \"add got incompatible shapes for broadcasting: (3,), (4,).\")\n\n def test_dot_mismatch(self):\n def f(x, y):\n return jnp.dot(x, y)\n\n self.assertRaisesRegex(\n TypeError, \"Incompatible shapes for dot: got \\\\(3L?,\\\\) and \\\\(4L?,\\\\).\",\n lambda: grad(f)(np.zeros(3), np.zeros(4)))\n\n def test_abstract_error_message(self):\n for castfun in [float, complex, int]:\n def f(x):\n return castfun(x)\n\n self.assertRaisesRegex(\n TypeError,\n f\"[Tt]ry using `x.astype\\\\({castfun.__name__}\\\\)`\",\n lambda: jit(f)(1.0))\n\n def test_switch_value_jit(self):\n def f(x):\n y = x > 0\n if y:\n return x\n else:\n return -x\n\n assert grad(f)(1.0) == 1.0\n assert grad(f)(-1.0) == -1.0\n with self.assertRaisesRegex(core.ConcretizationTypeError,\n \"Abstract tracer value\"):\n jit(f)(1)\n\n def test_list_index_err(self):\n L = [1, 2, 3]\n def f(n):\n return L[n]\n\n assert jit(f, static_argnums=(0,))(0) == L[0]\n self.assertRaisesRegex(\n TypeError,\n r\"The __index__\\(\\) method was called on the JAX Tracer object.*\",\n lambda: jit(f)(0))\n\n def test_range_err(self):\n def f(x, n):\n for i in range(n):\n x = x + i\n return x\n\n assert jit(f, static_argnums=(1,))(0, 5) == 10\n self.assertRaisesRegex(\n TypeError,\n r\"The __index__\\(\\) method was called on the JAX Tracer object.*\",\n lambda: jit(f)(0, 5))\n\n def test_cast_int(self):\n f = lambda x: int(x)\n self.assertRaisesRegex(\n TypeError,\n \"('(?:JaxprTracer|DynamicJaxprTracer)' object cannot be interpreted as an integer\"\n \"|Abstract tracer value encountered where concrete value is expected.*)\", lambda: jit(f)(0))\n\n def test_casts(self):\n for castfun in [hex, oct]:\n f = lambda x: castfun(x)\n self.assertRaisesRegex(\n TypeError,\n r\"The __index__\\(\\) method was called on the JAX Tracer object.*\", lambda: jit(f)(0))\n\n def test_unimplemented_interpreter_rules(self):\n foo_p = Primitive('foo')\n def foo(x):\n return foo_p.bind(x)\n\n jtu.check_raises(lambda: foo(1.0), NotImplementedError,\n \"Evaluation rule for 'foo' not implemented\")\n\n jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,\n \"Abstract evaluation for 'foo' not implemented\")\n\n jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,\n \"Differentiation rule for 'foo' not implemented\")\n\n foo_p.def_abstract_eval(lambda x: x)\n\n jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,\n \"XLA translation rule for primitive 'foo' not found\")\n\n foo_p.def_impl(lambda x: x)\n ad.defjvp(foo_p, lambda g, x: foo(g))\n\n 
jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,\n \"Transpose rule (for reverse-mode differentiation) for 'foo' not implemented\")\n\n def test_is_subclass(self):\n self.assertTrue(issubclass(xla.DeviceArray, jnp.ndarray))\n self.assertTrue(issubclass(xla._CppDeviceArray, jnp.ndarray))\n self.assertTrue(issubclass(pxla.ShardedDeviceArray, jnp.ndarray))\n self.assertTrue(issubclass(pxla._ShardedDeviceArray, jnp.ndarray))\n self.assertFalse(issubclass(np.ndarray, jnp.ndarray))\n self.assertFalse(issubclass(xla.DeviceArray, np.ndarray))\n self.assertFalse(issubclass(xla._CppDeviceArray, np.ndarray))\n self.assertFalse(issubclass(pxla.ShardedDeviceArray, np.ndarray))\n self.assertFalse(issubclass(pxla._ShardedDeviceArray, np.ndarray))\n\n def test_is_instance(self):\n def f(x):\n self.assertIsInstance(x, jnp.ndarray)\n self.assertNotIsInstance(x, np.ndarray)\n return x + 2\n jit(f)(3)\n jax.vmap(f)(np.arange(3))\n\n def test_device_put_and_get(self):\n x = np.arange(12.).reshape((3, 4)).astype(\"float32\")\n dx = api.device_put(x)\n self.assertIsInstance(dx, xla.DeviceArray)\n self.assertIsInstance(dx, jnp.ndarray)\n self.assertNotIsInstance(dx, np.ndarray)\n x2 = api.device_get(dx)\n self.assertNotIsInstance(x2, jnp.ndarray)\n self.assertIsInstance(x2, np.ndarray)\n assert np.all(x == x2)\n\n y = [x, (2 * x, 3 * x)]\n dy = api.device_put(y)\n y2 = api.device_get(dy)\n self.assertIsInstance(y2, list)\n self.assertIsInstance(y2[0], np.ndarray)\n assert np.all(y2[0] == x)\n self.assertIsInstance(y2[1], tuple)\n self.assertIsInstance(y2[1][0], np.ndarray)\n assert np.all(y2[1][0] == 2 * x)\n self.assertIsInstance(y2[1][1], np.ndarray)\n assert np.all(y2[1][1] == 3 * x)\n\n def test_device_get_scalar(self):\n x = np.arange(12.).reshape((3, 4)).astype(\"float32\")\n x = api.device_put(x)\n self.assertIsInstance(x, xla.DeviceArray)\n y = [x, 2]\n y2 = api.device_get(y)\n self.assertIsInstance(y2, list)\n self.assertIsInstance(y2[0], np.ndarray)\n assert np.all(y2[0] == x)\n self.assertIsInstance(y2[1], int)\n self.assertEqual(y2[1], 2)\n\n @parameterized.parameters([(3,)], [(2, 0)])\n def test_device_put_across_devices(self, shape):\n if len(api.local_devices()) < 2:\n raise unittest.SkipTest(\"this test requires multiple devices\")\n d1, d2 = api.local_devices()[:2]\n data = np.random.randn(*shape).astype(np.float32)\n x = api.device_put(data, device=d1)\n self.assertEqual(x.device_buffer.device(), d1)\n y = api.device_put(x, device=d2)\n self.assertEqual(y.device_buffer.device(), d2)\n np.testing.assert_array_equal(data, np.array(y))\n # Make sure these don't crash\n api.device_put(x)\n api.device_put(y)\n\n @jtu.skip_on_devices(\"cpu\")\n def test_device_put_across_platforms(self):\n default_device = jax.devices()[0]\n cpu_device = jax.devices(\"cpu\")[0]\n\n np_arr = np.array([1,2,3])\n scalar = 1\n device_arr = jnp.array([1,2,3])\n assert device_arr.device_buffer.device() is default_device\n\n for val in [np_arr, device_arr, scalar]:\n x = api.device_put(val, device=cpu_device)\n self.assertEqual(x.device_buffer.device(), cpu_device)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_jacobian(self):\n R = np.random.RandomState(0).randn\n A = R(4, 3)\n x = R(3)\n\n f = lambda x: jnp.dot(A, x)\n assert np.allclose(jacfwd(f)(x), A)\n assert np.allclose(jacrev(f)(x), A)\n\n f = lambda x: jnp.tanh(jnp.dot(A, x))\n assert np.allclose(jacfwd(f)(x), jacrev(f)(x))\n\n @jtu.skip_on_devices(\"tpu\")\n def test_hessian(self):\n R = np.random.RandomState(0).randn\n A = R(4, 4)\n x = R(4)\n\n f = 
lambda x: jnp.dot(x, jnp.dot(A, x))\n assert np.allclose(hessian(f)(x), A + A.T)\n\n def test_std_basis(self):\n basis = api._std_basis(jnp.zeros(3))\n assert getattr(basis, \"shape\", None) == (3, 3)\n assert np.allclose(basis, np.eye(3))\n\n basis = api._std_basis(jnp.zeros((3, 3)))\n assert getattr(basis, \"shape\", None) == (9, 3, 3)\n assert np.allclose(basis, np.eye(9).reshape(9, 3, 3))\n\n basis = api._std_basis([0., (jnp.zeros(3), jnp.zeros((3, 4)))])\n assert isinstance(basis, list) and len(basis) == 2\n assert getattr(basis[0], \"shape\", None) == (16,)\n assert isinstance(basis[1], tuple) and len(basis[1]) == 2\n assert getattr(basis[1][0], \"shape\", None) == (16, 3)\n assert getattr(basis[1][1], \"shape\", None) == (16, 3, 4)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_jacobian_on_pytrees(self):\n for jacfun in [jacfwd, jacrev]:\n ans = jacfun(lambda x, y: (x, y))(0., 1.)\n expected = (1., 0.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)\n expected = (0., 1.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)\n expected = ((1., 0.),\n (0., 1.),)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = jacfun(lambda x: x[:2])((1., 2., 3.))\n expected = ((1., 0., 0.),\n (0., 1., 0.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n R = np.random.RandomState(0).randn\n x = R(2)\n y = R(3)\n ans = jacfun(lambda x, y: {'x': x, 'xy': jnp.outer(x, y)})(x, y)\n expected = {'x': np.eye(2),\n 'xy': np.kron(np.eye(2), y[:, None]).reshape(2, 3, 2)}\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_hessian_on_pytrees(self):\n ans = hessian(lambda x: jnp.array(x)**2)((1., 2.))\n expected = ((np.array([2., 0.]), np.array([0., 0.])),\n (np.array([0., 0.]), np.array([0., 2.])))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_issue1372(self):\n def quad(x):\n return jnp.dot(x, x)\n\n def f(x, u):\n return quad(x) + quad(u)\n\n x, u = jnp.ones(5), jnp.ones(2)\n\n rev = jacrev\n fwd = jacfwd\n\n # Diagonal entries\n self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))\n self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))\n self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))\n self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))\n\n # Off-diagonal entries by reverse-mode on the outside\n self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))\n self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))\n\n # Off-diagonal entries by forward-mode on the outside\n self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))\n self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))\n\n\n def test_large_device_constant(self):\n ans = jit(lambda x: 2 * x)(jnp.ones(int(2e6))) # doesn't crash\n self.assertAllClose(ans, np.ones(int(2e6)) * 2., check_dtypes=False)\n\n def test_grad_and_aux_basic(self):\n g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.)\n self.assertAllClose(g, grad(lambda x: 
x**3)(3.))\n self.assertAllClose(aux, [9.], check_dtypes=False)\n\n def test_grad_and_aux_error(self):\n with self.assertRaisesRegex(TypeError, \"two-element tuple\"):\n grad(lambda x: (1, 2, 3), has_aux=True)(1.)\n\n with self.assertRaisesRegex(TypeError, \"two-element tuple\"):\n grad(lambda x: x, has_aux=True)(1.)\n\n with self.assertRaisesRegex(TypeError, \"two-element tuple\"):\n grad(lambda x: (x,), has_aux=True)(1.)\n\n def test_grad_and_aux_nested(self):\n def f(x):\n g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)\n return aux[0]\n\n f2 = lambda x: x**3\n\n self.assertEqual(grad(f)(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))\n\n def f(x):\n g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)\n return aux[0] * jnp.sin(x)\n\n f2 = lambda x: x**3 * jnp.sin(x)\n\n self.assertEqual(grad(f)(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))\n\n def test_grad_and_aux_constant(self):\n g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.)\n self.assertEqual(g, grad(lambda x: x**3)(4.))\n self.assertEqual(aux, [4.])\n\n g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.)\n self.assertEqual(g, grad(lambda x: x**3)(4.))\n self.assertEqual(aux, [4.**2, 4.])\n\n def test_grad_and_aux_no_tracers(self):\n # see https://github.com/google/jax/issues/1950\n def f(x):\n aux = dict(identity=x, p1=x+1)\n return x ** 2, aux\n\n _, aux = jax.grad(f, has_aux=True)(3.)\n self.assertIsInstance(aux, dict)\n for val in aux.values():\n self.assertNotIsInstance(val, core.Tracer)\n\n def test_jvp_mismatched_arguments(self):\n self.assertRaisesRegex(\n TypeError,\n (\"primal and tangent arguments to jax.jvp must have the same tree \"\n \"structure\"),\n lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), ()))\n # If primals and tangents must both be tuples or both lists\n self.assertRaisesRegex(\n TypeError,\n (\"primal and tangent arguments to jax.jvp must have the same tree \"\n \"structure\"),\n lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), [np.float32(2)]))\n self.assertRaisesRegex(\n TypeError,\n \"primal and tangent arguments to jax.jvp do not match.\",\n lambda: api.jvp(lambda x: -x, (np.float16(2),), (np.float32(4),)))\n # If primals and tangents are not of the same shape then raise error\n fun = lambda x: x+1\n with self.assertRaisesRegex(\n ValueError, \"jvp called with different primal and tangent shapes\"):\n api.jvp(fun, (jnp.array([1.,2.,3.]),), (jnp.array([1.,2.,3.,4.]),))\n with self.assertRaisesRegex(\n ValueError, \"jvp called with different primal and tangent shapes\"):\n api.jvp(fun, (jnp.float32(10.),), (jnp.array([1.,2.,3.], dtype=jnp.float32),))\n with self.assertRaisesRegex(\n ValueError, \"jvp called with different primal and tangent shapes\"):\n api.jvp(fun, (jnp.array([1.,2.,3.], dtype=jnp.float32),), (jnp.float32(20.),))\n with self.assertRaisesRegex(\n ValueError, \"jvp called with different primal and tangent shapes\"):\n api.jvp(fun, (jnp.array([1.,2.,3.]),), (20.,))\n\n def test_jvp_non_tuple_arguments(self):\n def f(x, y): return x + y\n self.assertRaisesRegex(\n TypeError,\n \"primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.\",\n lambda: api.jvp(f, 0., (1.,)))\n self.assertRaisesRegex(\n TypeError,\n \"primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.\",\n lambda: api.jvp(f, (0.,), np.array([1., 2.])))\n\n def 
test_vjp_mismatched_arguments(self):\n _, pullback = api.vjp(lambda x, y: x * y, np.float32(3), np.float32(4))\n self.assertRaisesRegex(\n TypeError,\n \"Tree structure of cotangent input.*does not match\",\n lambda: pullback((np.float32(7), np.float32(100))))\n self.assertRaisesRegex(\n TypeError,\n \"Type of cotangent input to vjp pullback.*is not the expected tangent type\",\n lambda: pullback((np.float16(42))))\n\n def test_vjp_bad_cotangent_shape(self):\n x = np.ones((2, 5), dtype=np.float32)\n y = np.ones((5, 3), dtype=np.float32)\n def f_jax(x, y):\n return jnp.matmul(x, y)\n res, pullback = jax.vjp(f_jax, x, y)\n with self.assertRaisesRegex(\n ValueError,\n \"Shape of cotangent input to vjp pullback function .* must be the same as the shape of corresponding primal input .*\"):\n pullback(np.ones((2, 4), dtype=np.float32))\n\n def test_jvp_jit_cached(self):\n \"\"\"Bug in caching in presence of JVP and JIT.\"\"\"\n\n def func(x):\n def inner(y):\n return y * x\n\n # Must have two calls to the inner jit (the second one hits the cache)\n res1 = api.jit(inner)(4.)\n res2 = api.jit(inner)(5.)\n return res1 + res2\n\n self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,)))\n\n def test_linear_transpose_abstract(self):\n x = types.SimpleNamespace(shape=(3,), dtype=np.dtype(np.float32))\n y = jnp.arange(3, dtype=np.float32)\n transpose_fun = api.linear_transpose(lambda x: 2 * x, x)\n z, = transpose_fun(y)\n self.assertArraysEqual(2 * y, z, check_dtypes=True)\n\n def test_linear_transpose_integer(self):\n f = lambda x: 2 * x\n transpose = api.linear_transpose(f, 1)\n actual, = transpose(3)\n expected = 6\n self.assertEqual(actual, expected)\n\n def test_linear_transpose_error(self):\n with self.assertRaisesRegex(\n TypeError, \"linear_transpose only supports\"):\n api.linear_transpose(lambda x: 2. * x, 1)\n transpose_fun = api.linear_transpose(lambda x: [x, x], 1.0)\n with self.assertRaisesRegex(TypeError, \"cotangent tree does not match\"):\n transpose_fun(1.0)\n\n transpose_fun = api.linear_transpose(lambda x: jnp.stack([x, x]), 1.0)\n with self.assertRaisesRegex(TypeError, \"cotangent type does not match\"):\n transpose_fun(1.0)\n\n transpose_fun = api.linear_transpose(lambda x: 1j * x, 1.0)\n with self.assertRaisesRegex(TypeError, \"cotangent type does not match\"):\n transpose_fun(1.0)\n\n transpose_fun = api.linear_transpose(lambda x: x, 1.0)\n with self.assertRaisesRegex(TypeError, \"cotangent type does not match\"):\n transpose_fun(1j)\n\n def test_linear_transpose_complex(self):\n f = lambda x: (1 + 2j) * x\n transpose = api.linear_transpose(f, 1j)\n actual, = transpose(3 + 4j)\n expected = -5 + 10j\n self.assertEqual(actual, expected)\n\n def test_linear_transpose_zeros(self):\n f = lambda x: x[0]\n transpose = api.linear_transpose(f, [1., 2.])\n actual, = transpose(3.)\n expected = [3., 0.]\n self.assertEqual(actual, expected)\n\n def test_complex_grad_raises_error(self):\n self.assertRaises(TypeError, lambda: grad(lambda x: jnp.sin(x))(1 + 2j))\n\n def test_holomorphic_grad(self):\n out = grad(lambda x: jnp.sin(x), holomorphic=True)(1 + 2j)\n expected = 2.0327230070196656 - 3.0518977991518j\n self.assertAllClose(out, expected, check_dtypes=False)\n\n def test_nonholomorphic_grad(self):\n zs = 0.5j * np.arange(5) + np.arange(5)\n\n def f(z):\n return jnp.sum(jnp.cos(jnp.abs(z)))\n\n ans = grad(f)(zs)\n expected = np.array([ 0. 
+ 0.j,\n -0.80430663 + 0.40215331j,\n -0.70368982 + 0.35184491j,\n 0.1886467 - 0.09432335j,\n 0.86873727 - 0.43436864j])\n self.assertAllClose(ans, expected, check_dtypes=False,\n atol=jtu.default_gradient_tolerance,\n rtol=jtu.default_gradient_tolerance)\n\n def test_complex_output_jacrev_raises_error(self):\n self.assertRaises(TypeError, lambda: jacrev(lambda x: jnp.sin(x))(1 + 2j))\n\n def test_nonholomorphic_jacrev(self):\n # code based on https://github.com/google/jax/issues/603\n zs = 0.5j * np.arange(5) + np.arange(5)\n\n def f(z):\n return jnp.cos(jnp.linalg.norm(2 * z))\n\n ans = jacrev(f)(zs)\n expected = grad(f)(zs)\n self.assertAllClose(ans, expected)\n\n def test_heterogeneous_jacfwd(self):\n # See https://github.com/google/jax/issues/7157\n # See https://github.com/google/jax/issues/7780\n x = np.array([2.0], dtype=np.float16)\n y = np.array([3.0], dtype=np.float32)\n a = (x, y)\n\n def f(tup):\n jtu._check_dtypes_match(tup, a)\n x, y = tup\n return x, y, x + y\n\n actual = jacfwd(f)(a)\n desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float16)),\n (np.array(0., dtype=np.float32), np.array(1., dtype=np.float32)),\n (np.array(1., dtype=np.float32), np.array(1., dtype=np.float32)))\n jtu._check_dtypes_match(actual, desired)\n jtu.check_eq(actual, desired)\n\n def test_heterogeneous_jacrev(self):\n # See https://github.com/google/jax/issues/7157\n # See https://github.com/google/jax/issues/7780\n x = np.array([2.0], dtype=np.float16)\n y = np.array([3.0], dtype=np.float32)\n a = (x, y)\n\n def f(tup):\n jtu._check_dtypes_match(tup, a)\n x, y = tup\n return x, y, x + y\n\n actual = jacrev(f)(a)\n desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float32)),\n (np.array(0., dtype=np.float16), np.array(1., dtype=np.float32)),\n (np.array(1., dtype=np.float16), np.array(1., dtype=np.float32)))\n jtu._check_dtypes_match(actual, desired)\n jtu.check_eq(actual, desired)\n\n def test_heterogeneous_grad(self):\n # See https://github.com/google/jax/issues/7157\n x = np.array(1.0+1j)\n y = np.array(2.0)\n a = (x, y)\n\n def f(tup):\n jtu._check_dtypes_match(tup, a)\n x, y = tup\n return jnp.square(jnp.abs(x)) + y\n\n actual = grad(f)(a)\n desired = (np.array(2 - 2j), np.array(1.))\n jtu._check_dtypes_match(actual, desired)\n jtu.check_eq(actual, desired)\n\n def test_complex_input_jacfwd_raises_error(self):\n self.assertRaises(TypeError, lambda: jacfwd(lambda x: jnp.sin(x))(1 + 2j))\n\n def test_legacy_devicearray_repr(self):\n dx = device_put(3.)\n str(dx.item()) # doesn't crash\n\n def test_devicearray_repr(self):\n x = device_put(jnp.zeros(3))\n self.assertIsInstance(x, xla.DeviceArray)\n repr(x) # doesn't crash\n\n x = device_put(jnp.ones(3) + 1j * jnp.ones(3))\n self.assertIsInstance(x, xla.DeviceArray)\n repr(x) # doesn't crash\n\n def test_devicearray_delete(self):\n x = device_put(1.)\n x.delete()\n self.assertRaisesRegex(RuntimeError, \"DeviceArray has been deleted.\",\n lambda: repr(x))\n\n def test_devicearray_block_until_ready(self):\n x = device_put(1.)\n y = x.block_until_ready()\n # Tests mostly that block_until_ready() does not produce an error.\n self.assertTrue(y is x)\n\n def test_devicearray_weakref_friendly(self):\n x = device_put(1.)\n y = weakref.ref(x)\n self.assertEqual(y(), 1.)\n del x\n self.assertIsNone(y())\n\n def test_namedtuple_transparency(self):\n # See https://github.com/google/jax/issues/446\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n\n def f(pt):\n return jnp.sqrt(pt.x ** 2 + pt.y ** 2)\n\n pt = 
Point(1., 2.)\n\n f(pt) # doesn't crash\n g = api.grad(f)(pt)\n self.assertIsInstance(g, Point)\n\n f_jit = api.jit(f)\n self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)\n\n def test_namedtuple_subclass_transparency(self):\n # See https://github.com/google/jax/issues/806\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n\n class ZeroPoint(Point):\n def is_zero(self):\n return (self.x == 0) and (self.y == 0)\n\n pt = ZeroPoint(0., 0.)\n\n def f(pt):\n return 0. if pt.is_zero() else jnp.sqrt(pt.x ** 2 + pt.y ** 2)\n\n f(pt) # doesn't crash\n _ = api.grad(f)(pt)\n self.assertIsInstance(pt, ZeroPoint)\n\n @parameterized.parameters(1, 2, 3)\n def test_shape_dtype_struct(self, i):\n s = api.ShapeDtypeStruct(shape=(i, 2, 3), dtype=jnp.float32)\n self.assertEqual(s.shape, (i, 2, 3))\n self.assertEqual(s.dtype, jnp.float32)\n self.assertEqual(s.ndim, 3)\n self.assertEqual(s.size, i * 2 * 3)\n self.assertLen(s, i)\n for f in (str, repr):\n self.assertEqual(\n f(s), \"ShapeDtypeStruct(shape=({}, 2, 3), dtype=float32)\".format(i))\n\n def test_shape_dtype_struct_scalar(self):\n s = api.ShapeDtypeStruct(shape=(), dtype=jnp.float32)\n self.assertEmpty(s.shape)\n self.assertEqual(s.size, 1)\n self.assertEqual(s.ndim, 0)\n with self.assertRaisesRegex(TypeError, \"len[(][)] of unsized object\"):\n _ = len(s)\n\n def test_shape_dtype_struct_hash(self):\n s1 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)\n s2 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)\n s3 = api.ShapeDtypeStruct(shape=(2, 4), dtype=jnp.float32)\n self.assertEqual(hash(s1), hash(s2))\n self.assertNotEqual(hash(s1), hash(s3))\n\n def test_eval_shape(self):\n def fun(x, y):\n return jnp.tanh(jnp.dot(x, y) + 3.)\n\n x = jnp.ones((2, 3))\n y = jnp.ones((3, 4))\n out_shape = api.eval_shape(fun, x, y)\n\n self.assertEqual(out_shape.shape, (2, 4))\n\n def test_eval_shape_constants(self):\n def fun():\n x = jnp.ones((2, 3))\n y = jnp.ones((3, 4))\n return jnp.tanh(jnp.dot(x, y) + 3.)\n\n out_shape = api.eval_shape(fun)\n\n self.assertEqual(out_shape.shape, (2, 4))\n\n def test_eval_shape_tuple_unpacking(self):\n def fun(x, y):\n a, b = x\n return a + b + y\n\n x = (jnp.ones(2), jnp.ones(2))\n y = 3.\n out_shape = api.eval_shape(fun, x, y)\n\n self.assertEqual(out_shape.shape, (2,))\n\n def test_eval_shape_tuple_itemgetting(self):\n def fun(x, y):\n return x[0] + x[1] + y\n\n x = (jnp.ones(2), jnp.ones(2))\n y = 3.\n out_shape = api.eval_shape(fun, x, y)\n\n self.assertEqual(out_shape.shape, (2,))\n\n def test_eval_shape_output_dict(self):\n def fun(x, y):\n return {'hi': x[0] + x[1] + y}\n\n x = (jnp.ones(2), jnp.ones(2))\n y = 3.\n out_shape = api.eval_shape(fun, x, y)\n out_shape = tree_util.tree_map(np.shape, out_shape)\n\n self.assertEqual(out_shape, {'hi': (2,)})\n\n def test_eval_shape_shape_error(self):\n def fun(x, y):\n return jnp.tanh(jnp.dot(x, y) + 3.)\n\n x = jnp.ones((3, 3))\n y = jnp.ones((4, 4))\n\n self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))\n\n def test_eval_shape_duck_typing(self):\n def fun(A, b, x):\n return jnp.dot(A, x) + b\n\n class MyArgArray(object):\n def __init__(self, shape, dtype):\n self.shape = shape\n self.dtype = np.dtype(dtype)\n\n A = MyArgArray((3, 4), jnp.float32)\n b = MyArgArray((5,), jnp.float32)\n x = MyArgArray((4, 5), jnp.float32)\n out_shape = api.eval_shape(fun, A, b, x)\n\n self.assertEqual(out_shape.shape, (3, 5))\n\n def test_eval_shape_duck_typing2(self):\n # https://github.com/google/jax/issues/5683\n class EasyDict(dict):\n def 
__init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__dict__ = self\n\n x = EasyDict(shape=(3,), dtype=np.dtype('float32'))\n out_shape = api.eval_shape(lambda x: x, x) # doesn't crash\n self.assertEqual(out_shape.shape, (3,))\n\n def test_eval_shape_names(self):\n def fun(x, y):\n return lax.psum(x, 'i') + y\n\n class MyArgArray(object):\n def __init__(self, shape, dtype, named_shape):\n self.shape = shape\n self.dtype = jnp.dtype(dtype)\n self.named_shape = named_shape\n\n x = MyArgArray((3, 2), jnp.float32, {'i': 10})\n y = MyArgArray((3, 2), jnp.float32, {'j': 5})\n with core.extend_axis_env('i', 10, None):\n with core.extend_axis_env('j', 5, None):\n out_shape = api.eval_shape(fun, x, y)\n\n self.assertEqual(out_shape.named_shape, {'j': 5})\n\n def test_issue_871(self):\n T = jnp.array([[1., 2.], [3., 4.], [5., 6.]])\n x = jnp.array([1, 2, 3])\n msg = (\"linearized function called on tangent values inconsistent with \"\n \"the original primal values\")\n\n y, f_jvp = api.linearize(jnp.sum, x)\n with self.assertRaisesRegex(ValueError, msg):\n f_jvp(T)\n\n y, f_jvp = api.linearize(api.jit(jnp.sum), x)\n with self.assertRaisesRegex(ValueError, msg):\n f_jvp(T)\n\n def test_grad_of_int_errors(self):\n # Errors without allow_int=True\n dfn = grad(lambda x: x ** 2)\n self.assertRaisesRegex(\n TypeError,\n (r\"grad requires real- or complex-valued inputs \\(input dtype that is a \"\n r\"sub-dtype of np.inexact\\), but got int.*.\"),\n lambda: dfn(3))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_jvp_of_int_identity(self):\n primals = (1,)\n tangents = (np.zeros(shape=(), dtype=float0),)\n\n _, out = api.jvp(lambda x: x, primals, tangents)\n self.assertEqual(out, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_jvp_of_int_add(self):\n primals = (2,)\n tangents = (np.zeros(shape=(), dtype=float0),)\n\n _, out_tangent = api.jvp(lambda x: x+1, primals, tangents)\n self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_jit_jvp_of_int(self):\n primals = (2,)\n tangents = (np.zeros(shape=(), dtype=float0),)\n\n _, out_tangent = api.jvp(jax.jit(lambda x: x+1), primals, tangents)\n self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_vjp_of_int_index(self):\n primal, fn_vjp = api.vjp(lambda x, i: x[i], np.ones(2)*2, 1)\n tangent_x, tangent_i = fn_vjp(1.)\n self.assertEqual(primal, 2.)\n self.assertAllClose(tangent_x, jnp.array([0., 1.]))\n self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_vjp_of_int_shapes(self):\n out, fn_vjp = api.vjp(lambda x: lax.reshape(x, (2, 2)), np.ones((4, 1),\n dtype=int))\n tangent, = fn_vjp(out)\n self.assertArraysEqual(tangent, np.zeros(shape=(4, 1), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_jit_vjp_of_int(self):\n primal, fn_vjp = api.vjp(lambda x, y: x+y, 2, 1)\n tangent_x, tangent_i = jax.jit(fn_vjp)(1)\n self.assertEqual(primal, 3)\n self.assertEqual(tangent_x, np.zeros(shape=(), dtype=float0))\n self.assertEqual(tangent_i, 
np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_vjp_of_int_fulllike(self):\n # Regression test for tangent and cotangent mismatch in convert_element_type\n # transpose rule wrt a ConstVar\n f = lax.full_like\n out, vjp = api.vjp(f, np.zeros((2, 2)), 1)\n self.assertAllClose(out, jnp.ones((2, 2)))\n tangent_x, tangent_y = vjp(out)\n self.assertAllClose(tangent_x, jnp.zeros((2, 2)))\n self.assertEqual(tangent_y, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_grad_of_int(self):\n # Need real-valued output, but testing integer input.\n out = api.grad(lambda x: x+0., allow_int=True)(1)\n self.assertEqual(out, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_grad_of_bool(self):\n def cond(pred):\n return lax.cond(pred, lambda _: 1., lambda _: 2., 1.)\n value, grd = api.value_and_grad(cond, allow_int=True)(True)\n self.assertEqual(value, 1.)\n self.assertEqual(grd, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_grad_of_int_index(self):\n grad_x, grad_i = api.grad(lambda x, i: x[i], argnums=(0, 1),\n allow_int=True)(np.ones(2), 1)\n self.assertAllClose(grad_x, jnp.array([0., 1.]))\n self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_jit_grad_of_int(self):\n grad_f = api.grad(lambda x, i: x[i], argnums=(0, 1), allow_int=True)\n grad_x, grad_i = jax.jit(grad_f)(np.ones(2), 1)\n self.assertAllClose(grad_x, jnp.array([0., 1.]))\n self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_float0_reshape(self):\n # dtype-agnostic operations are supported\n float0_array = jax.grad(lambda x: jnp.sum(x+0.),\n allow_int=True)(np.ones((2, 4), dtype=int))\n\n self.assertArraysEqual(float0_array.reshape((4, 2)),\n np.zeros((4, 2), dtype=float0))\n self.assertArraysEqual(float0_array.transpose(),\n np.zeros((4, 2), dtype=float0))\n\n def test_float0_error(self):\n # float0 is incompatible with other dtypes\n float0_array = jax.grad(lambda x: x+0., allow_int=True)(1)\n error_text = \"float0s do not support any operations by design\"\n\n with self.assertRaisesRegex(TypeError, error_text):\n # dispatch via DeviceArray\n _ = float0_array + jnp.zeros(())\n\n with self.assertRaisesRegex(TypeError, error_text):\n # dispatch via lax\n _ = lax.add(float0_array, jnp.zeros(()))\n\n def test_grad_complex_result_errors(self):\n dfn = grad(lambda x: x ** 2 + 1j)\n self.assertRaisesRegex(\n TypeError,\n (r\"grad requires real-valued outputs \\(output dtype that is a \"\n r\"sub-dtype of np.floating\\), but got complex.*\"),\n lambda: dfn(3.))\n\n def test_holomorphic_grad_of_float_errors(self):\n dfn = grad(lambda x: x ** 2, holomorphic=True)\n self.assertRaisesRegex(\n TypeError,\n (r\"grad with holomorphic=True requires inputs with complex dtype, \"\n r\"but got float.*\"),\n lambda: dfn(3.))\n\n def test_holomorphic_jacrev_of_float_errors(self):\n dfn = jacrev(lambda x: x ** 2, holomorphic=True)\n self.assertRaisesRegex(\n TypeError,\n (r\"jacrev with holomorphic=True requires inputs with complex dtype, \"\n 
r\"but got float.*\"),\n lambda: dfn(3.))\n\n def test_holomorphic_jacfwd_of_float_errors(self):\n dfn = jacfwd(lambda x: x ** 2, holomorphic=True)\n self.assertRaisesRegex(\n TypeError,\n (r\"jacfwd with holomorphic=True requires inputs with complex dtype, \"\n r\"but got float.*\"),\n lambda: dfn(3.))\n\n def test_jacfwd_of_complex_errors(self):\n dfn = jacfwd(lambda x: x ** 2)\n self.assertRaisesRegex(\n TypeError,\n (r\"jacfwd requires real-valued inputs \\(input dtype that is a \"\n r\"sub-dtype of np.floating\\), but got complex.*\"),\n lambda: dfn(3. + 1j))\n\n def test_xla_computation(self):\n # these tests basically check the examples in the xla_computation docstring\n\n def e(x):\n return jnp.sin(jnp.cos(x))\n c = api.xla_computation(e)(2.)\n self.assertIn('cosine', c.as_hlo_text())\n self.assertIn('sine', c.as_hlo_text())\n\n def f(x):\n return x - lax.psum(x, 'i')\n axis_env = [('i', 4)]\n c = api.xla_computation(f, axis_env=axis_env)(2)\n self.assertIn('all-reduce', c.as_hlo_text())\n self.assertIn('replica_groups={{0,1,2,3}}', c.as_hlo_text())\n\n def g(x):\n rowsum = lax.psum(x, 'i')\n colsum = lax.psum(x, 'j')\n allsum = lax.psum(x, ('i', 'j'))\n return rowsum, colsum, allsum\n axis_env = [('i', 4), ('j', 2)]\n c = api.xla_computation(g, axis_env=axis_env)(5.)\n self.assertIn('all-reduce', c.as_hlo_text())\n self.assertIn('replica_groups={{0,2,4,6},{1,3,5,7}}', c.as_hlo_text())\n self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())\n self.assertIn('replica_groups={{0,1,2,3,4,5,6,7}}', c.as_hlo_text())\n\n def h(x):\n rowsum = lax.psum(x, 'i', axis_index_groups=[[0, 1], [2, 3]])\n colsum = lax.psum(x, 'j')\n return rowsum, colsum\n axis_env = [('i', 4), ('j', 2)]\n c = api.xla_computation(h, axis_env=axis_env)(5.)\n self.assertIn('all-reduce', c.as_hlo_text())\n self.assertIn('replica_groups={{0,2},{4,6},{1,3},{5,7}}', c.as_hlo_text())\n self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())\n\n def test_xla_computation_args(self):\n def foo(x, y, z):\n return x + y + z\n\n c = api.xla_computation(foo)(1., 2., 3.)\n self.assertEqual(len(c.program_shape().parameter_shapes()), 3)\n\n c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)\n param_shapes = c.program_shape().parameter_shapes()\n self.assertEqual(len(param_shapes), 1)\n self.assertEqual(param_shapes[0].xla_element_type(),\n xla_client.PrimitiveType.TUPLE)\n\n def test_xla_computation_duck_typing(self):\n def foo(x, y, z):\n return x + y + z\n\n x = jax.ShapeDtypeStruct((), np.float32)\n y = jax.ShapeDtypeStruct((), np.float32)\n z = jax.ShapeDtypeStruct((), np.float32)\n\n c = api.xla_computation(foo)(x, y, z)\n self.assertEqual(len(c.program_shape().parameter_shapes()), 3)\n\n c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)\n param_shapes = c.program_shape().parameter_shapes()\n self.assertEqual(len(param_shapes), 1)\n self.assertEqual(param_shapes[0].xla_element_type(),\n xla_client.PrimitiveType.TUPLE)\n\n def test_staging_out_multi_replica(self):\n def f(x):\n return api.pmap(jnp.mean)(x)\n xla_comp = api.xla_computation(f)\n xla_comp(jnp.arange(8)).as_hlo_text() # doesn't crash\n\n def test_xla_computation_instantiate_constant_outputs(self):\n def f():\n return jnp.zeros((3, 4))\n\n xla_comp = api.xla_computation(f)()\n out_shape, = xla_comp.program_shape().result_shape().tuple_shapes()\n self.assertEqual(out_shape.dimensions(), (3, 4))\n\n def test_xla_computation_static_argnums(self):\n def f(x, y):\n return x + y\n\n xla_comp = api.xla_computation(f, 
static_argnums=(1,))(2, 3)\n hlo_text = xla_comp.as_hlo_text()\n self.assertIn(\"constant(3)\", hlo_text)\n # The static arguments should be removed from the function being compiled,\n # thus the function should have only a single argument.\n self.assertIn(\"parameter.1\", hlo_text)\n self.assertNotIn(\"parameter.2\", hlo_text)\n\n def test_xla_computation_return_shape(self):\n _, shape_tree = api.xla_computation(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),\n return_shape=True)(np.int32(1))\n expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),\n api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))\n self.assertEqual(shape_tree, expected)\n\n def test_xla_computation_partitioned(self):\n def f(x, y):\n return jnp.dot(x, y) + 1\n\n x = jax.ShapeDtypeStruct((8, 8), np.float32)\n y = jax.ShapeDtypeStruct((8, 16), np.float32)\n xla_comp = api.xla_computation(f, in_parts=(P(2, 2), None),\n out_parts=P(4, 1))(x, y)\n hlo_text = xla_comp.as_hlo_text()\n self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)\n self.assertIn('sharding={replicated}', hlo_text)\n self.assertIn('sharding={{devices=[4,1]0,1,2,3}}', hlo_text)\n\n def test_xla_computation_replicated_and_partitioned(self):\n def f(x, y):\n return jnp.dot(x, y), lax.psum(x, 'i')\n\n x = jax.ShapeDtypeStruct((8, 8), np.float32)\n y = jax.ShapeDtypeStruct((8, 16), np.float32)\n axis_env = [('i', 4)]\n xla_comp = api.xla_computation(f, axis_env=axis_env,\n in_parts=(P(2, 2), None),\n out_parts=(P(4, 1), None))(x, y)\n hlo_text = xla_comp.as_hlo_text()\n self.assertIn('all-reduce', hlo_text)\n self.assertIn('replica_groups={{0,1,2,3}}', hlo_text)\n self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)\n self.assertIn('sharding={replicated}', hlo_text)\n self.assertIn('sharding={{devices=[4,1]0,1,2,3}, {replicated}}', hlo_text)\n\n def test_xla_computation_psum_constant(self):\n f = lambda: jax.lax.psum(1, \"i\")\n api.xla_computation(f, axis_env=[(\"i\", 2)])() # doesn't crash\n\n @jtu.skip_on_devices(\"cpu\", \"gpu\")\n @jtu.ignore_warning(message=\"Some donated buffers were not usable\")\n def test_xla_computation_donate_argnums(self):\n api.xla_computation(lambda x: None, donate_argnums=(0,))(3) # doesn't crash\n\n def test_xla_computation_lower_fun_axis_env(self):\n axis_name = 'i'\n def fn(x):\n y = lax.all_gather(\n x, axis_name=axis_name)\n return y * lax.axis_index(axis_name).astype(jnp.float32)\n\n input_x = jnp.ones((5,6,4))\n axis_env = [(axis_name, api.local_device_count())]\n _ = api.xla_computation(fn, axis_env=axis_env, backend='cpu')(input_x)\n\n def test_xla_computation_axis_env(self):\n def fn(x):\n z = x * jax.lax.axis_index('i').astype(jnp.float32)\n def inner_fn(carry, a):\n return carry + a, ()\n return jax.lax.scan(inner_fn, jnp.zeros_like(z[0]), z)\n\n x = jnp.ones((5, 6, 4))\n _ = jax.xla_computation(fn, axis_env=(('i', 8),), backend='cpu')(x)\n\n def test_concurrent_device_get_and_put(self):\n def f(x):\n for _ in range(100):\n y = jax.device_put(x)\n x = jax.device_get(y)\n return x\n\n xs = [np.random.randn(i) for i in range(10)]\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = [executor.submit(partial(f, x)) for x in xs]\n ys = [f.result() for f in futures]\n for x, y in zip(xs, ys):\n self.assertAllClose(x, y)\n\n def test_dtype_warning(self):\n # cf. 
issue #1230\n if config.x64_enabled:\n raise unittest.SkipTest(\"test only applies when x64 is disabled\")\n\n def check_warning(warn, nowarn):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n nowarn() # get rid of extra startup warning\n\n prev_len = len(w)\n nowarn()\n assert len(w) == prev_len\n\n warn()\n assert len(w) > 0\n msg = str(w[-1].message)\n expected_prefix = \"Explicitly requested dtype \"\n self.assertEqual(expected_prefix, msg[:len(expected_prefix)])\n\n prev_len = len(w)\n nowarn()\n assert len(w) == prev_len\n\n check_warning(lambda: jnp.array([1, 2, 3], dtype=\"float64\"),\n lambda: jnp.array([1, 2, 3], dtype=\"float32\"))\n check_warning(lambda: jnp.array([1, 2, 3], dtype=\"float64\"),\n lambda: jnp.array([1, 2, 3], dtype=float))\n check_warning(lambda: jnp.ones(3, dtype=np.float64),\n lambda: jnp.ones(3))\n check_warning(lambda: jnp.ones(3, dtype=np.float64),\n lambda: jnp.ones(3, dtype=float))\n check_warning(lambda: jnp.ones_like(3, dtype=np.int64),\n lambda: jnp.ones_like(3, dtype=np.int32))\n check_warning(lambda: jnp.zeros(3, dtype=\"int64\"),\n lambda: jnp.zeros(3, dtype=\"int32\"))\n check_warning(lambda: jnp.zeros_like(3, dtype=\"float64\"),\n lambda: jnp.zeros_like(3, dtype=\"float32\"))\n check_warning(lambda: jnp.full((2, 3), 1, dtype=\"int64\"),\n lambda: jnp.full((2, 3), 1))\n check_warning(lambda: jnp.ones(3).astype(\"float64\"),\n lambda: jnp.ones(3).astype(\"float32\"))\n check_warning(lambda: jnp.eye(3, dtype=np.float64),\n lambda: jnp.eye(3))\n check_warning(lambda: jnp.arange(3, dtype=np.float64),\n lambda: jnp.arange(3, dtype=np.float32))\n check_warning(lambda: jnp.linspace(0, 3, dtype=np.float64),\n lambda: jnp.linspace(0, 3, dtype=np.float32))\n check_warning(lambda: jnp.tri(2, dtype=\"float64\"),\n lambda: jnp.tri(2, dtype=\"float32\"))\n check_warning(lambda: jnp.arange(1).astype(\"float64\"),\n lambda: jnp.arange(1).astype(float))\n check_warning(lambda: jnp.arange(1.0).astype(\"int64\"),\n lambda: jnp.arange(1.0).astype(int))\n\n def test_error_for_invalid_dtype(self):\n with self.assertRaisesRegex(TypeError, \".*not a valid JAX array type.*\"):\n lax.add(jnp.array(7), np.array(\"hello\"))\n\n def test_vmap_preserves_docstr(self):\n def superfun(a):\n \"\"\"Does things with stuff.\"\"\"\n pass\n\n self.assertRegex(api.vmap(superfun).__doc__, \"\\n\".join([\n \"Vectorized version of superfun.*\",\n \"\",\n \"Original documentation:\",\n \"\",\n superfun.__doc__,\n ]))\n\n def test_vmap_in_axes_list(self):\n # https://github.com/google/jax/issues/2367\n dictionary = {'a': 5., 'b': jnp.ones(2)}\n x = jnp.zeros(3)\n y = jnp.arange(3.)\n\n\n def f(dct, x, y):\n return dct['a'] + dct['b'] + x + y\n\n out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y)\n out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y)\n self.assertAllClose(out1, out2)\n\n def test_vmap_in_axes_tree_prefix_error(self):\n # https://github.com/google/jax/issues/795\n value_tree = jnp.ones(3)\n self.assertRaisesRegex(\n ValueError,\n \"vmap in_axes specification must be a tree prefix of the corresponding \"\n r\"value, got specification \\(0, 0\\) for value tree \"\n + re.escape(f\"{tree_util.tree_structure((value_tree,))}.\"),\n lambda: api.vmap(lambda x: x, in_axes=(0, 0))(value_tree)\n )\n\n def test_vmap_in_axes_leaf_types(self):\n with self.assertRaisesRegex(\n TypeError, r\"vmap in_axes must be an int, None, or .*\"):\n api.vmap(lambda x: x, in_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))\n\n def 
test_vmap_out_axes_leaf_types(self):\n with self.assertRaisesRegex(\n TypeError, r\"vmap out_axes must be an int, None, or .*\"):\n api.vmap(lambda x: x, out_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))\n\n def test_vmap_unbatched_object_passthrough_issue_183(self):\n # https://github.com/google/jax/issues/183\n fun = lambda f, x: f(x)\n vfun = api.vmap(fun, (None, 0))\n ans = vfun(lambda x: x + 1, jnp.arange(3))\n self.assertAllClose(ans, np.arange(1, 4), check_dtypes=False)\n\n def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):\n # https://github.com/google/jax/issues/705\n def h(a, b):\n return jnp.sum(a) + jnp.sum(b)\n\n X = np.random.randn(10, 4)\n U = np.random.randn(10, 2)\n\n with self.assertRaisesRegex(\n ValueError,\n \"vmap got inconsistent sizes for array axes to be mapped:\\n\"\n r\"arg 0 has shape \\(10, 4\\) and axis 0 is to be mapped\" \"\\n\"\n r\"arg 1 has shape \\(10, 2\\) and axis 1 is to be mapped\" \"\\n\"\n \"so\\n\"\n \"arg 0 has an axis to be mapped of size 10\\n\"\n \"arg 1 has an axis to be mapped of size 2\"):\n api.vmap(h, in_axes=(0, 1))(X, U)\n\n with self.assertRaisesRegex(\n ValueError,\n \"vmap got inconsistent sizes for array axes to be mapped:\\n\"\n r\"arg 0 has shape \\(10, 4\\) and axis 0 is to be mapped\" \"\\n\"\n r\"arg 1 has shape \\(10, 2\\) and axis 1 is to be mapped\" \"\\n\"\n r\"arg 2 has shape \\(10, 4\\) and axis 0 is to be mapped\" \"\\n\"\n \"so\\n\"\n \"args 0, 2 have axes to be mapped of size 10\\n\"\n \"arg 1 has an axis to be mapped of size 2\"):\n api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X)\n\n with self.assertRaisesRegex(\n ValueError,\n \"vmap got inconsistent sizes for array axes to be mapped:\\n\"\n \"the tree of axis sizes is:\\n\"\n r\"\\(10, \\[2, 2\\]\\)\"):\n api.vmap(h, in_axes=(0, 1))(X, [U, U])\n\n error = (r\"vmap was requested to map its argument along axis 0, which \"\n r\"implies that its rank should be at least 1, but is only 0 \"\n r\"\\(its shape is \\(\\)\\)\")\n with self.assertRaisesRegex(ValueError, error):\n # The mapped inputs cannot be scalars\n api.vmap(lambda x: x)(1.)\n\n with self.assertRaisesRegex(\n ValueError, \"vmap must have at least one non-None value in in_axes\"):\n # If the output is mapped, there must be a non-None in_axes\n api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.]))\n\n error = (r\"vmap was requested to map its argument along axis 1, which \"\n r\"implies that its rank should be at least 2, but is only 1 \"\n r\"\\(its shape is \\(2,\\)\\)\")\n with self.assertRaisesRegex(ValueError, error):\n api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.]))\n\n # Error is: TypeError: only integer scalar arrays can be converted to a scalar index\n with self.assertRaisesRegex(\n ValueError,\n \"vmap out_axes specification must be a tree prefix of the \"\n \"corresponding value.*\"):\n api.vmap(lambda x: x, in_axes=0, out_axes=(2, 3))(jnp.array([1., 2.]))\n\n with self.assertRaisesRegex(\n ValueError,\n r\"vmap has mapped output \\(axis_name=foo\\) but out_axes is None\"):\n # If the output is mapped (user-named axis), then there must be some\n # out_axes specified.\n api.vmap(lambda x: x, out_axes=None, axis_name=\"foo\")(jnp.array([1., 2.]))\n\n with self.assertRaisesRegex(\n ValueError,\n \"vmap has mapped output but out_axes is None\"):\n # If the output is mapped (unnamed axis), then there must be some out_axes\n # specified.\n api.vmap(lambda x: x, out_axes=None)(jnp.array([1., 2.]))\n\n def test_vmap_structured_in_axes(self):\n\n A, B, C, D = 2, 3, 4, 
5\n K = 6 # batch size\n x = np.ones((K, A, B)) # batch axis in different locations\n y = np.ones((B, K, C))\n z = np.ones((C, D, K))\n\n def foo(tree_arg):\n x, (y, z) = tree_arg\n return jnp.dot(x, jnp.dot(y, z))\n\n tree = (x, (y, z))\n vfoo = api.vmap(foo, in_axes=((0, (1, 2)),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n tree = (x, Point(y, z))\n vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n def foo(tree_arg):\n x, dct = tree_arg\n y, z = dct['a'], dct['b']\n return jnp.dot(x, jnp.dot(y, z))\n\n tree = (x, {'a': y, 'b': z})\n vfoo = api.vmap(foo, in_axes=((0, {'a': 1, 'b': 2}),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n tree = (x, collections.OrderedDict([('a', y), ('b', z)]))\n vfoo = api.vmap(\n foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n def test_vmap_in_axes_bool_error(self):\n # https://github.com/google/jax/issues/6372\n with self.assertRaisesRegex(TypeError, \"must be an int\"):\n api.vmap(lambda x: x, in_axes=False)(jnp.zeros(3))\n\n def test_pmap_in_axes_bool_error(self):\n # https://github.com/google/jax/issues/6372\n with self.assertRaisesRegex(TypeError, \"must be an int\"):\n api.pmap(lambda x: x, in_axes=False)(jnp.zeros(1))\n\n def test_pmap_global_cache(self):\n def f(x, y):\n return x, y\n\n x = np.ones((1, 1, 1))\n\n # All defaults\n with jtu.assert_num_jit_and_pmap_compilations(1):\n for _ in range(2):\n api.pmap(f)(x, x)\n\n # With axis name\n with jtu.assert_num_jit_and_pmap_compilations(1):\n for _ in range(2):\n api.pmap(f, 'i')(x, x)\n\n # With in_axes and out_axes\n for x_in, y_in, x_out, y_out in it.product(*((0, 1, 2) for _ in range(4))):\n with jtu.assert_num_jit_and_pmap_compilations(1):\n for _ in range(2):\n api.pmap(f, 'i', in_axes=(x_in, y_in), out_axes=(x_out, y_out))(x, x)\n\n # Forward-mode AD on the outside\n with jtu.assert_num_jit_and_pmap_compilations(1):\n for _ in range(2):\n api.jvp(api.pmap(f), (x, x), (x, x))\n\n # Reverse-mode AD on the outside. 
One compilation for forward, one for backward.\n with jtu.assert_num_jit_and_pmap_compilations(2):\n for _ in range(2):\n api.vjp(api.pmap(f), x, x)[1]((x, x))\n\n def test_device_array_repr(self):\n rep = jnp.ones(()) + 1.\n self.assertStartsWith(repr(rep), \"DeviceArray\")\n\n def test_device_array_hash(self):\n rep = jnp.ones(()) + 1.\n self.assertIsInstance(rep, jax.interpreters.xla.DeviceArray)\n self.assertNotIsInstance(rep, collections.abc.Hashable)\n with self.assertRaisesRegex(TypeError, 'unhashable type'):\n hash(rep)\n\n def test_grad_without_enough_args_error_message(self):\n # https://github.com/google/jax/issues/1696\n def f(x, y): return x + y\n df = api.grad(f, argnums=0)\n self.assertRaisesRegex(\n TypeError,\n \"differentiating with respect to argnums=0 requires at least 1 \"\n \"positional arguments to be passed by the caller, but got only 0 \"\n \"positional arguments.\",\n lambda: partial(df, x=0.)(y=1.))\n\n def test_grad_of_jit_compilation_caching(self):\n if not hasattr(self, \"assertLogs\"):\n raise unittest.SkipTest(\"test requires assertLogs (python 3)\")\n\n lax.add(1, 2) # make sure some initial warnings are already printed\n\n sin = api.jit(jnp.sin)\n\n prev_level = logging.get_verbosity()\n try:\n logging.set_verbosity('DEBUG')\n with self.assertLogs(level=logging.DEBUG) as l:\n ans1 = api.grad(sin)(2.)\n ans2 = api.grad(sin)(3.)\n finally:\n logging.set_verbosity(prev_level)\n self.assertLen(l.output, 2)\n\n self.assertAllClose(ans1, np.cos(2.), check_dtypes=False)\n self.assertAllClose(ans2, np.cos(3.), check_dtypes=False)\n\n def test_grad_of_jit_compilation_caching2(self):\n # Like the above test, but instead of logging use our compile counters.\n @api.jit\n def f(x):\n return jnp.sin(x)\n\n with jtu.count_jit_and_pmap_compiles() as count: # noqa: F841\n _ = jax.grad(f)(3.)\n self.assertEqual(count[0], 2) # one for fwd, one for bwd\n\n with jtu.count_jit_and_pmap_compiles() as count: # noqa: F841\n _ = jax.grad(f)(3.)\n _ = jax.grad(f)(4.)\n self.assertEqual(count[0], 0) # cache hits on both fwd and bwd\n\n def test_grad_does_not_unflatten_tree_with_none(self):\n # https://github.com/google/jax/issues/7546\n class CustomNode(list):\n pass\n\n def unflatten(unused_aux_data, children):\n self.assertIsNotNone(children[0])\n return CustomNode(children)\n\n tree_util.register_pytree_node(CustomNode, lambda x: (x, None), unflatten)\n grad(lambda x: x[0])(CustomNode([0.]))\n\n def test_trivial_computations(self):\n x = jnp.array([1, 2, 3])\n y = api.jit(lambda x: x)(x)\n self.assertIs(x, y)\n\n z1, z2 = api.jit(lambda x: (x, x))(x)\n self.assertIs(z1, z2)\n\n x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])\n z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)\n self.assertIs(z1, x2)\n self.assertIs(z3, x1)\n self.assertEqual(z2, 1)\n\n def test_nested_jit_hoisting(self):\n @api.jit\n def f(x, y):\n z = 2 * x\n return y + z, 3\n\n @api.jit\n def g(x):\n return f(2, x)\n\n jaxpr_subcomp = xla.jaxpr_subcomp\n\n jaxprs = []\n def jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs):\n jaxprs.append(jaxpr)\n return jaxpr_subcomp(c, jaxpr, *args, **kwargs)\n\n try:\n xla.jaxpr_subcomp = jaxpr_subcomp_and_collect\n ans = g(3)\n finally:\n xla.jaxpr_subcomp = jaxpr_subcomp\n\n self.assertEqual(ans, (7, 3))\n self.assertLen(jaxprs, 2)\n outer_jaxpr, inner_jaxpr = jaxprs\n\n self.assertLen(outer_jaxpr.eqns, 1)\n self.assertEqual(outer_jaxpr.eqns[0].primitive.name, 'xla_call')\n subjaxpr_1 = outer_jaxpr.eqns[0].params[\"call_jaxpr\"]\n self.assertEqual(str(subjaxpr_1), 
str(inner_jaxpr))\n self.assertLen(inner_jaxpr.eqns, 2)\n self.assertEqual(inner_jaxpr.eqns[-2].primitive.name, 'mul')\n self.assertEqual(inner_jaxpr.eqns[-1].primitive.name, 'add')\n\n def test_primitive_compilation_cache(self):\n with jtu.count_primitive_compiles() as count:\n lax.add(1, 2)\n lax.add(2, 3)\n self.assertEqual(count[0], 1)\n\n def test_arange_jit(self):\n # see https://github.com/google/jax/issues/553\n def fun(x):\n r = jnp.arange(x.shape[0])[x]\n return r\n\n jit(fun)(jnp.array([0, 1, 2], dtype=jnp.int32)) # doesn't crash\n\n def helper_save_tracer(self, x):\n self._saved_tracer = x\n return x\n\n def test_escaped_tracers_different_top_level_traces(self):\n api.jit(self.helper_save_tracer)(0.)\n with self.assertRaisesRegex(\n UnexpectedTracerError, \"Encountered an unexpected tracer\"):\n api.jit(lambda x: self._saved_tracer)(0.)\n\n def test_escaped_tracers_cant_lift_sublevels(self):\n api.jit(self.helper_save_tracer)(0.)\n with self.assertRaisesRegex(\n UnexpectedTracerError,\n re.compile(\n \"Encountered an unexpected tracer\",\n re.DOTALL)):\n api.jit(lambda x: x)(self._saved_tracer)\n\n def test_escaped_tracers_tracer_from_higher_level(self):\n api.grad(self.helper_save_tracer)(0.)\n with self.assertRaisesRegex(\n UnexpectedTracerError,\n re.compile(\n \"Encountered an unexpected tracer.*Tracer from a higher level\",\n re.DOTALL)):\n api.grad(lambda x: x)(self._saved_tracer)\n\n def test_escaped_tracers_incompatible_sublevel(self):\n def func1(x):\n api.jit(self.helper_save_tracer)(0.)\n # Use the tracer\n return x + self._saved_tracer\n with self.assertRaisesRegex(\n UnexpectedTracerError,\n re.compile(\"Encountered an unexpected tracer\",\n re.DOTALL)):\n api.jit(func1)(2.)\n\n def test_escaped_tracers_cant_lift(self):\n def func1(x):\n api.grad(self.helper_save_tracer)(0.)\n return x + self._saved_tracer\n with self.assertRaisesRegex(\n UnexpectedTracerError,\n re.compile(\"Encountered an unexpected tracer.*Can't lift\",\n re.DOTALL)):\n api.grad(func1)(2.)\n\n def test_escaped_tracers_not_among_input_tracers(self):\n def func1(x):\n api.grad(self.helper_save_tracer)(x)\n # Use the tracer\n return x + self._saved_tracer\n\n with self.assertRaisesRegex(\n UnexpectedTracerError,\n re.compile(\n \"Encountered an unexpected tracer.*Tracer not among input tracers\",\n re.DOTALL)):\n api.jit(func1)(2.)\n\n def test_escaped_tracer_omnistaging(self):\n count = 1\n\n @jit\n def f():\n nonlocal count\n count = jnp.add(count, 1)\n f() # leaked a tracer! but currently undetected\n\n def f(x, c):\n jnp.add(count, 1)\n return None, None\n\n @jit\n def g():\n lax.scan(f, None, None, length=2)\n\n with self.assertRaisesRegex(UnexpectedTracerError,\n \"was created on line\"):\n g()\n\n def test_escaped_tracer_omnistaging_top_trace(self):\n count = 1\n\n def f(_, __):\n nonlocal count\n count = jnp.add(count, 1)\n return None, None\n\n lax.scan(f, None, None, length=2) # leaked a tracer! 
(of level 1!)\n\n with self.assertRaisesRegex(UnexpectedTracerError,\n \"was created on line\"):\n # The following call will try and raise the ones array to the count tracer\n # level, which is no longer live.\n jax.jit(jnp.add)(jnp.ones(()), count)\n\n def test_escaped_tracer_transform_name(self):\n with self.assertRaisesRegex(UnexpectedTracerError,\n \"for jit\"):\n jax.jit(self.helper_save_tracer)(1)\n _ = self._saved_tracer+1\n\n with self.assertRaisesRegex(UnexpectedTracerError,\n \"for pmap\"):\n jax.pmap(self.helper_save_tracer)(jnp.ones((1, 2)))\n _ = self._saved_tracer+1\n\n with self.assertRaisesRegex(UnexpectedTracerError,\n \"for eval_shape\"):\n jax.eval_shape(self.helper_save_tracer, 1)\n _ = self._saved_tracer+1\n\n def test_escaped_tracer_shape_dtype(self):\n with self.assertRaisesRegex(core.UnexpectedTracerError,\n r\"shape \\(4, 3\\) and dtype int32\"):\n jax.jit(self.helper_save_tracer)(jnp.ones((4, 3), dtype=jnp.int32))\n _ = self._saved_tracer+1\n\n def test_pmap_static_kwarg_error_message(self):\n # https://github.com/google/jax/issues/3007\n def f(a, b):\n return a + b\n\n g = jax.pmap(f, static_broadcasted_argnums=(1,))\n\n msg = (r\"pmapped function has static_broadcasted_argnums=\\(1,\\) but was \"\n r\"called with only 1 positional argument. All static broadcasted \"\n r\"arguments must be passed positionally.\")\n with self.assertRaisesRegex(ValueError, msg):\n g(jnp.ones((1, 1)), b=1)\n\n def test_vmap_unmapped_last(self):\n @partial(jax.vmap, out_axes=-1)\n def f(x):\n return np.zeros((2,))\n f(np.zeros((5,)))\n\n # TODO(jakevdp): re-enable this if possible.\n @unittest.skipIf(True, \"broken by convert_element_type change.\")\n def test_xla_constant_dedup(self):\n y = np.array([7, 14], dtype=np.float32)\n def f(x):\n return x + y + y\n\n x = np.array([1, 2], dtype=np.float32)\n hlo_lines = jax.xla_computation(f)(x).as_hlo_text().split('\\n')\n hlo_lines = set([s.strip() for s in hlo_lines])\n self.assertIn('constant.1 = f32[2]{0} constant({7, 14})', hlo_lines)\n self.assertNotIn('constant.2 = f32[2]{0} constant({7, 14})', hlo_lines)\n\n def test_eval_context(self):\n @jit\n def f():\n with core.eval_context():\n assert jnp.add(1, 1) == 2\n\n f() # doesn't crash\n\n def test_concrete_error_because_arg_unary(self):\n @jax.jit\n def f(x):\n if x > 0:\n return x\n else:\n return 0\n\n msg = r\"on the value of the argument 'x'\"\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f(1)\n\n def test_concrete_error_because_arg_binary(self):\n @jax.jit\n def f(x, y):\n if x > y:\n return x\n else:\n return y\n\n msg = r\"on the values of the arguments 'x' and 'y'\"\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f(1, 2)\n\n def test_concrete_error_because_arg_ternary(self):\n @jax.jit\n def f(x, y, z):\n if x > z:\n return x\n else:\n return y\n\n msg = r\"on the values of the arguments 'x' and 'z'\"\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f(1, 2, 3)\n\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f(1, 2, z=3)\n\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f(1, y=2, z=3)\n\n def test_concrete_error_because_arg_varargs(self):\n @jax.jit\n def f(*args):\n x, y, z = args\n if x > z:\n return x\n else:\n return y\n\n msg = r\"on the values of the argument 'args'\"\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f(1, 2, 3)\n\n def test_concrete_error_because_arg_kwargs(self):\n @jax.jit\n def f(**kwargs):\n x, y, z = kwargs['x'], kwargs['y'], 
kwargs['z']\n if x > z:\n return x\n else:\n return y\n\n msg = r\"on the values of the argument 'kwargs'\"\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f(x=1, y=2, z=3)\n\n def test_concrete_error_because_arg_pytree(self):\n @jax.jit\n def f(xy, z):\n x, y = xy\n if x > 0:\n return x\n else:\n return y\n\n msg = r\"on the value of the argument 'xy'\"\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f((1, 2), z=3)\n\n def test_concrete_error_because_const(self):\n @jax.jit\n def f():\n assert jnp.add(1, 1) > 0\n\n msg = \"on these lines\"\n with self.assertRaisesRegex(core.ConcretizationTypeError, msg):\n f()\n\n def test_xla_computation_zeros_doesnt_device_put(self):\n with jtu.count_device_put() as count:\n api.xla_computation(lambda: jnp.zeros(3))()\n self.assertEqual(count[0], 0)\n\n def test_join_concrete_arrays_with_omnistaging(self):\n # https://github.com/google/jax/issues/4622\n x = jnp.array([1., 2., 3.])\n y = jnp.array([1., 2., 4.])\n\n @jit\n def f():\n core.lattice_join(core.ConcreteArray(x), core.ConcreteArray(y))\n\n f() # doesn't crash\n\n def test_linearize_aval_error(self):\n # https://github.com/google/jax/issues/4622\n f = lambda x: x\n\n # these should not error\n _, f_jvp = api.linearize(f, 1.)\n f_jvp(1.)\n _, f_jvp = api.linearize(f, np.ones(2, np.int32))\n f_jvp(np.zeros(2, float0))\n\n # these should error\n _, f_jvp = api.linearize(f, 1.)\n with self.assertRaisesRegex(ValueError, \"tangent values inconsistent\"):\n f_jvp(1)\n _, f_jvp = api.linearize(f, np.ones(2, np.int32))\n with self.assertRaisesRegex(ValueError, \"tangent values inconsistent\"):\n f_jvp(np.ones(2, np.int32))\n\n def test_grad_of_token_consuming_primitive(self):\n # https://github.com/google/jax/issues/5463\n tokentest_p = core.Primitive(\"tokentest\")\n tokentest_p.def_impl(partial(xla.apply_primitive, tokentest_p))\n tokentest_p.def_abstract_eval(lambda x, y: x)\n xla.translations[tokentest_p] = lambda c, x, y: x\n ad.defjvp(tokentest_p, (lambda g, x, token: x), None)\n\n token = jax.lax.create_token(123)\n arr = jnp.ones((3, 2))\n res, vjp_fun = jax.vjp(lambda x: tokentest_p.bind(x, token), arr)\n # Should not crash.\n vjp_fun(arr)\n\n def test_jit_returning_token(self):\n x = jax.jit(jax.lax.create_token)(1.0)\n self.assertIsInstance(x, jax.interpreters.xla.Token)\n\n def test_leak_checker_catches_a_jit_leak(self):\n with jax.checking_leaks():\n lst = []\n\n @jit\n def f(x):\n lst.append(x)\n return x\n\n with self.assertRaisesRegex(Exception, r\"Leaked\"):\n f(3)\n\n def test_leak_checker_catches_a_pmap_leak(self):\n with jax.checking_leaks():\n lst = []\n\n @api.pmap\n def f(x):\n lst.append(x)\n return x\n\n with self.assertRaisesRegex(Exception, r\"Leaked\"):\n f(np.ones(1))\n\n def test_leak_checker_catches_a_grad_leak(self):\n with jax.checking_leaks():\n lst = []\n\n def f(x):\n lst.append(x)\n return x\n\n with self.assertRaisesRegex(Exception, r\"Leaked trace\"):\n api.grad(f)(3.)\n\n def test_leak_checker_avoids_false_positives(self):\n with jax.checking_leaks():\n @jit\n def f(x):\n return x\n f(3) # doesn't crash\n api.vmap(f)(np.arange(3)) # doesn't crash\n api.grad(f)(3.) 
# doesn't crash\n\n @api.pmap\n def f(x):\n return x\n f(np.ones(1)) # doesn't crash\n api.vmap(f)(np.ones((1, 1))) # doesn't crash\n\n def test_leak_checker_catches_a_scan_leak(self):\n with jax.checking_leaks():\n lst = []\n\n to_scan = lambda c, x: (lst.append(c) or jnp.sin(c), None)\n\n with self.assertRaisesRegex(Exception, r\"Leaked trace\"):\n lax.scan(to_scan, 1., np.arange(3.))\n\n def test_leak_checker_avoids_false_positives_scan(self):\n with jax.checking_leaks():\n to_scan = lambda c, x: (jnp.sin(c), None)\n lax.scan(to_scan, 1., np.arange(3.)) # doesn't crash\n\n def test_leak_checker_avoids_false_positives_scan_jvp(self):\n with jax.checking_leaks():\n to_scan = lambda c, x: (c, None)\n\n def f(x):\n lax.scan(to_scan, x, None, length=1)\n api.jvp(f, (3.,), (1.,)) # doesn't crash\n\n def test_leak_checker_avoids_false_positives_scan_vmap(self):\n with jax.checking_leaks():\n to_scan = lambda c, _: (1., None)\n\n @api.vmap\n def f(x):\n lax.scan(to_scan, x, None, length=1)\n f(np.arange(5.)) # doesn't crash\n\n def test_leak_checker_avoids_false_positives_scan_vmap_2(self):\n with jax.checking_leaks():\n to_scan = lambda c, _: (c, None)\n\n @api.vmap\n def f(x):\n lax.scan(to_scan, x, None, length=1)\n f(np.arange(5.)) # doesn't crash\n\n def test_leak_checker_catches_a_sublevel_leak(self):\n with jax.checking_leaks():\n @jit\n def f(x):\n lst = []\n @jit\n def g(x):\n lst.append(x)\n return x\n\n x = g(x)\n return x\n\n with self.assertRaisesRegex(Exception, r\"Leaked sublevel\"):\n f(3)\n\n def test_leak_checker_avoids_false_positive_custom_jvp(self):\n # see https://github.com/google/jax/issues/5636\n with jax.checking_leaks():\n @api.custom_jvp\n def t(y):\n return y\n\n def t_jvp(p, t):\n pass\n\n t.defjvp(t_jvp)\n\n @jit\n def s(y):\n return t(y)\n s(3) # doesn't crash\n\n def test_default_backend(self):\n first_local_device = api.local_devices()[0]\n self.assertEqual(first_local_device.platform, api.default_backend())\n\n def test_dunder_jax_array(self):\n # https://github.com/google/jax/pull/4725\n\n class AlexArray:\n def __init__(self, jax_val):\n self.jax_val = jax_val\n def __jax_array__(self):\n return self.jax_val\n dtype = property(lambda self: self.jax_val.dtype)\n shape = property(lambda self: self.jax_val.shape)\n\n x = AlexArray(jnp.array([1., 2., 3.]))\n y = jnp.sin(x)\n self.assertAllClose(y, jnp.sin(jnp.array([1., 2., 3.])))\n y = api.grad(api.jit(lambda x: jnp.sin(x).sum()))(x)\n self.assertAllClose(y, jnp.cos(jnp.array([1., 2., 3.])))\n\n x = AlexArray(jnp.array([[1., 2., 3.]]))\n y = api.pmap(jnp.sin)(x)\n self.assertAllClose(y, jnp.sin(jnp.array([[1., 2., 3.]])))\n\n x = jnp.array(1)\n a = AlexArray(x)\n for f in [jnp.isscalar, jnp.size, jnp.shape, jnp.dtype]:\n self.assertEqual(f(x), f(a))\n\n def test_constant_handler_mro(self):\n # https://github.com/google/jax/issues/6129\n\n class Foo(enum.IntEnum):\n bar = 1\n\n @api.pmap\n def f(_):\n return Foo.bar\n\n ans = f(jnp.arange(1)) # doesn't crash\n expected = jnp.arange(1) + 1\n self.assertAllClose(ans, expected)\n\n def test_large_python_ints(self):\n with self.assertRaises(OverflowError):\n jnp.multiply(2 ** 100, 3.)\n\n out = lax.convert_element_type(2 ** 100, jnp.float32) # doesn't crash\n self.assertArraysEqual(out, np.float32(2 ** 100))\n\n def test_dot_precision_context_manager(self):\n x = jnp.zeros((2, 2))\n\n with jax.default_matmul_precision(None):\n jnp.dot(x, x) # doesn't crash\n jaxpr = jax.make_jaxpr(jnp.dot)(x, x)\n self.assertIn('precision=None', str(jaxpr))\n\n with 
jax.default_matmul_precision(\"bfloat16\"):\n x @ x # doesn't crash\n jaxpr = jax.make_jaxpr(op.matmul)(x, x)\n self.assertIn('Precision.DEFAULT', str(jaxpr))\n\n with jax.default_matmul_precision(\"tensorfloat32\"):\n jnp.dot(x, x) # doesn't crash\n jaxpr = jax.make_jaxpr(jnp.dot)(x, x)\n self.assertIn('Precision.HIGH', str(jaxpr))\n\n with jax.default_matmul_precision(\"float32\"):\n jnp.dot(x, x) # doesn't crash\n jaxpr = jax.make_jaxpr(jnp.dot)(x, x)\n self.assertIn('Precision.HIGHEST', str(jaxpr))\n\n dot = partial(jnp.dot, precision=lax.Precision.HIGHEST)\n with jax.default_matmul_precision(\"tensorfloat32\"):\n dot(x, x) # doesn't crash\n jaxpr = jax.make_jaxpr(dot)(x, x)\n self.assertIn('Precision.HIGHEST', str(jaxpr))\n\n def test_dot_precision_flag(self):\n x = jnp.zeros((2, 2))\n\n prev_val = config._read(\"jax_default_matmul_precision\")\n try:\n config.FLAGS.jax_default_matmul_precision = \"tensorfloat32\"\n jnp.dot(x, x) # doesn't crash\n jaxpr = jax.make_jaxpr(jnp.dot)(x, x)\n finally:\n config.FLAGS.jax_default_matmul_precision = prev_val\n self.assertIn('Precision.HIGH', str(jaxpr))\n self.assertEqual(prev_val, config._read(\"jax_default_matmul_precision\"))\n\n prev_val = config._read(\"jax_default_matmul_precision\")\n try:\n config.update('jax_default_matmul_precision','tensorfloat32')\n jnp.dot(x, x) # doesn't crash\n jaxpr = jax.make_jaxpr(jnp.dot)(x, x)\n finally:\n config.update('jax_default_matmul_precision', prev_val)\n self.assertIn('Precision.HIGH', str(jaxpr))\n self.assertEqual(prev_val, config._read(\"jax_default_matmul_precision\"))\n\n def test_dot_precision_forces_retrace(self):\n num_traces = 0\n\n def g(x):\n nonlocal num_traces\n num_traces += 1\n return jnp.dot(x, x)\n def f_cond(x):\n return lax.cond(True, g, g, x)\n\n @jax.jit\n def f_jit(x):\n nonlocal num_traces\n num_traces += 1\n return jnp.dot(x, x)\n\n for f in [f_jit, f_cond]:\n precision = config.jax_default_matmul_precision\n try:\n num_traces = 0\n x = jnp.zeros((2, 2))\n f(x)\n self.assertEqual(num_traces, 1)\n f(x)\n self.assertEqual(num_traces, 1)\n with jax.default_matmul_precision(\"tensorfloat32\"):\n f(x)\n self.assertEqual(num_traces, 2)\n FLAGS.jax_default_matmul_precision = \"float32\"\n f(x)\n self.assertGreaterEqual(num_traces, 2)\n nt = num_traces\n f(x)\n self.assertEqual(num_traces, nt + 1)\n f(x)\n self.assertEqual(num_traces, nt + 1)\n finally:\n FLAGS.jax_default_matmul_precision = precision\n\n def test_rank_promotion_forces_retrace(self):\n num_traces = 0\n\n def g(x):\n nonlocal num_traces\n num_traces += 1\n return x + x\n def f_cond(x):\n return lax.cond(True, g, g, x)\n\n @jax.jit\n def f_jit(x):\n nonlocal num_traces\n num_traces += 1\n return x + x\n\n for f in [f_jit, f_cond]:\n allow_promotion = config.jax_numpy_rank_promotion\n try:\n num_traces = 0\n @jax.jit\n def f(x):\n nonlocal num_traces\n num_traces += 1\n return x + x\n x = jnp.zeros((2, 2))\n f(x)\n self.assertEqual(num_traces, 1)\n f(x)\n self.assertEqual(num_traces, 1)\n with jax.numpy_rank_promotion(\"warn\"):\n f(x)\n self.assertEqual(num_traces, 2)\n FLAGS.jax_numpy_rank_promotion = \"raise\"\n f(x)\n self.assertGreaterEqual(num_traces, 2)\n nt = num_traces\n f(x)\n self.assertEqual(num_traces, nt + 1)\n f(x)\n self.assertEqual(num_traces, nt + 1)\n finally:\n FLAGS.jax_numpy_rank_promotion = allow_promotion\n\n def test_backward_pass_ref_dropping(self):\n refs = []\n\n @api.custom_vjp\n def f(x):\n return x\n def f_fwd(x):\n return x, None\n def f_rev(_, g):\n assert len(refs) != 2 or refs[0]() is 
None\n zero = np.zeros(())\n refs.append(weakref.ref(zero))\n return (zero,)\n f.defvjp(f_fwd, f_rev)\n\n api.grad(lambda x: f(f(f(x))))(1.)\n\n def test_custom_vjp_scan_batching_edge_case(self):\n # https://github.com/google/jax/issues/5832\n @jax.custom_vjp\n def mul(x, coeff): return x * coeff\n def mul_fwd(x, coeff): return mul(x, coeff), (x, coeff)\n def mul_bwd(res, g):\n x, coeff = res\n g_x = g * coeff\n g_coeff = (x * g).sum()\n return g_x, g_coeff\n mul.defvjp(mul_fwd, mul_bwd)\n\n def scan_over_mul(x, coeff):\n def f_(x, t):\n return mul(x, coeff), None\n y, _ = jax.lax.scan(f_, x, jnp.arange(3))\n return y\n\n key = jax.random.PRNGKey(0)\n key1, key2 = jax.random.split(key, 2)\n x_batch = jax.random.normal(key1, (3, 2))\n covector_batch = jax.random.normal(key2, (3, 2))\n coeff = jnp.array(1.)\n\n batched_scan_over_mul = jax.vmap(scan_over_mul, in_axes=(0, None), out_axes=0)\n res, vjp_fun = jax.vjp(batched_scan_over_mul, x_batch, coeff)\n vjp_fun(covector_batch) # doesn't crash\n\n jtu.check_grads(batched_scan_over_mul, (x_batch, coeff), order=2,\n modes=['rev'])\n\n def test_jit_inline(self):\n @partial(api.jit, inline=False)\n def f(x):\n return x * 2\n\n jaxpr = api.make_jaxpr(f)(3)\n self.assertIn('xla_call', str(jaxpr))\n\n @partial(api.jit, inline=True)\n def f(x):\n return x * 2\n\n jaxpr = api.make_jaxpr(f)(3)\n self.assertNotIn('xla_call', str(jaxpr))\n\n # Repro for https://github.com/google/jax/issues/7229.\n def test_compute_with_large_transfer(self):\n def f(x, delta):\n return x + jnp.asarray(delta, x.dtype)\n\n # A large and potentially unaligned array to trigger non-zero-copy and\n # async device array copy.\n xs = np.random.uniform(0., 1., size=(10, 131, 111, 3)).astype(np.float32)\n for x in xs:\n delta = np.random.uniform(-0.5, 0.5, size=())\n jitted_f = api.jit(f)\n np.testing.assert_allclose(jitted_f(x, delta), f(x, delta))\n\n def test_vjp_fun_jit(self):\n # test that the function returned by vjp can be returned\n # from and passed to jitted functions\n f = lambda x: 2. * x\n\n @partial(jit, static_argnums=0)\n def linearize_vjp(f, x):\n _, vjp_fun = api.vjp(f, x)\n return vjp_fun\n\n linearized = linearize_vjp(f, 1.)\n actual = jit(lambda f, x: f(x))(linearized, 3.)\n expected = (6.,)\n self.assertEqual(actual, expected)\n\n def test_linearize_fun_jit(self):\n # test that the function returned by linearize can be returned\n # from and passed to jitted functions\n f = lambda x: 2. * x\n\n @partial(jit, static_argnums=0)\n def linearize(f, x):\n _, jvp_fun = api.linearize(f, x)\n return jvp_fun\n\n linearized = linearize(f, 1.)\n actual = jit(lambda f, x: f(x))(linearized, 3.)\n expected = 6.\n self.assertEqual(actual, expected)\n\n def test_linear_transpose_fun_jit(self):\n # test that the function returned by linear_transpose can be returned\n # from and passed to jitted functions\n f = lambda x: 2. * x\n\n @partial(jit, static_argnums=0)\n def transpose(f, x):\n return api.linear_transpose(f, x)\n\n transposed = transpose(f, 1.)\n actual = jit(lambda f, x: f(x))(transposed, 3.)\n expected = (6.,)\n self.assertEqual(actual, expected)\n\n def test_leaked_tracer_issue_7613(self):\n # from https://github.com/google/jax/issues/7613\n import numpy.random as npr\n\n def sigmoid(x):\n return 1. / (1. 
+ jnp.exp(-x))\n\n x = jnp.ones((50,))\n A = jnp.array(npr.randn(50, 50))\n\n @jax.jit\n def loss(A, x):\n h = jax.nn.sigmoid(A * x)\n return jnp.sum((h - x)**2)\n\n with jax.checking_leaks():\n _ = jax.grad(loss)(A, x) # doesn't crash\n\n def test_vmap_caching(self):\n # https://github.com/google/jax/issues/7621\n\n f = lambda x: jnp.square(x).mean()\n jf = jax.jit(f)\n x = jax.random.uniform(jax.random.PRNGKey(0), shape=(8, 4))\n\n with jtu.count_jit_and_pmap_compiles() as count: # noqa: F841\n for _ in range(5):\n jax.hessian(jf)(x).block_until_ready()\n\n n = count[0]\n # The exact number of compilations may vary depending on the number of\n # jit decorators in the function above, but it should not grow after an\n # initial warmup phase.\n for _ in range(5):\n jax.hessian(jf)(x).block_until_ready()\n\n self.assertEqual(count[0], n)\n\n def test_jnp_array_doesnt_device_put(self):\n with jtu.count_device_put() as count:\n api.make_jaxpr(lambda: jnp.array(3))()\n self.assertEqual(count[0], 0)\n\n\nclass RematTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_basic(self, remat):\n @remat\n def g(x):\n return lax.sin(lax.sin(x)), 3.\n\n def f(x):\n x, _ = g(x)\n return x\n\n ans = f(2.)\n expected = np.sin(np.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans, f_lin = api.linearize(f, 2.)\n expected = np.sin(np.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = f_lin(3.)\n expected = np.cos(np.sin(2.)) * np.cos(2.) * 3.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n sin_calls = []\n cos_calls = []\n sin_impl = lax.sin_p.impl\n cos_impl = lax.cos_p.impl\n try:\n lax.sin_p.def_impl(lambda x: sin_calls.append(1) or sin_impl(x))\n lax.cos_p.def_impl(lambda x: cos_calls.append(1) or cos_impl(x))\n f_lin(3.)\n finally:\n lax.sin_p.def_impl(sin_impl)\n lax.cos_p.def_impl(cos_impl)\n self.assertEqual(len(sin_calls), 1)\n self.assertEqual(len(cos_calls), 2)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_freevars(self, remat):\n def f1(x):\n y = 2 * jnp.sin(x)\n z = jnp.cos(x) * jnp.sin(y)\n return z\n\n def f2(x):\n y = 2 * jnp.sin(x)\n z = remat(lambda x: jnp.cos(x) * jnp.sin(y))(x)\n return z\n\n ans, f_lin = api.linearize(f2, 2.)\n expected, f_lin_expected = api.linearize(f1, 2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = f_lin(3.)\n expected = f_lin_expected(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_grad_python_control_flow(self):\n @partial(api.remat, concrete=True)\n def g(x):\n if x > 0:\n return lax.sin(x), 3.\n else:\n return lax.cos(x), 4.\n\n def f(x):\n x, _ = g(x)\n return x\n\n ans = f(2.)\n expected = np.sin(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(f)(2.)\n expected = np.cos(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: 
False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_jit(self, remat):\n @remat\n def g(x):\n return lax.sin(lax.sin(x))\n\n def f_(x):\n return g(x)\n f = api.jit(f_)\n\n ans = f(2.)\n expected = np.sin(np.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(f)(2.)\n expected = np.cos(np.sin(2.)) * np.cos(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jit(api.grad(f_))(2.)\n expected = np.cos(np.sin(2.)) * np.cos(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_vmap(self, remat):\n @remat\n def g(x):\n return lax.sin(lax.sin(x))\n\n x = np.arange(3.)\n\n ans = api.vmap(g)(x)\n expected = np.sin(np.sin(x))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jacfwd(g)(x)\n expected = np.diag(np.cos(np.sin(x)) * np.cos(x))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jacrev(g)(x)\n expected = np.diag(np.cos(np.sin(x)) * np.cos(x))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_higher_order_autodiff(self, remat):\n def f(x):\n return lax.cos(lax.sin(x))\n g = remat(f)\n\n ans = api.grad(api.grad(g))(3.)\n expected = api.grad(api.grad(f))(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_scan(self):\n to_scan = lambda c, x: (jnp.sin(c), None)\n\n def f_noremat(x):\n y, _ = lax.scan(to_scan, x, np.arange(3.))\n return y\n\n def f_yesremat(x):\n y, _ = lax.scan(api.remat(to_scan), x, np.arange(3.))\n return y\n\n ans = f_yesremat(4.)\n expected = f_noremat(4.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(f_yesremat)(4.)\n expected = api.grad(f_noremat)(4.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n jaxpr = api.make_jaxpr(api.linearize(f_yesremat, 4.)[1])(1.)\n scan_eqn, = jaxpr.jaxpr.eqns\n self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))\n\n jaxpr = api.make_jaxpr(api.vjp(f_yesremat, 4.)[1])(1.)\n scan_eqn, = jaxpr.jaxpr.eqns\n self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_no_redundant_flops(self, remat):\n # see https://github.com/google/jax/pull/1749#issuecomment-558267584\n\n @api.jit\n def g(x):\n return f(2., x)\n\n @remat\n def f(x, y):\n return jnp.sin(x) * y\n\n # We swap out sin_p's impl rule to count how many times it's invoked\n called = []\n sin_impl = lax.sin_p.impl\n try:\n lax.sin_p.def_impl(lambda x: called.append(1) or sin_impl(x))\n api.grad(g)(3.)\n finally:\n lax.sin_p.def_impl(sin_impl)\n num_calls = len(called)\n self.assertLessEqual(num_calls, 1)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, 
remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_binomial_checkpointing(self, remat):\n def binom_checkpoint(funs):\n if len(funs) == 1:\n return funs[0]\n else:\n f1 = binom_checkpoint(funs[:len(funs)//2])\n f2 = binom_checkpoint(funs[len(funs)//2:])\n return remat(lambda x: f1(f2(x)))\n\n f1 = binom_checkpoint([jnp.sin, jnp.sin, jnp.sin, jnp.sin])\n f2 = lambda x: jnp.sin(jnp.sin(jnp.sin(jnp.sin(x))))\n x = 4.\n self.assertAllClose(f1(x), f2(x), check_dtypes=False)\n self.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False)\n\n def test_remat_symbolic_zeros(self):\n # code from https://github.com/google/jax/issues/1907\n\n key = jax.random.PRNGKey(0)\n key, split = jax.random.split(key)\n n = 5\n\n def func(D0):\n def shift(R, dR, **unused_kwargs):\n return R + dR\n\n def apply_fn(R):\n return D0 * R\n\n Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,\n dtype=jnp.float32)\n\n def move(R,i):\n F = apply_fn(R)\n return shift(R, 0.001 * F), jnp.array([0.])\n\n move = api.remat(move)\n R, temp = lax.scan(move, Rinit, jnp.arange(2))\n return R[0, 0]\n\n api.grad(func)(5.0) # doesn't crash\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_jit2(self, remat):\n @api.jit\n def f(x):\n y = 2 * x\n\n @remat\n def g():\n return y\n\n return g()\n\n self.assertAllClose(f(3), 6, check_dtypes=False)\n\n def test_remat_nontrivial_env(self):\n # simplified from https://github.com/google/jax/issues/2030\n\n @api.remat\n def foo(state, dt=0.5, c=1):\n u, u_t = state\n u_tt = c**2 * u\n u_t = u_t + u_tt * dt\n return (u, u_t)\n\n @partial(api.jit, static_argnums=(1,))\n def _multi_step(state, count, dt, c):\n f = lambda s, _: (foo(s, dt, c), _)\n return lax.scan(f, state, None, count)\n\n def multi_step(state, count, dt=1/jnp.sqrt(2), c=1):\n return _multi_step(state, count, dt, c)\n\n def loss(u0, target, steps, dt=1/jnp.sqrt(2), c=1):\n init = (u0, jnp.zeros_like(u0))\n (uf, _), _ = multi_step(init, steps, dt, c)\n return ((uf - target) ** 2).mean()\n\n target = jnp.zeros((128, 128))\n u0 = jnp.ones_like(target)\n loss(u0, target, 10) # doesn't crash\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_jit3(self, remat):\n # https://github.com/google/jax/issues/2180\n def f(w, x):\n a = jnp.dot(x, w)\n b = jnp.einsum(\"btd,bTd->btT\", a, a)\n c = jnp.einsum(\"btT,btd->btd\", b, a)\n return jnp.sum(c)\n\n w = jnp.ones([1, 1])\n x = jnp.ones([1, 1, 1])\n f = remat(f)\n api.grad(f)(w, x) # doesn't crash\n\n @api.jit\n def mul(a, b):\n return a * b\n\n def f(w, x):\n a = mul(w, x)\n b = mul(a, a)\n return b\n\n w = 1.\n x = 1.\n f = remat(f)\n api.grad(f)(w, x) # doesn't crash\n\n def test_remat_scan2(self):\n # https://github.com/google/jax/issues/1963\n\n def scan_bug(x0):\n f = lambda x, _: (x + 1, None)\n def scanned_f(x, _):\n return lax.scan(f, x, xs=None, length=1)[0], None\n x, _ = jax.remat(scanned_f)(x0, None)\n return x\n\n jax.grad(scan_bug)(1.0) # doesn't 
crash\n\n def test_remat_jit_static_argnum_omnistaging(self):\n # https://github.com/google/jax/issues/2833\n # NOTE(mattjj): after #3370, this test doesn't actually call remat...\n def named_call(f):\n def named_f(*args):\n f_ = lu.wrap_init(lambda: (f(*args),))\n out, = core.call_p.bind(f_)\n return out\n return named_f\n\n def f(a_bool, y):\n if a_bool:\n return y + 1\n else:\n return y\n\n api.jit(named_call(f), static_argnums=0)(True, 1) # no crash\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_remat_eval_counter(self, remat):\n # https://github.com/google/jax/issues/2737\n add_one_p = Primitive('add_one')\n add_one = add_one_p.bind\n\n num_evals = 0\n\n @contextmanager\n def assertEvals(n):\n start = num_evals\n yield\n assert num_evals - start == n\n\n def add_one_impl(x):\n nonlocal num_evals\n num_evals += 1\n return x + 1\n add_one_p.def_impl(add_one_impl)\n\n def add_one_jvp(pin, tin):\n pout = add_one(pin[0])\n return pout, pout * tin[0]\n ad.primitive_jvps[add_one_p] = add_one_jvp\n\n add_one_p.def_abstract_eval(lambda x: x)\n\n v = np.zeros((1,))\n\n f = remat(add_one)\n g = remat(lambda x: add_one(f(x)))\n\n # 2 calls needed to evaluate g\n with assertEvals(2):\n _, vjp = jax.vjp(g, v)\n # 2 calls made while transposing g, 1 call made while transposing f\n with assertEvals(3):\n vjp(v)\n\n @jax._src.util.curry\n def call(f, *args):\n return jax.core.call(\n jax.linear_util.wrap_init(lambda *args: [f(*args)]),\n *args, name='foo')[0]\n\n f = call(add_one)\n g = remat(lambda x: add_one(f(x)))\n\n # 2 calls needed to evaluate g\n with assertEvals(2):\n _, vjp = jax.vjp(g, v)\n # 2 calls made while transposing g, no reevaluation for transposition of f\n with assertEvals(2):\n vjp(v)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_escaped_tracer_remat(self, remat):\n # b/169779185\n def f():\n seq = [jnp.zeros([])]\n def g():\n seq[0] += 1 # this is line 7 btw\n return seq[0]\n\n remat(g)()\n remat(g)()\n\n with self.assertRaisesRegex(UnexpectedTracerError, \"global state\"):\n api.jit(f)()\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_no_cse_widget_on_primals(self, remat):\n @remat\n def g(x):\n return lax.sin(lax.sin(x)), 3.\n\n def f(x):\n x, _ = g(x)\n return x\n\n c = api.xla_computation(f)(2.)\n self.assertNotIn('while', c.as_hlo_text())\n self.assertNotIn('conditional', c.as_hlo_text())\n\n c = api.xla_computation(grad(f))(2.)\n text = c.as_hlo_text()\n self.assertTrue('while' in text or 'conditional' in text)\n\n def test_no_cse_widget_with_prevent_cse_false(self):\n @partial(api.remat, prevent_cse=False)\n def g(x):\n return lax.sin(lax.sin(x)), 3.\n\n def f(x):\n x, _ = g(x)\n return x\n\n c = api.xla_computation(f)(2.)\n self.assertNotIn('while', c.as_hlo_text())\n self.assertNotIn('conditional', c.as_hlo_text())\n\n c = api.xla_computation(grad(f))(2.)\n 
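# With prevent_cse=False, remat emits the rematerialized computation without the\n # while/conditional wrapper it normally uses to defeat XLA CSE, so (unlike the\n # test above) the backward-pass HLO should contain neither construct.\n 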
self.assertNotIn('while', c.as_hlo_text())\n self.assertNotIn('conditional', c.as_hlo_text())\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"_{policy_name}\", \"policy\": policy,\n \"in_jaxpr2\": in_jaxpr2, \"not_in_jaxpr2\": not_in_jaxpr2}\n for policy_name, policy, in_jaxpr2, not_in_jaxpr2 in [\n ('save_anything', lambda *_, **__: True, [], [' sin ', ' cos ']),\n ('save_nothing', lambda *_, **__: False, [' sin ', ' cos '], []),\n ('save_sin', lambda p, *_, **__: str(p) == 'sin', [' cos '], [' sin ']),\n ])\n def test_remat_custom_policy(self, policy, in_jaxpr2, not_in_jaxpr2):\n for square in [lambda x: x * x, api.jit(lambda x: x * x)]:\n f = api.remat(lambda x: jnp.sin(square(jnp.sin(x))),\n policy=policy)\n y, f_lin = api.linearize(f, 1.)\n ydot = f_lin(2.)\n jaxpr_text = str(f_lin.func.args[0])\n for substr in in_jaxpr2:\n self.assertIn(substr, jaxpr_text)\n for substr in not_in_jaxpr2:\n self.assertNotIn(substr, jaxpr_text)\n y_expected, ydot_expected = api.jvp(lambda x: jnp.sin(square(jnp.sin(x))),\n [1.], [2.])\n self.assertAllClose(y, y_expected)\n self.assertAllClose(ydot, ydot_expected)\n jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])\n\n def test_remat_custom_policy_save_cos(self):\n save_cos = lambda prim, *_, **__: str(prim) == 'cos'\n f = api.remat(lambda x: jnp.sin(jnp.sin(x)), # different function\n policy=save_cos)\n _, f_lin = api.linearize(f, 1.)\n jaxpr_text = str(f_lin.func.args[0])\n self.assertNotIn(' sin ', jaxpr_text)\n self.assertNotIn(' cos ', jaxpr_text)\n jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])\n\n def test_remat_checkpoint_dots(self):\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)\n def f(x):\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n return x\n\n _, f_lin = api.linearize(f, jnp.ones((2, 2)))\n jaxpr_text = str(f_lin.func.args[0])\n self.assertEqual(jaxpr_text.count(' sin '), 2)\n self.assertEqual(jaxpr_text.count(' dot_'), 6)\n jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])\n\n def test_remat_checkpoint_dots_with_no_batch_dims(self):\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)\n def f(x):\n x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n return x\n\n _, f_lin = api.linearize(f, jnp.ones((2, 2)))\n jaxpr_text = str(f_lin.func.args[0])\n self.assertEqual(jaxpr_text.count(' sin '), 2)\n self.assertEqual(jaxpr_text.count(' dot_general'), 6)\n jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])\n\n def test_remat_checkpoint_dots_with_no_batch_dims2(self):\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)\n def f(x):\n x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x)\n return x\n\n _, f_lin = api.linearize(f, jnp.ones((3, 2, 2)))\n jaxpr_text = str(f_lin.func.args[0])\n self.assertEqual(jaxpr_text.count(' sin '), 2)\n self.assertEqual(jaxpr_text.count(' dot_general'), 9)\n jtu.check_grads(f, 
(jnp.ones((3, 2, 2)),), order=2, modes=['fwd', 'rev'])\n\n def test_remat_checkpoint_dots_jit(self):\n @api.jit\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)\n def f(x):\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x * 1e-3)\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x * 1e-3)\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = jnp.sin(x * 1e-3)\n return x\n\n _, f_lin = api.linearize(f, jnp.ones((2, 2)))\n jaxpr_text = str(f_lin.func.args[0])\n self.assertEqual(jaxpr_text.count(' sin '), 2)\n self.assertEqual(jaxpr_text.count(' dot_'), 6)\n jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])\n\n def test_remat_checkpoint_dots_inside_scan(self):\n x = jnp.ones((5,))\n\n def f(W):\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)\n def f(x):\n x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))\n x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))\n x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))\n return x\n\n def body(x, _): return f(x), None\n return lax.scan(body, x, None, length=2)[0]\n\n _, f_vjp = api.vjp(f, jnp.ones((5, 5)))\n jaxpr_text = str(f_vjp.args[0].func.args[1])\n\n # Two sine calls in the backward pass because while we don't save sines\n # within the (rematted) body function, we can save the scan carry, which\n # effectively saves one sine. Three cosines for the Jacoian coefficients.\n self.assertEqual(jaxpr_text.count(' sin '), 2)\n self.assertEqual(jaxpr_text.count(' cos '), 3)\n # Six calls to dot_general in the backward pass because we save the primal\n # matmuls and only compure the backward pass ones (two for each primal one).\n self.assertEqual(jaxpr_text.count(' dot_'), 6)\n\n jtu.check_grads(api.jit(f), (jnp.ones((5, 5)),), order=2,\n modes=['fwd', 'rev'])\n\n def test_remat_custom_jvp_policy(self):\n @api.custom_jvp\n def sin(x):\n return jnp.sin(x)\n def sin_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return sin(x), jnp.cos(x) * g\n sin.defjvp(sin_jvp)\n\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)\n def f(x):\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = sin(x * 1e-3)\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = sin(x * 1e-3)\n x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)\n x = sin(x * 1e-3)\n return x\n\n jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])\n\n def g(x):\n return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]\n jtu.check_grads(g, (3.,), order=2, modes=['fwd', 'rev'])\n\n def test_remat_custom_vjp_policy(self):\n @api.custom_vjp\n def sin(x):\n return jnp.sin(x)\n def sin_fwd(x):\n return sin(x), x\n def sin_bwd(x, y_bar):\n return (jnp.cos(x) * y_bar,)\n sin.defvjp(sin_fwd, sin_bwd)\n\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)\n def f(x):\n @partial(api.named_call, name=\"dot\")\n def dot2(y, z):\n return jnp.dot(x, jnp.dot(y, z, precision=lax.Precision.HIGHEST),\n precision=lax.Precision.HIGHEST)\n\n x = dot2(x, x)\n x = sin(x * 1e-3)\n x = dot2(x, x)\n x = sin(x * 1e-3)\n x = dot2(x, x)\n x = sin(x * 1e-3)\n return x\n\n jtu.check_grads(f, (3.,), order=2, modes=['rev'])\n\n def g(x):\n return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]\n jtu.check_grads(g, (3.,), order=2, modes=['rev'])\n\n def test_remat_dropvar_policy(self):\n def f(x):\n return x, x\n\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)\n def g(x):\n x = api.grad(lambda x: f(x)[0])(x)\n 
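# Only f(x)[0] is differentiated, so f's second output becomes a dropped\n # variable in the jaxpr; the policy-based remat has to tolerate that.\n 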
return x\n\n api.grad(g)(3.)\n\n def test_remat_custom_jvp_linear_policy(self):\n @api.custom_jvp\n def sum(x):\n return jnp.sum(x, axis=0)\n @sum.defjvp\n def sum_jvp(primals, tangents):\n (x,), (xdot,) = primals, tangents\n return sum(x), sum(xdot)\n\n @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)\n def f(x):\n return sum(x)\n jtu.check_grads(f, (jnp.ones(3),), order=2, modes=['fwd', 'rev'])\n\n def g(x):\n return lax.scan(lambda _, x: (None, f(x)), None, x)[1]\n jtu.check_grads(g, (jnp.ones((2, 3)),), order=2, modes=['fwd', 'rev'])\n\n def test_constants_not_hoisted(self):\n # The old implementation of remat worked by data dependence, and so\n # (potentially large) constants would not be rematerialized and could be\n # wastefully instantiated. This test checks that the newer remat\n # implementation avoids that. See https://github.com/google/jax/pull/8191.\n\n # no residuals from constants created inside jnp.einsum\n @partial(new_checkpoint, policy=lambda *_, **__: False)\n def f(x):\n return jnp.einsum('ii->i', x)\n res_avals = saved_residuals(f, jnp.ones((2, 2)))\n self.assertLen(res_avals, 0)\n\n # no residuals from jnp.zeros\n @partial(new_checkpoint, policy=lambda *_, **__: False)\n def f(x):\n return jnp.zeros_like(x) * x\n res_avals = saved_residuals(f, jnp.ones((2, 2)))\n self.assertLen(res_avals, 0)\n\n # no residuals from jnp.zeros, but input must be saved\n @partial(new_checkpoint, policy=lambda *_, **__: False)\n def f(x):\n return jnp.zeros_like(x) * jnp.sin(x)\n res_avals = saved_residuals(f, jnp.ones((2, 2)))\n self.assertLen(res_avals, 1)\n\n def test_name_denylist(self):\n def f(x):\n y = checkpoint_name(jnp.multiply(2., 2.), 'y')\n z = checkpoint_name(jnp.multiply(2., 2.), 'z')\n w = checkpoint_name(jnp.multiply(2., 2.), 'w')\n u = jnp.multiply(2., 2.)\n return (((x * y) * z) * w) * u\n\n policy = jax.checkpoint_policies.save_any_names_but_these('y', 'z', 'w')\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 0) # can't save anything\n\n policy = jax.checkpoint_policies.save_any_names_but_these('z', 'w')\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 1) # can save only y\n\n policy = jax.checkpoint_policies.save_any_names_but_these('w')\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 2) # can save y and z\n\n policy = jax.checkpoint_policies.save_any_names_but_these()\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 3) # can save y, z, and w\n\n def test_name_allowlist(self):\n def f(x):\n y = checkpoint_name(jnp.multiply(2., 2.), 'y')\n z = checkpoint_name(jnp.multiply(2., 2.), 'z')\n w = checkpoint_name(jnp.multiply(2., 2.), 'w')\n u = jnp.multiply(2., 2.)\n return (((x * y) * z) * w) * u\n\n policy = jax.checkpoint_policies.save_only_these_names('y', 'z', 'w')\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 3) # can save y, z, and w\n\n policy = jax.checkpoint_policies.save_only_these_names('z', 'w')\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 2) # can save z and w\n\n policy = jax.checkpoint_policies.save_only_these_names('w')\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 1) # can save w\n\n policy = jax.checkpoint_policies.save_only_these_names()\n res = saved_residuals(new_checkpoint(f, policy=policy), 1.)\n self.assertLen(res, 0) # can't save anything!\n\n def 
test_saved_residuals_utility(self):\n def f(x, y):\n x1, x2 = x\n z = checkpoint_name(jnp.sin(3.), 'z')\n return z * ((x1 * x2) * y) * np.array([3.])\n\n res = saved_residuals(f, (2., 3.), y=4.)\n self.assertLen(res, 6)\n self.assertEqual(res[0][0].shape, (1,))\n self.assertEqual(res[0][1], \"from a constant\")\n self.assertEqual(res[1][0].shape, ())\n self.assertEqual(res[1][1], \"from the argument 'x'\")\n self.assertEqual(res[2][0].shape, ())\n self.assertEqual(res[2][1], \"from the argument 'x'\")\n self.assertEqual(res[3][0].shape, ())\n self.assertEqual(res[3][1], \"from the argument 'y'\")\n self.assertEqual(res[4][0].shape, ())\n self.assertStartsWith(res[4][1], \"named 'z'\")\n self.assertEqual(res[5][0].shape, ())\n\n def test_saved_residuals_utility_literals(self):\n res = saved_residuals(lambda x: x * 2., 3.)\n self.assertLen(res, 1)\n self.assertEqual(res[0][0].shape, ())\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_checkpoint_dropvars(self, remat):\n @remat\n def f(x):\n _, x = api.jit(lambda: (x, x))()\n return x\n\n _ = api.grad(f)(3.) # doesn't crash\n\n def test_dce_keeps_eqns_with_used_outputs_but_no_used_inputs(self):\n @new_checkpoint\n def f(x):\n c = jax.jit(lambda: 3.)()\n return c * x\n\n _ = jax.grad(f)(3.) # doesn't crash\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"{suffix}\", \"remat\": remat}\n for suffix, remat in [\n ('', api.remat),\n ('_policy', partial(api.remat, policy=lambda *_, **__: False)),\n ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),\n ])\n def test_unit_dropvar_consistency_regression(self, remat):\n @partial(remat, policy=lambda *_, **__: False)\n def f(u, x):\n x, _ = jax.jit(lambda x: (x, u))(x)\n return x\n\n _ = api.linearize(partial(f, core.unit), 3.)\n\nclass JaxprTest(jtu.JaxTestCase):\n\n def test_scalar_literals(self):\n jaxpr = api.make_jaxpr(lambda x: x + 2)(42)\n self.assertLen(jaxpr.jaxpr.constvars, 0)\n\n def test_abstract_inputs(self):\n jaxpr = api.make_jaxpr(lambda x: x + 2.)(\n types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32)))\n self.assertEqual(jaxpr.in_avals[0].shape, ())\n self.assertEqual(jaxpr.in_avals[0].dtype, np.float32)\n\n def test_const(self):\n def fun(x):\n return (x, 1., np.zeros(1, dtype=jnp.float32))\n\n expected = \"{ lambda a:f32[1]; b:f32[]. let in (b, 1.0, a) }\"\n jaxpr = api.make_jaxpr(fun)(jnp.float32(0.))\n self.assertMultiLineStrippedEqual(expected, str(jaxpr))\n\n def test_cond(self):\n def f(x):\n return lax.cond(x >= 0.,\n x + 1.,\n lambda xt: xt + x,\n x + 2.,\n lambda xf: xf - x)\n expected = \"\"\"{ lambda ; a:f32[]. let\n b:bool[] = ge a 0.0\n c:f32[] = add a 1.0\n d:f32[] = add a 2.0\n e:i32[] = convert_element_type[new_dtype=int32 weak_type=False] b\n f:f32[] = cond[\n branches=(\n { lambda ; g_:f32[] h:f32[] i:f32[] j:f32[]. let\n k:f32[] = sub j h\n in (k,) }\n { lambda ; l:f32[] m_:f32[] n:f32[] o:f32[]. 
let\n p:f32[] = add n l\n in (p,) }\n )\n linear=(False, False, False, False)\n ] e a a c d\n in (f,) }\"\"\"\n jaxpr = api.make_jaxpr(f)(jnp.float32(3.))\n self.assertMultiLineStrippedEqual(expected, str(jaxpr))\n\n def test_make_jaxpr_static_argnums(self):\n def f(x, y):\n return x + y\n\n jaxpr = api.make_jaxpr(f, static_argnums=(1,))(2, 3)\n self.assertIn('3', str(jaxpr))\n\n def test_make_jaxpr_return_shape(self):\n _, shape_tree = api.make_jaxpr(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),\n return_shape=True)(np.int32(1))\n expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),\n api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))\n self.assertEqual(shape_tree, expected)\n\n def test_make_jaxpr_axis_env(self):\n def f(x):\n return x - lax.psum(x, 'i')\n jaxpr = api.make_jaxpr(f, axis_env=[('i', 4)])(2)\n self.assertIn('psum', str(jaxpr))\n\n def test_make_jaxpr_named(self):\n def f(x):\n return x - lax.psum(x, 'i')\n\n x = api.ShapeDtypeStruct(\n shape=(2, 3), dtype=jnp.dtype(jnp.float32), named_shape={'i': 10})\n jaxpr = api.make_jaxpr(f, axis_env=[('i', 10)])(x)\n named_shapes = [v.aval.named_shape for v in jaxpr.jaxpr.eqns[1].invars]\n self.assertEqual(named_shapes, [{'i': 10}, {}])\n\n @parameterized.parameters(True, False)\n def test_vjp_reduce_axes_jaxpr(self, gy_batched):\n def f(w, x):\n return jnp.sin(jnp.dot(x, w))\n\n w = api.ShapeDtypeStruct(\n shape=(3, 4), dtype=jnp.float32, named_shape={})\n x = api.ShapeDtypeStruct(\n shape=(3,), dtype=jnp.float32, named_shape={'batch': 2})\n gy = api.ShapeDtypeStruct(\n shape=(4,), dtype=jnp.float32,\n named_shape={'batch': 2} if gy_batched else {})\n\n # per-example\n jaxpr, shapes = api.make_jaxpr(\n lambda w, x, gy: api.vjp(f, w, x)[1](gy), axis_env=[('batch', 2)],\n return_shape=True)(w, x, gy)\n expected = (api.ShapeDtypeStruct(\n shape=(3, 4), dtype=jnp.float32, named_shape={'batch': 2}), x)\n self.assertEqual(shapes, expected)\n self.assertNotIn('psum', str(jaxpr))\n\n # reduced\n jaxpr, shapes = api.make_jaxpr(\n lambda w, x, gy: api.vjp(f, w, x, reduce_axes=('batch',))[1](gy),\n axis_env=[('batch', 2)],\n return_shape=True)(w, x, gy)\n expected = (w, x)\n self.assertEqual(shapes, expected)\n self.assertIn('psum', str(jaxpr))\n\n\nclass CustomJVPTest(jtu.JaxTestCase):\n\n def test_basic(self):\n @api.custom_jvp\n def f(x):\n return jnp.sin(x)\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), 2 * jnp.cos(x) * g\n f.defjvp(f_jvp)\n\n x = 3.\n self.assertAllClose(f(x), jnp.sin(x))\n self.assertAllClose(api.jvp(f, (x,), (1.,)),\n (jnp.sin(x), 2 * jnp.cos(x)))\n self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))\n\n def test_invariance(self):\n @api.custom_jvp\n def f(x):\n return jnp.cos(2 * x) / 2.\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return (f(x), 3 * g)\n f.defjvp(f_jvp)\n def f2(x):\n y, _ = api.jvp(f, (x,), (x,))\n return y\n def f3(x):\n y, _ = api.jvp(f2, (x,), (x,))\n return y\n x = 1.\n self.assertAllClose(api.jvp(f, (x,), (x,)),\n api.jvp(f2, (x,), (x,)),\n check_dtypes=False)\n self.assertAllClose(api.jvp(f, (x,), (x,)),\n api.jvp(f3, (x,), (x,)),\n check_dtypes=False)\n\n def test_python_control_flow(self):\n @api.custom_jvp\n def f(x):\n if x > 0:\n return jnp.sin(x)\n else:\n return jnp.cos(x)\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n if x > 0:\n return f(x), 2 * g\n else:\n return f(x), 3 * g\n f.defjvp(f_jvp)\n x = 2.\n self.assertAllClose(f(x), jnp.sin(x))\n self.assertAllClose(f(-x), jnp.cos(-x))\n 
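# f_jvp branches on the concrete primal value, so the tangent is scaled by 2\n # for positive inputs and by 3 for negative ones; e.g. api.jvp(f, (2.,), (1.,))\n # should give (jnp.sin(2.), 2.), which the checks below verify.\n 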
self.assertAllClose(api.jvp(f, (x,), (1.,)),\n (jnp.sin(x), 2.),\n check_dtypes=False)\n self.assertAllClose(api.jvp(f, (-x,), (1.,)),\n (jnp.cos(-x), 3.),\n check_dtypes=False)\n self.assertAllClose(api.grad(f)(x), 2., check_dtypes=False)\n self.assertAllClose(api.grad(f)(-x), 3., check_dtypes=False)\n\n def test_vmap(self):\n @api.custom_jvp\n def f(x):\n assert jnp.ndim(x) == 0\n return jnp.sin(x)\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n assert jnp.ndim(x) == jnp.ndim(g) == 0\n return f(x), 2 * jnp.cos(x) * g\n f.defjvp(f_jvp)\n\n x = jnp.arange(3.)\n xx = jnp.arange(6.).reshape(2, 3)\n\n # vmap of f\n self.assertAllClose(api.vmap(f)(x), jnp.sin(x))\n self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))\n\n # vmap of jvp of f\n self.assertAllClose(api.vmap(lambda x: api.jvp(f, (x,), (x,)))(x),\n (jnp.sin(x), 2 * jnp.cos(x) * x))\n self.assertAllClose(api.vmap(api.vmap(lambda x: api.jvp(f, (x,), (x,))))(xx),\n (jnp.sin(xx), 2 * jnp.cos(xx) * xx))\n\n # jvp of vmap of f\n self.assertAllClose(api.jvp(api.vmap(f), (x,), (x,)),\n (jnp.sin(x), 2 * jnp.cos(x) * x))\n self.assertAllClose(api.jvp(api.vmap(api.vmap(f)), (xx,), (xx,)),\n (jnp.sin(xx), 2 * jnp.cos(xx) * xx))\n\n # vmap of jvp of vmap of f\n self.assertAllClose(api.vmap(lambda x: api.jvp(api.vmap(f), (x,), (x,)))(xx),\n (jnp.sin(xx), 2 * jnp.cos(xx) * xx))\n\n def test_jit(self):\n @api.custom_jvp\n def f(x):\n return jnp.sin(x)\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), 2 * jnp.cos(x) * g\n f.defjvp(f_jvp)\n\n x = 3.\n\n # jit\n self.assertAllClose(api.jit(f)(x), jnp.sin(x))\n self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))\n\n # jit of jvp\n self.assertAllClose(api.jit(lambda x: api.jvp(f, (x,), (x,)))(x),\n (jnp.sin(x), 2 * jnp.cos(x) * x),\n check_dtypes=False)\n\n # jvp of jit\n self.assertAllClose(api.jvp(api.jit(f), (x,), (x,)),\n (jnp.sin(x), 2 * jnp.cos(x) * x),\n check_dtypes=False)\n\n def test_pytrees(self):\n @api.custom_jvp\n def f(x):\n return {'b': jnp.sin(x['a'])}\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), {'b': 2 * jnp.cos(x['a']) * g['a']}\n f.defjvp(f_jvp)\n x = {'a': 3.}\n self.assertAllClose(f(x)['b'], jnp.sin(x['a']))\n self.assertAllClose(api.jvp(f, (x,), (x,)),\n ({'b': jnp.sin(x['a'])},\n {'b': 2 * jnp.cos(x['a']) * x['a']}),\n check_dtypes=False)\n\n def test_kwargs(self):\n # from https://github.com/google/jax/issues/1938\n @api.custom_jvp\n def my_fun(x, y, c=1.):\n return c * (x + y)\n def my_jvp(primals, tangents):\n x, y, c = primals\n t_x, t_y, t_c = tangents\n return my_fun(x, y, c), t_c\n my_fun.defjvp(my_jvp)\n f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()\n f(10., 5.) 
# doesn't crash\n api.jvp(f, (10., 5.), (1., 1.)) # doesn't crash\n\n def test_initial_style(self):\n @api.custom_jvp\n def f(x):\n return 3 * x\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), 2 * g\n f.defjvp(f_jvp)\n\n def foo(x):\n out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)\n return out\n\n ans = api.grad(foo)(3.)\n expected = 2.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.jit(foo))(3.)\n expected = 2.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jit(api.grad(foo))(3.)\n expected = 2.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.grad(foo))(3.)\n expected = 0.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.grad(api.jit(foo)))(3.)\n expected = 0.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.jit(api.grad(foo)))(3.)\n expected = 0.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jit(api.grad(api.grad(foo)))(3.)\n expected = 0.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_initial_style_vmap(self):\n @api.custom_jvp\n def f(x):\n assert jnp.ndim(x) == 0\n return 3 * x\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), 2 * g\n f.defjvp(f_jvp)\n\n def foo(x):\n out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)\n return out\n\n ans = api.vmap(foo)(jnp.ones(3))\n expected = 3. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.vmap(api.jit(foo))(jnp.ones(3))\n expected = 3. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jit(api.vmap(foo))(jnp.ones(3))\n expected = 3. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))\n expected = 2. 
* jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_initial_style_vmap_with_collective(self):\n\n @api.custom_jvp\n def f(x):\n return lax.psum(x, 'foo')\n\n @f.defjvp\n def f_jvp(xs, ts):\n x, = xs\n t, = ts\n return lax.psum(x, 'foo'), t\n\n def g(x):\n jaxpr = api.make_jaxpr(f)(x)\n return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]\n\n v = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),\n out_axes=None)(jnp.arange(4.), 2.)\n self.assertAllClose(v, 8.)\n\n def test_closed_over_tracers_error_message(self):\n def f(x):\n @api.custom_jvp\n def g(y):\n return x + y\n def g_jvp(primals, tangents):\n return g(x), 2 * primals[0]\n g.defjvp(g_jvp)\n return g(1.)\n\n self.assertRaises(ad.CustomJVPException, lambda: api.jvp(f, (3.,), (1.,)))\n self.assertRaises(ad.CustomJVPException, lambda: api.grad(f)(3.))\n\n def test_nondiff_arg(self):\n @partial(api.custom_jvp, nondiff_argnums=(0,))\n def app(f, x):\n return f(x)\n def app_jvp(f, primals, tangents):\n (x,), (t,) = primals, tangents\n return app(f, x), 3 * t\n app.defjvp(app_jvp)\n\n ans = app(lambda x: 2 * x, 1)\n expected = 2\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jvp(lambda x: app(lambda y: 2 * y, x), (1.,), (1.,))\n expected = (2., 3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_nondiff_arg_jit_tracer(self):\n @partial(api.custom_jvp, nondiff_argnums=(0,))\n def f(x, y):\n return x * y\n def f_jvp(x, primals, tangents):\n (y,), (t_y,) = primals, tangents\n return f(x, y), 5 * t_y\n f.defjvp(f_jvp)\n\n @jit\n def g(x, y):\n return f(x, y)\n\n ans = api.jvp(lambda y: g(2., y), (3.,), (1.,))\n expected = (6., 5.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_nondiff_arg_hiding_jvp_tracer(self):\n def f(x):\n @partial(api.custom_jvp, nondiff_argnums=(0,))\n def g(h, x):\n return h(x)\n @g.defjvp\n def g_jvp(h, primals, tangents):\n x, = primals\n t, = tangents\n return g(h, x), 2. * t\n h = lambda y: x + y # capture x\n return g(h, x)\n\n with self.assertRaisesRegex(ad.CustomJVPException, \"Detected differentiation\"):\n api.jvp(f, (2.,), (1.,))\n\n def test_vmap_axes(self):\n raise unittest.SkipTest(\"TODO\") # TODO(mattjj): write test\n\n def test_pmap(self):\n raise unittest.SkipTest(\"TODO\") # TODO(mattjj): write test\n\n def test_missing_jvp_rule_error_message(self):\n @api.custom_jvp\n def foo(x):\n return x ** 2\n\n self.assertRaisesRegex(\n AttributeError,\n r\"No JVP defined for custom_jvp function foo using defjvp.\",\n lambda: foo(2))\n self.assertRaisesRegex(\n AttributeError,\n r\"No JVP defined for custom_jvp function foo using defjvp.\",\n lambda: api.jvp(foo, (2.,), (1.,)))\n self.assertRaisesRegex(\n AttributeError,\n r\"No JVP defined for custom_jvp function foo using defjvp.\",\n lambda: api.grad(foo)(2.))\n\n def test_jvp_rule_inconsistent_pytree_structures_error_message(self):\n @api.custom_jvp\n def f(x):\n return (x**2,)\n\n @f.defjvp\n def foo_jvp(primals, tangents):\n x, = primals\n t, = tangents\n return f(x), [2 * x * t, x]\n\n f(2.) 
# doesn't crash\n self.assertRaisesRegex(\n TypeError,\n re.escape(\n \"Custom JVP rule must produce primal and tangent outputs \"\n \"with equal container (pytree) structures, but got \"\n \"{} and {} respectively.\".format(\n tree_util.tree_structure((1,)),\n tree_util.tree_structure([1, 2]))\n ),\n lambda: api.jvp(f, (2.,), (1.,)))\n\n def test_primal_tangent_aval_disagreement_error_message(self):\n @api.custom_jvp\n def f(x):\n return x ** 2\n\n @f.defjvp\n def foo_jvp(primals, tangents):\n x, = primals\n t, = tangents\n return f(x), jnp.reshape(t, (1,))\n\n f(2.) # doesn't crash\n self.assertRaisesRegex(\n TypeError,\n re.escape(\n \"Custom JVP rule must produce primal and tangent outputs \"\n \"with equal shapes and dtypes, but got float32[] and float32[1] \"\n \"respectively.\"),\n lambda: api.jvp(f, (jnp.float32(2.),), (jnp.float32(1.),)))\n\n def test_jvp_rule_doesnt_return_pair_error_message(self):\n # https://github.com/google/jax/issues/2516\n\n @api.custom_jvp\n def f(x):\n return x ** 2\n\n @f.defjvp\n def foo_jvp(primals, tangents):\n x, = primals\n t, = tangents\n return t\n\n f(2.) # doesn't crash\n self.assertRaisesRegex(\n TypeError,\n re.escape(\n \"Custom JVP rule must produce a pair (list or tuple of length two) \"\n \"representing primal and tangent outputs, got 1.0\"),\n lambda: api.jvp(f, (2.,), (1.,)))\n\n def test_multiple_rule_invocations(self):\n @jax.custom_jvp\n def expit(x):\n return 1 / (1 + lax.exp(-x))\n\n @expit.defjvp\n def _expit_jvp(primals, tangents):\n (x,), (t,) = primals, tangents\n ans = expit(x)\n t_out = t * ans * (1 - ans)\n return ans, t_out\n\n def scanned_fun(c, _):\n return [expit(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None\n\n def foo(x):\n c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)\n return c[-1]\n\n # just make sure these don't crash\n foo(3.)\n grad(foo)(3.)\n grad(lambda x: jax.vmap(foo)(x).sum())(jnp.arange(3.))\n\n def test_hard_stuff(self):\n arr = jnp.ones((5, 2, 2))\n api.jit(jax.vmap(jnp.linalg.det))(arr) # doesn't crash\n\n def test_hard_stuff2(self):\n @jax.custom_jvp\n def f(x):\n return lax.tie_in(x, np.zeros(x.shape, x.dtype))\n\n @f.defjvp\n def f_jvp(primals, tangents):\n x, = primals\n t, = tangents\n return f(x), t\n\n # don't crash\n jax.jit(jax.vmap(f))(jnp.arange(3.))\n jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))\n jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))\n jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))\n jax.jvp(jax.vmap(f), (jnp.arange(3.),), (jnp.ones(3),))\n\n def test_hard_stuff3(self):\n @jax.custom_jvp\n def relu(x):\n return jnp.maximum(x, 0)\n\n @relu.defjvp\n def _relu_jvp(primals, tangents):\n x, = primals\n t, = tangents\n return relu(x), lax.select(x > 0, t, lax.full_like(t, 0))\n\n def scanned_fun(c, _):\n return [relu(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None\n\n def f(x):\n c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)\n return c[-1]\n\n # don't crash\n jax.jit(jax.vmap(f))(jnp.arange(3.))\n jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))\n jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))\n jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))\n jax.jvp(jax.jit(jax.vmap(f)), (jnp.arange(3.),), (jnp.ones(3),))\n\n def test_eval_shape(self):\n @jax.custom_jvp\n def expit(x):\n return 1 / (1 + lax.exp(-x))\n\n @expit.defjvp\n def _expit_jvp(primals, tangents):\n (x,), (t,) = primals, tangents\n ans = expit(x)\n t_out = t * ans * (1 - ans)\n return ans, t_out\n\n # don't 
crash\n api.eval_shape(expit, jnp.ones((2, 3)))\n api.eval_shape(api.grad(lambda x: expit(x).sum()), jnp.ones((2, 3)))\n\n def test_jaxpr_zeros(self):\n # from https://github.com/google/jax/issues/2657\n @api.custom_jvp\n def f(A, b):\n return A @ b\n\n def f_jvp(primals, tangents):\n A, b = primals\n dA, db = tangents\n z = f(A, b)\n dz = A @ db + dA @ b\n return z, dz\n\n f.defjvp(f_jvp)\n\n def experiment(theta):\n def step(q, _):\n z = f(jnp.eye(3), jnp.ones(3) * theta)\n q += z[0]\n return q, q\n\n q = 0.\n q, _ = lax.scan(step, q, None, 4)\n return q\n\n grad(experiment)(1.) # doesn't crash\n\n def test_linear_in_scan(self):\n @api.custom_jvp\n def f(x):\n return -x\n\n @f.defjvp\n def f_jvp(primals, tangents):\n x, = primals\n x_dot, = tangents\n return f(x), f(x_dot)\n\n def foo(x):\n out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)\n return out\n\n ans = api.grad(foo)(3.)\n expected = -1.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_custom_jvps_first_rule_is_none(self):\n # https://github.com/google/jax/issues/3389\n @api.custom_jvp\n def f(x, y):\n return x ** 2 * y\n\n f.defjvps(None, lambda x_dot, primal_out, x, y: 2 * x * y * x_dot)\n ans = grad(f, 1)(2., 3.) # doesn't crash\n expected = 12.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_concurrent_initial_style(self):\n # https://github.com/google/jax/issues/3843\n def unroll(param, sequence):\n def scan_f(prev_state, inputs):\n return prev_state, jax.nn.sigmoid(param * inputs)\n return jnp.sum(jax.lax.scan(scan_f, None, sequence)[1])\n\n def run():\n return jax.grad(unroll)(jnp.array(1.0), jnp.array([1.0]))\n\n expected = run()\n\n # we just don't want this to crash\n n_workers = 2\n with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers) as e:\n futures = []\n for _ in range(n_workers):\n futures.append(e.submit(run))\n results = [f.result() for f in futures]\n for ans in results:\n self.assertAllClose(ans, expected)\n\n def test_nondiff_argnums_vmap_tracer(self):\n # https://github.com/google/jax/issues/3964\n @partial(jax.custom_jvp, nondiff_argnums=(0, 2))\n def sample(shape, param, seed):\n return jax.random.uniform(key=seed, shape=shape, minval=param)\n\n @sample.defjvp\n def sample_jvp(shape, seed, primals, tangents):\n param, = primals\n dparam, = tangents\n dparam = jnp.broadcast_to(dparam, shape)\n samples = sample(shape, param, seed)\n return samples, samples * dparam # dummy jvp for proof of concept\n\n # check these don't crash\n jax.vmap(lambda seed: sample((2,3), 1., seed))(\n jax.random.split(jax.random.PRNGKey(1), 10))\n jax.jvp(lambda x: sample((2, 3), x, jax.random.PRNGKey(1)),\n (1.,), (1.,))\n\n def test_fun_with_nested_calls_2(self):\n def call(f, *args):\n f = api.custom_jvp(f)\n f.defjvp(lambda primals, tangents: (f(*primals), sum(tangents)))\n return f(*args)\n\n def fun_with_nested_calls_2(x):\n def bar(y):\n def baz(w):\n q = call(lambda x: y, x)\n q = q + call(lambda: y)\n q = q + call(lambda y: w + y, y)\n q = call(lambda w: call(jnp.sin, x) * y, 1.0) + q\n return q\n return api.jit(baz)(x)\n return call(bar, x)\n\n # test these don't crash\n self.assertAllClose(api.jit(fun_with_nested_calls_2)(3.),\n fun_with_nested_calls_2(3.))\n api.vmap(fun_with_nested_calls_2)(jnp.arange(3.))\n\n def test_closure_with_vmap(self):\n # https://github.com/google/jax/issues/3822\n alpha = np.float32(2.)\n\n def sample(seed):\n @api.custom_jvp\n def f(alpha):\n return jax.random.gamma(seed, alpha, shape=[])\n\n @f.defjvp\n def f_jvp(primal, 
tangent):\n alpha = primal\n dalpha = tangent\n sample = f(alpha)\n partial_alpha = lax.random_gamma_grad(alpha, sample)\n return sample, partial_alpha * dalpha\n return f(alpha)\n\n api.vmap(sample)(jax.random.split(jax.random.PRNGKey(1), 3)) # don't crash\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_float0(self):\n @api.custom_jvp\n def f(x, y):\n return x, y\n def f_jvp(primals, _):\n # we need a defined (non-float0) tangent to trigger the rule\n return primals, (2., 1)\n f.defjvp(f_jvp)\n\n primals = (2., 3)\n tangents = (np.ones(()), np.zeros((), float0),)\n expected_tangents = (2., np.zeros((), float0))\n self.assertArraysEqual(api.jvp(f, primals, tangents),\n (primals, expected_tangents))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_float0_initial_style(self):\n @api.custom_jvp\n def f(x, y):\n return x, y\n def f_jvp(primals, _):\n x, y = primals\n return (x, y), (2., 1)\n f.defjvp(f_jvp)\n\n def foo(x, y):\n out, _ = lax.scan(lambda c, _: (f(*c), None), (x, y), None, length=1)\n return out\n\n primals = (2., 3)\n tangents = (np.ones(()), np.zeros((), float0),)\n expected_tangents = (2., np.zeros((), float0))\n self.assertArraysEqual(api.jvp(foo, primals, tangents),\n (primals, expected_tangents))\n\n def test_remat(self):\n @api.custom_jvp\n def f(x):\n return jnp.sin(x)\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), 2 * jnp.cos(x) * g\n f.defjvp(f_jvp)\n\n @api.remat\n def g(x):\n return f(f(x))\n\n ans = g(2.)\n expected = np.sin(np.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(g)(2.)\n expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_higher_order(self):\n @api.custom_jvp\n def f(x):\n return jnp.sin(x)\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), 2 * jnp.cos(x) * g\n f.defjvp(f_jvp)\n\n def g(x):\n return f(f(x))\n\n ans = api.grad(api.grad(api.remat(g)))(2.)\n expected = api.grad(api.grad(g))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.remat(api.grad(g)))(2.)\n expected = api.grad(api.grad(g))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)\n expected = api.grad(api.grad(api.grad(g)))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_initial_style_vmap_2(self):\n # This is like test_initial_style_vmap except the primal function closes\n # over an array constant.\n y = jnp.array([1., 2., 3.])\n\n @api.custom_jvp\n def f(x):\n assert jnp.ndim(x) == 0\n return 3 * x * jnp.sum(y)\n def f_jvp(primals, tangents):\n x, = primals\n g, = tangents\n return f(x), 2 * g\n f.defjvp(f_jvp)\n\n def foo(x):\n out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)\n return out\n\n ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))\n expected = 2. 
* jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))\n expected = 2. * jnp.ones(3)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_custom_jvp_vmap_broadcasting_interaction(self):\n # https://github.com/google/jax/issues/6452\n def f2(y, z):\n v1 = z\n v2 = jnp.sum(y) + z\n return jnp.logaddexp(v1, v2)\n\n def f1(y, z):\n v = api.vmap(lambda _y: f2(_y, z))(y)\n return jnp.sum(v)\n\n y = jnp.ones((3, 2))\n f = lambda z: f1(y, z)\n z = 0.1\n val, g = api.value_and_grad(f)(z)\n self.assertEqual(val.shape, ())\n self.assertEqual(g.shape, ())\n\n def test_custom_jvp_vmap_broadcasting_interaction_2(self):\n # https://github.com/google/jax/issues/5849\n @api.custom_jvp\n def transform(box, R):\n if jnp.isscalar(box) or box.size == 1:\n return R * box\n elif box.ndim == 2:\n return jnp.einsum('ij,j->i', box, R)\n raise ValueError()\n\n @transform.defjvp\n def transform_jvp(primals, tangents):\n box, R = primals\n dbox, dR = tangents\n return (transform(box, R), dR + transform(dbox, R))\n\n def periodic_general(box):\n def displacement_fn(Ra, Rb, **kwargs):\n _box = kwargs.get('box', box)\n return transform(_box, Ra - Rb)\n\n return displacement_fn\n\n N = 250\n\n scalar_box = 1.0\n displacement = periodic_general(scalar_box)\n\n key = jax.random.PRNGKey(0)\n R = jax.random.uniform(key, (N, 2))\n\n def energy_fn(box):\n d = partial(displacement, box=box)\n d = api.vmap(api.vmap(d, (None, 0)), (0, None))\n return jnp.sum(d(R, R) ** 2)\n\n self.assertEqual(grad(energy_fn)(scalar_box).shape, ())\n\n def test_custom_jvp_implicit_broadcasting(self):\n # https://github.com/google/jax/issues/6357\n if config.x64_enabled:\n raise unittest.SkipTest(\"test only applies when x64 is disabled\")\n\n @jax.custom_jvp\n def projection_unit_simplex(x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Projection onto the unit simplex.\"\"\"\n s = 1.0\n n_features = x.shape[0]\n u = jnp.sort(x)[::-1]\n cssv = jnp.cumsum(u) - s\n ind = jnp.arange(n_features) + 1\n cond = u - cssv / ind > 0\n idx = jnp.count_nonzero(cond)\n threshold = cssv[idx - 1] / idx.astype(x.dtype)\n return jax.nn.relu(x - threshold)\n\n\n @projection_unit_simplex.defjvp\n def projection_unit_simplex_jvp(primals, tangents):\n x, = primals\n x_dot, = tangents\n primal_out = projection_unit_simplex(x)\n supp = primal_out > 0\n card = jnp.count_nonzero(supp)\n tangent_out = supp * x_dot - (jnp.dot(supp, x_dot) / card) * supp\n return primal_out, tangent_out\n\n rng = np.random.RandomState(0)\n x = rng.rand(5).astype(np.float32)\n\n J_rev = jax.jacrev(projection_unit_simplex)(x)\n J_fwd = jax.jacfwd(projection_unit_simplex)(x)\n\n p = projection_unit_simplex(x)\n support = (p > 0).astype(jnp.int32)\n cardinality = jnp.count_nonzero(support)\n J_true = jnp.diag(support) - jnp.outer(support, support) / cardinality\n self.assertAllClose(J_true, J_fwd)\n self.assertAllClose(J_true, J_rev)\n\n proj = jax.vmap(projection_unit_simplex)\n\n def fun(X):\n return jnp.sum(proj(X) ** 2)\n\n rng = np.random.RandomState(0)\n X = rng.rand(4, 5).astype(np.float32)\n U = rng.rand(4, 5)\n U /= np.sqrt(np.sum(U ** 2))\n U = U.astype(np.float32)\n\n eps = 1e-3\n dir_deriv_num = (fun(X + eps * U) - fun(X - eps * U)) / (2 * eps)\n dir_deriv = jnp.vdot(jax.grad(fun)(X), U)\n self.assertAllClose(dir_deriv, dir_deriv_num, atol=1e-3)\n\n def test_vmap_inside_defjvp(self):\n # https://github.com/google/jax/issues/3201\n seed = 47\n key = jax.random.PRNGKey(seed)\n mat = 
jax.random.normal(key, (2, 3))\n\n @jax.custom_jvp\n def f(mat, aux):\n num_rows, num_cols = mat.shape\n return jnp.ones((num_rows, 1)) / num_cols\n\n @f.defjvp\n def f_jvp(primals, tangents):\n mat, aux = primals\n vec, _ = tangents\n output = f(*primals)\n num_rows, num_cols = mat.shape\n size = num_rows * num_cols\n # -----\n bd_mat = mat.reshape(1, 1, num_rows, num_cols)\n bd_mat = jnp.tile(bd_mat, reps=(num_rows, num_cols))\n bd_mat = bd_mat.reshape(size, num_rows, num_cols)\n # -----\n rowsum = jnp.sum(mat, axis=1, keepdims=True)\n colsum = jnp.sum(mat, axis=0, keepdims=True)\n bd_rowsum = jnp.tile(rowsum, reps=(1, num_rows))\n bd_colsum = jnp.tile(colsum, reps=(num_cols, 1))\n # -----\n bd_vec = vec.reshape(size, 1)\n # -----\n def operate(mx, val):\n buf = 0\n for i in range(2):\n buf = buf + jnp.matmul(mx, bd_colsum) / jnp.power(aux, i)\n buf = jnp.matmul(bd_rowsum, buf)\n return buf * val\n # -----\n # Vertorizing will raise shape error\n bd_buf = jax.vmap(operate, in_axes=(0, 0), out_axes=0)(bd_mat, bd_vec)\n # -----\n bd_buf = bd_buf / aux\n jvp = jnp.sum(bd_buf, axis=0)\n jvp = jnp.mean(jvp, axis=1, keepdims=True)\n # -----\n # JVP ends successfully, but still raise an error\n return (output, jvp)\n\n jax.grad(lambda mat, aux: jnp.sum(f(mat, aux)))(mat, 0.5) # doesn't crash\n\n def test_custom_jvp_unbroadcasting(self):\n # https://github.com/google/jax/issues/3056\n a = jnp.array([1., 1.])\n\n @jax.custom_jvp\n def f(x):\n return a * x\n\n @f.defjvp\n def f_jvp(primals, tangents):\n x, = primals\n dx, = tangents\n return a * x, a * dx\n\n shape = grad(lambda x: jnp.sum(f(x)))(jnp.array(1.)).shape\n self.assertEqual(shape, ())\n\n\nclass CustomVJPTest(jtu.JaxTestCase):\n\n def test_basic(self):\n @api.custom_vjp\n def f(x):\n return jnp.sin(x)\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n x = 3.\n self.assertAllClose(f(x), jnp.sin(x))\n self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))\n self.assertAllClose(api.value_and_grad(f)(x),\n (jnp.sin(x), 2 * jnp.cos(x)))\n\n def test_invariance(self):\n @api.custom_vjp\n def f(x):\n return jnp.cos(2 * x) / 2.\n def f_fwd(x):\n return (f(x), x)\n def f_rev(x, g):\n return (g * 3,)\n f.defvjp(f_fwd, f_rev)\n def f2(x):\n y, _ = api.value_and_grad(f)(x)\n return y\n def f3(x):\n y, _ = api.value_and_grad(f2)(x)\n return y\n x = 1.\n self.assertAllClose(f(x), f2(x), check_dtypes=False)\n self.assertAllClose(f(x), f3(x), check_dtypes=False)\n self.assertAllClose(api.grad(f)(x), api.grad(f2)(x),\n check_dtypes=False)\n self.assertAllClose(api.grad(f)(x), api.grad(f3)(x),\n check_dtypes=False)\n\n def test_python_control_flow(self):\n @api.custom_vjp\n def f(x):\n if x > 0:\n return jnp.sin(x)\n else:\n return jnp.cos(x)\n def f_fwd(x):\n if x > 0:\n return f(x), x\n else:\n return f(x), x\n def f_rev(x, g):\n if x > 0:\n return (2 * g,)\n else:\n return (3 * g,)\n f.defvjp(f_fwd, f_rev)\n x = 2.\n self.assertAllClose(f(x), jnp.sin(x))\n self.assertAllClose(f(-x), jnp.cos(-x))\n self.assertAllClose(api.value_and_grad(f)(x), (jnp.sin(x), 2.),\n check_dtypes=False)\n self.assertAllClose(api.value_and_grad(f)(-x), (jnp.cos(-x), 3.),\n check_dtypes=False)\n\n def test_vmap(self):\n @api.custom_vjp\n def f(x):\n assert jnp.ndim(x) == 0\n return jnp.sin(x)\n def f_fwd(x):\n assert jnp.ndim(x) == 0\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n x = jnp.arange(3.)\n xx = jnp.arange(6.).reshape(2, 3)\n\n # vmap of f\n 
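# (vmapping f on its own only batches the primal computation; f_fwd/f_rev only\n # come into play under the grad/value_and_grad checks further below)\n 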
self.assertAllClose(api.vmap(f)(x), jnp.sin(x))\n self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))\n\n # vmap of grad of f\n self.assertAllClose(api.vmap(api.grad(f))(x), 2 * jnp.cos(x))\n self.assertAllClose(api.vmap(api.value_and_grad(f))(x),\n (jnp.sin(x), 2 * jnp.cos(x)))\n self.assertAllClose(api.vmap(api.vmap(api.grad(f)))(xx), 2 * jnp.cos(xx))\n self.assertAllClose(api.vmap(api.vmap(api.value_and_grad(f)))(xx),\n (jnp.sin(xx), 2 * jnp.cos(xx)))\n\n # grad of vmap of f\n self.assertAllClose(api.grad(lambda x: api.vmap(f)(x).sum())(x),\n 2 * jnp.cos(x))\n self.assertAllClose(api.grad(lambda x: api.vmap(api.vmap(f))(x).sum())(xx),\n 2 * jnp.cos(xx))\n\n # vmap of grad of vmap of f\n self.assertAllClose(api.vmap(api.grad(lambda x: api.vmap(f)(x).sum()))(xx),\n 2 * jnp.cos(xx))\n\n def test_jit(self):\n @api.custom_vjp\n def f(x):\n return jnp.sin(x)\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n x = 3.\n\n # jit\n self.assertAllClose(api.jit(f)(x), jnp.sin(x))\n self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))\n\n # jit of grad\n self.assertAllClose(api.jit(api.grad(f))(x), 2 * jnp.cos(x),\n check_dtypes=False)\n\n # grad of jit\n self.assertAllClose(api.grad(api.jit(f))(x), 2 * jnp.cos(x),\n check_dtypes=False)\n\n def test_pytrees(self):\n @api.custom_vjp\n def f(x):\n return {'b': jnp.sin(x['a'])}\n def f_fwd(x):\n return f(x), {'r': jnp.cos(x['a'])}\n def f_bwd(res, g):\n cos_x = res['r']\n return ({'a': 2 * cos_x * g['b']},)\n f.defvjp(f_fwd, f_bwd)\n x = {'a': 3.}\n self.assertAllClose(f(x)['b'], jnp.sin(x['a']))\n self.assertAllClose(api.grad(lambda x: f(x)['b'])(x),\n {'a': 2 * jnp.cos(x['a'])})\n\n def test_jvp_error(self):\n @api.custom_vjp\n def f(x):\n return jnp.sin(x)\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n self.assertRaisesRegex(\n TypeError,\n r\"can't apply forward-mode autodiff \\(jvp\\) to a custom_vjp function.\",\n lambda: api.jvp(f, (3.,), (1.,)))\n self.assertRaisesRegex(\n TypeError,\n r\"can't apply forward-mode autodiff \\(jvp\\) to a custom_vjp function.\",\n lambda: api.jvp(api.vmap(f), (jnp.arange(3.),), (jnp.ones(3),)))\n self.assertRaisesRegex(\n TypeError,\n r\"can't apply forward-mode autodiff \\(jvp\\) to a custom_vjp function.\",\n lambda: api.jvp(jit(f), (3.,), (1.,)))\n\n def test_kwargs(self):\n # from https://github.com/google/jax/issues/1938\n @api.custom_vjp\n def my_fun(x, y, c=1.):\n return c * (x + y)\n my_fun.defvjp(lambda x, y, c=1.: (my_fun(c, y, c), None),\n lambda _, g: (g, g, g))\n f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()\n f(10., 5.) # doesn't crash\n api.grad(f)(10., 5.) # doesn't crash\n\n def test_initial_style(self):\n @api.custom_vjp\n def f(x):\n return jnp.sin(x)\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n def foo(x):\n out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)\n return out\n\n ans = api.grad(foo)(3.)\n expected = 2. * jnp.cos(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.grad(foo))(3.)\n expected = -2. 
* jnp.sin(3.)\n self.assertAllClose(ans, expected)\n\n def test_initial_style_vmap(self):\n @api.custom_vjp\n def f(x):\n assert jnp.ndim(x) == 0\n return 3 * x\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n def foo(x):\n out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)\n return out\n\n ans = api.vmap(foo)(jnp.arange(3.))\n expected = 3. * jnp.arange(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))\n expected = 2. * jnp.cos(jnp.arange(3.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_nondiff_arg(self):\n @partial(api.custom_vjp, nondiff_argnums=(0,))\n def app(f, x):\n return f(x)\n def app_fwd(f, x):\n return app(f, x), jnp.cos(x)\n def app_rev(f, cos_x, g):\n return (cos_x * g,)\n app.defvjp(app_fwd, app_rev)\n\n ans = app(lambda x: 2 * x, 1)\n expected = 2\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.value_and_grad(lambda x: app(lambda y: 2 * y, x))(1.)\n expected = (2., jnp.cos(1.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_closed_over_tracer(self):\n # This test is similar to test_nondiff_arg_tracer except it uses lexical\n # closure rather than the nondiff_argnums mechanism. We decided to disallow\n # tracers in nondiff_argnums to greatly simplify bookkeeping while still\n # supporting the cases for which it is necessary.\n def outer(x):\n @api.custom_vjp\n def f(y):\n return x * y\n def f_fwd(y):\n return f(y), jnp.cos(y)\n def f_rev(cos_y, g):\n return (cos_y * g,)\n f.defvjp(f_fwd, f_rev)\n return f\n\n @jit\n def g(x, y):\n return outer(x)(y)\n\n ans = g(2, 3.)\n expected = 6.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(g, 1)(2., 3.)\n expected = jnp.cos(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_closed_over_tracer2(self):\n def outer(x):\n @api.custom_vjp\n def f(y):\n return x * y\n def f_fwd(y):\n return f(y), jnp.cos(y)\n def f_rev(cos_y, g):\n return (cos_y * g,)\n f.defvjp(f_fwd, f_rev)\n return f\n\n @api.vmap\n def g(x):\n return outer(x)(3.)\n\n ans = g(np.arange(3.))\n expected = np.arange(3.) * 3\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_closed_over_tracer3(self):\n def outer(x):\n @api.custom_vjp\n def f(y):\n return x * y\n def f_fwd(y):\n return f(y), (x, jnp.cos(y))\n def f_rev(res, g):\n x, cos_y = res\n return (cos_y * g * x,)\n f.defvjp(f_fwd, f_rev)\n return api.grad(f)\n\n @api.vmap\n def g(x):\n return outer(x)(3.)\n\n ans = g(np.arange(3.))\n expected = np.cos(3.) 
* np.arange(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_nondiff_arg_tracer_error(self):\n # This is similar to the old (now skipped) test_nondiff_arg_tracer, except\n # we're testing for the error message that that usage pattern now raises.\n\n @partial(api.custom_vjp, nondiff_argnums=(0,))\n def f(x, y):\n return x * y\n def f_fwd(x, y):\n return f(x, y), jnp.cos(y)\n def f_rev(x, cos_y, g):\n return (cos_y * g,)\n f.defvjp(f_fwd, f_rev)\n\n @jit\n def g(x, y):\n return f(x, y)\n\n with self.assertRaisesRegex(UnexpectedTracerError, \"custom_vjp\"):\n _ = g(2, 3.)\n with self.assertRaisesRegex(UnexpectedTracerError, \"custom_vjp\"):\n _ = api.grad(g, 1)(2., 3.)\n\n def test_vmap_axes(self):\n raise unittest.SkipTest(\"TODO\") # TODO(mattjj): write test\n\n def test_pmap(self):\n raise unittest.SkipTest(\"TODO\") # TODO(mattjj): write test\n\n def test_missing_vjp_rule_error(self):\n @api.custom_vjp\n def foo(x):\n return x ** 2\n\n self.assertRaisesRegex(\n AttributeError,\n r\"No VJP defined for custom_vjp function foo using defvjp.\",\n lambda: foo(2))\n self.assertRaisesRegex(\n AttributeError,\n r\"No VJP defined for custom_vjp function foo using defvjp.\",\n lambda: api.grad(foo)(2.))\n\n def test_vjp_rule_inconsistent_pytree_structures_error(self):\n @api.custom_vjp\n def f(x):\n return x\n\n def foo_fwd(x):\n return x, None\n\n def foo_bwd(_, g):\n return (g, g)\n\n f.defvjp(foo_fwd, foo_bwd)\n\n f(2) # doesn't crash\n self.assertRaisesRegex(\n TypeError,\n re.escape(\n \"Custom VJP rule must produce an output with the same container \"\n \"(pytree) structure as the args tuple of the primal function, \"\n \"and in particular must produce a tuple of length equal to the \"\n \"number of arguments to the primal function, but got VJP output \"\n \"structure {} for primal input structure {}.\".format(\n tree_util.tree_structure((1, 1)),\n tree_util.tree_structure((1,)))\n ),\n lambda: api.grad(f)(2.))\n\n def test_vjp_bwd_returns_non_tuple_error(self):\n @api.custom_vjp\n def f(x):\n return x\n\n def foo_fwd(x):\n return x, None\n\n def foo_bwd(_, g):\n return 2. * g # Should be a tuple\n\n f.defvjp(foo_fwd, foo_bwd)\n with self.assertRaisesRegex(TypeError, \"Custom VJP rule .* must produce a tuple\"):\n api.grad(f)(3.)\n\n def test_issue2511(self):\n arr = jnp.ones((5, 2, 2))\n foo = lambda x: api.vmap(jnp.linalg.det, (0,))(x)\n api.jit(foo)(arr) # doesn't crash\n\n def test_lowering_out_of_traces(self):\n # https://github.com/google/jax/issues/2578\n\n class F(collections.namedtuple(\"F\", [\"a\"])):\n def __call__(self, x):\n return jax.nn.relu(self.a) * x\n\n @jax.jit\n def g(f, x):\n return f(x)\n\n jax.grad(g, argnums=(1,))(F(2.0), 0.) 
# doesn't crash\n\n def test_clip_gradient(self):\n # https://github.com/google/jax/issues/2784\n @api.custom_vjp\n def _clip_gradient(lo, hi, x):\n return x # identity function when not differentiating\n\n def clip_gradient_fwd(lo, hi, x):\n return x, (lo, hi,)\n\n def clip_gradient_bwd(res, g):\n lo, hi = res\n return (None, None, jnp.clip(g, lo, hi),)\n\n _clip_gradient.defvjp(clip_gradient_fwd, clip_gradient_bwd)\n\n def clip_gradient(x):\n lo = -0.1\n hi = x + 0.1\n return _clip_gradient(lo, hi, x)\n\n g = jax.grad(clip_gradient)(0.1) # doesn't crash\n self.assertAllClose(g, jnp.array(0.2))\n\n def test_nestable_vjp(self):\n # Verify that https://github.com/google/jax/issues/3667 is resolved.\n def f(x):\n return x ** 2\n\n @api.custom_vjp\n def g(x):\n return f(x)\n\n def g_fwd(x):\n y, f_vjp = api.vjp(f, x)\n return y, f_vjp\n\n def g_bwd(f_vjp, y_bar):\n return f_vjp(y_bar)\n\n g.defvjp(g_fwd, g_bwd)\n\n # Check that VJP can be nested in simple situations. For this to pass,\n # vjp has to return a PyTree.\n _, g_vjp = api.vjp(g, 1.0)\n y, = g_vjp(1.0)\n self.assertAllClose(y, jnp.array(2.0))\n\n # Check that VJP can be nested in complex situations. For this to pass,\n # vjp can't treat the closed-over tracer x as a static argument.\n @jit\n def z(x):\n _, g_vjp = api.vjp(g, x)\n return g_vjp\n y, = z(1.0)(3.0)\n self.assertAllClose(y, jnp.array(6.0))\n\n def test_initial_style_vmap_2(self):\n # https://github.com/google/jax/issues/4173\n x = jnp.ones((10, 3))\n\n # Create the custom function\n @api.custom_vjp\n def custom_fun(x):\n return x.sum()\n\n def forward(x):\n return x.sum(), (jnp.ones_like(x),)\n\n def backward(res, g):\n return g * res[0],\n\n custom_fun.defvjp(forward, backward)\n\n def train_fun(x):\n\n def summed_fun(x):\n return api.vmap(custom_fun)(x).sum()\n\n return api.grad(summed_fun)(x)\n\n def scan_body(carry, inputs):\n x = carry\n return carry, train_fun(x)\n\n scan_range = jnp.arange(4)\n lax.scan(scan_body, x, scan_range) # don't crash\n\n def test_initial_style_vmap_3(self):\n # This is like test_initial_style_vmap except the primal function closes\n # over an array constant.\n y = jnp.array([1., 2., 3.])\n\n @api.custom_vjp\n def f(x):\n assert jnp.ndim(x) == 0\n return 3 * x * jnp.sum(y)\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n def foo(x):\n out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)\n return out\n\n ans = api.vmap(foo)(jnp.arange(3.))\n expected = 3. * jnp.arange(3.) * 6\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))\n expected = 2. * jnp.cos(jnp.arange(3.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_initial_style_vmap_with_collective(self):\n\n @api.custom_vjp\n def f(x):\n return lax.psum(x, 'foo')\n\n def f_fwd(x):\n return lax.psum(x, 'foo'), None\n\n def f_bwd(res, dx):\n return dx\n f.defvjp(f_fwd, f_bwd)\n\n def g(x):\n jaxpr = api.make_jaxpr(f)(x)\n return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]\n\n out = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),\n out_axes=None)(jnp.arange(4.), 2.)\n self.assertAllClose(out, 8.)\n\n def test_bwd_closes_over_tracer(self):\n def f(y):\n @jax.custom_vjp\n def f(x):\n return 2. * jnp.sin(x)\n\n def fwd(x):\n return f(x), ()\n\n def bwd(_, g):\n return (2. * jnp.cos(y) * g,) # capture!\n\n f.defvjp(fwd, bwd)\n\n return jax.grad(f)(1.)\n\n ans = jax.jit(f)(2.)\n self.assertAllClose(ans, 2. 
* jnp.cos(2.))\n\n ans = jax.vmap(f)(jnp.arange(3.))\n self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))\n\n ans = jax.jit(jax.vmap(f))(jnp.arange(3.))\n self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))\n\n ans = jax.vmap(jax.jit(f))(jnp.arange(3.))\n self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))\n\n ans = jax.grad(f)(4.)\n self.assertAllClose(ans, -2. * jnp.sin(4.))\n\n def test_fwd_closes_over_tracer(self):\n def f(y):\n @jax.custom_vjp\n def f(x):\n return 2. * jnp.sin(x)\n\n def fwd(x):\n return f(x), y\n\n def bwd(y, g):\n return (2. * jnp.cos(y) * g,) # capture!\n\n f.defvjp(fwd, bwd)\n\n return jax.grad(f)(1.)\n\n ans = jax.jit(f)(2.)\n self.assertAllClose(ans, 2. * jnp.cos(2.))\n\n ans = jax.vmap(f)(jnp.arange(3.))\n self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))\n\n ans = jax.jit(jax.vmap(f))(jnp.arange(3.))\n self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))\n\n ans = jax.vmap(jax.jit(f))(jnp.arange(3.))\n self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))\n\n ans = jax.grad(f)(4.)\n self.assertAllClose(ans, -2. * jnp.sin(4.))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_float0(self):\n @api.custom_vjp\n def f(x, _):\n return x\n def f_fwd(x, _):\n # we need a defined (non-float0) tangent to trigger the rule\n return x, (2., 1)\n def f_rev(*_):\n return (2., 1)\n f.defvjp(f_fwd, f_rev)\n\n x = 2.\n y = 3\n self.assertEqual(api.grad(f, allow_int=True, argnums=(0, 1))(x, y),\n (2., np.zeros(shape=(), dtype=float0)))\n\n @unittest.skipIf(numpy_version == (1, 21, 0),\n \"https://github.com/numpy/numpy/issues/19305\")\n def test_float0_initial_style(self):\n @api.custom_vjp\n def f(x):\n return x\n def f_fwd(x):\n return x, (2., x)\n def f_rev(*_):\n return ((2., 1),)\n f.defvjp(f_fwd, f_rev)\n\n def foo(x, y):\n out, _ = lax.scan(lambda c, _: (f(c), None), (x, y), None, length=1)\n return out[0]\n\n x = 2.\n y = 3\n self.assertEqual(api.grad(foo, allow_int=True, argnums=(0, 1))(x, y),\n (2., np.zeros(shape=(), dtype=float0)))\n\n def test_remat(self):\n @api.custom_vjp\n def f(x):\n return jnp.sin(x)\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n @api.remat\n def g(x):\n return f(f(x))\n\n ans = g(2.)\n expected = np.sin(np.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(g)(2.)\n expected = 4. 
* api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_higher_order(self):\n @api.custom_vjp\n def f(x):\n return jnp.sin(x)\n def f_fwd(x):\n return f(x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (2 * cos_x * g,)\n f.defvjp(f_fwd, f_rev)\n\n def g(x):\n return f(f(x))\n\n ans = api.grad(api.grad(api.remat(g)))(2.)\n expected = api.grad(api.grad(g))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.remat(api.grad(g)))(2.)\n expected = api.grad(api.grad(g))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)\n expected = api.grad(api.grad(api.grad(g)))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_bwd_nones(self):\n @api.custom_vjp\n def f(x, y):\n return x * jnp.sin(y)\n def f_fwd(x, y):\n return f(x, y), jnp.cos(y)\n def f_rev(cos, g):\n return (None, 2 * cos * g)\n f.defvjp(f_fwd, f_rev)\n\n ans = api.grad(lambda x: f(x, x))(3.)\n expected = 2 * jnp.cos(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_bwd_nones_vmap(self):\n @api.custom_vjp\n def f(x, y):\n return x * jnp.sin(y)\n def f_fwd(x, y):\n return f(x, y), jnp.cos(y)\n def f_rev(cos, g):\n return (None, 2 * cos * g)\n f.defvjp(f_fwd, f_rev)\n\n ans = api.grad(lambda x: api.vmap(f)(x, x).sum())(jnp.arange(3.))\n expected = 2 * jnp.cos(jnp.arange(3.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_bwd_nones_pytree(self):\n @api.custom_vjp\n def f(xs, y):\n x1, x2 = xs\n return x1 * x2 * jnp.sin(y)\n def f_fwd(xs, y):\n return f(xs, y), jnp.cos(y)\n def f_rev(cos, g):\n return (None, 2 * cos * g)\n f.defvjp(f_fwd, f_rev)\n\n ans = api.grad(lambda x: f((x, x), x))(3.)\n expected = 2 * jnp.cos(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_custom_vjp_closure_4521(self):\n # https://github.com/google/jax/issues/4521\n @api.custom_vjp\n def g(x, y):\n return None\n def g_fwd(x, y):\n return None, y\n def g_bwd(residuals, z_bar):\n assert False\n\n g.defvjp(g_fwd, g_bwd)\n\n def f(xs, y):\n v_g = api.vmap(g, in_axes=(0, None), out_axes=None)\n v_g(xs, y)\n\n def scan_body(xs, _):\n y = jnp.zeros(1)\n _, vjp_f = api.vjp(f, xs, y)\n vjp_f(None)\n return xs, None\n\n lax.scan(scan_body, jnp.ones(5), None, 100) # doesn't crash\n\n def test_float0_bwd_none(self):\n @api.custom_vjp\n def f(i, x):\n return jnp.sin(x)\n def f_fwd(i, x):\n return f(i, x), jnp.cos(x)\n def f_rev(cos_x, g):\n return (None, 2 * cos_x * g)\n f.defvjp(f_fwd, f_rev)\n\n ans = api.grad(f, 1)(jnp.array([1, 2]), 3.) 
# doesn't crash\n expected = 2 * jnp.cos(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_custom_gradient(self):\n @api.custom_gradient\n def f(x):\n return x ** 2, lambda g: (g * x,)\n\n self.assertAllClose(f(3.), 9., check_dtypes=False)\n self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)\n self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)\n\n def test_custom_gradient_2(self):\n @api.custom_gradient\n def f(x, y):\n return x * y, lambda g: (y, x)\n\n self.assertAllClose(f(3., 4.), 12., check_dtypes=False)\n self.assertAllClose(api.grad(f, argnums=(0, 1))(3., 4.), (4., 3.),\n check_dtypes=False)\n\n def test_custom_gradient_3(self):\n @api.custom_gradient\n def f(x):\n vjp = lambda g: (jnp.cos(x) * jnp.array([3., 4., 5.]),)\n return jnp.sum(jnp.sin(x)), vjp\n\n self.assertAllClose(f(jnp.arange(3)), jnp.sum(jnp.sin(jnp.arange(3.))),\n check_dtypes=False)\n self.assertAllClose(\n api.grad(f)(jnp.arange(3.)),\n api.grad(lambda x: jnp.sum(jnp.sin(x)))(jnp.arange(3.)) * jnp.array([3., 4., 5.]),\n check_dtypes=False)\n\n def test_custom_gradient_can_return_singleton_value_in_vjp(self):\n @api.custom_gradient\n def f(x):\n return x ** 2, lambda g: g * x\n\n self.assertAllClose(f(3.), 9., check_dtypes=False)\n self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)\n self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)\n\n def test_closure_convert(self):\n def cos_after(fn, x):\n converted_fn, aux_args = api.closure_convert(fn, x)\n self.assertLessEqual(len(aux_args), 1)\n return _cos_after(converted_fn, x, *aux_args)\n\n @partial(api.custom_vjp, nondiff_argnums=(0,))\n def _cos_after(fn, x, *args):\n return jnp.cos(fn(x, *args))\n\n def fwd(fn, x, *args):\n y = _cos_after(fn, x, *args)\n return y, (x, args)\n\n def rev(fn, res, g):\n x, args = res\n x_bar = 17. * x\n args_bars = [42. * a for a in args]\n return (x_bar, *args_bars)\n\n _cos_after.defvjp(fwd, rev)\n\n def dist(c, x):\n return jnp.sum((x - c) ** 2.)\n\n def solve(c, x):\n def closure(x):\n return dist(c, x)\n return cos_after(closure, x)\n\n c, x = 2. * jnp.ones(2), jnp.ones(2)\n expected = jnp.cos(dist(c, x))\n self.assertAllClose(solve(c, x), expected, check_dtypes=False)\n g_c, g_x = api.grad(solve, argnums=(0, 1))(c, x)\n self.assertAllClose(g_c, 42. * c, check_dtypes=False)\n self.assertAllClose(g_x, 17. * x, check_dtypes=False)\n\n def test_closure_convert_mixed_consts(self):\n # Like test_closure_convert, but close over values that\n # participate in AD as well as values that do not.\n # See https://github.com/google/jax/issues/6415\n\n def cos_after(fn, x):\n converted_fn, aux_args = api.closure_convert(fn, x)\n self.assertLessEqual(len(aux_args), 1)\n return _cos_after(converted_fn, x, *aux_args)\n\n @partial(api.custom_vjp, nondiff_argnums=(0,))\n def _cos_after(fn, x, *args):\n return jnp.cos(fn(x, *args))\n\n def fwd(fn, x, *args):\n y = _cos_after(fn, x, *args)\n return y, (x, args)\n\n def rev(fn, res, g):\n x, args = res\n x_bar = 17. * x\n args_bars = [42. * a for a in args]\n return (x_bar, *args_bars)\n\n _cos_after.defvjp(fwd, rev)\n\n def dist(c, s, x):\n return jnp.sum(s * (x - c) ** 2.)\n\n def solve(c, s, x):\n def closure(x):\n return dist(c, s, x)\n return cos_after(closure, x)\n\n c, s, x = 2. * jnp.ones(2), 3. * jnp.ones(2), jnp.ones(2)\n expected = jnp.cos(dist(c, s, x))\n self.assertAllClose(solve(c, s, x), expected, check_dtypes=False)\n g_c, g_x = api.grad(solve, argnums=(0, 2))(c, s, x)\n self.assertAllClose(g_c, 42. 
* c, check_dtypes=False)\n self.assertAllClose(g_x, 17. * x, check_dtypes=False)\n\n def test_float0_cotangents_automatically_handled(self):\n @jax.custom_vjp\n def f(x, y):\n return x\n\n def f_fwd(x, y):\n return x, None\n\n def f_bwd(_, zbar):\n return (0., 1)\n\n f.defvjp(f_fwd, f_bwd)\n\n jax.jit(lambda x: jax.vjp(f, 0., x)[1](1.))(1) # doesn't crash\n\n\nclass CustomTransposeTest(jtu.JaxTestCase):\n\n def transpose(self, f, x_example):\n def transposed(y):\n x, = api.linear_transpose(f, x_example)(y)\n return x\n return transposed\n\n def test_linear_call(self):\n def f(x, y):\n def fn(r, x): return x / r\n def tp(r, t): return t / r\n return x + api.linear_call(fn, tp, y, x)\n\n def f_ref(x, y):\n return x + x / y\n\n x = jnp.ones(2) * 6.\n y = jnp.ones(2) * 3.\n self.assertAllClose(f(x, y), f_ref(x, y))\n\n f1 = lambda x: f(x, y)\n f1_ref = lambda x: f_ref(x, y)\n self.assertAllClose(self.transpose(f1, x)(x),\n self.transpose(f1_ref, x)(x))\n\n def test_linear_call_incorrect_transpose(self):\n def f(x, y):\n def fn(r, x): return x / r\n def tp(r, t): return t / (2. * r) # nb: not the true transpose\n return x + api.linear_call(fn, tp, y, x)\n\n def f_ref(x, y):\n return x + x / y\n\n x = jnp.ones(2) * 6.\n y = jnp.ones(2) * 3.\n self.assertAllClose(f(x, y), f_ref(x, y))\n\n f1 = lambda x: f(x, y)\n f1_ref = lambda x: f_ref(x, 2. * y) # nb: double the reference divisor\n self.assertAllClose(self.transpose(f1, x)(x),\n self.transpose(f1_ref, x)(x))\n\n def test_linear_call_transpose_transpose_transpose(self):\n def fn(r, x): return x / r\n def tp(r, t): return t / (2. * r) # nb: untrue transpose\n def f_(x, y):\n return x + api.linear_call(fn, tp, y, x)\n\n x = jnp.ones(2) * 6.\n y = jnp.ones(2) * 3.\n f = lambda x: f_(x, y)\n ft = self.transpose(f, x)\n ftt = self.transpose(ft, x)\n fttt = self.transpose(ftt, x)\n self.assertAllClose(ft(x), x + tp(y, x))\n self.assertAllClose(f(x), ftt(x))\n self.assertAllClose(ft(x), fttt(x))\n\n def test_linear_call_scalar_to_vector(self):\n def f(c, x):\n def fn(_, x):\n return [x, x]\n\n def tp(_, t):\n t1, t2 = t\n return t1 + t2\n\n return api.linear_call(fn, tp, (), c * x)\n\n def f_ref(c, x):\n return [c * x, c * x]\n\n c, x = 2., 3.\n t = [4., 5.]\n self.assertAllClose(f(c, x), f_ref(c, x))\n self.assertAllClose(self.transpose(partial(f, c), x)(t),\n self.transpose(partial(f_ref, c), x)(t))\n\n def test_linear_call_nested(self):\n # identity function with an untrue transpose of 0\n def id_(x):\n def f(_, x): return x\n def t(_, t): return 0.\n return api.linear_call(f, t, (), x)\n\n # identity function with an untrue transpose of 7, and where both\n # forward and transpose have custom transpositions that should\n # never end up invoked.\n def f(x):\n def f_(_, x): return id_(x)\n def t_(_, t): return id_(7.)\n return api.linear_call(f_, t_, (), x)\n\n x = 5.\n id_t = self.transpose(id_, x)\n id_tt = self.transpose(id_t, x)\n ft = self.transpose(f, x)\n ftt = self.transpose(ft, x)\n fttt = self.transpose(ftt, x)\n\n self.assertAllClose(id_(x), x)\n self.assertAllClose(id_t(x), 0.)\n self.assertAllClose(id_tt(x), x)\n\n self.assertAllClose(f(x), x)\n self.assertAllClose(ft(x), 7.)\n self.assertAllClose(ftt(x), x)\n self.assertAllClose(fttt(x), 7.)\n\n def test_linear_call_jit(self):\n def f(x, y):\n def fn(r, x): return x / r\n def tp(r, t): return t / r\n return x + api.linear_call(fn, tp, y, x)\n\n x = jnp.ones(2) * 6.\n y = jnp.ones(2) * 3.\n self.assertAllClose(f(x, y), jax.jit(f)(x, y))\n\n f1 = lambda x: f(x, y)\n 
self.assertAllClose(self.transpose(f1, x)(x),\n jax.jit(self.transpose(f1, x))(x))\n\n\nclass InvertibleADTest(jtu.JaxTestCase):\n\n @jtu.ignore_warning(message=\"Values that an @invertible function closes\")\n def test_invertible_basic(self):\n def f(x):\n return lax.mul(lax.mul(lax.exp(x), 4.), x)\n\n finv = jax.invertible(f)\n x = jnp.ones((5,))\n\n jaxpr = jax.make_jaxpr(lambda p, ct: jax.vjp(finv, p)[1](ct))(x, x)\n\n # expected = \"\"\"\n # { lambda ; a b.\n # let c = exp a\n # d = mul c 4.0\n # e = mul d a\n # f = mul b a\n # g = div e a\n # h = mul b g\n # i = mul f 4.0\n # j = div g 4.0\n # k = mul f j\n # _ = reduce_sum[ axes=(0,) ] k\n # _ = log j\n # l = mul i j\n # m = add_any h l\n # in (m,) }\n # \"\"\"\n # self.assertMultiLineStrippedEqual(expected, str(jaxpr)) # no jaxpr test\n\n self.assertIn('div', str(jaxpr))\n self.assertIn('log', str(jaxpr)) # assumes no DCE\n self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x)))(x),\n jax.value_and_grad(lambda x: np.sum(finv(x)))(x),\n check_dtypes=True)\n\n def test_invertible_blocks(self):\n # NB: This is the reversible ResNet block\n def mk_reversible_block(f, g):\n @jax.custom_ivjp\n def rev_block(x1, x2):\n y1 = f(x2) + x1\n y2 = g(y1) + x2\n return y1, y2\n\n @rev_block.defivjp\n def rev_block_ivjp(xs, ys, dys):\n (y1, y2) = ys\n (dy1, dy2) = dys\n\n dgo, dx2 = dy2, dy2\n go, gvjp = jax.vjp(g, y1)\n dy1 += gvjp(dgo)[0]\n del gvjp\n x2 = y2 - go\n\n dfo, dx1 = dy1, dy1\n fo, fvjp = jax.vjp(f, x2)\n dx2 += fvjp(dfo)[0]\n del fvjp\n x1 = y1 - fo\n\n return (x1, x2), (dx1, dx2)\n\n return rev_block\n\n rev_block = mk_reversible_block(jnp.sin, jnp.cos)\n\n def g(x1, x2):\n for i in range(2):\n x1, x2 = rev_block(x1, x2)\n return x1, x2\n\n def reduce(f, x1, x2):\n y1, y2 = f(x1, x2)\n return np.sum(y1) + np.sum(y2)\n\n x = np.ones((1,))\n # FIXME: This breaks when argnums is left as default (i.e. 0), because JVP prunes\n # zero tangents from call primitives.\n self.assertAllClose(jax.value_and_grad(partial(reduce, jax.invertible(g)), argnums=(0, 1))(x, x + 2),\n jax.value_and_grad(partial(reduce, g), argnums=(0, 1))(x, x + 2),\n check_dtypes=True)\n\n def test_invertible_partial_diff(self):\n # Check that we don't have to differentiate with respect to inputs\n # of the invertible function.\n def f(x, y):\n return lax.mul(lax.mul(lax.exp(x), 4.), x), lax.add(y, 4.)\n\n finv = jax.invertible(f)\n o = np.ones((5,))\n self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x, o)[0]))(o),\n jax.value_and_grad(lambda x: np.sum(finv(x, o)[0]))(o),\n check_dtypes=True)\n\n def test_invertible_pytree(self):\n def f(x, y):\n return lax.add(lax.mul(lax.exp(x[0]), x[1]), y)\n\n finv = jax.invertible(f)\n o = np.ones((5,))\n self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f((x, x), x)[0]))(o),\n jax.value_and_grad(lambda x: np.sum(finv((x, x), x)[0]))(o),\n check_dtypes=True)\n\n\nclass BufferDonationTest(jtu.BufferDonationTestCase):\n\n @jtu.skip_on_devices(\"cpu\") # In/out aliasing not supported on CPU.\n def test_pmap_donate_argnums_invalidates_input(self):\n move = api.pmap(lambda x: x + x - x, donate_argnums=0)\n n = jax.local_device_count()\n x = api.pmap(lambda x: x)(jnp.ones([n]))\n y = move(x)\n self.assertDeleted(x)\n np.testing.assert_allclose(y, [1.] 
* n)\n\n def test_pmap_nested_donate_ignored(self):\n pmap_fun = jit(lambda x: api.pmap(lambda y: y ** 2, donate_argnums=0)(x))\n a = api.pmap(lambda x: x)(jnp.array([1]))\n\n # NOTE(mattjj): stopped raising error here and instead just ignored\n # with self.assertRaisesRegex(ValueError, \"nested.*not supported\"):\n # pmap_fun(a)\n\n pmap_fun(a) # doesn't crash\n\n\nclass NamedCallTest(jtu.JaxTestCase):\n\n def test_default_name(self):\n\n @api.named_call\n def my_test_function(x):\n return x**2\n\n @jax.jit\n def f(x):\n return my_test_function(x)\n\n c = jax.xla_computation(f)(2)\n self.assertIn(\"my_test_function\", c.as_hlo_text())\n\n def test_non_jaxtype_arg(self):\n # For the test to fail without the invalid JaxType filter we need to pass\n # in a valid JaxType that forces the invalid Jaxtype to be raised to an\n # abstract value.\n def f(not_a_jaxtype, a_jaxtype):\n # then Jax needs to try and evaluate the abstractified non-JaxType\n if not_a_jaxtype:\n return a_jaxtype\n return 0\n\n f = api.named_call(f, name=\"test\")\n out = jax.jit(f, static_argnums=(0,))(\"not a Jaxtype\", 1)\n self.assertEqual(out, 1)\n\n @parameterized.parameters(jax.jit, jax.grad, jax.vmap, jax.remat)\n def test_jax_transforms(self, transform):\n f = jnp.sum\n x = jnp.array([1.])\n\n unnamed_out = transform(f)(x)\n named_out = transform(api.named_call(f, name=\"test\"))(x)\n\n self.assertEqual(unnamed_out, named_out)\n\n def test_static_argnums(self):\n f = api.named_call(lambda x, y: y if x else None, name=\"test\")\n f = jax.jit(f, static_argnums=(0,))\n out = f(True, 5)\n self.assertEqual(out, 5)\n\n def test_partial_eval(self):\n f = api.named_call(lambda x, y: y if x else None, name=\"test\")\n f = jax.jit(functools.partial(f, True))\n out = f(5)\n self.assertEqual(out, 5)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_jit_type={}_func={}\".format(jit_type, func),\n \"jit_type\": jit_type, \"func\": func}\n for func in ['identity', 'asarray', 'device_put']\n for jit_type in [None, \"python\", \"cpp\"]\n if not (jit_type is None and func == 'identity')))\n def test_integer_overflow(self, jit_type, func):\n funcdict = {\n 'identity': lambda x: x,\n 'asarray': jnp.asarray,\n 'device_put': api.device_put,\n }\n jit = {\n 'python': api._python_jit,\n 'cpp': api._cpp_jit,\n None: lambda x: x,\n }\n f = jit[jit_type](funcdict[func])\n\n int_dtype = dtypes.canonicalize_dtype(jnp.int_)\n int_max = np.iinfo(int_dtype).max\n int_min = np.iinfo(int_dtype).min\n\n self.assertEqual(f(int_max).dtype, int_dtype)\n self.assertEqual(f(int_min).dtype, int_dtype)\n self.assertRaises(OverflowError, f, int_max + 1)\n self.assertRaises(OverflowError, f, int_min - 1)\n\n\nclass BackendsTest(jtu.JaxTestCase):\n\n @unittest.skipIf(not sys.executable, \"test requires sys.executable\")\n @jtu.skip_on_devices(\"gpu\", \"tpu\")\n def test_cpu_warning_suppression(self):\n warning_expected = (\n \"import jax; \"\n \"jax.numpy.arange(10)\")\n warning_not_expected = (\n \"import jax; \"\n \"jax.config.update('jax_platform_name', 'cpu'); \"\n \"jax.numpy.arange(10)\")\n\n result = subprocess.run([sys.executable, '-c', warning_expected],\n check=True, capture_output=True)\n assert \"No GPU/TPU found\" in result.stderr.decode()\n\n result = subprocess.run([sys.executable, '-c', warning_not_expected],\n check=True, capture_output=True)\n assert \"No GPU/TPU found\" not in result.stderr.decode()\n\n\nif __name__ == '__main__':\n absltest.main(testLoader=jtu.JaxTestLoader())\n"
] |
[
[
"numpy.__version__.split",
"numpy.asarray",
"numpy.dtype",
"numpy.all",
"numpy.random.randn",
"numpy.iinfo",
"numpy.exp",
"numpy.arange",
"numpy.eye",
"numpy.float16",
"numpy.sin",
"numpy.float32",
"numpy.zeros",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"numpy.int32",
"numpy.cos",
"numpy.ones",
"numpy.random.uniform"
]
] |
borisdayma/datasets
|
[
"ab6d9759b8b15c0109947159ff1cb6cb3486fdb8"
] |
[
"src/datasets/utils/file_utils.py"
] |
[
"\"\"\"\nUtilities for working with the local dataset cache.\nThis file is adapted from the AllenNLP library at https://github.com/allenai/allennlp\nCopyright by the AllenNLP authors.\n\"\"\"\n\nimport copy\nimport gzip\nimport json\nimport lzma\nimport os\nimport re\nimport shutil\nimport sys\nimport tarfile\nimport tempfile\nimport time\nimport urllib\nfrom contextlib import closing, contextmanager\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom hashlib import sha256\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\nfrom urllib.parse import urlparse\nfrom zipfile import ZipFile, is_zipfile\n\nimport numpy as np\nimport posixpath\nimport pyarrow as pa\nimport requests\nfrom tqdm.auto import tqdm\n\nfrom .. import __version__, config\nfrom .filelock import FileLock\nfrom .logging import WARNING, get_logger\n\n\nlogger = get_logger(__name__) # pylint: disable=invalid-name\n\nINCOMPLETE_SUFFIX = \".incomplete\"\n\n\ndef init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:\n \"\"\"\n Add hf_modules_cache to the python path.\n By default hf_modules_cache='~/.cache/huggingface/modules'.\n It can also be set with the environment variable HF_MODULES_CACHE.\n This is used to add modules such as `datasets_modules`\n \"\"\"\n hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE\n hf_modules_cache = str(hf_modules_cache)\n if hf_modules_cache not in sys.path:\n sys.path.append(hf_modules_cache)\n\n os.makedirs(hf_modules_cache, exist_ok=True)\n if not os.path.exists(os.path.join(hf_modules_cache, \"__init__.py\")):\n with open(os.path.join(hf_modules_cache, \"__init__.py\"), \"w\"):\n pass\n return hf_modules_cache\n\n\n@contextmanager\ndef temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):\n \"\"\"Temporarily set the random seed. 
This works for python numpy, pytorch and tensorflow.\"\"\"\n np_state = np.random.get_state()\n np.random.seed(seed)\n\n if set_pytorch and config.TORCH_AVAILABLE:\n import torch\n\n torch_state = torch.random.get_rng_state()\n torch.random.manual_seed(seed)\n\n if torch.cuda.is_available():\n torch_cuda_states = torch.cuda.get_rng_state_all()\n torch.cuda.manual_seed_all(seed)\n\n if set_tensorflow and config.TF_AVAILABLE:\n import tensorflow as tf\n from tensorflow.python import context as tfpycontext\n\n tf_state = tf.random.get_global_generator()\n temp_gen = tf.random.Generator.from_seed(seed)\n tf.random.set_global_generator(temp_gen)\n\n if not tf.executing_eagerly():\n raise ValueError(\"Setting random seed for TensorFlow is only available in eager mode\")\n\n tf_context = tfpycontext.context() # eager mode context\n tf_seed = tf_context._seed\n tf_rng_initialized = hasattr(tf_context, \"_rng\")\n if tf_rng_initialized:\n tf_rng = tf_context._rng\n tf_context._set_global_seed(seed)\n\n try:\n yield\n finally:\n np.random.set_state(np_state)\n\n if set_pytorch and config.TORCH_AVAILABLE:\n torch.random.set_rng_state(torch_state)\n if torch.cuda.is_available():\n torch.cuda.set_rng_state_all(torch_cuda_states)\n\n if set_tensorflow and config.TF_AVAILABLE:\n tf.random.set_global_generator(tf_state)\n\n tf_context._seed = tf_seed\n if tf_rng_initialized:\n tf_context._rng = tf_rng\n else:\n delattr(tf_context, \"_rng\")\n\n\ndef is_remote_url(url_or_filename: str) -> bool:\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\", \"s3\", \"gs\", \"hdfs\", \"ftp\")\n\n\ndef is_local_path(url_or_filename: str) -> bool:\n # On unix the scheme of a local path is empty (for both absolute and relative),\n # while on windows the scheme is the drive name (ex: \"c\") for absolute paths.\n # for details on the windows behavior, see https://bugs.python.org/issue42215\n return urlparse(url_or_filename).scheme == \"\" or os.path.ismount(urlparse(url_or_filename).scheme + \":/\")\n\n\ndef is_relative_path(url_or_filename: str) -> bool:\n return urlparse(url_or_filename).scheme == \"\" and not os.path.isabs(url_or_filename)\n\n\ndef hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:\n if dataset:\n endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX\n else:\n endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX\n return \"/\".join((endpoint, identifier, filename))\n\n\ndef head_hf_s3(\n identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0\n) -> Union[requests.Response, Exception]:\n try:\n return http_head(\n hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),\n max_retries=max_retries,\n )\n except Exception as e:\n return e\n\n\ndef hf_github_url(path: str, name: str, dataset=True, version: Optional[str] = None) -> str:\n from .. 
import SCRIPTS_VERSION\n\n version = version or os.getenv(\"HF_SCRIPTS_VERSION\", SCRIPTS_VERSION)\n if dataset:\n return config.REPO_DATASETS_URL.format(version=version, path=path, name=name)\n else:\n return config.REPO_METRICS_URL.format(version=version, path=path, name=name)\n\n\ndef hf_hub_url(path: str, name: str, version: Optional[str] = None) -> str:\n version = version or config.HUB_DEFAULT_VERSION\n return config.HUB_DATASETS_URL.format(path=path, name=name, version=version)\n\n\ndef url_or_path_join(base_name: str, *pathnames: str) -> str:\n if is_remote_url(base_name):\n return posixpath.join(base_name, *pathnames)\n else:\n return Path(base_name, *pathnames).as_posix()\n\n\ndef url_or_path_parent(url_or_path: str) -> str:\n if is_remote_url(url_or_path):\n return url_or_path[: url_or_path.rindex(\"/\")]\n else:\n return os.path.dirname(url_or_path)\n\n\ndef hash_url_to_filename(url, etag=None):\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name\n so that TF 2.0 can identify it as a HDF5 file\n (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)\n \"\"\"\n url_bytes = url.encode(\"utf-8\")\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode(\"utf-8\")\n etag_hash = sha256(etag_bytes)\n filename += \".\" + etag_hash.hexdigest()\n\n if url.endswith(\".py\"):\n filename += \".py\"\n\n return filename\n\n\n@dataclass\nclass DownloadConfig:\n \"\"\"Configuration for our cached path manager.\n\n Attributes:\n cache_dir (:obj:`str` or :obj:`Path`, optional): Specify a cache directory to save the file to (overwrite the\n default cache dir).\n force_download (:obj:`bool`, default ``False``): If True, re-dowload the file even if it's already cached in\n the cache dir.\n resume_download (:obj:`bool`, default ``False``): If True, resume the download if incompletly recieved file is\n found.\n proxies (:obj:`dict`, optional):\n user_agent (:obj:`str`, optional): Optional string or dict that will be appended to the user-agent on remote\n requests.\n extract_compressed_file (:obj:`bool`, default ``False``): If True and the path point to a zip or tar file,\n extract the compressed file in a folder along the archive.\n force_extract (:obj:`bool`, default ``False``): If True when extract_compressed_file is True and the archive\n was already extracted, re-extract the archive and override the folder where it was extracted.\n use_etag (:obj:`bool`, default ``True``):\n num_proc (:obj:`int`, optional):\n max_retries (:obj:`int`, default ``1``): The number of times to retry an HTTP request if it fails.\n use_auth_token (:obj:`str` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token\n for remote files on the Datasets Hub. 
If True, will get token from ~/.huggingface.\n \"\"\"\n\n cache_dir: Optional[Union[str, Path]] = None\n force_download: bool = False\n resume_download: bool = False\n local_files_only: bool = False\n proxies: Optional[Dict] = None\n user_agent: Optional[str] = None\n extract_compressed_file: bool = False\n force_extract: bool = False\n use_etag: bool = True\n num_proc: Optional[int] = None\n max_retries: int = 1\n use_auth_token: Optional[Union[str, bool]] = None\n\n def copy(self) -> \"DownloadConfig\":\n return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})\n\n\ndef cached_path(\n url_or_filename,\n download_config=None,\n **download_kwargs,\n) -> str:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. If it's already a local path,\n make sure the file exists and then return the path.\n\n Return:\n Local path (string)\n\n Raises:\n FileNotFoundError: in case of non-recoverable file\n (non-existent or no cache on disk)\n ConnectionError: in case of unreachable url\n and no cache on disk\n ValueError: if it couldn't parse the url or filename correctly\n requests.exceptions.ConnectionError: in case of internet connection issue\n \"\"\"\n if download_config is None:\n download_config = DownloadConfig(**download_kwargs)\n\n cache_dir = download_config.cache_dir or os.path.join(config.HF_DATASETS_CACHE, \"downloads\")\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n\n if is_remote_url(url_or_filename):\n # URL, so get it from the cache (downloading if necessary)\n output_path = get_from_cache(\n url_or_filename,\n cache_dir=cache_dir,\n force_download=download_config.force_download,\n proxies=download_config.proxies,\n resume_download=download_config.resume_download,\n user_agent=download_config.user_agent,\n local_files_only=download_config.local_files_only,\n use_etag=download_config.use_etag,\n max_retries=download_config.max_retries,\n use_auth_token=download_config.use_auth_token,\n )\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n output_path = url_or_filename\n elif is_local_path(url_or_filename):\n # File, but it doesn't exist.\n raise FileNotFoundError(\"Local file {} doesn't exist\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))\n\n if download_config.extract_compressed_file and output_path is not None:\n\n if (\n not is_zipfile(output_path)\n and not tarfile.is_tarfile(output_path)\n and not is_gzip(output_path)\n and not is_xz(output_path)\n and not is_rarfile(output_path)\n ):\n return output_path\n\n # Path where we extract compressed archives\n # We extract in the cache dir, and get the extracted path name by hashing the original path\"\n abs_output_path = os.path.abspath(output_path)\n output_path_extracted = os.path.join(cache_dir, \"extracted\", hash_url_to_filename(abs_output_path))\n\n if (\n os.path.isdir(output_path_extracted)\n and os.listdir(output_path_extracted)\n and not download_config.force_extract\n ) or (os.path.isfile(output_path_extracted) and not download_config.force_extract):\n return output_path_extracted\n\n # Prevent parallel extractions\n lock_path = output_path + \".lock\"\n with FileLock(lock_path):\n shutil.rmtree(output_path_extracted, ignore_errors=True)\n 
os.makedirs(output_path_extracted, exist_ok=True)\n if tarfile.is_tarfile(output_path):\n tar_file = tarfile.open(output_path)\n tar_file.extractall(output_path_extracted)\n tar_file.close()\n elif is_gzip(output_path):\n os.rmdir(output_path_extracted)\n with gzip.open(output_path, \"rb\") as gzip_file:\n with open(output_path_extracted, \"wb\") as extracted_file:\n shutil.copyfileobj(gzip_file, extracted_file)\n elif is_zipfile(output_path): # put zip file to the last, b/c it is possible wrongly detected as zip\n with ZipFile(output_path, \"r\") as zip_file:\n zip_file.extractall(output_path_extracted)\n zip_file.close()\n elif is_xz(output_path):\n os.rmdir(output_path_extracted)\n with lzma.open(output_path) as compressed_file:\n with open(output_path_extracted, \"wb\") as extracted_file:\n shutil.copyfileobj(compressed_file, extracted_file)\n elif is_rarfile(output_path):\n if config.RARFILE_AVAILABLE:\n import rarfile\n\n rf = rarfile.RarFile(output_path)\n rf.extractall(output_path_extracted)\n rf.close()\n else:\n raise EnvironmentError(\"Please pip install rarfile\")\n else:\n raise EnvironmentError(\"Archive format of {} could not be identified\".format(output_path))\n\n return output_path_extracted\n\n return output_path\n\n\ndef get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:\n ua = \"datasets/{}; python/{}\".format(__version__, config.PY_VERSION)\n ua += \"; pyarrow/{}\".format(pa.__version__)\n if config.TORCH_AVAILABLE:\n ua += \"; torch/{}\".format(config.TORCH_VERSION)\n if config.TF_AVAILABLE:\n ua += \"; tensorflow/{}\".format(config.TF_VERSION)\n if config.BEAM_AVAILABLE:\n ua += \"; apache_beam/{}\".format(config.BEAM_VERSION)\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(\"{}/{}\".format(k, v) for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua\n\n\ndef get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict:\n \"\"\"Handle the HF authentication\"\"\"\n headers = {}\n if url.startswith(\"https://huggingface.co/\"):\n token = None\n if isinstance(use_auth_token, str):\n token = use_auth_token\n elif bool(use_auth_token):\n from huggingface_hub import hf_api\n\n token = hf_api.HfFolder.get_token()\n if token:\n headers[\"authorization\"] = \"Bearer {}\".format(token)\n return headers\n\n\nclass OfflineModeIsEnabled(ConnectionError):\n pass\n\n\ndef _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):\n \"\"\"Raise a OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True.\"\"\"\n if config.HF_DATASETS_OFFLINE:\n raise OfflineModeIsEnabled(\n \"Offline mode is enabled.\" if msg is None else \"Offline mode is enabled. \" + str(msg)\n )\n\n\ndef _request_with_retry(\n method: str,\n url: str,\n max_retries: int = 0,\n base_wait_time: float = 0.5,\n max_wait_time: float = 2,\n timeout: float = 10.0,\n **params,\n) -> requests.Response:\n \"\"\"Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.\n\n Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised.\n\n Args:\n method (str): HTTP method, such as 'GET' or 'HEAD'\n url (str): The URL of the ressource to fetch\n max_retries (int): Maximum number of retries, defaults to 0 (no retries)\n base_wait_time (float): Duration (in seconds) to wait before retrying the first time. 
Wait time between\n retries then grows exponentially, capped by max_wait_time.\n max_wait_time (float): Maximum amount of time between two retries, in seconds\n **params: Params to pass to `requests.request`\n \"\"\"\n _raise_if_offline_mode_is_enabled(f\"Tried to reach {url}\")\n tries, success = 0, False\n while not success:\n tries += 1\n try:\n response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)\n success = True\n except requests.exceptions.ConnectTimeout as err:\n if tries > max_retries:\n raise err\n else:\n logger.info(f\"{method} request to {url} timed out, retrying... [{tries/max_retries}]\")\n sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff\n time.sleep(sleep_time)\n return response\n\n\ndef ftp_head(url, timeout=10.0):\n _raise_if_offline_mode_is_enabled(f\"Tried to reach {url}\")\n try:\n with closing(urllib.request.urlopen(url, timeout=timeout)) as r:\n r.read(1)\n except Exception:\n return False\n return True\n\n\ndef ftp_get(url, temp_file, timeout=10.0):\n _raise_if_offline_mode_is_enabled(f\"Tried to reach {url}\")\n try:\n logger.info(f\"Getting through FTP {url} into {temp_file.name}\")\n with closing(urllib.request.urlopen(url, timeout=timeout)) as r:\n shutil.copyfileobj(r, temp_file)\n except urllib.error.URLError as e:\n raise ConnectionError(e)\n\n\ndef http_get(url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=10.0, max_retries=0):\n headers = copy.deepcopy(headers) or {}\n headers[\"user-agent\"] = get_datasets_user_agent(user_agent=headers.get(\"user-agent\"))\n if resume_size > 0:\n headers[\"Range\"] = \"bytes=%d-\" % (resume_size,)\n response = _request_with_retry(\n method=\"GET\",\n url=url,\n stream=True,\n proxies=proxies,\n headers=headers,\n cookies=cookies,\n max_retries=max_retries,\n timeout=timeout,\n )\n if response.status_code == 416: # Range not satisfiable\n return\n content_length = response.headers.get(\"Content-Length\")\n total = resume_size + int(content_length) if content_length is not None else None\n not_verbose = bool(logger.getEffectiveLevel() > WARNING)\n progress = tqdm(\n unit=\"B\",\n unit_scale=True,\n total=total,\n initial=resume_size,\n desc=\"Downloading\",\n disable=not_verbose,\n )\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\n\ndef http_head(\n url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0\n) -> requests.Response:\n headers = copy.deepcopy(headers) or {}\n headers[\"user-agent\"] = get_datasets_user_agent(user_agent=headers.get(\"user-agent\"))\n response = _request_with_retry(\n method=\"HEAD\",\n url=url,\n proxies=proxies,\n headers=headers,\n cookies=cookies,\n allow_redirects=allow_redirects,\n timeout=timeout,\n max_retries=max_retries,\n )\n return response\n\n\ndef get_from_cache(\n url,\n cache_dir=None,\n force_download=False,\n proxies=None,\n etag_timeout=10,\n resume_download=False,\n user_agent=None,\n local_files_only=False,\n use_etag=True,\n max_retries=0,\n use_auth_token=None,\n) -> str:\n \"\"\"\n Given a URL, look for the corresponding file in the local cache.\n If it's not there, download it. 
Then return the path to the cached file.\n\n Return:\n Local path (string)\n\n Raises:\n FileNotFoundError: in case of non-recoverable file\n (non-existent or no cache on disk)\n ConnectionError: in case of unreachable url\n and no cache on disk\n \"\"\"\n if cache_dir is None:\n cache_dir = config.HF_DATASETS_CACHE\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n os.makedirs(cache_dir, exist_ok=True)\n\n original_url = url # Some parameters may be added\n connected = False\n response = None\n cookies = None\n etag = None\n\n # Try a first time to file the file on the local file system without eTag (None)\n # if we don't ask for 'force_download' then we spare a request\n filename = hash_url_to_filename(original_url, etag=None)\n cache_path = os.path.join(cache_dir, filename)\n\n if os.path.exists(cache_path) and not force_download and not use_etag:\n return cache_path\n\n # Prepare headers for authentication\n headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)\n if user_agent is not None:\n headers[\"user-agent\"] = user_agent\n\n # We don't have the file locally or we need an eTag\n if not local_files_only:\n if url.startswith(\"ftp://\"):\n connected = ftp_head(url)\n try:\n response = http_head(\n url,\n allow_redirects=True,\n proxies=proxies,\n timeout=etag_timeout,\n max_retries=max_retries,\n headers=headers,\n )\n if response.status_code == 200: # ok\n etag = response.headers.get(\"ETag\") if use_etag else None\n for k, v in response.cookies.items():\n # In some edge cases, we need to get a confirmation token\n if k.startswith(\"download_warning\") and \"drive.google.com\" in url:\n url += \"&confirm=\" + v\n cookies = response.cookies\n connected = True\n # In some edge cases, head request returns 400 but the connection is actually ok\n elif (\n (response.status_code == 400 and \"firebasestorage.googleapis.com\" in url)\n or (response.status_code == 405 and \"drive.google.com\" in url)\n or (\n response.status_code == 403\n and re.match(r\"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$\", url)\n )\n ):\n connected = True\n logger.info(\"Couldn't get ETag version for url {}\".format(url))\n except (EnvironmentError, requests.exceptions.Timeout):\n # not connected\n pass\n\n # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.\n # try to get the last downloaded one\n if not connected:\n if os.path.exists(cache_path):\n return cache_path\n if local_files_only:\n raise FileNotFoundError(\n f\"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been\"\n \" disabled. 
To enable file online look-ups, set 'local_files_only' to False.\"\n )\n elif response is not None and response.status_code == 404:\n raise FileNotFoundError(\"Couldn't find file at {}\".format(url))\n _raise_if_offline_mode_is_enabled(f\"Tried to reach {url}\")\n raise ConnectionError(\"Couldn't reach {}\".format(url))\n\n # Try a second time\n filename = hash_url_to_filename(original_url, etag)\n cache_path = os.path.join(cache_dir, filename)\n\n if os.path.exists(cache_path) and not force_download:\n return cache_path\n\n # From now on, connected is True.\n # Prevent parallel downloads of the same file with a lock.\n lock_path = cache_path + \".lock\"\n with FileLock(lock_path):\n\n if resume_download:\n incomplete_path = cache_path + \".incomplete\"\n\n @contextmanager\n def _resumable_file_manager():\n with open(incomplete_path, \"a+b\") as f:\n yield f\n\n temp_file_manager = _resumable_file_manager\n if os.path.exists(incomplete_path):\n resume_size = os.stat(incomplete_path).st_size\n else:\n resume_size = 0\n else:\n temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)\n resume_size = 0\n\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with temp_file_manager() as temp_file:\n logger.info(\"%s not found in cache or force_download set to True, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"ftp://\"):\n ftp_get(url, temp_file)\n else:\n http_get(\n url,\n temp_file,\n proxies=proxies,\n resume_size=resume_size,\n headers=headers,\n cookies=cookies,\n max_retries=max_retries,\n )\n\n logger.info(\"storing %s in cache at %s\", url, cache_path)\n shutil.move(temp_file.name, cache_path)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {\"url\": url, \"etag\": etag}\n meta_path = cache_path + \".json\"\n with open(meta_path, \"w\", encoding=\"utf-8\") as meta_file:\n json.dump(meta, meta_file)\n\n return cache_path\n\n\ndef is_gzip(path: str) -> bool:\n \"\"\"from https://stackoverflow.com/a/60634210\"\"\"\n with gzip.open(path, \"r\") as fh:\n try:\n fh.read(1)\n return True\n except OSError:\n return False\n\n\ndef is_xz(path: str) -> bool:\n \"\"\"https://tukaani.org/xz/xz-file-format-1.0.4.txt\"\"\"\n with open(path, \"rb\") as f:\n try:\n header_magic_bytes = f.read(6)\n except OSError:\n return False\n if header_magic_bytes == b\"\\xfd7zXZ\\x00\":\n return True\n else:\n return False\n\n\ndef is_rarfile(path: str) -> bool:\n \"\"\"https://github.com/markokr/rarfile/blob/master/rarfile.py\"\"\"\n RAR_ID = b\"Rar!\\x1a\\x07\\x00\"\n RAR5_ID = b\"Rar!\\x1a\\x07\\x01\\x00\"\n\n with open(path, \"rb\", 1024) as fd:\n buf = fd.read(len(RAR5_ID))\n if buf.startswith(RAR_ID) or buf.startswith(RAR5_ID):\n return True\n else:\n return False\n\n\ndef add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator\n\n\ndef add_end_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else \"\") + \"\".join(docstr)\n return fn\n\n return docstring_decorator\n\n\ndef estimate_dataset_size(paths):\n return sum(path.stat().st_size for path in paths)\n"
] |
[
[
"numpy.random.get_state",
"tensorflow.executing_eagerly",
"torch.random.set_rng_state",
"numpy.random.seed",
"torch.random.get_rng_state",
"tensorflow.random.get_global_generator",
"torch.cuda.get_rng_state_all",
"torch.random.manual_seed",
"tensorflow.random.set_global_generator",
"numpy.random.set_state",
"torch.cuda.set_rng_state_all",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"tensorflow.random.Generator.from_seed",
"tensorflow.python.context.context"
]
] |
LL03-Identity-Dowell/100054-dowellvoiceapp
|
[
"391df14aa4d438591bd7f9cb740d1f751b59e419"
] |
[
"SpeakerIdentification.py"
] |
[
"import os\r\nimport wave\r\nimport time\r\nimport pickle\r\n#import pyaudio\r\nimport warnings\r\nimport numpy as np\r\nimport sounddevice as sd\r\nfrom scipy.io.wavfile import write\r\nfrom sklearn import preprocessing\r\nfrom scipy.io.wavfile import read\r\nimport python_speech_features as mfcc\r\nfrom sklearn.mixture import GaussianMixture \r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\ndef calculate_delta(array):\r\n \r\n rows,cols = array.shape\r\n print(rows)\r\n print(cols)\r\n deltas = np.zeros((rows,20))\r\n N = 2\r\n for i in range(rows):\r\n index = []\r\n j = 1\r\n while j <= N:\r\n if i-j < 0:\r\n first =0\r\n else:\r\n first = i-j\r\n if i+j > rows-1:\r\n second = rows-1\r\n else:\r\n second = i+j \r\n index.append((second,first))\r\n j+=1\r\n deltas[i] = ( array[index[0][0]]-array[index[0][1]] + (2 * (array[index[1][0]]-array[index[1][1]])) ) / 10\r\n return deltas\r\n\r\n\r\ndef extract_features(audio,rate):\r\n \r\n mfcc_feature = mfcc.mfcc(audio,rate, 0.025, 0.01,20,nfft = 1200, appendEnergy = True) \r\n mfcc_feature = preprocessing.scale(mfcc_feature)\r\n print(mfcc_feature)\r\n delta = calculate_delta(mfcc_feature)\r\n combined = np.hstack((mfcc_feature,delta)) \r\n return combined\r\n\r\n\r\ndef record_audio_train():\r\n\tName =(input(\"Please Enter Your Name:\"))\r\n\tfor count in range(5):\r\n\t\tfreq = 44100\r\n \r\n\t\t# Recording duration\r\n\t\tduration = 10\r\n \r\n\t\t# Start recorder with the given values \r\n\t\t# of duration and sample frequency\r\n\t\trecording = sd.rec(int(duration * freq), samplerate=freq, channels=2)\r\n \r\n\t\t# Record audio for the given number of seconds\r\n\t\t\r\n\t\tprint (\"recording started\")\r\n\t\tsd.wait()\r\n\t\t\r\n\t\tprint (\"recording stopped\")\r\n\t\t\r\n\t\tOUTPUT_FILENAME=Name+\"-sample\"+str(count)+\".wav\"\r\n\t\tWAVE_OUTPUT_FILENAME=os.path.join(\"training_set\",OUTPUT_FILENAME)\r\n\t\ttrainedfilelist = open(\"training_set_addition.txt\", 'a')\r\n\t\ttrainedfilelist.write(OUTPUT_FILENAME+\"\\n\")\r\n\t\twrite(WAVE_OUTPUT_FILENAME, freq, recording)\r\n\t\t\r\n\r\ndef record_audio_test():\r\n\tfreq = 44100\r\n \r\n\t\t# Recording duration\r\n\tduration = 5\r\n \r\n\t\t# Start recorder with the given values \r\n\t\t# of duration and sample frequency\r\n\trecording = sd.rec(int(duration * freq), samplerate=freq, channels=2)\r\n \r\n\t\t# Record audio for the given number of seconds\r\n\t\t\r\n\tprint (\"recording started\")\r\n\tsd.wait()\r\n\t\t\r\n\tprint (\"recording stopped\")\r\n\r\n\t\r\n\t\r\n\tOUTPUT_FILENAME=\"sample.wav\"\r\n\tWAVE_OUTPUT_FILENAME=os.path.join(\"testing_set\",OUTPUT_FILENAME)\r\n\ttrainedfilelist = open(\"testing_set_addition.txt\", 'a')\r\n\ttrainedfilelist.write(OUTPUT_FILENAME+\"\\n\")\r\n\twrite(WAVE_OUTPUT_FILENAME, freq, recording)\r\n\t\r\n\r\ndef train_model():\r\n\r\n\tsource = \"/home/sky_walker/Music/spkr2/training_set/\" \r\n\tdest = \"/home/sky_walker/Music/spkr2/trained_models/\"\r\n\ttrain_file = \"/home/sky_walker/Music/spkr2/training_set_addition.txt\" \r\n\tfile_paths = open(train_file,'r')\r\n\tcount = 1\r\n\tfeatures = np.asarray(())\r\n\tfor path in file_paths: \r\n\t path = path.strip() \r\n\t print(path)\r\n\r\n\t sr,audio = read(source + path)\r\n\t print(sr)\r\n\t vector = extract_features(audio,sr)\r\n\t \r\n\t if features.size == 0:\r\n\t features = vector\r\n\t else:\r\n\t features = np.vstack((features, vector))\r\n\r\n\t if count == 5: \r\n\t gmm = GaussianMixture(n_components = 6, max_iter = 200, covariance_type='diag',n_init = 3)\r\n\t 
gmm.fit(features)\r\n\t \r\n\t # dumping the trained gaussian model\r\n\t picklefile = path.split(\"-\")[0]+\".gmm\"\r\n\t pickle.dump(gmm,open(dest + picklefile,'wb'))\r\n\t print('+ modeling completed for speaker:',picklefile,\" with data point = \",features.shape) \r\n\t features = np.asarray(())\r\n\t count = 0\r\n\t count = count + 1\r\n\r\n\r\ndef test_model():\r\n\r\n\tsource = \"/home/sky_walker/Music/spkr2/testing_set/\" \r\n\tmodelpath = \"/home/sky_walker/Music/spkr2/trained_models/\"\r\n\ttest_file = \"/home/sky_walker/Music/spkr2/testing_set_addition.txt\" \r\n\tfile_paths = open(test_file,'r')\r\n\t \r\n\tgmm_files = [os.path.join(modelpath,fname) for fname in\r\n\t os.listdir(modelpath) if fname.endswith('.gmm')]\r\n\t \r\n\t#Load the Gaussian gender Models\r\n\tmodels = [pickle.load(open(fname,'rb')) for fname in gmm_files]\r\n\tspeakers = [fname.split(\"/\")[-1].split(\".gmm\")[0] for fname \r\n\t in gmm_files]\r\n\t \r\n\t# Read the test directory and get the list of test audio files \r\n\tfor path in file_paths: \r\n\t \r\n\t path = path.strip() \r\n\t print(path)\r\n\t sr,audio = read(source + path)\r\n\t vector = extract_features(audio,sr)\r\n\t \r\n\t log_likelihood = np.zeros(len(models)) \r\n\t \r\n\t for i in range(len(models)):\r\n\t gmm = models[i] #checking with each model one by one\r\n\t scores = np.array(gmm.score(vector))\r\n\t log_likelihood[i] = scores.sum()\r\n\t \r\n\t winner = np.argmax(log_likelihood)\r\n\t print(\"\\tdetected as - \", speakers[winner])\r\n\t time.sleep(1.0) \r\n#choice=int(input(\"\\n1.Record audio for training \\n 2.Train Model \\n 3.Record audio for testing \\n 4.Test Model\\n\"))\r\n \r\nwhile True:\r\n\tchoice=int(input(\"\\n 1.Record audio for training \\n 2.Train Model \\n 3.Record audio for testing \\n 4.Test Model\\n\"))\r\n\tif(choice==1):\r\n\t\trecord_audio_train()\r\n\telif(choice==2):\r\n\t\ttrain_model()\r\n\telif(choice==3):\r\n\t\trecord_audio_test()\r\n\telif(choice==4):\r\n\t\ttest_model()\r\n\tif(choice>4):\r\n\t\texit()\r\n"
] |
[
[
"numpy.hstack",
"scipy.io.wavfile.write",
"numpy.asarray",
"numpy.argmax",
"sklearn.mixture.GaussianMixture",
"sklearn.preprocessing.scale",
"numpy.zeros",
"scipy.io.wavfile.read",
"numpy.vstack"
]
] |
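The SpeakerIdentification.py entry above pairs the source file with the numpy/scipy/sklearn calls extracted from it. As a quick orientation to what those calls do together, here is a minimal, self-contained sketch of the same pipeline (20 MFCCs plus delta coefficients per frame, one diagonal-covariance GaussianMixture per speaker, highest log-likelihood wins). The synthetic audio, the speaker names, and the use of python_speech_features.delta in place of the hand-rolled calculate_delta are illustrative assumptions, not part of the dataset entry.

# Minimal sketch of the GMM speaker-ID pipeline in the file above.
import numpy as np
import python_speech_features as psf
from sklearn import preprocessing
from sklearn.mixture import GaussianMixture

def extract_features(audio, rate):
    # 20 MFCCs per frame, scaled to zero mean / unit variance,
    # stacked with their deltas -> 40-dim vectors (library delta
    # instead of the hand-rolled calculate_delta above).
    feats = psf.mfcc(audio, rate, 0.025, 0.01, 20, nfft=1200, appendEnergy=True)
    feats = preprocessing.scale(feats)
    return np.hstack((feats, psf.delta(feats, 2)))

rate = 16000
rng = np.random.default_rng(0)
speakers = {name: rng.standard_normal(rate * 3) for name in ("alice", "bob")}

# One diagonal-covariance GMM per speaker, as in train_model() above.
models = {name: GaussianMixture(n_components=6, max_iter=200,
                                covariance_type="diag", n_init=3)
                .fit(extract_features(audio, rate))
          for name, audio in speakers.items()}

# Identify an unknown utterance by the highest log-likelihood, as in test_model().
test = extract_features(rng.standard_normal(rate), rate)
scores = {name: gmm.score(test) for name, gmm in models.items()}
print("detected as:", max(scores, key=scores.get))

Replacing the synthetic arrays with the recorded training and testing wav files recovers the behaviour of train_model() and test_model() in the entry.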
andimarafioti/tifresi
|
[
"676db371d5c472a5f3199506bf3863367a2ecde4"
] |
[
"tifresi/phase/modGabPhaseGrad.py"
] |
[
"# -*- coding: utf-8 -*-\n# ######### COPYRIGHT #########\n# Credits\n# #######\n#\n# Copyright(c) 2015-2018\n# ----------------------\n#\n# * `LabEx Archimède <http://labex-archimede.univ-amu.fr/>`_\n# * `Laboratoire d'Informatique Fondamentale <http://www.lif.univ-mrs.fr/>`_\n# (now `Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>`_)\n# * `Institut de Mathématiques de Marseille <http://www.i2m.univ-amu.fr/>`_\n# * `Université d'Aix-Marseille <http://www.univ-amu.fr/>`_\n#\n# This software is a port from LTFAT 2.1.0 :\n# Copyright (C) 2005-2018 Peter L. Soendergaard <[email protected]>.\n#\n# Contributors\n# ------------\n#\n# * Denis Arrivault <contact.dev_AT_lis-lab.fr>\n# * Florent Jaillet <contact.dev_AT_lis-lab.fr>\n#\n# Description\n# -----------\n#\n# ltfatpy is a partial Python port of the\n# `Large Time/Frequency Analysis Toolbox <http://ltfat.sourceforge.net/>`_,\n# a MATLAB®/Octave toolbox for working with time-frequency analysis and\n# synthesis.\n#\n# Version\n# -------\n#\n# * ltfatpy version = 1.0.16\n# * LTFAT version = 2.1.0\n#\n# Licence\n# -------\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ######### COPYRIGHT #########\n\n\n\"\"\"Module of phase gradient computation\n\nPorted from ltfat_2.1.0/gabor/gabphasegrad.m\n\n.. 
moduleauthor:: Florent Jaillet\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport numpy as np\n\nfrom ltfatpy.comp.comp_sigreshape_pre import comp_sigreshape_pre\nfrom ltfatpy.gabor.dgtlength import dgtlength\nfrom ltfatpy.gabor.gabwin import gabwin\nfrom ltfatpy.tools.postpad import postpad\nfrom ltfatpy.fourier.fftindex import fftindex\nfrom ltfatpy.comp.comp_sepdgt import comp_sepdgt\nfrom ltfatpy.fourier.pderiv import pderiv\n\n\ndef modgabphasegrad(method, *args, **kwargs):\n\t\"\"\"Modified Phase gradient of the discrete Gabor transform\n\tWe modified this to work with dgtreals on the phase and abs case\n\tPhase case we did a lot of changes,\n\tabs case we added M as a mandatory parameter\n\n - Usage:\n\n | ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, L=None)``\n | ``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)``\n | ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, M, difforder=2)``\n\n - Input parameters:\n\n :param str method: Method used to compute the phase gradient, see the\n possible values below\n :param numpy.ndarray f: (defined if ``method='dgt'``) Input signal\n :param numpy.ndarray cphase: (defined if ``method='phase'``) Phase of a\n :func:`~ltfatpy.gabor.dgt.dgt` of the signal\n :param numpy.ndarray s: (defined if ``method='abs'``) Spectrogram of the\n signal\n :param numpy.ndarray g: (defined if ``method='dgt'`` or ``method='phase'``)\n Window function\n :param int a: (defined if ``method='dgt'`` or ``method='phase'`` or\n ``method='abs'``) Length of time shift\n :param int M: (defined if ``method='dgt'``) Number of channels\n :param int L: (defined if ``method='dgt'``, optional) Length of transform\n to do\n :param int difforder: (defined if ``method='abs'``, optional) Order of the\n centered finite difference scheme used to perform the needed numerical\n differentiation\n\n - Output parameters:\n\n :returns: ``(tgrad, fgrad, c)`` if ``method='dgt'``, or ``(tgrad, fgrad)``\n if ``method='phase'`` or ``method='abs'``\n :rtype: tuple\n\n :var numpy.ndarray tgrad: Instantaneous frequency\n :var numpy.ndarray fgrad: Local group delay\n :var numpy.ndarray c: Gabor coefficients\n\n ``gabphasegrad`` computes the time-frequency gradient of the phase of the\n :func:`~ltfatpy.gabor.dgt.dgt` of a signal. The derivative in time\n **tgrad** is the instantaneous frequency while the frequency derivative\n **fgrad** is the local group delay.\n\n **tgrad** and **fgrad** measure the deviation from the current time and\n frequency, so a value of zero means that the instantaneous frequency is\n equal to the center frequency of the considered channel.\n\n **tgrad** is scaled such that distances are measured in samples. Similarly,\n **fgrad** is scaled such that the Nyquist frequency (the highest possible\n frequency) corresponds to a value of ``L/2``.\n\n The computation of **tgrad** and **fgrad** is inaccurate when the absolute\n value of the Gabor coefficients is low. This is due to the fact the the\n phase of complex numbers close to the machine precision is almost\n random. Therefore, **tgrad** and **fgrad** may attain very large random\n values when ``abs(c)`` is close to zero.\n\n The computation can be done using three different methods:\n\n =========== ===========================================================\n ``'dgt'`` Directly from the signal.\n\n ``'phase'`` From the phase of a :func:`~ltfatpy.gabor.dgt.dgt` of the\n signal. 
This is the classic method used in the phase\n vocoder.\n\n ``'abs'`` From the absolute value of the\n :func:`~ltfatpy.gabor.dgt.dgt`. Currently this method works\n only for Gaussian windows.\n =========== ===========================================================\n\n ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` computes the\n time-frequency gradient using a :func:`~ltfatpy.gabor.dgt.dgt` of the\n signal **f**. The :func:`~ltfatpy.gabor.dgt.dgt` is computed using the\n window **g** on the lattice specified by the time shift **a** and the\n number of channels **M**. The algorithm used to perform this calculation\n computes several DGTs, and therefore this routine takes the exact same\n input parameters as :func:`~ltfatpy.gabor.dgt.dgt`.\n\n The window **g** may be specified as in :func:`~ltfatpy.gabor.dgt.dgt`. If\n the window used is ``'gauss'``, the computation will be done by a faster\n algorithm.\n\n ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` additionally\n returns the Gabor coefficients ``c``, as they are always computed as a\n byproduct of the algorithm.\n\n ``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)`` computes the phase\n gradient from the phase **cphase** of a :func:`~ltfatpy.gabor.dgt.dgt` of\n the signal. The original :func:`~ltfatpy.gabor.dgt.dgt` from which the\n phase is obtained must have been computed using a time-shift of **a**.\n\n ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a)`` computes the phase\n gradient from the spectrogram **s**. The spectrogram must have been\n computed using the window **g** and time-shift **a**.\n\n ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, difforder=ord)`` uses a\n centered finite difference scheme of order ``ord`` to perform the needed\n numerical differentiation. Default is to use a 4th order scheme.\n\n Currently the 'abs' method only works if the window **g** is a Gaussian\n window specified as a string or cell array.\n\n .. 
seealso:: :func:`resgram`, :func:`gabreassign`,\n :func:`~ltfatpy.gabor.dgt.dgt`\n\n - References:\n :cite:`aufl95,cmdaaufl97,fl65`\n \"\"\"\n\n\t# NOTE: This function doesn't support the parameter lt (lattice type)\n\t# supported by the corresponding octave function and the lattice used is\n\t# seperable (square lattice lt = (0, 1)).\n\n\t# NOTE: As in the octave version of this function, if needed, the\n\t# undocumented optional keyword minlvl is available when using method=dgt.\n\t# So it can be passed using a call of the following form:\n\t# (tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, minlvl=val)\n\n\tif not isinstance(method, str):\n\t\traise TypeError('First argument must be a str containing the method '\n\t\t\t\t\t\t'name, \"dgt\", \"phase\" or \"abs\".')\n\n\tmethod = method.lower()\n\n\tif method == 'dgt':\n\t\traise Exception(\"We dont know if this works\")\n\t\t# --------------------------- DGT method ------------------------\n\n\t\t(f, g, a, M) = args\n\n\t\tif 'L' in kwargs:\n\t\t\tL = kwargs['L']\n\t\telse:\n\t\t\tL = None\n\n\t\tif 'minlvl' in kwargs:\n\t\t\tminlvl = kwargs['minlvl']\n\t\telse:\n\t\t\tminlvl = np.finfo(np.float64).tiny\n\n\t\t# # ----- step 1 : Verify f and determine its length -------\n\t\t# Change f to correct shape.\n\n\t\tf, Ls, W, wasrow, remembershape = comp_sigreshape_pre(f, 0)\n\n\t\t# # ------ step 2: Verify a, M and L\n\t\tif not L:\n\t\t\t# ----- step 2b : Verify a, M and get L from the signal length f---\n\t\t\tL = dgtlength(Ls, a, M)\n\t\telse:\n\t\t\t# ----- step 2a : Verify a, M and get L\n\t\t\tLuser = dgtlength(L, a, M)\n\t\t\tif Luser != L:\n\t\t\t\traise ValueError('Incorrect transform length L = {0:d} '\n\t\t\t\t\t\t\t\t 'specified. Next valid length is L = {1:d}. '\n\t\t\t\t\t\t\t\t 'See the help of dgtlength for the '\n\t\t\t\t\t\t\t\t 'requirements.'.format(L, Luser))\n\n\t\t# # ----- step 3 : Determine the window\n\t\tg, info = gabwin(g, a, M, L)\n\n\t\tif L < info['gl']:\n\t\t\traise ValueError('Window is too long.')\n\n\t\t# # ----- step 4: final cleanup ---------------\n\n\t\tf = postpad(f, L)\n\n\t\t# # ------ algorithm starts --------------------\n\n\t\t# Compute the time weighted version of the window.\n\t\thg = fftindex(L) * g\n\n\t\t# The computation done this way is insensitive to whether the dgt is\n\t\t# phaselocked or not.\n\t\tc = comp_sepdgt(f, g, a, M, 0)\n\n\t\tc_h = comp_sepdgt(f, hg, a, M, 0)\n\n\t\tc_s = np.abs(c) ** 2\n\n\t\t# Remove small values because we need to divide by c_s\n\t\tc_s = np.maximum(c_s, minlvl * np.max(c_s))\n\n\t\t# Compute the group delay\n\t\tfgrad = np.real(c_h * c.conjugate() / c_s)\n\n\t\tif info['gauss']:\n\t\t\t# The method used below only works for the Gaussian window, because\n\t\t\t# the time derivative and the time multiplicative of the Gaussian\n\t\t\t# are identical.\n\t\t\ttgrad = np.imag(c_h * c.conjugate() / c_s) / info['tfr']\n\n\t\telse:\n\t\t\t# The code below works for any window, and not just the Gaussian\n\n\t\t\tdg = pderiv(g, difforder=float('inf')) / (2 * np.pi)\n\t\t\tc_d = comp_sepdgt(f, dg, a, M, 0)\n\n\t\t\t# NOTE: There is a bug here in the original octave file as it\n\t\t\t# contains a reshape that uses an undefined variable N.\n\t\t\t# You can get the error with LTFAT 2.1.0 in octave by running for\n\t\t\t# example:\n\t\t\t# gabphasegrad('dgt', rand(16,1), rand(16,1), 4, 16)\n\t\t\t#\n\t\t\t# So we just comment out the corresponding line here, as it appears\n\t\t\t# to be unneeded:\n\t\t\t# c_d.shape = (M, N, W)\n\n\t\t\t# Compute the instantaneous 
frequency\n\t\t\ttgrad = -np.imag(c_d * c.conjugate() / c_s)\n\n\t\treturn (tgrad, fgrad, c)\n\n\telif method == 'phase':\n\n\t\t# --------------------------- phase method ------------------------\n\n\t\t(cphase, a, M) = args\n\n\t\tif not np.isrealobj(cphase):\n\t\t\traise TypeError(\"Input phase must be real valued. Use the 'angle'\"\n\t\t\t\t\t\t\t\" function to compute the argument of complex \"\n\t\t\t\t\t\t\t\"numbers.\")\n\n\t\t# --- linear method ---\n\t\tif cphase.ndim == 3:\n\t\t\tM2, N, W = cphase.shape # M2 is the number of channels from 0 to Nyquist\n\t\telse:\n\t\t\tM2, N = cphase.shape # M2 is the number of channels from 0 to Nyquist\n\t\tL = N * a\n\t\tb = L / M\n\n\t\t# NOTE: The following code found in the original octave version of the function\n\t\t# hasn't been translated here to Python as it is not used:\n\t\t# if 0\n\t\t#\n\t\t# # This is the classic phase vocoder algorithm by Flanagan.\n\t\t#\n\t\t# tgrad = cphase-circshift(cphase,[0,-1]);\n\t\t# tgrad = tgrad- 2*pi*round(tgrad/(2*pi));\n\t\t# tgrad = -tgrad/(2*pi)*L;\n\t\t#\n\t\t# # Phase-lock the angles.\n\t\t# TimeInd = (0:(N-1))*a;\n\t\t# FreqInd = (0:(M-1))/M;\n\t\t#\n\t\t# phl = FreqInd'*TimeInd;\n\t\t# cphase = cphase+2*pi.*phl;\n\t\t#\n\t\t# fgrad = cphase-circshift(cphase,[1,0]);\n\t\t# fgrad = fgrad- 2*pi*round(fgrad/(2*pi));\n\t\t# fgrad = -fgrad/(2*pi)*L;\n\t\t#\n\t\t# end;\n\n\t\t# This is the classic phase vocoder algorithm by Flanagan modified to\n\t\t# yield a second order centered difference approximation.\n\n\t\t# Forward approximation\n\t\ttgrad_1 = cphase - np.roll(cphase, -1, axis=1)\n\n\t\t# numpy round function doesn't use the same convention than octave for\n\t\t# half-integers but the standard Python round function uses the same\n\t\t# convention than octave, so we use the Python standard round in the\n\t\t# computation below\n\t\toctave_round = np.vectorize(round)\n\t\ttgrad_1 = tgrad_1 - 2 * np.pi * octave_round(tgrad_1 / (2 * np.pi))\n\t\t# Backward approximation\n\t\ttgrad_2 = np.roll(cphase, 1, axis=1) - cphase\n\t\ttgrad_2 = tgrad_2 - 2 * np.pi * octave_round(tgrad_2 / (2 * np.pi))\n\t\t# Average\n\t\ttgrad = (tgrad_1 + tgrad_2) / 2\n\n\t\ttgrad = -tgrad / (2 * np.pi * a) * L\n\n\t\t# Phase-lock the angles.\n\t\tTimeInd = np.arange(N) * a\n\t\tFreqInd = np.arange(M2) / M\n\n\t\tphl = np.dot(FreqInd.reshape((FreqInd.shape[0], 1)),\n\t\t\t\t\t TimeInd.reshape((1, TimeInd.shape[0])))\n\t\t# NOTE: in the following lines, the shape of phl is changed so that\n\t\t# broadcasting works in the following addition with cphase when cphase\n\t\t# has more than two dimensions\n\t\tnew_shape = np.ones((len(cphase.shape),), dtype=int)\n\t\tnew_shape[0] = phl.shape[0]\n\t\tnew_shape[1] = phl.shape[1]\n\t\tphl = phl.reshape(tuple(new_shape))\n\t\tcphase = cphase + 2 * np.pi * phl\n\t\tcphase_to_aprox = np.concatenate([-cphase[1:2], cphase, -cphase[-2:-1]])\n\n\t\t# Forward approximation\n\t\tfgrad_1 = cphase_to_aprox - np.roll(cphase_to_aprox, -1, axis=0)\n\t\tfgrad_1 = fgrad_1 - 2 * np.pi * octave_round(fgrad_1 / (2 * np.pi))\n\t\tfgrad_1 = fgrad_1[1:-1]\n\t\t# Backward approximation\n\t\tfgrad_2 = np.roll(cphase_to_aprox, 1, axis=0) - cphase_to_aprox\n\t\tfgrad_2 = fgrad_2 - 2 * np.pi * octave_round(fgrad_2 / (2 * np.pi))\n\t\tfgrad_2 = fgrad_2[1:-1]\n\t\t# Average\n\t\tfgrad = (fgrad_1 + fgrad_2) / 2\n\n\t\tfgrad = fgrad / (2 * np.pi * b) * L\n\n\t\treturn (tgrad, fgrad)\n\n\telif method == 'abs':\n\t\t# --------------------------- abs method ------------------------\n\n\t\t(s, g, a, M) = 
args\n\n\t\tif 'difforder' in kwargs:\n\t\t\tdifforder = kwargs['difforder']\n\t\telse:\n\t\t\tdifforder = 2\n\n\t\tif not np.all(s >= 0.):\n\t\t\traise ValueError('First input argument must be positive or zero.')\n\n\t\tif s.ndim == 3:\n\t\t\tM2, N, W = s.shape\n\t\telse:\n\t\t\tM2, N = s.shape\n\n\t\tL = N * a\n\n\t\tg, info = gabwin(g, a, M, L)\n\n\t\tif not info['gauss']:\n\t\t\traise ValueError('The window must be a Gaussian window (specified '\n\t\t\t\t\t\t\t 'as a string or as a dictionary).')\n\n\t\tb = L / M\n\t\t# We must avoid taking the log of zero.\n\t\t# Therefore we add the smallest possible\n\t\t# number\n\t\tlogs = np.log(s + np.finfo(s.dtype).tiny)\n\n\t\t# XXX REMOVE Add a small constant to limit the dynamic range. This\n\t\t# should lessen the problem of errors in the differentiation for points\n\t\t# close to (but not exactly) zeros points.\n\t\tmaxmax = np.max(logs)\n\t\ttt = -11.\n\t\tlogs[logs < (maxmax + tt)] = tt\n\n\t\tfgrad = pderiv(logs, 1, difforder) / (2 * np.pi) * info['tfr']\n\t\ttgrad = pderiv(logs, 0, difforder) / (2 * np.pi * info['tfr']) * (M/M2)\n\t\t# Fix the first and last rows .. the\n\t\t# borders are symmetric so the centered difference is 0\n\t\ttgrad[0, :] = 0\n\t\ttgrad[-1, :] = 0\n\n\t\treturn (tgrad, fgrad)\n\n\telse:\n\t\traise ValueError(\"First argument must be the method name, 'dgt', \"\n\t\t\t\t\t\t \"'phase' or 'abs'.\")\n"
] |
[
[
"numpy.abs",
"numpy.arange",
"numpy.finfo",
"numpy.concatenate",
"numpy.max",
"numpy.all",
"numpy.vectorize",
"numpy.isrealobj",
"numpy.roll"
]
] |
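The modGabPhaseGrad.py entry above centres on the 'phase' branch, which turns a wrapped DGT phase into tgrad/fgrad by averaging forward and backward principal-value differences. The toy sketch below isolates just that centred-difference step in the time direction (the tgrad computation); the array sizes and hop length a are illustrative values, and ltfatpy is not needed for this fragment.

# Centred-difference phase gradient along time, mirroring the 'phase' branch above.
import numpy as np

octave_round = np.vectorize(round)   # same half-integer rounding convention as above

def principal_value(d):
    # Wrap a phase difference back into the principal interval, as done after each np.roll above.
    return d - 2 * np.pi * octave_round(d / (2 * np.pi))

def centred_time_diff(cphase, a, L):
    fwd = principal_value(cphase - np.roll(cphase, -1, axis=1))   # forward approximation
    bwd = principal_value(np.roll(cphase, 1, axis=1) - cphase)    # backward approximation
    tgrad = (fwd + bwd) / 2                                       # average of the two
    return -tgrad / (2 * np.pi * a) * L                           # same scaling as above

M2, N, a = 5, 8, 64
L = N * a
rng = np.random.default_rng(1)
cphase = np.angle(np.exp(1j * np.cumsum(rng.uniform(-1, 1, (M2, N)), axis=1)))
print(centred_time_diff(cphase, a, L).shape)   # (5, 8): one tgrad value per coefficient

The fgrad computation in the entry follows the same pattern along axis 0, after phase-locking and mirroring the border rows.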
sdss/lvmcam
|
[
"c5f421a546a0072a0dbb3d7b2ebc74316f339f64"
] |
[
"python/lvmcam/araviscam/BlackflyCam.py"
] |
[
"#!/usr/bin/env python3\n\n\"\"\"\nPython3 class to work with Aravis/GenICam cameras, subclass of sdss-basecam.\n.. module:: araviscam\n.. moduleauthor:: Richard J. Mathar <[email protected]>\n\"\"\"\n\nimport sys\nimport math\nimport asyncio\nimport numpy\nimport astropy\n\nfrom basecam.mixins import ImageAreaMixIn\nfrom basecam import (\n CameraSystem,\n BaseCamera,\n CameraEvent,\n CameraConnectionError,\n models,\n ExposureError,\n)\n\nfrom lvmcam.actor import modules\n\n\n# Since the aravis wrapper for GenICam cameras (such as the Blackfly)\n# is using glib2 GObjects to represent cameras and streams, the\n# PyGObject module allows to call the C functions of aravis in python.\n# https://pygobject.readthedocs.io/en/latest/\nfrom lvmcam.araviscam.aravis import Aravis\n\nimport basecam.models.card as card\n\nfrom lvmcam.actor.commands import expose\n\n# https://pypi.org/project/sdss-basecam/\n# https://githum.com/sdss/basecam/\n\n# from sdsstools import read_yaml_file\n\n__all__ = [\"BlackflyCameraSystem\", \"BlackflyCamera\", \"BlackflyImageAreaMixIn\"]\n\n\nclass BlackflyCameraSystem(CameraSystem):\n \"\"\"A collection of GenICam cameras, possibly online\n :param camera_class : `.BaseCamera` subclass\n The subclass of `.BaseCamera` to use with this camera system.\n :param camera_config :\n A dictionary with the configuration parameters for the multiple\n cameras that can be present in the system, or the path to a YAML file.\n Refer to the documentation for details on the accepted format.\n :type camera_config : dict or path\n :param include : List of camera UIDs that can be connected.\n :type include : list\n :param exclude : list\n List of camera UIDs that will be ignored.\n :param logger : ~logging.Logger\n The logger instance to use. If `None`, a new logger will be created.\n :param log_header : A string to be prefixed to each message logged.\n :type log_header : str\n :param log_file : The path to which to log.\n :type log_file : str\n :param verbose : Whether to log to stdout.\n :type verbose : bool\n :param ip_list: A list of IP-Adresses to be checked/pinged.\n :type ip_list: List of strings.\n \"\"\"\n\n __version__ = \"0.0.301\"\n\n # A list of ip addresses in the usual \"xxx.yyy.zzz.ttt\" or \"name.subnet.net\"\n # format that have been added manually/explicitly and may not be found by the\n # usual broadcase auto-detection (i.e., possibly on some other global network).\n ips_nonlocal = []\n\n def __init__(\n self,\n camera_class=None,\n camera_config=None,\n include=None,\n exclude=None,\n logger=None,\n log_header=None,\n log_file=None,\n verbose=False,\n ip_list=None,\n ):\n super().__init__(\n camera_class=camera_class,\n camera_config=camera_config,\n include=include,\n exclude=exclude,\n logger=logger,\n log_header=log_header,\n log_file=log_file,\n verbose=verbose,\n )\n\n # If the ctor is fed with an explicit list of IP addresses, add them to\n # the scanner (with delayed inspection in list_available_cameras).\n if ip_list is not None:\n self.ips_nonlocal.extend(ip_list)\n\n # debuging: print yaml configuration\n # print(self._config)\n\n # @modules.timeit\n def list_available_cameras(self):\n \"\"\"Gather serial numbers of online Aravis/Genicam devices.\n :return: a list of serial numbers (as strings). This list may be\n empty if no cameras are online/switched on.\n For cameras explicitly addressed by IP, the serial\n numbers have the format sn@ip, with an @ between number and address.\n :rtype: list\n\n .. 
todo:: optionally implement a specific filter for Blackfly's if Basler\n cameras should not be listed.\n \"\"\"\n\n # Start with (pessimistic) initially empty set of online devices\n serialNums = []\n addrs = []\n\n # Broadcast ethernet/bus for recognized cameras.\n # Warning/todo: this gathers also cameras that are not of the Blackfly class,\n # and in conjunction with the SDSS may also recognize the Basler cameras..\n Aravis.update_device_list()\n Ndev = Aravis.get_n_devices()\n # print(str(Ndev) + \" cameras online\")\n\n # get_device_id returns a string of type, SN, MAC etc\n for i in range(Ndev):\n cam = Aravis.Camera.new(Aravis.get_device_id(i))\n uid = cam.get_string(\"DeviceSerialNumber\")\n serialNums.append(uid)\n addrs.append(\"\")\n\n # Try to ping cameras explicitly proposed with ctor.\n for ip in self.ips_nonlocal:\n try:\n cam = Aravis.Camera.new(ip)\n uid = cam.get_string(\"DeviceSerialNumber\")\n # If is this was already in the scan: discard, else add\n if uid not in serialNums:\n serialNums.append(uid)\n addrs.append(\"@\" + ip)\n except:\n # apparently no such camera at this address....\n pass\n\n # we zip the two lists to the format 'serialnumber{@ip}'\n ids = []\n for cam in range(len(serialNums)):\n ids.append(serialNums[cam] + addrs[cam])\n\n return ids\n\nfrom basecam.models.builtin import basic_fz_fits_model\n\n\nclass BlackflyCamera(BaseCamera):\n \"\"\"A FLIR (formerly Point Grey Research) Blackfly camera.\n Given the pixel scale on the benches of LVMi and the assumption\n of 9 um pixel sizes of the LVMi cameras, we assume that the\n cameras have roughly 1 arsec per pixel, so they are used without binning.\n\n In addition we let the camera flip the standard image orientation of the data\n values assuming that values are stored into a FITS interface (where\n the first values in the sequential data are the bottom row).\n So this is not done in this python code but by the camera.\n \"\"\"\n \n # fits_model=basic_fz_fits_model\n\n def __init__(\n self,\n uid,\n camera_system,\n name=None,\n force=False,\n image_namer=None,\n camera_params={},\n ):\n super().__init__(\n uid=uid,\n camera_system=camera_system,\n name=name,\n force=force,\n image_namer=image_namer,\n camera_params=camera_params,\n )\n self.header = []\n\n @modules.atimeit\n async def _connect_internal(self, **kwargs):\n \"\"\"Connect to a camera and upload basic binning and ROI parameters.\n :param kwargs: recognizes the key uid with integer value, the serial number\n If the key uid is absent, tries to attach to the first camera.\n This is a subdictionary of 'cameras' in practise.\n \"\"\"\n\n # print(self.name)\n # search for an optional uid key in the arguments\n try:\n uid = kwargs[\"uid\"]\n except:\n uid = None\n\n # reverse lookup of the uid in the list of known cameras\n cs = BlackflyCameraSystem(BlackflyCamera)\n slist = cs.list_available_cameras()\n\n if uid is None:\n # uid was not specified: grab the first device that is found\n # print(\"no uid provided, attaching to first camera\")\n idx = 0\n else:\n # print(\"searching \" + uid + \" in \" + str(slist) )\n idx = -1\n for id in slist:\n # remove the optional ip address of the id\n slistuid = id.split(\"@\")[0]\n if slistuid == uid:\n idx = slist.index(id)\n # not found\n if idx < 0:\n raise CameraConnectionError(\"SN \" + uid + \" not connected\")\n\n cam = None\n try:\n if \"@\" in slist[idx]:\n # if the camera was not on local network use the address part\n cam = Aravis.Camera.new(slist[idx].split(\"@\")[1])\n else:\n # otherwise the 
index is the same as the search order...\n cam = Aravis.Camera.new(Aravis.get_device_id(idx))\n except:\n raise CameraConnectionError(\" not connected\")\n\n # search for an optional gain key in the arguments\n # todo: one could interpret gain=0 here as to call set_gain_auto(ARV_AUTO_ON)\n try:\n gain = kwargs[\"gain\"]\n if gain > 0.0:\n # todo: it might make sense to squeeze this into the minimum\n # and maximum range of the camera's gain if outside that range.\n self.device.set_gain_auto(0)\n cam.set_gain(gain)\n except Exception as ex:\n # print(\"failed to set gain \" + str(ex))\n pass\n\n # see arvenums.h for the list of pixel formats. This is MONO_16 here, always\n cam.set_pixel_format(0x01100007)\n\n # search for an optional x and y binning factor\n try:\n var = kwargs[\"binning\"]\n cam.set_binning(var[0], var[1])\n except Exception as ex:\n # print(\"failed to set binning \" + str(ex))\n # horizontal and vertical binning set to 1\n cam.set_binning(1, 1)\n\n # scan the general list of genicam featured values\n # of the four native types\n for typp, arvLst in kwargs.items():\n if arvLst is not None:\n if typp == \"bool\":\n for genkey, genval in arvLst.items():\n try:\n cam.set_boolean(genkey, int(genval))\n except:\n # probably a typo in the yaml file... todo: log this\n # print(\"failed for \" + str(genkey)+str(genval))\n pass\n elif typp == \"int\":\n for genkey, genval in arvLst.items():\n try:\n cam.set_integer(genkey, genval)\n except:\n # probably a typo in the yaml file... todo: log this\n # print(\"failed for \" + str(genkey)+str(genval))\n pass\n elif typp == \"float\":\n for genkey, genval in arvLst.items():\n try:\n cam.set_float(genkey, genval)\n except:\n # probably a typo in the yaml file... todo: log this\n # print(\"failed for \" + str(genkey)+str(genval))\n pass\n elif typp == \"string\":\n for genkey, genval in arvLst.items():\n try:\n cam.set_string(genkey, genval)\n except:\n # probably a typo in the yaml file... 
todo: log this\n # print(\"failed for \" + str(genkey)+str(genval))\n pass\n\n dev = cam.get_device()\n\n # Take full frames by default (maximizing probability of LVM guide camera\n # to find guide stars in the field)\n roiBounds = [-1, -1]\n try:\n roiBounds[0] = dev.get_integer_feature_value(\"WidthMax\")\n roiBounds[1] = dev.get_integer_feature_value(\"HeightMax\")\n # print(\" ROI \" + str(roiBounds[0]) + \" x \" + str(roiBounds[1]) )\n cam.set_region(0, 0, roiBounds[0], roiBounds[1])\n except Exception as ex:\n # print(\"failed to set ROI \" + str(ex))\n pass\n\n self.device = cam\n self.regionBounds = roiBounds\n\n @modules.atimeit\n async def _disconnect_internal(self):\n \"\"\"Close connection to camera.\"\"\"\n self.device = None\n\n # @modules.atimeit\n async def _expose_grabFrame(self, exposure):\n \"\"\"Read a single unbinned full frame.\n The class splits the parent class' exposure into this function and\n the part which generates the FITS file, because applications in guiders\n are usually only interested in the frame's data, and would not\n take the detour of generating a FITS file and reading it back from\n disk.\n\n :param exposure: On entry, exposure.exptim is the intended exposure time in [sec]\n On exit, exposure.data is the numpy array of the 16bit data\n arranged in FITS order (i.e., the data of the bottom row appear first...)\n :return: The dictionary with the window location and size (x=,y=,width=,height=)\n \"\"\"\n # To avoid being left over by other programs with no change\n # to set the exposure time, we switch the auto=0=off first\n self.device.set_exposure_time_auto(0)\n # Aravis assumes exptime in micro second integers\n exptime_ms = int(0.5 + exposure.exptime * 1e6)\n self.device.set_exposure_time(exptime_ms)\n\n # timeout (factor 2: assuming there may be two frames in auto mode taken\n # internally)\n # And 5 seconds margin for any sort of transmission overhead over PoE\n tout_ms = int(1.0e6 * (2.0 * exposure.exptime + 5))\n self.notify(CameraEvent.EXPOSURE_INTEGRATING)\n\n # the buffer allocated/created within the acquisition()\n buf = await self.loop.run_in_executor(None, self.device.acquisition, tout_ms)\n if buf is None:\n raise ExposureError(\n \"Exposing for \"\n + str(exposure.exptime)\n + \" sec failed. Timout \"\n + str(tout_ms / 1.0e6)\n )\n\n # Decipher which methods this aravis buffer has...\n # print(dir(buf))\n\n # reg becomes a x=, y=, width= height= dictionary\n # these are in standard X11 coordinates where upper left =(0,0)\n reg = buf.get_image_region()\n # print('region',reg)\n\n data = buf.get_data()\n\n exposure.data = numpy.ndarray(\n buffer=data, dtype=numpy.uint16, shape=(1, reg.height, reg.width)\n )\n # print(\"exposure data shape\", exposure.data.shape)\n return reg\n\n @modules.atimeit\n async def _expose_internal(self, exposure):\n \"\"\"Read a single unbinned full frame and store in a FITS file.\n :param exposure: On entry exposure.exptim is the intended exposure time in [sec]\n On exit, exposure.data contains the 16bit data of a single frame\n :return: There is no return value\n \"\"\"\n\n # fill exposure.data with the frame's 16bit data\n # reg becomes a x=, y=, width= height= dictionary\n # these are in standard X11 coordinates where upper left =(0,0)\n\n reg = await self._expose_grabFrame(exposure)\n # print('region',reg)\n\n binxy = {}\n try:\n # becomes a dictionary with dx=... dy=... 
for the 2 horiz/vert binn fact\n binxy = self.device.get_binning()\n except Exception as ex:\n binxy = None\n\n # append FITS header cards\n # For the x/y coordinates transform from X11 to FITS coordinates\n # Todo: reports the camera y-flipped reg.y if ReversY=true above??\n addHeaders = [\n (\"BinX\", binxy.dx, \"[ct] Horizontal Bin Factor 1, 2 or 4\"),\n (\"BinY\", binxy.dy, \"[ct] Vertical Bin Factor 1, 2 or 4\"),\n (\"Width\", reg.width, \"[ct] Pixel Columns\"),\n (\"Height\", reg.height, \"[ct] Pixel Rows\"),\n (\"RegX\", 1 + reg.x, \"[ct] Pixel Region Horiz start\"),\n # The lower left FITS corner is the upper left X11 corner...\n (\n \"RegY\",\n self.regionBounds[1] - (reg.y + reg.height - 1),\n \"[ct] Pixel Region Vert start\",\n ),\n ]\n\n dev = self.device.get_device()\n # print(dir(dev))\n # print(dir(self))\n # print(self.camera_system.get_camera(self.name))\n # print(self.camera_system._config[self.name])\n\n try:\n gain = dev.get_float_feature_value(\"Gain\")\n addHeaders.append((\"Gain\", gain, \"Gain\"))\n except Exception as ex:\n # print(\"failed to read gain\" + str(ex))\n pass\n\n imgrev = [False, False]\n try:\n imgrev[0] = self.device.get_boolean(\"ReverseX\")\n addHeaders.append((\"ReverseX\", imgrev[0] != 0, \" Flipped left-right\"))\n imgrev[1] = self.device.get_boolean(\"ReverseY\")\n addHeaders.append((\"ReverseY\", imgrev[1] != 0, \" Flipped up-down\"))\n # print(\"reversed\" + str(imgrev[0]) + str(imgrev[1]) )\n except Exception as ex:\n # print(\"failed to read ReversXY\" + str(ex))\n pass\n\n # This is an enumeration in the GenICam. See features list of\n # `arv-tool-0.8 --address=192.168.70.50 features`\n\n binMod = [-1, -1]\n try:\n binMod[0] = dev.get_integer_feature_value(\"BinningHorizontalMode\")\n if binMod[0] == 0:\n addHeaders.append(\n (\"BinModeX\", \"Averag\", \"Horiz Bin Mode Sum or Averag\")\n )\n else:\n addHeaders.append((\"BinModeX\", \"Sum\", \"Horiz Bin Mode Sum or Averag\"))\n binMod[1] = dev.get_integer_feature_value(\"BinningVerticalMode\")\n if binMod[1] == 0:\n addHeaders.append((\"BinModeY\", \"Averag\", \"Vert Bin Mode Sum or Averag\"))\n else:\n addHeaders.append((\"BinModeY\", \"Sum\", \"Vert Bin Mode Sum or Averag\"))\n except Exception as ex:\n # print(\"failed to read binmode\" + str(ex))\n pass\n\n tmp = False\n try:\n tmp = self.device.get_boolean(\"BlackLevelClampingEnable\")\n addHeaders.append(\n (\"CAMBLCLM\", tmp != 0, \"Black Level Clamping en/disabled\")\n )\n # print(\"BlackLevelClampingEnable\" + str(imgrev[0]) + str(imgrev[1]) )\n except Exception as ex:\n # print(\"failed to read BlackLevelClampingEnable\" + str(ex))\n pass\n\n try:\n camtyp = self.device.get_model_name()\n addHeaders.append((\"CAMTYP\", camtyp, \"Camera model\"))\n except:\n pass\n\n # call _expose_wcs() to gather WCS header keywords\n addHeaders.extend(self._expose_wcs(exposure, reg))\n\n # for headr in addHeaders:\n # exposure.fits_model[0].header_model.append(models.Card(headr))\n\n self.header = addHeaders\n # print(repr(exposure.to_hdu()[0].header))\n\n # unref() is currently usupported in this GObject library.\n # Hope that this does not lead to any memory leak....\n # buf.unref()\n return\n\n # @modules.timeit\n def _expose_wcs(self, exposure, reg):\n \"\"\"Gather information for the WCS FITS keywords\n :param exposure: On entry exposure.exptim is the intended exposure time in [sec]\n On exit, exposure.data contains the 16bit data of a single frame\n :param reg The binning and region information\n \"\"\"\n # the section/dictionary of the 
yaml file for this camera\n yamlconfig = self.camera_system._config[self.name]\n wcsHeaders = []\n\n # The distance from the long edge of the FLIR camera to the center\n # of the focus (fiber) is 7.144+4.0 mm according to SDSS-V_0110 figure 6\n # and 11.14471 according to figure 3-1 of LVMi-0081\n # For the *w or *e cameras the pixel row 1 (in FITS) is that far\n # away in the y-coordinate and in the middle of the x-coordinate.\n # For the *c cameras at the fiber bundle we assume them to be in the beam center.\n wcsHeaders.append((\"CRPIX1\", reg.width / 2, \"[px] RA center along axis 1\"))\n if self.name[-1] == \"c\":\n wcsHeaders.append(\n (\"CRPIX2\", reg.height / 2, \"[px] DEC center along axis 2\")\n )\n else:\n # convert 11.14471 mm to microns and to to pixels\n crefy = 11.14471 * 1000.0 / yamlconfig[\"pixsize\"]\n wcsHeaders.append((\"CRPIX2\", -crefy, \"[px] DEC center along axis 2\"))\n\n return wcsHeaders\n\n\nclass BlackflyImageAreaMixIn(ImageAreaMixIn):\n \"\"\"Allows to select image region and binning factors\"\"\"\n\n async def _get_image_area_internal(self):\n pass\n\n async def _set_image_area_internal(self, area=None):\n pass\n\n async def _get_binning_internal(self):\n pass\n\n async def _set_binning_internal(self, hbin, vbin):\n pass\n\n\n# async def singleFrame(\n# exptim,\n# name,\n# verb=False,\n# ip_add=None,\n# config=\"cameras.yaml\",\n# targ=None,\n# kmirr=0.0,\n# flen=None,\n# ):\n# \"\"\"Expose once and write the image to a FITS file.\n# :param exptim: The exposure time in seconds. Non-negative.\n# :type exptim: float\n# :param verb: Verbosity on or off\n# :type verb: boolean\n# :param ip_add: list of explicit IP's (like 192.168.70.51 or lvmt.irws2.mpia.de)\n# :type ip_add: list of strings\n# :param config: Name of the YAML file with the cameras configuration\n# :type config: string of the file name\n# :param targ: alpha/delta ra/dec of the sidereal target\n# :type targ: astropy.coordinates.SkyCoord\n# :param kmirr: Kmirr angle in degrees (0 if up, positive with right hand rule along North on bench)\n# :type kmirr: float\n# :param flen: focal length of telescope/siderostat in mm\n# If not provided it will be taken from the configuration file\n# :type flen: float\n# \"\"\"\n\n# cs = BlackflyCameraSystem(\n# BlackflyCamera, camera_config=config, verbose=verb, ip_list=ip_add\n# )\n# cam = await cs.add_camera(name=name)\n# # print(\"cameras\", cs.cameras)\n# # print(\"config\" ,config)\n\n# exp = await cam.expose(exptim, \"LAB TEST\")\n\n# if targ is not None and kmirr is not None:\n# # if there is already a (partial) header information, keep it,\n# # otherwise create one ab ovo.\n# if exp.wcs is None:\n# wcshdr = astropy.io.fits.Header()\n# else:\n# wcshdr = exp.wcs.to_header()\n\n# key = astropy.io.fits.Card(\"CUNIT1\", \"deg\", \"WCS units along axis 1\")\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\"CUNIT2\", \"deg\", \"WCS units along axis 2\")\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\"CTYPE1\", \"RA---TAN\", \"WCS type axis 1\")\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\"CTYPE2\", \"DEC--TAN\", \"WCS type axis 2\")\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\"CRVAL1\", targ.ra.deg, \"[deg] RA at reference pixel\")\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\n# \"CRVAL2\", targ.dec.deg, \"[deg] DEC at reference pixel\"\n# )\n# wcshdr.append(key)\n\n# # field angle: degrees, then radians\n# # direction of NCP on the detectors (where we have already flipped pixels\n# # on all detectors so fieldrot=kmirr=0 implies 
North is up and East is left)\n# # With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)\n# # so the direction is the vector ( sin(f), cos(f)) before the K-mirror.\n# # Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))\n# # and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.\n# # todo: get starting value from a siderostat field rotation tracking model\n# fieldrot = 0.0\n\n# if name[-1] == \"c\":\n# # without prism, assuming center camera placed horizontally\n# if name[:4] == \"spec\":\n# # without K-mirror\n# pass\n# else:\n# # with K-mirror\n# # in the configuration the y-axis of the image has been flipped,\n# # the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))\n# # and applied to the input vector this is (sin(2m+f), cos(2m+f))\n# fieldrot += 2.0 * kmirr\n# else:\n# # with prism\n# if name[:4] == \"spec\":\n# # without K-mirror\n# # Applied to input beam this gives (-sin(f), cos(f)) but prism effect\n# # had been undone by vertical flip in the FLIR image.\n# pass\n# else:\n# # with K-mirror\n# # Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).\n# # Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).\n# fieldrot += 2.0 * kmirr + 180.0\n\n# if name[-1] == \"w\":\n# # Camera is vertically,\n# # so up in the lab is right in the image\n# fieldrot += 90\n# else:\n# # Camera is vertically,\n# # so up in the lab is left in the image\n# fieldrot -= 90\n\n# fieldrot = math.radians(fieldrot)\n\n# # the section/dictionary of the yaml file for this camera\n# yamlconfig = cs._config[name]\n\n# if flen is None:\n# flen = yamlconfig[\"flen\"]\n\n# # pixel scale per arcseconds is focal length *pi/180 /3600\n# # = flen * mm *pi/180 /3600\n# # = flen * um *pi/180 /3.6, so in microns per arcsec...\n# pixscal = math.radians(flen) / 3.6\n\n# # degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600\n# degperpix = yamlconfig[\"pixsize\"] / pixscal / 3600.0\n\n# # for the right handed coordinates\n# # (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot\n# # (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)\n# # (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)\n# # (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)\n# # Note that the det of the WCS matrix is negativ (because RA/DEC is left-handed...)\n# cosperpix = degperpix * math.cos(fieldrot)\n# sinperpix = degperpix * math.sin(fieldrot)\n# key = astropy.io.fits.Card(\"CD1_1\", -cosperpix, \"[deg/px] WCS matrix diagonal\")\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\"CD2_2\", cosperpix, \"[deg/px] WCS matrix diagonal\")\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\n# \"CD1_2\", sinperpix, \"[deg/px] WCS matrix outer diagonal\"\n# )\n# wcshdr.append(key)\n# key = astropy.io.fits.Card(\n# \"CD2_1\", sinperpix, \"[deg/px] WCS matrix outer diagonal\"\n# )\n# wcshdr.append(key)\n\n# exp.wcs = astropy.wcs.WCS(wcshdr)\n# # print(exp.wcs.to_header_string())\n# for headr in wcshdr.cards:\n# exp.fits_model[0].header_model.append(models.Card(headr))\n\n# await exp.write()\n# if verb:\n# print(\"wrote \", exp.filename)\n\n\n# # A debugging aid, demonstrator and simple test run\n# # This allows to call this file as an executable from the command line.\n# # The last command line argument must be the name of the camera\n# # as used in the configuration file.\n# # Example\n# # BlackflyCam.py [-e seconds] [-v] [-c 
../etc/cameras.yaml] [-r 2h10m10s] [-d -20d10m3s]\n# # [-K kmirrdegrees] [-s \"LCO\"|\"MPIA\"|\"APO\"|\"KHU\"] [-f focallengthmm] {spec.age|spec.agw|...}\n# if __name__ == \"__main__\":\n\n# import argparse\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument(\n# \"-e\",\n# \"--exptime\",\n# type=float,\n# default=5.0,\n# help=\"Expose for for exptime seconds\",\n# )\n\n# parser.add_argument(\n# \"-v\", \"--verbose\", action=\"store_true\", help=\"print some notes to stdout\"\n# )\n\n# # With the -i switch we can add an explicit IP-Adress for a\n# # camera if we want to read a camera that is not reachable\n# # by the broadcast scanner.\n# parser.add_argument(\"-i\", \"--ip\", help=\"IP address of camera\")\n\n# # Name of an optional YAML file\n# parser.add_argument(\n# \"-c\", \"--cfg\", default=\"cameras.yaml\", help=\"YAML file of lvmt cameras\"\n# )\n\n# # right ascension in degrees\n# parser.add_argument(\"-r\", \"--ra\", help=\"RA J2000 in degrees or in xxhxxmxxs format\")\n\n# # declination in degrees\n# parser.add_argument(\n# \"-d\", \"--dec\", help=\"DEC J2000 in degrees or in +-xxdxxmxxs format\"\n# )\n\n# # K-mirror angle in degrees\n# # Note this is only relevant for 3 of the 4 tables/telescopes\n# parser.add_argument(\"-K\", \"--Kmirr\", type=float, help=\"K-mirror angle in degrees\")\n\n# # focal length of telescope in mm\n# # Default is the LCO triple lens configuration of 1.8 meters\n# parser.add_argument(\n# \"-f\", \"--flen\", type=float, default=1839.8, help=\"focal length in mm\"\n# )\n\n# # shortcut for site coordinates: observatory\n# # parser.add_argument(\"-s\", '--site', default=\"LCO\", help=\"LCO or MPIA or APO or KHU\")\n\n# # the last argument is mandatory: must be the name of exactly one camera\n# # as used in the configuration file\n# parser.add_argument(\"camname\", default=\"sci.agw\")\n\n# args = parser.parse_args()\n\n# ip_cmdLine = []\n# if args.ip is not None:\n# ip_cmdLine.append(args.ip)\n\n# # check ranges and combine ra/dec into a single SkyCoord\n# if args.ra is not None and args.dec is not None:\n# if args.ra.find(\"h\") < 0:\n# # apparently simple floating point representation\n# targ = astropy.coordinates.SkyCoord(\n# ra=float(args.ra), dec=float(args.dec), unit=\"deg\"\n# )\n# else:\n# targ = astropy.coordinates.SkyCoord(args.ra + \" \" + args.dec)\n# else:\n# targ = None\n\n# # print(targ)\n\n# # The following 2 lines test that listing the connected cameras works...\n# # bsys = BlackflyCameraSystem(camera_class=BlackflyCamera)\n# # bsys.list_available_cameras()\n\n# asyncio.run(\n# singleFrame(\n# args.exptime,\n# args.camname,\n# verb=args.verbose,\n# ip_add=ip_cmdLine,\n# config=args.cfg,\n# targ=targ,\n# kmirr=args.Kmirr,\n# flen=args.flen,\n# )\n# )\n\n\nclass WcsHdrCards(card.MacroCard):\n def macro(self, exposure, context={}):\n wcshdr = get_wcshdr(modules.variables.cs_list[0], modules.variables.camname, modules.variables.targ, modules.variables.kmirr, modules.variables.flen)\n return wcshdr\n\n# @modules.timeit\ndef get_wcshdr(\n cs,\n name,\n targ,\n kmirr,\n flen,\n):\n if targ is not None and kmirr is not None:\n # wcshdr = astropy.io.fits.Header()\n wcshdr = []\n\n key = astropy.io.fits.Card(\"CUNIT1\", \"deg\", \"WCS units along axis 1\")\n wcshdr.append(key)\n key = astropy.io.fits.Card(\"CUNIT2\", \"deg\", \"WCS units along axis 2\")\n wcshdr.append(key)\n key = astropy.io.fits.Card(\"CTYPE1\", \"RA---TAN\", \"WCS type axis 1\")\n wcshdr.append(key)\n key = astropy.io.fits.Card(\"CTYPE2\", \"DEC--TAN\", \"WCS type axis 
2\")\n wcshdr.append(key)\n key = astropy.io.fits.Card(\"CRVAL1\", targ.ra.deg, \"[deg] RA at reference pixel\")\n wcshdr.append(key)\n key = astropy.io.fits.Card(\n \"CRVAL2\", targ.dec.deg, \"[deg] DEC at reference pixel\"\n )\n wcshdr.append(key)\n\n # field angle: degrees, then radians\n # direction of NCP on the detectors (where we have already flipped pixels\n # on all detectors so fieldrot=kmirr=0 implies North is up and East is left)\n # With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)\n # so the direction is the vector ( sin(f), cos(f)) before the K-mirror.\n # Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))\n # and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.\n # todo: get starting value from a siderostat field rotation tracking model\n fieldrot = 0.0\n\n if name[-1] == \"c\":\n # without prism, assuming center camera placed horizontally\n if name[:4] == \"spec\":\n # without K-mirror\n pass\n else:\n # with K-mirror\n # in the configuration the y-axis of the image has been flipped,\n # the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))\n # and applied to the input vector this is (sin(2m+f), cos(2m+f))\n fieldrot += 2.0 * kmirr\n else:\n # with prism\n if name[:4] == \"spec\":\n # without K-mirror\n # Applied to input beam this gives (-sin(f), cos(f)) but prism effect\n # had been undone by vertical flip in the FLIR image.\n pass\n else:\n # with K-mirror\n # Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).\n # Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).\n fieldrot += 2.0 * kmirr + 180.0\n\n if name[-1] == \"w\":\n # Camera is vertically,\n # so up in the lab is right in the image\n fieldrot += 90\n else:\n # Camera is vertically,\n # so up in the lab is left in the image\n fieldrot -= 90\n\n fieldrot = math.radians(fieldrot)\n\n # the section/dictionary of the yaml file for this camera\n yamlconfig = cs._config[name]\n\n if flen is None:\n flen = yamlconfig[\"flen\"]\n\n # pixel scale per arcseconds is focal length *pi/180 /3600\n # = flen * mm *pi/180 /3600\n # = flen * um *pi/180 /3.6, so in microns per arcsec...\n pixscal = math.radians(flen) / 3.6\n\n # degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600\n degperpix = yamlconfig[\"pixsize\"] / pixscal / 3600.0\n\n # for the right handed coordinates\n # (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot\n # (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)\n # (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)\n # (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)\n # Note that the det of the WCS matrix is negativ (because RA/DEC is left-handed...)\n cosperpix = degperpix * math.cos(fieldrot)\n sinperpix = degperpix * math.sin(fieldrot)\n key = astropy.io.fits.Card(\"CD1_1\", -cosperpix, \"[deg/px] WCS matrix diagonal\")\n wcshdr.append(key)\n key = astropy.io.fits.Card(\"CD2_2\", cosperpix, \"[deg/px] WCS matrix diagonal\")\n wcshdr.append(key)\n key = astropy.io.fits.Card(\n \"CD1_2\", sinperpix, \"[deg/px] WCS matrix outer diagonal\"\n )\n wcshdr.append(key)\n key = astropy.io.fits.Card(\n \"CD2_1\", sinperpix, \"[deg/px] WCS matrix outer diagonal\"\n )\n wcshdr.append(key)\n return wcshdr\n else:\n return None\n"
] |
[
[
"numpy.ndarray"
]
] |
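For the BlackflyCam.py entry above, the API column records a single call, numpy.ndarray, which _expose_grabFrame uses to wrap the raw Aravis buffer as a 16-bit frame without copying. A tiny sketch of that step, with a zero-filled byte string standing in for buf.get_data() and an assumed 4x6 region:

import numpy

height, width = 4, 6
raw = bytes(2 * height * width)                     # stands in for buf.get_data()

frame = numpy.ndarray(buffer=raw, dtype=numpy.uint16,
                      shape=(1, height, width))     # (1, rows, cols), FITS-style row order
print(frame.shape, frame.dtype)                     # (1, 4, 6) uint16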
wuaodi/Transfer-Learning-Library
|
[
"29a946143e63b66a1da9ffa685bfe95f5640028a"
] |
[
"examples/domain_adaptation/digits/mdd.py"
] |
[
"import random\nimport time\nimport warnings\nimport sys\nimport argparse\nimport shutil\nimport os.path as osp\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as T\nimport torch.nn.functional as F\n\nsys.path.append('../../..')\nfrom dalib.adaptation.mdd import ClassificationMarginDisparityDiscrepancy\\\n as MarginDisparityDiscrepancy, GeneralModule\nimport common.vision.datasets.digits as datasets\nimport common.vision.models.digits as models\nfrom common.vision.transforms import ResizeImage\nfrom common.utils.data import ForeverDataIterator\nfrom common.utils.metric import accuracy, ConfusionMatrix\nfrom common.utils.meter import AverageMeter, ProgressMeter\nfrom common.utils.logger import CompleteLogger\nfrom common.utils.analysis import collect_feature, tsne, a_distance\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef main(args: argparse.Namespace):\n logger = CompleteLogger(args.log, args.phase)\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n cudnn.benchmark = True\n\n # Data loading code\n if args.num_channels == 3:\n mode = 'RGB'\n mean = std = [0.5, 0.5, 0.5]\n else:\n mode = 'L'\n mean = std = [0.5, ]\n normalize = T.Normalize(mean=mean, std=std)\n\n train_transform = T.Compose([\n ResizeImage(args.image_size),\n # T.RandomRotation(10), # TODO need results\n T.ToTensor(),\n normalize\n ])\n val_transform = T.Compose([\n ResizeImage(args.image_size),\n T.ToTensor(),\n normalize\n ])\n\n source_dataset = datasets.__dict__[args.source]\n train_source_dataset = source_dataset(root=args.source_root, mode=mode, download=True, transform=train_transform)\n train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers, drop_last=True)\n target_dataset = datasets.__dict__[args.target]\n train_target_dataset = target_dataset(root=args.target_root, mode=mode, download=True, transform=train_transform)\n train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers, drop_last=True)\n val_dataset = target_dataset(root=args.target_root, mode=mode, split='test', download=True, transform=val_transform)\n val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)\n\n train_source_iter = ForeverDataIterator(train_source_loader)\n train_target_iter = ForeverDataIterator(train_target_loader)\n\n # create model\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n arch = models.__dict__[args.arch]()\n bottleneck = nn.Sequential(\n nn.Flatten(),\n nn.Linear(arch.bottleneck_dim, arch.bottleneck_dim),\n nn.BatchNorm1d(arch.bottleneck_dim),\n nn.ReLU(),\n nn.Dropout(0.5)\n )\n head = arch.head()\n adv_head = arch.head()\n classifier = GeneralModule(arch.backbone(), arch.num_classes, bottleneck,\n head, adv_head, finetune=False)\n mdd = MarginDisparityDiscrepancy(args.margin).to(device)\n\n # define optimizer and lr scheduler\n optimizer = Adam(classifier.get_parameters(), args.lr, betas=args.betas, 
weight_decay=args.wd)\n lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))\n\n # resume from the best checkpoint\n if args.phase != 'train':\n checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')\n classifier.load_state_dict(checkpoint)\n\n # analysis the model\n if args.phase == 'analysis':\n # extract features from both domains\n feature_extractor = torch.nn.Sequential(classifier.backbone, classifier.bottleneck).to(device)\n source_feature = collect_feature(train_source_loader, feature_extractor, device, 10)\n target_feature = collect_feature(val_loader, feature_extractor, device, 10)\n # plot t-SNE\n tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.png')\n tsne.visualize(source_feature, target_feature, tSNE_filename)\n print(\"Saving t-SNE to\", tSNE_filename)\n # calculate A-distance, which is a measure for distribution discrepancy\n A_distance = a_distance.calculate(source_feature, target_feature, device)\n print(\"A-distance =\", A_distance)\n return\n\n if args.phase == 'test':\n acc1 = validate(val_loader, classifier, args)\n print(acc1)\n return\n\n # start training\n best_acc1 = 0.\n for epoch in range(args.epochs):\n print(lr_scheduler.get_lr())\n # train for one epoch\n train(train_source_iter, train_target_iter, classifier, mdd, optimizer,\n lr_scheduler, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, classifier, args)\n\n # remember best acc@1 and save checkpoint\n torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))\n if acc1 > best_acc1:\n shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))\n best_acc1 = max(acc1, best_acc1)\n\n print(\"best_acc1 = {:3.1f}\".format(best_acc1))\n\n logger.close()\n\n\ndef train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,\n model, mdd: MarginDisparityDiscrepancy,\n optimizer: Adam, lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):\n batch_time = AverageMeter('Time', ':4.2f')\n data_time = AverageMeter('Data', ':3.1f')\n losses = AverageMeter('Loss', ':3.2f')\n trans_losses = AverageMeter('Trans Loss', ':3.2f')\n cls_accs = AverageMeter('Cls Acc', ':3.1f')\n tgt_accs = AverageMeter('Tgt Acc', ':3.1f')\n cls_adv_accs = AverageMeter('Cls Adv Acc', ':3.1f')\n tgt_adv_accs = AverageMeter('Tgt Adv Acc', ':3.1f')\n\n progress = ProgressMeter(\n args.iters_per_epoch,\n [batch_time, data_time, losses, trans_losses, cls_accs, tgt_accs, cls_adv_accs, tgt_adv_accs],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n mdd.train()\n\n end = time.time()\n for i in range(args.iters_per_epoch):\n x_s, labels_s = next(train_source_iter)\n x_t, labels_t = next(train_target_iter)\n\n x_s = x_s.to(device)\n x_t = x_t.to(device)\n labels_s = labels_s.to(device)\n labels_t = labels_t.to(device)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # compute output\n x = torch.cat((x_s, x_t), dim=0)\n outputs, outputs_adv = model(x)\n y_s, y_t = outputs.chunk(2, dim=0)\n y_s_adv, y_t_adv = outputs_adv.chunk(2, dim=0)\n\n # compute cross entropy loss on source domain\n cls_loss = F.cross_entropy(y_s, labels_s)\n # compute margin disparity discrepancy between domains\n # for adversarial classifier, minimize negative mdd is equal to maximize mdd\n transfer_loss = -mdd(y_s, y_s_adv, y_t, y_t_adv)\n loss = cls_loss + transfer_loss * args.trade_off\n model.step()\n\n cls_acc = accuracy(y_s, 
labels_s)[0]\n tgt_acc = accuracy(y_t, labels_t)[0]\n cls_adv_acc = accuracy(y_s_adv, labels_s)[0]\n tgt_adv_acc = accuracy(y_t_adv, labels_t)[0]\n\n losses.update(loss.item(), x_s.size(0))\n cls_accs.update(cls_acc.item(), x_s.size(0))\n tgt_accs.update(tgt_acc.item(), x_t.size(0))\n cls_adv_accs.update(cls_adv_acc.item(), x_s.size(0))\n tgt_adv_accs.update(tgt_adv_acc.item(), x_t.size(0))\n trans_losses.update(transfer_loss.item(), x_s.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # lr_scheduler.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader: DataLoader, model, args: argparse.Namespace) -> float:\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n if args.per_class_eval:\n classes = val_loader.dataset.classes\n confmat = ConfusionMatrix(len(classes))\n else:\n confmat = None\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n images = images.to(device)\n target = target.to(device)\n\n # compute output\n output, _ = model(images)\n loss = F.cross_entropy(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n if confmat:\n confmat.update(target, output.argmax(1))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1.item(), images.size(0))\n top5.update(acc5.item(), images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n if confmat:\n print(confmat.format(classes))\n\n return top1.avg\n\n\nif __name__ == '__main__':\n architecture_names = sorted(\n name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name])\n )\n dataset_names = sorted(\n name for name in datasets.__dict__\n if not name.startswith(\"__\") and callable(datasets.__dict__[name])\n )\n\n parser = argparse.ArgumentParser(description='Source Only for Unsupervised Domain Adaptation')\n # dataset parameters\n parser.add_argument('source_root', help='root path of the source dataset')\n parser.add_argument('target_root', help='root path of the target dataset')\n parser.add_argument('-s', '--source', help='source domain(s)')\n parser.add_argument('-t', '--target', help='target domain(s)')\n parser.add_argument('--image-size', type=int, default=28,\n help='the size of input image')\n parser.add_argument('--num-channels', default=1, choices=[1, 3],\n type=int, help='the number of image channels')\n # model parameters\n parser.add_argument('-a', '--arch', metavar='ARCH', default='lenet',\n choices=architecture_names,\n help='backbone architecture: ' +\n ' | '.join(architecture_names) +\n ' (default: lenet)')\n parser.add_argument('--margin', type=float, default=4., help=\"margin gamma\")\n parser.add_argument('--trade-off', default=1., type=float,\n help='the trade-off hyper-parameter for transfer loss')\n # training parameters\n parser.add_argument('-b', '--batch-size', default=128, type=int,\n metavar='N',\n help='mini-batch 
size (default: 32)')\n parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\n parser.add_argument('--lr-gamma', default=0.0002, type=float)\n parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')\n parser.add_argument('--betas', default=(0.9, 0.999), nargs='+', help='betas')\n parser.add_argument('--wd', '--weight-decay', default=0.0, type=float,\n metavar='W', help='weight decay (default: 5e-4)')\n parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n parser.add_argument('--epochs', default=100, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('-i', '--iters-per-epoch', default=500, type=int,\n help='Number of iterations per epoch')\n parser.add_argument('-p', '--print-freq', default=100, type=int,\n metavar='N', help='print frequency (default: 100)')\n parser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\n parser.add_argument('--per-class-eval', action='store_true',\n help='whether output per-class accuracy during evaluation')\n parser.add_argument(\"--log\", type=str, default='mdd',\n help=\"Where to save logs, checkpoints and debugging images.\")\n parser.add_argument(\"--phase\", type=str, default='train', choices=['train', 'test', 'analysis'],\n help=\"When phase is 'test', only test the model.\"\n \"When phase is 'analysis', only analysis the model.\")\n args = parser.parse_args()\n print(args)\n main(args)\n\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.cat",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.nn.Flatten",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.ReLU"
]
] |
ltiao/gp-dre
|
[
"5997a74826636a58662f5fa8c41a81d32ba8baa2"
] |
[
"scripts/plotting/generate_table.py"
] |
[
"import sys\nimport click\nimport pandas as pd\n\nfrom conf import DATASET_PRETTY_NAMES, WEIGHT_PRETTY_NAMES\n\n\[email protected]()\[email protected](\"result\", type=click.File('r'))\[email protected](\"table\", type=click.File('w'))\[email protected](\"--value\", '-v', default=\"error\")\[email protected](\"--index\", '-i', default=\"name\")\[email protected](\"--label\", '-l', default=\"tab:results\")\ndef main(result, table, value, index, label):\n\n baseline = \"uniform\"\n\n data = pd.read_csv(result, index_col=0).set_index([\"weight\", \"seed\"])\n\n # data = data.assign(error=1.0-data[\"acc\"])\n # data.drop(columns=[\"dataset_seed\", \"acc\"], inplace=True)\n data.drop(columns=\"dataset_seed\", inplace=True)\n\n data_baseline = data.query(f\"weight == '{baseline}'\") \\\n .reset_index(level=\"weight\", drop=True)\n\n data_rel = data.divide(data_baseline, axis=\"index\", level=\"seed\") \\\n .rename(columns={\"error\": \"error_relative\"})\n data_rel = data_rel.assign(error_relative_change=1.0 - data_rel.error_relative)\n\n data_new = pd.concat([data, data_rel], axis=\"columns\", join=\"inner\")\n data_new.reset_index(inplace=True)\n data_new.replace({\"weight\": WEIGHT_PRETTY_NAMES}, inplace=True)\n\n # d = data_new.reset_index().replace({\"weight\": WEIGHT_PRETTY_NAMES})\n # data.replace({\"name\": DATASET_PRETTY_NAMES}, inplace=True)\n\n columns = [\"mean\", \"std\"]\n summary = data_new.groupby(\"weight\").describe()\n\n # # summary = summary.reset_index() \\\n # # .pivot(index=index, columns=\"weight\", values=columns)\n\n table.write(summary.to_latex(\n columns=pd.MultiIndex.from_product([[\"error\", \"error_relative_change\"],\n columns]),\n float_format=\"{:0.3f}\".format,\n caption=f\"{value} across 10 trials.\", label=label,\n formatters={\n (\"error\", \"std\"): r\"($\\pm${:0.2f})\".format,\n (\"error_relative_change\", \"std\"): r\"($\\pm${:0.2f})\".format\n },\n escape=False)\n )\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main()) # pragma: no cover\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.MultiIndex.from_product"
]
] |
t-triobox/dowhy
|
[
"77906cd4edff2749683eb4b2f1ab91213e38ec9c"
] |
[
"dowhy/causal_refuters/dummy_outcome_refuter.py"
] |
[
"import copy\nimport math\nimport numpy as np\nimport pandas as pd\nimport logging\nimport pdb\nfrom collections import OrderedDict, namedtuple\nfrom dowhy.causal_refuter import CausalRefutation\nfrom dowhy.causal_refuter import CausalRefuter\nfrom dowhy.causal_estimator import CausalEstimator,CausalEstimate\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.model_selection import train_test_split\n\nfrom dowhy.causal_refuters.add_unobserved_common_cause import AddUnobservedCommonCause\n\nTestFraction = namedtuple('TestFraction', ['base','other'])\n\nclass DummyOutcomeRefuter(CausalRefuter):\n \"\"\"Refute an estimate by replacing the outcome with a simulated variable\n for which the true causal effect is known.\n\n In the simplest case, the dummy outcome is an independent, randomly\n generated variable. By definition, the true causal effect should be zero.\n\n More generally, the dummy outcome uses the observed relationship between\n confounders and outcome (conditional on treatment) to create a more\n realistic outcome for which the treatment effect is known to be zero. If\n the goal is to simulate a dummy outcome with a non-zero true causal effect,\n then we can add an arbitrary function h(t) to the dummy outcome's\n generation process and then the causal effect becomes h(t=1)-h(t=0).\n\n Note that this general procedure only works for the backdoor criterion.\n\n 1. We find f(W) for a each value of treatment. That is, keeping the treatment\n constant, we fit a predictor to estimate the effect of confounders W on\n outcome y. Note that since f(W) simply defines a new DGP for the simulated\n outcome, it need not be the correct structural equation from W to y.\n 2. We obtain the value of dummy outcome as:\n ``y_dummy = h(t) + f(W)``\n\n To prevent overfitting, we fit f(W) for one value of T and then use it to\n generate data for other values of t. Future support for identification\n based on instrumental variable and mediation.\n\n ::\n\n If we originally started out with\n\n W\n / \\\\\n t --->y\n\n On estimating the following with constant t,\n y_dummy = f(W)\n\n W\n / \\\\\n t --|->y\n\n This ensures that we try to capture as much of W--->Y as possible\n\n On adding h(t)\n\n W\n / \\\\\n t --->y\n h(t)\n\n\n Supports additional parameters that can be specified in the refute_estimate() method.\n\n :param num_simulations: The number of simulations to be run, which defaults to ``CausalRefuter.DEFAULT_NUM_SIMULATIONS``\n :type num_simulations: int, optional\n :param transformation_list: It is a list of actions to be performed to obtain the outcome, which defaults to ``DummyOutcomeRefuter.DEFAULT_TRANSFORMATION``.\n The default transformation is as follows:\n\n ``[(\"zero\",\"\"),(\"noise\", {'std_dev':1} )]``\n\n :type transformation_list: list, optional\n Each of the actions within a transformation is one of the following types:\n\n * function argument: function ``pd.Dataframe -> np.ndarray``\n\n It takes in a function that takes the input data frame as the input and outputs the outcome\n variable. This allows us to create an output varable that only depends on the covariates and does not depend\n on the treatment variable.\n\n * string argument\n\n * Currently it supports some common estimators like\n\n 1. Linear Regression\n 2. K Nearest Neighbours\n 3. Support Vector Machine\n 4. 
Neural Network\n 5. Random Forest\n\n * Or functions such as:\n\n 1. Permute\n This permutes the rows of the outcome, disassociating any effect of the treatment on the outcome.\n 2. Noise\n This adds white noise to the outcome with white noise, reducing any causal relationship with the treatment.\n 3. Zero\n It replaces all the values in the outcome by zero\n\n Examples:\n The ``transformation_list`` is of the following form:\n\n * If the function ``pd.Dataframe -> np.ndarray`` is already defined.\n ``[(func,func_params),('permute',{'permute_fraction':val}),('noise',{'std_dev':val})]``\n\n Every function should be able to support a minimum of two arguments ``X_train`` and ``outcome_train`` which correspond to the training data and the outcome that\n we want to predict, along with additional parameters such as the learning rate or the momentum constant can be set with the help of ``func_args``.\n\n ``[(neural_network,{'alpha': 0.0001, 'beta': 0.9}),('permute',{'permute_fraction': 0.2}),('noise',{'std_dev': 0.1})]``\n\n The neural network is invoked as ``neural_network(X_train, outcome_train, **args)``.\n\n * If a function from the above list is used\n ``[('knn',{'n_neighbors':5}), ('permute', {'permute_fraction': val} ), ('noise', {'std_dev': val} )]``\n\n :param true_causal_effect: A function that is used to get the True Causal Effect for the modelled dummy outcome.\n It defaults to ``DummyOutcomeRefuter.DEFAULT_TRUE_CAUSAL_EFFECT``, which means that there is no relationship between the treatment and outcome in the\n dummy data.\n :type true_causal_effect: function\n\n The equation for the dummy outcome is given by\n ``y_hat = h(t) + f(W)``\n\n where\n\n * ``y_hat`` is the dummy outcome\n * ``h(t)`` is the function that gives the true causal effect\n * ``f(W)`` is the best estimate of ``y`` obtained keeping ``t`` constant. This ensures that the variation in output of function ``f(w)`` is not caused by ``t``.\n\n .. note:: The true causal effect should take an input of the same shape as the treatment and the output should match the shape of the outcome\n\n :param required_variables: The list of variables to be used as the input for ``y~f(W)``\n This is ``True`` by default, which in turn selects all variables leaving the treatment and the outcome\n :type required_variables: int, list, bool, optional\n\n 1. An integer argument refers to how many variables will be used for estimating the value of the outcome\n 2. A list explicitly refers to which variables will be used to estimate the outcome\n Furthermore, it gives the ability to explictly select or deselect the covariates present in the estimation of the\n outcome. This is done by either adding or explicitly removing variables from the list as shown below:\n\n .. note::\n * We need to pass required_variables = ``[W0,W1]`` if we want ``W0`` and ``W1``.\n * We need to pass required_variables = ``[-W0,-W1]`` if we want all variables excluding ``W0`` and ``W1``.\n\n 3. If the value is True, we wish to include all variables to estimate the value of the outcome.\n\n .. warning:: A ``False`` value is ``INVALID`` and will result in an ``error``.\n\n .. note:: These inputs are fed to the function for estimating the outcome variable. 
The same set of required_variables is used for each\n instance of an internal estimation function.\n\n :param bucket_size_scale_factor: For continuous data, the scale factor helps us scale the size of the bucket used on the data.\n The default scale factor is ``DummyOutcomeRefuter.DEFAULT_BUCKET_SCALE_FACTOR``.\n :type bucket_size_scale_factor: float, optional\n ::\n\n The number of buckets is given by:\n (max value - min value)\n ------------------------\n (scale_factor * std_dev)\n\n :param min_data_point_threshold: The minimum number of data points for an estimator to run.\n This defaults to ``DummyOutcomeRefuter.MIN_DATA_POINT_THRESHOLD``. If the number of data points is too few\n for a certain category, we make use of the ``DummyOutcomeRefuter.DEFAULT_TRANSFORMATION`` for generaring the dummy outcome\n :type min_data_point_threshold: int, optional\n \"\"\"\n\n # The currently supported estimators\n SUPPORTED_ESTIMATORS = [\"linear_regression\", \"knn\", \"svm\", \"random_forest\", \"neural_network\"]\n # The default standard deviation for noise\n DEFAULT_STD_DEV = 0.1\n # The default scaling factor to determine the bucket size\n DEFAULT_BUCKET_SCALE_FACTOR = 0.5\n # The minimum number of points for the estimator to run\n MIN_DATA_POINT_THRESHOLD = 30\n # The Default Transformation, when no arguments are given, or if the number of data points are insufficient for an estimator\n DEFAULT_TRANSFORMATION = [(\"zero\",\"\"),(\"noise\", {'std_dev': 1} )]\n # The Default True Causal Effect, this is taken to be ZERO by default\n DEFAULT_TRUE_CAUSAL_EFFECT = lambda x: 0\n # The Default split for the number of data points that fall into the training and validation sets\n DEFAULT_TEST_FRACTION = [TestFraction(0.5, 0.5)]\n\n DEFAULT_NEW_DATA_WITH_UNOBSERVED_CONFOUNDING = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._num_simulations = kwargs.pop(\"num_simulations\", CausalRefuter.DEFAULT_NUM_SIMULATIONS)\n self._transformation_list = kwargs.pop(\"transformation_list\", DummyOutcomeRefuter.DEFAULT_TRANSFORMATION)\n self._true_causal_effect = kwargs.pop(\"true_causal_effect\", DummyOutcomeRefuter.DEFAULT_TRUE_CAUSAL_EFFECT)\n self._bucket_size_scale_factor = kwargs.pop(\"bucket_size_scale_factor\", DummyOutcomeRefuter.DEFAULT_BUCKET_SCALE_FACTOR)\n self._min_data_point_threshold = kwargs.pop(\"min_data_point_threshold\", DummyOutcomeRefuter.MIN_DATA_POINT_THRESHOLD)\n self._test_fraction = kwargs.pop(\"_test_fraction\", DummyOutcomeRefuter.DEFAULT_TEST_FRACTION)\n self._unobserved_confounder_values = kwargs.pop(\"unobserved_confounder_values\", DummyOutcomeRefuter.DEFAULT_NEW_DATA_WITH_UNOBSERVED_CONFOUNDING)\n required_variables = kwargs.pop(\"required_variables\", True)\n\n if required_variables is False:\n raise ValueError(\"The value of required_variables cannot be False\")\n\n self._chosen_variables = self.choose_variables(required_variables)\n # Assuming that outcome is one-dimensional\n self._outcome_name_str = self._outcome_name[0]\n self.logger = logging.getLogger(__name__)\n\n def refute_estimate(self):\n\n # We need to change the identified estimand\n # We thus, make a copy. 
This is done as we don't want\n # to change the original DataFrame\n identified_estimand = copy.deepcopy(self._target_estimand)\n identified_estimand.outcome_variable = [\"dummy_outcome\"]\n\n self.logger.info(\"Refutation over {} simulated datasets\".format(self._num_simulations) )\n self.logger.info(\"The transformation passed: {}\".format(self._transformation_list) )\n\n simulation_results = []\n refute_list = []\n\n # We use collections.OrderedDict to maintain the order in which the data is stored\n causal_effect_map = OrderedDict()\n\n # Check if we are using an estimator in the transformation list\n estimator_present = self._has_estimator()\n\n # The rationale behind ordering of the loops is the fact that we induce randomness everytime we create the\n # Train and the Validation Datasets. Thus, we run the simulation loop followed by the training and the validation\n # loops. Thus, we can get different values everytime we get the estimator.\n\n for _ in range( self._num_simulations ):\n estimates = []\n\n if estimator_present == False:\n\n # Warn the user that the specified parameter is not applicable when no estimator is present in the transformation\n if self._test_fraction != DummyOutcomeRefuter.DEFAULT_TEST_FRACTION:\n self.logger.warning(\"'test_fraction' is not applicable as there is no base treatment value.\")\n\n # Adding an unobserved confounder if provided by the user\n if self._unobserved_confounder_values is not None:\n self._data['simulated'] = self._unobserved_confounder_values\n self._chosen_variables.append('simulated')\n # We set X_train = 0 and outcome_train to be 0\n validation_df = self._data\n X_train = None\n outcome_train = None\n X_validation_df = validation_df[self._chosen_variables]\n\n\n X_validation = X_validation_df.values\n outcome_validation = validation_df[self._outcome_name_str].values\n\n # Get the final outcome, after running through all the values in the transformation list\n outcome_validation = self.process_data(X_train, outcome_train, X_validation, outcome_validation, self._transformation_list)\n\n # Check if the value of true effect has been already stored\n # We use None as the key as we have no base category for this refutation\n if None not in causal_effect_map:\n # As we currently support only one treatment\n causal_effect_map[None] = self._true_causal_effect( validation_df[ self._treatment_name[0] ] )\n\n outcome_validation += causal_effect_map[None]\n\n\n new_data = validation_df.assign(dummy_outcome=outcome_validation)\n\n\n new_estimator = CausalEstimator.get_estimator_object(new_data, identified_estimand, self._estimate)\n new_effect = new_estimator.estimate_effect()\n estimates.append(new_effect.value)\n\n else:\n\n groups = self.preprocess_data_by_treatment()\n group_count = 0\n\n if len(self._test_fraction) == 1:\n self._test_fraction = len(groups) * self._test_fraction\n\n for key_train, _ in groups:\n base_train = groups.get_group(key_train).sample(frac=self._test_fraction[group_count].base)\n train_set = set( [ tuple(line) for line in base_train.values ] )\n total_set = set( [ tuple(line) for line in groups.get_group(key_train).values ] )\n base_validation = pd.DataFrame( list( total_set.difference(train_set) ), columns=base_train.columns )\n X_train_df = base_train[self._chosen_variables]\n\n X_train = X_train_df.values\n outcome_train = base_train[self._outcome_name_str].values\n\n validation_df = []\n transformation_list = self._transformation_list\n validation_df.append(base_validation)\n\n for key_validation, _ in groups:\n if 
key_validation != key_train:\n validation_df.append(groups.get_group(key_validation).sample(frac=self._test_fraction[group_count].other))\n\n validation_df = pd.concat(validation_df)\n X_validation_df = validation_df[self._chosen_variables]\n\n X_validation = X_validation_df.values\n outcome_validation = validation_df[self._outcome_name_str].values\n\n # If the number of data points is too few, run the default transformation: [(\"zero\",\"\"),(\"noise\", {'std_dev':1} )]\n if X_train.shape[0] <= self._min_data_point_threshold:\n transformation_list = DummyOutcomeRefuter.DEFAULT_TRANSFORMATION\n self.logger.warning(\"The number of data points in X_train:{} for category:{} is less than threshold:{}\".format(X_train.shape[0], key_train, self._min_data_point_threshold))\n self.logger.warning(\"Therefore, defaulting to the minimal set of transformations:{}\".format(transformation_list))\n\n outcome_validation = self.process_data(X_train, outcome_train, X_validation, outcome_validation, transformation_list)\n\n # Check if the value of true effect has been already stored\n # This ensures that we calculate the causal effect only once.\n # We use key_train as we map data with respect to the base category of the data\n\n if key_train not in causal_effect_map:\n # As we currently support only one treatment\n causal_effect_map[key_train] = self._true_causal_effect( validation_df[ self._treatment_name[0] ] )\n\n # Add h(t) to f(W) to get the dummy outcome\n outcome_validation += causal_effect_map[key_train]\n\n new_data = validation_df.assign(dummy_outcome=outcome_validation)\n new_estimator = CausalEstimator.get_estimator_object(new_data, identified_estimand, self._estimate)\n new_effect = new_estimator.estimate_effect()\n\n estimates.append(new_effect.value)\n group_count += 1\n\n\n simulation_results.append(estimates)\n\n\n # We convert to ndarray for ease in indexing\n # The data is of the form\n # sim1: cat1 cat2 ... catn\n # sim2: cat1 cat2 ... catn\n simulation_results = np.array(simulation_results)\n\n # Note: We would like the causal_estimator to find the true causal estimate that we have specified through this\n # refuter. Let the value of the true causal effect be h(t). 
In the following section of code, we wish to find out if h(t) falls in the\n # distribution of the refuter.\n\n if estimator_present == False:\n\n dummy_estimate = CausalEstimate(\n estimate = causal_effect_map[None],\n control_value = self._estimate.control_value,\n treatment_value=self._estimate.treatment_value,\n target_estimand =self._estimate.target_estimand,\n realized_estimand_expr=self._estimate.realized_estimand_expr)\n\n refute = CausalRefutation(\n dummy_estimate.value,\n np.mean(simulation_results),\n refutation_type=\"Refute: Use a Dummy Outcome\"\n )\n\n refute.add_significance_test_results(\n self.test_significance(dummy_estimate, np.ravel(simulation_results))\n )\n\n refute.add_refuter(self)\n\n refute_list.append(refute)\n\n else:\n # True Causal Effect list\n causal_effect_list = list( causal_effect_map.values() )\n # Iterating through the refutation for each category\n for train_category in range(simulation_results.shape[1]):\n dummy_estimate = CausalEstimate(\n estimate=causal_effect_list[train_category],\n control_value=self._estimate.control_value,\n treatment_value=self._estimate.treatment_value,\n target_estimand=self._estimate.target_estimand,\n realized_estimand_expr=self._estimate.realized_estimand_expr)\n\n refute = CausalRefutation(\n dummy_estimate.value,\n np.mean(simulation_results[:, train_category]),\n refutation_type=\"Refute: Use a Dummy Outcome\"\n )\n\n refute.add_significance_test_results(\n self.test_significance(dummy_estimate, simulation_results[:, train_category])\n )\n\n refute.add_refuter(self)\n refute_list.append(refute)\n\n\n return refute_list\n\n def process_data(self, X_train, outcome_train, X_validation, outcome_validation, transformation_list):\n \"\"\"\n We process the data by first training the estimators in the transformation_list on ``X_train`` and ``outcome_train``.\n We then apply the estimators on ``X_validation`` to get the value of the dummy outcome, which we store in ``outcome_validation``.\n\n :param X_train: The data of the covariates which is used to train an estimator. 
It corresponds to the data of a single category of the treatment\n :type X_train: np.ndarray\n :param outcome_train: This is used to hold the intermediate values of the outcome variable in the transformation list\n :type outcome_train: np.ndarray\n\n For Example:\n\n ``[ ('permute', {'permute_fraction': val} ), (func,func_params)]``\n\n The value obtained from permutation is used as an input for the custom estimator.\n\n :param X_validation: The data of the covariates that is fed to a trained estimator to generate a dummy outcome\n :type X_validation: np.ndarray\n :param outcome_validation: This variable stores the dummy_outcome generated by the transformations\n :type outcome_validation: np.ndarray\n :param transformation_list: The list of transformations on the outcome data required to produce a dummy outcome\n :type transformation_list: np.ndarray\n \"\"\"\n for action, func_args in transformation_list:\n if callable(action):\n estimator = action(X_train, outcome_train, **func_args)\n outcome_train = estimator(X_train)\n outcome_validation = estimator(X_validation)\n elif action in DummyOutcomeRefuter.SUPPORTED_ESTIMATORS:\n estimator = self._estimate_dummy_outcome(action, X_train, outcome_train, **func_args)\n outcome_train = estimator(X_train)\n outcome_validation = estimator(X_validation)\n elif action == 'noise':\n if X_train is not None:\n outcome_train = self.noise(outcome_train, **func_args)\n outcome_validation = self.noise(outcome_validation, **func_args)\n elif action == 'permute':\n if X_train is not None:\n outcome_train = self.permute(outcome_train, **func_args)\n outcome_validation = self.permute(outcome_validation, **func_args)\n elif action =='zero':\n if X_train is not None:\n outcome_train = np.zeros(outcome_train.shape)\n outcome_validation = np.zeros(outcome_validation.shape)\n\n return outcome_validation\n\n def _has_estimator(self):\n \"\"\"\n This function checks if there is an estimator in the transformation list.\n\n If there are no estimators, we can optimize processing by skipping the\n data preprocessing and running the transformations on the whole dataset.\n \"\"\"\n for action,_ in self._transformation_list:\n if callable(action) or action in DummyOutcomeRefuter.SUPPORTED_ESTIMATORS:\n return True\n\n return False\n\n def preprocess_data_by_treatment(self):\n \"\"\"\n This function groups data based on the data type of the treatment.\n\n Expected variable types supported for the treatment:\n\n * bool\n * pd.categorical\n * float\n * int\n\n :returns: ``pandas.core.groupby.generic.DataFrameGroupBy``\n \"\"\"\n assert len(self._treatment_name) == 1, \"At present, DoWhy supports a simgle treatment variable\"\n\n\n if self._unobserved_confounder_values is not None:\n self._data['simulated'] = self._unobserved_confounder_values\n self._chosen_variables.append('simulated')\n\n treatment_variable_name = self._treatment_name[0] # As we only have a single treatment\n variable_type = self._data[treatment_variable_name].dtypes\n\n if bool == variable_type:\n groups = self._data.groupby(treatment_variable_name)\n return groups\n # We use string arguments to account for both 32 and 64 bit varaibles\n elif 'float' in variable_type.name or \\\n 'int' in variable_type.name:\n # action for continuous variables\n data = self._data\n std_dev = data[treatment_variable_name].std()\n num_bins = ( data.max() - data.min() )/ (self._bucket_size_scale_factor * std_dev)\n data['bins'] = pd.cut(data[treatment_variable_name], num_bins)\n groups = data.groupby('bins')\n 
data.drop('bins', axis=1, inplace=True)\n return groups\n\n elif 'categorical' in variable_type.name:\n # Action for categorical variables\n groups = data.groupby(treatment_variable_name)\n groups = data.groupby('bins')\n return groups\n else:\n raise ValueError(\"Passed {}. Expected bool, float, int or categorical.\".format(variable_type.name))\n\n def _estimate_dummy_outcome(self, action, X_train, outcome, **func_args):\n \"\"\"\n A function that takes in any sklearn estimator and returns a trained estimator\n\n :param 'action': str\n The sklearn estimator to be used.\n :param 'X_train': np.ndarray\n The variable used to estimate the value of outcome.\n :param 'outcome': np.ndarray\n The variable which we wish to estimate.\n :param 'func_args': variable length keyworded argument\n The parameters passed to the estimator.\n \"\"\"\n estimator = self._get_regressor_object(action, **func_args)\n X = X_train\n y = outcome\n\n estimator = estimator.fit(X, y)\n\n return estimator.predict\n\n def _get_regressor_object(self, action, **func_args):\n \"\"\"\n Return a sklearn estimator object based on the estimator and corresponding parameters\n\n :param 'action': str\n The sklearn estimator used.\n :param 'func_args': variable length keyworded argument\n The parameters passed to the sklearn estimator.\n \"\"\"\n if action == \"linear_regression\":\n return LinearRegression(**func_args)\n elif action == \"knn\":\n return KNeighborsRegressor(**func_args)\n elif action == \"svm\":\n return SVR(**func_args)\n elif action == \"random_forest\":\n return RandomForestRegressor(**func_args)\n elif action == \"neural_network\":\n return MLPRegressor(**func_args)\n else:\n raise ValueError(\"The function: {} is not supported by dowhy at the moment.\".format(action))\n\n def permute(self, outcome, permute_fraction):\n '''\n If the permute_fraction is 1, we permute all the values in the outcome.\n Otherwise we make use of the Fisher Yates shuffle.\n Refer to https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle for more details.\n\n :param 'outcome': np.ndarray\n The outcome variable to be permuted.\n :param 'permute_fraction': float [0, 1]\n The fraction of rows permuted.\n '''\n if permute_fraction == 1:\n outcome = pd.DataFrame(outcome)\n outcome.columns = [self._outcome_name_str]\n return outcome[self._outcome_name_str].sample(frac=1).values\n elif permute_fraction < 1:\n permute_fraction /= 2 # We do this as every swap leads to two changes\n changes = np.where( np.random.uniform(0,1,outcome.shape[0]) <= permute_fraction )[0] # As this is tuple containing a single element (array[...])\n num_rows = outcome.shape[0]\n for change in changes:\n if change + 1 < num_rows:\n index = np.random.randint(change+1,num_rows)\n temp = outcome[change]\n outcome[change] = outcome[index]\n outcome[index] = temp\n return outcome\n else:\n raise ValueError(\"The value of permute_fraction is {}. Which is greater than 1.\".format(permute_fraction))\n\n def noise(self, outcome, std_dev):\n \"\"\"\n Add white noise with mean 0 and standard deviation = std_dev\n\n :param 'outcome': np.ndarray\n The outcome variable, to which the white noise is added.\n :param 'std_dev': float\n The standard deviation of the white noise.\n\n :returns: outcome with added noise\n \"\"\"\n return outcome + np.random.normal(scale=std_dev,size=outcome.shape[0])\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"pandas.concat",
"sklearn.neural_network.MLPRegressor",
"pandas.DataFrame",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.svm.SVR",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.mean",
"sklearn.linear_model.LinearRegression",
"pandas.cut",
"numpy.ravel",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
moorepatrick/beep
|
[
"c54b80d1afb5b175bd9335481efc2474a4317c47"
] |
[
"beep/tests/test_principal_components.py"
] |
[
"# Copyright 2019 Toyota Research Institute. All rights reserved.\nimport json\nimport os\nimport unittest\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom beep.principal_components import PrincipalComponents, pivot_data\n\nTEST_DIR = os.path.dirname(__file__)\nTEST_FILE_DIR = os.path.join(TEST_DIR, \"test_files\")\n\n\nclass PrincipalComponentsTest(unittest.TestCase):\n def setUp(self):\n self.processed_run_path = os.path.join(TEST_FILE_DIR, \"2017-06-30_2C-10per_6C_CH10_structure.json\")\n self.cycles_to_pca = np.linspace(20, 120, 20, dtype=int)\n self.cycles_to_test = np.linspace(121, 131, 6, dtype=int)\n json_obj = {\n \"file_list\": [self.processed_run_path],\n \"run_list\": [1]\n }\n json_string = json.dumps(json_obj)\n self.pc = PrincipalComponents.from_interpolated_data(json_string, cycles_to_pca=self.cycles_to_pca)\n\n def test_pivot_data(self):\n json_obj = {\n \"file_list\": [self.processed_run_path],\n \"run_list\": [1]\n }\n json_string = json.dumps(json_obj)\n df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_pca)\n self.assertEqual(df_to_pca.shape, (len(self.cycles_to_pca), 1000))\n\n def test_fit(self):\n self.assertIsInstance(self.pc.pca, PCA)\n self.assertEqual(self.pc.min_components, 4)\n\n def test_get_pca_embeddings(self):\n json_obj = {\n \"file_list\": [self.processed_run_path],\n \"run_list\": [1]\n }\n json_string = json.dumps(json_obj)\n df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_test)\n pca_embeddings = self.pc.get_pca_embeddings(df_to_pca)\n self.assertEqual(pca_embeddings.shape, (len(self.cycles_to_test), self.pc.n_components))\n\n\n def test_get_pca_reconstruction(self):\n \"\"\"\n Method to inverse transform PCA embeddings to reconstruct data\n \"\"\"\n json_obj = {\n \"file_list\": [self.processed_run_path],\n \"run_list\": [1]\n }\n json_string = json.dumps(json_obj)\n df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_test)\n pca_embeddings = self.pc.get_pca_embeddings(df_to_pca)\n pca_reconstruction = self.pc.get_pca_reconstruction(pca_embeddings)\n self.assertEqual(pca_reconstruction.shape, (len(self.cycles_to_test), 1000))\n\n def test_get_reconstruction_errors(self):\n json_obj = {\n \"file_list\": [self.processed_run_path],\n \"run_list\": [1]\n }\n json_string = json.dumps(json_obj)\n df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_test)\n reconstruction_errors, outliers = self.pc.get_reconstruction_error_outliers(df_to_pca, threshold=1.5)\n self.assertAlmostEqual(reconstruction_errors[0], 0.002553278, places=8)\n self.assertTrue(outliers[0])\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.linspace"
]
] |
Edadeal/deephack-rl
|
[
"86f13be19f5650b9acc9dd3b82ea5637418c7ad3"
] |
[
"lib/atari/state_processor.py"
] |
[
"import numpy as np\nimport tensorflow as tf\n\n\nclass StateProcessor():\n \"\"\"\n Processes a raw Atari iamges. Resizes it and converts it to grayscale.\n \"\"\"\n\n def __init__(self):\n # Build the Tensorflow graph\n with tf.variable_scope(\"state_processor\"):\n self.input_state = tf.placeholder(\n shape=[210, 160, 3], dtype=tf.uint8)\n self.output = tf.image.rgb_to_grayscale(self.input_state)\n self.output = tf.image.crop_to_bounding_box(\n self.output, 34, 0, 160, 160)\n self.output = tf.image.resize_images(\n self.output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n self.output = tf.squeeze(self.output)\n\n def process(self, state, sess=None):\n \"\"\"\n Args:\n sess: A Tensorflow session object\n state: A [210, 160, 3] Atari RGB State\n\n Returns:\n A processed [84, 84, 1] state representing grayscale values.\n \"\"\"\n sess = sess or tf.get_default_session()\n return sess.run(self.output, {self.input_state: state[30:-10]})\n"
] |
[
[
"tensorflow.get_default_session",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.image.resize_images",
"tensorflow.placeholder",
"tensorflow.squeeze",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.variable_scope"
]
] |
barentsen/vphastools
|
[
"214ddea35d2628034c236c5647a977fc2bad6572"
] |
[
"surveytools/__init__.py"
] |
[
"from __future__ import absolute_import\n\nimport os\n\n# Use Agg if no DISPLAY is available\nDISPLAY = os.environ.get('DISPLAY')\nif DISPLAY is None or DISPLAY.startswith('localhost'):\n import matplotlib\n matplotlib.use('Agg')\n\n###########\n# CONSTANTS\n###########\n\n# Where are VPHAS reduced images and calibration frames?\nVPHAS_DATA_PATH = '/home/gb/tmp/vphasdisk'\nVPHAS_PIXEL_SCALE = 0.213 # arcsec/px, cf. OmegaCAM manual Sect 2.1\nVPHAS_BANDS = ['u', 'g', 'r2', 'ha', 'r', 'i']\n\n# Where is the data that comes with this package?\nSURVEYTOOLS_PATH = os.path.abspath(os.path.dirname(__file__))\nSURVEYTOOLS_DATA = os.path.join(SURVEYTOOLS_PATH, 'data')\nSURVEYTOOLS_CONFIGDIR = os.path.join(SURVEYTOOLS_PATH, 'config')\nSURVEYTOOLS_LIB_DIR = os.path.join(SURVEYTOOLS_PATH, 'lib')\n\n# How to run stilts?\nSTILTS_JAR = os.path.join(SURVEYTOOLS_LIB_DIR, 'stilts.jar')\nSTILTS = 'nice java -Xmx2000M -XX:+UseConcMarkSweepGC -jar ' + STILTS_JAR\n\n# Position of the VST/OmegaCAM CCDs.\n# left-right = East-West and top-bottom = North-South;\n# the numbers refer to the FITS HDU extension number of an OmegaCam image.\nOMEGACAM_CCD_ARRANGEMENT = [32, 31, 30, 29, 16, 15, 14, 13,\n 28, 27, 26, 25, 12, 11, 10, 9,\n 24, 23, 22, 21, 8, 7, 6, 5,\n 20, 19, 18, 17, 4, 3, 2, 1]\n\n\nfrom .catalogue import VphasFrame, VphasOffsetCatalogue\n"
] |
[
[
"matplotlib.use"
]
] |
basnijholt/qcodes-repr
|
[
"ab761b385c3ec60f16e975667bc08a9e30f0cb2f"
] |
[
"formatting.py"
] |
[
"\"\"\"String formatting routines for qcodes.DataSet.__repr__.\n\nThis code heavily borrows from `xarray`, whose license can be found\nin `licenses/XARRAY_LICENSE`.\n\"\"\"\n\nimport contextlib\nfrom datetime import datetime, timedelta\nfrom itertools import zip_longest\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import OutOfBoundsDatetime\n\nfrom xarray.core.options import OPTIONS\n\n\ndef _get_indexer_at_least_n_items(shape, n_desired, from_end):\n assert 0 < n_desired <= np.prod(shape)\n cum_items = np.cumprod(shape[::-1])\n n_steps = np.argmax(cum_items >= n_desired)\n stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))\n indexer = (\n ((-1 if from_end else 0),) * (len(shape) - 1 - n_steps)\n + ((slice(-stop, None) if from_end else slice(stop)),)\n + (slice(None),) * n_steps\n )\n return indexer\n\n\ndef first_n_items(array, n_desired):\n \"\"\"Returns the first n_desired items of an array\"\"\"\n # Unfortunately, we can't just do array.flat[:n_desired] here because it\n # might not be a numpy.ndarray. Moreover, access to elements of the array\n # could be very expensive (e.g. if it's only available over DAP), so go out\n # of our way to get them in a single call to __getitem__ using only slices.\n if n_desired < 1:\n raise ValueError(\"must request at least one item\")\n\n if array.size == 0:\n # work around for https://github.com/numpy/numpy/issues/5195\n return []\n\n if n_desired < array.size:\n indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)\n array = array[indexer]\n return np.asarray(array).flat[:n_desired]\n\n\ndef last_n_items(array, n_desired):\n \"\"\"Returns the last n_desired items of an array\"\"\"\n # Unfortunately, we can't just do array.flat[-n_desired:] here because it\n # might not be a numpy.ndarray. Moreover, access to elements of the array\n # could be very expensive (e.g. 
if it's only available over DAP), so go out\n # of our way to get them in a single call to __getitem__ using only slices.\n if (n_desired == 0) or (array.size == 0):\n return []\n\n if n_desired < array.size:\n indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True)\n array = array[indexer]\n return np.asarray(array).flat[-n_desired:]\n\n\n# def last_item(array):\n# \"\"\"Returns the last item of an array in a list or an empty list.\"\"\"\n# if array.size == 0:\n# # work around for https://github.com/numpy/numpy/issues/5195\n# return []\n\n# indexer = (slice(-1, None),) * array.ndim\n# return np.ravel(np.asarray(array[indexer])).tolist()\n\n\ndef format_timestamp(t):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n # Timestamp is only valid for 1678 to 2262\n try:\n datetime_str = str(pd.Timestamp(t))\n except OutOfBoundsDatetime:\n datetime_str = str(t)\n\n try:\n date_str, time_str = datetime_str.split()\n except ValueError:\n # catch NaT and others that don't split nicely\n return datetime_str\n else:\n if time_str == \"00:00:00\":\n return date_str\n else:\n return f\"{date_str}T{time_str}\"\n\n\ndef format_timedelta(t, timedelta_format=None):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n timedelta_str = str(pd.Timedelta(t))\n try:\n days_str, time_str = timedelta_str.split(\" days \")\n except ValueError:\n # catch NaT and others that don't split nicely\n return timedelta_str\n else:\n if timedelta_format == \"date\":\n return days_str + \" days\"\n elif timedelta_format == \"time\":\n return time_str\n else:\n return timedelta_str\n\n\ndef format_item(x, timedelta_format=None, quote_strings=True):\n \"\"\"Returns a succinct summary of an object as a string\"\"\"\n if isinstance(x, (np.datetime64, datetime)):\n return format_timestamp(x)\n if isinstance(x, (np.timedelta64, timedelta)):\n return format_timedelta(x, timedelta_format=timedelta_format)\n elif isinstance(x, (str, bytes)):\n return repr(x) if quote_strings else x\n elif isinstance(x, (float, np.float)):\n return f\"{x:.4}\"\n else:\n return str(x)\n\n\ndef format_items(x):\n \"\"\"Returns a succinct summaries of all items in a sequence as strings\"\"\"\n x = np.asarray(x)\n timedelta_format = \"datetime\"\n if np.issubdtype(x.dtype, np.timedelta64):\n x = np.asarray(x, dtype=\"timedelta64[ns]\")\n day_part = x[~pd.isnull(x)].astype(\"timedelta64[D]\").astype(\"timedelta64[ns]\")\n time_needed = x[~pd.isnull(x)] != day_part\n day_needed = day_part != np.timedelta64(0, \"ns\")\n if np.logical_not(day_needed).all():\n timedelta_format = \"time\"\n elif np.logical_not(time_needed).all():\n timedelta_format = \"date\"\n\n formatted = [format_item(xi, timedelta_format) for xi in x]\n return formatted\n\n\ndef format_array_flat(array, max_width):\n \"\"\"Return a formatted string for as many items in the flattened version of\n array that will fit within max_width characters.\n \"\"\"\n # every item will take up at least two characters, but we always want to\n # print at least first and last items\n max_possibly_relevant = min(\n max(array.size, 1), max(int(np.ceil(max_width / 2.0)), 2)\n )\n relevant_front_items = format_items(\n first_n_items(array, (max_possibly_relevant + 1) // 2)\n )\n relevant_back_items = format_items(last_n_items(array, max_possibly_relevant // 2))\n # interleave relevant front and back items:\n # [a, b, c] and [y, z] -> [a, z, b, y, c]\n relevant_items = sum(\n zip_longest(relevant_front_items, 
reversed(relevant_back_items)), ()\n )[:max_possibly_relevant]\n\n cum_len = np.cumsum([len(s) + 1 for s in relevant_items]) - 1\n if (array.size > 2) and (\n (max_possibly_relevant < array.size) or (cum_len > max_width).any()\n ):\n padding = \" ... \"\n count = min(\n array.size, max(np.argmax(cum_len + len(padding) - 1 > max_width), 2)\n )\n else:\n count = array.size\n padding = \"\" if (count <= 1) else \" \"\n\n num_front = (count + 1) // 2\n num_back = count - num_front\n # note that num_back is 0 <--> array.size is 0 or 1\n # <--> relevant_back_items is []\n pprint_str = (\n \" \".join(relevant_front_items[:num_front])\n + padding\n + \" \".join(relevant_back_items[-num_back:])\n )\n return pprint_str\n\n\[email protected]\ndef set_numpy_options(*args, **kwargs):\n original = np.get_printoptions()\n np.set_printoptions(*args, **kwargs)\n try:\n yield\n finally:\n np.set_printoptions(**original)\n\n\ndef short_numpy_repr(array):\n array = np.asarray(array)\n\n # default to lower precision so a full (abbreviated) line can fit on\n # one line with the default display_width\n options = {\"precision\": 6, \"linewidth\": OPTIONS[\"display_width\"], \"threshold\": 200}\n if array.ndim < 3:\n edgeitems = 3\n elif array.ndim == 3:\n edgeitems = 2\n else:\n edgeitems = 1\n options[\"edgeitems\"] = edgeitems\n with set_numpy_options(**options):\n return repr(array)\n\n\ndef short_data_repr(array):\n \"\"\"Format \"data\" for DataArray and Variable.\"\"\"\n if isinstance(array, np.ndarray):\n return short_numpy_repr(array)\n elif array._in_memory or array.size < 1e5:\n return short_numpy_repr(array)\n else:\n # internal xarray array type\n return f\"[{array.size} values with dtype={array.dtype}]\"\n"
] |
[
[
"numpy.logical_not",
"numpy.get_printoptions",
"pandas.isnull",
"numpy.asarray",
"numpy.issubdtype",
"numpy.set_printoptions",
"pandas.Timedelta",
"numpy.timedelta64",
"numpy.ceil",
"numpy.cumprod",
"numpy.argmax",
"numpy.prod",
"pandas.Timestamp"
]
] |
PipKat/pyaedt
|
[
"0c56c35cab30ef2ba63a0333b64c3d34f9f9820d"
] |
[
"examples/03-Circuit/Touchstone_Management.py"
] |
[
"\"\"\"\nManage Touchstone Objects\n--------------------------\nThis example shows how to use Touchstone objects without opening AEDT.\n\nTo provide the advanced postprocessing features needed for this example, Matplotlib and NumPy\nmust be installed on the machine.\n\nThis example runs only on Windows using CPython.\n\"\"\"\n# sphinx_gallery_thumbnail_path = 'Resources/nde.png'\n\nimport os\nimport pathlib\nimport sys\n\nlocal_path = os.path.abspath(\"\")\nmodule_path = pathlib.Path(local_path)\nroot_path = module_path.parent\nroot_path2 = root_path.parent\nroot_path3 = root_path2.parent\npath1 = os.path.join(root_path2)\npath2 = os.path.join(root_path3)\nsys.path.append(path1)\nsys.path.append(path2)\nfrom pyaedt import examples\n\nexample_path = examples.download_touchstone()\n\n###############################################################################\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pyaedt.generic.TouchstoneParser import (\n read_touchstone,\n get_return_losses,\n get_insertion_losses_from_prefix,\n get_fext_xtalk_from_prefix,\n get_next_xtalk,\n get_worst_curve_from_solution_data,\n)\n\n###############################################################################\n\ndata = read_touchstone(example_path)\n\n###############################################################################\n# Get Curve Names\n# ~~~~~~~~~~~~~~~\n# These methods identify the list of insertion losses, return losses, fext,\n# and next based on a few inputs and port names.\n\nrl_list = get_return_losses(data.ports)\nil_list = get_insertion_losses_from_prefix(data.ports, \"U1\", \"U7\")\nfext_list = get_fext_xtalk_from_prefix(data.ports, \"U1\", \"U7\")\nnext_list = get_next_xtalk(data.ports, \"U1\")\n\n\n###############################################################################\n# Get Curve Worst Cases\n# ~~~~~~~~~~~~~~~~~~~~~\n# These example get the worst cases for the curve.\n\nworst_rl, global_mean = get_worst_curve_from_solution_data(\n data, freq_min=1, freq_max=20, worst_is_higher=True, curve_list=rl_list\n)\nworst_il, mean2 = get_worst_curve_from_solution_data(\n data, freq_min=1, freq_max=20, worst_is_higher=False, curve_list=il_list\n)\nworst_fext, mean3 = get_worst_curve_from_solution_data(\n data, freq_min=1, freq_max=20, worst_is_higher=True, curve_list=fext_list\n)\nworst_next, mean4 = get_worst_curve_from_solution_data(\n data, freq_min=1, freq_max=20, worst_is_higher=True, curve_list=next_list\n)\n\n###############################################################################\n# Use Matplotlib to Plot the Curves\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# This example uses Matplotlib to plot the curves.\n\nfig, ax = plt.subplots(figsize=(20, 10))\nax.set(xlabel=\"Frequency (Hz)\", ylabel=\"Return Loss (dB)\", title=\"Return Loss\")\nax.grid()\nmag_data = 20 * np.log10(np.array(data.solutions_data_mag[worst_rl]))\nfreq_data = np.array([i * 1e9 for i in data.sweeps[\"Freq\"]])\nax.plot(freq_data, mag_data, label=worst_rl)\nmag_data2 = 20 * np.log10(np.array(data.solutions_data_mag[worst_il]))\nax.plot(freq_data, mag_data2, label=worst_il)\nmag_data3 = 20 * np.log10(np.array(data.solutions_data_mag[worst_fext]))\nax.plot(freq_data, mag_data3, label=worst_fext)\nmag_data4 = 20 * np.log10(np.array(data.solutions_data_mag[worst_next]))\nax.plot(freq_data, mag_data4, label=worst_next)\nax.legend(\n [\"Worst RL = \" + worst_rl, \"Worst IL = \" + worst_il, \"Worst FEXT = \" + worst_fext, \"Worst NEXT = \" + worst_next]\n)\nplt.show()\n"
] |
[
[
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
] |
thsshz/DeblurGAN
|
[
"b0c786e15256639f145737874aa71fc77263b959"
] |
[
"options/base_options.py"
] |
[
"import argparse\nimport os\nfrom util import util\nimport torch\n\nclass BaseOptions():\n\tdef __init__(self):\n\t\tself.parser = argparse.ArgumentParser()\n\t\tself.initialized = False\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n\t\tself.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')\n\t\tself.parser.add_argument('--loadSizeX', type=int, default=640, help='scale images to this size')\n\t\tself.parser.add_argument('--loadSizeY', type=int, default=360, help='scale images to this size')\n\t\tself.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')\n\t\tself.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')\n\t\tself.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')\n\t\tself.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')\n\t\tself.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')\n\t\tself.parser.add_argument('--which_model_netD', type=str, default='basic', help='selects model to use for netD')\n\t\tself.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')\n\t\tself.parser.add_argument('--learn_residual', action='store_true', help='if specified, model would learn only the residual to the input')\n\t\tself.parser.add_argument('--gan_type', type=str, default='wgan-gp', help='wgan-gp : Wasserstein GAN with Gradient Penalty, lsgan : Least Sqaures GAN, gan : Vanilla GAN')\n\t\tself.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')\n\t\tself.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n\t\tself.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')\n\t\tself.parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')\n\t\tself.parser.add_argument('--model', type=str, default='content_gan', help='chooses which model to use. 
pix2pix, test, content_gan')\n\t\tself.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')\n\t\tself.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')\n\t\tself.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n\t\tself.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')\n\t\tself.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\n\t\tself.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')\n\t\tself.parser.add_argument('--display_id', type=int, default=0, help='window id of the web display')\n\t\tself.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')\n\t\tself.parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')\n\t\tself.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')\n\t\tself.parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n\t\tself.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')\n\t\tself.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n\n\t\tself.initialized = True\n\n\tdef parse(self):\n\t\tif not self.initialized:\n\t\t\tself.initialize()\n\t\tself.opt = self.parser.parse_args()\n\t\tself.opt.isTrain = self.isTrain # train or test\n\n\t\tstr_ids = self.opt.gpu_ids.split(',')\n\t\tself.opt.gpu_ids = []\n\t\tfor str_id in str_ids:\n\t\t\tid = int(str_id)\n\t\t\tif id >= 0:\n\t\t\t\tself.opt.gpu_ids.append(id)\n\n\t\t# set gpu ids\n\t\tif len(self.opt.gpu_ids) > 0:\n\t\t\ttorch.cuda.set_device(self.opt.gpu_ids[0])\n\n\t\targs = vars(self.opt)\n\n\t\tprint('------------ Options -------------')\n\t\tfor k, v in sorted(args.items()):\n\t\t\tprint('%s: %s' % (str(k), str(v)))\n\t\tprint('-------------- End ----------------')\n\n\t\t# save to the disk\n\t\texpr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)\n\t\tutil.mkdirs(expr_dir)\n\t\tfile_name = os.path.join(expr_dir, 'opt.txt')\n\t\twith open(file_name, 'wt') as opt_file:\n\t\t\topt_file.write('------------ Options -------------\\n')\n\t\t\tfor k, v in sorted(args.items()):\n\t\t\t\topt_file.write('%s: %s\\n' % (str(k), str(v)))\n\t\t\topt_file.write('-------------- End ----------------\\n')\n\t\treturn self.opt\n"
] |
[
[
"torch.cuda.set_device"
]
] |
A2Zntu/ML_HW
|
[
"00db8a45ba38fc864b71c31b0255488c95880c4c"
] |
[
"Lab4/nnCostFunction.py"
] |
[
"import numpy as np\nfrom sigmoid import sigmoid\nfrom sigmoidGradient import sigmoidGradient\n\ndef nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_value):\n#NNCOSTFUNCTION Implements the neural network cost function for a two layer\n#neural network which performs classification\n# [J grad] = NNCOSTFUNCTON(nn_params, hidden_layer_size, num_labels, ...\n# X, y, lambda_value) computes the cost and gradient of the neural network. The\n# parameters for the neural network are \"unrolled\" into the vector\n# nn_params and need to be converted back into the weight matrices. \n# \n# The returned parameter grad should be a \"unrolled\" vector of the\n# partial derivatives of the neural network.\n#\n\n# Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n# for our 2 layer neural network\n tmp = nn_params.copy()\n Theta1 = np.reshape(tmp[0:hidden_layer_size * (input_layer_size + 1)], \n (hidden_layer_size, (input_layer_size + 1)), order='F')\n Theta2 = np.reshape(tmp[(hidden_layer_size * (input_layer_size + 1)):len(tmp)], \n (num_labels, (hidden_layer_size + 1)), order='F')\n\n# Setup some useful variables\n m = np.shape(X)[0]\n\n# Computation of the Cost function including regularisation\n# Feedforward \n a2 = sigmoid(np.dot(np.hstack((np.ones((m, 1)), X)), np.transpose(Theta1)))\n a3 = sigmoid(np.dot(np.hstack((np.ones((m, 1)), a2)), np.transpose(Theta2)))\n\n # Cost function for Logistic Regression summed over all output nodes\n Cost = np.empty((num_labels, 1))\n for k in range(num_labels):\n # which examples fit this label\n y_binary=(y==k+1)\n # select all predictions for label k\n hk=a3[:,k]\n # compute two parts of cost function for all examples for node k\n Cost[k][0] = np.sum(np.transpose(y_binary)*np.log(hk)) + np.sum(((1-np.transpose(y_binary))*np.log(1-hk)))\n \n# Sum over all labels and average over examples\n J_no_regularisation = -1./m * sum(Cost)\n# No regularization over intercept\n Theta1_no_intercept = Theta1[:, 1:]\n Theta2_no_intercept = Theta2[:, 1:]\n\n# Sum all parameters squared\n RegSum1 = np.sum(np.sum(np.power(Theta1_no_intercept, 2)))\n RegSum2 = np.sum(np.sum(np.power(Theta2_no_intercept, 2)))\n# Add regularisation term to final cost\n J = J_no_regularisation + (lambda_value/(2*m)) * (RegSum1+RegSum2)\n\n# You need to return the following variables correctly \n Theta1_grad = np.zeros(np.shape(Theta1))\n Theta2_grad = np.zeros(np.shape(Theta2))\n\n# ====================== YOUR CODE HERE ======================\n# Implement the backpropagation algorithm to compute the gradients\n# Theta1_grad and Theta2_grad. You should return the partial derivatives of\n# the cost function with respect to Theta1 and Theta2 in Theta1_grad and\n# Theta2_grad, respectively. After implementing Part 2, you can check\n# that your implementation is correct by running checkNNGradients\n#\n# Note: The vector y passed into the function is a vector of labels\n# containing values from 1..K. 
You need to map this vector into a \n# binary vector of 1's and 0's to be used with the neural network\n# cost function.\n#\n# Hint: It is recommended implementing backpropagation using a for-loop\n# over the training examples if you are implementing it for the \n# first time.\n#\n I = np.eye(num_labels)\n Y = np.zeros((m, num_labels))\n for i in range(m):\n Y[i, :] = I[y[i]-1, :]\n\n \n for t in range(m):\n a1 = X[t, :]\n a1 = np.append([1], a1)\n z2 = np.dot(Theta1, a1)\n a2 = sigmoid(z2)\n a2 = np.append([1], a2)\n z3 = np.dot(Theta2, a2)\n a3 = sigmoid(z3)\n \n # sigma3 shape is 10 by 1\n sigma3 = a3 - Y[t, :] \n # sigma2 shape is 25 by 1 (eliminate bias)\n sigma2 = np.multiply(np.dot(np.transpose(Theta2), sigma3)[1:], sigmoidGradient(z2))\n # combine the forward pass and backwardpass; the delta l/ delta w\n delta2 = np.multiply(sigma3[np.newaxis].T, a2[np.newaxis])\n delta1 = np.multiply(sigma2[np.newaxis].T, a1[np.newaxis]) \n \n Theta1_grad = Theta1_grad + delta1\n Theta2_grad = Theta2_grad + delta2\n \n # average on the Theta gradient\n Theta1_grad = Theta1_grad/m + (lambda_value/m) * np.hstack((np.zeros((Theta1.shape[0], 1)), Theta1[:, 1:]))\n Theta2_grad = Theta2_grad/m + (lambda_value/m) * np.hstack((np.zeros((Theta2.shape[0], 1)), Theta2[:, 1:]))\n \n\n# -------------------------------------------------------------\n\n# =========================================================================\n\n# Unroll gradients\n Theta1_grad = np.reshape(Theta1_grad, Theta1_grad.size, order='F')\n Theta2_grad = np.reshape(Theta2_grad, Theta2_grad.size, order='F')\n grad = np.expand_dims(np.hstack((Theta1_grad, Theta2_grad)), axis=1)\n \n return J, grad\n"
] |
[
[
"numpy.dot",
"numpy.hstack",
"numpy.log",
"numpy.multiply",
"numpy.power",
"numpy.reshape",
"numpy.eye",
"numpy.ones",
"numpy.append",
"numpy.shape",
"numpy.transpose",
"numpy.zeros",
"numpy.empty"
]
] |
ayanc/edgeml.mdp
|
[
"7f21b88bcf764e927ac8b9997ac9f3b1b2dabcc4"
] |
[
"runtest_single.py"
] |
[
"#!/usr/bin/env python3\n# - Ayan Chakrabarti <[email protected]>\n\"\"\"Run experiments to derive and simulate policies for single cameras.\"\"\"\n\nfrom multiprocessing import Pool\nimport numpy as np\nfrom eomdp import simulate as sim\nfrom eomdp import policy as po\n\nFMPATH = 'save/fm_fold%d_cost%d.npz'\nOPATH = 'save/1cam_r%03d_bdepth%02d_cost%d.npz'\nPLIST = [(r/20, b/2, c)\n for b in range(2, 11)\n for r in range(1, 11)\n for c in range(3)]\n\n\ndef runtest(params_rbc):\n \"\"\"Run test with (rate, bdepth, cost)\"\"\"\n\n rate, bdepth, cost = params_rbc\n\n npz = {'lb': 0., 'wcost': 0., 'scost': 0.,\n 'naivecost': 0., 'mdpcost': 0.}\n for fold in range(3):\n dset = np.load(FMPATH % (fold, cost))\n\n metr_tr = dset['metric_tr']\n rew_tr = dset['wcost_tr']-dset['scost_tr']\n metr_ts = dset['metric_ts']\n rew_ts = dset['wcost_ts']-dset['scost_ts']\n\n policy = po.mdp(rate, bdepth, (metr_tr, rew_tr))\n\n nvpolicy = np.percentile(metr_tr, (1.0-rate)*100.0)\n lbrew = np.mean(rew_ts * (metr_ts >= nvpolicy))\n nvpolicy = nvpolicy * np.ones_like(policy)\n\n mdprew, stats = sim.simulate(rate, bdepth, policy, (metr_ts, rew_ts))\n nvprew, nst = sim.simulate(rate, bdepth, nvpolicy, (metr_ts, rew_ts))\n\n mwcost = np.mean(dset['wcost_ts'])\n npz['wcost'] = npz['wcost'] + mwcost/3.0\n npz['scost'] = npz['scost'] + np.mean(dset['scost_ts'])/3.0\n npz['lb'] = npz['lb'] + (mwcost - lbrew)/3.0\n npz['mdpcost'] = npz['mdpcost'] + (mwcost - mdprew)/3.0\n npz['naivecost'] = npz['naivecost'] + (mwcost-nvprew)/3.0\n\n if fold == 0:\n npz['send_m'] = stats[0][:, 0] / stats[0][:, 1]\n npz['send_s'] = stats[1]\n npz['occup_s'] = stats[2]\n npz['policy'] = np.mean(policy >= metr_tr[:, np.newaxis], 0)\n npz['nsrate'] = np.sum(nst[1])\n npz['naive_m'] = nst[0][:, 0] / nst[0][:, 1]\n\n np.savez_compressed(OPATH % (int(rate*1000), int(bdepth*10), cost), **npz)\n print(\"Completed r=%f, b=%f, cost=%d\" % (rate, bdepth, cost))\n\n\nif __name__ == \"__main__\":\n with Pool() as p:\n p.map(runtest, PLIST, chunksize=1)\n"
] |
[
[
"numpy.ones_like",
"numpy.percentile",
"numpy.mean",
"numpy.load",
"numpy.sum"
]
] |
scikit-hep/uproot-methods
|
[
"ba9a97b3dc71c7030a9ec15a9d97397b5ff8aa0d"
] |
[
"tests/test_issues.py"
] |
[
"#!/usr/bin/env python\n\n# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3-methods/blob/master/LICENSE\n\nimport unittest\n\nimport numpy\n\nimport awkward0\nimport uproot3_methods\nfrom uproot3_methods import *\n\nimport inspect\n\nclass Test(unittest.TestCase):\n def runTest(self):\n pass\n\n def test_issue10(self):\n p4 = TLorentzVectorArray.from_ptetaphim(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))\n assert p4.mass.tolist() == [[1.0]]\n assert p4[0].mass.tolist() == [1.0]\n assert p4[0][0].mass == 1.0\n assert p4[0][0]._to_cartesian().mass == 0.9999999999999999\n assert type(p4.mass) is awkward0.JaggedArray\n assert type(p4.x) is awkward0.JaggedArray\n\n p3 = TVector3Array.from_cylindrical(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))\n assert p3.rho.tolist() == [[1.0]]\n assert p3[0].rho.tolist() == [1.0]\n assert p3[0][0].rho == 1.0\n assert type(p3.rho) is awkward0.JaggedArray\n assert type(p3.x) is awkward0.JaggedArray\n\n p2 = TVector2Array.from_polar(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))\n assert p2.rho.tolist() == [[1.0]]\n assert p2[0].rho.tolist() == [1.0]\n assert p2[0][0].rho == 1.0\n assert type(p2.rho) is awkward0.JaggedArray\n assert type(p2.x) is awkward0.JaggedArray\n\n def test_issue39(self):\n counts = [2,2,2]\n mask = [True, False, True]\n\n pt = awkward0.JaggedArray.fromcounts(counts, [42.71, 31.46, 58.72, 30.19, 47.75, 10.83])\n eta = awkward0.JaggedArray.fromcounts(counts, [0.54, 1.57, -2.33, -1.22, -2.03, -0.37])\n phi = awkward0.JaggedArray.fromcounts(counts, [-2.13, 0.65, 2.74, 0.36, 2.87, -0.47])\n\n pt = pt[mask]\n eta = eta[mask]\n phi = phi[mask]\n\n electrons = uproot3_methods.TLorentzVectorArray.from_ptetaphim(pt, eta, phi, 0.000511)\n\n def test_issue61(self):\n assert TVector2(2, 0).rotate(numpy.pi/6).rotate(-numpy.pi/6) == TVector2(2, 0)\n\n _xs = numpy.array([2, 0, 1])\n _ys = numpy.array([0, 2, 1])\n arr = TVector2Array.from_cartesian(_xs, _ys).rotate(numpy.pi/4).rotate(-numpy.pi/4)\n\n _jxs = awkward0.JaggedArray.fromiter([[2,], [], [0, 1]])\n _jys = awkward0.JaggedArray.fromiter([[0,], [], [2, 1]])\n jarr = TVector2Array.from_cartesian(_jxs, _jys).rotate(numpy.pi/3).rotate(-numpy.pi/3)\n"
] |
[
[
"numpy.array"
]
] |
NISH1001/NeuralNLP-NeuralClassifier
|
[
"e86f750e68879d7390f0037747336110085d2f44"
] |
[
"train_custom.py"
] |
[
"#!/usr/bin/env python\n# coding:utf-8\n\"\"\"\nTencent is pleased to support the open source community by making NeuralClassifier available.\nCopyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\nhttp://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License\nis distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\nor implied. See the License for thespecific language governing permissions and limitations under\nthe License.\n\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport time\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport util\nfrom config import Config\nfrom dataset.classification_dataset import ClassificationDataset\nfrom dataset.collator import (\n ClassificationCollator,\n ClassificationType,\n FastTextCollator,\n)\nfrom evaluate.classification_evaluate import ClassificationEvaluator as cEvaluator\nfrom model.classification.attentive_convolution import AttentiveConvNet\nfrom model.classification.dpcnn import DPCNN\nfrom model.classification.drnn import DRNN\nfrom model.classification.fasttext import FastText\nfrom model.classification.hmcn import HMCN\nfrom model.classification.region_embedding import RegionEmbedding\nfrom model.classification.textcnn import TextCNN\nfrom model.classification.textrcnn import TextRCNN\nfrom model.classification.textrnn import TextRNN\nfrom model.classification.textvdcnn import TextVDCNN\nfrom model.classification.transformer import Transformer\nfrom model.loss import ClassificationLoss\nfrom model.model_util import (\n get_hierar_relations_new as get_hierar_relations, # get_hierar_relations,\n)\nfrom model.model_util import get_optimizer\nfrom util import ModeType\n\nClassificationDataset, ClassificationCollator, FastTextCollator, ClassificationLoss, cEvaluator\nFastText, TextCNN, TextRNN, TextRCNN, DRNN, TextVDCNN, Transformer, DPCNN, AttentiveConvNet, RegionEmbedding\n\n\ndef get_data_loader(dataset_name, collate_name, conf):\n \"\"\"Get data loader: Train, Validate, Test\"\"\"\n train_dataset = globals()[dataset_name](\n conf, conf.data.train_json_files, generate_dict=True\n )\n collate_fn = globals()[collate_name](conf, len(train_dataset.label_map))\n\n train_data_loader = DataLoader(\n train_dataset,\n batch_size=conf.train.batch_size,\n shuffle=True,\n num_workers=conf.data.num_worker,\n collate_fn=collate_fn,\n pin_memory=True,\n )\n\n validate_dataset = globals()[dataset_name](conf, conf.data.validate_json_files)\n validate_data_loader = DataLoader(\n validate_dataset,\n batch_size=conf.eval.batch_size,\n shuffle=False,\n num_workers=conf.data.num_worker,\n collate_fn=collate_fn,\n pin_memory=True,\n )\n\n test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)\n test_data_loader = DataLoader(\n test_dataset,\n batch_size=conf.eval.batch_size,\n shuffle=False,\n num_workers=conf.data.num_worker,\n collate_fn=collate_fn,\n pin_memory=True,\n )\n\n return train_data_loader, validate_data_loader, test_data_loader\n\n\ndef get_classification_model(model_name, dataset, conf):\n \"\"\"Get classification model from configuration\"\"\"\n model = globals()[model_name](dataset, conf)\n model = model.cuda(conf.device) if conf.device.startswith(\"cuda\") else model\n return model\n\n\nclass ClassificationTrainer(object):\n 
def __init__(self, label_map, logger, evaluator, conf, loss_fn):\n self.label_map = label_map\n self.logger = logger\n self.evaluator = evaluator\n self.conf = conf\n self.loss_fn = loss_fn\n if self.conf.task_info.hierarchical:\n self.hierar_relations = get_hierar_relations(\n self.conf.task_info.hierar_taxonomy, label_map\n )\n\n def train(self, data_loader, model, optimizer, stage, epoch):\n model.update_lr(optimizer, epoch)\n model.train()\n return self.run(data_loader, model, optimizer, stage, epoch, ModeType.TRAIN)\n\n def eval(self, data_loader, model, optimizer, stage, epoch):\n model.eval()\n return self.run(data_loader, model, optimizer, stage, epoch)\n\n def run(self, data_loader, model, optimizer, stage, epoch, mode=ModeType.EVAL):\n is_multi = False\n # multi-label classifcation\n if self.conf.task_info.label_type == ClassificationType.MULTI_LABEL:\n is_multi = True\n predict_probs = []\n standard_labels = []\n num_batch = data_loader.__len__()\n total_loss = 0.0\n for batch in data_loader:\n # hierarchical classification using hierarchy penalty loss\n if self.conf.task_info.hierarchical:\n logits = model(batch)\n linear_paras = model.linear.weight\n is_hierar = True\n used_argvs = (\n self.conf.task_info.hierar_penalty,\n linear_paras,\n self.hierar_relations,\n )\n loss = self.loss_fn(\n logits,\n batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),\n is_hierar,\n is_multi,\n *used_argvs\n )\n # hierarchical classification with HMCN\n elif self.conf.model_name == \"HMCN\":\n (global_logits, local_logits, logits) = model(batch)\n loss = self.loss_fn(\n global_logits,\n batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),\n False,\n is_multi,\n )\n loss += self.loss_fn(\n local_logits,\n batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),\n False,\n is_multi,\n )\n # flat classificaiton\n else:\n logits = model(batch)\n loss = self.loss_fn(\n logits,\n batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),\n False,\n is_multi,\n )\n if mode == ModeType.TRAIN:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n continue\n total_loss += loss.item()\n if not is_multi:\n result = torch.nn.functional.softmax(logits, dim=1).cpu().tolist()\n else:\n result = torch.sigmoid(logits).cpu().tolist()\n predict_probs.extend(result)\n standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])\n if mode == ModeType.EVAL:\n total_loss = total_loss / num_batch\n (\n _,\n precision_list,\n recall_list,\n fscore_list,\n right_list,\n predict_list,\n standard_list,\n ) = self.evaluator.evaluate(\n predict_probs,\n standard_label_ids=standard_labels,\n label_map=self.label_map,\n threshold=self.conf.eval.threshold,\n top_k=self.conf.eval.top_k,\n is_flat=self.conf.eval.is_flat,\n is_multi=is_multi,\n )\n # precision_list[0] save metrics of flat classification\n # precision_list[1:] save metrices of hierarchical classification\n self.logger.warn(\n \"%s performance at epoch %d is precision: %f, \"\n \"recall: %f, fscore: %f, macro-fscore: %f, right: %d, predict: %d, standard: %d.\\n\"\n \"Loss is: %f.\"\n % (\n stage,\n epoch,\n precision_list[0][cEvaluator.MICRO_AVERAGE],\n recall_list[0][cEvaluator.MICRO_AVERAGE],\n fscore_list[0][cEvaluator.MICRO_AVERAGE],\n fscore_list[0][cEvaluator.MACRO_AVERAGE],\n right_list[0][cEvaluator.MICRO_AVERAGE],\n predict_list[0][cEvaluator.MICRO_AVERAGE],\n standard_list[0][cEvaluator.MICRO_AVERAGE],\n total_loss,\n )\n )\n return fscore_list[0][cEvaluator.MICRO_AVERAGE]\n\n\ndef load_checkpoint(file_name, conf, 
model, optimizer):\n checkpoint = torch.load(file_name)\n conf.train.start_epoch = checkpoint[\"epoch\"]\n best_performance = checkpoint[\"best_performance\"]\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n return best_performance\n\n\ndef save_checkpoint(state, file_prefix):\n file_name = file_prefix + \"_\" + str(state[\"epoch\"])\n torch.save(state, file_name)\n\n\ndef train(conf):\n logger = util.Logger(conf)\n if not os.path.exists(conf.checkpoint_dir):\n os.makedirs(conf.checkpoint_dir)\n\n model_name = conf.model_name\n dataset_name = \"ClassificationDataset\"\n collate_name = (\n \"FastTextCollator\" if model_name == \"FastText\" else \"ClassificationCollator\"\n )\n train_data_loader, validate_data_loader, test_data_loader = get_data_loader(\n dataset_name, collate_name, conf\n )\n empty_dataset = globals()[dataset_name](conf, [], mode=\"train\")\n model = get_classification_model(model_name, empty_dataset, conf)\n loss_fn = globals()[\"ClassificationLoss\"](\n label_size=len(empty_dataset.label_map), loss_type=conf.train.loss_type\n )\n optimizer = get_optimizer(conf, model)\n evaluator = cEvaluator(conf.eval.dir)\n trainer = globals()[\"ClassificationTrainer\"](\n empty_dataset.label_map, logger, evaluator, conf, loss_fn\n )\n\n best_epoch = -1\n best_performance = 0\n model_file_prefix = conf.checkpoint_dir + \"/\" + model_name\n for epoch in range(\n conf.train.start_epoch, conf.train.start_epoch + conf.train.num_epochs\n ):\n start_time = time.time()\n trainer.train(train_data_loader, model, optimizer, \"Train\", epoch)\n trainer.eval(train_data_loader, model, optimizer, \"Train\", epoch)\n performance = trainer.eval(\n validate_data_loader, model, optimizer, \"Validate\", epoch\n )\n trainer.eval(test_data_loader, model, optimizer, \"test\", epoch)\n if performance > best_performance: # record the best model\n best_epoch = epoch\n best_performance = performance\n save_checkpoint(\n {\n \"epoch\": epoch,\n \"model_name\": model_name,\n \"state_dict\": model.state_dict(),\n \"best_performance\": best_performance,\n \"optimizer\": optimizer.state_dict(),\n },\n model_file_prefix,\n )\n time_used = time.time() - start_time\n logger.info(\"Epoch %d cost time: %d second\" % (epoch, time_used))\n\n # best model on validateion set\n best_epoch_file_name = model_file_prefix + \"_\" + str(best_epoch)\n best_file_name = model_file_prefix + \"_best\"\n shutil.copyfile(best_epoch_file_name, best_file_name)\n\n load_checkpoint(model_file_prefix + \"_\" + str(best_epoch), conf, model, optimizer)\n trainer.eval(test_data_loader, model, optimizer, \"Best test\", best_epoch)\n\n\nif __name__ == \"__main__\":\n config = Config(config_file=sys.argv[1])\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(config.train.visible_device_list)\n torch.manual_seed(2019)\n torch.cuda.manual_seed(2019)\n train(config)\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.sigmoid",
"torch.cuda.manual_seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.save"
]
] |
populationgenomics/ancestry
|
[
"faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6"
] |
[
"scripts/hail_batch/hgdp1kg_tobwgs_pca_pop_densified/hgdp_1kg_tob_wgs_pop_pca_densified.py"
] |
[
"\"\"\"\nPerform pca on samples specific to a population\nfrom the HGDP,1KG, and tob-wgs dataset after densifying.\n\nDepends on hgdp1kg_tobwgs_densified_pca/hgdp_1kg_tob_wgs_densified_pca.py\n\"\"\"\n\nimport click\nimport pandas as pd\nimport hail as hl\n\nHGDP1KG_TOBWGS = (\n 'gs://cpg-tob-wgs-main/1kg_hgdp_densified_pca/v2/'\n 'hgdp1kg_tobwgs_joined_all_samples.mt'\n)\n\n\[email protected]()\[email protected]('--output', help='GCS output path', required=True)\[email protected]('--pop', help='Population to subset from the 1KG (e.g. afr, nfe)')\ndef query(output, pop): # pylint: disable=too-many-locals\n \"\"\"Query script entry point.\"\"\"\n\n hl.init(default_reference='GRCh38')\n\n mt = hl.read_matrix_table(HGDP1KG_TOBWGS)\n if pop:\n # Get samples from the specified population only\n mt = mt.filter_cols(\n (mt.hgdp_1kg_metadata.population_inference.pop == pop.lower())\n | (mt.s.contains('TOB'))\n )\n else:\n mt = mt.filter_cols(mt.s.contains('TOB'))\n\n # Perform PCA\n eigenvalues_path = f'{output}/eigenvalues.ht'\n scores_path = f'{output}/scores.ht'\n loadings_path = f'{output}/loadings.ht'\n eigenvalues, scores, loadings = hl.hwe_normalized_pca(\n mt.GT, compute_loadings=True, k=20\n )\n hl.Table.from_pandas(pd.DataFrame(eigenvalues)).export(eigenvalues_path)\n scores.write(scores_path, overwrite=True)\n loadings.write(loadings_path, overwrite=True)\n\n\nif __name__ == '__main__':\n query() # pylint: disable=no-value-for-parameter\n"
] |
[
[
"pandas.DataFrame"
]
] |
shahpnmlab/cryoem-python
|
[
"c4e317ba9563172c70ab4571ece0a2d322a301ff"
] |
[
"place_points_on_surface.py"
] |
[
"'''\nThis script will enable you to place points on the surface of an arbitary sphere\nwhose center is defined by marking the center of the sphere in IMOD and saving it as\nmod file. In writing this script I used CR Drost's response to the question \nof how do you evenly place points on the surface of sphere?\n(https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere)\n\nShortly after commiting to writing this script, I found that:\na. this wasnt the specific tool I needed for my use case.\nb. John Heumann (IMOD author) has already written a similar utility called seedSpikes \nand SpikeInit. I recommend that you give those utils a try. See here for more info\nhttps://bio3d.colorado.edu/RML_2017/2017_IMOD_PEET_Workshop/Lectures/ModelingAids.pdf\n\nIn order to format the colors of the output files, i used Ben Himes's point2model command,\nbecause i couldnt come up with a better color combo. So thanks, Ben!\nhttps://github.com/bHimes/emClarity/wiki/Conventions\n'''\nimport numpy as np\nfrom numpy import pi, cos, sin, arccos, arange\nimport subprocess\nimport argparse\n\n#Read input from the user\nparser=argparse.ArgumentParser()\nparser.add_argument(\"--i\", help=\"IMOD mod file with centers of objects selected\")\nparser.add_argument(\"--r\", help=\"The desired radius (px).\",type=float)\nparser.add_argument(\"--npts\", help=\"Number of points you want to place on the sphere\",type=int)\nparser.add_argument(\"--rec\", help=\"Name of tomogram for which points are being generated\")\n \nargs=parser.parse_args()\n\n#Convert IMOD mod file to a txt file, you need to have IMOD and its utils in\n#$PATH\nsubprocess.run(['model2point', '-float', '-i', args.i, '-output', 'temp.txt'])\nprint(\"Converting your input mod file into a temporary text file\")\n\n#Do the magic\nf=np.loadtxt(\"temp.txt\")\norigin_x=f[:,[0]]\norigin_y=f[:,[1]]\norigin_z=f[:,[2]]\nr=args.r\nnum_pts = args.npts\nif len(origin_x)==len(origin_y)==len(origin_z):\n indices = arange(0, num_pts, dtype=float)\n phi = arccos(1 - 2*indices/num_pts)\n theta = pi * (1 + 5**0.5) * indices\n x = cos(theta) * sin(phi) * r + origin_x\n y = sin(theta) * sin(phi) * r + origin_y\n z = cos(phi) * r + origin_z\n x=np.array([x]).reshape(len(x)*num_pts,1)\n y=np.array([y]).reshape(len(y)*num_pts,1)\n z=np.array([z]).reshape(len(z)*num_pts,1)\n xy=np.hstack((x,y))\n xyz=np.hstack((xy,z))\n subprocess.run(['rm', 'temp.txt'])\nelif len(origin_x)!=len(origin_y)!=len(origin_z):\n print(\"Your input file is erroneous, have you checked if length of X==Y==Z?\") \n\n#Save txt as input for point2model\nnp.savetxt('temp.txt',xyz,delimiter=' ',fmt='%-5i')\nprint(\"Converting the points back into a mod file for you to use\")\nsubprocess.run(['point2model', '-circle', '3', '-sphere', '5', '-scat', '-thick', '2', '-color', '80,191,255,', \\\n'-image', args.rec, 'temp.txt', args.rec[:-4]+\"_sphere.mod\"])\n\n#Clean up after yourself!\nsubprocess.run(['rm', 'temp.txt'])\nprint(\"Process has ended\")\n"
] |
[
[
"numpy.hstack",
"numpy.arange",
"numpy.arccos",
"numpy.cos",
"numpy.sin",
"numpy.savetxt",
"numpy.array",
"numpy.loadtxt"
]
] |
likedan/cp-vton
|
[
"dde95aa0b3ede1e1c0e0b0a91ba94cf91ed1f79e"
] |
[
"produce_comparison_for_test.py"
] |
[
"# coding=utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils\nimport argparse\nimport os\nfrom torchvision.utils import save_image\nfrom cp_dataset import CPDataset, CPDataLoader\nfrom networks import GMM, UnetGenerator, VGGLoss, load_checkpoint, save_checkpoint\nfrom resnet import Embedder\nfrom unet import UNet, VGGExtractor, Discriminator\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom distributed import (\n get_rank,\n synchronize,\n reduce_loss_dict,\n reduce_sum,\n get_world_size,\n)\n\ndef normalize(x):\n x = ((x+1)/2).clamp(0,1)\n return x\n\n\ndef single_gpu_flag(args):\n return not args.distributed or (args.distributed and args.local_rank % torch.cuda.device_count() == 0)\n\n\ndef get_opt():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--name\", default=\"test_vton\")\n parser.add_argument(\"--gpu_ids\", default=\"\")\n parser.add_argument('-j', '--workers', type=int, default=16)\n parser.add_argument('-b', '--batch-size', type=int, default=32)\n\n parser.add_argument('--local_rank', type=int, default=1, help=\"gpu to use, used for distributed training\")\n\n parser.add_argument(\"--use_gan\", action='store_true')\n\n parser.add_argument(\"--dataroot\", default=\"data\")\n parser.add_argument(\"--datamode\", default=\"test\")\n parser.add_argument(\"--stage\", default=\"residual\")\n parser.add_argument(\"--data_list\", default=\"test_files/vton_test.txt\")\n parser.add_argument(\"--fine_width\", type=int, default=192)\n parser.add_argument(\"--fine_height\", type=int, default=256)\n parser.add_argument(\"--radius\", type=int, default=5)\n parser.add_argument(\"--grid_size\", type=int, default=5)\n parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')\n parser.add_argument('--tensorboard_dir', type=str, default='tensorboard', help='save tensorboard infos')\n parser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='save checkpoint infos')\n parser.add_argument('--checkpoint', type=str, default='', help='model checkpoint for initialization')\n parser.add_argument(\"--display_count\", type=int, default=20)\n parser.add_argument(\"--save_count\", type=int, default=5000)\n parser.add_argument(\"--keep_step\", type=int, default=100000)\n parser.add_argument(\"--decay_step\", type=int, default=100000)\n parser.add_argument(\"--shuffle\", action='store_true', help='shuffle input data')\n\n opt = parser.parse_args()\n return opt\n\ndef test_residual(opt, loader, model, gmm_model, generator):\n\n model.eval()\n gmm_model.eval()\n generator.eval()\n\n test_files_dir = \"test_files_dir/\" + opt.name\n os.makedirs(test_files_dir, exist_ok=True)\n os.makedirs(os.path.join(test_files_dir, \"gt\"), exist_ok=True)\n os.makedirs(os.path.join(test_files_dir, \"residual\"), exist_ok=True)\n os.makedirs(os.path.join(test_files_dir, \"baseline\"), exist_ok=True)\n os.makedirs(os.path.join(test_files_dir, \"refined\"), exist_ok=True)\n os.makedirs(os.path.join(test_files_dir, \"diff\"), exist_ok=True)\n\n for i, (inputs, inputs_2) in tqdm(enumerate(loader), total=len(loader)):\n\n im = inputs['image'].cuda()\n agnostic = inputs['agnostic'].cuda()\n\n c = inputs['cloth'].cuda()\n cm = inputs['cloth_mask'].cuda()\n\n c_2 = inputs_2['cloth'].cuda()\n cm_2 = inputs_2['cloth_mask'].cuda()\n\n with torch.no_grad():\n grid, theta = gmm_model(agnostic, c)\n c = F.grid_sample(c, grid, padding_mode='border')\n cm = F.grid_sample(cm, grid, 
padding_mode='zeros')\n\n outputs = generator(torch.cat([agnostic, c], 1))\n p_rendered, m_composite = torch.split(outputs, 3, 1)\n p_rendered = F.tanh(p_rendered)\n m_composite = F.sigmoid(m_composite)\n transfer_1 = c * m_composite + p_rendered * (1 - m_composite)\n\n grid_2, theta_2 = gmm_model(agnostic, c_2)\n c_2 = F.grid_sample(c_2, grid_2, padding_mode='border')\n cm_2 = F.grid_sample(cm_2, grid_2, padding_mode='zeros')\n\n outputs_2 = generator(torch.cat([agnostic, c_2], 1))\n p_rendered_2, m_composite_2 = torch.split(outputs_2, 3, 1)\n p_rendered_2 = F.tanh(p_rendered_2)\n m_composite_2 = F.sigmoid(m_composite_2)\n transfer_2 = c_2 * m_composite_2 + p_rendered_2 * (1 - m_composite_2)\n\n gt_residual = (torch.mean(im, dim=1) - torch.mean(transfer_2, dim=1)).unsqueeze(1)\n\n output_1 = model(transfer_1.detach(), gt_residual.detach())\n\n output_residual = torch.cat([normalize(gt_residual), normalize(gt_residual), normalize(gt_residual)], dim=1).cpu()\n for b_i in range(transfer_1.shape[0]):\n save_image(normalize(im[b_i].cpu()),\n os.path.join(test_files_dir, \"gt\", str(i * opt.batch_size + b_i) + \".jpg\"))\n save_image(normalize(transfer_1[b_i].cpu()),\n os.path.join(test_files_dir, \"baseline\", str(i * opt.batch_size + b_i) + \".jpg\"))\n save_image(normalize(output_residual)[b_i],\n os.path.join(test_files_dir, \"residual\", str(i * opt.batch_size + b_i) + \".jpg\"))\n save_image(normalize(((transfer_1 - output_1) / 2)[b_i].cpu()),\n os.path.join(test_files_dir, \"diff\", str(i * opt.batch_size + b_i) + \".jpg\"))\n save_image(normalize(output_1[b_i].cpu()),\n os.path.join(test_files_dir, \"refined\", str(i * opt.batch_size + b_i) + \".jpg\"))\n\n\ndef main():\n opt = get_opt()\n print(opt)\n print(\"Start to train stage: %s, named: %s!\" % (opt.stage, opt.name))\n\n n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n opt.distributed = n_gpu > 1\n local_rank = opt.local_rank\n\n if opt.distributed:\n torch.cuda.set_device(opt.local_rank)\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n synchronize()\n\n # create dataset\n dataset = CPDataset(opt)\n\n # create dataloader\n loader = CPDataLoader(opt, dataset)\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=opt.batch_size, shuffle=False,\n num_workers=opt.workers, pin_memory=True, sampler=None)\n\n # visualization\n if not os.path.exists(opt.tensorboard_dir):\n os.makedirs(opt.tensorboard_dir)\n\n\n gmm_model = GMM(opt)\n load_checkpoint(gmm_model, \"checkpoints/gmm_train_new/step_020000.pth\")\n gmm_model.cuda()\n\n generator_model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)\n load_checkpoint(generator_model, \"checkpoints/tom_train_new_2/step_040000.pth\")\n generator_model.cuda()\n\n embedder_model = Embedder()\n load_checkpoint(embedder_model, \"checkpoints/identity_train_64_dim/step_020000.pth\")\n embedder_model = embedder_model.embedder_b.cuda()\n\n model = UNet(n_channels=4, n_classes=3)\n model.cuda()\n\n if not opt.checkpoint == '' and os.path.exists(opt.checkpoint):\n load_checkpoint(model, opt.checkpoint)\n\n test_residual(opt, data_loader, model, gmm_model, generator_model)\n\n print('Finished training %s, nameed: %s!' % (opt.stage, opt.name))\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"torch.mean",
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.cat",
"torch.cuda.device_count",
"torch.utils.data.DataLoader",
"torch.nn.functional.sigmoid",
"torch.nn.functional.grid_sample",
"torch.no_grad",
"torch.split",
"torch.nn.functional.tanh"
]
] |
Pr0d19y/3DUnet-livers
|
[
"8b306b276228275c2f9df01c7b1468816dc1f332"
] |
[
"unet3d/generator_multiprocess.py"
] |
[
"import keras\nfrom keras.preprocessing.image import ImageDataGenerator\nimport numpy as np\nfrom unet3d.data import open_data_file\nimport time\nimport gc\n\nfrom multiprocessing import Pool\nfrom functools import partial\nfrom unet3d.generator import data_generator\nfrom unet3d.generator import create_patch_index_list\nimport copy\nimport os\nimport pandas as pd\n\n\nclass ClassDataGenerator(keras.utils.Sequence):\n \"\"\"\n Classifer Data Generator. inherits keras.utils.Sequence, that provides multi-process iterator over the dataset.\n \"\"\"\n\n def __init__(self, file_name, indices, batch_size=1024, x_shape=None, root_name_x='data',\n root_name_y='truth', root_name_norm='normalization', imgen_params=None, seed=1,\n is_train=True, n_processors=4):\n \"\"\"\n initialization\n :param file_name: name of the hd5 file to load data from\n :param indices: indices to read from file\n :param batch_size: Size of the batches that the training generator will provide\n :param root_name_x: the name of the entry in the hdf5 where the X data is held\n :param root_name_y: the name of the entry in the hdf5 where the y data is held\n :type root_name_norm: the name of the entry in the hdf5 where the normalization data is held\n :param imgen_params: parameters for the keras ImageDataGenerator\n :param seed: seed for random augmentations. will use same seed for data and masks to get the same augemntations\n :param is_train: when set to True, will shuffle index on the end of every epoch\n :type n_processors: Number of processors to use in parallel for augmentations\n \"\"\"\n self.index = indices.astype(np.int)\n\n self.imgen = ImageDataGenerator(**imgen_params) # TODO: doesn't support 3D?\n self.maskgen = ImageDataGenerator(**imgen_params)\n self.seed = seed\n\n self.f = open_data_file(file_name, 'r')\n self.file_name = file_name\n self.root_name_x = root_name_x\n self.root_name_y = root_name_y\n self.root_name_norm = root_name_norm\n\n self.x_table = self.f.root[self.root_name_x]\n self.y_table = self.f.root[self.root_name_y]\n self.norm_table = self.f.root[self.root_name_norm]\n\n self.x_shape = x_shape # on images it is (512, 512, 60), on patches (8, 8, 8)\n\n self.total_len = len(self.index)\n self.batch_size = batch_size\n self.is_train = is_train\n # self.steps_per_epoch = np.floor(self.total_len / self.batch_size).astype(np.int)\n\n if is_train:\n np.random.shuffle(self.index)\n\n self.n_processors = n_processors\n # self.f.close()\n\n def __len__(self):\n \"denotes number of batches per epoch\"\n return int(np.floor(self.total_len / self.batch_size))\n # return 10\n\n @staticmethod\n def normalize(data):\n \"\"\"\n normalize the data using given normalization factors (mean, std)\n :param data: tuple (data, normalization factors)\n :return: normalized data\n \"\"\"\n data, norm_factors = data\n data = data.astype(np.float32)\n data -= norm_factors[0]\n data /= norm_factors[1]\n return data\n\n def __data_generation(self, indices):\n \"\"\"\n generates the data from the given indices\n :param indices:\n :return:\n \"\"\"\n # generate data from indices\n batch_images = self.x_table[indices, :]\n\n # normalize the data\n norm_factors = self.norm_table[indices, :]\n # TODO find a more efficient way to create this array\n data_to_normalize = [(batch_images[i], norm_factors[i]) for i in range(batch_images.shape[0])]\n # with Pool(self.n_processors) as pool:\n # batch_images = pool.map(self.normalize, data_to_normalize)\n batch_images = [self.normalize(dat) for dat in data_to_normalize]\n\n batch_images = 
np.asarray(batch_images)\n\n # TODO: return augmentation - has error affine matrix has wrong number of rows\n # # augmentation\n # if self.is_train:\n # rand_transform = partial(self.imgen.random_transform, seed=self.seed)\n # ret = self.pool.map(rand_transform, batch_images)\n # batch_images = np.array(ret)\n\n # generate data masks from indices\n batch_y = self.y_table[indices, :]\n\n # TODO: return augmentation\n # # same augmentation for y\n # if self.is_train:\n # rand_transform_y = partial(self.maskgen.random_transform, seed=self.seed)\n # ret_y = self.pool.map(rand_transform_y, batch_images)\n # batch_y = np.array(ret_y)\n\n return batch_images, batch_y\n\n def __getitem__(self, index):\n \"generate one batch of data\"\n if self.file_name == '/cs/casmip/clara.herscu/git/3DUnet/brats/data_liver_segmentation_patches/liver_patches_int_data_000_130_copy.h5':\n time.sleep(5)\n\n # freeing everything we don't need\n gc.collect()\n\n # generate indices of the batch\n index = int(index)\n indices = self.index[index*self.batch_size:np.min(((index+1)*self.batch_size, self.total_len))]\n\n X, y = self.__data_generation(indices)\n return X, y\n\n def on_epoch_end(self):\n \"re-shuffles indices after each epoch\"\n if self.is_train:\n np.random.shuffle(self.index)\n"
] |
[
[
"numpy.asarray",
"numpy.floor",
"numpy.random.shuffle",
"numpy.min"
]
] |
guoxueyu/gatk-sv
|
[
"22fc647dd3ee56a47cb0523ea8f6cc37c4b904eb"
] |
[
"dockerfiles/rdpesr/add_RD_to_SVs.py"
] |
[
"#script to add cov to SVs\n\ndef add_ILL_cov(pb_uni_svs,bincov):\n for i in pb_uni_svs.keys():\n for j in pb_uni_svs[i]:\n cov_list=cov_SV_readin(j, bincov)\n if len(cov_list)>0:\n j+=[len(cov_list),np.median(cov_list), np.mean(cov_list),np.std(cov_list)]\n else:\n j+=[0, 'nan', 'nan', 'nan']\n #print(j)\n return pb_uni_svs\n\ndef bed_info_readin(input):\n fin=open(input)\n out={}\n for line in fin:\n pin=line.strip().split()\n if pin[0][0]=='#': continue\n if not pin[0] in out.keys():\n out[pin[0]]=[]\n out[pin[0]].append([pin[0],int(pin[1]),int(pin[2])]+pin[3:])\n fin.close()\n return out\n\ndef cov_SV_readin(svpos, bincov):\n fin=os.popen(r'''tabix %s %s:%d-%d'''%(bincov, svpos[0],svpos[1],svpos[2]))\n normCov_list=[]\n for line in fin:\n pin=line.strip().split()\n normCov_list.append(float(pin[-1]))\n fin.close() \n return normCov_list\n\ndef path_modify(path):\n if not path[-1]=='/':\n path+='/'\n return path \n\ndef write_output(output,pb_uni_svs):\n fo=open(output,'w') \n for k1 in pb_uni_svs.keys(): \n for k2 in pb_uni_svs[k1]:\n print('\\t'.join([str(i) for i in k2]),file=fo)\n fo.close() \n\ndef main():\n parser = argparse.ArgumentParser(description='S2a.calcu.Seq_Cov.of.PB_Uni.py')\n parser.add_argument('input', help='name of input file containing PacBio unique SVs in bed format')\n parser.add_argument('bincov',help='name of bincov metrics of the sample to be processed')\n parser.add_argument('output',help='name of bincov metrics of the sample to be processed')\n args = parser.parse_args()\n pb_uni_svs=bed_info_readin(args.input)\n pb_uni_svs=add_ILL_cov(pb_uni_svs,args.bincov)\n write_output(args.output,pb_uni_svs)\n\nimport os\nimport numpy as np\nimport argparse\nmain()\n\n\n\n"
] |
[
[
"numpy.std",
"numpy.median",
"numpy.mean"
]
] |
ToFeWe/qpricesim
|
[
"2d4312ed1d1356449f0c168835a0662b238a27bb"
] |
[
"qpricesim/model_code/economic_environment.py"
] |
[
"\"\"\"\n\nA module that defines the economic environment the agents are interacting in.\n\"\"\"\nimport numpy as np\nfrom numba import njit\n\n\n@njit\ndef calc_winning_price(all_prices):\n \"\"\"\n Helper function that takes in the array of all prices in the market\n and returns the winning price and the number of firms that picked\n this winning price.\n\n\n\n Args:\n all_prices (array): Array of all prices in the given round in the market\n\n Returns:\n tuple: winning_price, n_winning_price\n\n winning_price (integer): Current market price\n n_winning_price (integer): Number of firms that played the market price\n \"\"\"\n # Get winning price\n # Lowest price wins the market\n winning_price = np.min(all_prices)\n\n # Get the number of players that played the winning price\n n_winning_price = np.sum(np.where(all_prices == winning_price, 1, 0))\n return winning_price, n_winning_price\n\n\n@njit\ndef calc_reward(p_i, winning_price, n_winning_price, reservation_price, m_consumer):\n \"\"\"\n A function that calculates the reward given a simple Bertrand\n environment with homogenous goods.\n\n Use calc_winning_price() to retrieve winning_price, n_winning_price\n for the given market prices first.\n\n Args:\n p_i (integer): Price the agent picked in the given round\n (Non-index reprenstation of the action).\n winning_price (integer): Market price\n n_winning_price (integer): Number of firms that played the market price\n reservation_price (integer): Maximal price the consumers are willing to pay\n m_consumer (integer): Number of consumers in the market\n\n Returns:\n float: Economics profit/reward for the agent in the given period\n \"\"\"\n\n # If the agents charges a price above reservation price, he comes home with zero.\n # If he plays the winning price, he shares the market with the others who played\n # the winning price.\n # If his price is above the winning price, he also goes home with zero.\n if p_i > reservation_price:\n return 0\n elif p_i == winning_price:\n return (1 / n_winning_price) * p_i * m_consumer\n else:\n return 0\n"
] |
[
[
"numpy.where",
"numpy.min"
]
] |
ijjorama/DosNa
|
[
"8e5322a0e1b93a377a9a443d442253b45957dac2"
] |
[
"dosna/backends/s3.py"
] |
[
"#!/usr/bin/env python\n\"\"\"Backend s3 uses a S3 interface to store the dataset and chunks data\"\"\"\n\nimport logging\n\nimport numpy as np\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom dosna.backends import Backend\nfrom dosna.backends.base import (BackendConnection, BackendDataChunk,\n BackendDataset, ConnectionError,\n DatasetNotFoundError)\nfrom dosna.util import dtype2str, shape2str, str2shape\nfrom dosna.util.data import slices2shape\n\n_DATASET_ROOT = 'dataset_root'\n_SIGNATURE = \"DosNa Dataset\"\n\n_SHAPE = 'shape'\n_DTYPE = 'dtype'\n_FILLVALUE = 'fillvalue'\n_CHUNK_GRID = 'chunk-grid'\n_CHUNK_SIZE = 'chunk-size'\n\nlog = logging.getLogger(__name__)\n\n# Sanitise bucket name to conform to AWS conventions\n\n\ndef bucketName(name):\n return name.replace('_', '-').lower()\n\n\nclass S3Connection(BackendConnection):\n \"\"\"\n A S3 Connection that wraps boto3 S3 client\n \"\"\"\n\n def __init__(self, name, endpoint_url=None, verify=True,\n profile_name='default',\n *args, **kwargs):\n super(S3Connection, self).__init__(name, *args, **kwargs)\n\n self._endpoint_url = endpoint_url\n self._verify = verify\n self._client = None\n self._profile_name = profile_name\n\n super(S3Connection, self).__init__(name, *args, **kwargs)\n\n def connect(self):\n\n if self.connected:\n raise ConnectionError(\n 'Connection {} is already open'.format(self.name))\n session = boto3.session.Session(profile_name=self._profile_name)\n\n # Use access key and secret_key in call to client?\n self._client = session.client(\n service_name='s3',\n endpoint_url=self._endpoint_url,\n verify=self._verify\n )\n\n # Check bucket exists and is writable\n\n super(S3Connection, self).connect()\n\n def disconnect(self):\n\n if self.connected:\n super(S3Connection, self).disconnect()\n\n @property\n def client(self):\n return self._client\n\n def create_dataset(self, name, shape=None, dtype=np.float32, fillvalue=0,\n data=None, chunk_size=None):\n if not ((shape is not None and dtype is not None) or data is not None):\n raise Exception('Provide `shape` and `dtype` or `data`')\n if self.has_dataset(name):\n raise Exception('Dataset `%s` already exists' % name)\n\n if data is not None:\n shape = data.shape\n dtype = data.dtype\n\n if chunk_size is None:\n chunk_size = shape\n\n chunk_grid = (np.ceil(np.asarray(shape, float) / chunk_size))\\\n .astype(int)\n\n name = bucketName(name)\n\n log.debug('creating dataset %s with shape:%s chunk_size:%s '\n 'chunk_grid:%s', name, shape, chunk_size, chunk_grid)\n\n try:\n self._client.create_bucket(Bucket=name, ACL='private')\n except ClientError as e:\n code = e.response['Error']['Code']\n if code is not None:\n log.error('connect: create_bucket returns %s', code)\n return None\n\n metadata = {\n _SHAPE: shape2str(shape),\n _DTYPE: dtype2str(dtype),\n _FILLVALUE: repr(fillvalue),\n _CHUNK_GRID: shape2str(chunk_grid),\n _CHUNK_SIZE: shape2str(chunk_size)\n }\n\n self._client.put_object(\n Bucket=name, Key=_DATASET_ROOT,\n Body=bytes(_SIGNATURE), Metadata=metadata\n )\n\n dataset = S3Dataset(\n self, name, shape, dtype,\n fillvalue, chunk_grid, chunk_size\n )\n\n return dataset\n\n def get_dataset(self, name):\n\n if not self.has_dataset(name):\n raise DatasetNotFoundError('Dataset `%s` does not exist' % name)\n\n metadata = self._dataset_root['Metadata']\n if metadata is None:\n raise DatasetNotFoundError(\n 'Dataset `%s` does not have required DosNa metadata' % name\n )\n\n shape = str2shape(metadata[_SHAPE])\n dtype = metadata[_DTYPE]\n fillvalue = 
int(metadata[_FILLVALUE])\n chunk_grid = str2shape(metadata[_CHUNK_GRID])\n chunk_size = str2shape(metadata[_CHUNK_SIZE])\n dataset = S3Dataset(\n self, name, shape, dtype, fillvalue,\n chunk_grid, chunk_size\n )\n\n return dataset\n\n def get_dataset_root(self, name):\n\n name = bucketName(name)\n\n dataset_root = None\n try:\n dataset_root = self._client.get_object(\n Bucket=name, Key=_DATASET_ROOT\n )\n\n content = dataset_root['Body'].read()\n if not content == _SIGNATURE:\n dataset_root = None\n\n except Exception:\n pass # Don't need to report errors here\n\n return dataset_root\n\n def has_dataset(self, name):\n\n self._dataset_root = self.get_dataset_root(name)\n if self._dataset_root is None:\n log.info(\"has_dataset: dataset %s does not exist\", name)\n\n return self._dataset_root is not None\n\n def del_dataset(self, name):\n\n if self.has_dataset(name):\n\n name = bucketName(name)\n try:\n self._client.delete_object(Bucket=name, Key=_DATASET_ROOT)\n self._client.delete_bucket(Bucket=name)\n except ClientError as e:\n log.error('del_dataset: cannot delete %s: %s',\n name, e.response['Error'])\n else:\n raise DatasetNotFoundError(\n 'Dataset `{}` does not exist'.format(name))\n\n\nclass S3Dataset(BackendDataset):\n \"\"\"\n S3Dataset\n \"\"\"\n\n @property\n def client(self):\n return self.connection.client\n\n def _idx2name(self, idx):\n return '.'.join(map(str, idx))\n\n def create_chunk(self, idx, data=None, slices=None):\n if self.has_chunk(idx):\n raise Exception('DataChunk `{}{}` already exists'.\n format(self.name, idx))\n name = self._idx2name(idx)\n# print \"Name = %s\" % (name)\n dtype = self.dtype\n shape = self.chunk_size\n fillvalue = self.fillvalue\n datachunk = S3DataChunk(self, idx, name, shape, dtype, fillvalue)\n if data is None:\n data = np.full(shape, fillvalue, dtype)\n datachunk.set_data(data, slices, fill_others=True)\n return datachunk\n\n def get_chunk(self, idx):\n if self.has_chunk(idx):\n name = self._idx2name(idx)\n dtype = self.dtype\n shape = self.chunk_size\n fillvalue = self.fillvalue\n return S3DataChunk(self, idx, name, shape, dtype, fillvalue)\n return self.create_chunk(idx)\n\n def has_chunk(self, idx):\n\n has_chunk = False\n name = self._idx2name(idx)\n try:\n self.client.head_object(Bucket=bucketName(self._name), Key=name)\n has_chunk = True\n except ClientError as e:\n log.debug(\"ClientError: %s\", e.response['Error']['Code'])\n\n return has_chunk\n\n def del_chunk(self, idx):\n if self.has_chunk(idx):\n self.client.delete_object(\n Bucket=bucketName(self._name),\n Key=self._idx2name(idx)\n )\n\n\nclass S3DataChunk(BackendDataChunk):\n\n @property\n def client(self):\n return self.dataset.client\n\n def get_data(self, slices=None):\n if slices is None:\n slices = slice(None)\n data = np.fromstring(self.read(), dtype=self.dtype, count=self.size)\n data.shape = self.shape\n return data[slices]\n\n def set_data(self, values, slices=None, fill_others=False):\n if slices is None or slices2shape(slices) == self.shape:\n self.write_full(values.tobytes())\n else:\n if fill_others:\n cdata = np.full(self.shape, self.fillvalue, self.dtype)\n else:\n cdata = self.get_data()\n cdata[slices] = values\n self.write_full(cdata.tobytes())\n\n def write_full(self, data):\n\n self.client.put_object(\n Bucket=bucketName(self.dataset.name), Key=self.name, Body=data\n )\n\n def read(self, length=None, offset=0):\n if length is None:\n length = self.byte_count\n\n byteRange = 'bytes={}-{}'.format(offset, offset+length-1)\n return self.client.get_object(\n 
Bucket=bucketName(self.dataset.name),\n Key=self.name,\n Range=byteRange\n )['Body'].read()\n\n\n_backend = Backend('s3', S3Connection, S3Dataset, S3DataChunk)\n"
] |
[
[
"numpy.asarray",
"numpy.full"
]
] |
michaelleerilee/CommunityFirnModel
|
[
"312fc30b62b7e36a609660e5b10e3269eb090bae"
] |
[
"CFM_main/reader.py"
] |
[
"#!usr/bin/env python\n'''\nFunctions to read model inputs.\n'''\n\nimport os\nimport numpy as np\n# from string import join\nfrom constants import *\nimport h5py\n\ndef read_input(filename,StartDate=None):\n '''\n Read in data from csv input files\n\n :param filename: name of the file which holds the accumulation rate data\n\n :return input_data: vector of field of interest (e.g. temperature, accumulation rate from a specified csv file\n :return input_year: corresponding time vector (in years)\n '''\n\n spot = os.getcwd()\n\n FID = os.path.join(spot, filename)\n data = np.loadtxt(FID, delimiter=',') #changed 3/6/17 to loadtxt from genfromtxt; much faster\n xx,yy = np.shape(data)\n if xx>yy:\n input_year = data[:, 0]\n input_data = data[:, 1]\n else: \n input_year = data[0, :]\n input_data = data[1, :]\n\n input_year_full = input_year.copy()\n input_data_full = input_data.copy()\n\n if StartDate==None:\n pass\n else:\n StartInd = np.where(input_year>=StartDate)[0]\n input_year = input_year[StartInd]\n input_data = input_data[StartInd]\n\n return input_data, input_year, input_data_full, input_year_full\n\ndef read_init(folder, resultsFileName, varname):\n\n '''\n Read in data for initial depth, age, density, and temperature to run the model without spinup\n\n :param folder: the folder containing the files holding depth, age, density, and temperature\n\n '''\n f5 = h5py.File(os.path.join(folder, resultsFileName),'r')\n init_value = f5[varname][:]\n f5.close()\n\n return init_value\n\n"
] |
[
[
"numpy.shape",
"numpy.where",
"numpy.loadtxt"
]
] |
sandyhouse/FleetX
|
[
"b3d089cdb0f388c12ad95494ee98053d5bfa450b",
"b3d089cdb0f388c12ad95494ee98053d5bfa450b"
] |
[
"examples/resnet/train_fleet_static_amp.py",
"examples/resnet/train_fleet_lamb.py"
] |
[
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport argparse\nimport ast\nimport paddle\nfrom paddle.distributed import fleet\nimport resnet_static as resnet\nimport os\n\nbase_lr = 0.1\nmomentum_rate = 0.9\nl2_decay = 1e-4\n\nepoch = 10\nbatch_size = 32\nclass_dim = 102\n\ndef optimizer_setting(parameter_list=None):\n optimizer = paddle.optimizer.Momentum(\n learning_rate=base_lr,\n momentum=momentum_rate,\n weight_decay=paddle.regularizer.L2Decay(l2_decay),\n parameters=parameter_list)\n return optimizer\n\n\ndef get_train_loader(feed_list, place):\n def reader_decorator(reader):\n def __reader__():\n for item in reader():\n img = np.array(item[0]).astype('float32').reshape(3, 224, 224)\n label = np.array(item[1]).astype('int64').reshape(1)\n yield img, label\n\n return __reader__\n train_reader = paddle.batch(\n reader_decorator(paddle.dataset.flowers.train(use_xmap=True)),\n batch_size=batch_size,\n drop_last=True)\n train_loader = paddle.io.DataLoader.from_generator(\n capacity=32,\n use_double_buffer=True,\n feed_list=feed_list,\n iterable=True)\n train_loader.set_sample_list_generator(train_reader, place)\n return train_loader\n\ndef train_resnet():\n paddle.enable_static()\n paddle.vision.set_image_backend('cv2')\n\n image = paddle.static.data(name=\"x\", shape=[None, 3, 224, 224], dtype='float32')\n label= paddle.static.data(name=\"y\", shape=[None, 1], dtype='int64')\n\n model = resnet.ResNet(layers=50)\n out = model.net(input=image, class_dim=class_dim)\n avg_cost = paddle.nn.functional.cross_entropy(input=out, label=label)\n acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)\n acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)\n\n place = paddle.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0)))\n \n train_loader = get_train_loader([image, label], place)\n\n strategy = fleet.DistributedStrategy()\n\n strategy.amp = True\n strategy.amp_configs = {\n \"init_loss_scaling\": 32768,\n \"decr_every_n_nan_or_inf\": 2,\n \"incr_every_n_steps\": 1000,\n \"incr_ratio\": 2.0,\n \"use_dynamic_loss_scaling\": True,\n \"decr_ratio\": 0.5,\n \"custom_white_list\": [],\n \"custom_black_list\": [],\n }\n fleet.init(is_collective=True, strategy=strategy)\n optimizer = optimizer_setting()\n optimizer = fleet.distributed_optimizer(optimizer)\n optimizer.minimize(avg_cost)\n\n exe = paddle.static.Executor(place)\n exe.run(paddle.static.default_startup_program())\n\n epoch = 10\n step = 0\n for eop in range(epoch):\n for batch_id, data in enumerate(train_loader()):\n loss, acc1, acc5 = exe.run(paddle.static.default_main_program(), feed=data, fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name]) \n if batch_id % 5 == 0:\n print(\"[Epoch %d, batch %d] loss: %.5f, acc1: %.5f, acc5: %.5f\" % (eop, batch_id, loss, acc1, acc5))\n\nif __name__ == '__main__':\n train_resnet()\n",
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nimport numpy as np\nimport argparse\nimport ast\nimport paddle\nfrom paddle.distributed import fleet\nimport resnet_static as resnet\nimport os\n\nbase_lr = 0.1\nmomentum_rate = 0.9\nl2_decay = 1e-4\n\nepoch = 10\nbatch_size = 32\nclass_dim = 102\n\ndef optimizer_setting(parameter_list=None):\n optimizer = paddle.optimizer.Momentum(\n learning_rate=base_lr,\n momentum=momentum_rate,\n weight_decay=paddle.regularizer.L2Decay(l2_decay),\n parameters=parameter_list)\n return optimizer\n\n\ndef get_train_loader(feed_list, place):\n def reader_decorator(reader):\n def __reader__():\n for item in reader():\n img = np.array(item[0]).astype('float32').reshape(3, 224, 224)\n label = np.array(item[1]).astype('int64').reshape(1)\n yield img, label\n\n return __reader__\n train_reader = paddle.batch(\n reader_decorator(paddle.dataset.flowers.train(use_xmap=True)),\n batch_size=batch_size,\n drop_last=True)\n train_loader = paddle.io.DataLoader.from_generator(\n capacity=32,\n use_double_buffer=True,\n feed_list=feed_list,\n iterable=True)\n train_loader.set_sample_list_generator(train_reader, place)\n return train_loader\n\ndef train_resnet():\n paddle.enable_static()\n paddle.vision.set_image_backend('cv2')\n\n image = paddle.static.data(name=\"x\", shape=[None, 3, 224, 224], dtype='float32')\n label= paddle.static.data(name=\"y\", shape=[None, 1], dtype='int64')\n\n model = resnet.ResNet(layers=50)\n out = model.net(input=image, class_dim=class_dim)\n avg_cost = paddle.nn.functional.cross_entropy(input=out, label=label)\n acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)\n acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)\n\n place = paddle.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0)))\n \n train_loader = get_train_loader([image, label], place)\n\n strategy = fleet.DistributedStrategy()\n # lamb\n strategy.lamb = True\n strategy.lamb_configs = {\n 'lamb_weight_decay': 0.01,\n 'exclude_from_weight_decay': ['layer_norm'],\n }\n\n fleet.init(is_collective=True, strategy=strategy)\n optimizer = paddle.optimizer.Adam(learning_rate=0.01, beta1=0.9, beta2=0.999)\n # optimizer = optimizer_setting()\n optimizer = fleet.distributed_optimizer(optimizer)\n optimizer.minimize(avg_cost)\n print(str(strategy))\n\n filename = \"./main_program.txt\"\n with open(filename + str(int(os.environ.get('FLAGS_selected_gpus', 0))), 'w') as f:\n f.write(str(paddle.fluid.default_main_program()))\n filename = \"./start_program.txt\"\n with open(filename + str(int(os.environ.get('FLAGS_selected_gpus', 0))), 'w') as f:\n f.write(str(paddle.fluid.default_startup_program()))\n\n exe = paddle.static.Executor(place)\n exe.run(paddle.static.default_startup_program())\n\n epoch = 10\n step = 0\n for eop in range(epoch):\n for batch_id, data in enumerate(train_loader()):\n loss, acc1, acc5 = 
exe.run(paddle.static.default_main_program(), feed=data, fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name]) \n if batch_id % 5 == 0:\n print(\"[Epoch %d, batch %d] loss: %.5f, acc1: %.5f, acc5: %.5f\" % (eop, batch_id, loss, acc1, acc5))\n\nif __name__ == '__main__':\n train_resnet()\n"
] |
[
[
"numpy.array"
],
[
"numpy.array"
]
] |
bond005/elmo_ner
|
[
"c6135cfca5d7bf817a22c8c8631e7f81f6f05f94"
] |
[
"tests/test_bert_ner.py"
] |
[
"import copy\nimport gc\nimport os\nimport pickle\nimport re\nimport sys\nimport tempfile\nimport unittest\n\nimport numpy as np\nfrom sklearn.exceptions import NotFittedError\nfrom spacy_udpipe.language import UDPipeLanguage\n\ntry:\n from deep_ner.bert_ner import BERT_NER\n from deep_ner.utils import load_dataset_from_json, set_total_seed\n from deep_ner.quality import calculate_prediction_quality\n from deep_ner.udpipe_data import UNIVERSAL_DEPENDENCIES, UNIVERSAL_POS_TAGS\nexcept:\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n from deep_ner.bert_ner import BERT_NER\n from deep_ner.utils import load_dataset_from_json, set_total_seed\n from deep_ner.quality import calculate_prediction_quality\n from deep_ner.udpipe_data import UNIVERSAL_DEPENDENCIES, UNIVERSAL_POS_TAGS\n\n\nclass TestBertNer(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n set_total_seed(0)\n\n def tearDown(self):\n if hasattr(self, 'ner'):\n del self.ner\n if hasattr(self, 'another_ner'):\n del self.another_ner\n if hasattr(self, 'temp_file_name'):\n if os.path.isfile(self.temp_file_name):\n os.remove(self.temp_file_name)\n\n def test_creation(self):\n self.ner = BERT_NER(udpipe_lang='en')\n self.assertIsInstance(self.ner, BERT_NER)\n self.assertTrue(hasattr(self.ner, 'udpipe_lang'))\n self.assertTrue(hasattr(self.ner, 'use_shapes'))\n self.assertTrue(hasattr(self.ner, 'use_nlp_features'))\n self.assertTrue(hasattr(self.ner, 'batch_size'))\n self.assertTrue(hasattr(self.ner, 'lstm_units'))\n self.assertTrue(hasattr(self.ner, 'lr'))\n self.assertTrue(hasattr(self.ner, 'l2_reg'))\n self.assertTrue(hasattr(self.ner, 'clip_norm'))\n self.assertTrue(hasattr(self.ner, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(self.ner, 'finetune_bert'))\n self.assertTrue(hasattr(self.ner, 'max_epochs'))\n self.assertTrue(hasattr(self.ner, 'patience'))\n self.assertTrue(hasattr(self.ner, 'random_seed'))\n self.assertTrue(hasattr(self.ner, 'gpu_memory_frac'))\n self.assertTrue(hasattr(self.ner, 'max_seq_length'))\n self.assertTrue(hasattr(self.ner, 'validation_fraction'))\n self.assertTrue(hasattr(self.ner, 'verbose'))\n self.assertIsInstance(self.ner.batch_size, int)\n self.assertIsInstance(self.ner.lstm_units, int)\n self.assertIsInstance(self.ner.lr, float)\n self.assertIsInstance(self.ner.l2_reg, float)\n self.assertIsInstance(self.ner.clip_norm, float)\n self.assertIsInstance(self.ner.bert_hub_module_handle, str)\n self.assertIsInstance(self.ner.udpipe_lang, str)\n self.assertIsInstance(self.ner.finetune_bert, bool)\n self.assertIsInstance(self.ner.max_epochs, int)\n self.assertIsInstance(self.ner.patience, int)\n self.assertIsNone(self.ner.random_seed)\n self.assertIsInstance(self.ner.gpu_memory_frac, float)\n self.assertIsInstance(self.ner.max_seq_length, int)\n self.assertIsInstance(self.ner.validation_fraction, float)\n self.assertIsInstance(self.ner.verbose, bool)\n self.assertIsInstance(self.ner.use_shapes, bool)\n self.assertIsInstance(self.ner.use_nlp_features, bool)\n\n def test_check_params_positive(self):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1', finetune_bert=True,\n batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0, validation_fraction=0.0,\n max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=None,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n self.assertTrue(True)\n\n def test_check_params_negative001(self):\n true_err_msg = 
re.escape('`bert_hub_module_handle` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=False, udpipe_lang='en'\n )\n\n def test_check_params_negative002(self):\n true_err_msg = re.escape('`bert_hub_module_handle` is wrong! Expected `{0}`, got `{1}`.'.format(\n type('abc'), type(123)))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle=1, finetune_bert=True,\n batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0, validation_fraction=0.1,\n max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=False, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative003(self):\n true_err_msg = re.escape('`batch_size` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0, validation_fraction=0.1,\n max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative004(self):\n true_err_msg = re.escape('`batch_size` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size='32', max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative005(self):\n true_err_msg = re.escape('`batch_size` is wrong! Expected a positive integer value, but -3 is not positive.')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=-3, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative006(self):\n true_err_msg = re.escape('`max_epochs` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative007(self):\n true_err_msg = re.escape('`max_epochs` is wrong! 
Expected `{0}`, got `{1}`.'.format(\n type(3), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs='10', patience=3, gpu_memory_frac=1.0, verbose=False,\n random_seed=42, lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative008(self):\n true_err_msg = re.escape('`max_epochs` is wrong! Expected a positive integer value, but -3 is not positive.')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=-3, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative009(self):\n true_err_msg = re.escape('`patience` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative010(self):\n true_err_msg = re.escape('`patience` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience='3', gpu_memory_frac=1.0, verbose=False,\n random_seed=42, lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative011(self):\n true_err_msg = re.escape('`patience` is wrong! Expected a positive integer value, but -3 is not positive.')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=-3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative012(self):\n true_err_msg = re.escape('`max_seq_length` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, lr=1e-3, l2_reg=1e-4, clip_norm=5.0, validation_fraction=0.1,\n max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative013(self):\n true_err_msg = re.escape('`max_seq_length` is wrong! 
Expected `{0}`, got `{1}`.'.format(\n type(3), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length='512', lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative014(self):\n true_err_msg = re.escape('`max_seq_length` is wrong! Expected a positive integer value, but -3 is not '\n 'positive.')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=-3, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative015(self):\n true_err_msg = re.escape('`validation_fraction` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative016(self):\n true_err_msg = re.escape('`validation_fraction` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3.5), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction='0.1', max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,\n random_seed=42, lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative017(self):\n true_err_msg = '`validation_fraction` is wrong! Expected a positive floating-point value greater than or ' \\\n 'equal to 0.0, but {0} is not positive.'.format(-0.1)\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=-0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative018(self):\n true_err_msg = '`validation_fraction` is wrong! 
Expected a positive floating-point value less than 1.0, but ' \\\n '{0} is not less than 1.0.'.format(1.1)\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=1.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative019(self):\n true_err_msg = re.escape('`gpu_memory_frac` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative020(self):\n true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3.5), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac='1.0', verbose=False,\n random_seed=42, lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative021(self):\n true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected a floating-point value in the (0.0, 1.0], '\n 'but {0} is not proper.'.format(-1.0))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=-1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative022(self):\n true_err_msg = re.escape('`gpu_memory_frac` is wrong! 
Expected a floating-point value in the (0.0, 1.0], '\n 'but {0} is not proper.'.format(1.3))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.3, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative023(self):\n true_err_msg = re.escape('`lr` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative024(self):\n true_err_msg = re.escape('`lr` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3.5), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr='1e-3', l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative025(self):\n true_err_msg = re.escape('`lr` is wrong! Expected a positive floating-point value, but {0} is not '\n 'positive.'.format(0.0))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=0.0, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative026(self):\n true_err_msg = re.escape('`lr` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative027(self):\n true_err_msg = re.escape('`lr` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3.5), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr='1e-3', l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative028(self):\n true_err_msg = re.escape('`lr` is wrong! 
Expected a positive floating-point value, but {0} is not '\n 'positive.'.format(0.0))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=0.0, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative029(self):\n true_err_msg = re.escape('`l2_reg` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, clip_norm=5.0, validation_fraction=0.1,\n max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative030(self):\n true_err_msg = re.escape('`l2_reg` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3.5), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg='1e-4', clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative031(self):\n true_err_msg = re.escape('`l2_reg` is wrong! Expected a non-negative floating-point value, but {0} is '\n 'negative.'.format(-2.0))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=-2.0, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative032(self):\n true_err_msg = re.escape('`finetune_bert` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, clip_norm=5.0,\n max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative033(self):\n true_err_msg = re.escape('`finetune_bert` is wrong! 
Expected `{0}`, got `{1}`.'.format(\n type(True), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert='True', batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative034(self):\n true_err_msg = re.escape('`verbose` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, random_seed=42, lstm_units=128,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative035(self):\n true_err_msg = re.escape('`verbose` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(True), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose='False',\n random_seed=42, lstm_units=128, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative036(self):\n true_err_msg = re.escape('`lstm_units` is not specified!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative037(self):\n true_err_msg = re.escape('`lstm_units` is wrong! Expected `{0}`, got `{1}`.'.format(\n type(3), type('3')))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n lstm_units='128', finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4,\n clip_norm=5.0, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,\n random_seed=42, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_params_negative038(self):\n true_err_msg = re.escape('`lstm_units` is wrong! 
Expected a positive integer value, but -3 is not positive.')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_params(\n bert_hub_module_handle='https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1',\n finetune_bert=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, clip_norm=5.0,\n validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42,\n lstm_units=-3, use_shapes=True, use_nlp_features=True, udpipe_lang='en'\n )\n\n def test_check_X_positive(self):\n X = ['abc', 'defgh', '4wdffg']\n BERT_NER.check_X(X, 'X_train')\n self.assertTrue(True)\n\n def test_check_X_negative01(self):\n X = {'abc', 'defgh', '4wdffg'}\n true_err_msg = re.escape('`X_train` is wrong, because it is not list-like object!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_X(X, 'X_train')\n\n def test_check_X_negative02(self):\n X = np.random.uniform(-1.0, 1.0, (10, 2))\n true_err_msg = re.escape('`X_train` is wrong, because it is not 1-D list!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_X(X, 'X_train')\n\n def test_check_X_negative03(self):\n X = ['abc', 23, '4wdffg']\n true_err_msg = re.escape('Item 1 of `X_train` is wrong, because it is not string-like object!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_X(X, 'X_train')\n\n def text_check_Xy_positive(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_classes_list = ('LOC', 'ORG', 'PER')\n self.assertEqual(true_classes_list, BERT_NER.check_Xy(X, 'X_train', y, 'y_train'))\n\n def text_check_Xy_negative01(self):\n X = {\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n }\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('`X_train` is wrong, because it is not list-like object!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative02(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. 
По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = {\n '1': {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n '2': {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n }\n true_err_msg = re.escape('`y_train` is wrong, because it is not a list-like object!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative03(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = np.random.uniform(-1.0, 1.0, (10, 2))\n true_err_msg = re.escape('`y_train` is wrong, because it is not 1-D list!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative04(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n },\n {\n 'LOC': [(17, 24), (117, 130)]\n }\n ]\n true_err_msg = re.escape('Length of `X_train` does not correspond to length of `y_train`! 2 != 3')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative05(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. 
Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n 4\n ]\n true_err_msg = re.escape('Item 1 of `y_train` is wrong, because it is not a dictionary-like object!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative06(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 1: [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 0 of `y_train` is wrong, because its key `1` is not a string-like object!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative07(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'O': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `O` incorrectly specifies a named '\n 'entity!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative08(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n '123': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `123` incorrectly specifies a named '\n 'entity!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative09(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. 
По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'loc': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `loc` incorrectly specifies a named '\n 'entity!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative10(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': {1, 2}\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 0 of `y_train` is wrong, because its value `{0}` is not a list-like '\n 'object!'.format(y[0]['PER']))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative11(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), 63],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 1 of `y_train` is wrong, because named entity bounds `63` are not specified as '\n 'list-like object!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative12(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. 
Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77, 81)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 1 of `y_train` is wrong, because named entity bounds `{0}` are not specified as '\n '2-D list!'.format((63, 77, 81)))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative13(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (219, 196)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '\n 'incorrect!'.format((219, 196)))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative14(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(122, 137), (196, 519)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '\n 'incorrect!'.format((196, 519)))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def text_check_Xy_negative15(self):\n X = [\n 'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '\n 'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '\n 'Налбандовым.',\n 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. 
Как было объявлено, '\n 'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '\n 'главное место среди которых занимает состояние мировой экономики и безопасность.'\n ]\n y = [\n {\n 'ORG': [(26, 37)],\n 'PER': [(-1, 137), (196, 219)]\n },\n {\n 'ORG': [(126, 135)],\n 'PER': [(0, 11), (63, 77)],\n 'LOC': [(24, 34), (161, 178)]\n }\n ]\n true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '\n 'incorrect!'.format((-1, 137)))\n with self.assertRaisesRegex(ValueError, true_err_msg):\n BERT_NER.check_Xy(X, 'X_train', y, 'y_train')\n\n def test_detect_token_labels_positive01(self):\n # source_text = 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози.'\n tokenized_text = ['Ба', '##рак', 'Об', '##ама', 'принимает', 'в', 'Б', '##елом', 'доме', 'своего',\n 'французского', 'кол', '##ле', '##гу', 'Н', '##ико', '##ля', 'Са', '##рко', '##зи', '.']\n token_bounds = [(0, 2), (2, 5), (6, 8), (8, 11), (12, 21), (22, 23), (24, 25), (25, 29), (30, 34), (35, 41),\n (42, 54), (55, 58), (58, 60), (60, 62), (63, 64), (64, 67), (67, 69), (70, 72), (72, 75),\n (75, 77), (77, 78)]\n indices_of_named_entities = np.array(\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 0],\n dtype=np.int32\n )\n label_IDs = {1: 1, 2: 2, 3: 1}\n y_true = np.array(\n [0, 2, 1, 1, 1, 0, 0, 4, 3, 3, 0, 0, 0, 0, 0, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n dtype=np.int32\n )\n y_pred = BERT_NER.detect_token_labels(tokenized_text, token_bounds, indices_of_named_entities, label_IDs, 32)\n self.assertIsInstance(y_pred, np.ndarray)\n self.assertEqual(y_true.shape, y_pred.shape)\n self.assertEqual(y_true.tolist(), y_pred.tolist())\n\n def test_detect_token_labels_positive02(self):\n # source_text = 'С 1876 г Павлов ассистирует профессору К. Н. 
Устимовичу в Медико-хирургической академии и ' \\\n # 'параллельно изучает физиологию кровообращения.'\n tokenized_text = ['С', '1876', 'г', 'Павло', '##в', 'а', '##сси', '##сти', '##рует', 'профессор', '##у', 'К',\n '.', 'Н', '.', 'У', '##сти', '##мов', '##ич', '##у', 'в', 'М', '##еди', '##ко', '-',\n 'х', '##ир', '##ург', '##ической', 'академии', 'и', 'пара', '##лл', '##ельно',\n 'из', '##уч', '##ает', 'ф', '##из', '##ио', '##логи', '##ю',\n 'к', '##рово', '##об', '##ращения', '.']\n token_bounds = [(0, 1), (2, 6), (7, 8), (9, 14), (14, 15), (16, 17), (17, 20), (20, 23), (23, 27), (28, 37),\n (37, 38), (39, 40), (40, 41), (42, 43), (43, 44), (45, 46), (46, 49), (49, 52), (52, 54),\n (54, 55), (56, 57), (58, 59), (59, 62), (62, 64), (64, 65), (65, 66), (66, 68), (68, 71),\n (71, 78), (79, 87), (88, 89), (90, 94), (94, 96), (96, 101), (102, 104), (104, 106), (106, 109),\n (110, 111), (111, 113), (113, 115), (115, 119), (119, 120), (121, 122), (122, 126), (126, 128),\n (128, 135), (135, 136)]\n indices_of_named_entities = np.array(\n [0, 0, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n dtype=np.int32\n )\n label_IDs = {1: 1, 2: 2, 3: 3, 4: 2, 5: 4}\n y_true = np.array(\n [0, 0, 2, 1, 4, 3, 0, 0, 0, 0, 6, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 0, 8, 7, 7, 7, 7, 7, 7, 7, 7, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n dtype=np.int32\n )\n y_pred = BERT_NER.detect_token_labels(tokenized_text, token_bounds, indices_of_named_entities, label_IDs, 64)\n self.assertIsInstance(y_pred, np.ndarray)\n self.assertEqual(y_true.shape, y_pred.shape)\n self.assertEqual(y_true.tolist(), y_pred.tolist())\n\n def test_detect_token_labels_positive03(self):\n # source_text = 'Весной 1890 года Варшавский и Томский университеты избирают его профессором.'\n tokenized_text = ['В', '##есной', '1890', 'года', 'В', '##ар', '##ша', '##вский', 'и', 'Томск', '##ий',\n 'университет', '##ы', 'из', '##бира', '##ют', 'его', 'профессором', '.']\n token_bounds = [(0, 1), (1, 6), (7, 11), (12, 16), (17, 18), (18, 20), (20, 22), (22, 27), (28, 29), (30, 35),\n (35, 37), (38, 49), (49, 50), (51, 52), (53, 57), (57, 59), (60, 63), (64, 75), (75, 76)]\n indices_of_named_entities = np.array(\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0],\n dtype=np.int32\n )\n label_IDs = {1: 1, 2: 2, 3: 2}\n y_true = np.array(\n [0, 2, 1, 1, 1, 4, 3, 3, 3, 3, 4, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n dtype=np.int32\n )\n y_pred = BERT_NER.detect_token_labels(tokenized_text, token_bounds, indices_of_named_entities, label_IDs, 32)\n self.assertIsInstance(y_pred, np.ndarray)\n self.assertEqual(y_true.shape, y_pred.shape)\n self.assertEqual(y_true.tolist(), y_pred.tolist())\n\n def test_calculate_indices_of_named_entities(self):\n source_text = 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози.'\n classes_list = ('LOCATION', 'ORG', 'PERSON')\n named_entities = {'PERSON': [(0, 11), (63, 77)], 'LOCATION': 
[(24, 34)]}\n true_indices = np.array(\n [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 0],\n dtype=np.int32\n )\n true_labels_to_classes = {1: 1, 2: 3, 3: 3}\n indices, labels_to_classes = BERT_NER.calculate_indices_of_named_entities(source_text, classes_list,\n named_entities)\n self.assertIsInstance(indices, np.ndarray)\n self.assertIsInstance(labels_to_classes, dict)\n self.assertEqual(true_indices.shape, indices.shape)\n self.assertEqual(true_indices.tolist(), indices.tolist())\n self.assertEqual(set(true_labels_to_classes.keys()), set(labels_to_classes.keys()))\n for label_ID in true_labels_to_classes:\n self.assertEqual(true_labels_to_classes[label_ID], labels_to_classes[label_ID])\n\n def test_fit_positive01(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=3, batch_size=4, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=None, lstm_units=32, udpipe_lang='ru')\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n res = self.ner.fit(X_train, y_train)\n self.assertIsInstance(res, BERT_NER)\n self.assertTrue(hasattr(res, 'udpipe_lang'))\n self.assertTrue(hasattr(res, 'batch_size'))\n self.assertTrue(hasattr(res, 'lstm_units'))\n self.assertTrue(hasattr(res, 'lr'))\n self.assertTrue(hasattr(res, 'l2_reg'))\n self.assertTrue(hasattr(res, 'clip_norm'))\n self.assertTrue(hasattr(res, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(res, 'finetune_bert'))\n self.assertTrue(hasattr(res, 'max_epochs'))\n self.assertTrue(hasattr(res, 'patience'))\n self.assertTrue(hasattr(res, 'random_seed'))\n self.assertTrue(hasattr(res, 'gpu_memory_frac'))\n self.assertTrue(hasattr(res, 'max_seq_length'))\n self.assertTrue(hasattr(res, 'validation_fraction'))\n self.assertTrue(hasattr(res, 'verbose'))\n self.assertTrue(hasattr(res, 'use_shapes'))\n self.assertTrue(hasattr(res, 'use_nlp_features'))\n self.assertIsInstance(res.udpipe_lang, str)\n self.assertIsInstance(res.batch_size, int)\n self.assertIsInstance(res.lstm_units, int)\n self.assertIsInstance(res.lr, float)\n self.assertIsInstance(res.l2_reg, float)\n self.assertIsInstance(res.clip_norm, float)\n self.assertIsInstance(res.bert_hub_module_handle, str)\n self.assertIsInstance(res.finetune_bert, bool)\n self.assertIsInstance(res.max_epochs, int)\n self.assertIsInstance(res.patience, int)\n self.assertIsInstance(res.random_seed, int)\n self.assertIsInstance(res.gpu_memory_frac, float)\n self.assertIsInstance(res.max_seq_length, int)\n self.assertIsInstance(res.validation_fraction, float)\n self.assertIsInstance(res.verbose, bool)\n self.assertIsInstance(res.use_shapes, bool)\n self.assertIsInstance(res.use_nlp_features, bool)\n self.assertTrue(hasattr(res, 'classes_list_'))\n self.assertTrue(hasattr(res, 'shapes_list_'))\n self.assertTrue(hasattr(res, 'tokenizer_'))\n self.assertTrue(hasattr(res, 'sess_'))\n self.assertTrue(hasattr(res, 'universal_pos_tags_dict_'))\n self.assertTrue(hasattr(res, 'universal_dependencies_dict_'))\n self.assertTrue(hasattr(res, 'nlp_'))\n self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))\n self.assertIsInstance(res.shapes_list_, tuple)\n self.assertGreater(len(res.shapes_list_), 3)\n self.assertEqual(res.shapes_list_[-3:], ('[CLS]', '[SEP]', '[UNK]'))\n 
self.assertIsInstance(res.universal_pos_tags_dict_, dict)\n self.assertIsInstance(res.universal_dependencies_dict_, dict)\n self.assertIsInstance(res.nlp_, UDPipeLanguage)\n self.assertEqual(len(res.universal_pos_tags_dict_), len(UNIVERSAL_POS_TAGS))\n self.assertEqual(len(res.universal_dependencies_dict_), len(UNIVERSAL_DEPENDENCIES))\n\n def test_fit_positive02(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=True, max_epochs=3, batch_size=2, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=42, lstm_units=32, udpipe_lang='ru')\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n res = self.ner.fit(X_train, y_train)\n self.assertIsInstance(res, BERT_NER)\n self.assertTrue(hasattr(res, 'udpipe_lang'))\n self.assertTrue(hasattr(res, 'batch_size'))\n self.assertTrue(hasattr(res, 'lstm_units'))\n self.assertTrue(hasattr(res, 'lr'))\n self.assertTrue(hasattr(res, 'l2_reg'))\n self.assertTrue(hasattr(res, 'clip_norm'))\n self.assertTrue(hasattr(res, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(res, 'finetune_bert'))\n self.assertTrue(hasattr(res, 'max_epochs'))\n self.assertTrue(hasattr(res, 'patience'))\n self.assertTrue(hasattr(res, 'random_seed'))\n self.assertTrue(hasattr(res, 'gpu_memory_frac'))\n self.assertTrue(hasattr(res, 'max_seq_length'))\n self.assertTrue(hasattr(res, 'validation_fraction'))\n self.assertTrue(hasattr(res, 'verbose'))\n self.assertTrue(hasattr(res, 'use_shapes'))\n self.assertTrue(hasattr(res, 'use_nlp_features'))\n self.assertIsInstance(res.udpipe_lang, str)\n self.assertIsInstance(res.batch_size, int)\n self.assertIsInstance(res.lstm_units, int)\n self.assertIsInstance(res.lr, float)\n self.assertIsInstance(res.l2_reg, float)\n self.assertIsInstance(res.clip_norm, float)\n self.assertIsInstance(res.bert_hub_module_handle, str)\n self.assertIsInstance(res.finetune_bert, bool)\n self.assertIsInstance(res.max_epochs, int)\n self.assertIsInstance(res.patience, int)\n self.assertIsInstance(res.random_seed, int)\n self.assertIsInstance(res.gpu_memory_frac, float)\n self.assertIsInstance(res.max_seq_length, int)\n self.assertIsInstance(res.validation_fraction, float)\n self.assertIsInstance(res.verbose, bool)\n self.assertIsInstance(res.use_shapes, bool)\n self.assertIsInstance(res.use_nlp_features, bool)\n self.assertEqual(res.random_seed, 42)\n self.assertTrue(hasattr(res, 'classes_list_'))\n self.assertTrue(hasattr(res, 'shapes_list_'))\n self.assertTrue(hasattr(res, 'tokenizer_'))\n self.assertTrue(hasattr(res, 'sess_'))\n self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))\n self.assertIsInstance(res.shapes_list_, tuple)\n self.assertGreater(len(res.shapes_list_), 3)\n self.assertEqual(res.shapes_list_[-3:], ('[CLS]', '[SEP]', '[UNK]'))\n\n def test_fit_positive03(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=3, batch_size=4, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=None, lstm_units=None, clip_norm=None,\n udpipe_lang='ru', use_shapes=False, use_nlp_features=True)\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n res = self.ner.fit(X_train, y_train)\n self.assertIsInstance(res, BERT_NER)\n self.assertTrue(hasattr(res, 'udpipe_lang'))\n self.assertTrue(hasattr(res, 'batch_size'))\n self.assertTrue(hasattr(res, 'lstm_units'))\n 
self.assertTrue(hasattr(res, 'lr'))\n self.assertTrue(hasattr(res, 'l2_reg'))\n self.assertTrue(hasattr(res, 'clip_norm'))\n self.assertTrue(hasattr(res, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(res, 'finetune_bert'))\n self.assertTrue(hasattr(res, 'max_epochs'))\n self.assertTrue(hasattr(res, 'patience'))\n self.assertTrue(hasattr(res, 'random_seed'))\n self.assertTrue(hasattr(res, 'gpu_memory_frac'))\n self.assertTrue(hasattr(res, 'max_seq_length'))\n self.assertTrue(hasattr(res, 'validation_fraction'))\n self.assertTrue(hasattr(res, 'verbose'))\n self.assertTrue(hasattr(res, 'use_shapes'))\n self.assertTrue(hasattr(res, 'use_nlp_features'))\n self.assertIsInstance(res.udpipe_lang, str)\n self.assertIsInstance(res.batch_size, int)\n self.assertIsNone(res.lstm_units)\n self.assertIsInstance(res.lr, float)\n self.assertIsInstance(res.l2_reg, float)\n self.assertIsNone(res.clip_norm, None)\n self.assertIsInstance(res.bert_hub_module_handle, str)\n self.assertIsInstance(res.finetune_bert, bool)\n self.assertIsInstance(res.max_epochs, int)\n self.assertIsInstance(res.patience, int)\n self.assertIsInstance(res.random_seed, int)\n self.assertIsInstance(res.gpu_memory_frac, float)\n self.assertIsInstance(res.max_seq_length, int)\n self.assertIsInstance(res.validation_fraction, float)\n self.assertIsInstance(res.verbose, bool)\n self.assertIsInstance(res.use_shapes, bool)\n self.assertIsInstance(res.use_nlp_features, bool)\n self.assertTrue(hasattr(res, 'classes_list_'))\n self.assertTrue(hasattr(res, 'shapes_list_'))\n self.assertTrue(hasattr(res, 'tokenizer_'))\n self.assertTrue(hasattr(res, 'sess_'))\n self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))\n self.assertIsInstance(res.shapes_list_, tuple)\n self.assertGreater(len(res.shapes_list_), 3)\n self.assertEqual(res.shapes_list_[-3:], ('[CLS]', '[SEP]', '[UNK]'))\n\n def test_fit_predict(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=5, batch_size=4, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=42, udpipe_lang='ru', use_shapes=True,\n use_nlp_features=False)\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n res = self.ner.fit(X_train, y_train)\n self.assertIsInstance(res, BERT_NER)\n self.assertTrue(hasattr(res, 'udpipe_lang'))\n self.assertTrue(hasattr(res, 'batch_size'))\n self.assertTrue(hasattr(res, 'lstm_units'))\n self.assertTrue(hasattr(res, 'lr'))\n self.assertTrue(hasattr(res, 'l2_reg'))\n self.assertTrue(hasattr(res, 'clip_norm'))\n self.assertTrue(hasattr(res, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(res, 'finetune_bert'))\n self.assertTrue(hasattr(res, 'max_epochs'))\n self.assertTrue(hasattr(res, 'patience'))\n self.assertTrue(hasattr(res, 'random_seed'))\n self.assertTrue(hasattr(res, 'gpu_memory_frac'))\n self.assertTrue(hasattr(res, 'max_seq_length'))\n self.assertTrue(hasattr(res, 'validation_fraction'))\n self.assertTrue(hasattr(res, 'verbose'))\n self.assertTrue(hasattr(res, 'use_shapes'))\n self.assertTrue(hasattr(res, 'use_nlp_features'))\n self.assertIsInstance(res.udpipe_lang, str)\n self.assertIsInstance(res.batch_size, int)\n self.assertIsInstance(res.lstm_units, int)\n self.assertIsInstance(res.lr, float)\n self.assertIsInstance(res.l2_reg, float)\n self.assertIsInstance(res.clip_norm, float)\n self.assertIsInstance(res.bert_hub_module_handle, str)\n self.assertIsInstance(res.finetune_bert, bool)\n 
self.assertIsInstance(res.max_epochs, int)\n self.assertIsInstance(res.patience, int)\n self.assertIsInstance(res.random_seed, int)\n self.assertIsInstance(res.gpu_memory_frac, float)\n self.assertIsInstance(res.max_seq_length, int)\n self.assertIsInstance(res.validation_fraction, float)\n self.assertIsInstance(res.verbose, bool)\n self.assertIsInstance(res.use_shapes, bool)\n self.assertIsInstance(res.use_nlp_features, bool)\n self.assertTrue(hasattr(res, 'classes_list_'))\n self.assertTrue(hasattr(res, 'shapes_list_'))\n self.assertTrue(hasattr(res, 'tokenizer_'))\n self.assertTrue(hasattr(res, 'sess_'))\n self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))\n self.assertIsInstance(res.shapes_list_, tuple)\n self.assertGreater(len(res.shapes_list_), 3)\n self.assertEqual(res.shapes_list_[-3:], ('[CLS]', '[SEP]', '[UNK]'))\n y_pred = res.predict(X_train)\n self.assertIsInstance(y_pred, list)\n self.assertEqual(len(X_train), len(y_pred))\n for sample_idx in range(len(y_pred)):\n self.assertIsInstance(y_pred[sample_idx], dict)\n f1, precision, recall, _ = calculate_prediction_quality(y_train, y_pred, res.classes_list_)\n self.assertGreater(f1, 0.0)\n self.assertGreater(precision, 0.0)\n self.assertGreater(recall, 0.0)\n\n def test_predict_negative(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=3, batch_size=4, random_seed=None, udpipe_lang='ru')\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n with self.assertRaises(NotFittedError):\n _ = self.ner.predict(X_train)\n\n def test_tokenize_all_01(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=1, batch_size=4, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=None, udpipe_lang='ru', use_nlp_features=True,\n use_shapes=True)\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n self.ner.fit(X_train, y_train)\n res = self.ner.tokenize_all(X_train, y_train, shapes_vocabulary=self.ner.shapes_list_)\n self.assertIsInstance(res, tuple)\n self.assertEqual(len(res), 4)\n X_train_tokenized, y_train_tokenized, shapes_list, bounds_of_tokens_for_training = res\n self.assertIsInstance(X_train_tokenized, list)\n self.assertIsInstance(y_train_tokenized, np.ndarray)\n self.assertIs(self.ner.shapes_list_, shapes_list)\n self.assertIsInstance(bounds_of_tokens_for_training, np.ndarray)\n self.assertEqual(len(X_train_tokenized), 4)\n self.assertEqual(y_train_tokenized.shape, (len(y_train), self.ner.max_seq_length))\n for data_idx in range(3):\n self.assertIsInstance(X_train_tokenized[data_idx], np.ndarray)\n self.assertEqual(X_train_tokenized[data_idx].shape, (len(X_train), self.ner.max_seq_length))\n data_idx = 3\n self.assertIsInstance(X_train_tokenized[data_idx], np.ndarray)\n self.assertEqual(len(X_train_tokenized[data_idx].shape), 3)\n self.assertEqual(X_train_tokenized[data_idx].shape[0], len(X_train))\n self.assertEqual(X_train_tokenized[data_idx].shape[1], self.ner.max_seq_length)\n self.assertGreater(X_train_tokenized[data_idx].shape[2],\n 4 + len(UNIVERSAL_POS_TAGS) + len(UNIVERSAL_DEPENDENCIES))\n for sample_idx in range(X_train_tokenized[data_idx].shape[0]):\n n = 0\n for token_idx in range(X_train_tokenized[data_idx].shape[1]):\n if X_train_tokenized[data_idx][sample_idx][token_idx].sum() < 1e-3:\n break\n n += 1\n self.assertGreater(n, 0, msg='Sample {0}: 
additional features are not defined!'.format(sample_idx))\n for token_idx in range(n):\n self.assertAlmostEqual(X_train_tokenized[data_idx][sample_idx][token_idx][0:4].sum(), 1.0,\n msg='Sample {0}, token {1}: additional features are wrong!'.format(\n sample_idx, token_idx))\n for token_idx in range(1, n - 1):\n start_pos = 4\n end_pos = 4 + len(UNIVERSAL_POS_TAGS)\n self.assertAlmostEqual(X_train_tokenized[data_idx][sample_idx][token_idx][start_pos:end_pos].sum(), 1.0,\n msg='Sample {0}, token {1}: part of speech is not defined!'.format(\n sample_idx, token_idx))\n start_pos = 4 + len(UNIVERSAL_POS_TAGS)\n end_pos = 4 + len(UNIVERSAL_POS_TAGS) + len(UNIVERSAL_DEPENDENCIES)\n self.assertGreaterEqual(X_train_tokenized[data_idx][sample_idx][token_idx][start_pos:end_pos].sum(),\n 1.0,\n msg='Sample {0}, token {1}: dependency tag is not defined!'.format(\n sample_idx, token_idx))\n\n def test_tokenize_all_02(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=1, batch_size=4, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=None, udpipe_lang='ru', use_shapes=False,\n use_nlp_features=False)\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n self.ner.fit(X_train, y_train)\n res = self.ner.tokenize_all(X_train, y_train, shapes_vocabulary=self.ner.shapes_list_)\n self.assertIsInstance(res, tuple)\n self.assertEqual(len(res), 4)\n X_train_tokenized, y_train_tokenized, shapes_list, bounds_of_tokens_for_training = res\n self.assertIsInstance(X_train_tokenized, list)\n self.assertIsInstance(y_train_tokenized, np.ndarray)\n self.assertIs(self.ner.shapes_list_, shapes_list)\n self.assertIsInstance(bounds_of_tokens_for_training, np.ndarray)\n self.assertEqual(len(X_train_tokenized), 3)\n self.assertEqual(y_train_tokenized.shape, (len(y_train), self.ner.max_seq_length))\n for data_idx in range(3):\n self.assertIsInstance(X_train_tokenized[data_idx], np.ndarray)\n self.assertEqual(X_train_tokenized[data_idx].shape, (len(X_train), self.ner.max_seq_length))\n\n def test_serialize_positive01(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=5, batch_size=4, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=42, udpipe_lang='ru')\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n res = self.ner.fit(X_train, y_train)\n self.assertIsInstance(res, BERT_NER)\n self.assertTrue(hasattr(res, 'udpipe_lang'))\n self.assertTrue(hasattr(res, 'batch_size'))\n self.assertTrue(hasattr(res, 'lstm_units'))\n self.assertTrue(hasattr(res, 'lr'))\n self.assertTrue(hasattr(res, 'l2_reg'))\n self.assertTrue(hasattr(res, 'clip_norm'))\n self.assertTrue(hasattr(res, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(res, 'finetune_bert'))\n self.assertTrue(hasattr(res, 'max_epochs'))\n self.assertTrue(hasattr(res, 'patience'))\n self.assertTrue(hasattr(res, 'random_seed'))\n self.assertTrue(hasattr(res, 'gpu_memory_frac'))\n self.assertTrue(hasattr(res, 'max_seq_length'))\n self.assertTrue(hasattr(res, 'validation_fraction'))\n self.assertTrue(hasattr(res, 'verbose'))\n self.assertTrue(hasattr(res, 'use_shapes'))\n self.assertTrue(hasattr(res, 'use_nlp_features'))\n self.assertIsInstance(res.udpipe_lang, str)\n self.assertIsInstance(res.batch_size, int)\n self.assertIsInstance(res.lstm_units, int)\n self.assertIsInstance(res.lr, 
float)\n self.assertIsInstance(res.l2_reg, float)\n self.assertIsInstance(res.clip_norm, float)\n self.assertIsInstance(res.bert_hub_module_handle, str)\n self.assertIsInstance(res.finetune_bert, bool)\n self.assertIsInstance(res.max_epochs, int)\n self.assertIsInstance(res.patience, int)\n self.assertIsInstance(res.random_seed, int)\n self.assertIsInstance(res.gpu_memory_frac, float)\n self.assertIsInstance(res.max_seq_length, int)\n self.assertIsInstance(res.validation_fraction, float)\n self.assertIsInstance(res.verbose, bool)\n self.assertIsInstance(res.use_shapes, bool)\n self.assertIsInstance(res.use_nlp_features, bool)\n self.assertTrue(hasattr(res, 'classes_list_'))\n self.assertTrue(hasattr(res, 'shapes_list_'))\n self.assertTrue(hasattr(res, 'tokenizer_'))\n self.assertTrue(hasattr(res, 'sess_'))\n self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))\n self.assertIsInstance(res.shapes_list_, tuple)\n self.assertGreater(len(res.shapes_list_), 3)\n self.assertEqual(res.shapes_list_[-3:], ('[CLS]', '[SEP]', '[UNK]'))\n y_pred1 = res.predict(X_train)\n self.assertIsInstance(y_pred1, list)\n self.assertEqual(len(X_train), len(y_pred1))\n for sample_idx in range(len(y_pred1)):\n self.assertIsInstance(y_pred1[sample_idx], dict)\n f1, precision, recall, _ = calculate_prediction_quality(y_train, y_pred1, res.classes_list_)\n self.assertGreater(f1, 0.0)\n self.assertGreater(precision, 0.0)\n self.assertGreater(recall, 0.0)\n with tempfile.NamedTemporaryFile(mode='w', delete=True) as fp:\n self.temp_file_name = fp.name\n with open(self.temp_file_name, mode='wb') as fp:\n pickle.dump(res, fp)\n del res, self.ner\n gc.collect()\n with open(self.temp_file_name, mode='rb') as fp:\n self.ner = pickle.load(fp)\n y_pred2 = self.ner.predict(X_train)\n self.assertIsInstance(y_pred2, list)\n self.assertEqual(len(y_pred2), len(y_pred2))\n for sample_idx in range(len(y_pred2)):\n self.assertIsInstance(y_pred2[sample_idx], dict)\n self.assertEqual(set(y_pred1[sample_idx]), set(y_pred2[sample_idx]))\n for ne_type in y_pred1[sample_idx]:\n self.assertEqual(y_pred1[sample_idx][ne_type], y_pred2[sample_idx][ne_type])\n\n def test_serialize_positive02(self):\n self.ner = BERT_NER(random_seed=31, udpipe_lang='ru')\n old_udpipe_lang = self.ner.udpipe_lang\n old_batch_size = self.ner.batch_size\n old_lstm_units = self.ner.lstm_units\n old_lr = self.ner.lr\n old_l2_reg = self.ner.l2_reg\n old_clip_norm = self.ner.clip_norm\n old_bert_hub_module_handle = self.ner.bert_hub_module_handle\n old_finetune_bert = self.ner.finetune_bert\n old_max_epochs = self.ner.max_epochs\n old_patience = self.ner.patience\n old_random_seed = self.ner.random_seed\n old_gpu_memory_frac = self.ner.gpu_memory_frac\n old_max_seq_length = self.ner.max_seq_length\n old_validation_fraction = self.ner.validation_fraction\n old_verbose = self.ner.verbose\n old_use_shapes = self.ner.use_shapes\n old_use_nlp_features = self.ner.use_nlp_features\n with tempfile.NamedTemporaryFile(mode='w', delete=True) as fp:\n self.temp_file_name = fp.name\n with open(self.temp_file_name, mode='wb') as fp:\n pickle.dump(self.ner, fp)\n del self.ner\n gc.collect()\n with open(self.temp_file_name, mode='rb') as fp:\n self.ner = pickle.load(fp)\n self.assertIsInstance(self.ner, BERT_NER)\n self.assertTrue(hasattr(self.ner, 'udpipe_lang'))\n self.assertTrue(hasattr(self.ner, 'batch_size'))\n self.assertTrue(hasattr(self.ner, 'lstm_units'))\n self.assertTrue(hasattr(self.ner, 'lr'))\n self.assertTrue(hasattr(self.ner, 'l2_reg'))\n 
self.assertTrue(hasattr(self.ner, 'clip_norm'))\n self.assertTrue(hasattr(self.ner, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(self.ner, 'finetune_bert'))\n self.assertTrue(hasattr(self.ner, 'max_epochs'))\n self.assertTrue(hasattr(self.ner, 'patience'))\n self.assertTrue(hasattr(self.ner, 'random_seed'))\n self.assertTrue(hasattr(self.ner, 'gpu_memory_frac'))\n self.assertTrue(hasattr(self.ner, 'max_seq_length'))\n self.assertTrue(hasattr(self.ner, 'validation_fraction'))\n self.assertTrue(hasattr(self.ner, 'verbose'))\n self.assertTrue(hasattr(self.ner, 'use_shapes'))\n self.assertTrue(hasattr(self.ner, 'use_nlp_features'))\n self.assertEqual(self.ner.udpipe_lang, old_udpipe_lang)\n self.assertEqual(self.ner.batch_size, old_batch_size)\n self.assertEqual(self.ner.lstm_units, old_lstm_units)\n self.assertAlmostEqual(self.ner.lr, old_lr)\n self.assertAlmostEqual(self.ner.l2_reg, old_l2_reg)\n self.assertAlmostEqual(self.ner.clip_norm, old_clip_norm)\n self.assertEqual(self.ner.bert_hub_module_handle, old_bert_hub_module_handle)\n self.assertEqual(self.ner.finetune_bert, old_finetune_bert)\n self.assertEqual(self.ner.max_epochs, old_max_epochs)\n self.assertEqual(self.ner.patience, old_patience)\n self.assertAlmostEqual(self.ner.gpu_memory_frac, old_gpu_memory_frac)\n self.assertEqual(self.ner.max_seq_length, old_max_seq_length)\n self.assertAlmostEqual(self.ner.validation_fraction, old_validation_fraction)\n self.assertEqual(self.ner.verbose, old_verbose)\n self.assertEqual(self.ner.use_shapes, old_use_shapes)\n self.assertEqual(self.ner.use_nlp_features, old_use_nlp_features)\n self.assertEqual(self.ner.random_seed, old_random_seed)\n\n def test_copy_positive01(self):\n self.ner = BERT_NER(random_seed=0, udpipe_lang='ru', use_shapes=False, use_nlp_features=True)\n self.another_ner = copy.copy(self.ner)\n self.assertIsInstance(self.another_ner, BERT_NER)\n self.assertIsNot(self.ner, self.another_ner)\n self.assertTrue(hasattr(self.another_ner, 'udpipe_lang'))\n self.assertTrue(hasattr(self.another_ner, 'batch_size'))\n self.assertTrue(hasattr(self.another_ner, 'lstm_units'))\n self.assertTrue(hasattr(self.another_ner, 'lr'))\n self.assertTrue(hasattr(self.another_ner, 'l2_reg'))\n self.assertTrue(hasattr(self.another_ner, 'clip_norm'))\n self.assertTrue(hasattr(self.another_ner, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(self.another_ner, 'finetune_bert'))\n self.assertTrue(hasattr(self.another_ner, 'max_epochs'))\n self.assertTrue(hasattr(self.another_ner, 'patience'))\n self.assertTrue(hasattr(self.another_ner, 'random_seed'))\n self.assertTrue(hasattr(self.another_ner, 'gpu_memory_frac'))\n self.assertTrue(hasattr(self.another_ner, 'max_seq_length'))\n self.assertTrue(hasattr(self.another_ner, 'validation_fraction'))\n self.assertTrue(hasattr(self.another_ner, 'verbose'))\n self.assertTrue(hasattr(self.another_ner, 'use_shapes'))\n self.assertTrue(hasattr(self.another_ner, 'use_nlp_features'))\n self.assertEqual(self.ner.udpipe_lang, self.another_ner.udpipe_lang)\n self.assertEqual(self.ner.batch_size, self.another_ner.batch_size)\n self.assertEqual(self.ner.lstm_units, self.another_ner.lstm_units)\n self.assertAlmostEqual(self.ner.lr, self.another_ner.lr)\n self.assertAlmostEqual(self.ner.l2_reg, self.another_ner.l2_reg)\n self.assertAlmostEqual(self.ner.clip_norm, self.another_ner.clip_norm)\n self.assertEqual(self.ner.bert_hub_module_handle, self.another_ner.bert_hub_module_handle)\n self.assertEqual(self.ner.finetune_bert, self.another_ner.finetune_bert)\n 
self.assertEqual(self.ner.max_epochs, self.another_ner.max_epochs)\n self.assertEqual(self.ner.patience, self.another_ner.patience)\n self.assertEqual(self.ner.random_seed, self.another_ner.random_seed)\n self.assertAlmostEqual(self.ner.gpu_memory_frac, self.another_ner.gpu_memory_frac)\n self.assertEqual(self.ner.max_seq_length, self.another_ner.max_seq_length)\n self.assertAlmostEqual(self.ner.validation_fraction, self.another_ner.validation_fraction)\n self.assertEqual(self.ner.verbose, self.another_ner.verbose)\n self.assertEqual(self.ner.use_shapes, self.another_ner.use_shapes)\n self.assertEqual(self.ner.use_nlp_features, self.another_ner.use_nlp_features)\n\n def test_copy_positive02(self):\n base_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n self.ner = BERT_NER(finetune_bert=False, max_epochs=3, batch_size=4, max_seq_length=128, gpu_memory_frac=0.9,\n validation_fraction=0.3, random_seed=None, udpipe_lang='ru')\n X_train, y_train = load_dataset_from_json(os.path.join(base_dir, 'true_named_entities.json'))\n self.ner.fit(X_train, y_train)\n self.another_ner = copy.copy(self.ner)\n self.assertIsInstance(self.another_ner, BERT_NER)\n self.assertIsNot(self.ner, self.another_ner)\n self.assertTrue(hasattr(self.another_ner, 'udpipe_lang'))\n self.assertTrue(hasattr(self.another_ner, 'batch_size'))\n self.assertTrue(hasattr(self.another_ner, 'lstm_units'))\n self.assertTrue(hasattr(self.another_ner, 'lr'))\n self.assertTrue(hasattr(self.another_ner, 'l2_reg'))\n self.assertTrue(hasattr(self.another_ner, 'clip_norm'))\n self.assertTrue(hasattr(self.another_ner, 'bert_hub_module_handle'))\n self.assertTrue(hasattr(self.another_ner, 'finetune_bert'))\n self.assertTrue(hasattr(self.another_ner, 'max_epochs'))\n self.assertTrue(hasattr(self.another_ner, 'patience'))\n self.assertTrue(hasattr(self.another_ner, 'random_seed'))\n self.assertTrue(hasattr(self.another_ner, 'gpu_memory_frac'))\n self.assertTrue(hasattr(self.another_ner, 'max_seq_length'))\n self.assertTrue(hasattr(self.another_ner, 'validation_fraction'))\n self.assertTrue(hasattr(self.another_ner, 'verbose'))\n self.assertTrue(hasattr(self.another_ner, 'use_shapes'))\n self.assertTrue(hasattr(self.another_ner, 'use_nlp_features'))\n self.assertTrue(hasattr(self.another_ner, 'classes_list_'))\n self.assertTrue(hasattr(self.another_ner, 'shapes_list_'))\n self.assertTrue(hasattr(self.another_ner, 'tokenizer_'))\n self.assertTrue(hasattr(self.another_ner, 'sess_'))\n self.assertEqual(self.ner.udpipe_lang, self.another_ner.udpipe_lang)\n self.assertEqual(self.ner.batch_size, self.another_ner.batch_size)\n self.assertEqual(self.ner.lstm_units, self.another_ner.lstm_units)\n self.assertAlmostEqual(self.ner.lr, self.another_ner.lr)\n self.assertAlmostEqual(self.ner.l2_reg, self.another_ner.l2_reg)\n self.assertAlmostEqual(self.ner.clip_norm, self.another_ner.clip_norm)\n self.assertEqual(self.ner.bert_hub_module_handle, self.another_ner.bert_hub_module_handle)\n self.assertEqual(self.ner.finetune_bert, self.another_ner.finetune_bert)\n self.assertEqual(self.ner.max_epochs, self.another_ner.max_epochs)\n self.assertEqual(self.ner.patience, self.another_ner.patience)\n self.assertEqual(self.ner.random_seed, self.another_ner.random_seed)\n self.assertAlmostEqual(self.ner.gpu_memory_frac, self.another_ner.gpu_memory_frac)\n self.assertEqual(self.ner.max_seq_length, self.another_ner.max_seq_length)\n self.assertAlmostEqual(self.ner.validation_fraction, self.another_ner.validation_fraction)\n self.assertEqual(self.ner.verbose, 
self.another_ner.verbose)\n self.assertEqual(self.ner.use_shapes, self.another_ner.use_shapes)\n self.assertEqual(self.ner.use_nlp_features, self.another_ner.use_nlp_features)\n self.assertIs(self.ner.classes_list_, self.another_ner.classes_list_)\n self.assertIs(self.ner.shapes_list_, self.another_ner.shapes_list_)\n self.assertIs(self.ner.tokenizer_, self.another_ner.tokenizer_)\n self.assertIs(self.ner.sess_, self.another_ner.sess_)\n\n def test_calculate_bounds_of_named_entities(self):\n bounds_of_tokens = [(0, 2), (2, 5), (5, 8), (8, 10), (11, 16), (17, 20), (20, 22), (22, 26), (26, 27), (28, 31),\n (31, 34), (34, 37), (38, 48), (49, 52), (52, 54), (55, 57), (58, 59), (59, 61), (61, 63),\n (64, 70), (71, 83), (84, 87), (87, 90), (90, 93), (93, 95), (95, 98), (98, 99)]\n classes_list = ('LOCATION', 'ORG', 'PERSON')\n labels_of_tokens = [0, 0, 2, 1, 1, 2, 1, 0, 0, 0, 4, 3, 0, 6, 5, 5, 5, 0, 5, 5, 0, 2, 2, 3, 3, 6, 5]\n true_entities = {\n 'LOCATION': [(5, 16), (17, 22), (84, 87), (87, 90)],\n 'ORG': [(31, 37), (90, 95)],\n 'PERSON': [(49, 59), (61, 70), (95, 99)]\n }\n calc_entities = BERT_NER.calculate_bounds_of_named_entities(bounds_of_tokens, classes_list, labels_of_tokens)\n self.assertIsInstance(calc_entities, dict)\n self.assertEqual(set(true_entities.keys()), set(calc_entities.keys()))\n for entity_type in true_entities:\n self.assertEqual(true_entities[entity_type], calc_entities[entity_type])\n\n def test_get_shape_of_string_positive01(self):\n src = '##чники'\n dst = 'a'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_positive02(self):\n src = 'уже'\n dst = 'a'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_positive03(self):\n src = 'К'\n dst = 'A'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_positive04(self):\n src = 'Однако'\n dst = 'Aa'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_positive05(self):\n src = '66–67'\n dst = 'D-D'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_positive06(self):\n src = '[UNK]'\n dst = '[UNK]'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_positive07(self):\n src = '…'\n dst = 'U'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_positive08(self):\n src = ','\n dst = 'P'\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_shape_of_string_negative(self):\n src = ''\n dst = ''\n self.assertEqual(dst, BERT_NER.get_shape_of_string(src))\n\n def test_get_subword_ID_positive01(self):\n src = '##чники'\n dst = 2\n self.assertEqual(dst, BERT_NER.get_subword_ID(src))\n\n def test_get_subword_ID_positive02(self):\n src = 'Однако'\n dst = 3\n self.assertEqual(dst, BERT_NER.get_subword_ID(src))\n\n def test_get_subword_ID_positive03(self):\n src = '[CLS]'\n dst = 0\n self.assertEqual(dst, BERT_NER.get_subword_ID(src))\n\n def test_get_subword_ID_positive04(self):\n src = '[SEP]'\n dst = 1\n self.assertEqual(dst, BERT_NER.get_subword_ID(src))\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n"
] |
[
[
"numpy.random.uniform",
"numpy.array"
]
] |
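The row above pairs a test module with the two numpy calls it contains (numpy.random.uniform, numpy.array). As a hedged aside, the sketch below shows one way such an apis column could be reconstructed from the code column: resolve import aliases, then collect the dotted names of attribute calls. This is a hypothetical illustration, not the dataset's documented extraction pipeline; the helper name extract_api_calls is invented here.

import ast

def extract_api_calls(source: str) -> set:
    """Collect dotted names of calls whose root is an imported module or object."""
    tree = ast.parse(source)

    # Map local alias -> fully qualified name, e.g. "np" -> "numpy".
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for name in node.names:
                aliases[name.asname or name.name] = name.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            for name in node.names:
                aliases[name.asname or name.name] = node.module + "." + name.name

    def dotted(expr):
        # Turn np.random.uniform into ["np", "random", "uniform"].
        parts = []
        while isinstance(expr, ast.Attribute):
            parts.append(expr.attr)
            expr = expr.value
        if isinstance(expr, ast.Name):
            parts.append(expr.id)
            return list(reversed(parts))
        return None

    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            parts = dotted(node.func)
            if parts and parts[0] in aliases:
                calls.add(".".join([aliases[parts[0]]] + parts[1:]))
    return calls

# Reproduces {"numpy.array", "numpy.random.uniform"} for a snippet that uses
# "import numpy as np" together with np.array(...) and np.random.uniform(...).
print(extract_api_calls("import numpy as np\nx = np.array(np.random.uniform(size=3))"))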
VictoriaSugrue/sheepclock
|
[
"b0eaec0b96afcc35f0d60982eb3d1215ea329d64"
] |
[
"annotate_updated.py"
] |
[
"import os\nimport glob\nimport numpy as np\nimport pandas as pd\n\nfilenames = [os.path.basename(x) for x in glob.glob(\"analysis_raw_results/*.bed\")]\n\nmetadata = pd.read_table('human_factor_full_QC.txt')\n\nfor file in filenames:\n temp_name = os.path.splitext(file)[0]\n df_temp = pd.read_csv(\"analysis_raw_results/\"+str(file),sep=\"\\t\",header=None)\n df_temp.columns = ['chromCG','CGstart','CGend','CGid','filename','bed_col_1','bed_col_2','bed_col_3','bed_col_4','bed_col_5','bed_col_6','bed_col_7','bed_col_8','bed_col_9','bed_col_10']\n df_temp[\"DCid\"] = df_temp[\"filename\"].str.replace('human_factor_split/human_factor_[0-9][0-9][0-9]/|_sort_peaks.narrowPeak.bed','').astype('int64')\n df_output = df_temp.merge(metadata, on='DCid', how='left')\n df_output = df_output.sort_values(by=['CGid'])\n df_output.to_csv(\"analysis_results/\" + str(temp_name) + \".csv\",index=False)\n"
] |
[
[
"pandas.read_table"
]
] |
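For the sheepclock row, the only recorded API is pandas.read_table, which annotate_updated.py uses to load the tab-separated QC metadata before left-merging it onto each BED result on the DCid key. Below is a self-contained illustration with synthetic in-memory data (the factor names are made up, not taken from the repo): pandas.read_table is read_csv with a tab separator as the default.

import io

import pandas as pd

# Synthetic stand-in for human_factor_full_QC.txt (tab-separated, keyed by DCid).
tsv = io.StringIO("DCid\tFactor\n123\tCTCF\n456\tGATA1\n")
metadata = pd.read_table(tsv)  # equivalent to pd.read_csv(tsv, sep="\t")
print(metadata.loc[metadata["DCid"] == 123, "Factor"].item())  # -> CTCF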
RSIA-LIESMARS-WHU/AxisLearning
|
[
"5a108860c959a200811f9643d567ca7883c74875"
] |
[
"maskrcnn_benchmark-dota/modeling/rpn/rfcos/smallerRF_wo_clsloss.py"
] |
[
"\"\"\"\nThis file contains specific functions for computing losses of FCOS\nfile\n\"\"\"\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch import nn\n\nfrom ..utils import concat_box_prediction_layers\nfrom maskrcnn_benchmark.layers import IOULoss\nfrom maskrcnn_benchmark.layers import SigmoidFocalLoss, smooth_l1_loss\nfrom maskrcnn_benchmark.modeling.matcher import Matcher\nfrom maskrcnn_benchmark.modeling.utils import cat\nfrom maskrcnn_benchmark.structures.rboxlist_ops import targets_for_locations\nfrom maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist\nimport time\n\nINF = 100000000\n\n# def onehot(label, cls_num):\n# onehot = np.zeros(self.__num_classes, dtype=np.float)\n# onehot[bbox_class_ind] = 1.0\n# uniform_distribution = np.full(self.__num_classes, 1.0 / self.__num_classes)\n# deta = 0.01\n# smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution\n\nclass FCOSLossComputation(object):\n \"\"\"\n This class computes the FCOS losses.\n \"\"\"\n\n def __init__(self, cfg):\n self.cls_loss_func = SigmoidFocalLoss(\n cfg.MODEL.FCOS.LOSS_GAMMA,\n cfg.MODEL.FCOS.LOSS_ALPHA\n )\n # self.cls_loss_func = nn.CrossEntropyLoss(size_average=False, reduce=True)\n\n self.cfg = cfg\n # we make use of IOU Loss for bounding boxes regression,\n # but we found that L1 in log scale can yield a similar performance\n # self.box_reg_loss_func = IOULoss()\n self.centerness_loss_func = nn.BCEWithLogitsLoss()\n self.num_pts = cfg.MODEL.FCOS.NUM_PTS\n\n # 2\n def prepare_targets(self, points, targets):\n # FoveaBox\n # strides=[8, 16, 32, 64, 128],\n # base_edge_list=[16, 32, 64, 128, 256],\n # scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),\n if self.cfg.MODEL.FCOS.SELECT_FEATURE_METHOD == \"fcos\":\n # FCOS\n object_sizes_of_interest = [\n [-1, 64],\n [64, 128],\n [128, 256],\n [256, 512],\n [512, INF],\n ]\n # object_sizes_of_interest = [\n # [-1, 32],\n # [32, 64],\n # [64, 128],\n # [128, 256],\n # [256, INF],\n # ]\n elif self.cfg.MODEL.FCOS.SELECT_FEATURE_METHOD == \"foveabox\":\n object_sizes_of_interest = [\n [-1, 64],\n [32, 128],\n [64, 256],\n [128, 512],\n [256, INF],\n ]\n elif self.cfg.MODEL.FCOS.SELECT_FEATURE_METHOD == \"all\":\n object_sizes_of_interest = [\n [-1, 64],\n [-1, 128],\n [-1, 256],\n [-1, 512],\n [-1, INF],\n ]\n normal_factor = [16, 32, 64, 128, 256]\n # normal_factor = [16, 48, 96, 192, 384]\n\n\n expanded_object_sizes_of_interest = []\n expanded_normal_factor=[]\n # p3 - p7\n for l, points_per_level in enumerate(points):\n # 2\n object_sizes_of_interest_per_level = \\\n points_per_level.new_tensor(object_sizes_of_interest[l])\n # 1 2 -> len(points_per_level) 2\n expanded_object_sizes_of_interest.append(\n object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)\n )\n normal_factor_per_level = \\\n points_per_level.new_tensor(normal_factor[l])\n # 1 2 -> len(points_per_level) 2\n expanded_normal_factor.append(\n normal_factor_per_level.expand(len(points_per_level))\n )\n\n expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)\n expanded_normal_factor = torch,cat(expanded_normal_factor, dim=0)\n\n num_points_per_level = [len(points_per_level) for points_per_level in points]\n points_all_level = torch.cat(points, dim=0)\n # batch len(locations) 1 batch len(locations) 6 \n labels, reg_targets = self.compute_targets_for_locations(\n points_all_level, targets, expanded_object_sizes_of_interest, expanded_normal_factor\n )\n\n # 对每一张图片进行处理\n for i in range(len(labels)):\n 
labels[i] = torch.split(labels[i], num_points_per_level, dim=0)\n reg_targets[i] = torch.split(reg_targets[i], num_points_per_level, dim=0)\n\n labels_level_first = []\n reg_targets_level_first = []\n for level in range(len(points)):\n labels_level_first.append(\n torch.cat([labels_per_im[level] for labels_per_im in labels], dim=0)\n )\n reg_targets_level_first.append(\n torch.cat([reg_targets_per_im[level] for reg_targets_per_im in reg_targets], dim=0)\n )\n\n return labels_level_first, reg_targets_level_first\n\n\n # 3\n def compute_targets_for_locations(self, locations, targets, object_sizes_of_interest, normal_factor):\n labels = []\n reg_targets = []\n # xs, ys = locations[:, 0], locations[:, 1]\n \n for im_i in range(len(targets)):\n # 第i张图片\n targets_per_im = targets[im_i]\n # assert targets_per_im.mode == \"xyxy\"\n bboxes = targets_per_im.bbox\n labels_per_im = targets_per_im.get_field(\"labels\")#.cpu()\n # print(labels_per_im)\n\n \n reg_targets_per_im = targets_for_locations(bboxes, locations)#.cpu()\n # torch.cuda.empty_cache()\n\n # max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]\n \n max_reg_targets_per_im = torch.abs(reg_targets_per_im[:,2:6]).max(dim=1)[0]\n # distance\n \n dist_1 = torch.sqrt(torch.pow(reg_targets_per_im[:,2],2) + torch.pow(reg_targets_per_im[:,3],2))\n dist_2 = torch.sqrt(torch.pow(reg_targets_per_im[:,4],2) + torch.pow(reg_targets_per_im[:,5],2))\n target_h = reg_targets_per_im[:,5] \n max_reg_targets_per_im = torch.stack([dist_1, dist_2, target_h], dim=1).max(dim=1)[0]\n\n # limit the regression range for each location 上下左右都要在感兴趣范围之内\n\n # len(locations) len(locations), 1\n object_sizes_of_interest= object_sizes_of_interest#.cpu()\n is_cared_in_the_level = \\\n (max_reg_targets_per_im >= object_sizes_of_interest[:, 0]) & \\\n (max_reg_targets_per_im <= object_sizes_of_interest[:, 1])\n \n # print(\"labels_per_im\", len(labels_per_im), len(bboxes), torch.min(reg_targets_per_im[:, 0].long()), torch.max(reg_targets_per_im[:, 0].long()), reg_targets_per_im[:, 0].sum())\n labels_per_im = labels_per_im[reg_targets_per_im[:, 0].long()]\n\n # 落在目标框外面label为0\n labels_per_im[reg_targets_per_im[:, 1] < 0.5 ] = 0#bg\n # 或者落在外面且s感受野不够\n labels_per_im[is_cared_in_the_level == 0] = 0#no reg\n\n # 落在框内 但是感受野不够\n labels_per_im[(reg_targets_per_im[:, 1] > 0.5) * (is_cared_in_the_level == 0)] = -1#ignore\n\n # detax1 detay1 detax2 detay2 h\n ones = torch.ones_like(reg_targets_per_im[:,2:7])\n one_minusone = torch.where(reg_targets_per_im[:,2:7]>=0, ones, -ones)#.cpu()\n \n reg_targets_per_im[:,2:7] = one_minusone*torch.pow(torch.abs(reg_targets_per_im[:,2:7])/normal_factor[1][:,None], 1/3)#.cpu()#.cpu()\n\n labels.append(labels_per_im)\n reg_targets.append(reg_targets_per_im[:,2:])\n\n return labels, reg_targets \n\n def compute_centerness_targets(self, reg_targets):\n left_right = reg_targets[:, [0, 2]]\n top_bottom = reg_targets[:, [1, 3]]\n centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \\\n (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])\n return torch.sqrt(centerness)\n\n # 1\n def __call__(self, locations, box_cls, box_regression, centerness, targets):\n \"\"\"\n Arguments:\n locations (list[BoxList])\n box_cls (list[Tensor])\n box_regression (list[Tensor])\n centerness (list[Tensor])\n targets (list[BoxList])\n\n Returns:\n cls_loss (Tensor)\n reg_loss (Tensor)\n centerness_loss (Tensor)\n \"\"\"\n # 0 fpn 第一层\n N = box_cls[0].size(0)\n num_classes = box_cls[0].size(1)#//self.num_pts\n\n # level first\n labels, reg_targets 
= self.prepare_targets(locations, targets)\n\n box_cls_flatten = []\n box_regression_flatten = []\n centerness_flatten = []\n labels_flatten = []\n reg_targets_flatten = []\n # for level\n for l in range(len(labels)):\n # batch*num_pos num_classes\n box_cls_flatten.append(box_cls[l].permute(0, 2, 3, 1).reshape(-1, num_classes))\n box_regression_flatten.append(box_regression[l].permute(0, 2, 3, 1).reshape(-1, 5))\n # layer_h, layer_w = box_cls[l].size(2), box_cls[l].size(3)\n # box_cls_flatten.append(box_cls[l].permute(0, 2, 3, 1).reshape(N, layer_h, layer_w, self.num_pts, num_classes).permute(0, 3, 1, 2, 4).reshape(-1,num_classes))\n # box_regression_flatten.append(box_regression[l].permute(0, 2, 3, 1).reshape(N, layer_h, layer_w, self.num_pts, 5).permute(0, 3, 1, 2, 4).reshape(-1,5))\n labels_flatten.append(labels[l].reshape(-1))\n reg_targets_flatten.append(reg_targets[l].reshape(-1, 6))\n centerness_flatten.append(centerness[l].reshape(-1))\n # level batch*num_pos num_classes\n box_cls_flatten = torch.cat(box_cls_flatten, dim=0)\n box_regression_flatten = torch.cat(box_regression_flatten, dim=0)\n centerness_flatten = torch.cat(centerness_flatten, dim=0)\n labels_flatten = torch.cat(labels_flatten, dim=0)\n reg_targets_flatten = torch.cat(reg_targets_flatten, dim=0)\n\n pos_inds = torch.nonzero(labels_flatten > 0).squeeze(1)\n valid_inds = torch.nonzero(labels_flatten > -1).squeeze(1)\n ignore_inds = torch.nonzero(labels_flatten == -1).squeeze(1)\n\n # wrong\n # cls_weight=torch.where(centerness_flatten==0, torch.ones_like(centerness_flatten), centerness_flatten).unsqueeze(-1)\n # cls_loss = self.cls_loss_func(\n # box_cls_flatten,#.cpu()\n # labels_flatten.int(),#,#.cpu()\n # weight = cls_weight\n # ) / (pos_inds.numel() + N) # add N to avoid dividing by a zero\n\n # true\n all_centerness_targets = reg_targets_flatten[:, -1]\n # # torch.sqrt(\n cls_weight = torch.where(all_centerness_targets==0, torch.ones_like(all_centerness_targets), all_centerness_targets).unsqueeze(-1)\n cls_weight[ignore_inds] = 0.05\n # # cls_weight=torch.where(all_centerness_targets==0, torch.full_like(all_centerness_targets, 1), all_centerness_targets).unsqueeze(-1)\n\n ''' 涉及到将感受野不够或者超过的点看做负样本/忽略样本/正样本\n 看成忽略样本涉及到定位不准确 冗余检测\n 看成负样本 conf is low'''\n # # focal loss 2*\n # cls_loss = self.cls_loss_func(\n # box_cls_flatten[valid_inds],#.cpu()\n # labels_flatten[valid_inds].int(),#.cpu()\n # weight = cls_weight[valid_inds]\n # ) / (pos_inds.numel() + N) # add N to avoid dividing by a zero\n \n \n # weight = cl\n labels_flatten[labels_flatten==-1]=0\n cls_loss = self.cls_loss_func(\n box_cls_flatten,#.cpu()\n labels_flatten.int(),#.cpu()\n weight = cls_weight ) / (pos_inds.numel() + N) # add N to avoid dividing by a zero\n # s_weight, \n # self.cls_loss_func = nn.CrossEntropyLoss(size_average=False, reduce=True)\n # cls_loss = self.cls_loss_func(\n # box_cls_flatten[pos_inds],#\n # labels_flatten[pos_inds]-1,#\n # )/ (pos_inds.numel() + N)\n\n\n box_regression_pos = box_regression_flatten[pos_inds]\n reg_targets_pos = reg_targets_flatten[pos_inds]\n\n if pos_inds.numel() > 0:\n centerness_targets_pos = reg_targets_pos[:, -1]\n\n #只预测\n reg_loss = smooth_l1_loss(\n box_regression_pos,#.cpu()\n reg_targets_pos[:, :-1],#.cpu()\n weight = centerness_targets_pos.unsqueeze(-1)#cls_weight #\n )\n\n # 一定要回归center ness\n # all \n # centerness_flatten[ignore_inds] = 0\n centerness_loss = self.centerness_loss_func(\n centerness_flatten,#.cpu()\n reg_targets_flatten[:,-1]#.cpu()\n )\n # centerness_loss = 
self.centerness_loss_func(\n # centerness_flatten[pos_inds],#.cpu()\n # centerness_targets_pos#.cpu()\n # )\n\n else:\n reg_loss = box_regression_flatten.sum()\n centerness_loss = centerness_flatten.sum()\n # .cuda()\n return cls_loss, reg_loss, centerness_loss#*0\n\n\ndef make_fcos_loss_evaluator(cfg):\n loss_evaluator = FCOSLossComputation(cfg)\n return loss_evaluator\n"
] |
[
[
"torch.abs",
"torch.cat",
"torch.sqrt",
"torch.nn.BCEWithLogitsLoss",
"torch.pow",
"torch.where",
"torch.split",
"torch.nonzero",
"torch.ones_like",
"torch.stack"
]
] |
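Several of the torch calls listed for this row come from one recurring pattern in smallerRF_wo_clsloss.py: a sign-preserving cube-root normalization of the regression targets built from torch.where, torch.ones_like, torch.abs and torch.pow. A tiny standalone illustration with made-up target values follows (the per-level normal-factor division from the original code is omitted for brevity).

import torch

reg = torch.tensor([-8.0, 27.0, 0.0])            # fake regression targets
ones = torch.ones_like(reg)
sign = torch.where(reg >= 0, ones, -ones)        # +1 / -1 per element
normalized = sign * torch.pow(torch.abs(reg), 1.0 / 3.0)
print(normalized)                                # approximately tensor([-2., 3., 0.])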
mschmidt87/VeRyPy
|
[
"eaac6e210d861441071565575750a2f0e25dfb72"
] |
[
"classic_heuristics/gapvrp.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###############################################################################\n\"\"\" This file is a part of the VeRyPy classical vehicle routing problem\nheuristic library and provides an implementation of the Fisher&Jaikumar (1981)\nheuristic, which generates an approximate solution for a VRP via solving it as\nan generalized assignment problem (GAP).\n\nThe script is callable and can be used as a standalone solver for TSPLIB \nformatted CVRPs. It has extensive dependencies: MIP solver Gurobi, built-in TSP\nsolver, and numpy and scipy for reading and preparing the problem instance.\"\"\"\n###############################################################################\n\n# Written in Python 2.7, but try to maintain Python 3+ compatibility\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom signal import signal, SIGINT, default_int_handler\nfrom collections import namedtuple\nfrom math import pi, ceil\nfrom logging import log, DEBUG, WARNING\n\nimport numpy as np\nfrom gurobipy import Model, GRB, LinExpr, GurobiError\n\n#from tsp_solvers.tsp_solver_ropt import solve_tsp_ropt as solve_tsp\n#from tsp_solvers.tsp_solver_lkh import solve_tsp_lkh as solve_tsp\nfrom tsp_solvers.tsp_solver_gurobi import solve_tsp_gurobi as solve_tsp\nfrom sweep import get_sweep_from_cartesian_coordinates, bisect_angle\nfrom cvrp_io import calculate_D\nfrom util import is_better_sol, totald\nfrom config import MAX_MIP_SOLVER_RUNTIME, MIP_SOLVER_THREADS\nfrom config import CAPACITY_EPSILON as C_EPS\nfrom config import COST_EPSILON as S_EPS\n\n__author__ = \"Jussi Rasku\"\n__copyright__ = \"Copyright 2018, Jussi Rasku\"\n__credits__ = [\"Jussi Rasku\"]\n__license__ = \"MIT\"\n__maintainer__ = \"Jussi Rasku\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n\n\n# These hard coded parameters define how the relaxation is adjusted if the \n# GAP solution is not L feasible.\nL_MPLR_DEFAULT = 1.0\nL_ADAPTIVE_MPLR_INIT = 0.85 \nL_ADAPTIVE_MPLR_INC = 0.85\nL_ADAPTIVE_MPLR_MAX_TRIES = 3\nINCREASE_K_ON_FAILURE_UPTO = 1.1 # = 10% increase to K (or min of 1)\n\ndef _decision_variables_to_assignments(m, Y_ik, N, K):\n \"\"\" Convert the decision variables in m for keys Y_ik to assignments of \n customers i==2..N (0 is the depot) to the routes k=1..K . \n \n TODO: there is probably a neat numpy trick to get node and k indices\n out of the decision variable array. 
For now just use nested loops,\n but a cleverer way would problably be faster.\n However, \"premature optimization is the root of all evil\", so profile\n first, and modify only after verifying it to be a real bottleneck.\"\"\"\n \n assignments = []\n Y_ik_values = m.getAttr('x', Y_ik)\n for k in range(K):\n route_nodes = []\n for i in range(1, N):\n if Y_ik_values[i,k]:\n route_nodes.append(i)\n assignments.append(route_nodes)\n return assignments\n \ndef _solve_gap(N, D_s, d, C, K, L=None, L_ctr_multipiler=1.0):\n \"\"\"A helper function that Solves VRP as a Generalized Assignment Problem\n to assign customers to vehicles with a objective function that the delivery\n cost as described in (Fisher & Jaikumar 1981).\n \n D_s is the distance matrix complemented with distances to K seed points.\n That is:\n [D_0]\n D_s = [S ], where D_0 is the first row of the full distance matrix and\n S is the distances from seed points to node points\n d is the list of customer demands with d[0]=0 being the depot node\n C is the capacity of the K identical trucks\n \n also, additional (and optional) constraints can be given:\n \n L is the maximum tour cost/duration/length\n L_ctr_multipiler allows iteratively adjusting the max route cost\n approximation constraint in order to avoid producing assignments that are\n ruled infeasible by the feasibility checker. \n --\n Fisher, M. L. and Jaikumar, R. (1981), A generalized assignment \n heuristic for vehicle routing. Networks, 11: 109-124.\n \"\"\"\n \n ## build the cost approximation matrix \"insertion_cost\"\n \n # it is ~ a insertion cost matrix, where each coefficient is the cost \n # of inserting customer i to the route consisting visit to seed k.\n #\n # we assume that distances are symmetric, but if asymmetric\n # distances are to be used, take min\n # d_{ik} = min(c_{0i}+c_{i{i_k}}+c_{i{i_k}},\n # c_{0{i_k}}+c_[{i_k}i}+c_{i0})\n # -(c_{0{i_k}}+c_{{i_k}0})\n \n m = Model(\"GAPCVRP\")\n \n # the order of the keys is important when we interpret the results\n Y_ik_keys = [(i,k) for k in range(K) for i in range(1,N)]\n \n # delivery cost approximation coefficients for the objective function\n insertion_cost = {(i,k): D_s[0,i]+D_s[k,i]-D_s[k,0] \\\n for i,k in Y_ik_keys}\n \n # variables and the objective\n Y_ik = m.addVars(Y_ik_keys, obj=insertion_cost, vtype=GRB.BINARY, name='y') \n \n ## constraints\n\n # c1, the capacity constraint and optional tour cost constraint cl\n approx_route_cost_constraints = [] \n if C: c1_coeffs = d[1:]\n for k in range(K):\n ck_vars = [Y_ik[i,k] for i in range(1,N)] \n if C:\n c1_lhs = LinExpr(c1_coeffs,ck_vars)\n #c1_lhs = Y_ik.prod(c1_coeffs, '*', k)\n m.addConstr(c1_lhs <= C, \"c1_k%d\"%k) \n \n # ct = optional tour cost constraints\n # it is a bit hidden, but the additional side constraint can be found\n # from Fisher & Jaikumar (1981) p121, 2. 
paragraph.\n # However, for whatever reason, this does not seem to produce the \n # same results as reported in their paper as the constraint easily\n # starts to make the problem infeasible and the exact mechanism to \n # recover that is not specified in the paper.\n if L:\n ct_coeffs = [insertion_cost[(i,k)]*L_ctr_multipiler for i in range(1,N)]\n ct_lhs = LinExpr(ct_coeffs,ck_vars)\n #ct_lhs = Y_ik.prod(ct_coeffs, '*', k)\n constr_l = m.addConstr(ct_lhs <= L, \"cl_k%d\"%k)\n approx_route_cost_constraints.append(constr_l)\n\n # c2, the assignment constraints \n for i in range(1,N):\n # c2_1..N every node assigned only to 1 route\n m.addConstr(Y_ik.sum(i, '*') == 1, \"c1_i%d\"%i) \n \n ## update the model and solve \n m._vars = Y_ik \n m.modelSense = GRB.MINIMIZE\n m.update()\n #m.write(\"gapvrp_model.lp\")\n # disable output\n m.setParam('OutputFlag', 0) \n m.setParam('Threads', MIP_SOLVER_THREADS)\n # REMOVEME\n m.setParam('MIPFocus', 3)\n m.setParam('TimeLimit', MAX_MIP_SOLVER_RUNTIME)\n\n m.optimize()\n\n # restore SIGINT callback handler which is changed by gurobipy\n signal(SIGINT, default_int_handler)\n \n if __debug__:\n log(DEBUG-1,\"Gurobi runtime = %.2f\"%m.Runtime)\n \n if m.Status == GRB.OPTIMAL:\n return _decision_variables_to_assignments(m, Y_ik, N, K)\n elif m.Status == GRB.INFEASIBLE and L:\n # relax the model and allow violating minimal number of the approximate \n # route length constraints\n pens = [1.0]*len(approx_route_cost_constraints)\n m.feasRelax(1, True, None, None, None, approx_route_cost_constraints, pens)\n # TODO: not sure if feasRelax can change Status, test it someday\n if m.Status == GRB.INTERRUPTED:\n raise KeyboardInterrupt() # pass it on\n m.optimize()\n\n # restore SIGINT callback handler which is changed by gurobipy\n signal(SIGINT, default_int_handler)\n\n status = m.Status \n if __debug__:\n log(DEBUG-1, \"Relaxed problem Gurobi runtime = %.2f\"%m.Runtime)\n if status == GRB.OPTIMAL:\n return _decision_variables_to_assignments(m, Y_ik, N, K)\n elif status == GRB.TIME_LIMIT:\n raise GurobiError(10023, \"Gurobi timeout reached when attempting to solve relaxed SCPCVRP\")\n elif m.Status == GRB.INTERRUPTED:\n raise KeyboardInterrupt() # pass it on\n return None\n elif m.Status == GRB.TIME_LIMIT:\n raise GurobiError(10023, \"Gurobi timeout reached when attempting to solve GAP\")\n elif m.Status == GRB.INTERRUPTED:\n raise KeyboardInterrupt() # pass it on\n return None\n \n_Cone = namedtuple('_Cone', ['phi1', 'phi2', 'demand', 'nodes'])\ndef _sweep_seed_points(points, D, d, C, K, trial=0):\n \"\"\"A seed point generation function that implements the rule used in\n Fisher and Jaikumar (1981) to select the seed customers for the delivery\n cost approximation calculation in their VRP heuristic. It is assumed that\n all customers are located on a plane with euclidean distances D between\n them and that the truck capacity C is the the same for all vehicles. 
\n \"\"\"\n \n ## Assume planar case and convert to a sweep \n sweep = get_sweep_from_cartesian_coordinates(points)\n\n ## Append each of the K customer cones into K groups of concecutive cones\n \n if C:\n alpha = sum(d)/float(K*C)\n group_target = alpha*C # = sum(d)/K\n EPS = C_EPS\n else: #only L set?\n total_sweep_len = sum( D[int(sweep[2][i-1]),int(sweep[2][i])]\n for i in range(len(sweep[2])) )\n group_target = total_sweep_len/K\n EPS = S_EPS\n \n if __debug__:\n log(DEBUG-2,\"Cone group demand/cost target = %.2f\"%group_target )\n\n #for start_cone in range(len(cones)): \n start_cone_idx = trial\n \n grouped_cones = []\n group_start_ray = None\n group_end_ray = None\n group_cum = 0.0\n group_nodes = []\n\n prev_node_i = None\n prev_node_rho = None\n prev_node_phi = sweep[0][start_cone_idx-1]\n if start_cone_idx==0:\n prev_node_phi-=2*pi \n prev_ray = None\n \n # iterate over all (phi,rho,node_idx) staring from start_cone_idx\n # and doing it twice\n for circle_view in (sweep.T[start_cone_idx:], sweep.T[:start_cone_idx+1]):\n for node_phi,node_rho,i in circle_view:\n i = int(i) # is numpy float\n if (node_phi<prev_node_phi):\n node_phi+=2*pi\n ray = bisect_angle(prev_node_phi,node_phi)\n \n if prev_ray is None:\n group_start_ray = ray\n if __debug__:\n log(DEBUG-2,\"First node %d cone sets group_start_ray=%.2f\"%(i,group_start_ray))\n else:\n # calculate if the entire cone (~customer) can be added to the group\n # or if only a fraction is needed to fill the group.\n if C:\n cone_fraction = 1.0\n if d[prev_node_i]!=0:\n cone_fraction = min(1.0, (group_target-group_cum)/d[prev_node_i])\n cone_wt = cone_fraction*d[prev_node_i]\n else:\n cone_fraction = min(1.0, (group_target-group_cum)/(D[prev_node_i,i]))\n cone_wt = cone_fraction*D[prev_node_i,i]\n \n group_cum+=cone_wt\n group_nodes.append( (prev_node_rho,prev_node_i,\n d[prev_node_i] if C else D[prev_node_i,i]) ) \n \n if __debug__:\n if C:\n log(DEBUG-3,\"Node %d, added %.2f %% of demand (%.2f)\" %\\\n (prev_node_i, cone_fraction*100, d[prev_node_i]))\n else:\n log(DEBUG-3,\"Node %d, added %.2f %% of cost (%.2f)\" %\\\n (prev_node_i, cone_fraction*100, 0.5*D[prev_node_i,i]))\n log(DEBUG-2,\"Group %.2f %% full\"%\\\n (group_cum/group_target*100.0))\n \n if (group_target-group_cum)<EPS: \n group_end_ray = bisect_angle(prev_ray, ray, cone_fraction) \n # group is full, store it\n grouped_cones.append( _Cone(group_start_ray,group_end_ray,\n group_cum, group_nodes) )\n \n if __debug__:\n log(DEBUG-2,\"Node %d cone sets group_end_ray=%.2f\"%\\\n (prev_node_i,group_end_ray))\n log(DEBUG-2,\"Group completed!\\n\")\n \n # next group \n group_start_ray = group_end_ray\n group_nodes = []\n group_cum = 0\n \n if cone_fraction<1.0:\n if C:\n rmdr_wt = (1.0-cone_fraction)*d[prev_node_i]\n else:\n rmdr_wt = (1.0-cone_fraction)*D[prev_node_i,i]\n \n group_cum += rmdr_wt\n group_nodes.append((prev_node_rho,prev_node_i,\n d[prev_node_i] if C else D[prev_node_i,i]))\n\n if __debug__:\n if len(grouped_cones)<K:\n log(DEBUG-2,\"Node %d cone sets group_start_ray=%.2f\"%\\\n (prev_node_i,group_start_ray))\n \n # the group now spans upto this\n group_end_ray = ray\n \n if __debug__:\n if len(grouped_cones)<K:\n log(DEBUG-2,\"Node %d cone grows group to ray=%.2f\"%\\\n (prev_node_i,group_end_ray))\n \n prev_ray = ray\n prev_node_i = i\n prev_node_rho = node_rho\n prev_node_phi = node_phi\n\n ## get seed form the resulting K merged cones\n seed_points = np.zeros((K,2), dtype=np.float64)\n \n depot_x = points[0][0]\n depot_y = points[0][1]\n for k, 
grouped_cone in enumerate(grouped_cones):\n if __debug__:\n log(DEBUG-3,\" ===========================================\")\n log(DEBUG-3,\" #%d %s\"%(k, str(grouped_cone)))\n log(DEBUG-3,\" ===========================================\\n\")\n \n # Find an arc that splits the k-cone in a way that the linear demand \n # under the arc is \"around\" 0.75 (the exact definition is in the\n # Fisher & Jaikumar (1981) paper. Begin by sorting by distance from\n # the depot and grow arc as long as weight sum is under the limit.\n seed_rho = 0\n grow_arc_wt = 0\n weight_target = 0.75*group_target # 0.75{\\labmda}b\n for cr,ci,cwt in sorted(grouped_cone.nodes):\n if grow_arc_wt+cwt>weight_target:\n # take a fraction of the weight just outside the arc\n seed_rho+=((weight_target-grow_arc_wt)/cwt)*(cr-seed_rho)\n break\n else:\n grow_arc_wt+=cwt\n seed_rho=cr\n \n # Calculate the actual seed point position\n seed_phi = bisect_angle(grouped_cone.phi1,grouped_cone.phi2)\n seed_points[k,0] = depot_x+seed_rho*np.cos(seed_phi)\n seed_points[k,1] = depot_y+seed_rho*np.sin(seed_phi)\n return seed_points.tolist()\n\ndef _kmeans_seed_points(points, D, d, C, K, trial=0):\n \"\"\"A seed point generation function that puts the seed points at customer\n node point cluster centers using k-Means clustering.\"\"\"\n \n from sklearn.cluster import KMeans\n kmeans = KMeans(n_clusters=K, random_state=trial).fit(points[1:])\n return kmeans.cluster_centers_.tolist()\n \ndef _end_of_thoroughfares_seed_points(points, D, d, C, K, trial=0):\n \"\"\"A seed point generation function that automates the human assisted \n idea presented in Fisher and Jaikumar (1981) involving placing the seed \n points to the end of throughtfares leaving from the depot. A DBSCAN\n clustering is made and the seeds are selected among non-core points. Non-\n core points should be, due to the operating principle of DBSCAN, at the\n ends of long cluster \"arms\". By selecting the non-core points farthest from\n the depot and previously selected seeds, we should get a set of seed points\n closely following the Fisher and Jaikumar (1981) idea: \"customers\n often lie along radial corridors corresponding to major thoroughfares, and\n the most distant ... along these corridors are natural seed customers\".\n Fisher and Jaikumar (1981) presented the idea interactive computer systems\n in mind, whereas this implementation is automatic.\n \n TODO: in practice, the results are underwhelming. Instead, one should do\n 1d clustering for phis and then choose the farthest point of each\n \"Sweep cluster\".\n \n parameters:\n - points, D, d, C, K as before\n - trial can be used to get different clusterings from the DBSCAN algorithm.\n the DBSCAN min_size is 2,2,3,3,4,4,... for trial 0,1,2,3,4,5... .\n The inititial eps is determined by getting the median distance of the \n nn=2., 3., 2., 3., 3., 4., 3,... nearest neightbour of all nodes \n depending if the trial is 0,1,2,3,4,5,6,7.. following the formula\n \n nn=2+trial%2+int(trial/4))\n \n The seed points are selected among the non-core points S_nc by\n maximizing the squared distances . If it \n happens that |S_nc|<K, all non-core points are included and the rest\n of the seed points clustered points are \n enough non-core points are found. \n \n WARNING: This seed heuristic may return None seeds as the existence of non-\n core points cannot be guranteed.\n \"\"\"\n \n from sklearn.cluster import DBSCAN\n from util import produce_nn_list\n \n # use a heuristic to get eps that finds all 2. 
closest nodes and \n # uses the median distance of those as the eps\n N = len(d)\n nnD = produce_nn_list(D)\n nn = 2+trial%2+int(trial/4)\n nn2l = [nnS[nn][0] for nnS in nnD]\n nn2l.sort()\n min_size = 3#+int(trial/2)\n eps = nn2l[int(N/2)]\n\n ## Get non-core DBSCAN points \n if __debug__:\n log(DEBUG-2,\"Doing DBSCAN with eps =\", eps, \" min_size =\",min_size)\n db = DBSCAN(eps=eps, min_samples=min_size).fit(points) \n outliers_mask = db.labels_ == -1\n clustered_mask = db.labels_ != -1\n core_samples_mask = np.zeros(N, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n # we are interested of the nodes at the fringes of the clusters \n candidate_mask = clustered_mask^core_samples_mask\n candidate_idxs = np.where(candidate_mask)[0].tolist()\n candidates_type = \"cluster non-core\"\n \n if __debug__:\n log(DEBUG-3,\"DBSCAN labels = %s\"%str(zip(range(N),db.labels_)))\n log(DEBUG-3,\"DBSCAN core = %s\"%str(db.core_sample_idxs_))\n log(DEBUG-2,\"Select %d seed nodes from non-core nodes %s.\"%\n (min(len(candidate_idxs),K), str(candidate_idxs)))\n seeds = []\n selected_seeds_mask = np.zeros(N, dtype=bool)\n # make depot like a seed -> maximize distance from it\n selected_seeds_mask[0] = True \n if len(candidate_idxs)<=K:\n # if all candidates are needed, add them without checking the distances\n for seed_idx in candidate_idxs:\n seeds.append( points[seed_idx] )\n if __debug__:\n log(DEBUG-2,\"Selecting n%d (%.2f, %.2f) that is a %s point to be a seed\"%\n (seed_idx,points[seed_idx][0],points[seed_idx][1],candidates_type))\n selected_seeds_mask[seed_idx] = True\n candidate_idxs = []\n \n used_core_points = False\n while len(seeds)<K:\n if not candidate_idxs:\n if not used_core_points:\n # ran out of non-core candidates. Use clustered as candidates\n candidate_mask = core_samples_mask\n candidate_idxs = np.where(core_samples_mask)[0].tolist()\n candidates_type = \"cluster core\"\n used_core_points = True\n \n if __debug__:\n log(DEBUG-3,\"Ran out of non-core nodes, select %d seed nodes from core nodes %s\"%\n (min(len(candidate_idxs), K-len(seeds)), str(candidate_idxs)))\n else:\n candidate_mask = outliers_mask\n candidate_idxs = np.where(outliers_mask)[0].tolist()\n candidates_type = \"outliers\"\n \n if __debug__:\n log(DEBUG-3, \"Ran out of core and non-core nodes, select %d seed nodes from outlier nodes %s\"%\n (K-len(seeds), str(candidate_idxs)))\n \n # maximize the distance to other seeds and depot\n if not seeds:\n D_to_seeds = D[selected_seeds_mask,candidate_mask]\n else:\n D_to_seeds = np.sum( np.sqrt((D[selected_seeds_mask,:])[:,candidate_mask]), axis=0)\n seed_idx = candidate_idxs[np.argmax( D_to_seeds )] \n selected_seeds_mask[seed_idx] = True\n seeds.append( points[seed_idx] ) \n \n if __debug__:\n log(DEBUG-2, \"Selecting n%d (%.2f, %.2f) that is a %s point to be a seed\"%\n (seed_idx,points[seed_idx][0],points[seed_idx][1], candidates_type))\n \n # prevent selecting it again \n candidate_mask[seed_idx] = False\n candidate_idxs.remove(seed_idx)\n\n return seeds\n \ndef _large_demand_seed_points(points, D, d, C, K, trial=0):\n \"\"\"A seed point generation function that automates the human assisted \n idea presented in Fisher and Jaikumar (1981)\n \"\"\"\n # make sure we are dealing with np arrays here\n np_d = np.array(d)\n N = len(d)\n \n # we are look mainly the large d nodes where only 1 fits on a route\n can_fit_only_1_mask = np_d > (0.5*C)\n candidate_d_mask = can_fit_only_1_mask.copy()\n candidate_d_idxs = np.where(can_fit_only_1_mask)[0].tolist()\n \n if 
trial:\n # in addition, add as many OTHER largest d ones as trial is\n not_over_half_idxs = np.where( ~candidate_d_mask )[0].tolist()\n sorted_d = [(d[i], i) for i in not_over_half_idxs]\n sorted_d.sort(reverse=True)\n sorted_d_idxs = list(zip(*sorted_d)[1])\n additional_large_d_idxs = sorted_d_idxs[max(0, trial-N):min(N,trial)]\n candidate_d_idxs+=additional_large_d_idxs\n candidate_d_mask[additional_large_d_idxs] = True\n \n large_d_mask = np.copy(candidate_d_mask)\n \n if __debug__: \n log(DEBUG-2, \"Select %d seed nodes from large demand nodes %s\"%\n (min(len(candidate_d_idxs),K), str(candidate_d_idxs)))\n \n seeds = []\n selected_seeds_mask = np.zeros(len(d), dtype=bool)\n # make depot like a seed -> maximize distance from it\n selected_seeds_mask[0] = True\n if len(candidate_d_idxs)<=K:\n # if all candidates are needed, add them without checking the distances\n for seed_idx in candidate_d_idxs:\n seeds.append( points[seed_idx] ) \n selected_seeds_mask[seed_idx] = True \n if __debug__:\n log(DEBUG-2,\"Selecting n%d (%.2f, %.2f) that %s to be a seed\"%\\\n (seed_idx,points[seed_idx][0],points[seed_idx][1],\n \"fills over the half of the capacity\" if can_fit_only_1_mask[seed_idx]\n else \"is within \"+str(trial)+\" largest demands\"))\n candidate_d_idxs = []\n \n \n select_from_non_large = False\n while len(seeds)<K:\n if not candidate_d_idxs:\n candidate_d_mask = ~large_d_mask\n candidate_d_mask[0]=False\n candidate_d_idxs = np.where(candidate_d_mask)[0].tolist()\n select_from_non_large = True\n \n if __debug__:\n log(DEBUG-2,\"Ran out of nodes with large demand, select %d seed nodes from rest of the nodes %s using inter seed distances weighted by the node demand\"%\n (min(len(candidate_d_idxs), K-len(seeds)), str(candidate_d_idxs)))\n \n # maximize the distance to other seeds and depot\n if not seeds:\n D_to_seeds = D[selected_seeds_mask,candidate_d_mask]\n else:\n D_to_seeds = np.sum( np.sqrt((D[selected_seeds_mask,:])[:,candidate_d_mask]), axis=0)\n if select_from_non_large:\n # multiply by demand\n D_to_seeds = np.multiply(D_to_seeds,np_d[candidate_d_mask]/C)\n \n seed_idx = candidate_d_idxs[np.argmax( D_to_seeds )] \n selected_seeds_mask[seed_idx] = True\n seeds.append( points[seed_idx] ) \n \n if __debug__:\n if can_fit_only_1_mask[seed_idx]:\n candidates_type = \"fills over the half of the capacity\"\n elif large_d_mask[seed_idx]:\n candidates_type = \"is within \"+str(trial)+\" largest demands\"\n else:\n candidates_type = \"when weighted by demand has largest distance from other seeds\"\n log(DEBUG-2,\"Selecting a node n%d (%.2f, %.2f) that %s to be a seed\"%\\\n (seed_idx,points[seed_idx][0],points[seed_idx][1], candidates_type))\n \n # prevent selecting it again \n candidate_d_mask[seed_idx] = False\n candidate_d_idxs.remove(seed_idx)\n\n return seeds\n \ndef gap_init(points, D, d, C, L=None, st=None, K=None, minimize_K=True,\n find_optimal_seeds=True,\n seed_method=\"cones\",\n seed_edge_weight_type='EUC_2D',\n use_adaptive_L_constraint_weights=True,\n increase_K_on_failure=False):\n #REMOVEME, disable!\n #increase_K_on_failure=True):\n \"\"\" An implementation of a three phase cluster-first-route-second CVRP\n construction / route initialization algorithm. The first two phases involve\n the clustering. First, a seed point is generated for each route, which is\n then used in approximating customer node service costs in solving\n generalized assignment problem (GAP) relaxation of the VRP. The resulting\n assignments are then routed using a TSP solver. 
The algorithm has been \n first proposed in (Fisher and Jaikumar 1981).\n \n The algorithm assumes that the problem is planar and this implementation\n allows seed in two ways: \n * seed_method=\"cones\", the initialization method of Fisher and Jaikumar\n (1981) which can be described as Sweep with fractional distribution of\n customer demand and placing the seed points approximately to the center\n of demand mass of created sectors.\n * seed_method=\"kmeans\", intialize seed points to k-means cluster centers.\n * seed_method=\"large_demands\", according to Fisher and Jaikumar (1981) \n \"Customers for which d_i > 1/2 C can also be made seed customers\". \n However applying this rule relies on human operator who then decides\n the intuitively best seed points. This implementation selects the\n seed points satisfying the criteria d_i>mC, where m is the fractional\n capacity multipier, that are farthest from the depot and each other.\n The m is made iteratively smaller if there are no at least K seed point\n candidates.\n * seed_method=\"ends_of_thoroughfares\", this option was descibed in \n (Fisher and Jaikumar 1981) as \"Most distant customers at the end of\n thoroughfares leaving from the depot are natural seed customers\". They\n relied on human operator. To automate this selection we make a \n DBSCAN clustering with eps = median 2. nearest neighbor of all nodes\n and min_samples of 3. \n \n \n The other parameters are:\n * points is a list of x,y coordinates of the depot [0] and the customers.\n * D is a numpy ndarray (or equvalent) of the full 2D distance matrix.\n including the service times (st/2.0 for leaving and entering nodes).\n * d is a list of demands. d[0] should be 0.0 as it is the depot.\n * C is the capacity constraint limit for the identical vehicles.\n * L is the optional constraint for the maximum route length/duration/cost.\n * st is the service time. However, also the D should be modified with \n service times to allow straight computation of the TSP solutions (see\n above)\n * K is the optional parameter specifying the required number of vehicles.\n The algorithm is only allowed to find solutions with this many vehicles.\n * minimize_K, if set to True (default), makes the minimum number of routes\n the primary and the solution cost the secondary objective. If set False\n the algorithm optimizes for mimimum solution / route cost by increasing\n K as long as it seems beneficial. WARNING: the algorithm suits this use\n case (cost at the objective) poorly and setting this option to False may\n significantly increase the required CPU time.\n \n * find_optimal_seeds if set to True, tries all possible Sweep start\n positions / k-Means with N different seeds. If False, only one sweep \n from the node closest to the depot is done / k-Means clustering is done\n only once with one random seed value.\n * seed_edge_weight_type specifies how to round off the distances from the\n customer nodes (points) to the seed points. Supports all TSPLIB edge\n weight types.\n \n Note1: The GAP is optimized using Gurobi solver. If L constraint is set,\n the side constraints may make the GAP instance tricky to solve and it \n is advisable to set a sensible timeout with config.MAX_MIP_SOLVER_RUNTIME\n * use_adaptive_L_constraint_weights if set True, and the L constraint is \n set, the algorithm adaptively adjusts the route cost approximation of the\n relevant side constraints so that a solution which is not L infeasible or\n GAP infeasible is found. 
The exact handling of L consraint is vague in\n (Fisher and Jaikumar 1981) and this was our best guess on how the\n feasible region of the problem can be found. Note that if GAP solver is\n terminated due to a timeout, the adaptive multipier is increased and \n GAP solution is attempted again. However, if increase_K_on_failure is set,\n (see below) it takes priority over this.\n * increase_K_on_failure (default False) is another countermeasure against\n long running GAP solving attempts for problem instances without L \n constraint (if there is L constraint, and use_adaptive_L_constraint_-\n weights is enabled, this is ignored) or instances where K estimation \n does not work and it takes excessively long time to check all initial \n seed configurations before increasing K. If Gurobi timeout is encountered\n or the solution is GAP infeasible, and this option is enabled, the K is\n temporately increased, new seeds points generated for current sweep start\n location and another GAP solution attempt is made. K is allowed to\n increased temporarely up to 10% of the mimimum K allowed (or 1, whichever\n is larger).\n \n Note2: logger controls the debug level but running the script with\n Python -O option disables all debug output.\n \n Fisher, M. L. and Jaikumar, R. (1981), A generalized assignment heuristic\n for vehicle routing. Networks, 11: 109-124. doi:10.1002/net.3230110205\n \"\"\" #TODO: other alternatives\n # customers with maximum demand or most distant customer from origin\n \n if seed_method==\"cones\":\n seed_f = _sweep_seed_points\n if seed_method==\"kmeans\":\n seed_f = _kmeans_seed_points\n if seed_method==\"large_demands\":\n if not C: raise ValueError(\"\"\"The \"large_demands\" seed initialization method requires demands and C constraint to be known.\"\"\")\n seed_f = _large_demand_seed_points\n if seed_method==\"ends_of_thoroughfares\":\n seed_f = _end_of_thoroughfares_seed_points\n \n int_dists = issubclass(D.dtype.type, np.integer)\n if seed_edge_weight_type==\"EXPLICIT\":\n seed_edge_weight_type = \"EUC_2D\" if int_dists else \"EXACT_2D\"\n \n if not points:\n raise ValueError(\"The algorithm requires 2D coordinates for the points\")\n N = len(D) \n if K:\n startK = K\n maxK = K\n else:\n # start from the smallest K possible\n if C:\n startK = int(ceil(sum(d)/C))\n elif L:\n # find a lower bound by checking how many visits from the TSP\n # tour need to add to have any chance of making this L feasible.\n _,tsp_f = solve_tsp(D, range(1,N))\n shortest_depot_edges = list(D[0,1:])\n shortest_depot_edges.sort()\n startK = int(ceil(tsp_f/L))\n while True:\n if tsp_f+sum(shortest_depot_edges[:startK*2])<=startK*L:\n break\n startK+=1\n else:\n raise ValueError(\"If C and L have not been set, K is required\") \n maxK = N-1\n \n # We only need first row of the distance matrix to calculcate insertion \n # costs for GAP objective function\n D_0 = np.copy( D[0,:] )\n \n best_sol = None\n best_f = None\n best_K = None\n seed_trial = 0\n incK = 0\n maxKinc = max(startK+1, int(startK*INCREASE_K_ON_FAILURE_UPTO))\n \n L_ctr_multipiler = L_MPLR_DEFAULT\n if L and use_adaptive_L_constraint_weights:\n # Adaptive L constraint multipier \n L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT\n L_ctr_multipiler_tries = 0\n \n try:\n for currentK in range(startK, maxK+1):\n found_improving_solution_for_this_K = False\n seed_trial=0\n while True:\n if __debug__:\n log(DEBUG, \"ITERATION:K=%d, trial=%d, L_ctr_mul=%.6f\\n\"%\n (currentK+incK,seed_trial,L_ctr_multipiler))\n log(DEBUG-1, \"Getting %d seed 
points...\\n\"%(currentK+incK))\n \n # Get seed points\n seed_points = seed_f(points, D, d, C, currentK+incK, seed_trial)\n if __debug__:\n log(DEBUG-1, \"...got seed points %s\\n\"%str(seed_points))\n \n # Extend the distance matrix with seed distances\n \n S = calculate_D(seed_points, points, seed_edge_weight_type)\n if st:\n # include the \"leaving half\" of the service_time in the \n # distances (the other half is already added to the D\n # prior to gapvrp_init)\n halftst = int(st/2) if int_dists else st/2.0\n S[:,1:] += halftst\n D_s = np.vstack( (D_0, S) )\n \n GAP_infeasible = False \n L_infeasible = False\n solution = [0]\n sol_f = 0\n solved = False\n sol_K = 0\n take_next_seed = False\n try:\n # Distribute the nodes to vehicles using the approxmate \n # service costs in D_s and by solving it as GAP\n #\n #TODO: the model has the same dimensions for all iterations\n # with the same K and only the weights differ. Consider\n # replacing the coefficient matrix e.g. via C interface\n #https://stackoverflow.com/questions/33461329\n assignments = _solve_gap(N, D_s, d, C, currentK+incK, L,\n L_ctr_multipiler)\n if not assignments:\n if __debug__:\n log(DEBUG, \"INFEASIBILITY: GAP infeasible solution\")\n corrective_action = \"try with another seed = %d\"%seed_trial\n GAP_infeasible = True \n else:\n if __debug__:\n log(DEBUG-1, \"Assignments = %s\"%str(assignments))\n \n # Due to floating point inaccuracies in L constrained\n # cases the feasrelax may be used, which, in turn, can\n # in some corner cases return solutions that are not\n # really feasible. Make sure it is not the case\n if L: served = set([0])\n \n for route_nodes in assignments:\n if not route_nodes:\n continue\n route,route_l = solve_tsp(D, [0]+route_nodes)\n \n # Check for feasibility violations due to feasrelax\n if L:\n served |= set(route_nodes)\n if C and d and totald(route,d)-C_EPS>C:\n if __debug__: \n log(DEBUG, \"INFEASIBILITY: feasRelax \"+\n \"caused GAP infeasible solution \"+\n \" (capacity constraint violation)\")\n GAP_infeasible = True\n break # the route loop\n \n solution += route[1:]\n sol_f += route_l\n sol_K += 1\n \n if __debug__:\n log(DEBUG-2, \"DEBUG: Got TSP solution %s (%.2f)\"%\n (str(route),route_l))\n \n if L and route_l-S_EPS>L:\n if __debug__:\n log(DEBUG, \"INFEASIBILITY: L infeasible solution\")\n L_infeasible = True\n break # break route for loop\n \n # Check for feasibility violations due to feasrelax.\n # Have all customers been served?\n if not GAP_infeasible and not L_infeasible and\\\n L and len(served)<len(D):\n if __debug__: \n log(DEBUG, \"INFEASIBILITY: feasRelax caused GAP \"+\n \"infeasible solution (all customers \"+\n \"are not served)\")\n GAP_infeasible = True \n \n if not GAP_infeasible and not L_infeasible:\n if __debug__:\n log(DEBUG, \"Yielded feasible solution = %s (%.2f)\"%(str(solution), sol_f))\n solved = True\n \n except GurobiError as grbe:\n if __debug__: log(WARNING, str(grbe))\n\n if L and use_adaptive_L_constraint_weights and \\\n L_ctr_multipiler_tries<L_ADAPTIVE_MPLR_MAX_TRIES:\n L_ctr_multipiler+=L_ADAPTIVE_MPLR_INC\n L_ctr_multipiler_tries+=1\n if __debug__: corrective_action = \"Gurobi timeout, try with another L_ctr_multipiler = %.2f\"%L_ctr_multipiler\n elif increase_K_on_failure and currentK+incK+1<=maxKinc:\n if L and use_adaptive_L_constraint_weights and\\\n L_ctr_multipiler_tries>=L_ADAPTIVE_MPLR_MAX_TRIES:\n # try with all multiplier values for larger K\n L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT\n L_ctr_multipiler_tries = 0\n incK+=1\n if 
__debug__: corrective_action = \"Gurobi timeout, temporarely increase K by %d\"%incK\n elif find_optimal_seeds:\n take_next_seed = True\n else:\n grbe.message+=\", consider increasing the MAX_MIP_SOLVER_RUNTIME in config.py\"\n raise grbe\n else:\n if L and use_adaptive_L_constraint_weights:\n ## Adaptive GAP/L constraint multiplier reset \n # reset multiplier in case it the L feasibility was not violated\n # or it has reached the max_value. \n if solved or L_ctr_multipiler_tries>=L_ADAPTIVE_MPLR_MAX_TRIES:\n L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT\n L_ctr_multipiler_tries = 0\n take_next_seed = True\n if not solved and increase_K_on_failure and currentK+incK+1<=maxKinc:\n incK+=1\n take_next_seed = False\n if __debug__: corrective_action = \"temporarely increase K by %d\"%incK\n else:\n if __debug__: corrective_action = \"try with another seed = %d\"%seed_trial\n ## Adaptive GAP/L constraint multiplier update\n else:\n L_ctr_multipiler+=L_ADAPTIVE_MPLR_INC\n L_ctr_multipiler_tries+=1\n if __debug__: corrective_action = \"try with another L_ctr_multipiler = %.2f\"%L_ctr_multipiler\n else:\n if not solved and increase_K_on_failure and currentK+incK+1<=maxKinc:\n incK+=1\n if __debug__: corrective_action = \"temporarely increase K by %d\"%incK\n else:\n take_next_seed = True\n\n\n # Store the best so far\n if solved:\n if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):\n best_sol = solution\n best_f = sol_f\n best_K = sol_K\n found_improving_solution_for_this_K = True\n else:\n # No feasible solution was found for this trial (max route cost \n # or capacity constraint was violated). \n if __debug__:\n if GAP_infeasible or L_infeasible:\n log(DEBUG, \"Constraint is violated, \"+corrective_action)\n else:\n log(DEBUG, \"Continuing search, \"+corrective_action)\n \n if take_next_seed:\n incK = 0\n seed_trial+=1\n if not find_optimal_seeds: \n break # seed loop, possibly try next K\n if seed_trial==N:\n incK = 0\n break # seed loop, possibly try next K\n \n if minimize_K:\n # do not try different K if we found a solution\n if best_sol:\n break # K loop\n else: # not minimize_K\n # We already have an feasible solution for K<K_current, and could \n # not find a better solution than that on K_current. Therefore, it \n # is improbable we will find one even if we increase K and we\n # should stop here.\n if best_sol and not found_improving_solution_for_this_K:\n break\n except KeyboardInterrupt: #or SIGINT\n # pass on the current best_sol\n raise KeyboardInterrupt(best_sol)\n \n return best_sol \n\n# ---------------------------------------------------------------------\n# Wrapper for the command line user interface (CLI)\ndef get_gap_algorithm(seed_method=\"cones\"):\n algo_name = \"FJ81-GAP\"\n algo_desc = \"Fisher & Jaikumar (1981) generalized assignment problem heuristic\"\n def call_init(points, D, d, C, L, st, wtt, single, minimize_K):\n return gap_init(points, D, d, C, L=L, st=st,\n K=None, minimize_K=minimize_K,\n seed_edge_weight_type=wtt,\n find_optimal_seeds=(not single),\n seed_method=seed_method)\n return (algo_name, algo_desc, call_init)\n \nif __name__==\"__main__\":\n from shared_cli import cli\n cli(*get_gap_algorithm())\n"
] |
[
[
"numpy.sqrt",
"sklearn.cluster.KMeans",
"numpy.multiply",
"numpy.cos",
"sklearn.cluster.DBSCAN",
"numpy.sin",
"numpy.copy",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
]
] |
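The FJ81-GAP entry above lists sklearn.cluster.KMeans among the calls used when seed_method=="kmeans". What follows is a minimal, hypothetical sketch of a demand-weighted k-means seed-point generator; the name kmeans_seed_points, its signature, and the demand weighting are assumptions for illustration and need not match the repository's actual _kmeans_seed_points.

import numpy as np
from sklearn.cluster import KMeans

def kmeans_seed_points(points, d, K, trial=0):
    """Return K seed coordinates: cluster the customers, use weighted centres."""
    customers = np.array(points[1:], dtype=float)   # index 0 is the depot
    weights = np.array(d, dtype=float) if d is not None else None
    km = KMeans(n_clusters=K, n_init=10, random_state=trial)
    km.fit(customers, sample_weight=weights)
    return km.cluster_centers_

# Example: five customers around a depot at the origin, two vehicles
pts = [(0.0, 0.0), (1.0, 2.0), (2.0, 1.5), (-1.0, -2.0), (-2.0, -1.5), (1.5, 1.0)]
demands = [3, 2, 4, 1, 2]
print(kmeans_seed_points(pts, demands, K=2, trial=0))

Varying random_state with the trial counter mimics how the heuristic retries GAP solving with fresh seed configurations.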
alpiges/probnum
|
[
"2e4153cb0df559984e09ec74487ef6c9d3f6d464"
] |
[
"src/probnum/randprocs/markov/integrator/_iwp.py"
] |
[
"\"\"\"Integrated Brownian motion.\"\"\"\n\ntry:\n # cached_property is only available in Python >=3.8\n from functools import cached_property\nexcept ImportError:\n from cached_property import cached_property\n\nimport warnings\n\nimport numpy as np\nimport scipy.special\n\nfrom probnum import config, linops, randvars\nfrom probnum.randprocs.markov import _markov_process, continuous, discrete\nfrom probnum.randprocs.markov.integrator import _integrator, _preconditioner\n\n\nclass IntegratedWienerProcess(_markov_process.MarkovProcess):\n r\"\"\"Integrated Wiener process.\n\n Convenience access to :math:`\\nu` times integrated (:math:`d` dimensional) Wiener processes.\n\n Parameters\n ----------\n initarg\n Initial time point.\n num_derivatives\n Number of modelled derivatives of the integrated process (''order'', ''number of integrations'').\n Optional. Default is :math:`\\nu=1`.\n wiener_process_dimension\n Dimension of the underlying Wiener process.\n Optional. Default is :math:`d=1`.\n The dimension of the integrated Wiener process itself is :math:`d(\\nu + 1)`.\n initrv\n Law of the integrated Wiener process at the initial time point.\n Optional. Default is a :math:`d(\\nu + 1)` dimensional standard-normal distribution.\n diffuse\n Whether to instantiate a diffuse prior. A diffuse prior has large initial variances.\n Optional. Default is `False`.\n If `True`, and if an initial random variable is not passed, an initial random variable is created,\n where the initial covariance is of the form :math:`\\kappa I_{d(\\nu + 1)}`\n with :math:`\\kappa=10^6`.\n Diffuse priors are used when initial distributions are not known.\n They are common for filtering-based probabilistic ODE solvers.\n forward_implementation\n Implementation of the forward-propagation in the underlying transitions.\n Optional. Default is `classic`. `sqrt` implementation is more computationally expensive, but also more stable.\n backward_implementation\n Implementation of the backward-conditioning in the underlying transitions.\n Optional. Default is `classic`. 
`sqrt` implementation is more computationally expensive, but also more stable.\n\n Raises\n ------\n Warning\n If `initrv` is not None and `diffuse` is True.\n\n Examples\n --------\n >>> iwp1 = IntegratedWienerProcess(initarg=0.)\n >>> print(iwp1)\n <IntegratedWienerProcess with input_dim=1, output_dim=2, dtype=float64>\n\n >>> iwp2 = IntegratedWienerProcess(initarg=0., num_derivatives=2)\n >>> print(iwp2)\n <IntegratedWienerProcess with input_dim=1, output_dim=3, dtype=float64>\n\n >>> iwp3 = IntegratedWienerProcess(initarg=0., wiener_process_dimension=10)\n >>> print(iwp3)\n <IntegratedWienerProcess with input_dim=1, output_dim=20, dtype=float64>\n\n >>> iwp4 = IntegratedWienerProcess(initarg=0., num_derivatives=4, wiener_process_dimension=1)\n >>> print(iwp4)\n <IntegratedWienerProcess with input_dim=1, output_dim=5, dtype=float64>\n \"\"\"\n\n def __init__(\n self,\n initarg,\n num_derivatives=1,\n wiener_process_dimension=1,\n initrv=None,\n diffuse=False,\n forward_implementation=\"classic\",\n backward_implementation=\"classic\",\n ):\n iwp_transition = IntegratedWienerTransition(\n num_derivatives=num_derivatives,\n wiener_process_dimension=wiener_process_dimension,\n forward_implementation=forward_implementation,\n backward_implementation=backward_implementation,\n )\n if initrv is not None and diffuse:\n warnings.warn(\n \"Parameter `diffuse` has no effect, because an `initrv` has been provided.\"\n )\n if initrv is None:\n if diffuse:\n scale_cholesky = 1e3\n else:\n scale_cholesky = 1.0\n zeros = np.zeros(iwp_transition.state_dimension)\n cov_cholesky = scale_cholesky * np.eye(iwp_transition.state_dimension)\n initrv = randvars.Normal(\n mean=zeros, cov=cov_cholesky ** 2, cov_cholesky=cov_cholesky\n )\n\n super().__init__(transition=iwp_transition, initrv=initrv, initarg=initarg)\n\n\nclass IntegratedWienerTransition(_integrator.IntegratorTransition, continuous.LTISDE):\n \"\"\"Integrated Brownian motion in :math:`d` dimensions.\"\"\"\n\n def __init__(\n self,\n num_derivatives,\n wiener_process_dimension,\n forward_implementation=\"classic\",\n backward_implementation=\"classic\",\n ):\n # initialise BOTH superclasses' inits.\n # I don't like it either, but it does the job.\n _integrator.IntegratorTransition.__init__(\n self,\n num_derivatives=num_derivatives,\n wiener_process_dimension=wiener_process_dimension,\n )\n continuous.LTISDE.__init__(\n self,\n drift_matrix=self._drift_matrix,\n force_vector=self._force_vector,\n dispersion_matrix=self._dispersion_matrix,\n forward_implementation=forward_implementation,\n backward_implementation=backward_implementation,\n )\n\n @cached_property\n def _drift_matrix(self):\n drift_matrix_1d = np.diag(np.ones(self.num_derivatives), 1)\n if config.matrix_free:\n return linops.Kronecker(\n A=linops.Identity(self.wiener_process_dimension),\n B=linops.Matrix(A=drift_matrix_1d),\n )\n return np.kron(np.eye(self.wiener_process_dimension), drift_matrix_1d)\n\n @cached_property\n def _force_vector(self):\n return np.zeros((self.wiener_process_dimension * (self.num_derivatives + 1)))\n\n @cached_property\n def _dispersion_matrix(self):\n dispersion_matrix_1d = np.zeros(self.num_derivatives + 1)\n dispersion_matrix_1d[-1] = 1.0 # Unit diffusion\n\n if config.matrix_free:\n return linops.Kronecker(\n A=linops.Identity(self.wiener_process_dimension),\n B=linops.Matrix(A=dispersion_matrix_1d.reshape(-1, 1)),\n )\n return np.kron(np.eye(self.wiener_process_dimension), dispersion_matrix_1d).T\n\n @cached_property\n def 
equivalent_discretisation_preconditioned(self):\n \"\"\"Discretised IN THE PRECONDITIONED SPACE.\n\n The preconditioned state transition is the flipped Pascal matrix.\n The preconditioned process noise covariance is the flipped Hilbert matrix.\n The shift is always zero.\n\n Reference: https://arxiv.org/abs/2012.10106\n \"\"\"\n\n state_transition_1d = np.flip(\n scipy.linalg.pascal(self.num_derivatives + 1, kind=\"lower\", exact=False)\n )\n if config.matrix_free:\n state_transition = linops.Kronecker(\n A=linops.Identity(self.wiener_process_dimension),\n B=linops.aslinop(state_transition_1d),\n )\n else:\n state_transition = np.kron(\n np.eye(self.wiener_process_dimension), state_transition_1d\n )\n process_noise_1d = np.flip(scipy.linalg.hilbert(self.num_derivatives + 1))\n if config.matrix_free:\n process_noise = linops.Kronecker(\n A=linops.Identity(self.wiener_process_dimension),\n B=linops.aslinop(process_noise_1d),\n )\n else:\n process_noise = np.kron(\n np.eye(self.wiener_process_dimension), process_noise_1d\n )\n empty_shift = np.zeros(\n self.wiener_process_dimension * (self.num_derivatives + 1)\n )\n\n process_noise_cholesky_1d = np.linalg.cholesky(process_noise_1d)\n if config.matrix_free:\n process_noise_cholesky = linops.Kronecker(\n A=linops.Identity(self.wiener_process_dimension),\n B=linops.aslinop(process_noise_cholesky_1d),\n )\n else:\n process_noise_cholesky = np.kron(\n np.eye(self.wiener_process_dimension), process_noise_cholesky_1d\n )\n\n return discrete.LTIGaussian(\n state_trans_mat=state_transition,\n shift_vec=empty_shift,\n proc_noise_cov_mat=process_noise,\n proc_noise_cov_cholesky=process_noise_cholesky,\n forward_implementation=self.forward_implementation,\n backward_implementation=self.backward_implementation,\n )\n\n def forward_rv(\n self,\n rv,\n t,\n dt=None,\n compute_gain=False,\n _diffusion=1.0,\n **kwargs,\n ):\n if dt is None:\n raise ValueError(\n \"Continuous-time transitions require a time-increment ``dt``.\"\n )\n\n rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)\n rv, info = self.equivalent_discretisation_preconditioned.forward_rv(\n rv, t, compute_gain=compute_gain, _diffusion=_diffusion\n )\n\n info[\"crosscov\"] = self.precon(dt) @ info[\"crosscov\"] @ self.precon(dt).T\n if \"gain\" in info:\n info[\"gain\"] = self.precon(dt) @ info[\"gain\"] @ self.precon.inverse(dt).T\n\n return _preconditioner.apply_precon(self.precon(dt), rv), info\n\n def backward_rv(\n self,\n rv_obtained,\n rv,\n rv_forwarded=None,\n gain=None,\n t=None,\n dt=None,\n _diffusion=1.0,\n **kwargs,\n ):\n if dt is None:\n raise ValueError(\n \"Continuous-time transitions require a time-increment ``dt``.\"\n )\n\n rv_obtained = _preconditioner.apply_precon(self.precon.inverse(dt), rv_obtained)\n rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)\n rv_forwarded = (\n _preconditioner.apply_precon(self.precon.inverse(dt), rv_forwarded)\n if rv_forwarded is not None\n else None\n )\n gain = (\n self.precon.inverse(dt) @ gain @ self.precon.inverse(dt).T\n if gain is not None\n else None\n )\n\n rv, info = self.equivalent_discretisation_preconditioned.backward_rv(\n rv_obtained=rv_obtained,\n rv=rv,\n rv_forwarded=rv_forwarded,\n gain=gain,\n t=t,\n _diffusion=_diffusion,\n )\n\n return _preconditioner.apply_precon(self.precon(dt), rv), info\n\n def discretise(self, dt):\n \"\"\"Equivalent discretisation of the process.\n\n Overwrites matrix-fraction decomposition in the super-class. 
Only present for\n user's convenience and to maintain a clean interface. Not used for forward_rv,\n etc..\n \"\"\"\n state_trans_mat = (\n self.precon(dt)\n @ self.equivalent_discretisation_preconditioned.state_trans_mat\n @ self.precon.inverse(dt)\n )\n proc_noise_cov_mat = (\n self.precon(dt)\n @ self.equivalent_discretisation_preconditioned.proc_noise_cov_mat\n @ self.precon(dt).T\n )\n zero_shift = np.zeros(state_trans_mat.shape[0])\n\n # The Cholesky factor of the process noise covariance matrix of the IBM\n # always exists, even for non-square root implementations.\n proc_noise_cov_cholesky = (\n self.precon(dt)\n @ self.equivalent_discretisation_preconditioned.proc_noise_cov_cholesky\n )\n\n return discrete.LTIGaussian(\n state_trans_mat=state_trans_mat,\n shift_vec=zero_shift,\n proc_noise_cov_mat=proc_noise_cov_mat,\n proc_noise_cov_cholesky=proc_noise_cov_cholesky,\n forward_implementation=self.forward_implementation,\n backward_implementation=self.forward_implementation,\n )\n"
] |
[
[
"numpy.eye",
"numpy.ones",
"numpy.zeros",
"numpy.linalg.cholesky"
]
] |
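The probnum file above assembles the LTI SDE matrices of an integrated Wiener process; its dense branch relies on numpy.eye, numpy.ones, numpy.zeros and a Kronecker lift over the Wiener-process dimension. Below is a standalone NumPy re-derivation of that construction for illustration only; it is a sketch of the same matrices, not the probnum API.

import numpy as np

def iwp_sde_matrices(num_derivatives, wiener_process_dimension):
    nu, d = num_derivatives, wiener_process_dimension
    # 1-D drift: ones on the first superdiagonal (a chain of integrators)
    drift_1d = np.diag(np.ones(nu), 1)
    # 1-D dispersion: unit diffusion enters only the highest derivative
    dispersion_1d = np.zeros(nu + 1)
    dispersion_1d[-1] = 1.0
    drift = np.kron(np.eye(d), drift_1d)
    dispersion = np.kron(np.eye(d), dispersion_1d).T
    force = np.zeros(d * (nu + 1))
    return drift, force, dispersion

F, u, L = iwp_sde_matrices(num_derivatives=2, wiener_process_dimension=1)
print(F)          # [[0. 1. 0.], [0. 0. 1.], [0. 0. 0.]]
print(L.ravel())  # [0. 0. 1.]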
qiaw99/CS231n-Convolutional-Neural-Networks-for-Visual-Recognition
|
[
"5949b4f68f04e23751879dbc679e7708735d313f"
] |
[
"assignment1/cs231n/classifiers/fc_net.py"
] |
[
"from builtins import range\nfrom builtins import object\nimport numpy as np\n\nfrom ..layers import *\nfrom ..layer_utils import *\n\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network with ReLU nonlinearity and\n softmax loss that uses a modular layer design. We assume an input dimension\n of D, a hidden dimension of H, and perform classification over C classes.\n\n The architecure should be affine - relu - affine - softmax.\n\n Note that this class does not implement gradient descent; instead, it\n will interact with a separate Solver object that is responsible for running\n optimization.\n\n The learnable parameters of the model are stored in the dictionary\n self.params that maps parameter names to numpy arrays.\n \"\"\"\n\n def __init__(\n self,\n input_dim=3 * 32 * 32,\n hidden_dim=100,\n num_classes=10,\n weight_scale=1e-3,\n reg=0.0,\n ):\n \"\"\"\n Initialize a new network.\n\n Inputs:\n - input_dim: An integer giving the size of the input\n - hidden_dim: An integer giving the size of the hidden layer\n - num_classes: An integer giving the number of classes to classify\n - weight_scale: Scalar giving the standard deviation for random\n initialization of the weights.\n - reg: Scalar giving L2 regularization strength.\n \"\"\"\n self.params = {}\n self.reg = reg\n\n ############################################################################\n # TODO: Initialize the weights and biases of the two-layer net. Weights #\n # should be initialized from a Gaussian centered at 0.0 with #\n # standard deviation equal to weight_scale, and biases should be #\n # initialized to zero. All weights and biases should be stored in the #\n # dictionary self.params, with first layer weights #\n # and biases using the keys 'W1' and 'b1' and second layer #\n # weights and biases using the keys 'W2' and 'b2'. #\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n self.params = {\"W1\": np.random.normal(0, weight_scale, size=(input_dim, hidden_dim)),\n \"b1\": np.zeros(hidden_dim),\n \"W2\": np.random.normal(0, weight_scale, size=(hidden_dim, num_classes)),\n \"b2\": np.zeros(num_classes)\n }\n \n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n def loss(self, X, y=None):\n \"\"\"\n Compute loss and gradient for a minibatch of data.\n\n Inputs:\n - X: Array of input data of shape (N, d_1, ..., d_k)\n - y: Array of labels, of shape (N,). y[i] gives the label for X[i].\n\n Returns:\n If y is None, then run a test-time forward pass of the model and return:\n - scores: Array of shape (N, C) giving classification scores, where\n scores[i, c] is the classification score for X[i] and class c.\n\n If y is not None, then run a training-time forward and backward pass and\n return a tuple of:\n - loss: Scalar value giving the loss\n - grads: Dictionary with the same keys as self.params, mapping parameter\n names to gradients of the loss with respect to those parameters.\n \"\"\"\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. 
#\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n relu_output, relu_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, cache = affine_forward(relu_output, self.params['W2'], self.params['b2'])\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n loss, d_scores = softmax_loss(scores, y)\n loss += 0.5 * self.reg * np.sum(self.params['W1'] * self.params['W1']) + 0.5 * self.reg * np.sum(self.params['W2'] * self.params['W2'])\n \n dx, dw, db = affine_backward(d_scores, cache)\n grads[\"W2\"] = dw\n grads[\"b2\"] = db\n \n dx, dw, db = affine_relu_backward(dx, relu_cache)\n grads[\"W1\"] = dw\n grads[\"b1\"] = db\n \n grads[\"W1\"] += self.reg * self.params['W1']\n grads[\"W2\"] += self.reg * self.params['W2']\n \n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads\n"
] |
[
[
"numpy.random.normal",
"numpy.zeros",
"numpy.sum"
]
] |
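The TwoLayerNet above wires the affine - relu - affine - softmax architecture out of the course's modular layer functions. As a self-contained NumPy sketch (deliberately not using the cs231n layer utilities), the forward pass and the 0.5-weighted L2-regularized softmax loss can be written as follows.

import numpy as np

def init_params(D, H, C, weight_scale=1e-3, seed=0):
    rng = np.random.default_rng(seed)
    return {
        "W1": rng.normal(0.0, weight_scale, size=(D, H)),
        "b1": np.zeros(H),
        "W2": rng.normal(0.0, weight_scale, size=(H, C)),
        "b2": np.zeros(C),
    }

def two_layer_loss(params, X, y, reg=0.0):
    h = np.maximum(0, X @ params["W1"] + params["b1"])        # affine + ReLU
    scores = h @ params["W2"] + params["b2"]                  # affine
    shifted = scores - scores.max(axis=1, keepdims=True)      # stable softmax
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    data_loss = -log_probs[np.arange(len(y)), y].mean()
    reg_loss = 0.5 * reg * (np.sum(params["W1"] ** 2) + np.sum(params["W2"] ** 2))
    return data_loss + reg_loss

params = init_params(D=4, H=8, C=3)
X = np.random.default_rng(1).normal(size=(5, 4))
y = np.array([0, 2, 1, 1, 0])
print(two_layer_loss(params, X, y, reg=0.1))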
VincentSch4rf/torchtime
|
[
"bebd006cd67b31c342e0658285c9771c27411df0"
] |
[
"torchtime/transforms/functional.py"
] |
[
"import warnings\nfrom typing import Any, List, Sequence, Tuple, Optional, Union, Set\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nimport torch.nn.functional as F\n\nfrom ..exceptions import DataConversionWarning\nfrom ..utils import _check_unknown\n\n\[email protected]\ndef _is_numpy(ts: Any) -> bool:\n return isinstance(ts, np.ndarray)\n\n\[email protected]\ndef _is_numpy_timeseries(ts: Any) -> bool:\n return _is_numpy(ts) and ts.ndim in {1, 2}\n\n\ndef pad(series: Tensor, padding: List[int], fill: int = 0, padding_mode: str = \"constant\") -> Tensor:\n if not isinstance(padding, (tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n if not isinstance(fill, (int, float)):\n raise TypeError(\"Got inappropriate fill arg\")\n if not isinstance(padding_mode, str):\n raise TypeError(\"Got inappropriate padding_mode arg\")\n\n if isinstance(padding, tuple):\n padding = list(padding)\n\n if isinstance(padding, list) and len(padding) not in [1, 2]:\n raise ValueError(\"Padding must be an int or a 1 or 2 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n if padding_mode not in [\"constant\", \"replicate\", \"reflect\"]:\n raise ValueError(\"Padding mode should be either constant, replicate or reflect\")\n\n out_dtype = series.dtype\n need_cast = False\n if (padding_mode != \"constant\") and series.dtype not in (torch.float32, torch.float64):\n # Temporary cast input tensor to float until pytorch issue is resolved :\n # https://github.com/pytorch/pytorch/issues/40763\n need_cast = True\n series = series.to(torch.float32)\n\n series = F.pad(series, padding, mode=padding_mode, value=float(fill))\n\n if need_cast:\n series = series.to(out_dtype)\n\n return series\n\n\ndef normalize(tensor: Tensor, mean: Sequence[float], std: Sequence[float], inplace: bool = False) -> Tensor:\n \"\"\"Normalize a float tensor time series with mean and standard deviation.\n\n .. note::\n This transform acts out of place by default, i.e., it does not mutate the input tensor.\n\n See :class:`~torchtime.transforms.Normalize` for more details.\n\n Args:\n tensor (Tensor): Float tensor time series of size (C, L) or (B, C, L) to be normalized.\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n inplace(bool,optional): Bool to make this operation inplace.\n\n Returns:\n Tensor: Normalized Tensor time series.\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))\n\n if not tensor.is_floating_point():\n raise TypeError('Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))\n\n if tensor.ndim < 2:\n raise ValueError('Expected tensor to be a tensor time series of size (..., C, L). 
Got tensor.size() = '\n '{}.'.format(tensor.size()))\n\n if not inplace:\n tensor = tensor.clone()\n\n dtype = tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n if (std == 0).any():\n raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))\n if mean.ndim == 1:\n mean = mean.view(-1, 1)\n if std.ndim == 1:\n std = std.view(-1, 1)\n tensor.sub_(mean).div_(std)\n return tensor\n\n\ndef column_or_1d(y, *, warn=False) -> np.ndarray:\n \"\"\"Ravel column or 1d numpy array, else raises an error.\n Parameters\n ----------\n y : array-like\n warn : bool, default=False\n To control display of warnings.\n Returns\n -------\n y : ndarray\n \"\"\"\n y = np.asarray(y)\n shape = np.shape(y)\n if len(shape) == 1:\n return np.ravel(y)\n if len(shape) == 2 and shape[1] == 1:\n if warn:\n warnings.warn(\n \"A column-vector y was passed when a 1d array was\"\n \" expected. Please change the shape of y to \"\n \"(n_samples, ), for example using ravel().\",\n DataConversionWarning,\n stacklevel=2,\n )\n return np.ravel(y)\n\n raise ValueError(\n \"y should be a 1d array, got an array of shape {} instead.\".format(shape)\n )\n\n\ndef encode_labels(targets: List[Any], classes: Optional[List[Any]] = None) -> Tuple[List[Any], Tensor]:\n if classes is None:\n classes = set(targets)\n diff = _check_unknown(targets, classes)\n if diff:\n raise ValueError(f\"y contains previously unseen labels: {str(diff)}\")\n table = {val: i for i, val in enumerate(classes)}\n return classes, torch.as_tensor([table[v] for v in targets])\n # r = np.searchsorted(classes, targets)\n # return classes, torch.as_tensor(r)\n"
] |
[
[
"numpy.asarray",
"numpy.ravel",
"numpy.shape",
"torch.as_tensor"
]
] |
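The torchtime entry's normalize() takes a float (C, L) tensor plus per-channel mean/std sequences, reshapes 1-D statistics into column vectors, and applies (x - mean) / std. The snippet below is a minimal self-contained mirror of that behaviour in plain torch; it is illustrative only and is not the torchtime function itself.

import torch

def normalize_series(tensor, mean, std):
    # Reshape 1-D per-channel statistics so they broadcast over the time axis.
    mean = torch.as_tensor(mean, dtype=tensor.dtype).view(-1, 1)
    std = torch.as_tensor(std, dtype=tensor.dtype).view(-1, 1)
    if (std == 0).any():
        raise ValueError("std contains zeros, which would divide by zero.")
    return (tensor - mean) / std

series = torch.randn(3, 100)                          # (C, L): 3 channels
out = normalize_series(series,
                       mean=series.mean(dim=1),
                       std=series.std(dim=1))
print(out.mean(dim=1), out.std(dim=1))                # ~0 and ~1 per channel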
neuronalX/reservoirpy
|
[
"37751e9a6be76298e1c14b3816f191f351bfb606"
] |
[
"reservoirpy/_base.py"
] |
[
"# Author: Nathan Trouvain at 15/02/2022 <[email protected]>\n# Licence: MIT License\n# Copyright: Xavier Hinaut (2018) <[email protected]>\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Iterator, Sequence, Union\nfrom uuid import uuid4\n\nimport numpy as np\n\nfrom .type import MappedData, Shape\nfrom .utils import progress\nfrom .utils.validation import check_vector, is_mapping\n\n\ndef _distant_model_inputs(model):\n \"\"\"Get inputs for distant Nodes in a Model used as feedabck or teacher.\n These inputs should be already computed by other Nodes.\"\"\"\n input_data = {}\n for p, c in model.edges:\n if p in model.input_nodes:\n input_data[c.name] = p.state_proxy()\n return input_data\n\n\ndef _remove_input_for_feedback(node) -> Union[\"Node\", \"Model\"]:\n \"\"\"Remove inputs nodes from feedback Model and gather remaining nodes\n into a new Model. Allow getting inputs for feedback model from its input\n nodes states.\"\"\"\n from .model import Model\n\n all_nodes = set(node.nodes)\n input_nodes = set(node.input_nodes)\n filtered_nodes = list(all_nodes - input_nodes)\n filtered_edges = [edge for edge in node.edges if edge[0] not in input_nodes]\n\n # return a single Node if Model - Inputs = Node\n # else return Model - Inputs = Reduced Model\n if len(filtered_nodes) == 1:\n return list(filtered_nodes)[0]\n return Model(filtered_nodes, filtered_edges, name=str(uuid4()))\n\n\ndef check_one_sequence(\n x: Union[np.ndarray, Sequence[np.ndarray]],\n expected_dim=None,\n caller=None,\n allow_timespans=True,\n):\n\n caller_name = caller.name + \"is \" if caller is not None else \"\"\n\n if expected_dim is not None and not hasattr(expected_dim, \"__iter__\"):\n expected_dim = (expected_dim,)\n\n x_new = check_vector(\n x, allow_reshape=True, allow_timespans=allow_timespans, caller=caller\n )\n data_dim = x_new.shape[1:]\n\n # Check x dimension\n if expected_dim is not None:\n if len(expected_dim) != len(data_dim):\n raise ValueError(\n f\"{caller_name} expecting {len(expected_dim)} inputs \"\n f\"but received {len(data_dim)}: {x_new}.\"\n )\n for dim in expected_dim:\n if all([dim != ddim for ddim in data_dim]):\n raise ValueError(\n f\"{caller_name} expecting data of shape \"\n f\"{expected_dim} but received shape {data_dim}.\"\n )\n return x_new\n\n\n# expected_dim = ((m, n), o, (p, q, r), ...)\ndef check_n_sequences(\n x,\n expected_dim=None,\n allow_n_sequences=True,\n allow_n_inputs=True,\n allow_timespans=True,\n caller=None,\n):\n if expected_dim is not None:\n if not hasattr(expected_dim, \"__iter__\"):\n expected_dim = (expected_dim,)\n n_inputs = len(expected_dim)\n\n # I\n if n_inputs > 1:\n if isinstance(x, (list, tuple)):\n x_new = [x[i] for i in range(len(x))]\n timesteps = []\n for i in range(n_inputs):\n dim = (expected_dim[i],)\n x_new[i] = check_n_sequences(\n x[i],\n expected_dim=dim,\n caller=caller,\n allow_n_sequences=allow_n_sequences,\n allow_timespans=allow_timespans,\n allow_n_inputs=allow_n_inputs,\n )\n if isinstance(x_new[i], (list, tuple)):\n timesteps.append(tuple([x_.shape[0] for x_ in x_new[i]]))\n else:\n dim = dim[0]\n if not hasattr(dim, \"__len__\"):\n dim = (dim,)\n if len(dim) + 2 > len(x_new[i].shape) >= len(dim) + 1:\n timesteps.append((x_new[i].shape[0],))\n else:\n timesteps.append((x_new[i].shape[1],))\n\n if len(np.unique([len(t) for t in timesteps])) > 1 or any(\n [\n len(np.unique([t[i] for t in timesteps])) > 1\n for i in range(len(timesteps[0]))\n ]\n ):\n raise ValueError(\"Inputs with 
different timesteps\")\n else:\n raise ValueError(\"Expecting several inputs.\")\n else: # L\n dim = expected_dim[0]\n if not hasattr(dim, \"__len__\"):\n dim = (dim,)\n\n if isinstance(x, (list, tuple)):\n if not allow_n_sequences:\n raise TypeError(\"No lists, only arrays.\")\n x_new = [x[i] for i in range(len(x))]\n for i in range(len(x)):\n x_new[i] = check_one_sequence(\n x[i],\n allow_timespans=allow_timespans,\n expected_dim=dim,\n caller=caller,\n )\n else:\n if len(x.shape) <= len(dim) + 1: # only one sequence\n x_new = check_one_sequence(\n x,\n expected_dim=dim,\n allow_timespans=allow_timespans,\n caller=caller,\n )\n elif len(x.shape) == len(dim) + 2: # several sequences\n if not allow_n_sequences:\n raise TypeError(\"No lists, only arrays.\")\n x_new = x\n for i in range(len(x)):\n x_new[i] = check_one_sequence(\n x[i],\n allow_timespans=allow_timespans,\n expected_dim=dim,\n caller=caller,\n )\n else: # pragma: no cover\n x_new = check_vector(\n x,\n allow_reshape=True,\n allow_timespans=allow_timespans,\n caller=caller,\n )\n else:\n if isinstance(x, (list, tuple)):\n x_new = [x[i] for i in range(len(x))]\n for i in range(len(x)):\n if allow_n_inputs:\n x_new[i] = check_n_sequences(\n x[i],\n allow_n_sequences=allow_n_sequences,\n allow_timespans=allow_timespans,\n allow_n_inputs=False,\n caller=caller,\n )\n elif allow_n_sequences:\n x_new[i] = check_n_sequences(\n x[i],\n allow_n_sequences=False,\n allow_timespans=allow_timespans,\n allow_n_inputs=False,\n caller=caller,\n )\n else:\n raise ValueError(\"No lists, only arrays.\")\n else:\n x_new = check_one_sequence(\n x, allow_timespans=allow_timespans, caller=caller\n )\n\n return x_new\n\n\ndef _check_node_io(\n x,\n receiver_nodes=None,\n expected_dim=None,\n caller=None,\n io_type=\"input\",\n allow_n_sequences=True,\n allow_n_inputs=True,\n allow_timespans=True,\n):\n\n noteacher_msg = f\"Nodes can not be used as {io_type}\" + \" for {}.\"\n notonline_msg = \"{} is not trained online.\"\n\n x_new = None\n # Caller is a Model\n if receiver_nodes is not None:\n if not is_mapping(x):\n x_new = {n.name: x for n in receiver_nodes}\n else:\n x_new = x.copy()\n\n for node in receiver_nodes:\n if node.name not in x_new:\n # Maybe don't fit nodes a second time\n if io_type == \"target\" and node.fitted:\n continue\n else:\n raise ValueError(f\"Missing {io_type} data for node {node.name}.\")\n\n if (\n callable(x_new[node.name])\n and hasattr(x_new[node.name], \"initialize\")\n and hasattr(x_new[node.name], \"is_initialized\")\n and hasattr(x_new[node.name], \"output_dim\")\n ):\n if io_type == \"target\":\n if node.is_trained_online:\n register_teacher(\n node,\n x_new.pop(node.name),\n expected_dim=node.output_dim,\n )\n else:\n raise TypeError(\n (noteacher_msg + notonline_msg).format(node.name, node.name)\n )\n else:\n raise TypeError(noteacher_msg.format(node.name))\n else:\n if io_type == \"target\":\n dim = node.output_dim\n else:\n dim = node.input_dim\n\n x_new[node.name] = check_n_sequences(\n x_new[node.name],\n expected_dim=dim,\n caller=node,\n allow_n_sequences=allow_n_sequences,\n allow_n_inputs=allow_n_inputs,\n allow_timespans=allow_timespans,\n )\n # Caller is a Node\n else:\n if (\n callable(x)\n and hasattr(x, \"initialize\")\n and hasattr(x, \"is_initialized\")\n and hasattr(x, \"output_dim\")\n ):\n if io_type == \"target\":\n if caller.is_trained_online:\n register_teacher(\n caller,\n x,\n expected_dim=expected_dim,\n )\n else:\n raise TypeError(\n (noteacher_msg + notonline_msg).format(caller.name, 
caller.name)\n )\n else:\n raise TypeError(noteacher_msg.format(caller.name))\n else:\n x_new = check_n_sequences(\n x,\n expected_dim=expected_dim,\n caller=caller,\n allow_n_sequences=allow_n_sequences,\n allow_n_inputs=allow_n_inputs,\n allow_timespans=allow_timespans,\n )\n\n # All x are teacher nodes, no data to return\n if is_mapping(x_new) and io_type == \"target\" and len(x_new) == 0:\n return None\n\n return x_new\n\n\ndef register_teacher(caller, teacher, expected_dim=None):\n\n target_dim = None\n if teacher.is_initialized:\n target_dim = teacher.output_dim\n\n if (\n expected_dim is not None\n and target_dim is not None\n and expected_dim != target_dim\n ):\n raise ValueError()\n\n caller._teacher = DistantFeedback(\n sender=teacher, receiver=caller, callback_type=\"teacher\"\n )\n\n\ndef check_xy(\n caller,\n x,\n y=None,\n input_dim=None,\n output_dim=None,\n allow_n_sequences=True,\n allow_n_inputs=True,\n allow_timespans=True,\n):\n \"\"\"Prepare one step of input and target data for a Node or a Model.\n\n Preparation may include:\n - reshaping data to ([inputs], [sequences], timesteps, features);\n - converting non-array objects to array objects;\n - checking if n_features is equal to node input or output dimension.\n\n This works on numerical data and teacher nodes.\n\n Parameters\n ----------\n caller: Node or Model\n Node or Model requesting inputs/targets preparation.\n x : array-like of shape ([inputs], [sequences], timesteps, features)\n Input array or sequence of input arrays containing a single timestep of\n data.\n y : array-like of shape ([sequences], timesteps, features) or Node, optional\n Target array containing a single timestep of data, or teacher Node or\n Model\n yielding target values.\n input_dim, output_dim : int or tuple of ints, optional\n Expected input and target dimensions, if available.\n\n Returns\n -------\n array-like of shape ([inputs], 1, n), array-like of shape (1, n) or Node\n Processed input and target vectors.\n \"\"\"\n\n if input_dim is None and hasattr(caller, \"input_dim\"):\n input_dim = caller.input_dim\n\n # caller is a Model\n if hasattr(caller, \"input_nodes\"):\n input_nodes = caller.input_nodes\n # caller is a Node\n else:\n input_nodes = None\n\n x_new = _check_node_io(\n x,\n receiver_nodes=input_nodes,\n expected_dim=input_dim,\n caller=caller,\n io_type=\"input\",\n allow_n_sequences=allow_n_sequences,\n allow_n_inputs=allow_n_inputs,\n allow_timespans=allow_timespans,\n )\n\n y_new = y\n if y is not None:\n # caller is a Model\n if hasattr(caller, \"trainable_nodes\"):\n output_dim = None\n trainable_nodes = caller.trainable_nodes\n\n # caller is a Node\n else:\n trainable_nodes = None\n if output_dim is None and hasattr(caller, \"output_dim\"):\n output_dim = caller.output_dim\n\n y_new = _check_node_io(\n y,\n receiver_nodes=trainable_nodes,\n expected_dim=output_dim,\n caller=caller,\n io_type=\"target\",\n allow_n_sequences=allow_n_sequences,\n allow_timespans=allow_timespans,\n allow_n_inputs=False,\n )\n\n return x_new, y_new\n\n\nclass DistantFeedback:\n def __init__(self, sender, receiver, callback_type=\"feedback\"):\n self._sender = sender\n self._receiver = receiver\n self._callback_type = callback_type\n\n # used to store a reduced version of the feedback if needed\n # when feedback is a Model (inputs of the feedback Model are suppressed\n # in the reduced version, as we do not need then to re-run them\n # because we assume they have already run during the forward call)\n self._reduced_sender = None\n\n 
self._clamped = False\n self._clamped_value = None\n\n def __call__(self):\n if not self.is_initialized:\n self.initialize()\n return self.call_distant_node()\n\n @property\n def is_initialized(self):\n return self._sender.is_initialized\n\n @property\n def output_dim(self):\n return self._sender.output_dim\n\n @property\n def name(self):\n return self._sender.name\n\n def call_distant_node(self):\n \"\"\"Call a distant Model for feedback or teaching\n (no need to run the input nodes again)\"\"\"\n if self._clamped:\n self._clamped = False\n return self._clamped_value\n\n if self._reduced_sender is not None:\n if len(np.unique([n._fb_flag for n in self._sender.nodes])) > 1:\n input_data = _distant_model_inputs(self._sender)\n\n if hasattr(self._reduced_sender, \"nodes\"):\n return self._reduced_sender.call(input_data)\n else:\n reduced_name = self._reduced_sender.name\n return self._reduced_sender.call(input_data[reduced_name])\n else:\n fb_outputs = [n.state() for n in self._sender.output_nodes]\n if len(fb_outputs) > 1:\n return fb_outputs\n else:\n return fb_outputs[0]\n else:\n return self._sender.state_proxy()\n\n def initialize(self):\n \"\"\"Initialize a distant Model or Node (used as feedback sender or teacher).\"\"\"\n msg = f\"Impossible to get {self._callback_type} \"\n msg += \"from {} for {}: {} is not initialized or has no input/output_dim\"\n\n reduced_model = None\n if hasattr(self._sender, \"input_nodes\"):\n for n in self._sender.input_nodes:\n if not n.is_initialized:\n try:\n n.initialize()\n except RuntimeError:\n raise RuntimeError(\n msg.format(\n self._sender.name,\n self._receiver.name,\n self._sender.name,\n )\n )\n\n input_data = _distant_model_inputs(self._sender)\n reduced_model = _remove_input_for_feedback(self._sender)\n\n if not reduced_model.is_initialized:\n if hasattr(reduced_model, \"nodes\"):\n reduced_model.initialize(x=input_data)\n else:\n reduced_name = reduced_model.name\n reduced_model.initialize(x=input_data[reduced_name])\n self._sender._is_initialized = True\n else:\n try:\n self._sender.initialize()\n except RuntimeError: # raise more specific error\n raise RuntimeError(\n msg.format(\n self._sender.name, self._receiver.name, self._sender.name\n )\n )\n\n self._reduced_sender = reduced_model\n\n def zero_feedback(self):\n \"\"\"A null feedback vector. 
Returns None if the Node receives\n no feedback.\"\"\"\n if hasattr(self._sender, \"output_nodes\"):\n zeros = []\n for output in self._sender.output_nodes:\n zeros.append(output.zero_state())\n if len(zeros) == 1:\n return zeros[0]\n else:\n return zeros\n else:\n return self._sender.zero_state()\n\n def clamp(self, value):\n self._clamped_value = check_n_sequences(\n value,\n expected_dim=self._sender.output_dim,\n caller=self._sender,\n allow_n_sequences=False,\n )\n self._clamped = True\n\n\ndef call(node, x, from_state=None, stateful=True, reset=False):\n \"\"\"One-step call, without input check.\"\"\"\n with node.with_state(from_state, stateful=stateful, reset=reset):\n state = node._forward(node, x)\n node._state = state.astype(node.dtype)\n node._flag_feedback()\n\n return state\n\n\ndef train(\n node,\n X,\n Y=None,\n call_node=True,\n force_teachers=True,\n learn_every=1,\n from_state=None,\n stateful=True,\n reset=False,\n):\n\n seq_len = X.shape[0]\n seq = (\n progress(range(seq_len), f\"Training {node.name}\")\n if seq_len > 1\n else range(seq_len)\n )\n\n with node.with_state(from_state, stateful=stateful, reset=reset):\n states = np.zeros((seq_len, node.output_dim))\n for i in seq:\n x = np.atleast_2d(X[i, :])\n\n y = None\n if node._teacher is not None:\n y = node._teacher()\n elif Y is not None:\n y = np.atleast_2d(Y[i, :])\n\n if call_node:\n s = call(node, x)\n else:\n s = node.state()\n\n if force_teachers:\n node.set_state_proxy(y)\n\n if i % learn_every == 0 or seq_len == 1:\n node._train(node, x=x, y=y)\n\n states[i, :] = s\n\n return states\n\n\nclass _Node(ABC):\n \"\"\"Node base class for type checking and interface inheritance.\"\"\"\n\n _factory_id = -1\n _registry = list()\n _name: str\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls._factory_id = -1\n cls._registry = list()\n\n def __repr__(self):\n klas = type(self).__name__\n hypers = [(str(k), str(v)) for k, v in self._hypers.items()]\n all_params = [\"=\".join((k, v)) for k, v in hypers]\n all_params += [f\"in={self.input_dim}\", f\"out={self.output_dim}\"]\n return f\"'{self.name}': {klas}(\" + \", \".join(all_params) + \")\"\n\n def __setstate__(self, state):\n curr_name = state.get(\"name\")\n if curr_name in type(self)._registry:\n new_name = curr_name + \"-(copy)\"\n state[\"name\"] = new_name\n self.__dict__ = state\n\n def __del__(self):\n try:\n type(self)._registry.remove(self._name)\n except (ValueError, AttributeError):\n pass\n\n def __getattr__(self, item):\n if item in [\"_params\", \"_hypers\"]:\n raise AttributeError()\n if item in self._params:\n return self._params.get(item)\n elif item in self._hypers:\n return self._hypers.get(item)\n else:\n raise AttributeError(f\"{self.name} has no attribute '{str(item)}'\")\n\n def __call__(self, *args, **kwargs) -> np.ndarray:\n return self.call(*args, **kwargs)\n\n def __rshift__(self, other: Union[\"_Node\", Sequence[\"_Node\"]]) -> \"Model\":\n from .ops import link\n\n return link(self, other)\n\n def __rrshift__(self, other: Union[\"_Node\", Sequence[\"_Node\"]]) -> \"Model\":\n from .ops import link\n\n return link(other, self)\n\n def __and__(self, other: Union[\"_Node\", Sequence[\"_Node\"]]) -> \"Model\":\n from .ops import merge\n\n return merge(self, other)\n\n def _get_name(self, name=None):\n if name is None:\n type(self)._factory_id += 1\n _id = self._factory_id\n name = f\"{type(self).__name__}-{_id}\"\n\n if name in type(self)._registry:\n raise NameError(\n f\"Name '{name}' is already taken \"\n 
f\"by another node. Node names should \"\n f\"be unique.\"\n )\n\n type(self)._registry.append(name)\n return name\n\n @property\n def name(self) -> str:\n \"\"\"Name of the Node or Model.\"\"\"\n return self._name\n\n @name.setter\n def name(self, value):\n type(self)._registry.remove(self.name)\n self._name = self._get_name(value)\n\n @property\n def params(self) -> Dict[str, Any]:\n \"\"\"Parameters of the Node or Model.\"\"\"\n return self._params\n\n @property\n def hypers(self) -> Dict[str, Any]:\n \"\"\"Hyperparameters of the Node or Model.\"\"\"\n return self._hypers\n\n @property\n def is_initialized(self) -> bool:\n return self._is_initialized\n\n @property\n @abstractmethod\n def input_dim(self) -> Shape:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def output_dim(self) -> Shape:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def is_trained_offline(self) -> bool:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def is_trained_online(self) -> bool:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def is_trainable(self) -> bool:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def fitted(self) -> bool:\n raise NotImplementedError()\n\n @is_trainable.setter\n @abstractmethod\n def is_trainable(self, value: bool):\n raise NotImplementedError()\n\n def get_param(self, name: str) -> Any:\n if name in self._params:\n return self._params.get(name)\n elif name in self._hypers:\n return self._hypers.get(name)\n else:\n raise NameError(f\"No parameter named '{name}' found in node {self}\")\n\n @abstractmethod\n def copy(\n self, name: str = None, copy_feedback: bool = False, shallow: bool = False\n ) -> \"_Node\":\n raise NotImplementedError()\n\n @abstractmethod\n def initialize(self, x: MappedData = None, y: MappedData = None):\n raise NotImplementedError()\n\n @abstractmethod\n def reset(self, to_state: np.ndarray = None) -> \"_Node\":\n raise NotImplementedError()\n\n @contextmanager\n @abstractmethod\n def with_state(self, state=None, stateful=False, reset=False) -> Iterator[\"_Node\"]:\n raise NotImplementedError()\n\n @contextmanager\n @abstractmethod\n def with_feedback(\n self, feedback=None, stateful=False, reset=False\n ) -> Iterator[\"_Node\"]:\n raise NotImplementedError()\n"
] |
[
[
"numpy.atleast_2d",
"numpy.zeros",
"numpy.unique"
]
] |
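The train() helper above drives online learning one timestep at a time: each row of X is lifted to 2-D with numpy.atleast_2d, a state buffer is preallocated with numpy.zeros, and the node is updated every learn_every steps. The toy loop below mirrors only that control flow; DummyNode and its update rule are invented stand-ins, not reservoirpy nodes.

import numpy as np

class DummyNode:
    def __init__(self, output_dim):
        self.output_dim = output_dim
        self.w = np.zeros(output_dim)
    def call(self, x):
        return x[:, : self.output_dim] + self.w      # fake forward pass
    def train_step(self, x, y):
        self.w += 0.1 * (y - self.call(x)).ravel()   # fake online update

def toy_train(node, X, Y, learn_every=1):
    seq_len = X.shape[0]
    states = np.zeros((seq_len, node.output_dim))
    for i in range(seq_len):
        x = np.atleast_2d(X[i, :])
        y = np.atleast_2d(Y[i, :])
        s = node.call(x)
        if i % learn_every == 0 or seq_len == 1:
            node.train_step(x, y)
        states[i, :] = s
    return states

X = np.random.rand(20, 3)
Y = np.random.rand(20, 2)
print(toy_train(DummyNode(output_dim=2), X, Y, learn_every=2).shape)  # (20, 2)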
jaeikjeon9919/uncertainty-baselines
|
[
"15aad70bb585452d84c0afa74208338f5db5f70e"
] |
[
"experimental/language_structure/psl/psl_model_multiwoz_test.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for MultiWoz rules.\"\"\"\n\nimport tensorflow as tf\nimport constrained_evaluation as eval_model # local file import from experimental.language_structure.psl\nimport data # local file import from experimental.language_structure.psl\nimport psl_model_multiwoz as model # local file import from experimental.language_structure.psl\nimport psl_model_multiwoz_test_util as test_util # local file import from experimental.language_structure.psl\n\n\nclass PslRulesTest(tf.test.TestCase):\n\n def setUp(self):\n super(PslRulesTest, self).setUp()\n self.config = test_util.TEST_MULTIWOZ_CONFIG\n self.data = test_util.DATA\n\n tf.random.set_seed(self.config['default_seed'])\n\n train_dialogs = data.add_features(\n self.data['train_data'],\n vocab_mapping=self.data['vocab_mapping'],\n accept_words=self.config['accept_words'],\n cancel_words=self.config['cancel_words'],\n end_words=self.config['end_words'],\n greet_words=self.config['greet_words'],\n info_question_words=self.config['info_question_words'],\n insist_words=self.config['insist_words'],\n slot_question_words=self.config['slot_question_words'],\n includes_word=self.config['includes_word'],\n excludes_word=self.config['excludes_word'],\n accept_index=self.config['accept_index'],\n cancel_index=self.config['cancel_index'],\n end_index=self.config['end_index'],\n greet_index=self.config['greet_index'],\n info_question_index=self.config['info_question_index'],\n insist_index=self.config['insist_index'],\n slot_question_index=self.config['slot_question_index'],\n utterance_mask=self.config['utterance_mask'],\n pad_utterance_mask=self.config['pad_utterance_mask'],\n last_utterance_mask=self.config['last_utterance_mask'],\n mask_index=self.config['mask_index'])\n train_data = data.pad_dialogs(train_dialogs, self.config['max_dialog_size'],\n self.config['max_utterance_size'])\n raw_train_labels = data.one_hot_string_encoding(self.data['train_labels'],\n self.config['class_map'])\n train_labels = data.pad_one_hot_labels(raw_train_labels,\n self.config['max_dialog_size'],\n self.config['class_map'])\n self.train_ds = data.list_to_dataset(train_data[0], train_labels[0],\n self.config['shuffle_train'],\n self.config['batch_size'])\n\n test_dialogs = data.add_features(\n self.data['test_data'],\n vocab_mapping=self.data['vocab_mapping'],\n accept_words=self.config['accept_words'],\n cancel_words=self.config['cancel_words'],\n end_words=self.config['end_words'],\n greet_words=self.config['greet_words'],\n info_question_words=self.config['info_question_words'],\n insist_words=self.config['insist_words'],\n slot_question_words=self.config['slot_question_words'],\n includes_word=self.config['includes_word'],\n excludes_word=self.config['excludes_word'],\n accept_index=self.config['accept_index'],\n cancel_index=self.config['cancel_index'],\n end_index=self.config['end_index'],\n 
greet_index=self.config['greet_index'],\n info_question_index=self.config['info_question_index'],\n insist_index=self.config['insist_index'],\n slot_question_index=self.config['slot_question_index'],\n utterance_mask=self.config['utterance_mask'],\n pad_utterance_mask=self.config['pad_utterance_mask'],\n last_utterance_mask=self.config['last_utterance_mask'],\n mask_index=self.config['mask_index'])\n test_data = data.pad_dialogs(test_dialogs, self.config['max_dialog_size'],\n self.config['max_utterance_size'])\n raw_test_labels = data.one_hot_string_encoding(self.data['test_labels'],\n self.config['class_map'])\n self.test_labels = data.pad_one_hot_labels(raw_test_labels,\n self.config['max_dialog_size'],\n self.config['class_map'])\n self.test_ds = data.list_to_dataset(test_data[0], self.test_labels[0],\n self.config['shuffle_test'],\n self.config['batch_size'])\n\n def check_greet(self, predictions, mask, class_map):\n for dialog_pred, dialog_mask in zip(predictions, mask):\n first = True\n for utterance_pred, utterance_mask in zip(dialog_pred, dialog_mask):\n if first or utterance_mask == 0:\n first = False\n continue\n if utterance_pred == class_map['greet']:\n return False\n\n return True\n\n def test_psl_rule_1_run_model(self):\n rule_weights = (1.0,)\n rule_names = ('rule_1',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n\n constrained_model = test_util.build_constrained_model(\n [self.config['max_dialog_size'], self.config['max_utterance_size']])\n constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])\n\n logits = eval_model.evaluate_constrained_model(constrained_model,\n self.test_ds,\n psl_constraints)\n predictions = tf.math.argmax(logits[0], axis=-1)\n result = self.check_greet(predictions, self.test_labels[1],\n self.config['class_map'])\n self.assertTrue(result)\n\n def test_psl_rule_1(self):\n rule_weights = (1.0,)\n rule_names = ('rule_1',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_1(logits=tf.constant(logits))\n self.assertEqual(loss, 1.4)\n\n def test_psl_rule_2_run_model(self):\n rule_weights = (10.0,)\n rule_names = ('rule_2',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n\n constrained_model = test_util.build_constrained_model(\n [self.config['max_dialog_size'], self.config['max_utterance_size']])\n constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])\n\n logits = eval_model.evaluate_constrained_model(constrained_model,\n self.test_ds,\n psl_constraints)\n predictions = tf.math.argmax(logits[0], axis=-1)\n self.assertEqual(predictions[2][0], self.config['class_map']['greet'])\n self.assertEqual(predictions[3][0], self.config['class_map']['greet'])\n\n def test_psl_rule_2(self):\n rule_weights = (1.0,)\n rule_names = ('rule_2',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_2(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertEqual(loss, 0.6)\n\n def test_psl_rule_3_run_model(self):\n rule_weights = (1.0,)\n rule_names = ('rule_3',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n\n constrained_model = test_util.build_constrained_model(\n [self.config['max_dialog_size'], self.config['max_utterance_size']])\n constrained_model.fit(self.train_ds, 
epochs=self.config['train_epochs'])\n\n logits = eval_model.evaluate_constrained_model(constrained_model,\n self.test_ds,\n psl_constraints)\n predictions = tf.math.argmax(logits[0], axis=-1)\n self.assertEqual(predictions[0][0],\n self.config['class_map']['init_request'])\n self.assertEqual(predictions[1][0],\n self.config['class_map']['init_request'])\n\n def test_psl_rule_3(self):\n rule_weights = (1.0,)\n rule_names = ('rule_3',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_3(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertEqual(loss, 0.8)\n\n def test_psl_rule_4_run_model(self):\n rule_weights = (1.0,)\n rule_names = ('rule_4',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n\n constrained_model = test_util.build_constrained_model(\n [self.config['max_dialog_size'], self.config['max_utterance_size']])\n constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])\n\n logits = eval_model.evaluate_constrained_model(constrained_model,\n self.test_ds,\n psl_constraints)\n predictions = tf.math.argmax(logits[0], axis=-1)\n self.assertEqual(predictions[1][1],\n self.config['class_map']['second_request'])\n self.assertEqual(predictions[2][1],\n self.config['class_map']['second_request'])\n\n def test_psl_rule_4(self):\n rule_weights = (1.0,)\n rule_names = ('rule_4',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_4(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 1.8, err=1e-6)\n\n def test_psl_rule_5_run_model(self):\n rule_weights = (1.0,)\n rule_names = ('rule_5',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n\n constrained_model = test_util.build_constrained_model(\n [self.config['max_dialog_size'], self.config['max_utterance_size']])\n constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])\n\n logits = eval_model.evaluate_constrained_model(constrained_model,\n self.test_ds,\n psl_constraints)\n predictions = tf.math.argmax(logits[0], axis=-1)\n self.assertNotEqual(predictions[1][1],\n self.config['class_map']['init_request'])\n self.assertNotEqual(predictions[2][1],\n self.config['class_map']['init_request'])\n\n def test_psl_rule_5(self):\n rule_weights = (1.0,)\n rule_names = ('rule_5',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_5(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 1.4, err=1e-6)\n\n def test_psl_rule_6_run_model(self):\n rule_weights = (1.0,)\n rule_names = ('rule_6',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n\n constrained_model = test_util.build_constrained_model(\n [self.config['max_dialog_size'], self.config['max_utterance_size']])\n constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])\n\n logits = eval_model.evaluate_constrained_model(constrained_model,\n self.test_ds,\n psl_constraints)\n predictions = tf.math.argmax(logits[0], axis=-1)\n self.assertNotEqual(predictions[1][0], self.config['class_map']['greet'])\n self.assertNotEqual(predictions[2][0], self.config['class_map']['greet'])\n\n def test_psl_rule_6(self):\n rule_weights = (1.0,)\n rule_names = ('rule_6',)\n 
psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_6(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 1.4, err=1e-6)\n\n def test_psl_rule_7_run_model(self):\n rule_weights = (1.0,)\n rule_names = ('rule_7',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n\n constrained_model = test_util.build_constrained_model(\n [self.config['max_dialog_size'], self.config['max_utterance_size']])\n constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])\n\n logits = eval_model.evaluate_constrained_model(constrained_model,\n self.test_ds,\n psl_constraints)\n predictions = tf.math.argmax(logits[0], axis=-1)\n self.assertEqual(predictions[1][2], self.config['class_map']['end'])\n self.assertEqual(predictions[2][3], self.config['class_map']['end'])\n\n def test_psl_rule_7(self):\n rule_weights = (1.0,)\n rule_names = ('rule_7',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_7(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 1.1, err=1e-6)\n\n def test_psl_rule_8(self):\n rule_weights = (1.0,)\n rule_names = ('rule_8',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_8(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 0.9, err=1e-6)\n\n def test_psl_rule_9(self):\n rule_weights = (1.0,)\n rule_names = ('rule_9',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_9(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 0.8, err=1e-6)\n\n def test_psl_rule_10(self):\n rule_weights = (1.0,)\n rule_names = ('rule_10',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_10(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 0.3, err=1e-6)\n\n def test_psl_rule_11(self):\n rule_weights = (1.0,)\n rule_names = ('rule_11',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_11(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 0.7, err=1e-6)\n\n def test_psl_rule_12(self):\n rule_weights = (1.0,)\n rule_names = ('rule_12',)\n psl_constraints = model.PSLModelMultiWoZ(\n rule_weights, rule_names, config=self.config)\n logits = test_util.LOGITS\n\n loss = psl_constraints.rule_12(\n logits=tf.constant(logits), data=test_util.FEATURES)\n self.assertNear(loss, 0.1, err=1e-6)\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.math.argmax",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.random.set_seed"
]
] |
ebruyasar11/Duelist-Algorithm
|
[
"d740e0a26a33a230434441b5bcf67b1e7a7c5fd2"
] |
[
"Duelist_Algorithm.py"
] |
[
"import numpy as np\nimport random\nimport time\nimport matplotlib.pyplot as mp\n\nclass Duelist_Algorithm():\n\tdef __init__(self,f,x,altdeger,ustdeger,pop=200,sans=0.01,mutasyon=0.1,ogren=0.8,iterasyon=500,nc=5,karistir=False):\n\t\t#Sınıf değişkenlerinin tanımlamaları\n\t\tself.f = f\n\t\tself.x = x\n\t\tself.altdeger = altdeger\n\t\tself.ustdeger = ustdeger\n\t\tself.populasyon = pop\n\t\tself.sans = sans\n\t\tself.mutasyon = mutasyon\n\t\tself.ogren = ogren\n\t\tself.max_iterasyon = iterasyon\n\t\tself.nc = nc\n\t\tself.sampiyon = np.empty((x.__len__()+1,nc),dtype=np.float64)\n\t\tself.kazan_kaybet = np.empty((pop,1),dtype=np.float64)\n\t\tself.savas_puani = np.empty((pop,1),dtype=np.float64)\n\t\tself.iterasyon = 0\n\t\tself.karistir = karistir\n\t\tself.x_dizi = []\n\t\tself.y_dizi = []\n\t\tself.fmin = []\n\t\t#Çok değişkenli optimizasyonun yapılıp yapılmayacağının kontrolünü yapar \n\t\tif type(x) is list:\n\t\t\tself.mult=1\n\t\t\tassert x.__len__()==altdeger.__len__()==ustdeger.__len__() , \"Sinir hatasi, lutfen altdeger ve ustdegeri kontrol edin\"\n\t\telse:\n\t\t\tself.mult=0\n\t\t\n\t\t#Hesaplama için başlangıç matrisi oluşturur\n\t\tif self.mult==1:\n\t\t\tshape=(x.__len__(),pop)\n\t\telse:\n\t\t\tshape=(1,pop)\n\t\tself.matrix=np.empty(shape,dtype=np.float64)\n\t\tself.egitim=np.empty(shape,dtype=np.float64)\n\t\tself.puan=np.empty(pop,dtype=np.float64)\n\t\tself.cozum_degeri=np.empty((x.__len__()+1,pop),dtype=np.float64)\n\t\tself.en_iyi_cozum=np.empty((0,x.__len__()+1),dtype=np.float64)\n\t\t\n\tdef baslangic(self):\n\t\t#Düellocu algoritma adımları\n\t\tself.kayit()\n\t\tself.yeterlilik()\n\t\twhile self.iterasyon < self.max_iterasyon:\n\t\t\tself.sampiyon_sec()\n\t\t\tself.duello()\n\t\t\tself.duellocu_egitimi()\n\t\t\tself.yeterlilik_sonrasi()\n\t\t\tself.ele()\n\t\t\tself.iterasyon=self.iterasyon+1\n\t\tself.sonuc_goster()\n\n\tdef kayit(self):\n\t\t#Düello kayıt\n\t\tfor i in range(0,self.x.__len__()):\n\t\t#Popülasyonu başlatmak için sözde rastgele oluşturucu\n\t\t\tt = int( time.time() * 1000.0 )\n\t\t\tnp.random.seed( ((t & 0xff000000) >> 24) +\n ((t & 0x00ff0000) >> 8) +\n ((t & 0x0000ff00) << 8) +\n ((t & 0x000000ff) << 24) )\n\t\t\t#Oluşturulan matrisi alt ve ust degere göre sınırla\n\t\t\tself.matrix[i,:]=np.random.uniform(size=self.populasyon,low=self.altdeger[i],high=self.ustdeger[i])\n\n\n\tdef yeterlilik(self):\n\t\t#Bu bölüm yalnızca nüfus iki katına çıktığında yeterlilik sonrası için işe yarar\n\t\tif self.puan.shape[0]<self.matrix.shape[1]:\n\t\t\tself.puan=np.append(self.puan,self.puan)\n\t\t#Uygunluk fonksiyonuna göre bir uygunluk degeri hesapla\n\t\tfor i in range(0,self.matrix.shape[1]):\n\t\t\tself.puan[i]=self.f(*self.matrix.T.tolist()[i])\n\t\tself.puani_degerlendir()\n\n\tdef puani_degerlendir(self):\n\t\t#Çözümleri en düşükten en yükseğe doğru sırala\n\t\tself.puan=np.asarray([self.puan])\n\t\tself.cozum_degeri=np.concatenate((self.puan,self.matrix),axis=0).T\n\t\t\n\t\tself.cozum_degeri=self.cozum_degeri[self.cozum_degeri[:,0].argsort()].T\n\t\tself.puan=self.cozum_degeri[0,:]\n\t\tself.matrix=self.cozum_degeri[1:,:]\n\t\n\tdef yeterlilik_sonrasi(self):\n\t\t#Matrisi sıralayabilmek için transpozunu al\n\t\tself.matrix=self.matrix.T\n\t\t#Tekrar karşılaştır\n\t\tself.yeterlilik()\n\t\t\n\tdef sampiyon_sec(self):\n\t\t#En iyi şampiyonu kaydet\n\t\tfor i in range(0,self.nc):\n\t\t\tself.en_iyi_cozum=np.concatenate((self.en_iyi_cozum,np.asarray([self.cozum_degeri[:,i]])))\n\t\t#Şampiyonları tüm sonuçlardan 
ayır\n\t\tself.sampiyon=self.cozum_degeri[:,0:self.nc]\n\t\tprint(f\"{self.iterasyon + 1}. iterasyon, cozum degeri {self.cozum_degeri[:,0][0]}, fmin {self.cozum_degeri[:,0][1::]}\")\n\t\tself.cozum = []\t\n\t\tself.cozum.append(self.cozum_degeri[:,0][1::])\n\t\tself.x_dizi.append(self.cozum[0][0])\n\t\tself.y_dizi.append(self.cozum[0][1])\n\n\t\tif self.fmin.__len__()==0:\n\t\t\tself.fmin.append(self.cozum_degeri[:,0][0])\n\t\telif self.cozum_degeri[:,0][0]<min(self.fmin):\n\t\t\tself.fmin.append(self.cozum_degeri[:,0][0])\n\t\telse:\n\t\t\tself.fmin.append(min(self.fmin))\n\t\t#Benzer şampiyonları tekrar eğit\n\t\tfor j in range(0,self.nc):\n\t\t\tfor i in range(0,self.x.__len__()):\n\t\t\t\tif (random.uniform(0,1)<self.mutasyon):\n\t\t\t\t\tself.matrix[i,j]=random.uniform(self.altdeger[i],self.ustdeger[i])\n\t\t\n\tdef duello(self):\n\t\t#Düellocuları popülasyondan rastgele eşleştir\n\t\tself.matrix=self.matrix.T\n\t\tif(self.karistir==True):\n\t\t\tnp.random.mut(self.matrix)\n\t\t\n\t\t#Düello kuralları\n\t\ti=0\n\t\twhile i<self.matrix.shape[0]:\n\t\t\t#nüfus tekse, eşleşmeyen düellocu otomatik olarak kazanır\n\t\t\tif(i==self.matrix.shape[0]-1):\n\t\t\t\tself.kazan_kaybet[i]=1\n\t\t\telse:\n\t\t\t#iki düellocu için savaş puanını hesapla\n\t\t\t\ttempmatrix=self.matrix.tolist()\n\t\t\t\tself.savas_puani[i]=self.f(*tempmatrix[i])*(1+(self.sans+(random.uniform(0,1)*self.sans)))\n\t\t\t\tself.savas_puani[i+1]=self.f(*tempmatrix[i+1])*(1+(self.sans+(random.uniform(0,1)*self.sans)))\n\t\t\t#savaş puanına göre kazananı ve kaybedeni belirle\n\t\t\t\tif(self.savas_puani[i]>self.savas_puani[i+1]):\n\t\t\t\t\tself.kazan_kaybet[i]=1\n\t\t\t\t\tself.kazan_kaybet[i+1]=0\n\t\t\t\telse:\n\t\t\t\t\tself.kazan_kaybet[i]=0\n\t\t\t\t\tself.kazan_kaybet[i+1]=1\n\t\t\ti=i+2\n\t\n\tdef duellocu_egitimi(self):\n\t\t#Kazanan ve kaybedene göre eğit\n\t\tself.egitim=np.copy(self.matrix)\n\t\tfor j in range(0,self.x.__len__()):\n\t\t\tfor i in range(0,self.populasyon):\n\t\t\t\tif self.kazan_kaybet[i]==1:\n\t\t\t\t#kazanan mutasyona uğrayarak kendini geliştirsin\n\t\t\t\t\tif random.uniform(0,1)<self.mutasyon:\n\t\t\t\t\t\tself.egitim[i,j]=random.uniform(self.altdeger[j],self.ustdeger[j])\n\t\t\t\telse:\n\t\t\t\t#Kaybeden kazanandan öğrensin\n\t\t\t\t\tif random.uniform(0,1)<self.ogren:\n\t\t\t\t\t\tif (i%2==0):\n\t\t\t\t\t\t\tself.egitim[i,j]=self.matrix[i+1,j]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.egitim[i,j]=self.matrix[i-1,j]\n\t\t#Matrise yeni eğitilmiş düellocu ekle\n\t\tself.matrix=np.concatenate((self.matrix,self.egitim),axis=0)\n\t\n\tdef ele(self):\n\t\tself.matrix=self.matrix[:,:self.populasyon]\n\t\t\n\tdef sonuc_goster(self):\n\t\tsonuc=self.en_iyi_cozum[self.en_iyi_cozum[:,0].argsort()]\n\t\tprint(\"En iyi cozum degerleri:\",sonuc[0][1::], \"En iyi cozum\", sonuc[0][0])\n\t\tfig = fig = mp.figure()\n\t\tax1 = fig.add_subplot(211)\n\t\tax1.plot(self.fmin,'r.-')\n\t\tax1.legend(['MinUygunluk'])\n\t\tax2 = fig.add_subplot(212)\n\t\tax2.plot(self.x_dizi,'b.-')\n\t\tax2.plot(self.y_dizi,'g--')\n\t\tmp.legend(['x1','x2'])\n\t\tmp.show()\n\t\t\n\t\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.random.seed",
"numpy.asarray",
"numpy.concatenate",
"numpy.copy",
"numpy.append",
"numpy.random.mut",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] |
Ernstsen/Pedestron
|
[
"0c5aa35881561bcd0acf5de8939472efd6409256"
] |
[
"tools/test_crowdhuman.py"
] |
[
"import argparse\nimport os\nimport os.path as osp\nimport shutil\nimport tempfile\nimport json\nimport time\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import load_checkpoint, get_dist_info\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\n\nfrom mmdet.apis import init_dist\nfrom mmdet.core import results2json, coco_eval, wrap_fp16_model\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmdet.models import build_detector\n\nfrom tools.crowdhuman.eval_demo import validate\n\n\ndef single_gpu_test(model, data_loader, show=False, save_img=False, save_img_dir=''):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=not show, **data)\n results.append(result)\n\n if show:\n model.module.show_result(data, result, dataset.img_norm_cfg, save_result=save_img, result_name=save_img_dir + '/' + str(i)+'.jpg')\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef multi_gpu_test(model, data_loader, tmpdir=None):\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n results.append(result)\n\n if rank == 0:\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n results = collect_results(results, len(dataset), tmpdir)\n\n return results\n\n\ndef collect_results(result_part, size, tmpdir=None):\n rank, world_size = get_dist_info()\n # create a tmp dir if it is not specified\n if tmpdir is None:\n MAX_LEN = 512\n # 32 is whitespace\n dir_tensor = torch.full((MAX_LEN, ),\n 32,\n dtype=torch.uint8,\n device='cuda')\n if rank == 0:\n tmpdir = tempfile.mkdtemp()\n tmpdir = torch.tensor(\n bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n dir_tensor[:len(tmpdir)] = tmpdir\n dist.broadcast(dir_tensor, 0)\n tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n else:\n mmcv.mkdir_or_exist(tmpdir)\n # dump the part result to the dir\n mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))\n dist.barrier()\n # collect all parts\n if rank != 0:\n return None\n else:\n # load results of all parts from tmp dir\n part_list = []\n for i in range(world_size):\n part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))\n part_list.append(mmcv.load(part_file))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n # remove tmp dir\n shutil.rmtree(tmpdir)\n return ordered_results\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMDet test detector')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('checkpoint_start', type=int, default=1)\n parser.add_argument('checkpoint_end', type=int, default=100)\n parser.add_argument('--out', help='output result file')\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],\n help='eval types')\n parser.add_argument('--show', 
action='store_true', help='show results')\n parser.add_argument('--save_img', action='store_true', help='save result image')\n parser.add_argument('--save_img_dir', type=str, help='the dir for result image', default='')\n parser.add_argument('--tmpdir', help='tmp dir for writing some results')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--mean_teacher', action='store_true', help='test the mean teacher pth')\n\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n args = parse_args()\n\n if args.out is not None and not args.out.endswith(('.json', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n for i in range(args.checkpoint_start, args.checkpoint_end):\n cfg = mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = build_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n if not args.mean_teacher:\n while not osp.exists(args.checkpoint + str(i) + '.pth'):\n time.sleep(5)\n while i+1 != args.checkpoint_end and not osp.exists(args.checkpoint + str(i+1) + '.pth'):\n time.sleep(5)\n checkpoint = load_checkpoint(model, args.checkpoint + str(i) + '.pth', map_location='cpu')\n else:\n while not osp.exists(args.checkpoint + str(i) + '.pth.stu'):\n time.sleep(5)\n while i+1 != args.checkpoint_end and not osp.exists(args.checkpoint + str(i+1) + '.pth.stu'):\n time.sleep(5)\n checkpoint = load_checkpoint(model, args.checkpoint + str(i) + '.pth.stu', map_location='cpu')\n checkpoint['meta'] = dict()\n # old versions did not save class info in checkpoints, this walkaround is\n # for backward compatibility\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = dataset.CLASSES\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs = single_gpu_test(model, data_loader, args.show, args.save_img, args.save_img_dir)\n else:\n model = MMDistributedDataParallel(model.cuda())\n outputs = multi_gpu_test(model, data_loader, args.tmpdir)\n\n res = []\n for id, boxes in enumerate(outputs):\n boxes=boxes[0]\n if type(boxes) == list:\n boxes = boxes[0]\n boxes[:, [2, 3]] -= boxes[:, [0, 1]]\n if len(boxes) > 0:\n for box in boxes:\n # box[:4] = box[:4] / 0.6\n temp = dict()\n temp['image_id'] = id+1\n temp['category_id'] = 1\n temp['bbox'] = box[:4].tolist()\n temp['score'] = float(box[4])\n res.append(temp)\n\n with open(args.out, 'w') as f:\n json.dump(res, f)\n\n MRs = validate('datasets/crowdhuman/validation.json', args.out)\n print(MRs)\n print('Checkpoint %d: [Reasonable: %.2f%%], [Bare: 
%.2f%%], [Partial: %.2f%%], [Heavy: %.2f%%]'\n % (i, MRs[0] * 100, MRs[1] * 100, MRs[2] * 100, MRs[3] * 100))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.full",
"torch.distributed.broadcast",
"torch.no_grad",
"torch.distributed.barrier"
]
] |
rahulkumar1112/Audio-Classification
|
[
"04d16703ccc2fb10cc1ba92850364ea49b9a5bfa"
] |
[
"model.py"
] |
[
"import os\nfrom scipy.io import wavfile\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.layers import Conv2D, MaxPool2D, Flatten, LSTM\nfrom keras.layers import Dropout, Dense, TimeDistributed\nfrom keras.models import Sequential\nfrom keras.utils import to_categorical\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom tqdm import tqdm\nfrom python_speech_features import mfcc\n\ndf = pd.read_csv('instruments.csv')\ndf.set_index('fname', inplace=True)\n\nfor f in df.index:\n rate, signal = wavfile.read('clean/'+f)\n df.at[f, 'length'] = signal.shape[0]/rate\n\nclasses = list(np.unique(df.label))\nclass_dist = df.groupby(['label'])['length'].mean()\n\nfig, ax = plt.subplots()\nax.set_title('Class Distribution', y=1.08)\nax.pie(class_dist, labels=class_dist.index, autopct='%1.1f%%',\n shadow=False, startangle=90)\nax.axis('equal')\nplt.show()\n"
] |
[
[
"pandas.read_csv",
"numpy.unique",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"scipy.io.wavfile.read"
]
] |
lancerane/NIPS-2018-AI-for-Prosthetics
|
[
"7689646e2d079ffcbcde898ece25d2cf78c132c7"
] |
[
"osim-rl/examples/arm.py"
] |
[
"import os\r\nfrom osim.env import OsimEnv\r\nimport pprint\r\nimport numpy as np\r\n\r\nclass Arm3dEnv(OsimEnv):\r\n model_path = os.path.join(os.path.dirname(__file__), '../osim/models/MoBL_ARMS_J_Simple_032118.osim')\r\n time_limit = 200\r\n current_objective = np.array([0,0,0])\r\n \r\n def is_done(self):\r\n # End the simulation if the pelvis is too low\r\n state_desc = self.get_state_desc()\r\n return False\r\n\r\n def get_observation(self):\r\n state_desc = self.get_state_desc()\r\n\r\n # Augmented environment from the L2R challenge\r\n res = []\r\n\r\n # Map some of the state variables to the observation vector\r\n for body_part in state_desc[\"body_pos_rot\"].keys():\r\n res = res + state_desc[\"body_pos_rot\"][body_part][2:]\r\n res = res + state_desc[\"body_pos\"][body_part][0:2]\r\n res = res + state_desc[\"body_vel_rot\"][body_part][2:]\r\n res = res + state_desc[\"body_vel\"][body_part][0:2]\r\n res = res + state_desc[\"body_acc_rot\"][body_part][2:]\r\n res = res + state_desc[\"body_acc\"][body_part][0:2]\r\n\r\n for joint in state_desc[\"joint_pos\"].keys():\r\n res = res + state_desc[\"joint_pos\"][joint]\r\n res = res + state_desc[\"joint_vel\"][joint]\r\n res = res + state_desc[\"joint_acc\"][joint]\r\n\r\n res = res + state_desc[\"misc\"][\"mass_center_pos\"] + state_desc[\"misc\"][\"mass_center_vel\"] + state_desc[\"misc\"][\"mass_center_acc\"]\r\n res += self.current_objective.tolist()\r\n\r\n res = np.array(res)\r\n res[np.isnan(res)] = 0\r\n\r\n return res\r\n\r\n def get_observation_space_size(self):\r\n return 168\r\n\r\n def reset_objective(self):\r\n self.current_objective = np.random.uniform(-0.5,0.5,3)\r\n\r\n def reset(self):\r\n print(self.reward())\r\n self.reset_objective()\r\n return super(Arm3dEnv, self).reset()\r\n\r\n def reward(self):\r\n # Get the current state and the last state\r\n prev_state_desc = self.get_prev_state_desc()\r\n if not prev_state_desc:\r\n return 0\r\n state_desc = self.get_state_desc()\r\n\r\n res = 0\r\n\r\n # # Penalize movement of the pelvis\r\n # res = -(prev_state_desc[\"misc\"][\"mass_center_pos\"][0] - state_desc[\"misc\"][\"mass_center_pos\"][0])**2\\\r\n # -(prev_state_desc[\"misc\"][\"mass_center_pos\"][1] - state_desc[\"misc\"][\"mass_center_pos\"][1])**2\r\n\r\n # # Penalize very low position of the pelvis\r\n # res += -(state_desc[\"joint_pos\"][\"ground_pelvis\"][2] < 0.8)\r\n \r\n return -np.linalg.norm(np.array(state_desc[\"markers\"][\"Handle\"][\"pos\"]) - self.current_objective)\r\n\r\nenv = Arm3dEnv(visualize=True)\r\n\r\nif __name__ == '__main__':\r\n observation = env.reset()\r\n for i in range(200):\r\n action = env.action_space.sample()\r\n observation, reward, done, info = env.step(action)\r\n if done:\r\n env.reset()\r\n"
] |
[
[
"numpy.isnan",
"numpy.random.uniform",
"numpy.array"
]
] |
flowerah/PythoMS
|
[
"7d500f20219157657023c8c0a930f580d3768191"
] |
[
"pythoms/mzml.py"
] |
[
"\"\"\"\nIGNORE:\nCHANGELOG:\n-\n---2.7 building\n\nto add:\n try to extract timepoints and tic from chromatogramList (x values are sorted, so this probably won't work)\nIGNORE\n\"\"\"\nimport sys\nimport os\nimport zlib\nimport gzip\nimport base64\nimport struct\nimport subprocess\nimport xml.dom.minidom\nimport scipy as sci\nfrom random import random\nfrom .progress import Progress\nfrom .spectrum import Spectrum\nfrom .psims import CVParameterSet, stringtodigit\nfrom .tome import resolution, locate_in_list, trimspectrum\n\n# decoding formats for decoding mzML binary data array strings\ndecode_formats = {\n 'MS:1000519': ['<', 'i'], # signed 32-bit little-endian integer\n # 'MS:1000520':['',''], # [OBSOLETE] Signed 16-bit float\n 'MS:1000521': ['<', 'f'], # 32-bit precision little-endian floating point conforming to IEEE-754\n 'MS:1000522': ['<', 'l'], # Signed 64-bit little-endian integer\n 'MS:1000523': ['<', 'd'], # 64-bit precision little-endian floating point conforming to IEEE-754.\n}\n\n\nclass BoundsError(Warning):\n \"\"\"A warning class to handle bounds errors when integrating (used only by PyRSIR)\"\"\"\n\n def __init__(self):\n self.warned = {}\n\n def printwarns(self):\n \"\"\"prints the number of warnings if merited\"\"\"\n if len(self.warned) > 0:\n sys.stdout.write('The following peaks exceeded the bounds of the spectrum n number of times:\\n')\n for name in self.warned:\n sys.stdout.write('\"%s\": %d\\n' % (name, self.warned[name]))\n\n def warn(self, name, intstart, intend, mzstart, mzend):\n \"\"\"warns the user if there was a mismatch\"\"\"\n if name not in self.warned:\n sys.stdout.write(\n '\\nThe peak \"%s\" (%s-%s) is outside of the bounds of the spectrum being summed m/z %.1f-%.1f\\n' % (\n name, str(intstart), str(intend), mzstart, mzend))\n self.warned[name] = 1\n else:\n self.warned[name] += 1\n\n\ndef branch_attributes(branch: xml.dom.minidom.Element):\n \"\"\"\n Pulls all the attributes of an xml.dom.minidom xml branch.\n These are generally things like index, id, etc.\n\n :param xml.dom.minidom branch: An xml.dom.minidom object.\n :return: A dictionary of attributes with each key being the attribute name and its value being the value of that\n attribute.\n :rtype: dict\n\n **Notes**\n\n The script will attempt to convert any values to float or\n integer in order to reduce TypeErrors when trying to use\n the extracted values.\n \"\"\"\n return {key: stringtodigit(val) for key, val in branch.attributes.items()}\n\n\ndef branch_cvparams(branch):\n \"\"\"\n Interprets an xml branch as CVParams\n\n :param branch:\n :return: controlled value parameter set with values\n :rtype: CVParameterSet\n \"\"\"\n out = {}\n for cvParam in branch.getElementsByTagName('cvParam'):\n acc = cvParam.getAttribute('accession') # accession key\n out[acc] = {}\n for attribute, value in cvParam.attributes.items(): # pull all the attributes\n if attribute != 'accession':\n # attempt to convert to integer or float, keep as string otherwise\n out[acc][attribute] = stringtodigit(value)\n return CVParameterSet(**out)\n\n\ndef file_present(filepath):\n \"\"\"checks for the presence of the specified file or directory in the current working directory\"\"\"\n tf = os.path.isfile(filepath) # look for file first\n if tf is False: # if file cannot be found, look for directory\n tf = os.path.isdir(filepath)\n return tf\n\n\ndef decodeformat(p: CVParameterSet, speclen: int):\n \"\"\"\n Determines the decode format from the accession parameter\n\n :param p: extracted CVParamterSet of the data 
array\n :param speclen: length of the spectrum (retrievable from the XML file)\n :return: decode format\n :rtype: str\n \"\"\"\n for key in set(decode_formats) & p.keys(): # find the set combination of the possibilities\n return f'{decode_formats[key][0]}{speclen}{decode_formats[key][1]}' # create the decode format\n\n\ndef gettext(nodelist):\n \"\"\"gets text from a simple XML object\"\"\"\n rc = []\n for node in nodelist:\n if node.nodeType == node.TEXT_NODE:\n rc.append(node.data)\n return ''.join(rc)\n\n\ndef extract_spectrum(spectrum: xml.dom.minidom.Element, units: bool = False):\n \"\"\"\n Extracts and converts binary data to two lists.\n\n :param spectrum: A spectrum branch element. This element is expected to have two child nodes containing\n binaryDataArrays.\n :param units: whether to extract the units from the spectrum\n :return:\n \"\"\"\n \"\"\"pulls and converts binary data to a list\"\"\"\n # spectrum length (defined in the spectrum attricubes)\n speclen = int(spectrum.getAttribute('defaultArrayLength'))\n out = []\n if units is True:\n units = []\n for binary in spectrum.getElementsByTagName('binaryDataArray'):\n p = branch_cvparams(binary) # grab cvparameters\n\n # determine whether the binary string is zlib compressed\n compressed = True if 'MS:1000574' in p else False\n\n # determine unpack format\n unpack_format = decodeformat(p, speclen)\n\n # pull the binary string\n string = gettext(binary.getElementsByTagName('binary')[0].childNodes)\n\n # decode the string\n decoded = base64.standard_b64decode(string)\n\n # if the string is compressed, decompress\n if compressed is True:\n decoded = zlib.decompress(decoded)\n\n # unpack the string\n out.append(list(struct.unpack(unpack_format, decoded)))\n\n if units is not False:\n for cv in p:\n if cv.unit is not None:\n units.append(cv.unit)\n break\n if units is not False: # extends the units onto out\n out.extend(units)\n return out\n\n\ndef pw_convert(filename, bit=64, compression=True, gzip=True, verbose=True):\n \"\"\"\n Runs msconvert.exe from ProteoWizard to convert Waters .RAW format to .mzXML\n which can then be parsed by python.\n\n module requirements: os, subprocess, sys\n\n ProteoWizard must be installed for this script to function.\n go to\n http://proteowizard.sourceforge.net/downloads.shtml\n to download\n\n This script assumes that the ProteoWizard is installed under either\n c:\\program files\\proteowizard\n or\n c:\\program files (x86)\\proteowizard\n\n If you use this python script to convert to mzML, you should cite the paper of the folks who wrote the program\n Chambers, M.C. 
Nature Biotechnology 2012, 30, 918-920\n doi 10.1038/nbt.2377\n \"\"\"\n\n def find_all(fname, path):\n \"\"\"\n Finds all files of a given name within a specified directory.\n Adapted from http://stackoverflow.com/questions/1724693/find-a-file-in-python\n\n Module dependancies: os\n \"\"\"\n locations = []\n for root, dirs, files in os.walk(path):\n if fname in files:\n locations.append(os.path.join(root, fname))\n return locations\n\n if sys.platform != 'win32':\n raise OSError(\n 'The function that converts to mzML is limited to Windows operating systems.\\n'\n 'You can manually convert to *.mzML using the proteowizard standalone package '\n 'and supply that mzML file to this script')\n locs = []\n for val in ['c:\\\\program files\\\\proteowizard',\n 'c:\\\\program files (x86)\\\\proteowizard']: # searches for msconvert.exe in expected folders\n locs.extend(find_all('msconvert.exe', val))\n\n if len(locs) == 0: # if script cannot find msconvert.exe\n raise IOError(\n 'The python script could not find msconvert.exe\\n'\n 'Please ensure that ProteoWizard is installed in either:\\n'\n 'c:\\\\program files\\\\proteowizard\\nor\\nc:\\\\program files (x86)\\\\proteowizard')\n\n outname = filename[:-4] + '.mzML'\n callstring = locs[-1] + ' \"' + filename + '\" --mzML'\n if bit in [32, 64]:\n callstring += ' --' + str(bit)\n else:\n raise ValueError(\n 'ProteoWizard conversion was called with an invalid floating point precision \"%s\".' % str(bit))\n\n if compression is True: # call for compression\n callstring += ' --zlib'\n\n exten = '*.mzML'\n if gzip is True: # call to gzip entire mzml\n callstring += ' --gzip'\n outname += '.gz'\n exten += '.gz'\n print('callstring', callstring)\n\n if verbose is True:\n callstring += ' --verbose'\n sys.stdout.write('Generating %s file from %s' % (exten, filename))\n sys.stdout.flush()\n subprocess.call(callstring)\n sys.stdout.write(' DONE\\n')\n sys.stdout.flush()\n else:\n subprocess.call(callstring)\n return outname\n\n\ndef fix_extension(fn):\n \"\"\"tries to fix invalid file extensions\"\"\"\n oopsx = {'.mzm': 'l', '.mz': 'ml', '.m': 'zml', '.': 'mzml'} # incomplete mzml extensions\n oopsr = {'.ra': 'w', '.r': 'aw', '.': 'raw'} # incomplete raw extionsions\n oopsg = {'.mzml.g': 'z', '.mzml.': 'gz', '.mzml': '.gz', '.mzm': 'l.gz', '.mz': 'ml.gz', '.m': 'zml.gz',\n '.': 'mzml.gz'} # incomplete gz extensions\n # looks for missing extensions first\n if file_present(fn + '.mzml.gz') is True:\n return fn + '.mzml.gz'\n if file_present(fn + '.mzml') is True:\n return fn + '.mzml'\n for key in oopsg: # tries to complete mzml.gz shortenings\n if fn.lower().endswith(key) is True:\n if file_present(fn + oopsg[key]) is True:\n return fn + oopsg[key]\n for key in oopsx: # tries to complete mzml shortenings\n if fn.lower().endswith(key) is True:\n if file_present(fn + oopsx[key]) is True:\n return fn + oopsx[key]\n for key in oopsr: # tries to complete raw shortenings\n if fn.lower().endswith(key) is True:\n if file_present(fn + oopsr[key]) is True:\n return fn + oopsr[key]\n if file_present(fn + '.raw') is True: # finally looks for raw file\n return fn + '.raw'\n raise FileNotFoundError(f'The file {fn} could not be located in the current working directory')\n\n\ndef fps(branch):\n \"\"\"\n extracts function #, process #, and scan # from the idstring of a spectrum branch\n returns function, process, scan as integers\n \"\"\"\n idstring = branch.getAttribute('id').split() # pull id string from scan attribute\n return [int(x.split('=')[1]) for x in idstring] # 
return each value after converting to integer\n\n\ndef scan_properties(hand):\n \"\"\"determines the scan properties of the provided spectrum\"\"\"\n mstypes = { # ms accession keys and their respective names (for spectrum identification)\n 'MS:1000928': 'calibration spectrum',\n 'MS:1000294': 'mass spectrum',\n 'MS:1000322': 'charge inversion mass spectrum',\n 'MS:1000325': 'constant neutral gain spectrum',\n 'MS:1000326': 'constant neutral loss spectrum',\n 'MS:1000328': 'e/2 mass spectrum',\n 'MS:1000341': 'precursor ion spectrum',\n 'MS:1000343': 'product ion spectrum',\n 'MS:1000579': 'MS1 spectrum',\n 'MS:1000580': 'MSn spectrum',\n 'MS:1000581': 'CRM spectrum',\n 'MS:1000582': 'SIM spectrum',\n 'MS:1000583': 'SRM spectrum',\n }\n othertypes = { # other accession keys (non-MS)\n 'MS:1000620': 'PDA spectrum',\n 'MS:1000804': 'electromagnetic radiation spectrum',\n 'MS:1000805': 'emission spectrum',\n 'MS:1000806': 'absorption spectrum',\n }\n out = {}\n if isinstance(hand, CVParameterSet): # handed a cvparam class object (expected)\n p = hand\n else: # handed a tree or branch (generate the cvparam class object)\n p = CVParameterSet(hand)\n for acc in p.keys() & mstypes.keys(): # check for ms spectrum\n out['acc'] = acc # accession code\n out['name'] = mstypes[acc] # name of spectrum\n out['type'] = 'MS' # it is a mass spectrum\n out['level'] = p['MS:1000511'].value # ms level\n out['window'] = [p['MS:1000501'].value, p['MS:1000500'].value] # scan window\n if 'MS:1000129' in p: # negative scan\n out['mode'] = '-'\n elif 'MS:1000130' in p: # positive scan\n out['mode'] = '+'\n if 'MS:1000827' in p: # if there is an isolation window target m/z\n out['target'] = p['MS:1000827'].value\n # if MSn > 2, not sure how to handle this (will have to be hard coded later as I have no examples)\n elif out['level'] > 2:\n raise ValueError(\n 'This script has not been coded to handle MSn > 2, please contact the author of the class')\n return out\n\n for acc in p.keys() & othertypes.keys(): # if the scan is something else\n out['acc'] = acc # accession code\n out['name'] = othertypes[acc] # name of spectrum\n if 'MS:1000804' in p: # if it is a UV-Vis\n out['type'] = 'UV'\n else: # other other type (not handled by script)\n raise KeyError(\n 'The script has not been coded to handle spectra types other than MS and UV-Vis. '\n 'Please contact the authors to get this functionality included.')\n return out\n\n\nclass mzML(object):\n def __init__(self,\n filename: str,\n verbose: bool = True,\n precision: int = 64,\n compression: bool = True,\n gzip_file: bool = True,\n obo: str = None,\n ftt: bool = False,\n **kwargs\n ):\n \"\"\"\n A class for loading and extracting data from an mzML file.\n\n :param str filename: The name of the mzML or mass spectrometric data file. Accepted file types are listed below,\n and this script can automatically convert some proprietary file types to mzML by calling ProteoWizard\n (see notes).\n :param bool verbose: Chatty enable or disable. It can be useful to enable this when processing large files or long\n acquisitions, as many of the methods have progress reporters.\n :param int precision: The floating point precision to use if converting to mzML. Default 64 (although this\n appears to have a minimal effect in the experience of the author). This can be set to 32 to decrease mzML\n file sizes.\n :param bool compression: Whether or not to compress the mzML files when converting. 
This can decrease file\n sizes at a slight cost in processing time.\n :param bool gzip: Whether or not to gzip the mzML files when converting. This substantially decreases file\n sizes (mass spectrometric data compresses very well when gzipped). This will slightly increase processing\n time.\n :param str obo: A specific path or URL to an *.obo file defining the accession keys used in mzML files. If this\n is not specified, the default accession URL will be used to download the required obo file. This should not\n be necessary normally, as most of the commonly encountered accession keys are hard-coded into this\n script. The script will raise an error if it encounters an undefined accession key.\n :param bool ftt: Whether to run the function_timetic() method on initialization. This is useful if you require\n access to the total ion current and time lists for each function in the mzML file. This does increase file\n load times quite significantly (~6x slower).\n\n **Notes**\n\n An mzML file is a data format for mass spectrometric data which can be parsed by python (avoiding the pitfalls\n associated with the proprietary files usually generated by the mass spectrometers themselves). The mzML file\n structures are expected to conform to those outlined in the HUPO Proteomics Standards Working Group. More\n information can be found at https://raw.githubusercontent.com/HUPO-PSI/psi-ms-CV/master/psi-ms.obo\n\n If you wish to use the format conversion functionality of this script, you will need to download and install\n ProteoWizard, which can be found at http://proteowizard.sourceforge.net/\n\n \"\"\"\n # store keyword settings\n self.verbose = verbose\n self.precision = precision\n self.compression = compression\n self.gzip_file = gzip_file\n self.obo = obo\n\n self.filename = self.check_for_file(filename)\n\n # load file and determine key properties\n if self.verbose is True:\n # todo why is this not an instantiation\n self.Progress = Progress\n sys.stdout.write('Loading %s into memory' % self.filename)\n sys.stdout.flush()\n if self.filename.lower().endswith('.mzml.gz'): # if mzml is gzipped\n handle = gzip.open(self.filename) # unzip the file\n else:\n handle = self.filename\n try:\n self.tree = xml.dom.minidom.parse(handle) # full mzML file\n except:\n raise IOError(\n 'The mzML file \"%s\" could not be loaded. The file is either unsupported, corrupt, or incomplete.' 
% self.filename)\n\n self.nscans = int(self.tree.getElementsByTagName('spectrumList')[0].getAttribute('count')) # number of spectra\n self.nchroms = int(\n self.tree.getElementsByTagName('chromatogramList')[0].getAttribute('count')) # number of chromatograms\n self.functions = {}\n for spectrum in self.tree.getElementsByTagName('spectrum'):\n func, proc, scan = fps(spectrum) # extract each value and convert to integer\n if func not in self.functions: # if function is not defined yet\n p = branch_cvparams(spectrum) # pull spectrum's cvparameters\n self.functions[func] = {\n 'sr': [int(spectrum.getAttribute('index')), None], # the scan index range that the function spans\n 'nscans': 1, # number of scans\n }\n self.functions[func].update(scan_properties(p)) # update with scan properties\n else:\n self.functions[func]['sr'][1] = int(\n spectrum.getAttribute('index')) # otherwise set the scan index range to the current index\n self.functions[func]['nscans'] += 1\n p = branch_cvparams(spectrum) # pull properties of final spectrum\n self.duration = p['MS:1000016'].value # final start scan time\n\n if self.verbose is True:\n sys.stdout.write(' DONE\\n')\n\n self.BE = BoundsError() # load warning instance for integration\n self.ftt = False\n if ftt is True:\n self.function_timetic()\n\n def __str__(self):\n \"\"\"The string that is returned when printed\"\"\"\n return f'{self.__class__.__name__} {self.nscans} spectra, {self.nchroms} chromatograms'\n\n def __repr__(self):\n \"\"\"The representation that is returned\"\"\"\n return \"%s('%s')\" % (self.__class__.__name__, self.filename)\n\n def __len__(self):\n return self.nscans\n\n def __getitem__(self, ind):\n \"\"\"retrieves a scan or summed scans\"\"\"\n if isinstance(ind, slice): # if getitem is trying to slice\n \"\"\"\n returns the summed scans with the supplied indicies\n slice will assume that the intended function is 1\n \"\"\"\n if ind.start is None: # no start\n start = 0\n else:\n start = ind.start\n if ind.stop is None: # no stop\n stop = self.functions[1]['sr'][1]\n else:\n stop = ind.stop\n return self.sum_scans(start, stop, mute=True)\n\n elif type(ind) is int: # scan index number\n \"\"\"will return the spectrum of the scan index provided\"\"\"\n if ind < 0 or ind > self.nscans:\n raise IndexError(\"The scan index number #%d is outside of the mzML's scan index range (0-%d)\" % (\n ind, self.nscans - 1))\n for spectrum in self.tree.getElementsByTagName('spectrum'):\n attr = branch_attributes(spectrum)\n if attr['index'] == ind:\n return extract_spectrum(spectrum)\n\n elif type(ind) is float: # timepoint in function 1\n \"\"\"float will assume the intended function was 1\"\"\"\n if ind < 0 or ind > self.duration:\n raise ValueError(\n \"The supplied time %.3f is outside of this file's time range (0 - %.3f)\" % (ind, self.duration))\n ind = self.scan_index(ind)\n for spectrum in self.tree.getElementsByTagName('spectrum'):\n attr = branch_attributes(spectrum)\n if attr['index'] == ind:\n return extract_spectrum(spectrum)\n\n def foreachchrom(self, fn):\n \"\"\"\n a decorator function that will apply the supplied function to every chromatogram in the mzml file\n the supplied function will be handed the chromatogram XML object as the first argument\n the decorated function will return a list of outputs of the supplied function where each index corresponds to a scan\n\n e.g.::\n loaded = mzML(filename)\n\n @loaded.foreachchrom\n def do_this(chrom):\n # extract the attributes using the mzML.attributes() method\n attr = 
loaded.attributes(chrom)\n return attr['id'] # return the name of the chromatogram\n\n do_this()\n\n \"\"\"\n\n def foreachchrom(*args, **kwargs):\n \"\"\"decorates the supplied function to run for every scan\"\"\"\n prog = Progress(string='Applying function \"%s\" to chromatogram' % fn.__name__, last=self.nchroms)\n out = []\n for chromatogram in self.tree.getElementsByTagName('chromatogram'):\n if self.verbose is True:\n prog.write(int(chromatogram.getAttribute('index')) + 1)\n out.append(fn(chromatogram, *args, **kwargs))\n if self.verbose is True:\n prog.fin()\n return out\n\n return foreachchrom\n\n def foreachscan(self, fn):\n \"\"\"\n a decorator function that will apply the supplied function to every spectrum in the mzml file\n the supplied function will be handed the spectrum XML object as the first argument\n the decorated function will return a list of outputs of the supplied function where each index corresponds to a scan\n\n e.g.::\n\n loaded = mzML(filename)\n\n @loaded.foreachscan\n def do_this(scan):\n p = loaded.cvparam(scan) # pull spectrum's cvparameters\n sst = p['MS:1000016'] # start scan time\n x,y = loaded.extract_spectrum(scan,False) # extract the x,y spectrum\n # return the start scan time, x list, and y list\n return sst,x,y\n\n do_this() # do it\n \"\"\"\n\n def foreachscan(*args, **kwargs):\n \"\"\"decorates the supplied function to run for every scan\"\"\"\n prog = Progress(string='Applying function \"%s\" to scan' % fn.__name__, last=self.nscans)\n out = []\n for spectrum in self.tree.getElementsByTagName('spectrum'):\n if self.verbose is True:\n prog.write(int(spectrum.getAttribute('index')) + 1)\n out.append(fn(spectrum, *args, **kwargs))\n if self.verbose is True:\n prog.fin()\n return out\n\n return foreachscan\n\n def associate_to_function(self, affin=None, level=None, dct=None):\n \"\"\"\n Associates a given species to the appropriate function number\n in the mzML data file.\n\n **Parameters**\n\n affin: '+', '-', or 'UV'\n The affinity of the species. i.e. 
to positive mode,\n negative mode, or UV-Vis spectra respectively.\n\n level: *integer* or None\n If the species is found in an MS/MS function,\n the MS^n level can be specified here.\n\n dct: *dictionary*\n If details are known about the species' affinity,\n they can be provided in dictionary format.\n Specifically, this function looks for the keys:\n 'function', 'affin', and 'level'.\n\n\n **Returns**\n\n function number: *integer*\n Returns the appropriate function number in which\n the given species should be found.\n\n\n **Notes**\n\n If nothing is provided to this method, it will return\n the integer 1 (assuming that the species will be found\n in the first function).\n\n \"\"\"\n if dct is not None: # if function was handed a dictionary\n if 'function' in dct:\n return dct['function']\n if 'affin' in dct:\n affin = dct['affin']\n if 'level' in dct:\n level = dct['level']\n\n if affin is None and level is None:\n return min(self.functions.keys()) # assume first function\n\n elif affin == 'UV': # if UV-Vis affinity\n for fn in self.functions: # determine which function is UV-Vis\n if self.functions[fn]['acc'] == 'MS:1000804':\n return fn\n raise ValueError('There is no electromagnetic radiation spectrum function in this mzML file')\n\n elif affin in ['+', '-']: # if affinity to mass spectrum\n levelcount = 0 # counter for number of matches to this affinity and level\n for fn in self.functions:\n if self.functions[fn]['type'] == 'MS': # if fn is ms\n if self.functions[fn]['mode'] == affin: # if mode mathes\n # if there is no level specified, assume 1\n if level is None and self.functions[fn]['level'] == 1:\n fnout = fn\n levelcount += 1\n elif self.functions[fn]['level'] == level: # if level matches\n fnout = fn\n levelcount += 1\n if levelcount > 1:\n raise ValueError(\n f\"There affinity specification of mode: {affin}, level: '{level}' matches more than one function \"\n f\"in the mzML file. \\nTo process this species, be more specific in your level specification or \"\n f\"assign it to a specific function number by adding a 'function' key to its dictionary.\")\n return fnout\n else: # if some other affinity\n raise ValueError('The specified affinity \"%s\" is not supported.' % affin)\n\n def auto_resolution(self, n=10, function=None, npeaks=4):\n \"\"\"\n Attempts to automatically determine the resolution of the spectrometer\n that the provided mzML data file was recorded on.\n The method will find n random samples of the entire spectrum and\n calculate the resolution of each of those samples and return the\n average resolution.\n\n :param int n: The number of psuedo-random samples of the spectrum to determine\n the resolution of. Default 10.\n :param int function: The mzML function number to calculate the resolution of. 
Default 1.\n :param int npeaks: number of peaks to to try to find\n :return: Estimated resolution of the spectrum\n :rtype: float\n \"\"\"\n def findsomepeaks(y):\n \"\"\"roughly locates 4 peaks by maximum values in the spectrum and returns their index\"\"\"\n split = int(len(y) / npeaks)\n start = 0\n end = start + split\n splity = []\n for i in range(npeaks):\n splity.append(sci.asarray(y[start:end]))\n start += split\n end += split\n out = []\n for ind, section in enumerate(splity):\n maxy = max(section)\n if maxy == max(section[1:-1]): # if max is not at the edge of the spectrum\n out.append(sci.where(section == maxy)[0][0] + split * ind)\n return out\n\n if function is None: # if no function is provided, use first\n function = self.associate_to_function()\n if self.functions[function]['type'] != 'MS':\n raise ValueError(\n 'The auto_resolution function only operates on mass spectrum functions. '\n 'Type of specified function %d: %s' % (function, self.functions[function]['type']))\n ranges = [] # list of scan intervals\n\n if self.functions[function]['nscans'] <= 20: # if the number of scans is less than 20\n ranges = [[1, self.functions[function]['nscans']]]\n else:\n while len(ranges) < n: # generate 10 pseudo-random intervals to sample\n ran = int(random() * self.functions[function]['nscans']) + self.functions[function]['sr'][0]\n if ran - 10 >= self.functions[function]['sr'][0] and ran + 10 <= self.functions[function]['sr'][1]:\n ranges.append([ran - 10, ran + 10])\n if self.verbose is True:\n prog = Progress(string='Estimating resolution of the instrument', fraction=False, last=n)\n summed = []\n for ind, rng in enumerate(ranges):\n if self.verbose is True:\n prog.write(ind + 1)\n summed.append(self.sum_scans(rng[0], rng[1], function, 2, True)) # sum those scans and append output\n res = []\n for spec in summed: # calculate resolution for each scan range\n inds = findsomepeaks(spec[1]) # find some peaks\n for ind in inds: # for each of those peaks\n res.append(resolution(spec[0], spec[1], ind, threshold=10))\n if self.verbose is True:\n prog.fin()\n res = [y for y in res if y is not None] # removes None values (below S/N)\n return sum(res) / len(res) # return average\n\n def check_for_file(self, fn):\n \"\"\"checks for the mzML file in the working directory and converts it if necessary\"\"\"\n\n def version_input(string):\n \"\"\"checks the python version and uses the appropriate version of user input\"\"\"\n # if sys.version.startswith('2.7'):\n # return raw_input('%s' % string)\n if sys.version.startswith('3.'):\n return input('%s' % string)\n else:\n raise EnvironmentError('The version_input method encountered an unsupported version of python.')\n\n valid = [ # supported extensions\n '.raw',\n '.mzml.gz',\n '.mzml',\n ]\n if fn.lower().endswith('.raw') is True: # extension is raw\n if file_present(fn[:-4] + '.mzML.gz') is True: # if corresponding gzipped mzml is present\n return fn[:-4] + '.mzML.gz'\n if file_present(fn[:-4] + '.mzML') is True: # if corresponding mzml is present\n return fn[:-4] + '.mzML'\n # otherwise convert and return mzml\n return pw_convert(fn, self.precision, self.compression, self.gzip_file, verbose=self.verbose)\n elif file_present(fn) is True: # if the specified file is present\n for exten in valid: # checks for supported extensions\n if fn.lower().endswith(exten) is True:\n return fn\n # otherwise asks user whether to continue\n if version_input(\n 'The extension of the supplied filename \"%s\" is unexpected and may not be supported.\\n'\n 'Do you 
wish to proceed with file loading? [Y/N] ' % fn).lower() in ['y', 'yes']:\n return fn\n else:\n sys.exit('The user cancelled mzML loading.')\n else:\n fn = fix_extension(fn) # try to fix extension\n if fn.lower().endswith('.raw') is True: # convert if only raw file is found\n return pw_convert(fn, self.precision, self.compression, self.gzip_file, verbose=self.verbose)\n return fn\n\n def function_timetic(self):\n \"\"\"\n extracts timepoints and tic lists for each function\n this function is separate from mzml contents because it would increase load times significantly (~6x)\n \"\"\"\n if self.verbose is True:\n prog = Progress(string='Extracting timepoints and total ion current values from mzML', fraction=False)\n for function in self.functions: # add timepoint and tic lists\n self.functions[function]['timepoints'] = [] # list for timepoints\n self.functions[function]['tic'] = [] # list for total ion current values\n if 'level' in self.functions[function] and self.functions[function]['level'] > 1:\n self.functions[function]['ce'] = [] # list for collision energies\n for spectrum in self.tree.getElementsByTagName('spectrum'):\n attr = branch_attributes(spectrum)\n function, proc, scan = fps(spectrum) # determine function, process, and scan numbers\n if self.verbose is True:\n prog.write(attr['index'] + 1)\n p = branch_cvparams(spectrum) # pull spectrum's cvparameters\n self.functions[function]['timepoints'].append(p['MS:1000016'].value) # start scan time\n self.functions[function]['tic'].append(p['MS:1000285'].value) # total ion current\n if 'MS:1000045' in p:\n self.functions[function]['ce'].append(p['MS:1000045'].value) # collision energy\n self.ftt = True\n if self.verbose is True:\n prog.fin()\n\n def integrate(self, name, start, end, x, y):\n \"\"\"\n Integrates y values given x bounds in a paired set of lists (e.g. 
a m/z list and an intensity list)\n\n name: name of the peak being integrated (only used for warning purposes)\n start: float\n start x value\n end: float or None\n end x value\n None will return the nearest value to the provided start value\n x: list of x values\n y: list of y values (paired with x)\n\n returns: integral\n \"\"\"\n if start > max(x) or start < min(x): # check that start is within the m/z bounds\n self.BE.warn(name, start, end, min(x), max(x))\n if end is None: # if only a start value is supplied, return closest to that value\n try: # try to find the value in the list\n return y[locate_in_list(x, start)]\n except TypeError: # if the value is not in the list, return 0\n return 0\n if end > max(x): # check that end is within the m/z bounds\n self.BE.warn(name, start, end, min(x), max(x))\n else:\n l = locate_in_list(x, start, 'greater')\n r = locate_in_list(x, end, 'lesser')\n if l <= r:\n return sum(y[l:r])\n else: # catch for if there are no values in the bounds\n return 0\n\n def pull_chromatograms(self):\n \"\"\"\n Pulls mzML chromatograms\n\n returns:\n dictionary = {'chromatogram 1 id', 'chromatogram 2 id', ...}\n dictionary['chromatogram 1 id'] = {\n 'x': list of x values\n 'y': list of y values (paired with x)\n 'xunit': unit of the x values\n 'yunit': unit of the y values\n }\n \"\"\"\n if self.verbose is True:\n prog = Progress(string='Extracting chromatogram', last=self.nchroms)\n chroms = {} # dictionary of chromatograms\n for chromatogram in self.tree.getElementsByTagName('chromatogram'):\n attr = branch_attributes(chromatogram) # pull attributes\n if self.verbose is True:\n prog.write(attr['index'] + 1)\n x, y, xunit, yunit = extract_spectrum(chromatogram, True) # extract x list, y list, and units\n chroms[attr['id']] = {'x': x, 'y': y, 'xunit': xunit, 'yunit': yunit}\n if self.verbose is True:\n prog.fin()\n return chroms\n\n def pull_species_data(self, sp, sumspec=False):\n \"\"\"\n Extracts integrated data at every timepoint for all species specified in the sp dictionary\n This function is intended to by called by PyRSIR.py\n\n sp: dictionary\n sp = {species1, species2, ...} //one key for every species to track\n sp[species] = {\n 'bounds':[species x start, species x end], //start and end x values to integrate between\n 'affin':['+' or '-' or 'UV'}, //which spectrum to look for this species in\n 'level':integer, //if applicable, the MSn level (optional, but adds specificity)\n 'function':integer, //the specific function in which to find this species (optional; overrides affin and level)\n }\n\n sumspec: bool\n toggles summing of all spectra together (creates an additional output item)\n also sums the spectra of mass spectrum species to generate an isotope pattern used by the bounds\n\n output:\n filled dictionary, each subkey will have:\n 'raw': list of raw integrated values dictacted by the bounds\n 'function': the function that the species was associated with\n\n if sumspec is true, will also output a dictionary of Spectrum objects\n the keys of this dictionary are the function numbers\n\n explicitly interprets full scan mass spectra and UV species\n \"\"\"\n if sumspec is True:\n spec = {}\n for function in self.functions: # create spectrum objects for all MS species\n if self.functions[function]['type'] == 'MS':\n spec[function] = Spectrum(3)\n for species in sp: # look for and assign function affinity\n sp[species]['function'] = self.associate_to_function(\n dct=sp[species]) # associate each species in the spectrum with a function\n if 'raw' not in 
sp[species]: # look for empty raw list\n sp[species]['raw'] = []\n if self.ftt is False: # if timepoints and tic values have not been extracted yet, extract those\n self.function_timetic()\n\n if self.verbose is True:\n prog = self.Progress( # generate progress instance\n string='Extracting species data from spectrum',\n last=self.nscans,\n writeevery=5\n )\n for spectrum in self.tree.getElementsByTagName('spectrum'):\n function, proc, scan = fps(spectrum) # pull function, process, and scan numbers\n attr = branch_attributes(spectrum) # get attributes\n if self.verbose is True:\n prog.write(attr['index'] + 1) # outtput progress\n # self.sys.stdout.write('\\rExtracting species data from spectrum #%d/%d %.1f%%' %(attr['index']+1,self.nscans,float(attr['index']+1)/float(self.nscans)*100.))\n x, y = extract_spectrum(spectrum) # generate spectrum\n if sumspec is True and function == 1:\n spec[function].add_spectrum(x, y)\n for key in sp: # integrate each peak\n if sp[key]['function'] == function: # if species is related to this function\n if self.functions[function]['type'] == 'MS':\n sp[key]['raw'].append(\n self.integrate(key, sp[key]['bounds'][0], sp[key]['bounds'][1], x, y)) # integrate\n if self.functions[function]['type'] == 'UV':\n sp[key]['raw'].append(self.integrate(key, sp[key]['bounds'][0], sp[key]['bounds'][1], x,\n y) / 1000000.) # integrates and divides by 1 million bring it into au\n if self.verbose is True:\n prog.fin() # write done\n # self.sys.stdout.write(' DONE\\n')\n self.BE.printwarns() # print bounds warnings (if any)\n if sumspec is True:\n return sp, spec\n return sp, None\n\n def retrieve_scans(self, start=None, end=None, mzstart=None, mzend=None, function=None, mute=False, outside=False):\n \"\"\"\n Retrieves the specified scans or time range from the specified function\n\n start: integer or float\n the point to start retrieving scans\n if integer, this will be a start scan number\n if float, this will be the start time\n end: (optional) integer or float\n the end point to stop retrieving scans\n same options as start\n mzstart: (optional) integer or float\n left m/z bound\n mzend: (optional) integer or float\n right m/z bound\n fn: integer\n the function to pull scans from (default 1)\n mute: bool\n overrides the verbose setting of the mzml instance\n outside: bool\n Whether to include the next point outside of the specified m/z bounds.\n This is useful for line continuity if the spectrum is to be used for\n rendering images.\n\n returns a list with each index corresponding to a scan, with two sublists for x and y data\n \"\"\"\n if function is None: # if not specified, retrieve first function\n function = self.associate_to_function()\n # find spectrum indicies to extract between\n if function not in self.functions:\n raise ValueError('The function \"%d\" is not in this mzml file.' 
% function)\n start = self.scan_index(start, function, bias='greater')\n end = self.scan_index(end, function, bias='lesser')\n if self.ftt is False: # extract the timepoints and etc from the mzml\n self.function_timetic()\n if self.verbose is True and mute is False:\n prog = Progress(string='Extracting scan data from spectrum', last=self.nscans)\n out = []\n for spectrum in self.tree.getElementsByTagName('spectrum'): # go through each spectrum\n attr = branch_attributes(spectrum)\n # func,proc,scan = self.fps(spectrum) # determine function, process, and scan numbers\n # p = self.cvparam(spectrum)\n if attr['index'] > end:\n break\n if self.verbose is True and mute is False:\n prog.write(attr['index'] + 1)\n if start <= attr['index'] <= end: # within the index bounds\n x, y = extract_spectrum(spectrum)\n if mzstart is not None or mzend is not None:\n if mzstart is None:\n l = min(x)\n else:\n l = mzstart\n if mzend is None:\n r = max(x)\n else:\n r = mzend\n spec = trimspectrum(x, y, l, r, outside)\n out.append(spec)\n if self.verbose is True and mute is False:\n prog.fin()\n if len(out) == 0: # if only one scan, return that scan\n return out[0]\n return out\n\n def scan_index(self, scan=None, function=1, bias='lesser'):\n \"\"\"\n Determines the index for a scan or timepoint in a given function\n\n :param int, float scan: The scan number (int) or time point (float) to find.\n :param int function: The mzml function to look in\n :param str bias: Bias of index finding (options dictacted by locate_in_list() )\n :return: scan index\n :rtype: int\n \"\"\"\n if function not in self.functions:\n raise KeyError('The function %d is not in this mzML file.' % function)\n if scan is None: # if no scan number is specified\n if bias == 'greater': # used for start point\n return self.functions[function]['sr'][0]\n if bias == 'lesser': # used for end point\n return self.functions[function]['sr'][1]\n if type(scan) is float: # timepoint\n if self.ftt is False:\n self.function_timetic()\n # return located index plus start of the scan range\n return locate_in_list(self.functions[function]['timepoints'], scan, bias=bias) + self.functions[function]['sr'][0]\n elif type(scan) is int: # scan number\n if scan < 1:\n raise ValueError('The scan number must be greater or equal to 1 (specified: %d)' % scan)\n if scan > self.functions[function]['nscans']:\n raise ValueError(f'The scan number {scan} exceeds the number of scans in function {function} '\n f'({self.functions[function][\"nscans\"]})')\n # return scan minus 1 (to shift into index domain) plus the start location index\n return scan - 1 + self.functions[function]['sr'][0]\n else:\n raise ValueError(f'An unexpected scan type was handed to the scan_index function (\"{scan}\", '\n f'type: {type(scan)})')\n\n def sum_scans(self,\n start=None,\n end=None,\n function=None,\n dec=3,\n mute=False\n ):\n \"\"\"\n Sums the specified scans together. If the scan range moves into another function, an error is raised.\n This method has a lower memory overhead than retrieve_scans().\n\n :param float, int start: start point to begin summing. ``int`` is interpreted as a scan number, ``float`` is\n interpreted as a time point in the acquisition.\n :param float, int end: end point to finish summing. Parameters are the same as with start.\n :param int function: mzML function to sum. 
If this is not provided, the first function will be used.\n :param int dec: number of decimal places to track in the spectrum (lower values lower memory overhead).\n :param bool mute: override chatty mode of mzML object\n :return: summed spectrum in the format ``[[m/z values], [intensity values]]``\n :rtype: list\n \"\"\"\n # if no function is specified, use the first function\n if function is None:\n function = min(self.functions.keys())\n elif function not in self.functions: # if fn is not defined\n raise KeyError(f'The function {function} is not defined in the mzML object. Available options: '\n f'{\", \".join([str(key) for key in self.functions.keys()])}')\n if self.functions[function]['type'] != 'MS':\n raise ValueError(f'The sum_scans function does not have the functionality to sum non-mass spec scans.'\n f'The specified function {function} is of type {self.functions[function][\"type\"]}')\n start = self.scan_index(start, function, 'greater')\n end = self.scan_index(end, function, 'lesser')\n\n spec = Spectrum(dec, start=self.functions[function]['window'][0],\n end=self.functions[function]['window'][1]) # create Spectrum object\n\n if self.verbose is True and mute is False:\n prog = Progress(string='Combining spectrum', fraction=False, first=start, last=end)\n\n for spectrum in self.tree.getElementsByTagName('spectrum'): # go through each spectrum\n attr = branch_attributes(spectrum) # get attributes\n if attr['index'] > end:\n break\n if self.verbose is True and mute is False:\n prog.write(attr['index'] + 1)\n if start <= attr['index'] <= end: # if within the specified bounds\n x, y = extract_spectrum(spectrum) # pull spectrum\n spec.add_spectrum(x, y) # add spectrum to Spectrum object\n out = spec.trim()\n if self.verbose is True and mute is False:\n prog.fin()\n return out\n\n\nif __name__ == '__main__':\n filename = 'MultiTest'\n mzml = mzML(filename, verbose=True, ftt=True)\n # sp = {\n # 'pos':{'bounds':[325,327],'affin':'+','spectrum':Spectrum(3),'raw':[]},\n # 'neg':{'bounds':[348,350],'affin':'-','spectrum':Spectrum(3),'raw':[]},\n # 'uv':{'bounds':[378,None],'affin':'UV','raw':[]}\n # }\n"
] |
[
[
"scipy.asarray",
"scipy.where"
]
] |
Shang-XH/BAFTT
|
[
"62392325342f48b8a89f0c2bf71e48026dd90629",
"62392325342f48b8a89f0c2bf71e48026dd90629"
] |
[
"model/deeplab.py",
"dataset/MassBuilding_dataset.py"
] |
[
"import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport torch\nimport numpy as np\naffine_par = True\n\n\ndef outS(i):\n i = int(i)\n i = (i+1)/2\n i = int(np.ceil((i+1)/2.0))\n i = (i+1)/2\n return i\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, affine = affine_par)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, affine = affine_par)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change\n self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)\n # for i in self.bn1.parameters():\n # i.requires_grad = False\n\n padding = dilation\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change\n padding=padding, bias=False, dilation = dilation)\n self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)\n for i in self.bn2.parameters():\n i.requires_grad = False\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)\n # for i in self.bn3.parameters():\n # i.requires_grad = False\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass Classifier_Module(nn.Module):\n\n def __init__(self, dilation_series, padding_series, num_classes):\n super(Classifier_Module, self).__init__()\n self.conv2d_list = nn.ModuleList()\n for dilation, padding in zip(dilation_series, padding_series):\n self.conv2d_list.append(nn.Conv2d(2048, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias = True))\n\n for m in self.conv2d_list:\n m.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n out = self.conv2d_list[0](x)\n for i in range(len(self.conv2d_list)-1):\n out += self.conv2d_list[i+1](x)\n return out\n\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, affine = affine_par)\n for i in self.bn1.parameters():\n i.requires_grad = False\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change\n self.layer1 = self._make_layer(block, 64, layers[0])\n 
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)\n self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, 0.01)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n # for i in m.parameters():\n # i.requires_grad = False\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion,affine = affine_par))\n # for i in downsample._modules['1'].parameters():\n # i.requires_grad = False\n layers = []\n layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation))\n\n return nn.Sequential(*layers)\n def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):\n return block(dilation_series,padding_series,num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.layer5(x)\n\n return x\n\n def get_1x_lr_params_NOscale(self):\n \"\"\"\n This generator returns all the parameters of the net except for \n the last classification layer. Note that for each batchnorm layer, \n requires_grad is set to False in deeplab_resnet.py, therefore this function does not return \n any batchnorm parameter\n \"\"\"\n b = []\n\n b.append(self.conv1)\n b.append(self.bn1)\n b.append(self.layer1)\n b.append(self.layer2)\n b.append(self.layer3)\n b.append(self.layer4)\n\n \n for i in range(len(b)):\n for j in b[i].modules():\n jj = 0\n for k in j.parameters():\n jj+=1\n if k.requires_grad:\n yield k\n\n def get_10x_lr_params(self):\n \"\"\"\n This generator returns all the parameters for the last layer of the net,\n which does the classification of pixel into classes\n \"\"\"\n b = []\n b.append(self.layer5.parameters())\n\n for j in range(len(b)):\n for i in b[j]:\n yield i\n \n\n\n def optim_parameters(self, args):\n return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate},\n {'params': self.get_10x_lr_params(), 'lr': 10*args.learning_rate}] \n\n\ndef Res_Deeplab(num_classes=21):\n model = ResNet(Bottleneck,[3, 4, 23, 3], num_classes)\n return model\n\n",
"## -----Training-----\nimport os\nimport os.path as osp\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport collections\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom PIL import Image\n\n\nclass MassBuildingDataSet(data.Dataset):\n def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=-1,ssl_dir=''):\n self.root = root\n self.list_path = list_path\n self.crop_size = crop_size\n self.scale = scale\n self.ignore_label = ignore_label\n self.mean = mean\n self.is_mirror = mirror\n # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n if not max_iters==None:\n self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))\n self.files = []\n\n self.id_to_trainid = {0: 0, 1: 1}\n\n # for split in [\"train\", \"trainval\", \"val\"]:\n for name in self.img_ids:\n img_file = osp.join(self.root, \"Images/%s.png\" % name)\n label_file = osp.join(self.root, \"SegmentationClass/%s.png\" % name)\n self.files.append({\n \"img\": img_file,\n \"label\": label_file,\n \"name\": name\n })\n\n def __len__(self):\n return len(self.files)\n\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n\n image = Image.open(datafiles[\"img\"]).convert('RGB')\n label = Image.open(datafiles[\"label\"])\n name = datafiles[\"name\"]\n\n # resize\n image = image.resize(self.crop_size, Image.BICUBIC)\n label = label.resize(self.crop_size, Image.NEAREST)\n\n image = np.asarray(image, np.float32)\n label = np.asarray(label, np.float32)\n\n # re-assign labels to match the format of Cityscapes\n label_copy = 255 * np.ones(label.shape, dtype=np.float32)\n for k, v in self.id_to_trainid.items():\n label_copy[label == k] = v\n\n size = image.shape\n image = image[:, :, ::-1] # change to BGR\n image -= self.mean\n image = image.transpose((2, 0, 1))\n\n return image.copy(), label_copy.copy(), np.array(size), name\n\n\nif __name__ == '__main__':\n dst = MassBuildingDataSet(\"./data\", is_transform=True)\n trainloader = data.DataLoader(dst, batch_size=4)\n for i, data in enumerate(trainloader):\n imgs, labels = data\n if i == 0:\n img = torchvision.utils.make_grid(imgs).numpy()\n img = np.transpose(img, (1, 2, 0))\n img = img[:, :, ::-1]\n plt.imshow(img)\n plt.show()\n\n## ---Testing-----\n# import os\n# import os.path as osp\n# import numpy as np\n# import random\n# import matplotlib.pyplot as plt\n# import collections\n# import torch\n# import torchvision\n# from torch.utils import data\n# from PIL import Image\n#\n#\n# class MassBuildingDataSet(data.Dataset):\n# def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=-1,ssl_dir=''):\n# self.root = root\n# self.list_path = list_path\n# self.crop_size = crop_size\n# self.scale = scale\n# self.ignore_label = ignore_label\n# self.mean = mean\n# self.is_mirror = mirror\n# # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n# self.img_ids = [i_id.strip() for i_id in open(list_path)]\n# if not max_iters==None:\n# self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))\n# self.files = []\n#\n# self.id_to_trainid = {0: 0, 1: 1}\n#\n# # for split in [\"train\", \"trainval\", \"val\"]:\n# for name in self.img_ids:\n# img_file = osp.join(self.root, \"Images_cleannull/%s.png\" % name)\n# self.files.append({\n# \"img\": img_file,\n# \"name\": 
name\n# })\n#\n# def __len__(self):\n# return len(self.files)\n#\n#\n# def __getitem__(self, index):\n# datafiles = self.files[index]\n#\n# image = Image.open(datafiles[\"img\"]).convert('RGB')\n# name = datafiles[\"name\"]\n#\n# # resize\n# image = image.resize(self.crop_size, Image.BICUBIC)\n#\n# image = np.asarray(image, np.float32)\n#\n# # re-assign labels to match the format of Cityscapes\n#\n#\n# size = image.shape\n# image = image[:, :, ::-1] # change to BGR\n# image -= self.mean\n# image = image.transpose((2, 0, 1))\n#\n# return image.copy(), np.array(size), name\n#\n#\n# if __name__ == '__main__':\n# dst = MassBuildingDataSet(\"./data\", is_transform=True)\n# trainloader = data.DataLoader(dst, batch_size=4)\n# for i, data in enumerate(trainloader):\n# imgs, labels = data\n# if i == 0:\n# img = torchvision.utils.make_grid(imgs).numpy()\n# img = np.transpose(img, (1, 2, 0))\n# img = img[:, :, ::-1]\n# plt.imshow(img)\n# plt.show()\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"numpy.ceil",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"matplotlib.pyplot.imshow",
"numpy.asarray",
"torch.utils.data.DataLoader",
"numpy.ones",
"numpy.transpose",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
tjniemi/pysteps
|
[
"76324d8f315f63c6723887f4c99d155749a31e83",
"76324d8f315f63c6723887f4c99d155749a31e83"
] |
[
"pysteps/utils/transformation.py",
"examples/thunderstorm_detection_and_tracking.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\npysteps.utils.transformation\r\n============================\r\n\r\nMethods for transforming data values.\r\n\r\n.. autosummary::\r\n :toctree: ../generated/\r\n\r\n boxcox_transform\r\n dB_transform\r\n NQ_transform\r\n sqrt_transform\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.stats as scipy_stats\r\nimport warnings\r\nfrom scipy.interpolate import interp1d\r\n\r\nwarnings.filterwarnings(\r\n \"ignore\", category=RuntimeWarning\r\n) # To deactivate warnings for comparison operators with NaNs\r\n\r\n\r\ndef boxcox_transform(\r\n R, metadata=None, Lambda=None, threshold=None, zerovalue=None, inverse=False\r\n):\r\n \"\"\"The one-parameter Box-Cox transformation.\r\n\r\n The Box-Cox transform is a well-known power transformation introduced by\r\n Box and Cox (1964). In its one-parameter version, the Box-Cox transform\r\n takes the form T(x) = ln(x) for Lambda = 0,\r\n or T(x) = (x**Lambda - 1)/Lambda otherwise.\r\n\r\n Default parameters will produce a log transform (i.e. Lambda=0).\r\n\r\n Parameters\r\n ----------\r\n R: array-like\r\n Array of any shape to be transformed.\r\n metadata: dict, optional\r\n Metadata dictionary containing the transform, zerovalue and threshold\r\n attributes as described in the documentation of\r\n :py:mod:`pysteps.io.importers`.\r\n Lambda: float, optional\r\n Parameter Lambda of the Box-Cox transformation.\r\n It is 0 by default, which produces the log transformation.\r\n\r\n Choose Lambda < 1 for positively skewed data, Lambda > 1 for negatively\r\n skewed data.\r\n threshold: float, optional\r\n The value that is used for thresholding with the same units as R.\r\n If None, the threshold contained in metadata is used.\r\n If no threshold is found in the metadata,\r\n a value of 0.1 is used as default.\r\n zerovalue: float, optional\r\n The value to be assigned to no rain pixels as defined by the threshold.\r\n It is equal to the threshold - 1 by default.\r\n inverse: bool, optional\r\n If set to True, it performs the inverse transform. False by default.\r\n\r\n Returns\r\n -------\r\n R: array-like\r\n Array of any shape containing the (back-)transformed units.\r\n metadata: dict\r\n The metadata with updated attributes.\r\n\r\n References\r\n ----------\r\n Box, G. E. and Cox, D. R. (1964), An Analysis of Transformations. 
Journal\r\n of the Royal Statistical Society: Series B (Methodological), 26: 211-243.\r\n doi:10.1111/j.2517-6161.1964.tb00553.x\r\n \"\"\"\r\n\r\n R = R.copy()\r\n\r\n if metadata is None:\r\n if inverse:\r\n metadata = {\"transform\": \"BoxCox\"}\r\n else:\r\n metadata = {\"transform\": None}\r\n\r\n else:\r\n metadata = metadata.copy()\r\n\r\n if not inverse:\r\n\r\n if metadata[\"transform\"] == \"BoxCox\":\r\n return R, metadata\r\n\r\n if Lambda is None:\r\n Lambda = metadata.get(\"BoxCox_lambda\", 0.0)\r\n\r\n if threshold is None:\r\n threshold = metadata.get(\"threshold\", 0.1)\r\n\r\n zeros = R < threshold\r\n\r\n # Apply Box-Cox transform\r\n if Lambda == 0.0:\r\n R[~zeros] = np.log(R[~zeros])\r\n threshold = np.log(threshold)\r\n\r\n else:\r\n R[~zeros] = (R[~zeros] ** Lambda - 1) / Lambda\r\n threshold = (threshold ** Lambda - 1) / Lambda\r\n\r\n # Set value for zeros\r\n if zerovalue is None:\r\n zerovalue = threshold - 1 # TODO: set to a more meaningful value\r\n R[zeros] = zerovalue\r\n\r\n metadata[\"transform\"] = \"BoxCox\"\r\n metadata[\"BoxCox_lambda\"] = Lambda\r\n metadata[\"zerovalue\"] = zerovalue\r\n metadata[\"threshold\"] = threshold\r\n\r\n elif inverse:\r\n\r\n if metadata[\"transform\"] not in [\"BoxCox\", \"log\"]:\r\n return R, metadata\r\n\r\n if Lambda is None:\r\n Lambda = metadata.pop(\"BoxCox_lambda\", 0.0)\r\n if threshold is None:\r\n threshold = metadata.get(\"threshold\", -10.0)\r\n if zerovalue is None:\r\n zerovalue = 0.0\r\n\r\n # Apply inverse Box-Cox transform\r\n if Lambda == 0.0:\r\n R = np.exp(R)\r\n threshold = np.exp(threshold)\r\n\r\n else:\r\n R = np.exp(np.log(Lambda * R + 1) / Lambda)\r\n threshold = np.exp(np.log(Lambda * threshold + 1) / Lambda)\r\n\r\n R[R < threshold] = zerovalue\r\n\r\n metadata[\"transform\"] = None\r\n metadata[\"zerovalue\"] = zerovalue\r\n metadata[\"threshold\"] = threshold\r\n\r\n return R, metadata\r\n\r\n\r\ndef dB_transform(R, metadata=None, threshold=None, zerovalue=None, inverse=False):\r\n \"\"\"Methods to transform precipitation intensities to/from dB units.\r\n\r\n Parameters\r\n ----------\r\n R: array-like\r\n Array of any shape to be (back-)transformed.\r\n metadata: dict, optional\r\n Metadata dictionary containing the transform, zerovalue and threshold\r\n attributes as described in the documentation of\r\n :py:mod:`pysteps.io.importers`.\r\n threshold: float, optional\r\n Optional value that is used for thresholding with the same units as R.\r\n If None, the threshold contained in metadata is used.\r\n If no threshold is found in the metadata,\r\n a value of 0.1 is used as default.\r\n zerovalue: float, optional\r\n The value to be assigned to no rain pixels as defined by the threshold.\r\n It is equal to the threshold - 1 by default.\r\n inverse: bool, optional\r\n If set to True, it performs the inverse transform. 
False by default.\r\n\r\n Returns\r\n -------\r\n R: array-like\r\n Array of any shape containing the (back-)transformed units.\r\n metadata: dict\r\n The metadata with updated attributes.\r\n \"\"\"\r\n\r\n R = R.copy()\r\n\r\n if metadata is None:\r\n if inverse:\r\n metadata = {\"transform\": \"dB\"}\r\n else:\r\n metadata = {\"transform\": None}\r\n\r\n else:\r\n metadata = metadata.copy()\r\n\r\n # to dB units\r\n if not inverse:\r\n\r\n if metadata[\"transform\"] == \"dB\":\r\n return R, metadata\r\n\r\n if threshold is None:\r\n threshold = metadata.get(\"threshold\", 0.1)\r\n\r\n zeros = R < threshold\r\n\r\n # Convert to dB\r\n R[~zeros] = 10.0 * np.log10(R[~zeros])\r\n threshold = 10.0 * np.log10(threshold)\r\n\r\n # Set value for zeros\r\n if zerovalue is None:\r\n zerovalue = threshold - 5 # TODO: set to a more meaningful value\r\n R[zeros] = zerovalue\r\n\r\n metadata[\"transform\"] = \"dB\"\r\n metadata[\"zerovalue\"] = zerovalue\r\n metadata[\"threshold\"] = threshold\r\n\r\n return R, metadata\r\n\r\n # from dB units\r\n elif inverse:\r\n\r\n if metadata[\"transform\"] != \"dB\":\r\n return R, metadata\r\n\r\n if threshold is None:\r\n threshold = metadata.get(\"threshold\", -10.0)\r\n if zerovalue is None:\r\n zerovalue = 0.0\r\n\r\n R = 10.0 ** (R / 10.0)\r\n threshold = 10.0 ** (threshold / 10.0)\r\n R[R < threshold] = zerovalue\r\n\r\n metadata[\"transform\"] = None\r\n metadata[\"threshold\"] = threshold\r\n metadata[\"zerovalue\"] = zerovalue\r\n\r\n return R, metadata\r\n\r\n\r\ndef NQ_transform(R, metadata=None, inverse=False, **kwargs):\r\n \"\"\"The normal quantile transformation as in Bogner et al (2012).\r\n Zero rain vales are set to zero in norm space.\r\n\r\n Parameters\r\n ----------\r\n R: array-like\r\n Array of any shape to be transformed.\r\n metadata: dict, optional\r\n Metadata dictionary containing the transform, zerovalue and threshold\r\n attributes as described in the documentation of\r\n :py:mod:`pysteps.io.importers`.\r\n inverse: bool, optional\r\n If set to True, it performs the inverse transform. False by default.\r\n\r\n Other Parameters\r\n ----------------\r\n a: float, optional\r\n The offset fraction to be used for plotting positions;\r\n typically in (0,1).\r\n The default is 0., that is, it spaces the points evenly in the uniform\r\n distribution.\r\n\r\n Returns\r\n -------\r\n R: array-like\r\n Array of any shape containing the (back-)transformed units.\r\n metadata: dict\r\n The metadata with updated attributes.\r\n\r\n References\r\n ----------\r\n Bogner, K., Pappenberger, F., and Cloke, H. L.: Technical Note: The normal\r\n quantile transformation and its application in a flood forecasting system,\r\n Hydrol. Earth Syst. 
Sci., 16, 1085-1094,\r\n https://doi.org/10.5194/hess-16-1085-2012, 2012.\r\n \"\"\"\r\n\r\n # defaults\r\n a = kwargs.get(\"a\", 0.0)\r\n\r\n R = R.copy()\r\n shape0 = R.shape\r\n R = R.ravel().astype(float)\r\n idxNan = np.isnan(R)\r\n R_ = R[~idxNan]\r\n\r\n if metadata is None:\r\n if inverse:\r\n metadata = {\"transform\": \"NQT\"}\r\n else:\r\n metadata = {\"transform\": None}\r\n metadata[\"zerovalue\"] = np.min(R_)\r\n\r\n else:\r\n metadata = metadata.copy()\r\n\r\n if not inverse:\r\n # Plotting positions\r\n # https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_position\r\n n = R_.size\r\n Rpp = ((np.arange(n) + 1 - a) / (n + 1 - 2 * a)).reshape(R_.shape)\r\n\r\n # NQ transform\r\n Rqn = scipy_stats.norm.ppf(Rpp)\r\n R__ = np.interp(R_, R_[np.argsort(R_)], Rqn)\r\n\r\n # set zero rain to 0 in norm space\r\n R__[R[~idxNan] == metadata[\"zerovalue\"]] = 0\r\n\r\n # build inverse transform\r\n metadata[\"inqt\"] = interp1d(\r\n Rqn, R_[np.argsort(R_)], bounds_error=False, fill_value=(R_.min(), R_.max())\r\n )\r\n\r\n metadata[\"transform\"] = \"NQT\"\r\n metadata[\"zerovalue\"] = 0\r\n metadata[\"threshold\"] = R__[R__ > 0].min()\r\n\r\n else:\r\n f = metadata.pop(\"inqt\")\r\n R__ = f(R_)\r\n metadata[\"transform\"] = None\r\n metadata[\"zerovalue\"] = R__.min()\r\n metadata[\"threshold\"] = R__[R__ > R__.min()].min()\r\n\r\n R[~idxNan] = R__\r\n\r\n return R.reshape(shape0), metadata\r\n\r\n\r\ndef sqrt_transform(R, metadata=None, inverse=False, **kwargs):\r\n \"\"\"Square-root transform.\r\n\r\n Parameters\r\n ----------\r\n R: array-like\r\n Array of any shape to be transformed.\r\n metadata: dict, optional\r\n Metadata dictionary containing the transform, zerovalue and threshold\r\n attributes as described in the documentation of\r\n :py:mod:`pysteps.io.importers`.\r\n inverse: bool, optional\r\n If set to True, it performs the inverse transform. False by default.\r\n\r\n Returns\r\n -------\r\n R: array-like\r\n Array of any shape containing the (back-)transformed units.\r\n metadata: dict\r\n The metadata with updated attributes.\r\n\r\n \"\"\"\r\n\r\n R = R.copy()\r\n\r\n if metadata is None:\r\n if inverse:\r\n metadata = {\"transform\": \"sqrt\"}\r\n else:\r\n metadata = {\"transform\": None}\r\n metadata[\"zerovalue\"] = np.nan\r\n metadata[\"threshold\"] = np.nan\r\n else:\r\n metadata = metadata.copy()\r\n\r\n if not inverse:\r\n # sqrt transform\r\n R = np.sqrt(R)\r\n\r\n metadata[\"transform\"] = \"sqrt\"\r\n metadata[\"zerovalue\"] = np.sqrt(metadata[\"zerovalue\"])\r\n metadata[\"threshold\"] = np.sqrt(metadata[\"threshold\"])\r\n else:\r\n # inverse sqrt transform\r\n R = R ** 2\r\n\r\n metadata[\"transform\"] = None\r\n metadata[\"zerovalue\"] = metadata[\"zerovalue\"] ** 2\r\n metadata[\"threshold\"] = metadata[\"threshold\"] ** 2\r\n\r\n return R, metadata\r\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThunderstorm Detection and Tracking - DATing\n============================================\n\nThis example shows how to use the thunderstorm DATing module. The example is based on\nMeteoSwiss radar data and uses the Cartesian composite of maximum reflectivity on a\n1 km grid. All default values are tuned to this grid, but can be modified.\nThe first section demonstrates thunderstorm cell detection and how to plot contours.\nThe second section demonstrates detection and tracking in combination,\nas well as how to plot the resulting tracks.\nThis module was implemented following the procedures used in the TRT Thunderstorms\nRadar Tracking algorithm (:cite:`TRT2004`) used operationally at MeteoSwiss.\nModifications include advecting the identified thunderstorms with the optical flow\nobtained from pysteps, as well as additional options in the thresholding.\n\nReferences\n..........\n:cite:`TRT2004`\n\n@author: mfeldman\n\"\"\"\n################################################################################\n# Import all required functions\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nfrom datetime import datetime\nfrom pprint import pprint\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom pysteps import io, rcparams\nfrom pysteps.feature import tstorm as tstorm_detect\nfrom pysteps.tracking import tdating as tstorm_dating\nfrom pysteps.utils import to_reflectivity\nfrom pysteps.visualization import plot_precip_field, plot_track, plot_cart_contour\n\n################################################################################\n# Read the radar input images\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# A series of 20 files containing Swiss Cartesian gridded rain rates are imported. Since\n# the algorithm is tuned to Swiss max-reflectivity data, the rain rates are transformed\n# to reflectivity fields using the 'to_reflectivity' utility in pysteps.utils.\n\n# Select the input data\ndate = datetime.strptime(\"201607112100\", \"%Y%m%d%H%M\")\ndata_source = rcparams.data_sources[\"mch\"]\n\n# Extract corresponding settings\nroot_path = data_source[\"root_path\"]\npath_fmt = data_source[\"path_fmt\"]\nfn_pattern = data_source[\"fn_pattern\"]\nfn_ext = data_source[\"fn_ext\"]\nimporter_name = data_source[\"importer\"]\nimporter_kwargs = data_source[\"importer_kwargs\"]\ntimestep = data_source[\"timestep\"]\n\n# Load the data from the archive\nfns = io.archive.find_by_date(\n date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_next_files=20\n)\nimporter = io.get_method(importer_name, \"importer\")\nR, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs)\n\n# Convert to reflectivity (it is possible to give the a- and b- parameters of the\n# Marshall-Palmer relationship here: zr_a = and zr_b =).\nZ, metadata = to_reflectivity(R, metadata)\n\n# Extract the list of timestamps\ntimelist = metadata[\"timestamps\"]\n\npprint(metadata)\n\n###############################################################################\n# Example of thunderstorm identification in a single timestep\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# The function tstorm_detect.detection requires a 2-D input image, all further inputs are\n# optional.\n\ninput_image = Z[2, :, :].copy()\ntime = timelist[2]\ncells_id, labels = tstorm_detect.detection(input_image, time=time)\n\n###############################################################################\n# Properties of one of the identified 
cells:\nprint(cells_id.iloc[0])\n\n###############################################################################\n# Example of thunderstorm tracking over a timeseries\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# The tstorm-dating function requires the entire pre-loaded time series.\n# The first two timesteps are required to initialize the\n# flow prediction and are not used to compute tracks.\n\ntrack_list, cell_list, label_list = tstorm_dating.dating(\n input_video=Z, timelist=timelist\n)\n\n###############################################################################\n# Plotting the results\n# ~~~~~~~~~~~~~~~~~~~~\n\n# Plot precipitation field\nplot_precip_field(Z[2, :, :], geodata=metadata, units=metadata[\"unit\"])\nplt.xlabel(\"Swiss easting [m]\")\nplt.ylabel(\"Swiss northing [m]\")\n\n# Add the identified cells\nplot_cart_contour(cells_id.cont, geodata=metadata)\n\n# Filter the tracks to only contain cells existing in this timestep\nIDs = cells_id.ID.values\ntrack_filt = []\nfor track in track_list:\n if np.unique(track.ID) in IDs:\n track_filt.append(track)\n\n# Add their tracks\nplot_track(track_filt, geodata=metadata)\nplt.show()\n\n################################################################################\n# Evaluating temporal behaviour of cell\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Maximum reflectivity of cells in time\n\n# Make an empty list\ntlen = []\n# Get a list of colors that we will use for the plot\ncolor = iter(plt.cm.ocean(np.linspace(0, 0.8, len(track_filt))))\n# Now, loop through all the tracks and plot the maximum reflectivity of the cell\n# in time.\nfor track in track_filt:\n plt.plot(np.arange(len(track)), track.max_ref, c=next(color))\n tlen.append(len(track))\nplt.xticks(np.arange(max(tlen) + 1), labels=np.arange(max(tlen) + 1) * 5)\nplt.ylabel(\"Maximum reflectivity (dBZ)\")\nplt.xlabel(\"Time since cell detection (min)\")\nplt.legend(IDs, loc=\"lower right\", ncol=3, title=\"Track number\")\nplt.show()\n\n###############################################################################\n# The size of the thunderstorm cells in time\n\n# Make an empty list\ntlen = []\n# Get a list of colors that we will use for the plot\ncolor = iter(plt.cm.ocean(np.linspace(0, 0.8, len(track_filt))))\n# Now, loop through all the tracks and plot the cell size of the thunderstorms\n# in time.\nfor track in track_filt:\n size = []\n for ID, t in track.iterrows():\n size.append(len(t.x))\n plt.plot(np.arange(len(track)), size, c=next(color))\n tlen.append(len(track))\nplt.xticks(np.arange(max(tlen) + 1), labels=np.arange(max(tlen) + 1) * 5)\nplt.ylabel(\"Thunderstorm cell size (pixels)\")\nplt.xlabel(\"Time since cell detection (min)\")\nplt.legend(IDs, loc=\"upper left\", ncol=3, title=\"Track number\")\nplt.show()\n"
] |
[
[
"scipy.stats.norm.ppf",
"numpy.log",
"numpy.sqrt",
"numpy.min",
"numpy.isnan",
"numpy.arange",
"numpy.log10",
"numpy.argsort",
"numpy.exp"
],
[
"matplotlib.pyplot.legend",
"numpy.unique",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
Ice833/Semantic-Segmentation
|
[
"23d23f6da3b34884c044a2253d65a1e4097adb2d"
] |
[
"SegNet_Mobile/train.py"
] |
[
"from nets.segnet import mobilenet_segnet\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\r\nfrom PIL import Image\r\nimport keras\r\nfrom keras import backend as K\r\nimport numpy as np\r\n\r\nNCLASSES = 2\r\nHEIGHT = 416\r\nWIDTH = 416\r\n\r\ndef generate_arrays_from_file(lines,batch_size):\r\n # 获取总长度\r\n n = len(lines)\r\n i = 0\r\n while 1:\r\n X_train = []\r\n Y_train = []\r\n # 获取一个batch_size大小的数据\r\n for _ in range(batch_size):\r\n if i==0:\r\n np.random.shuffle(lines)\r\n name = lines[i].split(';')[0]\r\n # 从文件中读取图像\r\n img = Image.open(r\".\\dataset2\\jpg\" + '/' + name)\r\n img = img.resize((WIDTH,HEIGHT))\r\n img = np.array(img)\r\n img = img/255\r\n X_train.append(img)\r\n\r\n name = (lines[i].split(';')[1]).replace(\"\\n\", \"\")\r\n # 从文件中读取图像\r\n img = Image.open(r\".\\dataset2\\png\" + '/' + name)\r\n img = img.resize((int(WIDTH/2),int(HEIGHT/2)))\r\n img = np.array(img)\r\n seg_labels = np.zeros((int(HEIGHT/2),int(WIDTH/2),NCLASSES))\r\n for c in range(NCLASSES):\r\n seg_labels[: , : , c ] = (img[:,:,0] == c ).astype(int)\r\n seg_labels = np.reshape(seg_labels, (-1,NCLASSES))\r\n Y_train.append(seg_labels)\r\n\r\n # 读完一个周期后重新开始\r\n i = (i+1) % n\r\n yield (np.array(X_train),np.array(Y_train))\r\n\r\ndef loss(y_true, y_pred):\r\n loss = K.categorical_crossentropy(y_true,y_pred)\r\n return loss\r\n\r\nif __name__ == \"__main__\":\r\n log_dir = \"logs/\"\r\n # 获取model\r\n model = mobilenet_segnet(n_classes=NCLASSES,input_height=HEIGHT, input_width=WIDTH)\r\n # model.summary()\r\n BASE_WEIGHT_PATH = ('https://github.com/fchollet/deep-learning-models/'\r\n\t\t\t\t\t\t\t\t\t\t'releases/download/v0.6/')\r\n model_name = 'mobilenet_%s_%d_tf_no_top.h5' % ( '1_0' , 224 )\r\n\r\n weight_path = BASE_WEIGHT_PATH + model_name\r\n weights_path = keras.utils.get_file(model_name, weight_path )\r\n model.load_weights(weights_path,by_name=True,skip_mismatch=True)\r\n\r\n \r\n # 打开数据集的txt\r\n with open(r\".\\dataset2\\train.txt\",\"r\") as f:\r\n lines = f.readlines()\r\n\r\n # 打乱行,这个txt主要用于帮助读取数据来训练\r\n # 打乱的数据更有利于训练\r\n np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n\r\n # 90%用于训练,10%用于估计。\r\n num_val = int(len(lines)*0.1)\r\n num_train = len(lines) - num_val\r\n\r\n # 保存的方式,3世代保存一次\r\n checkpoint_period = ModelCheckpoint(\r\n log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\r\n monitor='val_loss', \r\n save_weights_only=True, \r\n save_best_only=True, \r\n period=3\r\n )\r\n # 学习率下降的方式,val_loss3次不下降就下降学习率继续训练\r\n reduce_lr = ReduceLROnPlateau(\r\n monitor='val_loss', \r\n factor=0.5, \r\n patience=3, \r\n verbose=1\r\n )\r\n # 是否需要早停,当val_loss一直不下降的时候意味着模型基本训练完毕,可以停止\r\n early_stopping = EarlyStopping(\r\n monitor='val_loss', \r\n min_delta=0, \r\n patience=10, \r\n verbose=1\r\n )\r\n\r\n # 交叉熵\r\n model.compile(loss = loss,\r\n optimizer = Adam(lr=1e-4),\r\n metrics = ['accuracy'])\r\n batch_size = 4\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\r\n \r\n # 开始训练\r\n model.fit_generator(generate_arrays_from_file(lines[:num_train], batch_size),\r\n steps_per_epoch=max(1, num_train//batch_size),\r\n validation_data=generate_arrays_from_file(lines[num_train:], batch_size),\r\n validation_steps=max(1, num_val//batch_size),\r\n epochs=50,\r\n initial_epoch=0,\r\n callbacks=[checkpoint_period, reduce_lr])\r\n\r\n model.save_weights(log_dir+'last1.h5')\r\n"
] |
[
[
"numpy.reshape",
"numpy.random.shuffle",
"numpy.array",
"numpy.random.seed"
]
] |
stormage2/huji_lab
|
[
"07734f1891d05177dd1395a4a3e4694e258ca0f8"
] |
[
"huji_lab/Display.py"
] |
[
"import matplotlib.pyplot as _plt\nfrom IPython.display import Image as _Image\nfrom IPython.display import display as _display\nfrom IPython.display import Markdown as _Markdown\nfrom IPython.display import Latex as _Latex\n\n\"\"\"\n# A dangerous override function, currently unimplemented.\nfrom uncertainties.core import Variable as _varu\n\n\ndef is_ufloat(num):\n if type(num) is _varu:\n if num.std_dev / abs(num.nominal_value) > 0.02:\n print_color_bold(num, 'red')\n else:\n print_color_bold(num, 'green')\n else:\n print_color_bold(num, 'none')\n\n\ndef print_color_bold(string, color):\n if color != 'none':\n num = str(string)\n text_line = _Markdown(\"<span style=\\\"color: \" + color + \"\\\">**\" + num + \"**</span>\") # type: tuple\n _display(text_line)\n else:\n _display(string)\n \nglobal print\nprint = is_ufloat\n\"\"\"\n\n\ndef print_color_bold(string, color='black'):\n text_line = _Markdown(\"<span style=\\\"color: \" + color + \"\\\">**\" + string + \"**</span>\") # type: tuple\n _display(text_line)\n\n\ndef _print_latex_old(text_to_print):\n \"\"\"\n DEPRECATED, Please don't use\n Nicely prints LaTeX syntax, inline with python output.\n :param text_to_print:\n :return: None.\n \"\"\"\n fig, ax = _plt.subplots(figsize=(1, 1))\n _plt.rc('text', usetex=True)\n _plt.tight_layout()\n _plt.axis('off')\n ax.grid(False)\n _plt.figtext(0, 0, text_to_print, fontsize=40, bbox=dict(facecolor='white', linewidth=0))\n\n\ndef print_latex(text_to_print):\n return _Latex(text_to_print)\n\n\ndef print_wolfram(wolf_query):\n \"\"\"\n Nicely prints a wolframAlpha query as a series of photos.\n :param wolf_query: A wolfram_query() object.\n :return: None.\n \"\"\"\n for result in wolf_query['pod']:\n outer = result['subpod']\n if type(outer) is dict:\n disp = _Image(url=outer['img']['@src']) # type: tuple\n _display(disp)\n else:\n for i in range(len(outer)):\n disp = _Image(url=outer[i]['img']['@src']) # type: tuple\n _display(disp)\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.axis"
]
] |
jhamman/xrviz
|
[
"7b933f29330fed8c9ee6c11822058478f58d51a1"
] |
[
"xrviz/dashboard.py"
] |
[
"import ast\nimport dask\nimport panel as pn\nimport pandas as pd\nimport numpy as np\nimport xarray as xr\nimport hvplot.xarray\nimport hvplot.pandas\nimport holoviews as hv\nfrom holoviews import streams\nfrom bokeh.models import HoverTool\nimport warnings\nfrom itertools import cycle\nimport numpy\nfrom .sigslot import SigSlot\nfrom .control import Control\nfrom .utils import convert_widget, player_with_name_and_value, is_float\nfrom .compatibility import ccrs, gv, gf, has_cartopy, logger, has_crick_tdigest\n\n\nclass Dashboard(SigSlot):\n \"\"\"\n Main entry point to XrViz, an interactive GUI for a given dataset.\n\n Parameters\n ----------\n data: xarray.DataSet\n The data to be visualised\n\n initial_params: `dict`\n To pre-select values of widgets upon initialization. The keys are\n generally names of widgets within the input area of the interface.\n For more details, refer to\n `Set Initial Parameters <../html/set_initial_parameters.html>`_ .\n\n Attributes\n ----------\n\n 1. panel:\n A ``panel.Tabs`` instance containing the user input panes and\n output graphs of the interface.\n 2. control:\n A ``Control`` instance responsible for input panes (control panel).\n 3. plot_button:\n A ``pn.widgets.Button`` that generates graph according to values\n selected in input panes, upon click.\n 4. graph:\n A ``HoloViews(DynamicMap)`` instance containing the main graph.\n 5. output:\n The ``graph`` along with the select widgets for index selection.\n 6. taps_graph:\n A ``holoviews.Points`` instance to record the location of taps.\n 7. series_graph:\n A ``HoloViews(Overlay)`` instance having series extracted.\n 8. clear_series_button:\n A ``pn.widgets.Button`` to clear the `taps_graph` and\n `series_graph`.\n \"\"\"\n def __init__(self, data, initial_params={}):\n super().__init__()\n if not isinstance(data, xr.core.dataarray.DataWithCoords):\n raise ValueError(\"Input should be an xarray data object, not %s\" % type(data))\n self.set_data(data)\n self.initial_params = initial_params\n self.control = Control(self.data)\n self.plot_button = pn.widgets.Button(name='Plot', width=200,\n disabled=True)\n self.index_selectors = []\n self.graph = pn.Spacer(name='Graph')\n self.taps_graph = hv.Points([])\n self.series_graph = pn.Row(pn.Spacer(name='Series Graph'))\n self.clear_series_button = pn.widgets.Button(name='Clear',\n width=200,\n disabled=True)\n self.output = pn.Row(self.graph,\n pn.Column(name='Index_selectors'))\n\n self._register(self.plot_button, 'plot_clicked', 'clicks')\n self.connect('plot_clicked', self.create_graph)\n\n self._register(self.control.coord_setter.coord_selector, 'set_coords')\n self.connect(\"set_coords\", self.set_coords)\n\n self._register(self.clear_series_button, 'clear_series', 'clicks')\n self.connect('clear_series', self.clear_series)\n\n self.control.displayer.connect('variable_selected',\n self.check_is_plottable)\n self.control.displayer.connect('variable_selected',\n self._link_aggregation_selectors)\n self.control.fields.connect('x', self._link_aggregation_selectors)\n self.control.fields.connect('y', self._link_aggregation_selectors)\n\n self.panel = pn.Column(self.control.panel,\n pn.Row(self.plot_button,\n self.clear_series_button),\n self.output,\n self.series_graph, width_policy='max')\n\n # To auto-select in case of single variable\n if len(list(self.data.variables)) == 1:\n self.control.displayer.select.value = list(self.data.variables)\n\n self.control.setup_initial_values(self.initial_params)\n self.taps = []\n self.tap_stream = 
streams.Tap(transient=True)\n colors = ['#60fffc', '#6da252', '#ff60d4', '#ff9400', '#f4e322',\n '#229cf4', '#af9862', '#629baf', '#7eed5a', '#e29ec8',\n '#ff4300']\n self.color_pool = cycle(colors)\n self.clear_points = hv.streams.Stream.define(\n 'Clear_points', clear=False)(transient=True)\n\n def clear_series(self, *args):\n \"\"\"\n Clears the markers on the image, and the extracted series.\n \"\"\"\n if not self.clear_series_button.disabled:\n self.series_graph[0] = pn.Spacer(name='Series Graph')\n self.series = hv.Points([]).opts(height=self.kwargs['height'],\n width=self.kwargs['width'])\n self.taps.clear()\n self.clear_points.event(clear=True)\n\n def _link_aggregation_selectors(self, *args):\n for dim_selector in self.control.kwargs['remaining_dims']:\n self.control.fields.connect(dim_selector, self.control.style.setup)\n\n def create_graph(self, *args):\n \"\"\"\n Creates a graph according to the values selected in the widgets.\n\n This method is usually invoked by the user clicking \"Plot\"\n\n It handles the following two cases:\n\n 1. Both `x`, `y` are present in selected variable's coordinates.\n Geographic projection is possible only in this case. It uses\n ``create_selectors_players`` method for creation of the graph.\n Here the selectors generated automatically by hvplot are used.\n\n 2. One or both of `x`, `y` are NOT present in selected variable's\n coordinates (both `x` and `y` are considered as dimensions). It\n uses ``create_indexed_graph`` method for creation of the graph.\n The selectors are created and linked with graph by XrViz.\n \"\"\"\n self.kwargs = self.control.kwargs\n self.var = self.kwargs['Variables']\n if self.index_selectors:\n for selector in self.index_selectors:\n del selector\n self.index_selectors = []\n self.output[1].clear() # clears Index_selectors\n self.series_graph[0] = pn.Spacer(name='Series Graph')\n self.series = hv.Points([]).opts(height=self.kwargs['height'],\n width=self.kwargs['width'])\n self.taps.clear()\n self.control.fields.connect('extract_along', self.clear_series)\n\n are_var_coords = self.kwargs['are_var_coords']\n if are_var_coords:\n graph_opts = {'x': self.kwargs['x'],\n 'y': self.kwargs['y'],\n 'title': self.var,\n 'height': self.kwargs['height'],\n 'width': self.kwargs['width'],\n 'cmap': self.kwargs['cmap'],\n 'colorbar': self.kwargs['colorbar'],\n 'rasterize': self.kwargs['rasterize']}\n color_scale = self.kwargs['color_scale']\n dims_to_agg = self.kwargs['dims_to_agg']\n use_all_data = self.kwargs['compute min/max from all data']\n sel_data = self.data[self.var]\n\n if has_cartopy:\n is_geo = self.kwargs['is_geo']\n base_map = self.kwargs['basemap']\n show_map = True if base_map != None else False\n\n if is_geo:\n crs_params = self.kwargs['crs params']\n crs_params = process_proj_params(crs_params)\n crs = getattr(ccrs, self.kwargs['crs'])(**crs_params)\n geo_ops = {'alpha': self.kwargs['alpha'],\n 'project': self.kwargs['project'],\n 'global_extent': self.kwargs['global_extent'],\n 'geo': True,\n 'crs': crs}\n if not show_map:\n # find projection and crs, add it to geo_ops\n proj_val = self.kwargs['projection']\n if proj_val:\n proj_params = self.kwargs['projection params']\n proj_params = process_proj_params(proj_params)\n projection = getattr(ccrs, self.kwargs['projection'])(**proj_params)\n geo_ops.update({'projection': projection})\n\n graph_opts.update(geo_ops)\n\n feature_map = gv.Overlay([getattr(gf, feat) for feat in self.kwargs['features'] if feat is not 'None'])\n\n for dim in dims_to_agg:\n if 
self.kwargs[dim] == 'count':\n sel_data = (~ sel_data.isnull()).sum(dim)\n else:\n agg = self.kwargs[dim]\n sel_data = getattr(sel_data, agg)(dim)\n\n if self.var in list(sel_data.coords): # When a var(coord) is plotted wrt itself\n sel_data = sel_data.to_dataset(name=f'{sel_data.name}_')\n\n if color_scale is not 'linear':\n sel_data = getattr(numpy, color_scale)(sel_data) # Color Scaling\n\n if not use_all_data:\n # sel the values at first step, to use for cmap limits\n sels = {dim: 0 for dim in self.kwargs['dims_to_select_animate']}\n sel_data_for_cmap = sel_data.isel(**sels, drop=True)\n else:\n sel_data_for_cmap = sel_data\n\n cmin, cmax = self.kwargs['cmap lower limit'], self.kwargs['cmap upper limit']\n cmin, cmax = (cmin, cmax) if is_float(cmin) and is_float(cmax) else ('', '')\n\n # It is better to set initial values as 0.1,0.9 rather than\n # 0,1(min, max) to get a color balance graph\n c_lim_lower, c_lim_upper = (\n (float(cmin), float(cmax)) if cmin and cmax\n else find_cmap_limits(sel_data_for_cmap))\n\n color_range = {sel_data.name: (c_lim_lower, c_lim_upper)}\n\n if not cmin: # if user left blank or initial values are empty\n self.control.style.lower_limit.value = str(round(c_lim_lower, 5))\n self.control.style.upper_limit.value = str(round(c_lim_upper, 5))\n\n assign_opts = {dim: self.data[dim] for dim in sel_data.dims}\n # Following tasks are happening here:\n # 1. assign_opts: reassignment of coords(if not done result in\n # errors for some of the selections in fields panel)\n # 2. graph_opts: customise the plot according to selections in\n # style and projection(if available)\n # 3. color_range: customise the colormap range according to cmap\n # lower and upper limits\n # 4. active_tools: activate the tools required such as 'wheel_zoom',\n # 'pan'\n graph = sel_data.assign_coords(\n **assign_opts).hvplot.quadmesh(\n **graph_opts).redim.range(**color_range).opts(\n active_tools=['wheel_zoom', 'pan'])\n\n self.tap_stream.source = graph\n\n if has_cartopy and is_geo:\n graph = (\n feature_map * graph\n if self.kwargs['features'] != ['None'] else graph\n )\n if show_map:\n graph = base_map * graph\n self.tap_stream.source = graph\n\n self.create_selectors_players(graph)\n\n else: # if one or both x,y are var_dims\n self.var_dims = list(self.data[self.var].dims)\n # var_selector_dims refers to dims for which index_selectors\n # would be created\n self.var_selector_dims = self.kwargs['dims_to_select_animate']\n\n for dim in self.var_selector_dims:\n ops = list(self.data[self.var][dim].values)\n\n if self.kwargs[dim] == 'select':\n selector = pn.widgets.Select(name=dim, options=ops)\n else:\n selector = pn.widgets.DiscretePlayer(name=dim,\n value=ops[0],\n options=ops)\n self.index_selectors.append(selector)\n self._register(selector, selector.name)\n self.connect(selector.name, self.create_indexed_graph)\n\n self.create_indexed_graph()\n for selector in self.index_selectors:\n if isinstance(selector, pn.widgets.Select):\n self.output[1].append(selector)\n else:\n player = player_with_name_and_value(selector)\n self.output[1].append(player)\n\n def create_indexed_graph(self, *args):\n \"\"\"\n Creates a graph for the dimensions selected in widgets `x` and `y`.\n\n This is used when values selected in `x` and `y` are not data\n coordinates (i.e. 
one or both values are data dimensions).\n \"\"\"\n self.kwargs = self.control.kwargs\n selection = {} # to collect the value of index selectors\n for i, dim in enumerate(list(self.var_selector_dims)):\n selection[dim] = self.index_selectors[i].value\n graph_opts = {'x': self.kwargs['x'],\n 'y': self.kwargs['y'],\n 'title': self.var,\n 'height': self.kwargs['height'],\n 'width': self.kwargs['width'],\n 'cmap': self.kwargs['cmap'],\n 'colorbar': self.kwargs['colorbar'],\n 'rasterize': self.kwargs['rasterize']}\n dims_to_agg = self.kwargs['dims_to_agg']\n color_scale = self.kwargs['color_scale']\n use_all_data = self.kwargs['compute min/max from all data']\n\n sel_data = self.data[self.var]\n\n for dim in dims_to_agg:\n if self.kwargs[dim] == 'count':\n sel_data = (~ sel_data.isnull()).sum(dim)\n else:\n agg = self.kwargs[dim]\n sel_data = getattr(sel_data, agg)(dim)\n\n # rename the sel_data in case it is a coordinate, because we\n # cannot create a Dataset from a DataArray with the same name\n # as one of its coordinates\n if sel_data.name in self.data.coords:\n sel_data = sel_data.to_dataset(name=f'{sel_data.name}_')\n\n if not use_all_data: # do the selection earlier\n sel_data = sel_data.sel(**selection, drop=True)\n\n if color_scale is not 'linear':\n sel_data = getattr(numpy, color_scale)(sel_data) # Color Scaling\n\n cmin, cmax = self.kwargs['cmap lower limit'], self.kwargs['cmap upper limit']\n cmin, cmax = (cmin, cmax) if is_float(cmin) and is_float(cmax) else ('', '')\n\n # It is better to set initial values as 0.1,0.9 rather than\n # 0,1(min, max) to get a color balance graph\n c_lim_lower, c_lim_upper = (\n (float(cmin), float(cmax)) if cmin and cmax\n else find_cmap_limits(sel_data))\n\n color_range = {sel_data.name: (c_lim_lower, c_lim_upper)}\n\n if not cmin: # if user left blank or initial values are empty\n self.control.style.lower_limit.value = str(round(c_lim_lower, 5))\n self.control.style.upper_limit.value = str(round(c_lim_upper, 5))\n\n if use_all_data: # do the selection later\n sel_data = sel_data.sel(**selection, drop=True)\n\n assign_opts = {dim: self.data[dim] for dim in sel_data.dims}\n graph = sel_data.assign_coords(\n **assign_opts).hvplot.quadmesh(**graph_opts).redim.range(\n **color_range).opts(active_tools=['wheel_zoom', 'pan'])\n self.graph = graph\n if len(self.data[self.var].dims) > 2 and self.kwargs['extract along']:\n self.tap_stream.source = graph\n self.taps_graph = hv.DynamicMap(\n self.create_taps_graph,\n streams=[self.tap_stream, self.clear_points])\n self.output[0] = self.graph * self.taps_graph\n self.clear_series_button.disabled = False\n else:\n self.output[0] = self.graph\n self.clear_series_button.disabled = True\n\n def create_taps_graph(self, x, y, clear=False):\n \"\"\"\n Create an output layer in the graph which responds to taps\n\n Whenever the user taps (or clicks) the graph, a glyph will be overlaid,\n and a series is extracted at that point.\n \"\"\"\n color = next(iter(self.color_pool))\n if None not in [x, y]:\n self.taps.append((x, y, color))\n if self.control.kwargs['extract along'] is None:\n self.taps = []\n is_geo = self.kwargs['is_geo'] if 'is_geo' in self.kwargs else None\n geo_disabled = self.control.projection.is_geo.disabled if is_geo else None\n\n # Choose between gv.Points and hv.Points\n if is_geo and geo_disabled is False:\n tapped_map = gv.Points(self.taps, vdims=['z'])\n else:\n tapped_map = hv.Points(self.taps, vdims=['z'])\n tapped_map.opts(color='z', marker='triangle', line_color='black',\n size=8)\n 
self.series_graph[0] = self.create_series_graph(x, y, color, clear)\n return tapped_map\n\n def create_series_graph(self, x, y, color, clear=False):\n \"\"\"\n Extract a series at a given point, and plot it.\n\n The series plotted has same color as that of the marker depicting the\n location of the tap.\n\n The following cases have been handled:\n `Case 1`:\n When both x and y are NOT coords (i.e. are dims)\n\n `Case 2`:\n When both x and y are coords\n\n ``2a``: Both are 1-dimensional\n\n ``2b``: Both are 2-dimensional with same dimensions.\n\n ``2c``: Both are 2-dimensional with different dims or are multi-dimcoordinates. Here we are unable to extract.\n Note that ``Case 1`` and ``Case 2a`` can be handled with the same\n code.\n \"\"\"\n extract_along = self.control.kwargs['extract along']\n if None not in [x, y] and extract_along:\n color = self.taps[-1][-1] if self.taps[-1][-1] else None\n other_dims = [dim for dim in self.kwargs['remaining_dims'] if\n dim is not extract_along]\n\n # to use the value selected in index selector for selecting\n # data to create series. In case of aggregation, plot is\n # created along 0th val of the dim.\n if len(other_dims):\n other_dim_sels = {}\n for dim in other_dims:\n dim_found = False\n for dim_sel in self.index_selectors:\n long_name = self.data[dim].long_name if hasattr(\n self.data[dim], 'long_name') else None\n if dim_sel.name == dim or dim_sel.name == long_name:\n val = dim_sel.value\n other_dim_sels.update({dim: val})\n dim_found = True\n if not dim_found: # when dim is used for aggregation\n val = self.data[dim][0].values\n other_dim_sels.update({dim: val})\n\n # Case 1 and 2a\n if not self.kwargs['are_var_coords'] or self.both_coords_1d():\n series_sel = {\n self.kwargs['x']: self.correct_val(self.kwargs['x'], x),\n self.kwargs['y']: self.correct_val(self.kwargs['y'], y)}\n # Case 2b\n elif self.both_coords_2d_with_same_dims():\n y_dim, x_dim = self.data[self.kwargs['x']].dims\n\n y_mean = self.data[self.kwargs['y']].mean() * np.pi / 180.\n a = (self.data[self.kwargs['y']] - y) ** 2 + (\n (self.data[self.kwargs['x']] - x) * np.cos(\n y_mean)) ** 2\n j, i = np.unravel_index(a.argmin(), a.shape)\n\n series_sel = {x_dim: self.correct_val(x_dim, i),\n y_dim: self.correct_val(y_dim, j)}\n # Case 2c\n else:\n logger.debug(\"Cannot extract 2d coords with different dims and\"\n \" multi-dimensional coords.\")\n return self.series\n\n if len(other_dims):\n series_sel.update(other_dim_sels)\n\n sel_series_data = self.data[self.var]\n for dim, val in series_sel.items():\n sel_series_data = sel_val_from_dim(sel_series_data, dim, val)\n\n series_df = pd.DataFrame({extract_along: self.data[extract_along],\n self.var: np.asarray(sel_series_data)})\n\n tooltips = [(extract_along, f\"@{extract_along}\"),\n (self.var, f\"@{self.var}\")]\n if len(other_dims):\n for dim, val in other_dim_sels.items():\n tooltips.append((dim, str(val)))\n hover = HoverTool(tooltips=tooltips)\n\n series_map = series_df.hvplot(x=extract_along, y=self.var,\n height=self.kwargs['height'],\n width=self.kwargs['width'],\n tools=[hover])\n self.series = series_map.opts(color=color) * self.series\n\n return self.series\n\n def create_selectors_players(self, graph):\n \"\"\"\n Converts the sliders generated by hvplot into selectors/players.\n\n This is applicable only when both `x` and `y` are present in variable\n coordinates. 
It converts any sliders generated by hvplot into\n selectors/players and moves them to the bottom of graph.\n \"\"\"\n if len(self.data[self.var].dims) > 2 and self.kwargs['extract along']:\n self.taps_graph = hv.DynamicMap(self.create_taps_graph,\n streams=[self.tap_stream,\n self.clear_points])\n self.clear_series_button.disabled = False\n graph = graph * self.taps_graph\n else:\n self.clear_series_button.disabled = True\n graph = pn.Row(graph)\n try:\n if graph[0][1]: # if sliders are generated\n self.output[0] = graph[0][0]\n\n # link the generated slider with agg selector in fields\n for slider in graph[0][1]:\n for dim in self.kwargs['dims_to_select_animate']:\n long_name = self.data[dim].long_name if hasattr(\n self.data[dim], 'long_name') else None\n if slider.name == dim or slider.name == long_name:\n if self.kwargs[dim] == 'select':\n selector = convert_widget(slider,\n pn.widgets.Select())\n else:\n selector = convert_widget(\n slider, pn.widgets.DiscretePlayer())\n self.index_selectors.append(selector)\n\n for selector in self.index_selectors:\n if isinstance(selector, pn.widgets.Select):\n self.output[1].append(selector)\n else:\n player = player_with_name_and_value(selector)\n self.output[1].append(player)\n\n except: # else return simple graph\n self.output[0] = graph\n\n def set_data(self, data):\n self.data = (\n xr.Dataset({f'{data.name}': data})\n if isinstance(data, xr.DataArray) else data\n )\n\n def set_coords(self, *args):\n # We can't reset indexed coordinates so add them every time\n # in coord_selector.value\n self.data = self.data.reset_coords()\n indexed_coords = set(self.data.dims).intersection(set(self.data.coords))\n new_coords = set(args[0]).union(indexed_coords)\n self.data = self.data.set_coords(new_coords) # this `set_coords` belongs to xr.dataset\n self.control.set_coords(self.data)\n\n def check_is_plottable(self, var):\n \"\"\"\n Check if a data variable can be plotted.\n\n If a variable is 1-d, disable plot_button for it.\n \"\"\"\n self.plot_button.disabled = False # important to enable button once disabled\n data = self.data[var[0]]\n self.plot_button.disabled = len(data.dims) <= 1\n\n def correct_val(self, dim, x):\n \"\"\" Convert tapped coordinates to int, if not time-type\n \"\"\"\n dtype = self.data[dim].dtype.kind\n if dtype == 'i':\n return int(x)\n elif dtype == 'f':\n return float(x)\n else:\n return str(x)\n\n def both_coords_1d(self):\n return len(self.data[self.kwargs['x']].dims) == 1 and len(self.data[self.kwargs['y']].dims) == 1\n\n def both_coords_2d_with_same_dims(self):\n x_dims = self.data[self.kwargs['x']].dims\n y_dims = self.data[self.kwargs['y']].dims\n return len(x_dims) == len(y_dims) == 2 and sorted(x_dims) == sorted(y_dims)\n\n\ndef find_cmap_limits(sel_data):\n if isinstance(sel_data.data, dask.array.core.Array):\n method = 'tdigest' if has_crick_tdigest else 'default'\n return dask.array.percentile(sel_data.data.ravel(), (10, 90),\n method=method).compute()\n else: # if sel_data.data is numpy.ndarray\n return [float(q) for q in sel_data.quantile([0.1, 0.9])]\n\n\ndef sel_val_from_dim(data, dim, x):\n \"\"\" Select values from a dim.\n \"\"\"\n try:\n return data.sel({dim: x})\n except:\n return data.sel({dim: x}, method='nearest')\n\n\ndef process_proj_params(params):\n params = ast.literal_eval(params)\n for k, v in params.items():\n if k == 'globe' and params['globe']:\n globe = ccrs.Globe(**v)\n params.update({'globe': globe})\n return params\n"
] |
[
[
"numpy.asarray",
"numpy.cos"
]
] |
RichardScottOZ/xrft
|
[
"5b18b88957661b8f5e1967ec28e81c552c63834f"
] |
[
"xrft/xrft.py"
] |
[
"import warnings\nimport operator\nimport sys\nimport functools as ft\nfrom functools import reduce\n\nimport numpy as np\nimport xarray as xr\nimport pandas as pd\n\nimport dask.array as dsar\nfrom dask import delayed\n\nimport scipy.signal as sps\nimport scipy.linalg as spl\n\nfrom .detrend import detrend as _detrend\n\n\n__all__ = [\n \"fft\",\n \"ifft\",\n \"dft\",\n \"idft\",\n \"power_spectrum\",\n \"cross_spectrum\",\n \"cross_phase\",\n \"isotropize\",\n \"isotropic_power_spectrum\",\n \"isotropic_cross_spectrum\",\n \"isotropic_powerspectrum\",\n \"isotropic_crossspectrum\",\n \"fit_loglog\",\n]\n\n\ndef _fft_module(da):\n if da.chunks:\n return dsar.fft\n else:\n return np.fft\n\n\ndef _apply_window(da, dims, window_type=\"hann\"):\n \"\"\"Creating windows in dimensions dims.\"\"\"\n\n if window_type == True:\n window_type = \"hann\"\n warnings.warn(\n \"Please provide the name of window adhering to scipy.signal.windows. The boolean option will be deprecated in future releases.\",\n FutureWarning,\n )\n elif window_type not in [\n \"hann\",\n \"hamming\",\n \"kaiser\",\n \"tukey\",\n \"parzen\",\n \"taylor\",\n \"boxcar\",\n \"barthann\",\n \"bartlett\",\n \"blackman\",\n \"blackmanharris\",\n \"bohman\",\n \"chebwin\",\n \"cosine\",\n \"dpss\",\n \"exponential\",\n \"flattop\",\n \"gaussian\",\n \"general_cosine\",\n \"general_gaussian\",\n \"general_hamming\",\n \"triang\",\n \"nuttall\",\n ]:\n raise NotImplementedError(\n \"Window type {window_type} not supported. Please adhere to scipy.signal.windows for naming convention.\"\n )\n\n if dims is None:\n dims = list(da.dims)\n else:\n if isinstance(dims, str):\n dims = [dims]\n\n scipy_win_func = getattr(sps.windows, window_type)\n\n if da.chunks:\n\n def dask_win_func(n, sym=False):\n return dsar.from_delayed(\n delayed(scipy_win_func, pure=True)(n, sym=sym), (n,), float\n )\n\n win_func = dask_win_func\n else:\n win_func = scipy_win_func\n\n windows = [\n xr.DataArray(\n win_func(len(da[d]), sym=False), dims=da[d].dims, coords=da[d].coords\n )\n for d in dims\n ]\n\n return reduce(operator.mul, windows[::-1]), da * reduce(operator.mul, windows[::-1])\n\n\ndef _stack_chunks(da, dim, suffix=\"_segment\"):\n \"\"\"Reshape a DataArray so there is only one chunk along dimension `dim`\"\"\"\n data = da.data\n attr = da.attrs\n newdims = []\n newcoords = {}\n newshape = []\n for d in da.dims:\n if d in dim:\n axis_num = da.get_axis_num(d)\n if np.diff(da.chunks[axis_num]).sum() != 0:\n raise ValueError(\"Chunk lengths need to be the same.\")\n n = len(da[d])\n chunklen = da.chunks[axis_num][0]\n coord_rs = da[d].data.reshape((int(n / chunklen), int(chunklen)))\n newdims.append(d + suffix)\n newdims.append(d)\n newshape.append(int(n / chunklen))\n newshape.append(int(chunklen))\n newcoords[d + suffix] = range(int(n / chunklen))\n newcoords[d] = coord_rs[0]\n else:\n newdims.append(d)\n newshape.append(len(da[d]))\n newcoords[d] = da[d].data\n\n da = xr.DataArray(\n data.reshape(newshape), dims=newdims, coords=newcoords, attrs=attr\n )\n\n return da\n\n\ndef _freq(N, delta_x, real, shift):\n # calculate frequencies from coordinates\n # coordinates are always loaded eagerly, so we use numpy\n if real is None:\n fftfreq = [np.fft.fftfreq] * len(N)\n else:\n # Discard negative frequencies from transform along last axis to be\n # consistent with np.fft.rfftn\n fftfreq = [np.fft.fftfreq] * (len(N) - 1)\n fftfreq.append(np.fft.rfftfreq)\n\n k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]\n\n if shift:\n k = 
[np.fft.fftshift(l) for l in k]\n\n return k\n\n\ndef _ifreq(N, delta_x, real, shift):\n # calculate frequencies from coordinates\n # coordinates are always loaded eagerly, so we use numpy\n if real is None:\n fftfreq = [np.fft.fftfreq] * len(N)\n else:\n irfftfreq = lambda Nx, dx: np.fft.fftfreq(\n 2 * (Nx - 1), dx\n ) # Not in standard numpy !\n fftfreq = [np.fft.fftfreq] * (len(N) - 1)\n fftfreq.append(irfftfreq)\n\n k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]\n\n if shift:\n k = [np.fft.fftshift(l) for l in k]\n\n return k\n\n\ndef _new_dims_and_coords(da, dim, wavenm, prefix):\n # set up new dimensions and coordinates for dataarray\n swap_dims = dict()\n new_coords = dict()\n wavenm = dict(zip(dim, wavenm))\n\n for d in dim:\n k = wavenm[d]\n new_name = prefix + d if d[: len(prefix)] != prefix else d[len(prefix) :]\n new_dim = xr.DataArray(k, dims=new_name, coords={new_name: k}, name=new_name)\n new_dim.attrs.update({\"spacing\": k[1] - k[0]})\n new_coords[new_name] = new_dim\n swap_dims[d] = new_name\n\n return new_coords, swap_dims\n\n\ndef _diff_coord(coord):\n \"\"\"Returns the difference as a xarray.DataArray.\"\"\"\n\n v0 = coord.values[0]\n calendar = getattr(v0, \"calendar\", None)\n if calendar:\n import cftime\n\n ref_units = \"seconds since 1800-01-01 00:00:00\"\n decoded_time = cftime.date2num(coord, ref_units, calendar)\n coord = xr.DataArray(decoded_time, dims=coord.dims, coords=coord.coords)\n return np.diff(coord)\n elif pd.api.types.is_datetime64_dtype(v0):\n return np.diff(coord).astype(\"timedelta64[s]\").astype(\"f8\")\n else:\n return np.diff(coord)\n\n\ndef _lag_coord(coord):\n \"\"\"Returns the coordinate lag\"\"\"\n\n v0 = coord.values[0]\n calendar = getattr(v0, \"calendar\", None)\n if coord[-1] > coord[0]:\n coord_data = coord.data\n else:\n coord_data = np.flip(coord.data, axis=-1)\n lag = coord_data[len(coord.data) // 2]\n if calendar:\n import cftime\n\n ref_units = \"seconds since 1800-01-01 00:00:00\"\n decoded_time = cftime.date2num(lag, ref_units, calendar)\n return decoded_time\n elif pd.api.types.is_datetime64_dtype(v0):\n return lag.astype(\"timedelta64[s]\").astype(\"f8\").data\n else:\n return lag.data\n\n\ndef dft(\n da, dim=None, true_phase=False, true_amplitude=False, **kwargs\n): # pragma: no cover\n \"\"\"\n Deprecated function. See fft doc\n \"\"\"\n msg = (\n \"This function has been renamed and will disappear in the future.\"\n + \" Please use `fft` instead\"\n )\n warnings.warn(msg, FutureWarning)\n return fft(\n da, dim=dim, true_phase=true_phase, true_amplitude=true_amplitude, **kwargs\n )\n\n\ndef idft(\n daft, dim=None, true_phase=False, true_amplitude=False, **kwargs\n): # pragma: no cover\n \"\"\"\n Deprecated function. See ifft doc\n \"\"\"\n msg = (\n \"This function has been renamed and will disappear in the future.\"\n + \" Please use `ifft` instead\"\n )\n warnings.warn(msg, FutureWarning)\n return ifft(\n daft, dim=dim, true_phase=true_phase, true_amplitude=true_amplitude, **kwargs\n )\n\n\ndef fft(\n da,\n spacing_tol=1e-3,\n dim=None,\n real_dim=None,\n shift=True,\n detrend=None,\n window=None,\n true_phase=False,\n true_amplitude=False,\n chunks_to_segments=False,\n prefix=\"freq_\",\n **kwargs,\n):\n \"\"\"\n Perform discrete Fourier transform of xarray data-array `da` along the\n specified dimensions.\n\n .. math::\n daft = \\mathbb{F}(da - \\overline{da})\n\n Parameters\n ----------\n da : `xarray.DataArray`\n The data to be transformed\n spacing_tol: float, optional\n Spacing tolerance. 
Fourier transform should not be applied to uneven grid but\n this restriction can be relaxed with this setting. Use caution.\n dim : str or sequence of str, optional\n The dimensions along which to take the transformation. If `None`, all\n dimensions will be transformed. If the inputs are dask arrays, the\n arrays must not be chunked along these dimensions.\n real_dim : str, optional\n Real Fourier transform will be taken along this dimension.\n shift : bool, default\n Whether to shift the fft output. Default is `True`, unless `real_dim is not None`,\n in which case shift will be set to False always.\n detrend : {None, 'constant', 'linear'}\n If `constant`, the mean across the transform dimensions will be\n subtracted before calculating the Fourier transform (FT).\n If `linear`, the linear least-square fit will be subtracted before\n the FT. For `linear`, only dims of length 1 and 2 are supported.\n window : str, optional\n Whether to apply a window to the data before the Fourier\n transform is taken. A window will be applied to all the dimensions in\n dim. Please follow `scipy.signal.windows`' naming convention.\n true_phase : bool, optional\n If set to False, standard fft algorithm is applied on signal without consideration of coordinates.\n If set to True, coordinates location are correctly taken into account to evaluate Fourier Tranforrm phase and\n fftshift is applied on input signal prior to fft (fft algorithm intrinsically considers that input signal is on fftshifted grid).\n true_amplitude : bool, optional\n If set to True, output is multiplied by the spacing of the transformed variables to match theoretical FT amplitude.\n If set to False, amplitude regularisation by spacing is not applied (as in numpy.fft)\n chunks_to_segments : bool, optional\n Whether the data is chunked along the axis to take FFT.\n prefix : str\n The prefix for the new transformed dimensions.\n\n Returns\n -------\n daft : `xarray.DataArray`\n The output of the Fourier transformation, with appropriate dimensions.\n \"\"\"\n\n if not true_phase and not true_amplitude:\n msg = \"Flags true_phase and true_amplitude will be set to True in future versions of xrft.dft to preserve the theoretical phasing and amplitude of Fourier Transform. 
Consider using xrft.fft to ensure future compatibility with numpy.fft like behavior and to deactivate this warning.\"\n warnings.warn(msg, FutureWarning)\n\n if dim is None:\n dim = list(da.dims)\n else:\n if isinstance(dim, str):\n dim = [dim]\n\n if \"real\" in kwargs:\n real_dim = kwargs.get(\"real\")\n msg = \"`real` flag will be deprecated in future version of xrft.dft and replaced by `real_dim` flag.\"\n warnings.warn(msg, FutureWarning)\n\n if real_dim is not None:\n if real_dim not in da.dims:\n raise ValueError(\n \"The dimension along which real FT is taken must be one of the existing dimensions.\"\n )\n else:\n dim = [d for d in dim if d != real_dim] + [\n real_dim\n ] # real dim has to be moved or added at the end !\n\n if chunks_to_segments:\n da = _stack_chunks(da, dim)\n\n rawdims = da.dims # take care of segmented dimesions, if any\n\n if real_dim is not None:\n da = da.transpose(\n *[d for d in da.dims if d not in [real_dim]] + [real_dim]\n ) # dimension for real transformed is moved at the end\n\n fftm = _fft_module(da)\n\n if real_dim is None:\n fft_fn = fftm.fftn\n else:\n shift = False\n fft_fn = fftm.rfftn\n\n # the axes along which to take ffts\n axis_num = [\n da.get_axis_num(d) for d in dim\n ] # if there is a real dim , it has to be the last one\n\n N = [da.shape[n] for n in axis_num]\n\n # raise error if there are multiple coordinates attached to the dimension(s) over which the FFT is taken\n for d in dim:\n bad_coords = [\n cname for cname in da.coords if cname != d and d in da[cname].dims\n ]\n if bad_coords:\n raise ValueError(\n f\"The input array contains coordinate variable(s) ({bad_coords}) whose dims include the transform dimension(s) `{d}`. \"\n f\"Please drop these coordinates (`.drop({bad_coords}`) before invoking xrft.\"\n )\n\n # verify even spacing of input coordinates\n delta_x = []\n lag_x = []\n for d in dim:\n diff = _diff_coord(da[d])\n delta = np.abs(diff[0])\n lag = _lag_coord(da[d])\n if not np.allclose(diff, diff[0], rtol=spacing_tol):\n raise ValueError(\n \"Can't take Fourier transform because \"\n \"coodinate %s is not evenly spaced\" % d\n )\n if delta == 0.0:\n raise ValueError(\n \"Can't take Fourier transform because spacing in coordinate %s is zero\"\n % d\n )\n delta_x.append(delta)\n lag_x.append(lag)\n\n if detrend is not None:\n if detrend == \"linear\":\n orig_dims = da.dims\n da = _detrend(da, dim, detrend_type=detrend).transpose(*orig_dims)\n else:\n da = _detrend(da, dim, detrend_type=detrend)\n\n if window is not None:\n _, da = _apply_window(da, dim, window_type=window)\n\n if true_phase:\n reversed_axis = [\n da.get_axis_num(d) for d in dim if da[d][-1] < da[d][0]\n ] # handling decreasing coordinates\n f = fft_fn(\n fftm.ifftshift(np.flip(da, axis=reversed_axis), axes=axis_num),\n axes=axis_num,\n )\n else:\n f = fft_fn(da.data, axes=axis_num)\n\n if shift:\n f = fftm.fftshift(f, axes=axis_num)\n\n k = _freq(N, delta_x, real_dim, shift)\n\n newcoords, swap_dims = _new_dims_and_coords(da, dim, k, prefix)\n daft = xr.DataArray(\n f, dims=da.dims, coords=dict([c for c in da.coords.items() if c[0] not in dim])\n )\n daft = daft.swap_dims(swap_dims).assign_coords(newcoords)\n daft = daft.drop([d for d in dim if d in daft.coords])\n\n updated_dims = [\n daft.dims[i] for i in da.get_axis_num(dim)\n ] # List of transformed dimensions\n\n if true_phase:\n for up_dim, lag in zip(updated_dims, lag_x):\n daft = daft * xr.DataArray(\n np.exp(-1j * 2.0 * np.pi * newcoords[up_dim] * lag),\n dims=up_dim,\n coords={up_dim: 
newcoords[up_dim]},\n ) # taking advantage of xarray broadcasting and ordered coordinates\n daft[up_dim].attrs.update({\"direct_lag\": lag.obj})\n\n if true_amplitude:\n daft = daft * np.prod(delta_x)\n\n return daft.transpose(\n *[swap_dims.get(d, d) for d in rawdims]\n ) # Do nothing if da was not transposed\n\n\ndef ifft(\n daft,\n spacing_tol=1e-3,\n dim=None,\n real_dim=None,\n shift=True,\n true_phase=False,\n true_amplitude=False,\n chunks_to_segments=False,\n prefix=\"freq_\",\n lag=None,\n **kwargs,\n):\n \"\"\"\n Perform inverse discrete Fourier transform of xarray data-array `daft` along the\n specified dimensions.\n\n .. math::\n da = \\mathbb{F}(daft - \\overline{daft})\n\n Parameters\n ----------\n daft : `xarray.DataArray`\n The data to be transformed\n spacing_tol: float, optional\n Spacing tolerance. Fourier transform should not be applied to uneven grid but\n this restriction can be relaxed with this setting. Use caution.\n dim : str or sequence of str, optional\n The dimensions along which to take the transformation. If `None`, all\n dimensions will be transformed.\n real_dim : str, optional\n Real Fourier transform will be taken along this dimension.\n shift : bool, default\n Whether to shift the fft output. Default is `True`.\n chunks_to_segments : bool, optional\n Whether the data is chunked along the axis to take FFT.\n prefix : str\n The prefix for the new transformed dimensions.\n true_phase : bool, optional\n If set to False, standard ifft algorithm is applied on signal without consideration of coordinates order.\n If set to True, coordinates are correctly taken into account to evaluate Inverse Fourier Tranforrm phase and\n fftshift is applied on input signal prior to ifft (ifft algorithm intrinsically considers that input signal is on fftshifted grid).\n true_amplitude : bool, optional\n If set to True, output is divided by the spacing of the transformed variables to match theoretical IFT amplitude.\n If set to False, amplitude regularisation by spacing is not applied (as in numpy.ifft)\n lag : None, float or sequence of float and/or None, optional\n Output coordinates of transformed dimensions will be shifted by corresponding lag values and correct signal phasing will be preserved if true_phase is set to True.\n If lag is None (default), 'direct_lag' attributes of each dimension is used (or set to zero if not found).\n If defined, lag must have same length as dim.\n If lag is a sequence, a None element means that 'direct_lag' attribute will be used for the corresponding dimension\n Manually set lag to zero to get output coordinates centered on zero.\n\n\n Returns\n -------\n da : `xarray.DataArray`\n The output of the Inverse Fourier transformation, with appropriate dimensions.\n \"\"\"\n\n if not true_phase and not true_amplitude:\n msg = \"Flags true_phase and true_amplitude will be set to True in future versions of xrft.idft to preserve the theoretical phasing and amplitude of Inverse Fourier Transform. 
Consider using xrft.ifft to ensure future compatibility with numpy.ifft like behavior and to deactivate this warning.\"\n warnings.warn(msg, FutureWarning)\n\n if dim is None:\n dim = list(daft.dims)\n else:\n if isinstance(dim, str):\n dim = [dim]\n\n if \"real\" in kwargs:\n real_dim = kwargs.get(\"real\")\n msg = \"`real` flag will be deprecated in future version of xrft.idft and replaced by `real_dim` flag.\"\n warnings.warn(msg, FutureWarning)\n if real_dim is not None:\n if real_dim not in daft.dims:\n raise ValueError(\n \"The dimension along which real IFT is taken must be one of the existing dimensions.\"\n )\n else:\n dim = [d for d in dim if d != real_dim] + [\n real_dim\n ] # real dim has to be moved or added at the end !\n if lag is None:\n lag = [daft[d].attrs.get(\"direct_lag\", 0.0) for d in dim]\n msg = \"Default idft's behaviour (lag=None) changed! Default value of lag was zero (centered output coordinates) and is now set to transformed coordinate's attribute: 'direct_lag'.\"\n warnings.warn(msg, FutureWarning)\n else:\n if isinstance(lag, float) or isinstance(lag, int):\n lag = [lag]\n if len(dim) != len(lag):\n raise ValueError(\"dim and lag must have the same length.\")\n if not true_phase:\n msg = \"Setting lag with true_phase=False does not guarantee accurate idft.\"\n warnings.warn(msg, Warning)\n lag = [\n daft[d].attrs.get(\"direct_lag\") if l is None else l\n for d, l in zip(dim, lag)\n ] # enable lag of the form [3.2, None, 7]\n\n if true_phase:\n for d, l in zip(dim, lag):\n daft = daft * np.exp(1j * 2.0 * np.pi * daft[d] * l)\n\n if chunks_to_segments:\n daft = _stack_chunks(daft, dim)\n\n rawdims = daft.dims # take care of segmented dimensions, if any\n\n if real_dim is not None:\n daft = daft.transpose(\n *[d for d in daft.dims if d not in [real_dim]] + [real_dim]\n ) # dimension for real transformed is moved at the end\n\n fftm = _fft_module(daft)\n\n if real_dim is None:\n fft_fn = fftm.ifftn\n else:\n fft_fn = fftm.irfftn\n\n # the axes along which to take ffts\n axis_num = [daft.get_axis_num(d) for d in dim]\n\n N = [daft.shape[n] for n in axis_num]\n\n # verify even spacing of input coordinates (It handle fftshifted grids)\n delta_x = []\n for d in dim:\n diff = _diff_coord(daft[d])\n delta = np.abs(diff[0])\n l = _lag_coord(daft[d]) if d is not real_dim else daft[d][0].data\n if not np.allclose(\n diff, delta, rtol=spacing_tol\n ): # means that input is not on regular increasing grid\n reordered_coord = daft[d].copy()\n reordered_coord = reordered_coord.sortby(d)\n diff = _diff_coord(reordered_coord)\n l = _lag_coord(reordered_coord)\n if np.allclose(\n diff, diff[0], rtol=spacing_tol\n ): # means that input is on fftshifted grid\n daft = daft.sortby(d) # reordering the input\n else:\n raise ValueError(\n \"Can't take Fourier transform because \"\n \"coodinate %s is not evenly spaced\" % d\n )\n if np.abs(l) > spacing_tol:\n raise ValueError(\n \"Inverse Fourier Transform can not be computed because coordinate %s is not centered on zero frequency\"\n % d\n )\n if delta == 0.0:\n raise ValueError(\n \"Can't take Inverse Fourier transform because spacing in coordinate %s is zero\"\n % d\n )\n delta_x.append(delta)\n\n axis_shift = [\n daft.get_axis_num(d) for d in dim if d is not real_dim\n ] # remove real dim of the list\n\n f = fftm.ifftshift(\n daft.data, axes=axis_shift\n ) # Force to be on fftshift grid before Fourier Transform\n f = fft_fn(f, axes=axis_num)\n\n if not true_phase:\n f = fftm.ifftshift(f, axes=axis_num)\n\n if shift:\n f = 
fftm.fftshift(f, axes=axis_num)\n\n k = _ifreq(N, delta_x, real_dim, shift)\n\n newcoords, swap_dims = _new_dims_and_coords(daft, dim, k, prefix)\n da = xr.DataArray(\n f,\n dims=daft.dims,\n coords=dict([c for c in daft.coords.items() if c[0] not in dim]),\n )\n da = da.swap_dims(swap_dims).assign_coords(newcoords)\n da = da.drop([d for d in dim if d in da.coords])\n\n with xr.set_options(\n keep_attrs=True\n ): # This line ensures keeping spacing attribute in output coordinates\n for d, l in zip(dim, lag):\n tfd = swap_dims[d]\n da = da.assign_coords({tfd: da[tfd] + l})\n\n if true_amplitude:\n da = da / np.prod([float(da[up_dim].spacing) for up_dim in swap_dims.values()])\n\n return da.transpose(\n *[swap_dims.get(d, d) for d in rawdims]\n ) # Do nothing if daft was not transposed\n\n\ndef power_spectrum(\n da, dim=None, real_dim=None, scaling=\"density\", window_correction=False, **kwargs\n):\n \"\"\"\n Calculates the power spectrum of da.\n\n .. math::\n da' = da - \\overline{da}\n .. math::\n ps = \\mathbb{F}(da') {\\mathbb{F}(da')}^*\n\n Parameters\n ----------\n da : `xarray.DataArray`\n The data to be transformed\n dim : str or sequence of str, optional\n The dimensions along which to take the transformation. If `None`, all\n dimensions will be transformed.\n real_dim : str, optional\n Real Fourier transform will be taken along this dimension.\n scaling : str, optional\n If 'density', it will normalize the output to power spectral density\n If 'spectrum', it will normalize the output to power spectrum\n window_correction : boolean\n If True, it will correct for the energy reduction resulting from applying a non-uniform window.\n This is the default behaviour of many tools for computing power spectrum (e.g scipy.signal.welch and scipy.signal.periodogram).\n If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.\n If scaling = 'density', correct for the energy (integral) of the spectrum. This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (ie that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.\n If False, the spectrum gives a representation of the power in the windowed signal.\n Note that when True, Parseval's theorem may only be approximately satisfied.\n kwargs : dict : see xrft.dft for argument list\n \"\"\"\n\n if \"density\" in kwargs:\n density = kwargs.pop(\"density\")\n msg = (\n \"density flag will be deprecated in future version of xrft.power_spectrum and replaced by scaling flag. 
\"\n + 'density=True should be replaced by scaling=\"density\" and '\n + \"density=False will not be maintained.\\nscaling flag is ignored !\"\n )\n warnings.warn(msg, FutureWarning)\n scaling = \"density\" if density else \"false_density\"\n\n if \"real\" in kwargs:\n real_dim = kwargs.get(\"real\")\n msg = \"`real` flag will be deprecated in future version of xrft.power_spectrum and replaced by `real_dim` flag.\"\n warnings.warn(msg, FutureWarning)\n\n kwargs.update(\n {\"true_amplitude\": True, \"true_phase\": False}\n ) # true_phase do not matter in power_spectrum\n\n daft = fft(da, dim=dim, real_dim=real_dim, **kwargs)\n updated_dims = [\n d for d in daft.dims if (d not in da.dims and \"segment\" not in d)\n ] # Transformed dimensions\n ps = np.abs(daft) ** 2\n\n if real_dim is not None:\n real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][\n 0\n ] # find transformed real dimension\n f = np.full(ps.sizes[real], 2.0)\n if len(da[real_dim]) % 2 == 0:\n f[0], f[-1] = 1.0, 1.0\n else:\n f[0] = 1.0\n ps = ps * xr.DataArray(f, dims=real, coords=ps[real].coords)\n\n if scaling == \"density\":\n if window_correction:\n if kwargs.get(\"window\") == None:\n raise ValueError(\n \"window_correction can only be applied when windowing is turned on.\"\n )\n else:\n windows, _ = _apply_window(da, dim, window_type=kwargs.get(\"window\"))\n ps = ps / (windows ** 2).mean()\n fs = np.prod([float(ps[d].spacing) for d in updated_dims])\n ps *= fs\n elif scaling == \"spectrum\":\n if window_correction:\n if kwargs.get(\"window\") == None:\n raise ValueError(\n \"window_correction can only be applied when windowing is turned on.\"\n )\n else:\n windows, _ = _apply_window(da, dim, window_type=kwargs.get(\"window\"))\n ps = ps / windows.mean() ** 2\n fs = np.prod([float(ps[d].spacing) for d in updated_dims])\n ps *= fs ** 2\n elif scaling == \"false_density\": # Corresponds to density=False\n pass\n else:\n raise ValueError(\"Unknown {} scaling flag\".format(scaling))\n return ps\n\n\ndef cross_spectrum(\n da1,\n da2,\n dim=None,\n real_dim=None,\n scaling=\"density\",\n window_correction=False,\n true_phase=False,\n **kwargs,\n):\n \"\"\"\n Calculates the cross spectra of da1 and da2.\n\n .. math::\n da1' = da1 - \\overline{da1};\\ \\ da2' = da2 - \\overline{da2}\n .. math::\n cs = \\mathbb{F}(da1') {\\mathbb{F}(da2')}^*\n\n Parameters\n ----------\n da1 : `xarray.DataArray`\n The data to be transformed\n da2 : `xarray.DataArray`\n The data to be transformed\n dim : str or sequence of str, optional\n The dimensions along which to take the transformation. If `None`, all\n dimensions will be transformed.\n real_dim : str, optional\n Real Fourier transform will be taken along this dimension.\n scaling : str, optional\n If 'density', it will normalize the output to power spectral density\n If 'spectrum', it will normalize the output to power spectrum\n window_correction : boolean\n If True, it will correct for the energy reduction resulting from applying a non-uniform window.\n This is the default behaviour of many tools for computing power spectrum (e.g scipy.signal.welch and scipy.signal.periodogram).\n If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.\n If scaling = 'density', correct for the energy (integral) of the spectrum. 
This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (ie that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.\n If False, the spectrum gives a representation of the power in the windowed signal.\n Note that when True, Parseval's theorem may only be approximately satisfied.\n kwargs : dict : see xrft.dft for argument list\n \"\"\"\n\n if not true_phase:\n msg = (\n \"true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_spectrum output. \"\n + \"Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility \"\n + \"with numpy-like behavior where the coordinates are disregarded.\"\n )\n warnings.warn(msg, FutureWarning)\n\n if \"real\" in kwargs:\n real_dim = kwargs.get(\"real\")\n msg = \"`real` flag will be deprecated in future version of xrft.cross_spectrum and replaced by `real_dim` flag.\"\n warnings.warn(msg, FutureWarning)\n\n if \"density\" in kwargs:\n density = kwargs.pop(\"density\")\n msg = (\n \"density flag will be deprecated in future version of xrft.cross_spectrum and replaced by scaling flag. \"\n + 'density=True should be replaced by scaling=\"density\" and '\n + \"density=False will not be maintained.\\nscaling flag is ignored !\"\n )\n warnings.warn(msg, FutureWarning)\n\n scaling = \"density\" if density else \"false_density\"\n\n kwargs.update({\"true_amplitude\": True})\n\n daft1 = fft(da1, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)\n daft2 = fft(da2, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)\n\n if daft1.dims != daft2.dims:\n raise ValueError(\"The two datasets have different dimensions\")\n\n updated_dims = [\n d for d in daft1.dims if (d not in da1.dims and \"segment\" not in d)\n ] # Transformed dimensions\n cs = daft1 * np.conj(daft2)\n\n if real_dim is not None:\n real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][\n 0\n ] # find transformed real dimension\n f = np.full(cs.sizes[real], 2.0)\n if len(da1[real_dim]) % 2 == 0:\n f[0], f[-1] = 1.0, 1.0\n else:\n f[0] = 1.0\n cs = cs * xr.DataArray(f, dims=real, coords=cs[real].coords)\n\n if scaling == \"density\":\n if window_correction:\n if kwargs.get(\"window\") == None:\n raise ValueError(\n \"window_correction can only be applied when windowing is turned on.\"\n )\n else:\n windows, _ = _apply_window(da, dim, window_type=kwargs.get(\"window\"))\n cs = cs / (windows ** 2).mean()\n fs = np.prod([float(cs[d].spacing) for d in updated_dims])\n cs *= fs\n elif scaling == \"spectrum\":\n if window_correction:\n if kwargs.get(\"window\") == None:\n raise ValueError(\n \"window_correction can only be applied when windowing is turned on.\"\n )\n else:\n windows, _ = _apply_window(da, dim, window_type=kwargs.get(\"window\"))\n cs = cs / windows.mean() ** 2\n fs = np.prod([float(cs[d].spacing) for d in updated_dims])\n cs *= fs ** 2\n elif scaling == \"false_density\": # Corresponds to density=False\n pass\n else:\n raise ValueError(\"Unknown {} scaling flag\".format(scaling))\n return cs\n\n\ndef cross_phase(da1, da2, dim=None, true_phase=False, **kwargs):\n \"\"\"\n Calculates the cross-phase between da1 and da2.\n\n Returned values are in [-pi, 
pi].\n\n .. math::\n da1' = da1 - \\overline{da1};\\ \\ da2' = da2 - \\overline{da2}\n .. math::\n cp = \\text{Arg} [\\mathbb{F}(da1')^*, \\mathbb{F}(da2')]\n\n Parameters\n ----------\n da1 : `xarray.DataArray`\n The data to be transformed\n da2 : `xarray.DataArray`\n The data to be transformed\n kwargs : dict : see xrft.dft for argument list\n \"\"\"\n if not true_phase:\n msg = (\n \"true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_phase output. \"\n + \"Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility \"\n + \"with numpy-like behavior where the coordinates are disregarded.\"\n )\n warnings.warn(msg, FutureWarning)\n\n cp = xr.ufuncs.angle(\n cross_spectrum(da1, da2, dim=dim, true_phase=true_phase, **kwargs)\n )\n\n if da1.name and da2.name:\n cp.name = \"{}_{}_phase\".format(da1.name, da2.name)\n\n return cp\n\n\ndef _binned_agg(\n array: np.ndarray,\n indices: np.ndarray,\n num_bins: int,\n *,\n func,\n fill_value,\n dtype,\n) -> np.ndarray:\n \"\"\"NumPy helper function for aggregating over bins.\"\"\"\n\n try:\n import numpy_groupies\n except ImportError:\n raise ImportError(\n \"This function requires the `numpy_groupies` package to be installed. Please install it with pip or conda.\"\n )\n\n mask = np.logical_not(np.isnan(indices))\n int_indices = indices[mask].astype(int)\n shape = array.shape[: -indices.ndim] + (num_bins,)\n result = numpy_groupies.aggregate(\n int_indices,\n array[..., mask],\n func=func,\n size=num_bins,\n fill_value=fill_value,\n dtype=dtype,\n axis=-1,\n )\n return result\n\n\ndef _groupby_bins_agg(\n array: xr.DataArray,\n group: xr.DataArray,\n bins,\n func=\"sum\",\n fill_value=0,\n dtype=None,\n **cut_kwargs,\n) -> xr.DataArray:\n \"\"\"Faster equivalent of Xarray's groupby_bins(...).sum().\"\"\"\n # https://github.com/pydata/xarray/issues/4473\n binned = pd.cut(np.ravel(group), bins, **cut_kwargs)\n new_dim_name = group.name + \"_bins\"\n indices = group.copy(data=binned.codes.reshape(group.shape))\n\n result = xr.apply_ufunc(\n _binned_agg,\n array,\n indices,\n input_core_dims=[indices.dims, indices.dims],\n output_core_dims=[[new_dim_name]],\n output_dtypes=[array.dtype],\n dask_gufunc_kwargs=dict(\n allow_rechunk=True,\n output_sizes={new_dim_name: binned.categories.size},\n ),\n kwargs={\n \"num_bins\": binned.categories.size,\n \"func\": func,\n \"fill_value\": fill_value,\n \"dtype\": dtype,\n },\n dask=\"parallelized\",\n )\n result.coords[new_dim_name] = binned.categories\n return result\n\n\ndef isotropize(ps, fftdim, nfactor=4, truncate=False):\n \"\"\"\n Isotropize a 2D power spectrum or cross spectrum\n by taking an azimuthal average.\n\n .. math::\n \\text{iso}_{ps} = k_r N^{-1} \\sum_{N} |\\mathbb{F}(da')|^2\n\n where :math:`N` is the number of azimuthal bins.\n\n Parameters\n ----------\n ps : `xarray.DataArray`\n The power spectrum or cross spectrum to be isotropized.\n fftdim : list\n The fft dimensions overwhich the isotropization must be performed.\n nfactor : int, optional\n Ratio of number of bins to take the azimuthal averaging with the\n data size. 
Default is 4.\n truncate : bool, optional\n If True, the spectrum will be truncated for wavenumbers larger than\n the Nyquist wavenumber.\n \"\"\"\n\n # compute radial wavenumber bins\n k = ps[fftdim[1]]\n l = ps[fftdim[0]]\n\n N = [k.size, l.size]\n nbins = int(min(N) / nfactor)\n freq_r = np.sqrt(k ** 2 + l ** 2).rename(\"freq_r\")\n kr = _groupby_bins_agg(freq_r, freq_r, bins=nbins, func=\"mean\")\n\n if truncate:\n if k.max() > l.max():\n kmax = l.max()\n else:\n kmax = k.max()\n kr = kr.where(kr <= kmax)\n else:\n msg = (\n \"The flag `truncate` will be set to True by default in future version \"\n + \"in order to truncate the isotropic wavenumber larger than the \"\n + \"Nyquist wavenumber.\"\n )\n warnings.warn(msg, FutureWarning)\n\n iso_ps = (\n _groupby_bins_agg(ps, freq_r, bins=nbins, func=\"mean\")\n .rename({\"freq_r_bins\": \"freq_r\"})\n .drop_vars(\"freq_r\")\n )\n iso_ps.coords[\"freq_r\"] = kr.data\n if truncate:\n return (iso_ps * iso_ps.freq_r).dropna(\"freq_r\")\n else:\n return iso_ps * iso_ps.freq_r\n\n\ndef isotropic_powerspectrum(*args, **kwargs): # pragma: no cover\n \"\"\"\n Deprecated function. See isotropic_power_spectrum doc\n \"\"\"\n msg = (\n \"This function has been renamed and will disappear in the future.\"\n + \" Please use isotropic_power_spectrum instead\"\n )\n warnings.warn(msg, Warning)\n return isotropic_power_spectrum(*args, **kwargs)\n\n\ndef isotropic_power_spectrum(\n da,\n spacing_tol=1e-3,\n dim=None,\n shift=True,\n detrend=None,\n scaling=\"density\",\n window=None,\n window_correction=False,\n nfactor=4,\n truncate=False,\n **kwargs,\n):\n \"\"\"\n Calculates the isotropic spectrum from the\n two-dimensional power spectrum by taking the\n azimuthal average.\n\n .. math::\n \\text{iso}_{ps} = k_r N^{-1} \\sum_{N} |\\mathbb{F}(da')|^2\n\n where :math:`N` is the number of azimuthal bins.\n\n Note: the method is not lazy does trigger computations.\n\n Parameters\n ----------\n da : `xarray.DataArray`\n The data to be transformed\n spacing_tol: float, optional\n Spacing tolerance. Fourier transform should not be applied to uneven grid but\n this restriction can be relaxed with this setting. Use caution.\n dim : list, optional\n The dimensions along which to take the transformation. If `None`, all\n dimensions will be transformed.\n shift : bool, optional\n Whether to shift the fft output.\n detrend : str, optional\n If `constant`, the mean across the transform dimensions will be\n subtracted before calculating the Fourier transform (FT).\n If `linear`, the linear least-square fit will be subtracted before\n the FT.\n density : list, optional\n If true, it will normalize the spectrum to spectral density\n window : str, optional\n Whether to apply a window to the data before the Fourier\n transform is taken. Please adhere to scipy.signal.windows for naming convention.\n nfactor : int, optional\n Ratio of number of bins to take the azimuthal averaging with the\n data size. 
Default is 4.\n truncate : bool, optional\n If True, the spectrum will be truncated for wavenumbers larger than\n the Nyquist wavenumber.\n\n Returns\n -------\n iso_ps : `xarray.DataArray`\n Isotropic power spectrum\n \"\"\"\n if \"density\" in kwargs:\n density = kwargs.pop(\"density\")\n scaling = \"density\" if density else \"false_density\"\n\n if dim is None:\n dim = da.dims\n if len(dim) != 2:\n raise ValueError(\"The Fourier transform should be two dimensional\")\n\n ps = power_spectrum(\n da,\n spacing_tol=spacing_tol,\n dim=dim,\n shift=shift,\n detrend=detrend,\n scaling=scaling,\n window_correction=window_correction,\n window=window,\n **kwargs,\n )\n\n fftdim = [\"freq_\" + d for d in dim]\n\n return isotropize(ps, fftdim, nfactor=nfactor, truncate=truncate)\n\n\ndef isotropic_crossspectrum(*args, **kwargs): # pragma: no cover\n \"\"\"\n Deprecated function. See isotropic_cross_spectrum doc\n \"\"\"\n msg = (\n \"This function has been renamed and will disappear in the future.\"\n + \" Please use isotropic_cross_spectrum instead\"\n )\n warnings.warn(msg, Warning)\n return isotropic_cross_spectrum(*args, **kwargs)\n\n\ndef isotropic_cross_spectrum(\n da1,\n da2,\n spacing_tol=1e-3,\n dim=None,\n shift=True,\n detrend=None,\n scaling=\"density\",\n window=None,\n window_correction=False,\n nfactor=4,\n truncate=False,\n **kwargs,\n):\n \"\"\"\n Calculates the isotropic spectrum from the\n two-dimensional power spectrumby taking the\n azimuthal average.\n\n .. math::\n \\text{iso}_{cs} = k_r N^{-1} \\sum_{N} (\\mathbb{F}(da1') {\\mathbb{F}(da2')}^*)\n\n where :math:`N` is the number of azimuthal bins.\n\n Note: the method is not lazy does trigger computations.\n\n Parameters\n ----------\n da1 : `xarray.DataArray`\n The data to be transformed\n da2 : `xarray.DataArray`\n The data to be transformed\n spacing_tol: float (default)\n Spacing tolerance. Fourier transform should not be applied to uneven grid but\n this restriction can be relaxed with this setting. Use caution.\n dim : list (optional)\n The dimensions along which to take the transformation. If `None`, all\n dimensions will be transformed.\n shift : bool (optional)\n Whether to shift the fft output.\n detrend : str (optional)\n If `constant`, the mean across the transform dimensions will be\n subtracted before calculating the Fourier transform (FT).\n If `linear`, the linear least-square fit will be subtracted before\n the FT.\n density : list (optional)\n If true, it will normalize the spectrum to spectral density\n window : str (optional)\n Whether to apply a window to the data before the Fourier\n transform is taken. Please adhere to scipy.signal.windows for naming convention.\n nfactor : int (optional)\n Ratio of number of bins to take the azimuthal averaging with the\n data size. 
Default is 4.\n truncate : bool, optional\n If True, the spectrum will be truncated for wavenumbers larger than\n the Nyquist wavenumber.\n\n Returns\n -------\n iso_cs : `xarray.DataArray`\n Isotropic cross spectrum\n \"\"\"\n if \"density\" in kwargs:\n density = kwargs.pop(\"density\")\n scaling = \"density\" if density else \"false_density\"\n\n if dim is None:\n dim = da1.dims\n dim2 = da2.dims\n if dim != dim2:\n raise ValueError(\"The two datasets have different dimensions\")\n if len(dim) != 2:\n raise ValueError(\"The Fourier transform should be two dimensional\")\n\n cs = cross_spectrum(\n da1,\n da2,\n spacing_tol=spacing_tol,\n dim=dim,\n shift=shift,\n detrend=detrend,\n scaling=scaling,\n window_correction=window_correction,\n window=window,\n **kwargs,\n )\n\n fftdim = [\"freq_\" + d for d in dim]\n\n return isotropize(cs, fftdim, nfactor=nfactor, truncate=truncate)\n\n\ndef fit_loglog(x, y):\n \"\"\"\n Fit a line to isotropic spectra in log-log space\n\n Parameters\n ----------\n x : `numpy.array`\n Coordinate of the data\n y : `numpy.array`\n data\n\n Returns\n -------\n y_fit : `numpy.array`\n The linear fit\n a : float64\n Slope of the fit\n b : float64\n Intercept of the fit\n \"\"\"\n # fig log vs log\n p = np.polyfit(np.log2(x), np.log2(y), 1)\n y_fit = 2 ** (np.log2(x) * p[0] + p[1])\n\n return y_fit, p[0], p[1]\n"
] |
[
[
"numpy.log2",
"pandas.api.types.is_datetime64_dtype",
"numpy.abs",
"numpy.conj",
"numpy.allclose",
"numpy.isnan",
"numpy.sqrt",
"numpy.fft.fftshift",
"numpy.full",
"numpy.diff",
"numpy.prod",
"numpy.fft.fftfreq",
"numpy.ravel",
"numpy.exp",
"numpy.flip"
]
] |
hashim361/text
|
[
"5a12211ac370f989ca359d232d3081a889e859dd"
] |
[
"tensorflow_text/python/ops/bert_tokenizer_test.py"
] |
[
"# coding=utf-8\n# Copyright 2020 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# encoding=utf-8\nr\"\"\"Tests for BertTokenizer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_map_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import test\nfrom tensorflow_text.python.ops import bert_tokenizer\n\n\ndef _utf8(x):\n return x.encode('utf-8')\n\n\n# TODO(thuang513): It appears there isn't a Ragged version of substr; consider\n# checking this into core TF.\ndef _ragged_substr(text_input, begin, end):\n text_input_flat = None\n if ragged_tensor.is_ragged(text_input):\n text_input_flat = text_input.flat_values\n else:\n text_input_flat = text_input\n\n def _ragged_tile(x):\n input_text, indices = x\n multiple = math_ops.reduce_sum(indices.row_lengths())\n return array_ops.tile([input_text], [multiple])\n\n broadcasted_text = ragged_map_ops.map_fn(\n _ragged_tile,\n (text_input_flat, begin),\n dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),\n infer_shape=False,\n )\n size = math_ops.sub(\n array_ops.squeeze(end.flat_values), array_ops.squeeze(begin.flat_values))\n new_tokens = string_ops.substr_v2(broadcasted_text,\n array_ops.squeeze(begin.flat_values), size)\n return begin.with_flat_values(new_tokens.flat_values)\n\n\n_VOCAB = [\n b'[unused1]',\n b'[unused23]',\n b\"'\",\n b'##%',\n b'##af',\n b'##book',\n b'##c',\n b'##fr',\n b'##hey',\n b'##is',\n b'##o',\n b'##ost',\n b'##s',\n b'##tri',\n b'##y',\n b'$',\n b'%',\n b'&',\n b'(',\n b')',\n b'*',\n b'-',\n b'.',\n b'20',\n b':',\n b'?',\n b'[CLS]',\n b'[SEP]',\n _utf8(u'國'),\n _utf8(u'暐'),\n _utf8(u'瀚'),\n _utf8(u'韓'),\n _utf8(u'食'),\n _utf8(u'黃'),\n _utf8(u'🤔'),\n _utf8(u'🤣'),\n b'^',\n b'a',\n b'ago',\n b'among',\n b'an',\n b'and',\n b'are',\n b'aren',\n b'awesome',\n b'between',\n b'candy',\n b'china',\n b'companies',\n b'company',\n b'crushed',\n b'dug',\n b'earnings',\n b'engaged',\n b'even',\n b'few',\n b'forecast',\n b'getting',\n b'had',\n b'han',\n b'has',\n b'hers',\n b'high',\n b'hit',\n b'hs',\n b'hurting',\n b'in',\n b'indie',\n b'is',\n b'isn',\n b'ka',\n b'ku',\n b'major',\n b'maker',\n b'moth',\n b'nearly',\n b'new',\n b'now',\n b'president',\n b'record',\n b'regulators',\n b'reported',\n b'rift',\n b'rust',\n b'sales',\n b'shares',\n b'slightly',\n b'sprint',\n b'states',\n b'stock',\n b't',\n b'taste',\n b'tension',\n b'that',\n b'the',\n b'this',\n b'today',\n b'told',\n b'topped',\n 
b'trade',\n b'trump',\n b'united',\n b'up',\n b'weeks',\n b'what',\n b'why',\n b'with',\n b'year',\n b'yo',\n b'yu',\n _utf8(u'\\u7231'),\n _utf8(u'\\u4e0a'),\n _utf8(u'\\u4e00'),\n _utf8(u'\\u4e2a'),\n _utf8(u'\\u4e0d'),\n _utf8(u'\\u56de'),\n _utf8(u'\\u5bb6'),\n _utf8(u'\\u7684'),\n _utf8(u'\\u4eba'),\n]\n\n\ndef _create_table(vocab, num_oov=1):\n init = lookup_ops.KeyValueTensorInitializer(\n vocab,\n math_ops.range(\n array_ops.size(vocab, out_type=dtypes.int64), dtype=dtypes.int64),\n key_dtype=dtypes.string,\n value_dtype=dtypes.int64)\n return lookup_ops.StaticVocabularyTableV1(\n init, num_oov, lookup_key_dtype=dtypes.string)\n\n\nclass BertTokenizerTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def test_bert_tokenizer_outputs(self):\n text_inputs = constant_op.constant([_utf8('Test')])\n vocab = _VOCAB\n table = _create_table(vocab, 2)\n self.evaluate(table.initializer)\n tokenizer = bert_tokenizer.BertTokenizer(\n table,\n token_out_type=dtypes.int32)\n results = tokenizer.tokenize(text_inputs)\n self.assertAllEqual(results.dtype, dtypes.int32)\n\n @parameterized.parameters([\n dict(\n text_inputs=[\n _utf8(u'taste the rustisc indiefrost'),\n _utf8(u'Han Kuo-yu (韓國食)🤔'),\n _utf8(u'Añade la información del formulario y tus preguntas'),\n ],\n expected_tokens=[[b'taste', b'the', b'rustisc', b'indiefrost'],\n [\n b'Han', b'Kuo', b'-', b'yu', b'(',\n b'\\xe9\\x9f\\x93', b'\\xe5\\x9c\\x8b',\n b'\\xe9\\xa3\\x9f', b')', b'\\xf0\\x9f\\xa4\\x94'\n ],\n [\n b'A\\xc3\\xb1ade', b'la', b'informaci\\xc3\\xb3n',\n b'del', b'formulario', b'y', b'tus', b'preguntas'\n ]],\n ),\n dict(\n text_inputs=[\n _utf8(u'UNwant\\u00E9d,running'),\n _utf8(u'Añade la información del formulario y tus preguntas'),\n ],\n expected_tokens=[[b'unwanted', b',', b'running'],\n [\n b'anade', b'la', b'informacion', b'del',\n b'formulario', b'y', b'tus', b'preguntas'\n ]],\n lower_case=True,\n ),\n dict(\n text_inputs=[\n _utf8(u'Añade la información del formulario y tus preguntas')\n ],\n expected_tokens=[[\n b'An\\xcc\\x83ade', b'la', b'informacio\\xcc\\x81n', b'del',\n b'formulario', b'y', b'tus', b'preguntas'\n ]],\n normalization_form='NFD',\n ),\n # Test CJK are tokenized by unicode characters\n dict(\n text_inputs=[\n _utf8(u'香港では4日'),\n _utf8(u'영어독해 자만심 왜 문제일까'),\n _utf8(u'據港媒《東網》報導')\n ],\n expected_tokens=[\n [_utf8(u'香'),\n _utf8(u'港'),\n _utf8(u'では4'),\n _utf8(u'日')],\n [\n _utf8(u'영어독해'),\n _utf8(u'자만심'),\n _utf8(u'왜'),\n _utf8(u'문제일까'),\n ],\n [\n _utf8(u'據'),\n _utf8(u'港'),\n _utf8(u'媒'),\n _utf8(u'《'),\n _utf8(u'東'),\n _utf8(u'網'),\n _utf8(u'》'),\n _utf8(u'報'),\n _utf8(u'導')\n ],\n ],\n normalization_form=None,\n ),\n # Test Katakana followed by Hiragana.\n dict(\n text_inputs=[_utf8(u'のテキストとして')],\n expected_tokens=[\n [_utf8(u'のテキストとして')],\n ],\n normalization_form=None,\n ),\n ])\n @test_util.run_in_graph_and_eager_modes\n def test_basic_tokenize(self,\n text_inputs,\n expected_tokens,\n lower_case=False,\n normalization_form='NFC'):\n text_inputs = ragged_factory_ops.constant(text_inputs)\n tokenizer = bert_tokenizer.BasicTokenizer(\n lower_case=lower_case, normalization_form=normalization_form)\n tokens = tokenizer.tokenize(text_inputs)\n self.assertAllEqual(tokens, expected_tokens)\n\n @parameterized.parameters([\n dict(\n text_inputs=[\n b'taste the rustisc indiefrost',\n _utf8(u'Han Kuo-yu (韓國食)🤔'),\n _utf8(u'dugtrio had an awesome 🤣 dugbook'),\n b'yo^what$is*up?',\n b'mothaf*&%ka',\n ],\n expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],\n [b'indie', b'##fr', 
b'##ost']],\n [[b'han'], [b'ku', b'##o'], [b'-'], [b'yu'], [b'('],\n [_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')], [b')'],\n [_utf8(u'🤔')]],\n [[b'dug', b'##tri', b'##o'], [b'had'], [b'an'],\n [b'awesome'], [_utf8(u'🤣')], [b'dug', b'##book']],\n [[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'], [b'*'],\n [b'up'], [b'?']],\n [[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka']]],\n expected_extracted=[[[b'taste'], [b'the'], [b'rust', b'is', b'c'],\n [b'indie', b'fr', b'ost']],\n [[b'Han'], [b'Ku', b'o'], [b'-'], [b'yu'], [b'('],\n [_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')],\n [b')'], [_utf8(u'🤔')]],\n [[b'dug', b'tri', b'o'], [b'had'], [b'an'],\n [b'awesome'], [_utf8(u'🤣')], [b'dug', b'book']],\n [[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'],\n [b'*'], [b'up'], [b'?']],\n [[b'moth', b'af'], [b'*'], [b'&'], [b'%'],\n [b'ka']]],\n lower_case=True,\n ),\n # Test when we are expecting multiple OOV vocab ids and tf.string just\n # maps out [UNK] token.\n dict(\n text_inputs=[\n b'mothaf*&%ka cantfindme whodis',\n ],\n expected=[[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka'],\n [b'[UNK]'], [b'[UNK]']]],\n expected_extracted=[[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],\n [b'ka'], [b'cantfindme'], [b'whodis']]],\n lower_case=True,\n num_oov=2,\n ),\n dict(\n text_inputs=[\n b'candy',\n ],\n expected=[[[b'candy']]],\n lower_case=True,\n num_oov=2,\n ),\n dict(\n text_inputs=[\n _utf8(u'爱上一个不回家的人'),\n ],\n expected=[[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],\n [_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],\n [_utf8(u'人')]]],\n lower_case=True,\n num_oov=2,\n ),\n # Test 'preserve_unused_token' option\n dict(\n text_inputs=[\n b'taste the rustisc indiefrost [unused1]',\n _utf8(u'爱上一个不回家的人[unused23]'),\n ],\n expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],\n [b'indie', b'##fr', b'##ost'], [b'[unused1]']],\n [[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],\n [_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],\n [_utf8(u'人')], [b'[unused23]']]],\n preserve_unused_token=True,\n ),\n ])\n @test_util.run_in_graph_and_eager_modes\n def test_bert_tokenizer(self,\n text_inputs,\n expected,\n vocab=None,\n expected_extracted=None,\n lower_case=True,\n num_oov=1,\n preserve_unused_token=False):\n text_inputs = constant_op.constant(text_inputs)\n if not vocab:\n vocab = _VOCAB\n table = _create_table(vocab, num_oov)\n self.evaluate(table.initializer)\n tokenizer = bert_tokenizer.BertTokenizer(\n table,\n token_out_type=dtypes.string,\n lower_case=lower_case,\n preserve_unused_token=preserve_unused_token)\n results = tokenizer.tokenize(text_inputs)\n self.assertAllEqual(results, expected)\n\n # Verify that the int ids are the same.\n expected_rt = ragged_factory_ops.constant(expected)\n expected_int = table.lookup(expected_rt.flat_values)\n expected_int_rt = ragged_tensor.RaggedTensor.from_nested_row_splits(\n expected_int, expected_rt.nested_row_splits)\n int_tokenizer = bert_tokenizer.BertTokenizer(\n vocab_lookup_table=table,\n token_out_type=dtypes.int64,\n lower_case=lower_case,\n preserve_unused_token=preserve_unused_token)\n results_int = int_tokenizer.tokenize(text_inputs)\n self.assertAllEqual(results_int, expected_int_rt)\n\n # Verify that the offsets can extract the expected tokens\n _, begin, end = tokenizer.tokenize_with_offsets(text_inputs)\n\n extracted_wordpieces = _ragged_substr(text_inputs, begin, end)\n if expected_extracted:\n self.assertAllEqual(extracted_wordpieces, expected_extracted)\n else:\n # The extracted won't have any 
wordpieces with '##' prefix. Strip them\n # out.\n stripped_prefix_flat = string_ops.regex_replace(expected_rt.flat_values,\n '##', '')\n stripped_prefix = expected_rt.with_flat_values(stripped_prefix_flat)\n self.assertAllEqual(extracted_wordpieces, stripped_prefix)\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensorType",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_nested_row_splits",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.string_ops.regex_replace",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.python.ops.lookup_ops.StaticVocabularyTableV1",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.framework.constant_op.constant"
]
] |
goodarzilab/pyteiser
|
[
"3ac78604c768957022cc7751ccdd337960a816f2"
] |
[
"pyteiser/wrappers/preprocess_custom_expression_profile.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport argparse\n\nfrom .. import IO\n\n\ndef handler(raw_args = None):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--rna_bin_file\", help=\"binarized sequence file\", type=str)\n parser.add_argument(\"--exp_values_file\", help=\"expression values in a csv format\", type=str)\n parser.add_argument(\"--exp_mask_file\", help=\"output file: indicates which sequences are present in the \"\n \"expression file and the expression values for these sequences\", type=str)\n\n parser.add_argument(\"--anno_name_column\", help=\"column name in exp_values file that contains annotations\", type=str)\n parser.add_argument(\"--measur_column\", help=\"column name in exp_values file that contains expression measurements\", type=str)\n\n parser.set_defaults(\n rna_bin_file='/Users/student/Documents/hani/iTEISER/step_2_preprocessing/reference_files/reference_transcriptomes/binarized/SNRNPA1_SE.hg38.fl250.bin',\n exp_values_file='/Users/student/Documents/hani/programs/pyteiser/data/expression_data/hg38_miso_se.txt',\n exp_mask_file='/Users/student/Documents/hani/programs/pyteiser/data/mask_files/SNRNPA1_PSI_mask.bin',\n\n anno_name_column='eid',\n measur_column='diff',\n )\n\n args = parser.parse_args(raw_args)\n return args\n\n\ndef read_exp_values_file(args, return_meas_dict = True):\n exp_df = pd.read_csv(args.exp_values_file, sep='\\t',\n dtype = {args.anno_name_column : str})\n exp_df.index = exp_df[args.anno_name_column]\n\n if return_meas_dict:\n measurements_dict_full = exp_df.to_dict()\n measurements_dict = measurements_dict_full[args.measur_column]\n\n return exp_df, measurements_dict\n else:\n return exp_df\n\n\ndef construct_mask_arrays(args):\n seqs_dict, seqs_order = IO.read_rna_bin_file(args.rna_bin_file)\n exp_df, measurements_dict = read_exp_values_file(args)\n\n transcripts_measured_list = exp_df[args.anno_name_column].tolist()\n transcripts_measured_set = set(transcripts_measured_list)\n\n list_indices_occuring = [1 if x in transcripts_measured_set else 0 for x in seqs_order]\n list_measurement_values = [measurements_dict[x] if x in transcripts_measured_set else 0 for x in\n seqs_order]\n\n array_indices_occuring = np.array(list_indices_occuring, dtype=np.bool)\n array_measurement_values = np.array(list_measurement_values, dtype=np.float32)\n\n return array_indices_occuring, array_measurement_values\n\n\ndef compress_write_mask_arrays(index_array, values_array, args):\n assert(index_array.shape == values_array.shape)\n length_uint32 = np.array([index_array.shape], dtype=np.uint32)\n length_bitstring = length_uint32.tobytes()\n index_array_bytes = index_array.tobytes()\n values_array_bytes = values_array.tobytes()\n full_bytes_string = length_bitstring + index_array_bytes + values_array_bytes\n\n with open(args.exp_mask_file, 'wb') as wb:\n wb.write(full_bytes_string)\n\n\ndef main(raw_args = None):\n args = handler(raw_args)\n array_indices_occuring, array_measurement_values = construct_mask_arrays(args)\n compress_write_mask_arrays(array_indices_occuring, array_measurement_values, args)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array",
"pandas.read_csv"
]
] |
nikolaevra/tf-faster-rcnn
|
[
"4a5a5f9cfd4dc6548ee9cf63f1122eadbc06ea39"
] |
[
"lib/wrapper/faster_rcnn_wrapper.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport cv2\nimport os\n\nimport tensorflow as tf\nfrom model.config import cfg\nfrom model.test import im_detect\nfrom nets.resnet_v1 import resnetv1\nfrom nets.vgg16 import vgg16\nfrom utils.timer import Timer\n\nCLASSES = ('__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\nNETS = {\n 'vgg16': ('vgg16_faster_rcnn_iter_70000.ckpt',),\n 'res101': ('res101_faster_rcnn_iter_110000.ckpt',)\n}\n\nDATASETS = {\n 'pascal_voc': ('voc_2007_trainval',),\n 'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)\n}\n\n\nclass DetectorWrapper:\n def __init__(self, extraction_net='res101', dataset='pascal_voc_0712', num_classes=21,\n tag='default', anchor_scales=[8, 16, 32], anchor_ratios=(0.5, 1, 2)):\n cfg.TEST.HAS_RPN = True\n\n # model path\n self.extraction_net = extraction_net\n self.dataset = dataset\n self.tfmodel = os.path.join(\n 'output',\n extraction_net,\n DATASETS[dataset][0],\n 'default',\n NETS[extraction_net][0]\n )\n\n if not os.path.isfile(self.tfmodel + '.meta'):\n raise IOError('{:s} not found.\\n'.format(self.tfmodel + '.meta'))\n\n # Make sure we allow using CPU when GPU is not available\n self.tfconfig = tf.ConfigProto(allow_soft_placement=True)\n # make sure we first allocate small amount of GPU power and grow it as needed\n self.tfconfig.gpu_options.allow_growth = True\n\n # init tf session\n self.sess = tf.Session(config=self.tfconfig)\n\n self.net = None\n # load network\n if extraction_net == 'vgg16':\n self.net = vgg16()\n elif extraction_net == 'res101':\n self.net = resnetv1(num_layers=101)\n else:\n raise NotImplementedError\n\n self.net.create_architecture(\n \"TEST\",\n num_classes=num_classes,\n tag=tag,\n anchor_scales=anchor_scales,\n anchor_ratios=anchor_ratios\n )\n\n # Saver is an easy interface to save/load models and its weights based on a checkpoint\n # number.\n self.saver = tf.train.Saver()\n # Load model and weights for the pre-trained extraction model.\n self.saver.restore(self.sess, self.tfmodel)\n\n print('Loaded network {:s}'.format(self.tfmodel))\n\n def detect(self, images):\n \"\"\" Detect images from array of image filenames.\n\n :param images: list of image filenames to be detected.\n :return: dict(dict()) of detections\n \"\"\"\n detections = {}\n\n for image in images:\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image)\n im = cv2.imread(im_file)\n\n timer = Timer()\n timer.tic()\n\n # Get image detections\n scores, boxes = im_detect(self.sess, self.net, im)\n timer.toc()\n total_t = timer.total_time\n\n print('Detection took {:.3f}s for {:d} proposals'.format(total_t, boxes.shape[0]))\n\n detections[image] = {\n \"scores\": scores,\n \"boxes\": boxes,\n \"detection_time\": total_t\n }\n\n return detections\n"
] |
[
[
"tensorflow.ConfigProto",
"tensorflow.train.Saver",
"tensorflow.Session"
]
] |